Whamcloud - gitweb
Branch HEAD
[fs/lustre-release.git] / lustre / kernel_patches / patches / qsnet-rhel-2.4.patch
1 Index: linux-2.4.21/arch/i386/kernel/i386_ksyms.c
2 ===================================================================
3 --- linux-2.4.21.orig/arch/i386/kernel/i386_ksyms.c     2005-06-01 22:51:51.000000000 -0400
4 +++ linux-2.4.21/arch/i386/kernel/i386_ksyms.c  2005-06-01 23:12:54.521450960 -0400
5 @@ -220,3 +220,12 @@
6  EXPORT_SYMBOL_GPL(__PAGE_KERNEL);
7  extern unsigned long long __supported_pte_mask;
8  EXPORT_SYMBOL_GPL(__supported_pte_mask);
9 +
10 +extern asmlinkage long sys_open(const char *, int, int);
11 +EXPORT_SYMBOL(sys_open);
12 +extern asmlinkage off_t sys_lseek(unsigned int, off_t, unsigned int);
13 +EXPORT_SYMBOL(sys_lseek);
14 +extern asmlinkage long sys_poll(struct pollfd *, unsigned int, long);
15 +EXPORT_SYMBOL(sys_poll);
16 +extern asmlinkage long sys_kill(int, int);
17 +EXPORT_SYMBOL(sys_kill);
18 Index: linux-2.4.21/arch/ia64/kernel/ia64_ksyms.c
19 ===================================================================
20 --- linux-2.4.21.orig/arch/ia64/kernel/ia64_ksyms.c     2005-06-01 22:51:59.000000000 -0400
21 +++ linux-2.4.21/arch/ia64/kernel/ia64_ksyms.c  2005-06-01 23:14:43.773842072 -0400
22 @@ -207,3 +207,13 @@
23  EXPORT_SYMBOL_GPL(show_state);
24  EXPORT_SYMBOL_GPL(show_regs);
25  EXPORT_SYMBOL(pm_power_off);
26 +
27 +#define __KERNEL_SYSCALLS__ 1
28 +#include <asm/unistd.h>
29 +EXPORT_SYMBOL(sys_open);
30 +extern asmlinkage off_t sys_lseek(unsigned int, off_t, unsigned int);
31 +EXPORT_SYMBOL(sys_lseek);
32 +extern asmlinkage long sys_poll(struct pollfd *, unsigned int, long);
33 +EXPORT_SYMBOL(sys_poll);
34 +extern asmlinkage long sys_kill(int, int);
35 +EXPORT_SYMBOL(sys_kill);
36 Index: linux-2.4.21/arch/x86_64/kernel/x8664_ksyms.c
37 ===================================================================
38 --- linux-2.4.21.orig/arch/x86_64/kernel/x8664_ksyms.c  2005-06-01 22:51:51.000000000 -0400
39 +++ linux-2.4.21/arch/x86_64/kernel/x8664_ksyms.c       2005-06-01 23:12:54.522450808 -0400
40 @@ -215,6 +215,10 @@
41  EXPORT_SYMBOL(sys_exit);
42  EXPORT_SYMBOL(sys_open);
43  EXPORT_SYMBOL(sys_lseek);
44 +extern asmlinkage long sys_poll(struct pollfd *, unsigned int, long);
45 +EXPORT_SYMBOL(sys_poll);
46 +extern asmlinkage long sys_kill(int, int);
47 +EXPORT_SYMBOL(sys_kill);
48  EXPORT_SYMBOL(sys_delete_module);
49  EXPORT_SYMBOL(sys_sync);
50  EXPORT_SYMBOL(sys_pause);
51 Index: linux-2.4.21/Documentation/Configure.help
52 ===================================================================
53 --- linux-2.4.21.orig/Documentation/Configure.help      2005-06-01 23:12:39.856680344 -0400
54 +++ linux-2.4.21/Documentation/Configure.help   2005-06-01 23:12:54.547447008 -0400
55 @@ -28030,6 +28030,54 @@
56    kernel tree does. Such modules that use library CRC32 functions
57    require M here.
58  
59 +
60 +Enable support for Quadrics QsNet (QSNET)
61 +CONFIG_QSNET
62 +    Quadrics QsNet is a high bandwidth, ultra low latency cluster 
63 +    interconnect which provides both user and kernel programmers with
64 +    secure, direct access to the Quadrics network.
65 +         
66 +Elan 3 device driver (ELAN3)
67 +CONFIG_ELAN3
68 +   This is the main device driver for the Quadrics QsNet (Elan3) PCI
69 +   device. This is a high bandwidth, ultra low latency interconnect 
70 +   which provides both user and kernel programmers with secure, direct
71 +   access to the Quadrics network.
72 +         
73 +Elan 3 Kernel Comms (EP3)
74 +CONFIG_EP3
75 +   This modules implements the QsNet kernel communications layer. This
76 +   is used to layer kernel level facilities on top of the basic Elan3 
77 +   device driver. These can be used to implement subsystems such as
78 +   TCP/IP and remote filing systems over the QsNet interconnect.
79 +         
80 +Elan IP device (EIP)
81 +CONFIG_EIP
82 +   This is a network IP device driver for the Quadrics QsNet device.
83 +   It allows the TCP/IP protocol to be run over the Quadrics interconnect.
84 +
85 +Elan 4 device driver (ELAN4)
86 +CONFIG_ELAN4
87 +   This is the main device driver for the Quadrics QsNetII (Elan4) PCI-X 
88 +   device. This is a high bandwidth, ultra low latency interconnect which 
89 +   provides both user and kernel programmers with secure, direct access to 
90 +   the Quadrics network.
91 +Resource Management System support (RMS)
92 +CONFIG_RMS
93 +   This is a support module for the Quadrics RMS resource manager. It 
94 +   provides kernel services for monitoring and controlling user job 
95 +   execution, termination and cleanup.
96 +     
97 +Switch monitoring (JTAG)
98 +CONFIG_JTAG
99 +   The jtag interface is used to allow processes to send and retrieve jtag
100 +   information to a Quadrics QsNet Elite switch via the parallel port. 
101 +   The module requires a /dev/jtag[0-3] entry (usually there is only a 
102 +   /dev/jtag0) device and a particular device only allows one process at a
103 +   time to access this resource. 
104 +   For more information about the JTAG interface, please refer to the IEEE 
105 +   document on http://www.ieee.org 
106 +  
107  #
108  # A couple of things I keep forgetting:
109  #   capitalize: AppleTalk, Ethernet, DOS, DMA, FAT, FTP, Internet,
110 Index: linux-2.4.21/drivers/net/Config.in
111 ===================================================================
112 --- linux-2.4.21.orig/drivers/net/Config.in     2005-06-01 22:52:03.000000000 -0400
113 +++ linux-2.4.21/drivers/net/Config.in  2005-06-01 23:12:54.549446704 -0400
114 @@ -272,6 +272,9 @@
115  
116  endmenu
117  
118 +# Quadrics QsNet
119 +source drivers/net/qsnet/Config.in
120 +
121  if [ "$CONFIG_PPC_ISERIES" = "y" ]; then
122     dep_tristate 'iSeries Virtual Ethernet driver support' CONFIG_VETH $CONFIG_PPC_ISERIES
123  fi
124 Index: linux-2.4.21/drivers/net/Makefile
125 ===================================================================
126 --- linux-2.4.21.orig/drivers/net/Makefile      2005-06-01 22:52:03.000000000 -0400
127 +++ linux-2.4.21/drivers/net/Makefile   2005-06-01 23:12:54.550446552 -0400
128 @@ -8,7 +8,7 @@
129  obj-n           :=
130  obj-            :=
131  
132 -mod-subdirs     := appletalk arcnet fc irda tokenring pcmcia wireless wireless_old wan
133 +mod-subdirs     := appletalk arcnet fc irda tokenring pcmcia wireless wireless_old wan qsnet
134  
135  O_TARGET := net.o
136  
137 @@ -48,6 +48,7 @@
138  subdir-$(CONFIG_DEV_APPLETALK) += appletalk
139  subdir-$(CONFIG_SK98LIN) += sk98lin
140  subdir-$(CONFIG_SKFP) += skfp
141 +subdir-$(CONFIG_QSNET) += qsnet
142  subdir-$(CONFIG_E100) += e100
143  subdir-$(CONFIG_E1000) += e1000
144  subdir-$(CONFIG_BONDING) += bonding
145 Index: linux-2.4.21/drivers/net/qsnet/Config.in
146 ===================================================================
147 --- linux-2.4.21.orig/drivers/net/qsnet/Config.in       2004-02-23 16:02:56.000000000 -0500
148 +++ linux-2.4.21/drivers/net/qsnet/Config.in    2005-06-01 23:12:54.550446552 -0400
149 @@ -0,0 +1,25 @@
150 +#
151 +# Config.in for Quadrics QsNet
152 +#
153 +# Copyright (c) 2004 Quadrics Ltd
154 +#
155 +# File: drivers/net/qsnet/Config.in
156 +#
157 +
158 +mainmenu_option next_comment
159 +comment "Quadrics QsNet device support"
160 +
161 +dep_tristate "Enable support for Quadrics QsNet"        CONFIG_QSNET    $CONFIG_PCI
162 +
163 +dep_tristate "Elan 3 device driver"                    CONFIG_ELAN3    $CONFIG_QSNET
164 +dep_tristate "Elan 4 device driver"                    CONFIG_ELAN4    $CONFIG_QSNET
165 +
166 +if [ "$CONFIG_ELAN3" = "$CONFIG_QSNET" ] || [ "$CONFIG_ELAN4" = "$CONFIG_QSNET" ]; then
167 +       dep_tristate "Elan Kernel Comms"                        CONFIG_EP       $CONFIG_QSNET
168 +fi
169 +dep_tristate "Elan IP device"                             CONFIG_EIP      $CONFIG_NET  $CONFIG_EP
170 +
171 +dep_tristate "Resource Management System support"        CONFIG_RMS      $CONFIG_QSNET
172 +dep_tristate "Switch monitoring"                         CONFIG_JTAG     $CONFIG_QSNET
173 +
174 +endmenu
175 Index: linux-2.4.21/drivers/net/qsnet/eip/eip_linux.c
176 ===================================================================
177 --- linux-2.4.21.orig/drivers/net/qsnet/eip/eip_linux.c 2004-02-23 16:02:56.000000000 -0500
178 +++ linux-2.4.21/drivers/net/qsnet/eip/eip_linux.c      2005-06-01 23:12:54.553446096 -0400
179 @@ -0,0 +1,1565 @@
180 +/*
181 + *    Copyright (c) 2003 by Quadrics Ltd.
182 + * 
183 + *    For licensing information please see the supplied COPYING file
184 + *
185 + */
186 +
187 +#ident "@(#)$Id: eip_linux.c,v 1.89.2.3 2004/12/20 16:54:05 mike Exp $"
188 +
189 +#include <qsnet/kernel.h>
190 +#include <qsnet/debug.h>
191 +
192 +#include <linux/module.h>
193 +
194 +#include <linux/init.h>
195 +#include <linux/list.h>
196 +#include <linux/netdevice.h>
197 +#include <linux/etherdevice.h>
198 +#include <linux/skbuff.h>
199 +#include <linux/kernel.h>
200 +#include <linux/proc_fs.h>
201 +#include <linux/time.h>
202 +#include <linux/version.h>
203 +
204 +#include <asm/uaccess.h>
205 +#include <asm/unaligned.h>
206 +
207 +#undef ASSERT
208 +#include <net/sock.h>
209 +#include <net/ip.h>
210 +
211 +
212 +
213 +#include <elan/epcomms.h>
214 +#include <elan/epsvc.h>
215 +
216 +#include "eip_linux.h"
217 +#include "eip_stats.h"
218 +
219 +#ifdef UNUSED
220 +static void eip_skb_display(struct sk_buff *);
221 +#endif
222 +static void eip_iph_display(struct iphdr *);
223 +#ifdef UNUSED
224 +static void eip_eiph_display(EIP_HEADER *);
225 +static void eip_packet_display(unsigned char *);
226 +#endif
227 +static void eip_tmd_display(EIP_TMD *);
228 +static void eip_tmd_head_display(EIP_TMD_HEAD *);
229 +static void eip_rmd_display(EIP_RMD *);
230 +static void eip_rmd_head_display(EIP_RMD_HEAD *);
231 +
232 +static void eip_rmd_reclaim(EIP_RMD *);
233 +
234 +static inline EP_NMH *eip_dma_reserve(int, int);
235 +static inline void __eip_tmd_load(EIP_TMD *, EP_RAILMASK *);
236 +static inline void __eip_tmd_unload(EIP_TMD *);
237 +static inline unsigned long eip_buff_alloc(int, int);
238 +static inline void eip_buff_free(unsigned long, int);
239 +static struct iphdr *eip_ipfrag_get(char *);
240 +static inline void eip_rmd_free(EIP_RMD *);
241 +static inline void eip_skb_load(EIP_RMD *);
242 +static inline void eip_skb_unload(EIP_RMD *);
243 +static inline void eip_rmd_requeue(EIP_RMD *);
244 +static EIP_RMD *eip_rmd_alloc(int, int);
245 +static int eip_rmd_alloc_replace(EIP_RMD *, int, int);
246 +static int eip_rmd_alloc_queue(int, int, int, int);
247 +static int eip_rmds_alloc(void);
248 +static void eip_rxhandler(EP_RXD *);
249 +static void eip_rx_tasklet(unsigned long);
250 +static inline void eip_tmd_init(EIP_TMD *, unsigned long, EIP_TMD_HEAD *, unsigned long, int);
251 +static inline EIP_TMD *eip_tmd_get(int);
252 +static inline void eip_tmd_put(EIP_TMD *);
253 +static inline void eip_tmd_load(EIP_TMD *);
254 +static inline void eip_tmd_unload(EIP_TMD *);
255 +static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD *, EIP_TMD_HEAD *, int);
256 +static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD *, int);
257 +static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD *, int);
258 +static int eip_tmds_alloc(void);
259 +int eip_hard_start_xmit(struct sk_buff *, struct net_device *);
260 +static inline int eip_do_xmit(EIP_TMD *, EP_NMD *i, EP_PAYLOAD *);
261 +static void eip_txhandler(EP_TXD *, void *, EP_STATUS);
262 +static void eip_tx_tasklet(unsigned long);
263 +void eip_stop_queue(void);
264 +void eip_start_queue(void);
265 +static int eip_open(struct net_device *);
266 +static int eip_close(struct net_device *);
267 +static struct net_device_stats *eip_get_stats(struct net_device *);
268 +static int eip_change_mtu(struct net_device *, int);
269 +
270 +static int eip_rx_dropping = 0;
271 +static int eip_rx_tasklet_locked = 1;
272 +
273 +/* Global */
274 +struct timer_list eip_rx_tasklet_timer;
275 +       
276 +EIP_RX *eip_rx = NULL;
277 +EIP_TX *eip_tx = NULL;
278 +int  eip_checksum_state=CHECKSUM_NONE;
279 +
280 +int tmd_max = EIP_TMD_MAX_NR;
281 +int rmd_max = EIP_RMD_MAX_NR;
282 +int rx_envelope_nr = EIP_RX_ENVELOPE_NR;
283 +int rx_granularity = EIP_RX_GRANULARITY;
284 +int tx_copybreak_max = EIP_TX_COPYBREAK_MAX;
285 +EP_RAILMASK tx_railmask = EP_RAILMASK_ALL;
286 +int eipdebug = 0;
287 +
288 +#ifdef UNUSED
289 +static void eip_skb_display(struct sk_buff *skb)
290 +{
291 +       if (skb) {
292 +               __EIP_DBG_PRINTF("SKB [%p] : len %d truesize %d  proto %x pkt type %x cloned %d users %d summed %d\n", 
293 +                       skb, skb->len, skb->truesize, skb->protocol, skb->pkt_type, skb->cloned, atomic_read(&skb->users), skb->ip_summed);
294 +               __EIP_DBG_PRINTF("SKB [%p] : skb_shinfo dataref %d nr_frags %d frag_list[%p] (device %p)\n", skb,
295 +                        atomic_read(&skb_shinfo(skb)->dataref), skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->frag_list, skb->dev);
296 +               __EIP_DBG_PRINTF("SKB [%p] : head[%p] data[%p] tail [%p] end [%p] data_len [%d]\n", skb, skb->head, skb->data, 
297 +                               skb->tail, skb->end, skb->data_len);
298 +               __EIP_DBG_PRINTF("SKB [%p] : Transport Layer h.(th, uh, icmph, raw)[%p]\n", skb, skb->h.th);
299 +               __EIP_DBG_PRINTF("SKB [%p] : Network Layer      nh.(iph, arph, raw)[%p]\n", skb, skb->nh.iph);
300 +               __EIP_DBG_PRINTF("SKB [%p] : Link Layer         mac.(ethernet, raw)[%p]\n", skb, skb->mac.ethernet);
301 +               return;
302 +       }
303 +       EIP_ERR_PRINTF("SKB IS NULL - NO SKB TO DISPLAY\n");
304 +}
305 +#endif
306 +static void eip_iph_display(struct iphdr *iph)
307 +{
308 +       if (iph) {
309 +               __EIP_DBG_PRINTF("IPH [%p] : version %d header len %d TOS 0x%x Total len %d\n", 
310 +                       iph, iph->version, iph->ihl, htons(iph->tos), htons(iph->tot_len));
311 +               __EIP_DBG_PRINTF("IPH [%p] : id %d frag flags 0x%x offset %d\n",
312 +                               iph, htons(iph->id), (iph->frag_off & htons(IP_CE | IP_DF | IP_MF)) >> 4, 
313 +                               (htons(iph->frag_off) << 3) & IP_OFFSET);
314 +               __EIP_DBG_PRINTF("IPH [%p] : TTL %d proto %d header checksum 0x%x\n", iph, iph->ttl, iph->protocol, iph->check);
315 +               __EIP_DBG_PRINTF("IPH [%p] : IP src %u.%u.%u.%u dest %u.%u.%u.%u\n", iph, 
316 +                                ((unsigned char *)&(iph->saddr))[0],((unsigned char *)&(iph->saddr))[1], ((unsigned char *)&(iph->saddr))[2],((unsigned char *)&(iph->saddr))[3],
317 +                                ((unsigned char *)&(iph->daddr))[0],((unsigned char *)&(iph->daddr))[1], ((unsigned char *)&(iph->daddr))[2],((unsigned char *)&(iph->daddr))[3]);
318 +               return;
319 +       }
320 +       EIP_ERR_PRINTF("IPH IS NULL - NO IPH TO DISPLAY\n");
321 +}
322 +#ifdef UNUSED
323 +static void eip_eiph_display(EIP_HEADER * eiph)
324 +{
325 +       if (eiph) {
326 +               __EIP_DBG_PRINTF("EIPH [%p] : dhost %04x.%04x.%04x sap %x\n", eiph, eiph->h_dhost.ip_bcast, eiph->h_dhost.ip_inst, 
327 +                               eiph->h_dhost.ip_addr, eiph->h_sap);
328 +               __EIP_DBG_PRINTF("EIPH [%p] : shost %04x.%04x.%04x \n", eiph, eiph->h_shost.ip_bcast, eiph->h_shost.ip_inst,
329 +                                eiph->h_shost.ip_addr);
330 +               return;
331 +       }
332 +       EIP_ERR_PRINTF("EIPH IS NULL - NO EIPH TO DISPLAY\n");
333 +}
334 +static void eip_packet_display(unsigned char *data)
335 +{
336 +       eip_eiph_display((EIP_HEADER *) data);
337 +       eip_iph_display((struct iphdr *) (data + EIP_HEADER_PAD + ETH_HLEN));
338 +}
339 +#endif
340 +static void eip_tmd_display(EIP_TMD * tmd)
341 +{
342 +       if (tmd) {
343 +               __EIP_DBG_PRINTF("\t\tTMD [%p] : next[%p] skb[%p] DVMA[%d]\n", tmd, tmd->chain.next, tmd->skb, tmd->dvma_idx);
344 +               if (tmd->dma_base)
345 +                       __EIP_DBG_PRINTF("TMD [%p] : head[%p] *data 0x%lx\n", tmd, tmd->head, *((unsigned long *) tmd->dma_base));
346 +               else
347 +                       __EIP_DBG_PRINTF("TMD [%p] : head[%p] NO DATA !!!\n", tmd, tmd->head);
348 +               __EIP_DBG_PRINTF("TMD [%p] : DMA(%lx,%d,%d) ebase[%x]\n",tmd,  tmd->dma_base, tmd->dma_len, tmd->nmd.nmd_len,
349 +                                tmd->nmd.nmd_addr);
350 +               return;
351 +       }
352 +       EIP_ERR_PRINTF("TMD IS NULL - NO TMD TO DISPLAY\n");
353 +       
354 +}
355 +static void eip_ipf_display(EIP_IPFRAG * ipf)
356 +{
357 +       if (ipf) {
358 +               __EIP_DBG_PRINTF("IPF[%p] : datagram len %d dma correction %d uts %lx frag_nr %d\n", ipf, ipf->datagram_len,
359 +                               ipf->dma_correction, ipf->timestamp.tv_usec, ipf->frag_nr);
360 +               eip_tmd_display((EIP_TMD *) ipf);
361 +               return;
362 +       }
363 +       EIP_ERR_PRINTF("IPF IS NULL - NO IPF TO DISPLAY\n");
364 +}
365 +
366 +static void eip_tmd_head_display(EIP_TMD_HEAD * head)
367 +{
368 +       if (head) {
369 +               __EIP_DBG_PRINTF("TMD HEAD [%p] : handle[%p] tmds[%p] %3.3d/%3.3d/%3.3d\n", head, head->handle, head->tmd, 
370 +                       EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats),
371 +                       eip_tx->tmd_max_nr);
372 +               return;
373 +       }
374 +       EIP_ERR_PRINTF("TMD HEAD IS NULL - NO TMD HEAD TO DISPLAY\n");
375 +}
376 +static void eip_rmd_display(EIP_RMD * rmd)
377 +{
378 +       if (rmd) {
379 +               __EIP_DBG_PRINTF("RMD [%p] : next[%p] rxd[%p] DVMA[%d]\n", rmd, rmd->chain.next, rmd->rxd, rmd->dvma_idx);
380 +               __EIP_DBG_PRINTF("RMD [%p] : head[%p]\n", rmd, rmd->head); 
381 +               __EIP_DBG_PRINTF("RMD [%p] : ebase[%x]\n", rmd,  rmd->nmd.nmd_addr); 
382 +               return;
383 +       }
384 +       EIP_ERR_PRINTF("RMD IS NULL - NO RMD TO DISPLAY\n");
385 +}
386 +static void eip_rmd_head_display(EIP_RMD_HEAD * head)
387 +{
388 +       if (head) {
389 +               __EIP_DBG_PRINTF("RMD HEAD [%p] : rcvr[%p] handle[%p] busy list[%p]\n", head, head->rcvr, head->handle, head->busy_list);
390 +               __EIP_DBG_PRINTF("RMD HEAD [%p] : %3.3d/%3.3d/%3.3d\n", head, 
391 +                               EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats), eip_rx->rmd_max_nr);
392 +               return;
393 +       }
394 +       EIP_ERR_PRINTF("RMD HEAD IS NULL - NO RMD HEAD TO DISPLAY\n");
395 +}
396 +
397 +/* END  - DISPLAY FUNCTIONS */
398 +static inline EP_NMH *eip_dma_reserve(int pages_nr, int perm)
399 +{
400 +       EP_NMH *handle = ep_dvma_reserve(eip_tx->ep_system, pages_nr, perm);
401 +       
402 +       if (handle)
403 +               EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HANDLE [%p] %d pages of elan address space reserved\n", 
404 +                               handle, pages_nr);
405 +       else
406 +               EIP_ERR_PRINTF("cannot reserve %d page(s) of elan address space\n", pages_nr);
407 +
408 +       return handle;
409 +}
410 +
411 +static inline void __eip_tmd_load(EIP_TMD * tmd, EP_RAILMASK *rmask)
412 +{
413 +       EIP_ASSERT(tmd->nmd.nmd_len > 0);
414 +       
415 +       ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) tmd->dma_base, tmd->nmd.nmd_len, tmd->head->handle,
416 +                       tmd->dvma_idx, rmask, &tmd->nmd);
417 +}
418 +
419 +static inline void __eip_tmd_unload(EIP_TMD * tmd)
420 +{
421 +       EIP_ASSERT(tmd->nmd.nmd_addr && tmd->head->handle);
422 +       
423 +       ep_dvma_unload(eip_tx->ep_system, tmd->head->handle, &tmd->nmd);
424 +       tmd->nmd.nmd_addr = 0;
425 +}
426 +static inline unsigned long eip_buff_alloc(int buff_len, int gfp)
427 +{
428 +       unsigned long buff_base = (buff_len < PAGE_SIZE) ? 
429 +                               (unsigned long) kmalloc(buff_len, gfp) :
430 +                               __get_dma_pages(gfp, get_order(buff_len));
431 +       
432 +       if (likely(buff_base))
433 +               return buff_base;
434 +
435 +       EIP_ERR_PRINTF("cannot allocate %db of memory\n", buff_len);
436 +       return 0;
437 +}
438 +static inline void eip_buff_free(unsigned long buff_base, int buff_len)
439 +{
440 +       (buff_len < PAGE_SIZE) ?  kfree((void *) buff_base) :
441 +               free_pages(buff_base, get_order(buff_len));
442 +}
443 +static struct iphdr *eip_ipfrag_get(char *data)
444 +{
445 +       struct ethhdr *eh = (struct ethhdr *) (data);
446 +       struct iphdr *iph;
447 +
448 +       if (eh->h_proto == htons(ETH_P_IP)) {
449 +               iph = (struct iphdr *) ((char *) eh + ETH_HLEN);
450 +
451 +               /* EIP_DBG(eip_iph_display(iph)); */
452 +
453 +               if ((iph->frag_off & htons(IP_MF | IP_OFFSET)))
454 +                       return iph;
455 +       }
456 +       return NULL;
457 +}
458 +
459 +static inline void eip_rmd_free(EIP_RMD * rmd)
460 +{
461 +       EIP_ASSERT2(rmd->nmd.nmd_addr == 0, eip_rmd_display, rmd);
462 +       
463 +       if ( rmd->skb != NULL) 
464 +               kfree_skb (rmd->skb);
465 +       
466 +       kfree(rmd);
467 +
468 +       EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "RMD [%p] : FREED\n", rmd);
469 +}
470 +static inline void eip_skb_load(EIP_RMD * rmd)
471 +{
472 +       EP_RAILMASK rmask = rmd->rxd ? ep_rxd_railmask (rmd->rxd) : 0;
473 +
474 +       EIP_ASSERT(skb_tailroom(rmd->skb) > 0);
475 +
476 +       ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) rmd->skb->data, skb_tailroom(rmd->skb), rmd->head->handle,
477 +                    rmd->dvma_idx, &rmask, &rmd->nmd);
478 +       
479 +       EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : LOADED\n", rmd);
480 +}
481 +static inline void eip_skb_unload(EIP_RMD * rmd)
482 +{
483 +       EIP_ASSERT(rmd->nmd.nmd_addr && rmd->head->handle);
484 +       
485 +       ep_dvma_unload(eip_tx->ep_system, rmd->head->handle, &rmd->nmd);
486 +       rmd->nmd.nmd_addr = 0;
487 +       
488 +       EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : UNLOADED\n", rmd);
489 +}
490 +static inline void eip_rmd_requeue(EIP_RMD * rmd)
491 +{
492 +       EIP_ASSERT(rmd->rxd);
493 +
494 +       rmd->chain.next    = NULL;
495 +
496 +       ep_requeue_receive(rmd->rxd, eip_rxhandler, rmd, &rmd->nmd, EP_NO_ALLOC|EP_NO_SLEEP );
497 +
498 +       atomic_inc(&rmd->head->stats);
499 +       
500 +       EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : REQUEUED\n", rmd);
501 +}
502 +static EIP_RMD * eip_rmd_alloc(int svc, int gfp)
503 +{
504 +       int buff_len = EIP_SVC_SMALLEST_LEN << svc;
505 +       EIP_RMD *rmd;
506 +       struct sk_buff *skb;
507 +
508 +       if (!(skb = alloc_skb((buff_len -  EIP_EXTRA), gfp)))
509 +               return NULL;
510 +       
511 +       skb_reserve(skb, 2);
512 +
513 +       if (!(rmd = (EIP_RMD *) kmalloc(buff_len, gfp))) {
514 +               kfree_skb(skb);
515 +               return NULL;
516 +       }
517 +
518 +       rmd->skb = skb;
519 +
520 +       rmd->chain.next = NULL;
521 +       rmd->rxd = NULL;
522 +       rmd->head = &eip_rx->head[svc];
523 +
524 +       return rmd;
525 +}
526 +
527 +static int eip_rmd_alloc_replace(EIP_RMD *rmd, int svc, int gfp) 
528 +{
529 +       struct sk_buff *skb,*old;
530 +       int buff_len = EIP_SVC_SMALLEST_LEN << svc;
531 +
532 +       if (!(skb = alloc_skb(buff_len, gfp)))
533 +               return 1;
534 +       
535 +       skb_reserve(skb, 2);
536 +
537 +       eip_skb_unload(rmd);
538 +
539 +       old      = rmd->skb;
540 +       rmd->skb = skb;
541 +
542 +       eip_skb_load(rmd);
543 +
544 +       eip_rmd_requeue(rmd);
545 +
546 +       kfree_skb(old);
547 +
548 +       return 0;
549 +}
550 +
551 +static int eip_rmd_alloc_queue(int svc, int dvma_idx, int gfp, int attr)
552 +{
553 +       EIP_RMD * rmd = eip_rmd_alloc(svc, gfp);
554 +
555 +       if (!rmd)
556 +               return 1;
557 +
558 +       EIP_STAT_ALLOC_ADD(&rmd->head->stats, 1);
559 +
560 +       rmd->dvma_idx = dvma_idx;
561 +       eip_skb_load(rmd);
562 +
563 +       EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "RMD [%p] : ALLOCATED for SVC 0x%x\n", rmd, svc);
564 +
565 +       if (ep_queue_receive(rmd->head->rcvr, eip_rxhandler, (void *) rmd, &rmd->nmd, attr) == ESUCCESS) {
566 +               atomic_inc(&rmd->head->stats);
567 +               EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : QUEUED on SVC 0x%x\n", rmd, svc);
568 +               return 0;
569 +       }
570 +       
571 +       EIP_ERR_PRINTF("RMD [%p] : couldn't be QUEUED on SVC 0x%x\n", rmd, svc);
572 +
573 +       EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1);
574 +
575 +       eip_skb_unload(rmd);
576 +       eip_rmd_free(rmd);
577 +
578 +       return 1;
579 +}
580 +
581 +static int eip_rmds_alloc(void)
582 +{
583 +       int idx, svc;
584 +
585 +       eip_rx->irq_list = NULL;
586 +       eip_rx->irq_list_nr = 0;
587 +
588 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
589 +               eip_rx->head[svc].rcvr = ep_alloc_rcvr(eip_tx->ep_system, EIP_SVC_EP(svc), rx_envelope_nr);
590 +               if (!eip_rx->head[svc].rcvr) {
591 +                       EIP_ERR_PRINTF("Cannot install receiver for SVC 0x%x - maybe cable is disconnected\n", svc);
592 +                       return -EAGAIN;
593 +               }
594 +
595 +               eip_rx->head[svc].handle =
596 +                   eip_dma_reserve(EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)) * eip_rx->rmd_max_nr,
597 +                                   EP_PERM_WRITE);
598 +               if (!eip_rx->head[svc].handle)
599 +                       return -ENOMEM;
600 +               
601 +               EIP_DBG(EIP_DBG_RMD_HEAD, eip_rmd_head_display, &eip_rx->head[svc]);
602 +
603 +               for (idx = 0; idx < EIP_RMD_NR; idx++) {
604 +                       if (eip_rmd_alloc_queue(svc, idx * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), 
605 +                                               GFP_KERNEL, EP_NO_SLEEP))
606 +                               return -ENOMEM;
607 +               }
608 +       }
609 +       return 0;
610 +}
611 +static void eip_rmds_free(void)
612 +{
613 +       unsigned long flags;
614 +       EIP_RMD *rmd;
615 +       int svc; 
616 +       
617 +       spin_lock_irqsave(&eip_rx->lock, flags);
618 +       rmd = eip_rx->irq_list;
619 +       eip_rx->irq_list = NULL;
620 +       eip_rx->irq_list_nr = 0;
621 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
622 +
623 +       eip_rmd_reclaim(rmd);
624 +       
625 +       for (svc = 0; svc < EIP_SVC_NR ; svc++) {
626 +               
627 +               while ((rmd = eip_rx->head[svc].busy_list)) {
628 +                       eip_rx->head[svc].busy_list = NULL;
629 +                       eip_rmd_reclaim(rmd);
630 +                       if (eip_rx->head[svc].busy_list) {
631 +                               EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "Still RMD [%p] on BUSY list SVC 0x%d - Scheduling\n", rmd, svc);     
632 +                               schedule();
633 +                       }
634 +               }
635 +
636 +               EIP_ASSERT(EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats) == EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats));
637 +               
638 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "HEAD[%p] : FREEING RCVR [%p]\n", &eip_rx->head[svc],
639 +                               eip_rx->head[svc].rcvr);
640 +               
641 +               ep_free_rcvr(eip_rx->head[svc].rcvr);
642 +
643 +               EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HEAD[%p] : RELEASING DVMA [%p]\n", &eip_rx->head[svc], 
644 +                               eip_rx->head[svc].handle);
645 +
646 +               ep_dvma_release(eip_tx->ep_system, eip_rx->head[svc].handle);
647 +       }
648 +
649 +}
650 +static int eip_rx_queues_low (void) {
651 +       int svc;
652 +       for (svc = 0; svc < EIP_SVC_NR; svc++) 
653 +               if (EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats)  < EIP_RMD_ALLOC_THRESH) 
654 +                       return (1);
655 +       return (0);
656 +}
657 +static void eip_rxhandler(EP_RXD * rxd)
658 +{
659 +       EIP_RMD *rmd            = (EIP_RMD *) ep_rxd_arg(rxd);
660 +       EP_STATUS ret           = ep_rxd_status(rxd);
661 +       EP_PAYLOAD * payload    = ep_rxd_payload(rxd);
662 +       unsigned long data      = (unsigned long) rmd->skb->data; 
663 +       int frag_nr             = 0;
664 +       int len;
665 +
666 +       struct sk_buff *skb;
667 +       static char count = 0;
668 +
669 +       atomic_dec(&rmd->head->stats);
670 +       rmd->rxd = rxd;
671 +
672 +       if (likely(ret == EP_SUCCESS)) {
673 +
674 +               rmd->head->dma++;
675 +
676 +               if ( eip_rx_dropping) {
677 +                   eip_rmd_requeue(rmd);
678 +                   return;
679 +               }
680 +
681 +               len = (payload) ? payload->Data[frag_nr++] : ep_rxd_len(rxd);
682 +
683 +               EIP_DBG(EIP_DBG_RMD, eip_rmd_display, rmd);
684 +
685 +again:
686 +               if ( (skb = skb_clone(rmd->skb, GFP_ATOMIC)) ) {
687 +                       unsigned int off = (data - (unsigned long) rmd->skb->data);
688 +
689 +                       /* have to set the length before calling
690 +                        * skb pull as it will not allow you to
691 +                        * pull past the end */
692 +
693 +                       skb_put (skb, off + len);
694 +                       skb_pull (skb, off);
695 +
696 +                       skb->protocol = eth_type_trans(skb, eip_rx->net_device);
697 +                       skb->ip_summed = eip_checksum_state;
698 +                       skb->dev = eip_rx->net_device;
699 +
700 +                       /* Fabien/David/Mike this is a hack/fix to allow aggregation of packets to work.
701 +                        * The problem is ip_frag looks at the truesize to see if it is caching too much space.
702 +                        * As we are reusing a large skb (cloned) for a number of small fragments, they appear to take up a lot of space.
703 +                        * so ip_frag dropped them after 4 frags (not good). So we lie and set the truesize to just bigger than the data. 
704 +                        */
705 +                       if (payload) 
706 +                               skb->truesize = SKB_DATA_ALIGN(skb->len + EIP_HEADER_PAD) +sizeof(struct sk_buff);
707 +
708 +               }
709 +               if ( (skb) && 
710 +                    (netif_rx(skb) != NET_RX_DROP)){
711 +
712 +                       eip_rx->bytes += len;
713 +                       
714 +                       if (payload && payload->Data[frag_nr] ) {
715 +                               data += EIP_IP_ALIGN(len);
716 +                               len   = payload->Data[frag_nr++];
717 +                               goto again;
718 +                       }
719 +                       eip_rx->packets += ++frag_nr;
720 +               } else if ( (eip_rx->dropped++ % 20) == 0)
721 +                               __EIP_DBG_PRINTK("Packet dropped by the TCP/IP stack - increase /proc/sys/net/core/netdev_max_backlog\n");
722 +       } else if (ret == EP_SHUTDOWN ) {
723 +               EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "ABORTING\n");
724 +                ep_complete_receive(rxd);
725 +                eip_skb_unload(rmd);
726 +               EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1);
727 +                eip_rmd_free(rmd);
728 +               return;
729 +       } else {
730 +               EP_ENVELOPE *env = ep_rxd_envelope(rxd);
731 +               EP_NMD *nmd ;
732 +               
733 +               EIP_ERR_PRINTF("RMD[%p] : RECEIVE ret = %d\n", rmd, ret);
734 +
735 +               for (len = 0 ; len < env->nFrags ; len++) {
736 +                       nmd = &env->Frags[len];
737 +                       EIP_ERR_PRINTF("RMD[%p] : ep_frag #%d nmd_addr [%x] nmd_len %d\n", rmd, len, 
738 +                                       (unsigned int) nmd->nmd_addr, nmd->nmd_len);
739 +               }
740 +               eip_rx->errors++;
741 +               EIP_ASSERT2(atomic_read(&skb_shinfo(rmd->skb)->dataref) == 1, eip_rmd_display, rmd);
742 +       }
743 +
744 +       /* data is used to store the irq flags */
745 +       spin_lock_irqsave(&eip_rx->lock, data);
746 +       rmd->chain.next = eip_rx->irq_list;
747 +       eip_rx->irq_list = rmd;
748 +       eip_rx->irq_list_nr++;
749 +       spin_unlock_irqrestore(&eip_rx->lock, data);
750 +
751 +       if ( !timer_pending (&eip_rx_tasklet_timer)      /* the timer not already set                     */
752 +            && ( (count++ % eip_rx->sysctl_granularity) /* and either we have passed up a number of them */
753 +                 || eip_rx_queues_low() ))              /* or we are low                                 */
754 +               mod_timer (&eip_rx_tasklet_timer, lbolt + 1);
755 +}
756 +
757 +/* dest ; if the buffer still reference on it mocve the rmd to the dest list */
758 +static void eip_rmd_reclaim(EIP_RMD *rmd) 
759 +{
760 +       EIP_RMD *rmd_next = rmd;
761 +       int dataref;
762 +
763 +       while (rmd_next) {
764 +               rmd = rmd_next;
765 +               rmd_next = rmd_next->chain.next;
766 +
767 +               dataref = atomic_read(&skb_shinfo(rmd->skb)->dataref);
768 +               EIP_ASSERT(dataref > 0);
769 +               
770 +               if (dataref == 1) {
771 +                       eip_rmd_requeue(rmd);
772 +               } else {
773 +                       rmd->chain.next = rmd->head->busy_list;
774 +                       rmd->head->busy_list = rmd;
775 +               }
776 +       }
777 +}
778 +static void eip_rx_tasklet(unsigned long arg)
779 +{
780 +       EIP_RMD *rmd, *rmd_next;
781 +       unsigned long flags;
782 +       short svc, queued;
783 +       int   needs_reschedule;
784 +
785 +       if (eip_rx_tasklet_locked) /* we dont want the tasklet to do anything when we are finishing */
786 +           return;
787 +
788 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
789 +               rmd = eip_rx->head[svc].busy_list;
790 +               eip_rx->head[svc].busy_list = NULL;
791 +               eip_rmd_reclaim(rmd);
792 +       }
793 +
794 +       spin_lock_irqsave(&eip_rx->lock, flags);
795 +       rmd = eip_rx->irq_list;
796 +       eip_rx->irq_list = NULL;
797 +       eip_rx->irq_list_nr = 0;
798 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
799 +       
800 +       eip_rmd_reclaim(rmd);
801 +
802 +       needs_reschedule = 0;
803 +
804 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
805 +               /* the plan is : allocate some more if possible or steall some dvma space from those on the EIP_BUSY_LIST */
806 +               queued = EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats);
807 +
808 +               EIP_ASSERT(queued >= 0 && queued <= EIP_RMD_MAX_NR);    
809 +               
810 +               if (queued < EIP_RMD_ALLOC_THRESH) {
811 +                       short allocated = EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats);
812 +                       short how_many; 
813 +
814 +                       EIP_ASSERT(allocated >= 0 && allocated <= EIP_RMD_MAX_NR);
815 +                       
816 +                       if (likely(allocated < eip_rx->rmd_max_nr)) {
817 +
818 +                               how_many = (((allocated / EIP_RMD_ALLOC_STEP) + 1) * EIP_RMD_ALLOC_STEP);
819 +                               if (how_many > eip_rx->rmd_max_nr)
820 +                                       how_many = eip_rx->rmd_max_nr;
821 +
822 +                               for (; allocated < how_many &&  
823 +                                                       (eip_rmd_alloc_queue(svc, allocated * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), 
824 +                                                                             GFP_ATOMIC, EP_NO_ALLOC|EP_NO_SLEEP) == 0) ; allocated++);
825 +                               if ( allocated != how_many ) {
826 +                                       eip_rx->reschedule++;
827 +                                       needs_reschedule = 1;
828 +                               }
829 +                       } else {
830 +                               /* steal how_many rmds and put them on the aside list */
831 +                               how_many = EIP_RMD_ALLOC_THRESH - queued;
832 +
833 +                               EIP_ASSERT(how_many >= 0 && how_many <= EIP_RMD_ALLOC_THRESH);
834 +                               
835 +                               rmd_next = eip_rx->head[svc].busy_list;
836 +                               eip_rx->head[svc].busy_list = NULL;
837 +
838 +                               while (how_many-- && rmd_next) {
839 +                                       rmd = rmd_next;
840 +                                       rmd_next = rmd_next->chain.next;
841 +
842 +                                       if (eip_rmd_alloc_replace(rmd, svc, GFP_ATOMIC)) {
843 +                                               rmd_next = rmd;
844 +                                               break;
845 +                                       }
846 +                               }
847 +                               eip_rx->head[svc].busy_list = rmd_next;
848 +                               if ( how_many )
849 +                                       needs_reschedule = 1;
850 +                       }
851 +               }
852 +       }
853 +       
854 +       if ( needs_reschedule && ( !timer_pending (&eip_rx_tasklet_timer)))
855 +               mod_timer (&eip_rx_tasklet_timer, lbolt + 2);
856 +}
857 +
858 +static inline void eip_tmd_init(EIP_TMD * tmd, unsigned long buff_base, EIP_TMD_HEAD * head, unsigned long buff_len,
859 +                               int dvma_idx)
860 +{
861 +       tmd->dvma_idx = dvma_idx;
862 +       tmd->dma_base = buff_base;
863 +       tmd->dma_len = -1;
864 +       tmd->skb = NULL;
865 +       tmd->head = head;
866 +       tmd->chain.next = NULL;
867 +
868 +       if (tmd->head != &eip_tx->head[EIP_TMD_STD]) {
869 +               tmd->nmd.nmd_len = buff_len;
870 +               eip_tmd_load(tmd);
871 +       } else  {
872 +               tmd->nmd.nmd_len  = -1;
873 +               tmd->nmd.nmd_addr = 0;
874 +       }
875 +}
876 +
877 +static inline EIP_TMD *eip_tmd_get(int id)
878 +{
879 +       unsigned long flags;
880 +       EIP_TMD *tmd = NULL;
881 +       spin_lock_irqsave(&eip_tx->lock, flags);
882 +       while ((tmd = eip_tx->head[id].tmd) == NULL) {
883 +               spin_unlock_irqrestore(&eip_tx->lock, flags);
884 +               if (ep_enable_txcallbacks(eip_tx->xmtr) == 0) {
885 +
886 +                       spin_lock_irqsave (&eip_tx->lock, flags);
887 +                       if (eip_tx->head[id].tmd == NULL) {
888 +                               __EIP_DBG_PRINTF("Cannot get a TMD on head %d ... stopping queue\n", id);
889 +                               
890 +                               eip_stop_queue ();
891 +                               
892 +                               spin_unlock_irqrestore (&eip_tx->lock, flags);
893 +
894 +                               return NULL;
895 +                       }
896 +                       spin_unlock_irqrestore (&eip_tx->lock, flags);
897 +               }
898 +
899 +               ep_disable_txcallbacks(eip_tx->xmtr);
900 +               spin_lock_irqsave(&eip_tx->lock, flags);
901 +       }
902 +       eip_tx->head[id].tmd = tmd->chain.next;
903 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
904 +       atomic_dec(&tmd->head->stats);
905 +       return tmd;
906 +}
907 +
908 +static inline void eip_tmd_put(EIP_TMD * tmd)
909 +{
910 +       unsigned long flags;
911 +
912 +       tmd->skb = NULL;
913 +
914 +       spin_lock_irqsave(&eip_tx->lock, flags);
915 +       tmd->chain.next = tmd->head->tmd;
916 +       tmd->head->tmd = tmd;
917 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
918 +       atomic_inc(&tmd->head->stats);
919 +
920 +       eip_start_queue();
921 +
922 +       EIP_DBG_PRINTF(EIP_DBG_TMD_QUEUE, "TMD [%p] : REQUEUED\n", tmd);
923 +}
924 +static inline void eip_tmd_load(EIP_TMD * tmd)
925 +{
926 +       EP_RAILMASK rmask = tx_railmask;
927 +
928 +       __eip_tmd_load(tmd, &rmask);
929 +       
930 +       EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : LOADED\n", tmd);
931 +}
932 +static inline void eip_tmd_unload(EIP_TMD * tmd)
933 +{
934 +       __eip_tmd_unload(tmd);
935 +       
936 +       EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : UNLOADED\n", tmd);
937 +}
938 +static inline void eip_tmd_free(EIP_TMD * tmd)
939 +{
940 +       eip_buff_free(tmd->dma_base, tmd->nmd.nmd_len);
941 +       
942 +       EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "TMD [%p] : FREED\n", tmd);
943 +       
944 +       EIP_STAT_ALLOC_SUB(&tmd->head->stats, 1);
945 +}
946 +
947 +/* tmd on a separate block */
948 +static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD * tmd, EIP_TMD_HEAD * head, int dvma_idx)
949 +{
950 +       eip_tmd_init(tmd, 0, head, -1, dvma_idx);
951 +
952 +       eip_tmd_put(tmd);
953 +
954 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
955 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
956 +       return tmd;
957 +}
958 +/* tmd on the buffer */
959 +static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD * head, int dvma_idx)
960 +{
961 +       EIP_TMD *tmd;
962 +       unsigned long buff_base;
963 +
964 +       if (!(buff_base = eip_buff_alloc(tx_copybreak_max + sizeof(EIP_TMD), GFP_KERNEL)))
965 +               return NULL;
966 +
967 +       tmd = (EIP_TMD *) (buff_base + tx_copybreak_max);
968 +       eip_tmd_init(tmd, buff_base, head, tx_copybreak_max, dvma_idx);
969 +
970 +       eip_tmd_put(tmd);
971 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
972 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
973 +       return tmd;
974 +}
975 +
976 +/* ipf are on the buffer */
977 +static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD * head, int dvma_idx)
978 +{
979 +       EIP_TMD *tmd;
980 +       unsigned long buff_base;
981 +
982 +       if (!(buff_base = eip_buff_alloc(EIP_SVC_BIGGEST_LEN, GFP_KERNEL)))
983 +               return NULL;
984 +
985 +       tmd = (EIP_TMD *) (buff_base + EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG));
986 +       eip_tmd_init(tmd, buff_base, head, EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG), dvma_idx);
987 +
988 +       eip_tmd_put(tmd);
989 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
990 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
991 +       return tmd;
992 +}
993 +
994 +static int eip_tmds_alloc()
995 +{
996 +       int i;
997 +       int page_nr;
998 +       EIP_TMD *tmd;
999 +
1000 +       page_nr = EIP_DVMA_PAGES(tx_copybreak_max);
1001 +
1002 +       eip_tx->head[EIP_TMD_COPYBREAK].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ);
1003 +       
1004 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_COPYBREAK]);
1005 +
1006 +       for (i = 0; i < EIP_TMD_NR; i++) {
1007 +               if (!eip_tmd_alloc_queue_copybreak(&eip_tx->head[EIP_TMD_COPYBREAK], i * page_nr))
1008 +                       return -ENOMEM;
1009 +       }
1010 +
1011 +       eip_tx->head[EIP_TMD_STD].handle =
1012 +           eip_dma_reserve(EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN) * eip_tx->tmd_max_nr, EP_PERM_READ);
1013 +       
1014 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_STD]);
1015 +
1016 +       tmd = kmalloc(sizeof(EIP_TMD) * EIP_TMD_NR, GFP_KERNEL);
1017 +       if (!tmd) {
1018 +               EIP_ERR_PRINTF("Cannot ALLOCATE %d of tmds\n", (int) sizeof(EIP_TMD) * EIP_TMD_NR);
1019 +               return -ENOMEM;
1020 +       }
1021 +       
1022 +       page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN);
1023 +       
1024 +       for (i = 0; i < EIP_TMD_NR; i++, tmd++) {
1025 +               if (!eip_tmd_alloc_queue(tmd, &eip_tx->head[EIP_TMD_STD], i * page_nr))
1026 +                       return -ENOMEM;
1027 +       }
1028 +
1029 +       page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN);
1030 +
1031 +       eip_tx->head[EIP_TMD_AGGREG].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ);
1032 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_AGGREG]);
1033 +
1034 +       for (i = 0; i < EIP_TMD_NR; i++) {
1035 +               if (!eip_tmd_alloc_queue_aggreg(&eip_tx->head[EIP_TMD_AGGREG], i * page_nr))
1036 +                       return -ENOMEM;
1037 +       }
1038 +       return 0;
1039 +}
1040 +
1041 +static void eip_tmds_free(void) 
1042 +{
1043 +       EIP_TMD *tmd;
1044 +       EIP_TMD *tmd_next;
1045 +       int i;
1046 +       
1047 +       ep_poll_transmits(eip_tx->xmtr);
1048 +
1049 +       for (i = 0 ; i < 3 ; i++) {
1050 +again:
1051 +               if (EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats) < EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats)) {
1052 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "Polling XMTR [%p]\n", eip_tx->xmtr);       
1053 +                       ep_poll_transmits(eip_tx->xmtr);
1054 +                       goto again;
1055 +               }
1056 +       }
1057 +       /* everything should be queued */
1058 +        if ((tmd = eip_tx->head[EIP_TMD_COPYBREAK].tmd)) {
1059 +            do {
1060 +                       tmd_next = tmd->chain.next;
1061 +                        eip_tmd_unload(tmd);
1062 +                       
1063 +                       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1064 +                       
1065 +                        eip_tmd_free(tmd);
1066 +            } while (tmd_next && (tmd = tmd_next));
1067 +        }
1068 +       
1069 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_COPYBREAK] release DVMA [%p]\n",
1070 +                       eip_tx->head[EIP_TMD_COPYBREAK].handle);        
1071 +       
1072 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_COPYBREAK].handle);
1073 +       
1074 +       /* these ones have been allocated as a block */
1075 +       if ((tmd = eip_tx->head[EIP_TMD_STD].tmd)) {
1076 +               do {
1077 +                       if (tmd->dvma_idx == 0 ) {
1078 +                               kfree(tmd);
1079 +                               /* eip_tmd_free(tmd); */
1080 +                               EIP_STAT_ALLOC_SUB(&tmd->head->stats, EIP_TMD_NR);
1081 +                               tmd_next = NULL;
1082 +                               EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_STD] BLOCK FREED\n", tmd); 
1083 +                       } else 
1084 +                               tmd_next = tmd->chain.next;
1085 +               } while (tmd_next && (tmd = tmd_next));
1086 +       }
1087 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_STD] release DVMA [%p]\n", 
1088 +                       eip_tx->head[EIP_TMD_STD].handle);      
1089 +       
1090 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_STD].handle);
1091 +       
1092 +       if ((tmd = eip_tx->head[EIP_TMD_AGGREG].tmd)) {
1093 +               do {
1094 +                       tmd_next = tmd->chain.next;
1095 +
1096 +                       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1097 +                       
1098 +                       eip_tmd_unload(tmd);
1099 +                       eip_tmd_free(tmd);
1100 +               } while (tmd_next && (tmd = tmd_next));
1101 +       }
1102 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_AGGREG] release DVMA\n", 
1103 +                       eip_tx->head[EIP_TMD_AGGREG].handle);   
1104 +       
1105 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_AGGREG].handle);
1106 +
1107 +       ep_free_xmtr(eip_tx->xmtr);
1108 +       EIP_DBG_PRINTF(EIP_DBG_TMD, "XMTR[%p] : FREED\n", eip_tx->xmtr);
1109 +}
1110 +
1111 +static inline void eip_ipf_skb_add(EIP_IPFRAG * ipf, struct sk_buff *skb)
1112 +{
1113 +       int align = EIP_IP_ALIGN(skb->len);
1114 +       
1115 +       
1116 +       if (ipf->dma_len == -1) {       /* like a virgin; touched for the very first time */
1117 +               do_gettimeofday(&ipf->timestamp);
1118 +               /* FIXE ME put that in release tmd code */
1119 +               ipf->frag_nr            = 0;
1120 +               ipf->dma_len            = 0;
1121 +               ipf->datagram_len       = -1;
1122 +               ipf->dma_correction     = 0;
1123 +       }
1124 +       
1125 +       memcpy((void *) (ipf->dma_base + ipf->dma_len), skb->data, skb->len);
1126 +       
1127 +       if (ipf->datagram_len == -1) {
1128 +               struct iphdr * iph = skb->nh.iph;
1129 +               int offset = ntohs(iph->frag_off);
1130 +
1131 +               /* last one ? ;  offset & ~IP_OFFSET = IP fragment flags */
1132 +               if (((offset & ~IP_OFFSET) & IP_MF) == 0) {
1133 +                       offset &= IP_OFFSET;
1134 +                       offset <<= 3;    
1135 +                       ipf->datagram_len = offset + htons(iph->tot_len) - sizeof(struct iphdr);
1136 +               }
1137 +       }
1138 +
1139 +       skb->next                       = ipf->skb;
1140 +       ipf->skb                        = skb;
1141 +       ipf->payload.Data[ipf->frag_nr] = skb->len;
1142 +       ipf->dma_len                   += align;
1143 +       ipf->dma_correction            += align - skb->len  + ETH_HLEN + sizeof(struct iphdr);
1144 +       /* FIXME ; Count got wrong if ip header has options */
1145 +
1146 +       ipf->frag_nr++;
1147 +
1148 +       EIP_DBG2(EIP_DBG_TMD, eip_ipf_display, ipf, "ADDED skb[%p] len %db ALIGNED(%db)\n", skb, skb->len, EIP_IP_ALIGN(skb->len));
1149 +}
1150 +
1151 +#define eip_ipf_hasroom(ipf, skb) ((ipf->dma_len + EIP_IP_ALIGN(skb->len) < eip_tx->sysctl_ipfrag_copybreak))
1152 +int eip_hard_start_xmit(struct sk_buff *skb, struct net_device *devnet) 
1153 +{
1154 +
1155 +       EIP_TMD *tmd;
1156 +       EP_NMD nmd;
1157 +       struct iphdr *iph;
1158 +       int j;
1159 +
1160 +       if (skb->destructor){
1161 +               atomic_inc(&eip_tx->destructor);
1162 +               tasklet_schedule(&eip_tx->tasklet);
1163 +       } 
1164 +
1165 +       if (!(iph = eip_ipfrag_get(skb->data)) || (eip_tx->sysctl_aggregation == 0)) { /* not ip fragment */
1166 +no_aggreg:
1167 +               j = (skb->len < eip_tx->sysctl_copybreak) ? EIP_TMD_COPYBREAK : EIP_TMD_STD; /* j = head id */
1168 +               
1169 +               if (!(tmd = eip_tmd_get(j))) {
1170 +                       if (skb->destructor)
1171 +                               atomic_dec(&eip_tx->destructor);
1172 +                       return 1;
1173 +               }
1174 +               
1175 +               tmd->dma_len    = skb->len;
1176 +               tmd->skb        = skb;
1177 +               tmd->skb->next  = NULL;
1178 +               tmd->chain.next = NULL;
1179 +               
1180 +               if (j == EIP_TMD_COPYBREAK) {
1181 +                       memcpy((void *) tmd->dma_base, skb->data, skb->len);
1182 +                       
1183 +                       ep_nmd_subset(&nmd, &tmd->nmd, 0, skb->len);
1184 +#ifdef EIP_MORE_STATS
1185 +                       eip_tx->sent_copybreak++;
1186 +#endif
1187 +                       return eip_do_xmit(tmd, &nmd, NULL);
1188 +               }
1189 +               tmd->dma_base           = (unsigned long) skb->data;
1190 +               tmd->nmd.nmd_len        = skb->len;
1191 +               eip_tmd_load(tmd);
1192 +
1193 +#ifdef EIP_MORE_STATS
1194 +               eip_tx->sent_std++;
1195 +#endif
1196 +               return eip_do_xmit(tmd, &tmd->nmd, NULL);
1197 +       } else if ( skb->len > EIP_SVC_BIGGEST_LEN/2 ) { 
1198 +               /* don't aggregate when we have a full mtu of data */
1199 +               /* or more than 32k ; in this case it is cheaper   */
1200 +               /* to just map the buffer and send it              */
1201 +               goto no_aggreg;
1202 +       } else {
1203 +               EIP_IPFRAG *ipf = NULL;
1204 +               unsigned long flags;
1205 +               struct list_head *l;
1206 +               struct iphdr *iph2;
1207 +               int i;
1208 +               __u16 id = iph->id;
1209 +               __u32 saddr = iph->saddr;
1210 +               __u32 daddr = iph->daddr;
1211 +               __u8 protocol = iph->protocol;
1212 +
1213 +                       EIP_DBG(EIP_DBG_IPH, eip_iph_display, iph);
1214 +
1215 +               j = 0;
1216 +
1217 +               /* here we can't have full mtu size aggregated packet */
1218 +               EIP_ASSERT_RET(skb->len < eip_tx->sysctl_ipfrag_copybreak, 0);
1219 +
1220 +               spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1221 +               list_for_each(l, &eip_tx->ipfrag) {
1222 +                       ipf = list_entry(l, EIP_IPFRAG, list);
1223 +                       iph2 = eip_ipfrag_get((char *) ipf->dma_base);
1224 +                       
1225 +                        EIP_ASSERT(iph2);
1226 +                       
1227 +                       if ((iph2->id == id) && 
1228 +                                       (get_unaligned(&iph2->saddr) == saddr) && 
1229 +                                       (get_unaligned(&iph2->daddr) == daddr) && 
1230 +                                       (iph2->protocol == protocol)) {
1231 +                               /* || timeout */
1232 +                               if (eip_ipf_hasroom(ipf, skb)) {
1233 +                                       
1234 +                                       eip_ipf_skb_add(ipf, skb);
1235 +                                       
1236 +                                       if ((ipf->datagram_len != -1) && 
1237 +                                                       (ipf->dma_len == (ipf->datagram_len + ipf->dma_correction) || 
1238 +                                                        ipf->frag_nr == (128 / sizeof(uint32_t)))) {
1239 +send_aggreg:
1240 +                                               ipf->payload.Data[ipf->frag_nr] = 0;
1241 +                                               list_del(&ipf->list);
1242 +                                               eip_tx->ipfrag_count--;
1243 +                                               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1244 +                                       
1245 +                                               ep_nmd_subset(&nmd, &ipf->nmd, 0, ipf->dma_len);
1246 +                                               
1247 +#ifdef EIP_MORE_STATS
1248 +                                               eip_tx->sent_aggreg++;
1249 +#endif
1250 +                                               if ((i = eip_do_xmit((EIP_TMD *) ipf, &nmd, &ipf->payload)) != EP_SUCCESS)
1251 +                                                       return i;
1252 +                                               if (j)
1253 +                                                       goto new;
1254 +                                               return 0;
1255 +                                       }
1256 +                                       
1257 +                                       spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1258 +                                       tasklet_schedule(&eip_tx->tasklet);
1259 +                                       return 0;
1260 +                               } else {
1261 +                                       EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF[%p] : FULL %db full - sending it\n", ipf, ipf->dma_len);
1262 +                                       j = 1;
1263 +                                       goto send_aggreg;
1264 +                               }
1265 +                       }
1266 +               }
1267 +               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1268 +new:
1269 +               if (!(ipf = (EIP_IPFRAG *) eip_tmd_get(EIP_TMD_AGGREG)))
1270 +                       goto no_aggreg;
1271 +
1272 +               eip_ipf_skb_add(ipf, skb);
1273 +               
1274 +               spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1275 +               list_add_tail(&ipf->list, &eip_tx->ipfrag);
1276 +               eip_tx->ipfrag_count++;
1277 +               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1278 +               tasklet_schedule(&eip_tx->tasklet);
1279 +       }
1280 +       return 0;
1281 +}
1282 +static int eip_do_xmit(EIP_TMD * tmd, EP_NMD *nmd, EP_PAYLOAD *payload)
1283 +{
1284 +       EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base;
1285 +       int         attr = EP_SET_DATA((EP_NO_SLEEP | EP_NO_INTERRUPT | EP_NO_FAILOVER), EP_TYPE_SVC_INDICATOR, EP_SVC_EIP);
1286 +       unsigned long flags;
1287 +       int svc, rnum;
1288 +
1289 +       SIZE_TO_SVC(nmd->nmd_len, svc);
1290 +
1291 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1292 +       /* EIP_DBG(eip_eiph_display(eiph)); */
1293 +       
1294 +       if (unlikely (eiph->h_dhost.ip_bcast))
1295 +               rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_availrails(eip_tx->xmtr));
1296 +       else
1297 +               rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_noderails(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr)));
1298 +
1299 +       if (rnum >= 0)
1300 +               attr = EP_SET_PREFRAIL(attr, rnum);
1301 +
1302 +       /* add to inuse list  */
1303 +       spin_lock_irqsave (&eip_tx->lock, flags);
1304 +       list_add_tail (&tmd->chain.link, &eip_tx->inuse);
1305 +       spin_unlock_irqrestore (&eip_tx->lock, flags);
1306 +
1307 +       /* ENOMEM EINVAL ECONNREFUSED ESUCCESS */
1308 +       svc = (unlikely(eiph->h_dhost.ip_bcast)) ? 
1309 +               ep_multicast_message(eip_tx->xmtr, -1, -1, NULL, EIP_SVC_EP(svc), attr | EP_NOT_MYSELF, eip_txhandler, tmd, payload, nmd, 1) :
1310 +
1311 +               ep_transmit_message(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr), EIP_SVC_EP(svc),  attr, eip_txhandler, tmd, payload, nmd, 1);
1312 +               
1313 +       if (likely(svc == EP_SUCCESS))
1314 +               return 0;
1315 +       else if (svc == ENOMEM) {
1316 +               EIP_ERR_PRINTF("%s", "Memory allocation error ...\n");
1317 +               eip_tx->errors++;
1318 +       }
1319 +       else
1320 +       {
1321 +               /* EP_EINVAL occurs when the svc has a bad value or the iovec has too many frag; */
1322 +               /* we don't use the latter option here                                        */
1323 +               __EIP_DBG_PRINTF("TMD [%p] : DROPPED skb[%p] status = %d from ep_?_message\n", tmd, tmd->skb, svc);
1324 +
1325 +               eip_tx->dropped++;
1326 +       }
1327 +
1328 +       eip_txhandler(NULL, tmd, -99);
1329 +
1330 +       /* Quadrics GNAT sw-elan/4397 - since we will "never" be able to send this packet to the */
1331 +       /* destination node, we drop it and feign success - this has the same behaviour as an    */
1332 +       /* ethernet where it sticks the packet on the wire, but no-one receives it.              */
1333 +       return 0;
1334 +}
1335 +
1336 +static void eip_txhandler(EP_TXD * txd, void *arg, EP_STATUS status)
1337 +{
1338 +       EIP_TMD *tmd = (EIP_TMD *) arg;
1339 +       struct sk_buff *skb_next;
1340 +       unsigned long flags;
1341 +       int svc = 0;
1342 +       
1343 +       if (likely(status == EP_SUCCESS)) {
1344 +               SIZE_TO_SVC(tmd->dma_len, svc);
1345 +               eip_tx->dma[svc]++;
1346 +               eip_tx->bytes += tmd->dma_len;
1347 +               
1348 +               if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) {
1349 +                       EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd;
1350 +                       eip_tx->packets += ipf->frag_nr;
1351 +               } else
1352 +                       eip_tx->packets++;
1353 +       } else {
1354 +               if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) {
1355 +                       EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd;
1356 +                       eip_tx->dropped += ipf->frag_nr;
1357 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler aggreg packet dropped status = %d\n", status);
1358 +               } else  {
1359 +                       eip_tx->dropped++;
1360 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler packet dropped status = %d\n", status);
1361 +               }
1362 +       }
1363 +
1364 +       if (tmd->head == &eip_tx->head[EIP_TMD_STD]) {
1365 +               eip_tmd_unload(tmd);
1366 +               tmd->dma_base = 0;
1367 +               tmd->nmd.nmd_len = -1;
1368 +       }
1369 +               
1370 +       tmd->dma_len = -1;
1371 +       
1372 +       svc = 0;
1373 +       while (tmd->skb) {
1374 +               svc++;
1375 +               
1376 +               if (tmd->skb->destructor)
1377 +                       atomic_dec(&eip_tx->destructor);
1378 +
1379 +               skb_next = tmd->skb->next;
1380 +               dev_kfree_skb_any(tmd->skb);
1381 +               tmd->skb = skb_next;
1382 +       }
1383 +       EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF/TMD [%p] : %d skb RELEASE/FREED\n", tmd, svc);
1384 +
1385 +       /* remove from inuse list  */
1386 +       spin_lock_irqsave (&eip_tx->lock, flags);
1387 +       list_del (&tmd->chain.link);
1388 +       spin_unlock_irqrestore (&eip_tx->lock, flags);
1389 +
1390 +       eip_tmd_put(tmd);
1391 +}
1392 +
1393 +static void eip_tx_tasklet(unsigned long arg)
1394 +{
1395 +       struct timeval now;
1396 +       unsigned long flags;
1397 +       EIP_IPFRAG *ipf, *ipfq = NULL;
1398 +       EP_NMD nmd;
1399 +       struct list_head *list;
1400 +       struct list_head *tmp;
1401 +       char resched = 0;
1402 +       char poll = 1;
1403 +       
1404 +       do_gettimeofday(&now);
1405 +       
1406 +       spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1407 +       if (eip_tx->ipfrag_count) {
1408 +               list_for_each_safe(list, tmp, &eip_tx->ipfrag) {
1409 +                       ipf = list_entry(list, EIP_IPFRAG, list);
1410 +                       /* delta = (((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - ipf->timestamp.tv_usec; */
1411 +                       if (((((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - 
1412 +                                       ipf->timestamp.tv_usec) >= (1000UL * eip_tx->sysctl_ipfrag_to)) {
1413 +                               list_del(&ipf->list);
1414 +                               eip_tx->ipfrag_count--;
1415 +                               ipf->chain.next = (EIP_TMD *) ipfq;
1416 +                               ipfq = ipf;
1417 +                       }
1418 +               }
1419 +       }
1420 +       if (eip_tx->ipfrag_count)
1421 +               resched = 1;
1422 +       spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1423 +
1424 +       while (ipfq) {
1425 +               poll = 0;
1426 +
1427 +               ep_nmd_subset(&nmd, &ipfq->nmd, 0, ipfq->dma_len);
1428 +               
1429 +               ipfq->payload.Data[ipfq->frag_nr] = 0;
1430 +               
1431 +#ifdef EIP_MORE_STATS
1432 +               eip_tx->sent_aggreg++;
1433 +#endif
1434 +               ipf = (EIP_IPFRAG *) ipfq->chain.next;
1435 +               eip_do_xmit((EIP_TMD *) ipfq, &nmd, &ipfq->payload);
1436 +               ipfq = ipf;
1437 +       }
1438 +       
1439 +       if (poll)
1440 +               ep_poll_transmits(eip_tx->xmtr);
1441 +
1442 +       if (atomic_read(&eip_tx->destructor) || resched )
1443 +               tasklet_schedule(&eip_tx->tasklet);
1444 +}
1445 +void eip_start_queue()
1446 +{
1447 +       if (netif_queue_stopped(eip_tx->net_device)) {
1448 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Waking up %s queue\n", eip_tx->net_device->name);
1449 +               netif_wake_queue(eip_tx->net_device);
1450 +       }
1451 +}
1452 +void eip_stop_queue()
1453 +{
1454 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "Stopping %s queue\n", eip_tx->net_device->name);
1455 +       netif_stop_queue(eip_tx->net_device);
1456 +}
1457 +
1458 +static int eip_open(struct net_device *devnet)
1459 +{
1460 +       if (devnet->flags & IFF_PROMISC)
1461 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "%s entering in promiscuous mode\n", devnet->name);
1462 +
1463 +       netif_start_queue(devnet);
1464 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x up\n",
1465 +                       devnet->name, (devnet->dev_addr[0]) & 0xff,
1466 +                       (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1467 +                       (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1468 +       return 0;
1469 +}
1470 +
1471 +static int eip_close(struct net_device *devnet)
1472 +{
1473 +       if (devnet->flags & IFF_PROMISC)
1474 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "%s leaving promiscuous mode\n", devnet->name);
1475 +
1476 +       netif_stop_queue(devnet);
1477 +
1478 +       eip_rx_tasklet(0);
1479 +
1480 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x down\n", 
1481 +               devnet->name, (devnet->dev_addr[0]) & 0xff,
1482 +               (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1483 +               (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1484 +       return 0;
1485 +}
1486 +
1487 +static struct net_device_stats *eip_get_stats(struct net_device *devnet)
1488 +{
1489 +       static struct net_device_stats stats;
1490 +
1491 +       stats.rx_packets = eip_rx->packets;
1492 +       stats.rx_bytes = eip_rx->bytes;
1493 +       stats.rx_errors = eip_rx->errors;
1494 +       stats.rx_dropped = eip_rx->dropped;
1495 +
1496 +       stats.tx_packets = eip_tx->packets;
1497 +       stats.tx_bytes = eip_tx->bytes;
1498 +       stats.tx_errors = eip_tx->errors;
1499 +       stats.tx_dropped = eip_tx->dropped;
1500 +       return &stats;
1501 +}
1502 +
1503 +static int eip_change_mtu(struct net_device *devnet, int mtu)
1504 +{
1505 +       if (mtu <= EIP_MTU_MAX) {
1506 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "MTU size changed from %d to %d\n", devnet->mtu, mtu);
1507 +               devnet->mtu = mtu;
1508 +       }
1509 +       return 0;
1510 +}
1511 +
1512 +#ifdef MODULE
1513 +int eip_init(void)
1514 +{
1515 +       struct net_device *devnet;
1516 +       int errno = 0;
1517 +
1518 +       eip_rx_dropping = 0; 
1519 +       eip_rx_tasklet_locked = 1;
1520 +
1521 +       /* timer up but not started */
1522 +       init_timer (&eip_rx_tasklet_timer);
1523 +       eip_rx_tasklet_timer.function = eip_rx_tasklet;
1524 +       eip_rx_tasklet_timer.data     = (unsigned long) 0;
1525 +       eip_rx_tasklet_timer.expires  = lbolt + hz;
1526 +
1527 +       devnet = alloc_etherdev(sizeof(EIP_RX) + sizeof(EIP_TX));
1528 +       if (!devnet) {
1529 +               EIP_ERR_PRINTF("Unable to ALLOCATE etherdev structure\n");
1530 +               return -ENOMEM;
1531 +       }
1532 +       strcpy (devnet->name, "eip0");
1533 +
1534 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "Enabling aggregation code\n");
1535 +       devnet->change_mtu = eip_change_mtu;
1536 +       devnet->mtu = EIP_MTU_MAX;
1537 +       devnet->open = eip_open;
1538 +       devnet->stop = eip_close;
1539 +       devnet->hard_start_xmit = eip_hard_start_xmit;
1540 +       devnet->get_stats = eip_get_stats;
1541 +
1542 +        /* devnet->features |= (NETIF_F_DYNALLOC); */
1543 +        /* devnet->features = (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA); */
1544 +        /* devnet->features |= (NETIF_F_SG|NETIF_F_FRAGLIST|NETIF_F_HIGHDMA|NETIF_F_HW_CSUM); */
1545 +
1546 +       eip_rx = (EIP_RX *) devnet->priv;
1547 +       eip_tx = (EIP_TX *) (eip_rx + 1);
1548 +
1549 +       /* instance 0 */
1550 +       eip_tx->ep_system = ep_system();
1551 +       if (eip_tx->ep_system == NULL) {
1552 +               EIP_ERR_PRINTF("kernel comms for iface %s does not exist\n", devnet->name);
1553 +               errno = -ENXIO;
1554 +               goto out;
1555 +       }
1556 +       if (ep_waitfor_nodeid(eip_tx->ep_system) == ELAN_INVALID_NODE) {
1557 +               EIP_ERR_PRINTF("network position not found\n");
1558 +               errno = -EAGAIN;
1559 +               goto out;
1560 +       }
1561 +       eip_tx->xmtr = ep_alloc_xmtr(eip_tx->ep_system);
1562 +       if (!eip_tx->xmtr) {
1563 +               EIP_ERR_PRINTF("Cannot create allocated transmitter - maybe cable is disconnected\n");
1564 +               errno = -EAGAIN;
1565 +               goto out;
1566 +       }
1567 +       /* assign MAC address */
1568 +       *((u16 *) &devnet->dev_addr[4]) = htons(ep_nodeid(eip_tx->ep_system));
1569 +       eip_rx->net_device = devnet;
1570 +       eip_tx->net_device = devnet;
1571 +
1572 +       atomic_set(&eip_tx->destructor, 0);
1573 +
1574 +       if ((tmd_max >= EIP_TMD_MIN_NR) && (tmd_max <= EIP_TMD_MAX_NR)) {
1575 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tmd_max_nr to %d\n", tmd_max);
1576 +               eip_tx->tmd_max_nr = tmd_max;
1577 +       } else {
1578 +               EIP_ERR_PRINTF("parameter error : %d <= tmd_max(%d) <= %d using default %d\n", 
1579 +                               EIP_TMD_MIN_NR, tmd_max, EIP_TMD_MAX_NR, EIP_TMD_MAX_NR);
1580 +               eip_tx->tmd_max_nr = EIP_TMD_MAX_NR;
1581 +       }
1582 +
1583 +       if ((rmd_max >= EIP_RMD_MIN_NR) && (rmd_max <= EIP_RMD_MAX_NR)) {
1584 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting rmd_max_nr to %d\n", rmd_max);
1585 +               eip_rx->rmd_max_nr = rmd_max;
1586 +       } else {
1587 +               EIP_ERR_PRINTF("parameter error : %d <= rmd_max(%d) <= %d using default %d\n", EIP_RMD_MIN_NR,
1588 +                          rmd_max, EIP_RMD_MAX_NR, EIP_RMD_MAX_NR);
1589 +               eip_rx->rmd_max_nr = EIP_RMD_MAX_NR;
1590 +       }
1591 +
1592 +       if ((rx_envelope_nr > 0) && (rx_envelope_nr <= 1024)) { /* > 1024 don't be silly */
1593 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting rx_envelope_nr to %d\n", rx_envelope_nr);
1594 +       } else {
1595 +               EIP_ERR_PRINTF("parameter error : 0 < rx_envelope_nr(%d) <= 1024 using default %d\n",
1596 +                          rx_envelope_nr, EIP_RX_ENVELOPE_NR);
1597 +               rx_envelope_nr = EIP_RX_ENVELOPE_NR;
1598 +       }
1599 +
1600 +       if (tx_copybreak_max <= EIP_TX_COPYBREAK_MAX) {
1601 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tx_copybreak_max to %d\n", tx_copybreak_max);
1602 +       } else {
1603 +               EIP_ERR_PRINTF("parameter error : tx_copybreak_max > %d using default %d\n",
1604 +                          EIP_TX_COPYBREAK_MAX, EIP_TX_COPYBREAK_MAX);
1605 +               tx_copybreak_max = EIP_TX_COPYBREAK_MAX;
1606 +       }
1607 +#ifdef EIP_MORE_STATS
1608 +       eip_tx->sent_copybreak = 0;
1609 +       eip_tx->sent_std = 0;
1610 +       eip_tx->sent_aggreg = 0;
1611 +#endif
1612 +
1613 +       eip_tx->ipfrag_count = 0;
1614 +       eip_aggregation_set(1);
1615 +       eip_rx_granularity_set(rx_granularity);
1616 +       eip_tx_copybreak_set(EIP_TX_COPYBREAK);
1617 +       eip_ipfrag_to_set(EIP_IPFRAG_TO);
1618 +       eip_ipfrag_copybreak_set(EIP_IPFRAG_COPYBREAK);
1619 +
1620 +       spin_lock_init(&eip_tx->lock);
1621 +       spin_lock_init(&eip_tx->ipfraglock);
1622 +       spin_lock_init(&eip_rx->lock);
1623 +       tasklet_init(&eip_rx->tasklet, eip_rx_tasklet, 0);
1624 +       tasklet_init(&eip_tx->tasklet, eip_tx_tasklet, 0);
1625 +       INIT_LIST_HEAD(&eip_tx->ipfrag);
1626 +       INIT_LIST_HEAD(&eip_tx->inuse);
1627 +
1628 +       /* if we fail here cannot do much yet; waiting for rcvr remove code in ep. */
1629 +       errno = eip_tmds_alloc();
1630 +       if (errno)
1631 +               goto out;
1632 +
1633 +       errno = eip_rmds_alloc();
1634 +       if (errno)
1635 +               goto out;
1636 +
1637 +       errno = eip_stats_init();
1638 +       if (errno)
1639 +               goto out;
1640 +
1641 +       if (ep_svc_indicator_set(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
1642 +               EIP_ERR_PRINTF("Cannot set the service indicator\n");
1643 +               errno = -EINVAL;
1644 +               goto out;
1645 +       }
1646 +
1647 +       eip_rx_tasklet_locked = 0;
1648 +       tasklet_schedule(&eip_rx->tasklet);
1649 +
1650 +       SET_MODULE_OWNER(eip_tx->net_device);
1651 +
1652 +       if (register_netdev(devnet)) {
1653 +               printk("eip: failed to register netdev\n");
1654 +               goto out;
1655 +       }
1656 +
1657 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x ready\n", 
1658 +               devnet->name, (devnet->dev_addr[0]) & 0xff,
1659 +               (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1660 +               (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1661 +
1662 +       return 0;
1663 +      out:
1664 +       unregister_netdev(devnet);
1665 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 25)
1666 +       kfree(devnet);
1667 +#else
1668 +       free_netdev(devnet);
1669 +#endif
1670 +
1671 +       return errno;
1672 +}
1673 +void eip_exit(void)
1674 +{
1675 +       int i;
1676 +
1677 +       eip_rx_dropping = 1;                /* means that new messages wont be sent to tcp stack */
1678 +       eip_rx_tasklet_locked = 1;
1679 +
1680 +       netif_stop_queue(eip_tx->net_device);
1681 +
1682 +       if (ep_svc_indicator_clear(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
1683 +               EIP_ERR_PRINTF("Cannot unset the service indicator\n");
1684 +       }
1685 +
1686 +       schedule_timeout(10);
1687 +       
1688 +       del_timer_sync (&eip_rx_tasklet_timer);
1689 +
1690 +       tasklet_disable(&eip_rx->tasklet);
1691 +       tasklet_disable(&eip_tx->tasklet);
1692 +
1693 +       tasklet_kill(&eip_tx->tasklet);
1694 +       tasklet_kill(&eip_rx->tasklet);
1695 +
1696 +        eip_rmds_free();
1697 +        eip_tmds_free();
1698 +
1699 +       /* that things freed */
1700 +       for (i = 0 ; i < EIP_SVC_NR ; i++) {
1701 +               if ( EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats) != 0 )
1702 +                       EIP_ERR_PRINTF("%d RMDs not FREED on SVC[%d]\n", EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), i);
1703 +       }
1704 +       for (i = 0 ; i < 3 ; i++) {
1705 +               if ( EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats) != 0 )
1706 +                       EIP_ERR_PRINTF("%d TMDs not freed on TX HEAD[%d]\n", EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats), i);
1707 +               
1708 +       }
1709 +       unregister_netdev(eip_tx->net_device);
1710 +       kfree(eip_tx->net_device);
1711 +       
1712 +       eip_stats_cleanup();
1713 +}
1714 +
1715 +module_init(eip_init);
1716 +module_exit(eip_exit);
1717 +
1718 +MODULE_PARM(eipdebug, "i");
1719 +MODULE_PARM_DESC(eipdebug, "Set debug flags");
1720 +
1721 +MODULE_PARM(rx_envelope_nr, "i");
1722 +MODULE_PARM_DESC(rx_envelope_nr, "Number of allocated envelopes on the rx side");
1723 +
1724 +MODULE_PARM(tx_copybreak_max, "i");
1725 +MODULE_PARM_DESC(tx_copybreak_max, "Maximum size of the tx copybreak limit (default 512)");
1726 +
1727 +MODULE_PARM(tmd_max, "i");
1728 +MODULE_PARM(rmd_max, "i");
1729 +MODULE_PARM_DESC(tmd_max, "Maximum number of transmit buffers (default 64)");
1730 +MODULE_PARM_DESC(rmd_max, "Maximum number of receive buffers (default 64)");
1731 +
1732 +MODULE_PARM(tx_railmask, "i");
1733 +MODULE_PARM_DESC(tx_railmask, "Mask of which rails transmits can be queued on");
1734 +
1735 +MODULE_AUTHOR("Quadrics Ltd.");
1736 +MODULE_DESCRIPTION("Elan IP driver");
1737 +MODULE_LICENSE("GPL");
1738 +#endif /* MODULE */
1739 +
1740 +/*
1741 + * Local variables:
1742 + * c-file-style: "linux"
1743 + * End:
1744 + */
1745 Index: linux-2.4.21/drivers/net/qsnet/eip/eip_linux.h
1746 ===================================================================
1747 --- linux-2.4.21.orig/drivers/net/qsnet/eip/eip_linux.h 2004-02-23 16:02:56.000000000 -0500
1748 +++ linux-2.4.21/drivers/net/qsnet/eip/eip_linux.h      2005-06-01 23:12:54.554445944 -0400
1749 @@ -0,0 +1,399 @@
1750 +/*
1751 + *    Copyright (c) 2003 by Quadrics Ltd.
1752 + * 
1753 + *    For licensing information please see the supplied COPYING file
1754 + *
1755 + */
1756 +
1757 +#ident "$Id: eip_linux.h,v 1.46.2.1 2004/10/01 10:49:38 mike Exp $"
1758 +
1759 +#ifndef __EIP_LINUX_H
1760 +#define __EIP_LINUX_H
1761 +
1762 +#define EIP_WATERMARK                  (0xfab1e)
1763 +
1764 +#define EIP_PAGES(s)                   (((s - 1) >> PAGE_SHIFT) + 1)
1765 +#define EIP_DVMA_PAGES(s)              ((s < PAGE_SIZE) ? EIP_PAGES(s) + 1 : EIP_PAGES(s))
1766 +
1767 +#define EIP_SVC_SMALLEST_LEN           (1 << 9)        /* 512 */
1768 +#define EIP_SVC_BIGGEST_LEN            (1 << 16)       /* 64k */
1769 +
1770 +#define EIP_SVC_SMALLEST               (0)
1771 +#define EIP_SVC_BIGGEST                        (7)
1772 +
1773 +#define EIP_SVC_NR                     (8)
1774 +#define EIP_SVC_EP(s)                  (s + EP_MSG_SVC_EIP512)
1775 +
1776 +#define EIP_STAT_ALLOC_SHIFT           (8)
1777 +#define EIP_STAT_ALLOC_GET(atomicp)    ((int) atomic_read(atomicp) >> EIP_STAT_ALLOC_SHIFT)
1778 +#define EIP_STAT_ALLOC_ADD(atomicp, v) (atomic_add((v << EIP_STAT_ALLOC_SHIFT), atomicp))
1779 +#define EIP_STAT_ALLOC_SUB(atomicp, v) (atomic_sub((v << EIP_STAT_ALLOC_SHIFT), atomicp))
1780 +
1781 +#define EIP_STAT_QUEUED_MASK           (0xff)
1782 +#define EIP_STAT_QUEUED_GET(atomicp)   ((int) atomic_read(atomicp) & EIP_STAT_QUEUED_MASK)
1783 +
1784 +#define EIP_RMD_NR                     (8)
1785 +#define EIP_RMD_MIN_NR                 (8)
1786 +#define EIP_RMD_MAX_NR                 (64)    /* should be < than (1 << EIP_STAT_ALLOC_SHIFT) */
1787 +
1788 +#define EIP_RMD_ALLOC_STEP             (8)
1789 +#define EIP_RMD_ALLOC_THRESH           (16)
1790 +
1791 +#define EIP_RMD_ALLOC                  (1)
1792 +#define EIP_RMD_REPLACE                        (0)
1793 +
1794 +#define EIP_TMD_NR                     (64)
1795 +#define EIP_TMD_MIN_NR                 (16)
1796 +#define EIP_TMD_MAX_NR                 (64)    /* should be < than (1 << EIP_STAT_ALLOC_SHIFT) */
1797 +
1798 +#define EIP_TMD_TYPE_NR                        (3)
1799 +#define EIP_TMD_COPYBREAK              (0x0)
1800 +#define EIP_TMD_STD                    (0x1)
1801 +#define EIP_TMD_AGGREG                 (0x2)
1802 +
1803 +#define EIP_TX_COPYBREAK               (512)
1804 +#define EIP_TX_COPYBREAK_MAX           (1024)
1805 +
1806 +#define EIP_IPFRAG_TO                  (50)    /* time out before a frag is sent in msec */
1807 +#define EIP_IPFRAG_COPYBREAK           (EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG) - EIP_HEADER_PAD)
1808 +
1809 +#define EIP_RX_ENVELOPE_NR             ((EIP_RMD_MAX_NR*EIP_SVC_NR)/2)
1810 +#define EIP_RX_GRANULARITY             (1)
1811 +
1812 +#define EIP_IP_ALIGN(X)                        (((X) + (15)) & ~(15))
1813 +#define EIP_EXTRA                      roundup (sizeof(EIP_RMD), 256)
1814 +#define EIP_RCV_DMA_LEN(s)                     (s - EIP_EXTRA - EIP_HEADER_PAD)
1815 +#define EIP_MTU_MAX                    (EIP_RCV_DMA_LEN(EIP_SVC_BIGGEST_LEN) - (ETH_HLEN))
1816 +
1817 +#define SIZE_TO_SVC(s, svc)                                                                    \
1818 +       do {                                                                                    \
1819 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 9)))  {svc = 0;break;}   \
1820 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 10))) {svc = 1;break;}   \
1821 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 11))) {svc = 2;break;}   \
1822 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 12))) {svc = 3;break;}   \
1823 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 13))) {svc = 4;break;}   \
1824 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 14))) {svc = 5;break;}   \
1825 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 15))) {svc = 6;break;}   \
1826 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 16))) {svc = 7;break;}   \
1827 +                                       svc = -666;                                             \
1828 +                                       EIP_ASSERT(1 == 0);                                     \
1829 +       } while (0)
1830 +
1831 +extern int eipdebug;
1832 +#define EIP_ASSERT_ON 
1833 +/* #define NO_DEBUG */
1834 +
1835 +
1836 +/* ######################## */
1837 +#ifdef NO_DEBUG
1838 +#define __EIP_DBG_PRINTF(fmt, args...)
1839 +#define EIP_DBG_PRINTF(flag, fmt, args...)
1840 +#else
1841 +
1842 +#define EIP_DBG_RMD            0x1
1843 +#define EIP_DBG_TMD            0x2
1844 +#define EIP_DBG_RMD_HEAD       0x4
1845 +#define EIP_DBG_TMD_HEAD       0x8
1846 +#define EIP_DBG_EIPH           0x10
1847 +#define EIP_DBG_IPH            0x20
1848 +#define EIP_DBG_RMD_EP_DVMA    0x40
1849 +#define EIP_DBG_TMD_EP_DVMA    0x80
1850 +#define EIP_DBG_EP_DVMA                (EIP_DBG_RMD_EP_DVMA|EIP_DBG_TMD_EP_DVMA)
1851 +#define EIP_DBG_MEMALLOC       0x100
1852 +#define EIP_DBG_MEMFREE                0x200
1853 +#define EIP_DBG_RMD_QUEUE      0x400
1854 +#define EIP_DBG_TMD_QUEUE      0x800
1855 +#define EIP_DBG_GEN            0x1000
1856 +#define EIP_DBG_DEBUG          0x2000
1857 +       
1858 +#define __EIP_DBG_PRINTF(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUFFER, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args))
1859 +#define EIP_DBG_PRINTF(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTF(fmt, ## args):(void)0)
1860 +
1861 +#define __EIP_DBG_PRINTK(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUF_CON, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args))
1862 +#define EIP_DBG_PRINTK(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTK(fmt, ## args):(void)0)
1863 +           
1864 +#define EIP_ERR_PRINTF(fmt, args...)   __EIP_DBG_PRINTK("!!! ERROR !!! - " fmt, ## args)
1865 +
1866 +       
1867 +#define EIP_DBG2(flag, fn, fn_arg, fmt, args...)                                                               \
1868 +    if (unlikely(eipdebug & flag)) {                                                                           \
1869 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args);       \
1870 +            (void)(fn)(fn_arg);                                                                                \
1871 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args);       \
1872 +    }
1873 +
1874 +
1875 +#define EIP_DBG(flag, fn, args...)                                                             \
1876 +    if (unlikely(eipdebug & flag)) {                                                           \
1877 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s\n", smp_processor_id(), __func__);   \
1878 +            (void)(fn)(args);                                                                  \
1879 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s :\n", smp_processor_id(), __func__); \
1880 +    }
1881 +#endif /* NO_DEBUG */
1882 +
1883 +
1884 +#ifdef EIP_ASSERT_ON
1885 +
1886 +#define __EIP_ASSERT_PRINT(exp)                                \
1887 +               eipdebug = 0xffff;                              \
1888 +               EIP_ERR_PRINTF("ASSERT : %s, %s::%d\n",         \
1889 +                      #exp, __BASE_FILE__, __LINE__);          
1890 +
1891 +#define EIP_ASSERT(exp)                                                        \
1892 +               do { if (!(exp)) {                                      \
1893 +                       __EIP_ASSERT_PRINT(exp);                        \
1894 +                       netif_stop_queue(eip_tx->net_device);           \
1895 +               } } while (0)
1896 +
1897 +#define EIP_ASSERT2(exp, f, arg)                                       \
1898 +       do {                                                            \
1899 +               if (!(exp)) {                                           \
1900 +                       __EIP_ASSERT_PRINT(exp);                        \
1901 +                       f(arg);                                         \
1902 +               }                                                       \
1903 +       } while (0)
1904 +
1905 +#define EIP_ASSERT_BUG(exp)                                            \
1906 +       do {                                                            \
1907 +               if (!(exp)) {                                           \
1908 +                       __EIP_ASSERT_PRINT(exp);                        \
1909 +                       BUG();                                          \
1910 +               }                                                       \
1911 +       } while (0)
1912 +
1913 +#define EIP_ASSERT_GOTO(exp, label, f, arg)                            \
1914 +       do {                                                            \
1915 +               if (!(exp)) {                                           \
1916 +                       __EIP_ASSERT_PRINT(exp);                        \
1917 +                       f(arg);                                         \
1918 +                       goto label;                                     \
1919 +               }                                                       \
1920 +       } while (0)
1921 +
1922 +#define EIP_ASSERT_RET(exp, ret)                                       \
1923 +       do {                                                            \
1924 +               if (!(exp)) {                                           \
1925 +                       __EIP_ASSERT_PRINT(exp);                        \
1926 +                       return ret;                                     \
1927 +               }                                                       \
1928 +       } while (0)
1929 +
1930 +#define EIP_ASSERT_RETURN(exp, f, arg)                                 \
1931 +       do {                                                            \
1932 +               if (!(exp)) {                                           \
1933 +                       __EIP_ASSERT_PRINT(exp);                        \
1934 +                       f(arg);                                         \
1935 +                       return;                                         \
1936 +               }                                                       \
1937 +       } while (0)
1938 +
1939 +#define EIP_ASSERT_RETNULL(exp, f, arg)                                        \
1940 +       do {                                                            \
1941 +               if (!(exp)) {                                           \
1942 +                       __EIP_ASSERT_PRINT(exp);                        \
1943 +                       f(arg);                                         \
1944 +                       return NULL;                                    \
1945 +               }                                                       \
1946 +       } while (0)
1947 +
1948 +#else
1949 +
1950 +#define EIP_ASSERT(exp)                do {} while(0)
1951 +#define EIP_ASSERT_OUT(exp)            do {} while(0)
1952 +#define EIP_ASSERT_RETURN(exp, f, arg)         do {} while(0)
1953 +#define EIP_ASSERT_RETNULL(exp, f, arg)                do {} while(0)
1954 +#define EIP_ASSERT_BUG(exp)            do {} while(0)
1955 +
1956 +#endif /* EIP_ASSERT */
1957 +
1958 +
1959 +
1960 +typedef struct {
1961 +       u_short ip_bcast;
1962 +       u_short ip_inst;
1963 +       u_short ip_addr;
1964 +} EIP_ADDRESS;
1965 +
1966 +typedef struct {
1967 +       EIP_ADDRESS h_dhost;
1968 +       EIP_ADDRESS h_shost;
1969 +       u_short h_sap;
1970 +} EIP_HEADER;
1971 +#define EIP_HEADER_PAD                 (2)
1972 +
1973 +typedef struct eip_proc_fs {
1974 +       const char *name;
1975 +       struct proc_dir_entry **parent;
1976 +       read_proc_t *read;
1977 +       write_proc_t *write;
1978 +       unsigned char allocated;
1979 +       struct proc_dir_entry *entry;
1980 +} EIP_PROC_FS;
1981 +
1982 +#define EIP_PROC_ROOT_DIR              "eip"
1983 +
1984 +#define EIP_PROC_DEBUG_DIR             "debug"
1985 +#define EIP_PROC_DEBUG_RX_FLUSH                "rx_flush"
1986 +#define EIP_PROC_DEBUG_TX_FLUSH                "tx_flush"
1987 +
1988 +#define EIP_PROC_AGGREG_DIR            "aggregation"
1989 +#define EIP_PROC_AGGREG_ONOFF          "enable"
1990 +#define EIP_PROC_AGGREG_TO             "timeout"
1991 +#define EIP_PROC_AGGREG_COPYBREAK      "copybreak"
1992 +
1993 +#define EIP_PROC_TX_COPYBREAK          "tx_copybreak"
1994 +#define EIP_PROC_STATS                 "stats"
1995 +#define EIP_PROC_RX_GRAN               "rx_granularity"
1996 +#define EIP_PROC_TX_RAILMASK           "tx_railmask"
1997 +#define EIP_PROC_TMD_INUSE             "tmd_inuse"
1998 +#define EIP_PROC_EIPDEBUG              "eipdebug"
1999 +#define EIP_PROC_CHECKSUM               "checksum"
2000 +
2001 +/* RX */
2002 +/* dma_len is used to keep the len of a received packet */
2003 +/* nmd.nmd_len is the max dma that can be received      */
2004 +/*                                                      */
2005 +struct eip_rmd {
2006 +       struct sk_buff *skb;
2007 +
2008 +       EP_NMD nmd;
2009 +       u16 dvma_idx;
2010 +
2011 +       EP_RXD *rxd;
2012 +       struct eip_rmd_head *head;
2013 +       union {
2014 +               struct list_head link;                          /* when on "busy" list */
2015 +               struct eip_rmd  *next;                          /* all other lists */
2016 +       } chain;
2017 +};
2018 +typedef struct eip_rmd EIP_RMD;
2019 +struct eip_rmd_head {
2020 +       EP_NMH *handle;
2021 +
2022 +       EP_RCVR *rcvr;
2023 +       EIP_RMD *busy_list;
2024 +
2025 +       /* stats */
2026 +       atomic_t stats;
2027 +       unsigned long dma;
2028 +};
2029 +
2030 +typedef struct eip_rmd_head EIP_RMD_HEAD;
2031 +typedef struct eip_rx {
2032 +       struct eip_rmd_head head[EIP_SVC_NR];
2033 +
2034 +       EIP_RMD *irq_list;
2035 +       short    irq_list_nr;   
2036 +
2037 +       /* stats */
2038 +       unsigned long packets;
2039 +       unsigned long bytes;
2040 +       unsigned long errors;
2041 +       unsigned long dropped;
2042 +       unsigned long reschedule;
2043 +
2044 +       spinlock_t lock;
2045 +       struct tasklet_struct tasklet;
2046 +       unsigned char rmd_max_nr;
2047 +       unsigned char sysctl_granularity;
2048 +       struct net_device *net_device;
2049 +} EIP_RX;
2050 +
2051 +/* TX */
2052 +/* dma_len_max is the maximum len for a given DMA                      */
2053 +/* where mnd.nmd_len is the len of the packet to send ~> than skb->len */
2054 +typedef struct eip_ipfrag_handle {
2055 +       /* common with tmd */
2056 +       unsigned long dma_base;
2057 +       int dma_len;
2058 +       EP_NMD nmd;
2059 +       u16 dvma_idx;
2060 +
2061 +       struct sk_buff *skb;
2062 +       struct eip_tmd_head *head;
2063 +       union {
2064 +               struct list_head link;                          /* when on "busy" list */
2065 +               struct eip_tmd  *next;                          /* all other lists */
2066 +       } chain;
2067 +
2068 +       /* private */
2069 +       struct list_head list;
2070 +       struct timeval timestamp;
2071 +       unsigned int frag_nr;
2072 +       int datagram_len; /* Ip data */
2073 +       int dma_correction;
2074 +       EP_PAYLOAD payload;
2075 +} EIP_IPFRAG;
2076 +
2077 +struct eip_tmd {
2078 +       unsigned long dma_base;
2079 +       int dma_len;
2080 +       EP_NMD nmd;
2081 +       u16 dvma_idx;
2082 +
2083 +       struct sk_buff *skb;
2084 +       struct eip_tmd_head *head;
2085 +       union {
2086 +               struct list_head link;                          /* when on "busy" list */
2087 +               struct eip_tmd  *next;                          /* all other lists */
2088 +       } chain;
2089 +};
2090 +
2091 +struct eip_tmd_head {
2092 +       EP_NMH *handle;
2093 +
2094 +       struct eip_tmd *tmd;
2095 +       atomic_t stats;
2096 +};
2097 +
2098 +typedef struct eip_tmd EIP_TMD;
2099 +typedef struct eip_tmd_head EIP_TMD_HEAD;
2100 +
2101 +/* #define EIP_MORE_STATS */
2102 +
2103 +typedef struct eip_tx {
2104 +       struct net_device *net_device;
2105 +       EP_XMTR *xmtr;
2106 +       EP_SYS *ep_system;
2107 +
2108 +       struct eip_tmd_head head[EIP_TMD_TYPE_NR];
2109 +       struct list_head inuse;
2110 +       atomic_t destructor;
2111 +
2112 +       /* stats */
2113 +       unsigned long packets;
2114 +       unsigned long bytes;
2115 +       unsigned long errors;
2116 +       unsigned long dropped;
2117 +       unsigned long dma[EIP_SVC_NR];
2118 +       
2119 +#ifdef EIP_MORE_STATS
2120 +       unsigned long sent_copybreak;
2121 +       unsigned long sent_std;
2122 +       unsigned long sent_aggreg;
2123 +#endif
2124 +
2125 +       unsigned char tmd_max_nr;
2126 +
2127 +       unsigned short sysctl_copybreak;
2128 +       unsigned short sysctl_ipfrag_to;
2129 +       unsigned short sysctl_ipfrag_copybreak;
2130 +       unsigned short sysctl_aggregation;
2131 +
2132 +       unsigned short ipfrag_count;
2133 +       struct list_head ipfrag;
2134 +       spinlock_t ipfraglock;
2135 +
2136 +       spinlock_t lock;
2137 +       struct tasklet_struct tasklet;
2138 +} EIP_TX;
2139 +
2140 +/* =============================================== */
2141 +    /* unsigned long   multicast; */
2142 +#endif                         /* __EIP_LINUX_H */
2143 +
2144 +/*
2145 + * Local variables:
2146 + * c-file-style: "linux"
2147 + * End:
2148 + */
2149 Index: linux-2.4.21/drivers/net/qsnet/eip/eip_stats.c
2150 ===================================================================
2151 --- linux-2.4.21.orig/drivers/net/qsnet/eip/eip_stats.c 2004-02-23 16:02:56.000000000 -0500
2152 +++ linux-2.4.21/drivers/net/qsnet/eip/eip_stats.c      2005-06-01 23:12:54.555445792 -0400
2153 @@ -0,0 +1,374 @@
2154 +/*
2155 + *    Copyright (c) 2003 by Quadrics Ltd.
2156 + * 
2157 + *    For licensing information please see the supplied COPYING file
2158 + *
2159 + */
2160 +
2161 +/*
2162 + * $Id: eip_stats.c,v 1.34.2.1 2005/01/26 14:31:56 mike Exp $
2163 + * $Source: /cvs/master/quadrics/eipmod/eip_stats.c,v $
2164 + */
2165 +
2166 +#include <qsnet/kernel.h>
2167 +#include <linux/module.h>
2168 +
2169 +#include <elan/epcomms.h>
2170 +
2171 +#include <linux/netdevice.h>
2172 +
2173 +#include <linux/kernel.h>
2174 +#include <linux/proc_fs.h>
2175 +
2176 +#include <asm/atomic.h>
2177 +
2178 +#include <qsnet/procfs_linux.h>
2179 +
2180 +#include "eip_linux.h"
2181 +#include "eip_stats.h"
2182 +
2183 +extern EIP_RX *eip_rx;
2184 +extern EIP_TX *eip_tx;
2185 +extern int tx_copybreak_max;
2186 +extern EP_RAILMASK tx_railmask;
2187 +extern int  eip_checksum_state;
2188 +extern void eip_stop_queue(void);
2189 +extern void eip_start_queue(void);
2190 +
2191 +static int eip_stats_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2192 +{
2193 +       int i, outlen = 0;
2194 +
2195 +       *buf = '\0';
2196 +       strcat(buf, "\n");
2197 +       strcat(buf, "--------------------------------------------+------------+-----------------+\n");
2198 +       strcat(buf, "    SKB/DMA    |               | Rx         | Tx         |  TMD TYPE       |\n");
2199 +       strcat(buf, "--------------------------------------------+------------|-----------------+\n");
2200 +
2201 +       i = 0;
2202 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #1[%3.3d/%3.3d/%3.3d] |\n",
2203 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2204 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2205 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2206 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2207 +                eip_tx->tmd_max_nr);
2208 +
2209 +       i++;
2210 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #2[%3.3d/%3.3d/%3.3d] |\n",
2211 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2212 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2213 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2214 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2215 +               eip_tx->tmd_max_nr);
2216 +
2217 +       i++;
2218 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #3[%3.3d/%3.3d/%3.3d] |\n",
2219 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2220 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2221 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2222 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2223 +               eip_tx->tmd_max_nr);
2224 +
2225 +       i++;
2226 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld +-----------------+\n",
2227 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2228 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2229 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2230 +
2231 +       i++;
2232 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2233 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2234 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2235 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2236 +
2237 +       i++;
2238 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2239 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2240 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2241 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2242 +
2243 +       i++;
2244 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2245 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2246 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2247 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2248 +
2249 +       i++;
2250 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2251 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2252 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2253 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2254 +
2255 +       strcat(buf, "--------------------------------------------+------------+\n");
2256 +       sprintf(buf + strlen(buf), " RMD IRQ %4.4d                    %10lu | %10lu |\n",
2257 +               eip_rx->irq_list_nr, 
2258 +               eip_rx->packets, eip_tx->packets);
2259 +       strcat(buf, "--------------------------------------------+------------+\n");
2260 +
2261 +#ifdef EIP_MORE_STATS
2262 +       strcat(buf, "\n");
2263 +       sprintf(buf + strlen(buf), " Copybreak %10ld Std %10ld Aggreg %10ld\n",
2264 +                       eip_tx->sent_copybreak, eip_tx->sent_std, eip_tx->sent_aggreg);
2265 +#endif
2266 +
2267 +
2268 +       strcat(buf, "\n");
2269 +       sprintf(buf + strlen(buf), "Rx bytes: %lu (%lu Mb) errors: %lu dropped: %lu reschedule: %lu\n",
2270 +               eip_rx->bytes, eip_rx->bytes / (1024 * 1024), eip_rx->errors, eip_rx->dropped, eip_rx->reschedule);
2271 +       sprintf(buf + strlen(buf), "Tx bytes: %lu (%lu Mb) errors: %lu dropped: %lu\n",
2272 +               eip_tx->bytes, eip_tx->bytes / (1024 * 1024), eip_tx->errors, eip_tx->dropped);
2273 +       strcat(buf, "\n");
2274 +
2275 +       outlen = strlen(buf);
2276 +       ASSERT(outlen < PAGE_SIZE);
2277 +       *eof = 1;
2278 +       return outlen;
2279 +}
2280 +
2281 +void eip_stats_dump(void)
2282 +{
2283 +    int eof;
2284 +
2285 +    char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2286 +
2287 +    if (buf == NULL)
2288 +    {
2289 +       printk("no memory to produce eip_stats\n");
2290 +       return;
2291 +    }
2292 +
2293 +    eip_stats_read(buf, NULL, 0, 0, &eof, NULL);
2294 +
2295 +    printk(buf);
2296 +
2297 +    kfree(buf);
2298 +}
2299 +
2300 +static int eip_stats_write(struct file *file, const char *buf, unsigned long count, void *data)
2301 +{
2302 +       int i;
2303 +       unsigned long flags;
2304 +
2305 +       spin_lock_irqsave(&eip_rx->lock, flags);
2306 +       eip_rx->packets = 0;
2307 +       eip_rx->bytes = 0;
2308 +       eip_rx->errors = 0;
2309 +       eip_rx->dropped = 0;
2310 +       eip_rx->reschedule = 0;
2311 +       for (i = 0; i < EIP_SVC_NR; eip_rx->head[i].dma = 0, i++);
2312 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
2313 +
2314 +       spin_lock_irqsave(&eip_tx->lock, flags);
2315 +       eip_tx->packets = 0;
2316 +       eip_tx->bytes = 0;
2317 +       eip_tx->errors = 0;
2318 +       eip_tx->dropped = 0;
2319 +#ifdef EIP_MORE_STATS
2320 +       eip_tx->sent_copybreak = 0;
2321 +       eip_tx->sent_std = 0;
2322 +       eip_tx->sent_aggreg = 0;
2323 +#endif
2324 +       for (i = 0; i < EIP_SVC_NR; eip_tx->dma[i] = 0, i++);
2325 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
2326 +
2327 +       return count;
2328 +}
2329 +
2330 +#define                eip_stats_var_write(name)                                                                       \
2331 +static int eip_stats_##name##_write(struct file *file, const char *buf, unsigned long count, void *data)       \
2332 +{                                                                                                              \
2333 +       char * b = (char *) buf;                                                                                \
2334 +       *(b + count) = '\0';                                                                                    \
2335 +       eip_##name##_set((int) simple_strtoul(b, NULL, 10));                                                    \
2336 +       return count;                                                                                           \
2337 +}
2338 +
2339 +#define        eip_stats_var_read(name, var)                                                                   \
2340 +static int eip_stats_##name##_read(char *buf, char **start, off_t off, int count, int *eof, void *data)                \
2341 +{                                                                                                              \
2342 +       sprintf(buf, "%d\n", var);                                                                              \
2343 +       *eof = 1;                                                                                               \
2344 +       return strlen(buf);                                                                                     \
2345 +}
2346 +
2347 +
2348 +#define                eip_stats_var_set(name, min, max, default, var)                                                                 \
2349 +void eip_##name##_set(int i)                                                                                                   \
2350 +{                                                                                                                              \
2351 +       if ( (i >= min) && (i <= max)) {                                                                                        \
2352 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting " #name " to %d\n", i);                                                    \
2353 +               var =(unsigned short) i;                                                                                        \
2354 +       }                                                                                                                       \
2355 +       else {                                                                                                                  \
2356 +               EIP_ERR_PRINTF("parameter error : %d <= " #name "(%d) <= %d using default %d\n", min, i, (int) max, (int) default);     \
2357 +       }                                                                                                                       \
2358 +}
2359 +
2360 +eip_stats_var_set(tx_copybreak, 0, tx_copybreak_max, EIP_TX_COPYBREAK, eip_tx->sysctl_copybreak);
2361 +eip_stats_var_set(rx_granularity, 1, EIP_RMD_MIN_NR, EIP_RX_GRANULARITY, eip_rx->sysctl_granularity);
2362 +eip_stats_var_set(tx_railmask, 0, EP_RAILMASK_ALL, EP_RAILMASK_ALL, tx_railmask);
2363 +eip_stats_var_set(ipfrag_to, 0, (1 << 16), EIP_IPFRAG_TO, eip_tx->sysctl_ipfrag_to);
2364 +eip_stats_var_set(aggregation, 0, 1, 1, eip_tx->sysctl_aggregation);
2365 +eip_stats_var_set(ipfrag_copybreak, 0, EIP_IPFRAG_COPYBREAK, EIP_IPFRAG_COPYBREAK, eip_tx->sysctl_ipfrag_copybreak);
2366 +/* eip_stats_var_set(eipdebug, 0, , 0, eipdebug); */
2367 +
2368 +eip_stats_var_read(aggregation, eip_tx->sysctl_aggregation);
2369 +eip_stats_var_read(ipfrag_count, eip_tx->ipfrag_count);
2370 +eip_stats_var_read(ipfrag_to, eip_tx->sysctl_ipfrag_to);
2371 +eip_stats_var_read(ipfrag_copybreak, eip_tx->sysctl_ipfrag_copybreak);
2372 +eip_stats_var_read(tx_copybreak, eip_tx->sysctl_copybreak);
2373 +eip_stats_var_read(rx_granularity, eip_rx->sysctl_granularity);
2374 +eip_stats_var_read(tx_railmask, tx_railmask);
2375 +
2376 +eip_stats_var_write(aggregation);
2377 +eip_stats_var_write(ipfrag_to);
2378 +eip_stats_var_write(ipfrag_copybreak);
2379 +eip_stats_var_write(tx_copybreak);
2380 +eip_stats_var_write(rx_granularity);
2381 +eip_stats_var_write(tx_railmask);
2382 +
2383 +
2384 +static int eip_checksum_write(struct file *file, const char *buf, unsigned long count, void *data)
2385 +{
2386 +       char * b = (char *) buf;
2387 +       int    value;
2388 +
2389 +       *(b + count) = '\0';
2390 +
2391 +       value = (int) simple_strtoul(b, NULL, 10);
2392 +       if  ((value >= CHECKSUM_NONE) && (value <= CHECKSUM_UNNECESSARY)) 
2393 +               eip_checksum_state = value;
2394 +       else 
2395 +               EIP_ERR_PRINTF("%d <= checksum(%d) <= %d using old value %d\n", CHECKSUM_NONE, value, CHECKSUM_UNNECESSARY, eip_checksum_state);
2396 +
2397 +       return count;
2398 +}
2399 +
2400 +static int eip_checksum_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2401 +{
2402 +       switch ( eip_checksum_state ) 
2403 +       {
2404 +       case 0  : sprintf(buf, "0 CHECKSUM_NONE\n");                      break;
2405 +       case 1  : sprintf(buf, "1 CHECKSUM_HW\n");                        break;
2406 +       case 2  : sprintf(buf, "2 CHECKSUM_UNNECESSARY\n");               break;
2407 +       default : sprintf(buf, "%d INVALID VALUE\n", eip_checksum_state); break;
2408 +       }
2409 +       *eof = 1;
2410 +       return strlen(buf);
2411 +}
2412 +
2413 +static int eip_stats_eipdebug_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2414 +{
2415 +       *buf = '\0';
2416 +       sprintf(buf + strlen(buf), "0x%x\n", eipdebug);
2417 +       *eof = 1;
2418 +       return strlen(buf);
2419 +}
2420 +static int eip_stats_eipdebug_write(struct file *file, const char *buf, unsigned long count, void *data)
2421 +{
2422 +       char * p = (char *) buf;
2423 +       *(p + count - 1) = '\0';
2424 +       eipdebug = simple_strtoul(p, NULL, 0);
2425 +       __EIP_DBG_PRINTK("Setting eipdebug to 0x%x\n", eipdebug);
2426 +       return count;
2427 +}
2428 +
2429 +static int eip_stats_tmd_inuse_read(char *page, char **start, off_t off, int count, int *eof, void *data)
2430 +{
2431 +       struct list_head *lp;
2432 +       unsigned long flags;
2433 +       unsigned int len = 0;
2434 +
2435 +       spin_lock_irqsave(&eip_tx->lock, flags);
2436 +       list_for_each (lp, &eip_tx->inuse) {
2437 +               EIP_TMD *tmd = list_entry (lp, EIP_TMD, chain.link);
2438 +               EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base;
2439 +               
2440 +                len += sprintf(page+len, "tmd=%p id=%d len=%d\n",
2441 +                              tmd, eiph ? ntohs(eiph->h_dhost.ip_addr) : -1,
2442 +                              tmd->dma_len);
2443 +
2444 +                if (len + 40 >= count)
2445 +                        break;
2446 +        }
2447 +        spin_unlock_irqrestore(&eip_tx->lock, flags);
2448 +
2449 +       return qsnet_proc_calc_metrics (page, start, off, count, eof, len);
2450 +}
2451 +
2452 +static int eip_stats_debug_rx_flush(struct file *file, const char *buf, unsigned long count, void *data)
2453 +{
2454 +       EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing rx ...\n");
2455 +       tasklet_schedule(&eip_rx->tasklet);
2456 +       return count;
2457 +}
2458 +static int eip_stats_debug_tx_flush(struct file *file, const char *buf, unsigned long count, void *data)
2459 +{
2460 +       EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing tx ... %d tmds reclaimed\n", ep_enable_txcallbacks(eip_tx->xmtr));
2461 +       ep_disable_txcallbacks(eip_tx->xmtr);
2462 +       tasklet_schedule(&eip_tx->tasklet);
2463 +       return count;
2464 +}
2465 +
2466 +#define EIP_PROC_PARENT_NR     (3)
2467 +/* NOTE : the parents should be declared before the children */
2468 +static EIP_PROC_FS eip_procs[] = {
2469 +       /* {name, parent, read fn, write fn, allocated, entry}, */
2470 +       {EIP_PROC_ROOT_DIR, &qsnet_procfs_root, NULL, NULL, 0, NULL},
2471 +       {EIP_PROC_DEBUG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL},
2472 +       {EIP_PROC_AGGREG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL},        /* end of parents */
2473 +       {EIP_PROC_STATS, &eip_procs[0].entry, eip_stats_read, eip_stats_write, 0, NULL},
2474 +       {EIP_PROC_TX_COPYBREAK, &eip_procs[0].entry, eip_stats_tx_copybreak_read, eip_stats_tx_copybreak_write, 0, NULL},
2475 +       {EIP_PROC_RX_GRAN, &eip_procs[0].entry, eip_stats_rx_granularity_read, eip_stats_rx_granularity_write, 0, NULL},
2476 +       {EIP_PROC_TX_RAILMASK, &eip_procs[0].entry, eip_stats_tx_railmask_read, eip_stats_tx_railmask_write, 0, NULL},
2477 +       {EIP_PROC_TMD_INUSE, &eip_procs[0].entry, eip_stats_tmd_inuse_read, NULL, 0, NULL},
2478 +       {EIP_PROC_EIPDEBUG, &eip_procs[0].entry, eip_stats_eipdebug_read, eip_stats_eipdebug_write, 0, NULL},
2479 +       {EIP_PROC_CHECKSUM, &eip_procs[0].entry, eip_checksum_read, eip_checksum_write, 0, NULL},
2480 +       {EIP_PROC_DEBUG_RX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_rx_flush, 0, NULL},
2481 +       {EIP_PROC_DEBUG_TX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_tx_flush, 0, NULL},
2482 +       {"ipfrag_count", &eip_procs[2].entry, eip_stats_ipfrag_count_read, NULL, 0, NULL},
2483 +       {EIP_PROC_AGGREG_TO, &eip_procs[2].entry, eip_stats_ipfrag_to_read, eip_stats_ipfrag_to_write, 0, NULL},
2484 +       {EIP_PROC_AGGREG_ONOFF, &eip_procs[2].entry, eip_stats_aggregation_read, eip_stats_aggregation_write, 0, NULL},
2485 +       {EIP_PROC_AGGREG_COPYBREAK, &eip_procs[2].entry, eip_stats_ipfrag_copybreak_read, eip_stats_ipfrag_copybreak_write, 0, NULL},
2486 +       {NULL, NULL, NULL, NULL, 1, NULL},
2487 +};
2488 +
2489 +int eip_stats_init(void)
2490 +{
2491 +       int p;
2492 +
2493 +       for (p = 0; !eip_procs[p].allocated; p++) {
2494 +               if (p < EIP_PROC_PARENT_NR)
2495 +                       eip_procs[p].entry = proc_mkdir(eip_procs[p].name, *eip_procs[p].parent);
2496 +               else
2497 +                       eip_procs[p].entry = create_proc_entry(eip_procs[p].name, 0, *eip_procs[p].parent);
2498 +
2499 +               if (!eip_procs[p].entry) {
2500 +                       EIP_ERR_PRINTF("%s\n", "Cannot allocate proc entry");
2501 +                       eip_stats_cleanup();
2502 +                       return -ENOMEM;
2503 +               }
2504 +
2505 +               eip_procs[p].entry->owner = THIS_MODULE;
2506 +               eip_procs[p].entry->write_proc = eip_procs[p].write;
2507 +               eip_procs[p].entry->read_proc = eip_procs[p].read;
2508 +               eip_procs[p].allocated = 1;
2509 +       }
2510 +       eip_procs[p].allocated = 0;
2511 +       return 0;
2512 +}
2513 +
2514 +void eip_stats_cleanup(void)
2515 +{
2516 +       int p;
2517 +       for (p = 0; eip_procs[p].allocated; p++) {
2518 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Removing %s from proc\n", eip_procs[p].name);
2519 +               remove_proc_entry(eip_procs[p].name, *eip_procs[p].parent);
2520 +       }
2521 +}
2522 +
2523 +/*
2524 + * Local variables:
2525 + * c-file-style: "linux"
2526 + * End:
2527 + */
2528 Index: linux-2.4.21/drivers/net/qsnet/eip/eip_stats.h
2529 ===================================================================
2530 --- linux-2.4.21.orig/drivers/net/qsnet/eip/eip_stats.h 2004-02-23 16:02:56.000000000 -0500
2531 +++ linux-2.4.21/drivers/net/qsnet/eip/eip_stats.h      2005-06-01 23:12:54.555445792 -0400
2532 @@ -0,0 +1,22 @@
2533 +/*
2534 + *    Copyright (c) 2003 by Quadrics Ltd.
2535 + * 
2536 + *    For licensing information please see the supplied COPYING file
2537 + *
2538 + */
2539 +
2540 +#ident "$Id: eip_stats.h,v 1.14 2004/05/10 14:47:47 daniel Exp $"
2541 +
2542 +#ifndef __EIP_STATS_H
2543 +#define        __EIP_STATS_H
2544 +
2545 +int eip_stats_init(void);
2546 +void eip_stats_cleanup(void);
2547 +void eip_rx_granularity_set(int);
2548 +void eip_tx_copybreak_set(int);
2549 +void eip_ipfrag_to_set(int);
2550 +void eip_aggregation_set(int);
2551 +void eip_ipfrag_copybreak_set(int);
2552 +void eip_stats_dump(void);
2553 +
2554 +#endif                         /* __EIP_STATS_H */
2555 Index: linux-2.4.21/drivers/net/qsnet/eip/Makefile
2556 ===================================================================
2557 --- linux-2.4.21.orig/drivers/net/qsnet/eip/Makefile    2004-02-23 16:02:56.000000000 -0500
2558 +++ linux-2.4.21/drivers/net/qsnet/eip/Makefile 2005-06-01 23:12:54.555445792 -0400
2559 @@ -0,0 +1,31 @@
2560 +#
2561 +# Makefile for Quadrics QsNet
2562 +#
2563 +# Copyright (c) 2002-2004 Quadrics Ltd
2564 +#
2565 +# File: drivers/net/qsnet/eip/Makefile
2566 +#
2567 +
2568 +
2569 +#
2570 +
2571 +#
2572 +# Makefile for Quadrics QsNet
2573 +#
2574 +# Copyright (c) 2004 Quadrics Ltd.
2575 +#
2576 +# File: driver/net/qsnet/eip/Makefile
2577 +#
2578 +
2579 +list-multi             := eip.o
2580 +eip-objs       := eip_linux.o eip_stats.o
2581 +export-objs            := 
2582 +obj-$(CONFIG_EIP)      := eip.o
2583 +
2584 +eip.o : $(eip-objs)
2585 +       $(LD) -r -o $@ $(eip-objs)
2586 +
2587 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
2588 +
2589 +include $(TOPDIR)/Rules.make
2590 +
2591 Index: linux-2.4.21/drivers/net/qsnet/eip/Makefile.conf
2592 ===================================================================
2593 --- linux-2.4.21.orig/drivers/net/qsnet/eip/Makefile.conf       2004-02-23 16:02:56.000000000 -0500
2594 +++ linux-2.4.21/drivers/net/qsnet/eip/Makefile.conf    2005-06-01 23:12:54.555445792 -0400
2595 @@ -0,0 +1,10 @@
2596 +# Flags for generating QsNet Linux Kernel Makefiles
2597 +MODNAME                =       eip.o
2598 +MODULENAME     =       eip
2599 +KOBJFILES      =       eip_linux.o eip_stats.o
2600 +EXPORT_KOBJS   =       
2601 +CONFIG_NAME    =       CONFIG_EIP
2602 +SGALFC         =       
2603 +# EXTRALINES START
2604 +
2605 +# EXTRALINES END
2606 Index: linux-2.4.21/drivers/net/qsnet/eip/quadrics_version.h
2607 ===================================================================
2608 --- linux-2.4.21.orig/drivers/net/qsnet/eip/quadrics_version.h  2004-02-23 16:02:56.000000000 -0500
2609 +++ linux-2.4.21/drivers/net/qsnet/eip/quadrics_version.h       2005-06-01 23:12:54.556445640 -0400
2610 @@ -0,0 +1 @@
2611 +#define QUADRICS_VERSION "4.30qsnet"
2612 Index: linux-2.4.21/drivers/net/qsnet/elan/bitmap.c
2613 ===================================================================
2614 --- linux-2.4.21.orig/drivers/net/qsnet/elan/bitmap.c   2004-02-23 16:02:56.000000000 -0500
2615 +++ linux-2.4.21/drivers/net/qsnet/elan/bitmap.c        2005-06-01 23:12:54.556445640 -0400
2616 @@ -0,0 +1,287 @@
2617 +/*
2618 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
2619 + *
2620 + *    For licensing information please see the supplied COPYING file
2621 + *
2622 + */
2623 +
2624 +#ident "@(#)$Id: bitmap.c,v 1.5 2004/01/20 17:32:17 david Exp $"
2625 +/*      $Source: /cvs/master/quadrics/elanmod/shared/bitmap.c,v $*/
2626 +
2627 +#if defined(__KERNEL__)
2628 +#include <qsnet/kernel.h>
2629 +#endif
2630 +#include <qsnet/config.h>
2631 +#include <elan/bitmap.h>
2632 +
2633 +/*
2634 + * Return the index of the first available bit in the 
2635 + * bitmap, or -1 for failure
2636 + */
2637 +int
2638 +bt_freebit (bitmap_t *bitmap, int nbits)
2639 +{
2640 +    int last = (--nbits) >> BT_ULSHIFT;
2641 +    int maxbit;
2642 +    int        i, j;
2643 +
2644 +    /* look for a word with a bit off */
2645 +    for (i = 0; i <= last; i++)
2646 +       if (bitmap[i] != ~((bitmap_t) 0))
2647 +           break;
2648 +
2649 +    if (i <= last)
2650 +    {
2651 +       /* found a word with a bit off,  now see which bit it is */
2652 +       maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
2653 +       for (j = 0; j <= maxbit; j++)
2654 +           if ((bitmap[i] & (1 << j)) == 0)
2655 +               return ((i << BT_ULSHIFT) | j);
2656 +    }
2657 +    return (-1);
2658 +    
2659 +}
2660 +
2661 +/*
2662 + * bt_lowbit:
2663 + *     Return the index of the lowest set bit in the
2664 + *     bitmap, or -1 for failure.
2665 + */
2666 +int
2667 +bt_lowbit (bitmap_t *bitmap, int nbits)
2668 +{
2669 +    int last = (--nbits) >> BT_ULSHIFT;
2670 +    int maxbit;
2671 +    int i, j;
2672 +    
2673 +    /* look for a word with a bit on */
2674 +    for (i = 0; i <= last; i++)
2675 +       if (bitmap[i] != 0)
2676 +           break;
2677 +    if (i <= last)
2678 +    {
2679 +       /* found a word with a bit on, now see which bit it is */
2680 +       maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
2681 +       for (j = 0; j <= maxbit; j++)
2682 +           if (bitmap[i] & (1 << j))
2683 +               return ((i << BT_ULSHIFT) | j);
2684 +    }
2685 +
2686 +    return (-1);
2687 +}
2688 +
2689 +/*
2690 + * Return the index of the first available bit in the 
2691 + * bitmap, or -1 for failure
2692 + */
2693 +int
2694 +bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset)
2695 +{
2696 +    int first = ((last+1) + BT_NBIPUL-1) >> BT_ULSHIFT;
2697 +    int end   = (--nbits) >> BT_ULSHIFT;
2698 +    int maxbit;
2699 +    int        i, j;
2700 +
2701 +    /* look for bits before the first whole word */
2702 +    if (((last+1) & BT_ULMASK) != 0)
2703 +    {
2704 +       maxbit = ((first-1) == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
2705 +       for (j = ((last+1) & BT_ULMASK); j <= maxbit; j++)
2706 +           if ((bitmap[first-1] & (1 << j)) == (isset << j))
2707 +               return (((first-1) << BT_ULSHIFT) | j);
2708 +    }
2709 +
2710 +    /* look for a word with a bit off */
2711 +    for (i = first; i <= end; i++)
2712 +       if (bitmap[i] != (isset ? 0 : ~((bitmap_t) 0)))
2713 +           break;
2714 +
2715 +    if (i <= end)
2716 +    {
2717 +       /* found a word with a bit off,  now see which bit it is */
2718 +       maxbit = (i == end) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
2719 +       for (j = 0; j <= maxbit; j++)
2720 +           if ((bitmap[i] & (1 << j)) == (isset << j))
2721 +               return ((i << BT_ULSHIFT) | j);
2722 +    }
2723 +    return (-1);
2724 +}
2725 +
2726 +void
2727 +bt_copy (bitmap_t *a, bitmap_t *b, int nbits)
2728 +{
2729 +    int i;
2730 +
2731 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
2732 +       b[i] = a[i];
2733 +
2734 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
2735 +       if (BT_TEST(a, i))
2736 +           BT_SET(b,i);
2737 +       else
2738 +           BT_CLEAR(b,i);
2739 +}
2740 +
2741 +void
2742 +bt_zero (bitmap_t *bitmap, int nbits)
2743 +{
2744 +    int i;
2745 +
2746 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
2747 +       bitmap[i] = 0;
2748 +
2749 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
2750 +       BT_CLEAR(bitmap,i);
2751 +}
2752 +
2753 +void
2754 +bt_fill (bitmap_t *bitmap, int nbits)
2755 +{
2756 +    int i;
2757 +
2758 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
2759 +       bitmap[i] = ~((bitmap_t) 0);
2760 +
2761 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
2762 +       BT_SET(bitmap,i);
2763 +}
2764 +
2765 +int
2766 +bt_cmp (bitmap_t *a, bitmap_t *b, int nbits)
2767 +{
2768 +    int i;
2769 +
2770 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
2771 +       if (a[i] != b[i])
2772 +           return (1);
2773 +
2774 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
2775 +       if (BT_TEST (a, i) != BT_TEST(b, i))
2776 +           return (1);
2777 +    return (0);
2778 +}
2779 +
2780 +void
2781 +bt_intersect (bitmap_t *a, bitmap_t *b, int nbits)
2782 +{
2783 +    int i;
2784 +    
2785 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
2786 +       a[i] &= b[i];
2787 +
2788 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
2789 +       if (BT_TEST (a, i) && BT_TEST (b, i))
2790 +           BT_SET (a, i);
2791 +       else
2792 +           BT_CLEAR (a, i);
2793 +}
2794 +
2795 +void
2796 +bt_remove (bitmap_t *a, bitmap_t *b, int nbits)
2797 +{
2798 +    int i;
2799 +
2800 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
2801 +       a[i] &= ~b[i];
2802 +
2803 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
2804 +       if (BT_TEST (b, i))
2805 +           BT_CLEAR (a, i);
2806 +}
2807 +
2808 +void
2809 +bt_add (bitmap_t *a, bitmap_t *b, int nbits)
2810 +{
2811 +    int i;
2812 +
2813 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
2814 +       a[i] |= b[i];
2815 +
2816 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
2817 +       if (BT_TEST(b, i))
2818 +           BT_SET (a, i);
2819 +}
2820 +
2821 +/*
2822 + * bt_spans : partition a spans partition b
2823 + *    == all bits set in 'b' are set in 'a'
2824 + */
2825 +int
2826 +bt_spans (bitmap_t *a, bitmap_t *b, int nbits)
2827 +{
2828 +    int i;
2829 +    
2830 +    for (i = 0; i < nbits; i++)
2831 +       if (BT_TEST (b, i) && !BT_TEST (a, i))
2832 +           return (0);
2833 +    return (1);
2834 +}
2835 +
2836 +/*
2837 + * bt_subset: copy [base,base+nbits-1] from 'a' to 'b'
2838 + */
2839 +void
2840 +bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits)
2841 +{
2842 +    int i;
2843 +
2844 +    for (i = 0; i < nbits; i++)
2845 +    {
2846 +       if (BT_TEST (a, base+i))
2847 +           BT_SET(b,i);
2848 +       else
2849 +           BT_CLEAR (b,i);
2850 +    }
2851 +}
2852 +
2853 +void 
2854 +bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits)
2855 +{
2856 +    int i;
2857 +    
2858 +    for (i = 0; i < nbits; i++)
2859 +    {
2860 +       if (!BT_TEST (a, i) && BT_TEST (b, i))
2861 +       {
2862 +           BT_SET (c, i);
2863 +        }
2864 +       else
2865 +       {
2866 +           BT_CLEAR (c, i);
2867 +        }
2868 +    }
2869 +}
2870 +
2871 +void 
2872 +bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits)
2873 +{
2874 +    int i;
2875 +    
2876 +    for (i = 0; i < nbits; i++)
2877 +    {
2878 +       if (BT_TEST (a, i) && !BT_TEST (b, i))
2879 +       {
2880 +           BT_SET (c, i);
2881 +        }
2882 +       else
2883 +       {
2884 +           BT_CLEAR (c, i);
2885 +        }
2886 +    }
2887 +}
2888 +
2889 +int
2890 +bt_nbits (bitmap_t *a, int nbits)
2891 +{
2892 +    int i, c;
2893 +    for (i = 0, c = 0; i < nbits; i++)
2894 +       if (BT_TEST (a, i))
2895 +           c++;
2896 +    return (c);
2897 +}
2898 +
2899 +/*
2900 + * Local variables:
2901 + * c-file-style: "stroustrup"
2902 + * End:
2903 + */
2904 Index: linux-2.4.21/drivers/net/qsnet/elan/capability.c
2905 ===================================================================
2906 --- linux-2.4.21.orig/drivers/net/qsnet/elan/capability.c       2004-02-23 16:02:56.000000000 -0500
2907 +++ linux-2.4.21/drivers/net/qsnet/elan/capability.c    2005-06-01 23:12:54.557445488 -0400
2908 @@ -0,0 +1,628 @@
2909 +/*
2910 + *    Copyright (c) 2003 by Quadrics Ltd.
2911 + * 
2912 + *    For licensing information please see the supplied COPYING file
2913 + *
2914 + */
2915 +
2916 +#ident "@(#)$Id: capability.c,v 1.13 2004/07/20 10:15:33 david Exp $"
2917 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/capability.c,v $ */
2918 +
2919 +
2920 +#include <qsnet/kernel.h>
2921 +#include <elan/elanmod.h>
2922 +
2923 +static LIST_HEAD(elan_cap_list); 
2924 +
2925 +typedef struct elan_vp_struct
2926 +{
2927 +       struct list_head list;
2928 +       ELAN_CAPABILITY  vp;
2929 +} ELAN_VP_NODE_STRUCT;
2930 +
2931 +
2932 +typedef struct elan_attached_struct
2933 +{
2934 +       void               *cb_args;
2935 +       ELAN_DESTROY_CB  cb_func;
2936 +} ELAN_ATTACHED_STRUCT;
2937 +
2938 +typedef struct elan_cap_node_struct
2939 +{
2940 +       struct list_head list;
2941 +       ELAN_CAP_STRUCT     node;
2942 +       ELAN_ATTACHED_STRUCT *attached[ELAN_MAX_RAILS];
2943 +       struct list_head vp_list;
2944 +} ELAN_CAP_NODE_STRUCT;
2945 +
2946 +
2947 +ELAN_CAP_NODE_STRUCT *
2948 +find_cap_node(ELAN_CAPABILITY *cap)
2949 +{
2950 +       struct list_head        *tmp;
2951 +       ELAN_CAP_NODE_STRUCT *ptr=NULL;
2952 +
2953 +       list_for_each(tmp, &elan_cap_list) {
2954 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
2955 +               /* is it an exact match */
2956 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) 
2957 +                    && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)) {
2958 +                       return ptr;
2959 +               }
2960 +       }
2961 +       return ptr;
2962 +};
2963 +
2964 +ELAN_VP_NODE_STRUCT *
2965 +find_vp_node( ELAN_CAP_NODE_STRUCT *cap_node,ELAN_CAPABILITY *map)
2966 +{
2967 +       struct list_head       * tmp;
2968 +       ELAN_VP_NODE_STRUCT * ptr = NULL;
2969 +
2970 +       list_for_each(tmp, &cap_node->vp_list) {
2971 +               ptr = list_entry(tmp, ELAN_VP_NODE_STRUCT , list);
2972 +               /* is it an exact match */
2973 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->vp,map) 
2974 +                    && ELAN_CAP_GEOM_MATCH(&ptr->vp,map)){
2975 +                       return ptr;
2976 +               }
2977 +       }
2978 +       return ptr;
2979 +}
2980 +
2981 +int 
2982 +elan_validate_cap(ELAN_CAPABILITY *cap)
2983 +{
2984 +       char                      space[127];
2985 +
2986 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap %s\n",elan_capability_string(cap,space));
2987 +
2988 +       /* check versions */
2989 +       if (cap->cap_version != ELAN_CAP_VERSION_NUMBER)
2990 +       {
2991 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER);
2992 +               return (EINVAL);
2993 +       }
2994 +
2995 +       /* check its not HWTEST */
2996 +       if ( cap->cap_type & ELAN_CAP_TYPE_HWTEST )
2997 +       {
2998 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_HWTEST \n");   
2999 +               return (EINVAL);
3000 +       }
3001 +       
3002 +       /* check its type */
3003 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
3004 +       {
3005 +       case ELAN_CAP_TYPE_KERNEL :     
3006 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_KERNEL \n");   
3007 +               return (EINVAL);
3008 +
3009 +               /* check it has a valid type */
3010 +       case ELAN_CAP_TYPE_BLOCK:
3011 +       case ELAN_CAP_TYPE_CYCLIC:
3012 +               break;
3013 +
3014 +               /* all others are failed as well */
3015 +       default:
3016 +               ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed unknown type = %x \n", (cap->cap_type & ELAN_CAP_TYPE_MASK));       
3017 +               return (EINVAL);
3018 +       }
3019 +       
3020 +       if ((cap->cap_lowcontext == ELAN_CAP_UNINITIALISED) || (cap->cap_highcontext == ELAN_CAP_UNINITIALISED)
3021 +           || (cap->cap_lownode == ELAN_CAP_UNINITIALISED) || (cap->cap_highnode    == ELAN_CAP_UNINITIALISED))
3022 +       {
3023 +               
3024 +               ELAN_DEBUG4 (ELAN_DBG_VP,"elan_validate_cap: ELAN_CAP_UNINITIALISED   LowNode %d   HighNode %d   LowContext %d   highContext %d\n",
3025 +                            cap->cap_lownode , cap->cap_highnode,
3026 +                            cap->cap_lowcontext , cap->cap_highcontext);
3027 +               return (EINVAL);
3028 +       }       
3029 +
3030 +       if (cap->cap_lowcontext > cap->cap_highcontext)
3031 +       {
3032 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext);
3033 +               return (EINVAL);
3034 +       }
3035 +       
3036 +       if (cap->cap_lownode > cap->cap_highnode)
3037 +       {
3038 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lownode > cap->cap_highnode) %d %d\n",cap->cap_lownode, cap->cap_highnode);
3039 +               return (EINVAL);
3040 +       }
3041 +
3042 +       if (cap->cap_mycontext != ELAN_CAP_UNINITIALISED) 
3043 +       {
3044 +               ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed cap->cap_mycontext is set %d  \n", cap->cap_mycontext);
3045 +               return (EINVAL);
3046 +       }
3047 +
3048 +
3049 +       if ((ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)) > ELAN_MAX_VPS)
3050 +       {
3051 +               ELAN_DEBUG6 (ELAN_DBG_VP,"elan_validate_cap: too many vps  LowNode %d   HighNode %d   LowContext %d   highContext %d,  %d >% d\n",
3052 +                            cap->cap_lownode , cap->cap_highnode,
3053 +                            cap->cap_lowcontext , cap->cap_highcontext,
3054 +                            (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)),
3055 +                            ELAN_MAX_VPS);
3056 +               
3057 +               return (EINVAL);
3058 +       }
3059 +
3060 +       return (ESUCCESS);
3061 +}
3062 +
3063 +int
3064 +elan_validate_map(ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3065 +{
3066 +       ELAN_CAP_NODE_STRUCT * ptr  = NULL;
3067 +       ELAN_VP_NODE_STRUCT  * vptr = NULL;
3068 +       char space[256];
3069 +
3070 +       kmutex_lock(&elan_mutex);
3071 +
3072 +       ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map \n");
3073 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map cap = %s \n",elan_capability_string(cap,space));
3074 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map map = %s \n",elan_capability_string(map,space));
3075 +
3076 +       /* does cap exist    */
3077 +       ptr = find_cap_node(cap);
3078 +       if ( ptr == NULL ) 
3079 +       {
3080 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not found \n");
3081 +               kmutex_unlock(&elan_mutex);
3082 +               return EINVAL;
3083 +       }
3084 +       /* is it active */
3085 +       if ( ! ptr->node.active ) 
3086 +       {
3087 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not active \n");
3088 +               kmutex_unlock(&elan_mutex);
3089 +               return EINVAL;
3090 +       }
3091 +
3092 +       /* are they the same */
3093 +       if ( ELAN_CAP_TYPE_MATCH(cap,map) 
3094 +            && ELAN_CAP_GEOM_MATCH(cap,map)) 
3095 +       {
3096 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap == map  passed\n");
3097 +               kmutex_unlock(&elan_mutex);
3098 +               return ESUCCESS;
3099 +       }
3100 +
3101 +       /* is map in map list */
3102 +       vptr = find_vp_node(ptr, map);
3103 +       if ( vptr == NULL ) 
3104 +       {
3105 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map:  map not found\n");
3106 +               kmutex_unlock(&elan_mutex);
3107 +               return EINVAL;
3108 +       }
3109 +       
3110 +       ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map:  map passed\n");
3111 +       kmutex_unlock(&elan_mutex);
3112 +       return ESUCCESS;
3113 +}
3114 +
3115 +int
3116 +elan_create_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
3117 +{
3118 +       char                      space[127];
3119 +       struct list_head        * tmp;
3120 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3121 +       int                       i, rail;
3122 +
3123 +       kmutex_lock(&elan_mutex);
3124 +
3125 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_create_cap %s\n",elan_capability_string(cap,space));     
3126 +
3127 +       /* need to check that the cap does not overlap another one 
3128 +          or is an exact match with only the userkey changing */
3129 +       list_for_each(tmp, &elan_cap_list) {
3130 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3131 +
3132 +               /* is it an exact match */
3133 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) 
3134 +                    && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)
3135 +                    && (ptr->node.owner == owner)) {
3136 +                       if ( ptr->node.active ) {
3137 +                               /* don't inc attached count as it's like a create */
3138 +                               ptr->node.cap.cap_userkey = cap->cap_userkey;
3139 +                               kmutex_unlock(&elan_mutex);
3140 +                               return ESUCCESS;
3141 +                       }
3142 +                       else
3143 +                       {
3144 +                               kmutex_unlock(&elan_mutex);
3145 +                               return EINVAL;
3146 +                       }
3147 +               }
3148 +               
3149 +               /* does it overlap, even with ones being destroyed */
3150 +               if (elan_cap_overlap(&ptr->node.cap,cap))
3151 +               {
3152 +                       kmutex_unlock(&elan_mutex);
3153 +                       return  EACCES;
3154 +               }
3155 +       }
3156 +
3157 +       /* create it */
3158 +       KMEM_ALLOC(ptr, ELAN_CAP_NODE_STRUCT *, sizeof(ELAN_CAP_NODE_STRUCT), 1);
3159 +       if (ptr == NULL)
3160 +       {
3161 +               kmutex_unlock(&elan_mutex);
3162 +               return  ENOMEM;
3163 +       }
3164 +
3165 +       /* create space for the attached array */
3166 +       for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3167 +       {
3168 +               ptr->attached[rail]=NULL;
3169 +               if ( ELAN_CAP_IS_RAIL_SET(cap,rail) ) 
3170 +               {
3171 +                       KMEM_ALLOC(ptr->attached[rail], ELAN_ATTACHED_STRUCT *, sizeof(ELAN_ATTACHED_STRUCT) *  ELAN_CAP_NUM_CONTEXTS(cap), 1);
3172 +                       if (ptr->attached[rail] == NULL) 
3173 +                       {
3174 +                               for(;rail>=0;rail--)
3175 +                                       if ( ptr->attached[rail] )
3176 +                                               KMEM_FREE(ptr->attached[rail], sizeof(ELAN_ATTACHED_STRUCT) *  ELAN_CAP_NUM_CONTEXTS(cap));
3177 +
3178 +                               KMEM_FREE(ptr, sizeof(ELAN_CAP_NODE_STRUCT));
3179 +                               kmutex_unlock(&elan_mutex);
3180 +                               return  ENOMEM;
3181 +                       }
3182 +                       /* blank the attached array */
3183 +                       for(i=0;i<ELAN_CAP_NUM_CONTEXTS(cap);i++)
3184 +                               ptr->attached[rail][i].cb_func = NULL;
3185 +               }
3186 +       }       
3187 +       
3188 +       ptr->node.owner     = owner;
3189 +       ptr->node.cap       = *cap;
3190 +       ptr->node.attached  = 1;    /* creator counts as attached */
3191 +       ptr->node.active    = 1;
3192 +       ptr->vp_list.next   = &(ptr->vp_list);
3193 +       ptr->vp_list.prev   = &(ptr->vp_list);
3194 +
3195 +       list_add_tail(&ptr->list, &elan_cap_list);      
3196 +
3197 +       kmutex_unlock(&elan_mutex);
3198 +       return  ESUCCESS;
3199 +}
3200 +
3201 +void
3202 +elan_destroy_cap_test(ELAN_CAP_NODE_STRUCT *cap_ptr)
3203 +{
3204 +       /* called by someone holding the mutex   */
3205 +       struct list_head       * vp_tmp;
3206 +       ELAN_VP_NODE_STRUCT * vp_ptr = NULL;
3207 +       int                      rail;
3208 +
3209 +       /* check to see if it can be deleted now */
3210 +       if ( cap_ptr->node.attached == 0 ) {
3211 +               
3212 +               ELAN_DEBUG0(ELAN_DBG_CAP,"elan_destroy_cap_test: attached == 0\n");     
3213 +               
3214 +               /* pop entries off the vp list - list_for_each() is unsafe here as each entry is freed */
3215 +               while ((vp_tmp = cap_ptr->vp_list.next) != &(cap_ptr->vp_list)) {
3216 +                       vp_ptr = list_entry(vp_tmp, ELAN_VP_NODE_STRUCT , list);
3217 +                       list_del(&vp_ptr->list);
3218 +                       KMEM_FREE( vp_ptr, sizeof(ELAN_VP_NODE_STRUCT));
3219 +               }
3220 +               
3221 +               list_del(&cap_ptr->list);
3222 +
3223 +               /* delete space for the attached array */
3224 +               for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3225 +                       if (cap_ptr->attached[rail]) 
3226 +                               KMEM_FREE(cap_ptr->attached[rail], sizeof(ELAN_ATTACHED_STRUCT) * ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap)));
3227 +                       
3228 +               KMEM_FREE(cap_ptr, sizeof(ELAN_CAP_NODE_STRUCT));               
3229 +       }
3230 +}
3231 +
3232 +int
3233 +elan_destroy_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
3234 +{
3235 +       char                      space[127];
3236 +       struct list_head        * el;
3237 +       struct list_head        * nel;
3238 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3239 +       int                       i, rail;
3240 +       int                       found = 0;
3241 +
3242 +       kmutex_lock(&elan_mutex);
3243 +
3244 +       ELAN_DEBUG1 (ELAN_DBG_CAP,"elan_destroy_cap %s\n",elan_capability_string(cap,space));   
3245 +
3246 +       list_for_each_safe (el, nel, &elan_cap_list) {
3247 +               ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
3248 +               
3249 +               /* is it an exact match */
3250 +               if ( (ptr->node.owner == owner )
3251 +                    && (  (cap == NULL) 
3252 +                          || (ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)))) {
3253 +
3254 +                       if ( ptr->node.active ) {
3255 +
3256 +                               /* mark as inactive and decrement the attached count */
3257 +                               ptr->node.active = 0;
3258 +                               ptr->node.attached--;
3259 +                               ptr->node.owner  = 0; /* no one own's it now */
3260 +                               
3261 +                               /* need to tell anyone who was attached that this has been destroyed */
3262 +                               for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3263 +                                       if (ELAN_CAP_IS_RAIL_SET( &(ptr->node.cap), rail)) {
3264 +                                               for(i=0;i< ELAN_CAP_NUM_CONTEXTS(&(ptr->node.cap));i++)
3265 +                                                       if ( ptr->attached[rail][i].cb_func != NULL) 
3266 +                                                               ptr->attached[rail][i].cb_func(ptr->attached[rail][i].cb_args, cap, NULL);
3267 +                                       }
3268 +                               
3269 +                               /* now try to destroy it */
3270 +                               elan_destroy_cap_test(ptr);
3271 +                               
3272 +                               /* found it */
3273 +                               found = 1;
3274 +                       }
3275 +               }
3276 +       }
3277 +       
3278 +       if ( found )
3279 +       {
3280 +               kmutex_unlock(&elan_mutex);
3281 +               return ESUCCESS;
3282 +       }
3283 +
3284 +       /* failed */
3285 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_destroy_cap: didnt find it \n"); 
3286 +
3287 +       kmutex_unlock(&elan_mutex);
3288 +       return EINVAL;
3289 +}
3290 +
3291 +int 
3292 +elan_get_caps(uint *number_of_results, uint array_size, ELAN_CAP_STRUCT *caps)
3293 +{
3294 +       uint                      results = 0;
3295 +       struct list_head        * tmp;
3296 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3297 +       
3298 +
3299 +       kmutex_lock(&elan_mutex);
3300 +
3301 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_get_caps\n");    
3302 +
3303 +       list_for_each(tmp, &elan_cap_list) {
3304 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3305 +               
3306 +               /* check for space before writing so caps[] cannot be overrun (eg array_size == 0) */
3307 +               if ( results >= array_size )
3308 +               {
3309 +                       copyout(&results, number_of_results, sizeof(uint));     
3310 +                       kmutex_unlock(&elan_mutex);
3311 +                       return ESUCCESS;
3312 +               }
3313 +               
3314 +               copyout(&ptr->node, &caps[results], sizeof (ELAN_CAP_STRUCT));
3315 +               results++;
3316 +       }
3317 +
3318 +       copyout(&results, number_of_results, sizeof(uint));     
3319 +
3320 +       kmutex_unlock(&elan_mutex);
3321 +       return ESUCCESS;
3322 +}
3323 +
3324 +int
3325 +elan_create_vp(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3326 +{
3327 +       ELAN_CAP_NODE_STRUCT * cap_ptr = NULL;
3328 +       ELAN_VP_NODE_STRUCT  * vp_ptr  = NULL;
3329 +       
3330 +       kmutex_lock(&elan_mutex);
3331 +
3332 +
3333 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_create_vp\n");
3334 +
3335 +       /* the railmasks must match */
3336 +       if ( cap->cap_railmask != map->cap_railmask)
3337 +       {
3338 +               kmutex_unlock(&elan_mutex);
3339 +               return  EINVAL;
3340 +       }
3341 +
3342 +       /* does the cap exist */
3343 +       cap_ptr = find_cap_node(cap);
3344 +       if ((cap_ptr == NULL) || ( cap_ptr->node.owner != owner ) || (! cap_ptr->node.active) )
3345 +       {
3346 +               kmutex_unlock(&elan_mutex);
3347 +               return  EINVAL;
3348 +       }
3349 +       
3350 +       /* is there already a mapping */
3351 +       vp_ptr = find_vp_node(cap_ptr,map);
3352 +       if ( vp_ptr != NULL) 
3353 +       {
3354 +               kmutex_unlock(&elan_mutex);
3355 +               return  EINVAL;
3356 +       }
3357 +
3358 +       /* create space for mapping */
3359 +       KMEM_ALLOC(vp_ptr, ELAN_VP_NODE_STRUCT *, sizeof(ELAN_VP_NODE_STRUCT), 1);
3360 +       if (vp_ptr == NULL)
3361 +       {
3362 +               kmutex_unlock(&elan_mutex);
3363 +               return  ENOMEM;
3364 +       }
3365 +                       
3366 +       /* copy map */
3367 +       vp_ptr->vp = *map;
3368 +       list_add_tail(&vp_ptr->list, &(cap_ptr->vp_list));      
3369 +       kmutex_unlock(&elan_mutex);
3370 +       return  ESUCCESS;
3371 +}
3372 +
3373 +int
3374 +elan_destroy_vp(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3375 +{
3376 +       ELAN_CAP_NODE_STRUCT * cap_ptr = NULL;
3377 +       ELAN_VP_NODE_STRUCT  * vp_ptr  = NULL;
3378 +       int                       i, rail;
3379 +
3380 +       kmutex_lock(&elan_mutex);
3381 +
3382 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_destroy_vp\n");  
3383 +
3384 +       cap_ptr = find_cap_node(cap);
3385 +       if ((cap_ptr!=NULL) && (cap_ptr->node.owner == owner) && ( cap_ptr->node.active))
3386 +       {               
3387 +               vp_ptr = find_vp_node( cap_ptr, map );
3388 +               if ( vp_ptr != NULL ) 
3389 +               {
3390 +                       list_del(&vp_ptr->list);
3391 +                       KMEM_FREE(vp_ptr, sizeof(ELAN_VP_NODE_STRUCT));
3392 +             
3393 +                       /* need to tell those who are attached that map is no longer in use */
3394 +                       for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3395 +                               if (ELAN_CAP_IS_RAIL_SET(cap, rail))
3396 +                               {
3397 +                                       for(i=0;i< ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap));i++)
3398 +                                               if ( cap_ptr->attached[rail][i].cb_func != NULL) 
3399 +                                                       cap_ptr->attached[rail][i].cb_func( cap_ptr->attached[rail][i].cb_args, cap, map);
3400 +                               }
3401 +
3402 +                       kmutex_unlock(&elan_mutex);
3403 +                       return  ESUCCESS;
3404 +               }
3405 +       }       
3406 +       
3407 +       /* didn't find it */
3408 +       kmutex_unlock(&elan_mutex);
3409 +       return  EINVAL;
3410 +}
3411 +
3412 +int 
3413 +elan_attach_cap(ELAN_CAPABILITY *cap, unsigned int rail, void *args, ELAN_DESTROY_CB func)
3414 +{
3415 +       char                  space[127];
3416 +       struct list_head     *el;
3417 +
3418 +       ELAN_DEBUG1 (ELAN_DBG_CAP,"elan_attach_cap %s\n",elan_capability_string(cap,space));
3419 +
3420 +       /* currently must provide a callback, as NULL means something */
3421 +       if ( func == NULL)
3422 +               return (EINVAL);
3423 +
3424 +       /* mycontext must be set and correct */
3425 +       if ( ! ELAN_CAP_VALID_MYCONTEXT(cap))
3426 +               return (EINVAL);
3427 +
3428 +       /* rail must be one of the rails in railmask */
3429 +       if (((1 << rail) & cap->cap_railmask) == 0)
3430 +               return (EINVAL);
3431 +       
3432 +       kmutex_lock(&elan_mutex);
3433 +
3434 +       list_for_each(el, &elan_cap_list) {
3435 +               ELAN_CAP_NODE_STRUCT *cap_ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
3436 +               
3437 +               /* is it an exact match */
3438 +               if (ELAN_CAP_MATCH(&cap_ptr->node.cap,cap) && cap_ptr->node.active) {
3439 +                       unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
3440 +                       
3441 +                       if ( cap_ptr->attached[rail][attached_index].cb_func != NULL ) /* only one per ctx per rail */
3442 +                       {
3443 +                               kmutex_unlock(&elan_mutex);
3444 +                               return   EINVAL;
3445 +                       }
3446 +
3447 +                       /* keep track of who attached as we might need to tell them when */
3448 +                       /* cap or maps get destroyed                                     */
3449 +                       cap_ptr->attached[rail][ attached_index ].cb_func = func;
3450 +                       cap_ptr->attached[rail][ attached_index ].cb_args = args;
3451 +                       cap_ptr->node.attached++;
3452 +
3453 +                       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_attach_cap: passed\n");
3454 +                       kmutex_unlock(&elan_mutex);
3455 +                       return ESUCCESS;
3456 +               }
3457 +       }
3458 +       
3459 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_attach_cap: failed to find \n");
3460 +
3461 +       /* didnt find one */
3462 +       kmutex_unlock(&elan_mutex);
3463 +       return EINVAL;
3464 +}
3465 +
3466 +int 
3467 +elan_detach_cap(ELAN_CAPABILITY *cap, unsigned int rail)
3468 +{
3469 +       struct list_head *el, *nel;
3470 +       char              space[256];
3471 +
3472 +       kmutex_lock(&elan_mutex);
3473 +
3474 +       ELAN_DEBUG1(ELAN_DBG_CAP,"elan_detach_cap %s\n",elan_capability_string(cap,space));
3475 +       list_for_each_safe (el, nel, &elan_cap_list) {
3476 +               ELAN_CAP_NODE_STRUCT *ptr = list_entry (el, ELAN_CAP_NODE_STRUCT, list);
3477 +
3478 +               /* is it an exact match */
3479 +               if (ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) &&
3480 +                   ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap) &&
3481 +                   (ptr->node.cap.cap_railmask & cap->cap_railmask) == cap->cap_railmask) {
3482 +               
3483 +                       unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
3484 +
3485 +                       if ( ptr->attached[rail][ attached_index ].cb_func == NULL ) 
3486 +                               ELAN_DEBUG0(ELAN_DBG_CAP,"elanmod_detach_cap already removed \n");
3487 +
3488 +                       ptr->attached[rail][ attached_index ].cb_func = NULL;
3489 +                       ptr->attached[rail][ attached_index ].cb_args = (void *)0;
3490 +
3491 +                       ptr->node.attached--;
3492 +
3493 +                       ELAN_DEBUG1(ELAN_DBG_CAP,"elanmod_detach_cap new attach count%d \n", ptr->node.attached);
3494 +
3495 +                       elan_destroy_cap_test(ptr);
3496 +
3497 +                       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_detach_cap: success\n"); 
3498 +
3499 +                       kmutex_unlock(&elan_mutex);
3500 +                       return  ESUCCESS;
3501 +               }
3502 +       }
3503 +
3504 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_detach_cap: failed to find\n");
3505 +       kmutex_unlock(&elan_mutex);
3506 +       return  EINVAL;
3507 +}
3508 +
3509 +int
3510 +elan_cap_dump()
3511 +{
3512 +       struct list_head        * tmp;
3513 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3514 +       
3515 +       kmutex_lock(&elan_mutex);       
3516 +       
3517 +       list_for_each(tmp, &elan_cap_list) {
3518 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3519 +
3520 +               ELAN_DEBUG2 (ELAN_DBG_ALL, "cap dump: owner %p type %x\n", ptr->node.owner, ptr->node.cap.cap_type);
3521 +                       
3522 +               ELAN_DEBUG5 (ELAN_DBG_ALL, "cap dump: LowNode %d   HighNode %d   LowContext %d   mycontext %d   highContext %d\n",
3523 +                            ptr->node.cap.cap_lownode , ptr->node.cap.cap_highnode,
3524 +                            ptr->node.cap.cap_lowcontext , ptr->node.cap.cap_mycontext, ptr->node.cap.cap_highcontext);
3525 +
3526 +       }
3527 +
3528 +       kmutex_unlock(&elan_mutex);
3529 +       return  ESUCCESS;
3530 +}
3531 +
3532 +/*
3533 + * Local variables:
3534 + * c-file-style: "linux"
3535 + * End:
3536 + */
3537 Index: linux-2.4.21/drivers/net/qsnet/elan/capability_general.c
3538 ===================================================================
3539 --- linux-2.4.21.orig/drivers/net/qsnet/elan/capability_general.c       2004-02-23 16:02:56.000000000 -0500
3540 +++ linux-2.4.21/drivers/net/qsnet/elan/capability_general.c    2005-06-01 23:12:54.558445336 -0400
3541 @@ -0,0 +1,446 @@
3542 +/*
3543 + *    Copyright (c) 2003 by Quadrics Ltd.
3544 + * 
3545 + *    For licensing information please see the supplied COPYING file
3546 + *
3547 + */
3548 +
3549 +#ident "@(#)$Id: capability_general.c,v 1.10 2004/02/25 13:47:59 daniel Exp $"
3550 +/*      $Source: /cvs/master/quadrics/elanmod/shared/capability_general.c,v $ */
3551 +
3552 +#if defined(__KERNEL__)
3553 +
3554 +#include <qsnet/kernel.h>
3555 +
3556 +#else
3557 +
3558 +#include <stdlib.h>
3559 +#include <stdio.h>
3560 +#include <sys/param.h>
3561 +
3562 +#endif
3563 +
3564 +#include <elan/elanmod.h>
3565 +
3566 +
3567 +void
3568 +elan_nullcap (ELAN_CAPABILITY *cap)
3569 +{
3570 +       register int i;
3571 +
3572 +       for (i = 0; i < sizeof (cap->cap_userkey)/sizeof(cap->cap_userkey.key_values[0]); i++)
3573 +               cap->cap_userkey.key_values[i] = ELAN_CAP_UNINITIALISED;
3574 +    
3575 +       cap->cap_lowcontext  = ELAN_CAP_UNINITIALISED;
3576 +       cap->cap_highcontext = ELAN_CAP_UNINITIALISED;
3577 +       cap->cap_mycontext   = ELAN_CAP_UNINITIALISED;
3578 +       cap->cap_lownode     = ELAN_CAP_UNINITIALISED;
3579 +       cap->cap_highnode    = ELAN_CAP_UNINITIALISED;
3580 +       cap->cap_railmask    = ELAN_CAP_UNINITIALISED;
3581 +       cap->cap_type        = ELAN_CAP_UNINITIALISED;
3582 +       cap->cap_spare       = 0;
3583 +       cap->cap_version     = ELAN_CAP_VERSION_NUMBER;
3584 +       
3585 +       for (i = 0; i < sizeof (cap->cap_bitmap)/sizeof (cap->cap_bitmap[0]); i++)
3586 +               cap->cap_bitmap[i] = 0;
3587 +}
3588 +
3589 +char *
3590 +elan_capability_string (ELAN_CAPABILITY *cap, char *str)
3591 +{
3592 +       if (cap == NULL) 
3593 +               sprintf (str, "[-.-.-.-] cap = NULL\n");
3594 +       else
3595 +               sprintf (str, "[%x.%x.%x.%x] Version %x Type %x \n"
3596 +                        "Context %x.%x.%x Node %x.%x\n",
3597 +                        cap->cap_userkey.key_values[0], cap->cap_userkey.key_values[1],
3598 +                        cap->cap_userkey.key_values[2], cap->cap_userkey.key_values[3],
3599 +                        cap->cap_version, cap->cap_type, 
3600 +                        cap->cap_lowcontext, cap->cap_mycontext, cap->cap_highcontext,
3601 +                        cap->cap_lownode, cap->cap_highnode);
3602 +       
3603 +       return (str);
3604 +}
3605 +
3606 +ELAN_LOCATION
3607 +elan_vp2location (u_int process, ELAN_CAPABILITY *cap)
3608 +{
3609 +       ELAN_LOCATION location;
3610 +       int i, vp, node, context, nnodes, nctxs;
3611 +
3612 +       vp = 0;
3613 +
3614 +       location.loc_node    = ELAN_INVALID_NODE;
3615 +       location.loc_context = -1;
3616 +       
3617 +       nnodes = cap->cap_highnode - cap->cap_lownode + 1;
3618 +       nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
3619 +       
3620 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
3621 +       {
3622 +       case ELAN_CAP_TYPE_BLOCK:
3623 +               for (node = 0, i = 0; node < nnodes; node++)
3624 +               {
3625 +                       for (context = 0; context < nctxs; context++)
3626 +                       {
3627 +                               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, context + (node * nctxs)))
3628 +                               {
3629 +                                       if (vp == process)
3630 +                                       {
3631 +                                               /* Return relative indices within the capability box */
3632 +                                               location.loc_node    = node;
3633 +                                               location.loc_context = context;
3634 +
3635 +                                               return (location);
3636 +                                       }
3637 +                      
3638 +                                       vp++;
3639 +                               }
3640 +                       }
3641 +               }
3642 +               break;
3643 +       
3644 +       case ELAN_CAP_TYPE_CYCLIC:
3645 +               for (context = 0, i = 0; context < nctxs; context++)
3646 +               {
3647 +                       for (node = 0; node < nnodes; node++)
3648 +                       {
3649 +                               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (context * nnodes)))
3650 +                               {
3651 +                                       if (vp == process)
3652 +                                       {
3653 +                                               location.loc_node    = node;
3654 +                                               location.loc_context = context;
3655 +
3656 +                                               return (location);
3657 +                                       }
3658 +                   
3659 +                                       vp++;
3660 +                               }
3661 +                       }
3662 +               }
3663 +               break;
3664 +       }
3665 +    
3666 +       return( location );
3667 +}
3668 +
3669 +int
3670 +elan_location2vp (ELAN_LOCATION location, ELAN_CAPABILITY *cap)
3671 +{
3672 +    int  vp, node, context, nnodes, nctxs;
3673 +
3674 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
3675 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
3676 +
3677 +    vp = 0;
3678 +    
3679 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
3680 +    {
3681 +    case ELAN_CAP_TYPE_BLOCK:
3682 +       for (node = 0 ; node < nnodes ; node++)
3683 +       {
3684 +           for (context = 0; context < nctxs; context++)
3685 +           {
3686 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, context + (node * nctxs)))
3687 +               {
3688 +                   if ((location.loc_node == node) && (location.loc_context == context))
3689 +                   {
3690 +                       /* Found it ! */
3691 +                       return( vp );
3692 +                   }
3693 +                   
3694 +                   vp++;
3695 +               }
3696 +           }
3697 +       }
3698 +       break;
3699 +       
3700 +    case ELAN_CAP_TYPE_CYCLIC:
3701 +       for (context = 0; context < nctxs; context++)
3702 +       {
3703 +           for (node = 0; node < nnodes; node++)
3704 +           {
3705 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (context * nnodes)))
3706 +               {
3707 +                   if ((location.loc_node == node) && (location.loc_context == context))
3708 +                   {
3709 +                       /* Found it ! */
3710 +                       return( vp );
3711 +                   }
3712 +                   
3713 +                   vp++;
3714 +               }
3715 +           }
3716 +       }
3717 +       break;
3718 +    }
3719 +    
3720 +    /* Failed to find it */
3721 +    return( -1 );
3722 +}
3723 +
3724 +/* Return the number of processes as described by a capability */
3725 +int
3726 +elan_nvps (ELAN_CAPABILITY *cap)
3727 +{
3728 +       int i, c, nbits = ELAN_CAP_BITMAPSIZE(cap);
3729 +
3730 +       if (cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP)
3731 +               return (nbits);
3732 +
3733 +       for (i = 0, c = 0; i < nbits; i++)
3734 +               if (BT_TEST (cap->cap_bitmap, i))
3735 +                       c++;
3736 +
3737 +       return (c);
3738 +}
3739 +
3740 +/* Return the number of local processes on a given node as described by a capability */
3741 +int
3742 +elan_nlocal (int node, ELAN_CAPABILITY *cap)
3743 +{
3744 +       int vp;
3745 +       ELAN_LOCATION loc;
3746 +       int nLocal = 0;
3747 +
3748 +       for (vp = 0; vp < elan_nvps(cap); vp++)
3749 +       {
3750 +               loc = elan_vp2location(vp, cap);
3751 +               if (loc.loc_node == node)
3752 +                       nLocal++;
3753 +       }
3754 +
3755 +       return (nLocal);
3756 +}
3757 +
3758 +/* Return the maximum number of local processes on any node as described by a capability */
3759 +int
3760 +elan_maxlocal (ELAN_CAPABILITY *cap)
3761 +{
3762 +       return(cap->cap_highcontext - cap->cap_lowcontext + 1);
3763 +}
3764 +
3765 +/* Return the vps of the local processes on a given node as described by a capability */
3766 +int
3767 +elan_localvps (int node, ELAN_CAPABILITY *cap, int *vps, int size)
3768 +{
3769 +       int context;
3770 +       ELAN_LOCATION loc;
3771 +       int nLocal = 0;
3772 +    
3773 +       loc.loc_node = node;
3774 +
3775 +       for (context = 0; context < MIN(size, elan_maxlocal(cap)); context++)
3776 +       {
3777 +               loc.loc_context = context;
3778 +       
3779 +               /* Should return -1 if none found */
3780 +               if ( (vps[context] = elan_location2vp( loc, cap )) != -1)
3781 +                       nLocal++;
3782 +       }
3783 +
3784 +       return (nLocal);
3785 +}
3786 +
3787 +/* Return the number of rails that this capability utilises */
3788 +int
3789 +elan_nrails (ELAN_CAPABILITY *cap)
3790 +{
3791 +       int nrails = 0;
3792 +       unsigned int railmask;
3793 +
3794 +       /* Test for a multi-rail capability */
3795 +       if (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL)
3796 +       {
3797 +               /* Grab rail bitmask from capability */
3798 +               railmask = cap->cap_railmask;
3799 +       
3800 +               while (railmask)
3801 +               {
3802 +                       if (railmask & 1)
3803 +                               nrails++;
3804 +           
3805 +                       railmask >>= 1;
3806 +               }
3807 +       }
3808 +       else 
3809 +               /* Default to just one rail */
3810 +               nrails = 1;
3811 +       
3812 +       return (nrails);
3813 +}
3814 +
3815 +/* Fill out an array giving the physical rail numbers utilised by a capability */
3816 +int
3817 +elan_rails (ELAN_CAPABILITY *cap, int *rails)
3818 +{
3819 +       int nrails, rail;
3820 +       unsigned int railmask;
3821 +
3822 +       /* Test for a multi-rail capability */
3823 +       if (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL)
3824 +       {
3825 +               /* Grab rail bitmask from capability */
3826 +               railmask = cap->cap_railmask;
3827 +       
3828 +               nrails = rail = 0;
3829 +               while (railmask)
3830 +               {
3831 +                       if (railmask & 1)
3832 +                               rails[nrails++] = rail;
3833 +           
3834 +                       rail++;
3835 +                       railmask >>= 1;
3836 +               }
3837 +       }
3838 +       else
3839 +       {
3840 +               /* Default to just one rail */
3841 +               rails[0] = 0;
3842 +               nrails = 1;
3843 +       }
3844 +
3845 +       return( nrails );
3846 +}
3847 +
3848 +int 
3849 +elan_cap_overlap(ELAN_CAPABILITY *cap1, ELAN_CAPABILITY *cap2)
3850 +{
3851 +       /* by context */
3852 +       if ( cap1->cap_highcontext < cap2->cap_lowcontext ) return (0);
3853 +       if ( cap1->cap_lowcontext  > cap2->cap_highcontext) return (0);
3854 +       
3855 +       /* by node */
3856 +       if ( cap1->cap_highnode < cap2->cap_lownode ) return (0);
3857 +       if ( cap1->cap_lownode  > cap2->cap_highnode) return (0);
3858 +
3859 +       /* by rail */
3860 +       /* they overlap if they have a rail in common */
3861 +       return (cap1->cap_railmask & cap2->cap_railmask);
3862 +}
3863 +
3864 +#if !defined(__KERNEL__)
3865 +
3866 +/* Fill out an array that hints at the best use of the rails on a
3867 + * per process basis. The library user can then decide whether or not
3868 + * to take this into account (e.g. TPORTs)
3869 + * All processes calling this fn will be returned the same information.
3870 + */
3871 +int
3872 +elan_prefrails(ELAN_CAPABILITY *cap, int *pref, int nvp)
3873 +{
3874 +       int i;
3875 +       int nrails = elan_nrails(cap);
3876 +       int maxlocal = elan_maxlocal(cap);
3877 +
3878 +       /* Test for a multi-rail capability */
3879 +       if (! (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL))
3880 +       {
3881 +               /* Default to just one rail */
3882 +               for (i = 0; i < nvp; i++)
3883 +                       pref[i] = 0;
3884 +
3885 +               return( 0 );
3886 +       }
3887 +
3888 +       /*
3889 +        * We allocate rails on a per node basis sharing out the rails
3890 +        * equally amongst the local processes. However, if there is only
3891 +        * one process per node and multiple rails, then we use a different
3892 +        * algorithm where rails are allocated across all the processes in 
3893 +        * a round-robin fashion
3894 +        */
3895 +    
3896 +       if (maxlocal == 1)
3897 +       {
3898 +               /* Allocate rails in a round-robin manner */
3899 +               for (i = 0; i < nvp; i++)
3900 +                       *pref++ = i % nrails;
3901 +       }
3902 +       else
3903 +       {
3904 +               int node;
3905 +               int *vps;
3906 +               int nnodes = cap->cap_highnode - cap->cap_lownode + 1;
3907 +
3908 +               vps = (int *) malloc(sizeof(int)*maxlocal);
3909 +
3910 +               /* Grab the local process info for each node and allocate
3911 +                * rails to those vps on an equal basis
3912 +                */
3913 +               for (node = 0; node < nnodes; node++)
3914 +               {
3915 +                       int nlocal;
3916 +                       int pprail;
3917 +
3918 +                       /* Grab an array of local vps */
3919 +                       nlocal = elan_localvps(node, cap, vps, maxlocal);
3920 +           
3921 +                       /* Calculate the number of processes per rail */
3922 +                       if ((pprail = nlocal/nrails) == 0)
3923 +                               pprail = 1;
3924 +
3925 +                       /* Allocate processes to rails */
3926 +                       for (i = 0; i < nlocal; i++)
3927 +                       {
3928 +                               pref[vps[i]] = (i / pprail) % nrails;
3929 +                       }
3930 +               }
3931 +       
3932 +               free(vps);
3933 +       }
3934 +
3935 +       return( 0 );
3936 +}
3937 +
3938 +void 
3939 +elan_get_random_key(ELAN_USERKEY *key)
3940 +{
3941 +    int i;
3942 +    for (i = 0; i < sizeof(key->key_values) / sizeof(key->key_values[0]); i++)
3943 +       key->key_values[i] = lrand48();
3944 +}
3945 +
3946 +int elan_lowcontext(ELAN_CAPABILITY *cap)
3947 +{
3948 +    return(cap->cap_lowcontext);
3949 +}
3950 +
3951 +int elan_mycontext(ELAN_CAPABILITY *cap)
3952 +{
3953 +    return(cap->cap_mycontext);
3954 +}
3955 +
3956 +int elan_highcontext(ELAN_CAPABILITY *cap)
3957 +{
3958 +    return(cap->cap_highcontext);
3959 +}
3960 +
3961 +int elan_lownode(ELAN_CAPABILITY *cap)
3962 +{
3963 +    return(cap->cap_lownode);
3964 +}
3965 +
3966 +int elan_highnode(ELAN_CAPABILITY *cap)
3967 +{
3968 +    return(cap->cap_highnode);
3969 +}
3970 +
3971 +int elan_captype(ELAN_CAPABILITY *cap)
3972 +{
3973 +    return(cap->cap_type);
3974 +}
3975 +
3976 +int elan_railmask(ELAN_CAPABILITY *cap)
3977 +{
3978 +    return(cap->cap_railmask);
3979 +}
3980 +
3981 +#endif
3982 +
3983 +/*
3984 + * Local variables:
3985 + * c-file-style: "linux"
3986 + * End:
3987 + */
3988 Index: linux-2.4.21/drivers/net/qsnet/elan/device.c
3989 ===================================================================
3990 --- linux-2.4.21.orig/drivers/net/qsnet/elan/device.c   2004-02-23 16:02:56.000000000 -0500
3991 +++ linux-2.4.21/drivers/net/qsnet/elan/device.c        2005-06-01 23:12:54.559445184 -0400
3992 @@ -0,0 +1,147 @@
3993 +/*
3994 + *    Copyright (c) 2003 by Quadrics Ltd.
3995 + * 
3996 + *    For licensing information please see the supplied COPYING file
3997 + *
3998 + */
3999 +
4000 +#ident "@(#)$Id: device.c,v 1.5 2003/09/24 13:55:37 david Exp $"
4001 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/device.c,v $*/
4002 +
4003 +#include <qsnet/kernel.h>
4004 +#include <elan/elanmod.h>
4005 +
4006 +static LIST_HEAD(elan_dev_list);
4007 +
4008 +ELAN_DEV_STRUCT *
4009 +elan_dev_find (ELAN_DEV_IDX devidx)
4010 +{
4011 +       struct list_head   *tmp;
4012 +       ELAN_DEV_STRUCT *ptr=NULL;
4013 +
4014 +       list_for_each(tmp, &elan_dev_list) {
4015 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4016 +               if (ptr->devidx == devidx) 
4017 +                       return ptr;
4018 +               if (ptr->devidx > devidx)
4019 +                       return ERR_PTR(-ENXIO);
4020 +       }
4021 +       
4022 +       return ERR_PTR(-EINVAL);
4023 +}
4024 +
4025 +ELAN_DEV_STRUCT *
4026 +elan_dev_find_byrail (unsigned short deviceid, unsigned rail)
4027 +{
4028 +       struct list_head   *tmp;
4029 +       ELAN_DEV_STRUCT *ptr=NULL;
4030 +
4031 +       list_for_each(tmp, &elan_dev_list) {
4032 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4033 +
4034 +               ELAN_DEBUG5 (ELAN_DBG_ALL,"elan_dev_find_byrail devidx %d - %04x %04x,  %d %d \n", ptr->devidx, 
4035 +                            ptr->devinfo->dev_device_id, deviceid, ptr->devinfo->dev_rail, rail);
4036 +
4037 +               if (ptr->devinfo->dev_device_id == deviceid && ptr->devinfo->dev_rail == rail)
4038 +                       return ptr;
4039 +       }
4040 +       
4041 +       return NULL;
4042 +}
4043 +
4044 +ELAN_DEV_IDX
4045 +elan_dev_register (ELAN_DEVINFO *devinfo, ELAN_DEV_OPS *ops, void * user_data)
4046 +{
4047 +       ELAN_DEV_STRUCT *ptr;
4048 +       ELAN_DEV_IDX        devidx = 0;
4049 +       struct list_head   *tmp;
4050 +
4051 +        kmutex_lock(&elan_mutex);
4052 +
4053 +       /* is it already registered */
4054 +       if ((ptr = elan_dev_find_byrail(devinfo->dev_device_id, devinfo->dev_rail)) != NULL) 
4055 +       {
4056 +               kmutex_unlock(&elan_mutex);
4057 +               return EINVAL;
4058 +       }
4059 +
4060 +       /* find a free device idx */
4061 +       list_for_each (tmp, &elan_dev_list) {
4062 +               if (list_entry (tmp, ELAN_DEV_STRUCT, node)->devidx != devidx)
4063 +                       break;
4064 +               devidx++;
4065 +       }
4066 +
4067 +       /* create it and add */
4068 +       KMEM_ALLOC(ptr, ELAN_DEV_STRUCT *, sizeof(ELAN_DEV_STRUCT), 1);
4069 +       if (ptr == NULL)
4070 +       {
4071 +               kmutex_unlock(&elan_mutex);
4072 +               return ENOMEM;
4073 +       }
4074 +
4075 +       ptr->devidx    = devidx;
4076 +       ptr->ops       = ops;
4077 +       ptr->devinfo   = devinfo;
4078 +       ptr->user_data = user_data;
4079 +
4080 +       /* insert this entry *before* the last entry we've found */
4081 +       list_add_tail(&ptr->node, tmp);
4082 +
4083 +       kmutex_unlock(&elan_mutex);
4084 +       return  ESUCCESS;
4085 +}
4086 +
4087 +int
4088 +elan_dev_deregister (ELAN_DEVINFO *devinfo)
4089 +{
4090 +       ELAN_DEV_STRUCT *target;
4091 +
4092 +       kmutex_lock(&elan_mutex);
4093 +
4094 +       if ((target = elan_dev_find_byrail (devinfo->dev_device_id, devinfo->dev_rail)) == NULL)
4095 +       {
4096 +               kmutex_unlock(&elan_mutex);
4097 +               return  EINVAL;
4098 +       }
4099 +
4100 +       list_del(&target->node);
4101 +
4102 +       /* delete target entry */
4103 +       KMEM_FREE(target, sizeof(ELAN_DEV_STRUCT));
4104 +
4105 +       kmutex_unlock(&elan_mutex);
4106 +       return  ESUCCESS;
4107 +}
4108 +
4109 +int
4110 +elan_dev_dump ()
4111 +{
4112 +       struct list_head   *tmp;
4113 +       ELAN_DEV_STRUCT *ptr=NULL;
4114 +
4115 +       kmutex_lock(&elan_mutex);       
4116 +
4117 +       list_for_each(tmp, &elan_dev_list) {
4118 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4119 +
4120 +               ELAN_DEBUG3 (ELAN_DBG_ALL,"dev dump: index %u rail %u elan%c\n", 
4121 +                            ptr->devidx, ptr->devinfo->dev_rail, '3' + ptr->devinfo->dev_device_id);
4122 +               ELAN_DEBUG5 (ELAN_DBG_ALL,"dev dump: Vid %x   Did %x  Rid %x  DR %d  DVal %x\n",
4123 +                            ptr->devinfo->dev_vendor_id,
4124 +                            ptr->devinfo->dev_device_id,
4125 +                            ptr->devinfo->dev_revision_id,
4126 +                            ptr->devinfo->dev_driver_version,
4127 +                            ptr->devinfo->dev_num_down_links_value);
4128 +
4129 +       }
4130 +
4131 +       kmutex_unlock(&elan_mutex);
4132 +       return ESUCCESS;
4133 +}
4134 +
4135 +/*
4136 + * Local variables:
4137 + * c-file-style: "linux"
4138 + * End:
4139 + */
4140 Index: linux-2.4.21/drivers/net/qsnet/elan/devinfo.c
4141 ===================================================================
4142 --- linux-2.4.21.orig/drivers/net/qsnet/elan/devinfo.c  2004-02-23 16:02:56.000000000 -0500
4143 +++ linux-2.4.21/drivers/net/qsnet/elan/devinfo.c       2005-06-01 23:12:54.559445184 -0400
4144 @@ -0,0 +1,78 @@
4145 +/*
4146 + *    Copyright (c) 2003 by Quadrics Ltd.
4147 + * 
4148 + *    For licensing information please see the supplied COPYING file
4149 + *
4150 + */
4151 +
4152 +#ident "@(#)$Id: devinfo.c,v 1.5 2003/09/24 13:55:37 david Exp $"
4153 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/devinfo.c,v $*/
4154 +
4155 +#include <qsnet/kernel.h>
4156 +#include <elan/elanmod.h>
4157 +
4158 +int 
4159 +elan_get_devinfo(ELAN_DEV_IDX devidx, ELAN_DEVINFO *devinfo)
4160 +{
4161 +       ELAN_DEV_STRUCT *target;
4162 +       int                 res;
4163 +
4164 +       kmutex_lock(&elan_mutex);
4165 +
4166 +       target = elan_dev_find (devidx);
4167 +
4168 +       if (IS_ERR (target))
4169 +               res = PTR_ERR(target);
4170 +       else
4171 +       {
4172 +               copyout(target->devinfo, devinfo, sizeof(ELAN_DEVINFO));
4173 +               res = ESUCCESS;
4174 +       }
4175 +       
4176 +       kmutex_unlock(&elan_mutex);
4177 +       return res;
4178 +}
4179 +
4180 +int 
4181 +elan_get_position(ELAN_DEV_IDX devidx, ELAN_POSITION *position)
4182 +{
4183 +       ELAN_DEV_STRUCT *target;
4184 +       int                 res;
4185 +
4186 +       kmutex_lock(&elan_mutex);
4187 +
4188 +       target = elan_dev_find(devidx);
4189 +
4190 +       if (IS_ERR (target))
4191 +               res = PTR_ERR(target);
4192 +       else
4193 +               res = target->ops->get_position(target->user_data, position);
4194 +       
4195 +       kmutex_unlock(&elan_mutex);
4196 +       return res;
4197 +}
4198 +
4199 +int 
4200 +elan_set_position(ELAN_DEV_IDX devidx, unsigned short nodeId, unsigned short numNodes)
4201 +{
4202 +       ELAN_DEV_STRUCT *target;
4203 +       int                 res;
4204 +
4205 +       kmutex_lock(&elan_mutex);
4206 +
4207 +       target = elan_dev_find(devidx);
4208 +
4209 +       if (IS_ERR (target))
4210 +               res = PTR_ERR (target);
4211 +       else
4212 +               res = target->ops->set_position(target->user_data, nodeId, numNodes);
4213 +       
4214 +       kmutex_unlock(&elan_mutex);
4215 +       return res;
4216 +}
4217 +
4218 +/*
4219 + * Local variables:
4220 + * c-file-style: "linux"
4221 + * End:
4222 + */
4223 Index: linux-2.4.21/drivers/net/qsnet/elan/elanmod.c
4224 ===================================================================
4225 --- linux-2.4.21.orig/drivers/net/qsnet/elan/elanmod.c  2004-02-23 16:02:56.000000000 -0500
4226 +++ linux-2.4.21/drivers/net/qsnet/elan/elanmod.c       2005-06-01 23:12:54.559445184 -0400
4227 @@ -0,0 +1,149 @@
4228 +/*
4229 + *    Copyright (c) 2003 by Quadrics Ltd.
4230 + * 
4231 + *    For licensing information please see the supplied COPYING file
4232 + *
4233 + */
4234 +#ident "@(#)$Id: elanmod.c,v 1.11 2004/06/18 09:28:16 mike Exp $"
4235 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod.c,v $*/
4236 +
4237 +#include <qsnet/kernel.h>
4238 +#include <elan/elanmod.h>
4239 +
4240 +kmutex_t  elan_mutex;
4241 +
4242 +int 
4243 +elan_init()
4244 +{
4245 +       kmutex_init(&elan_mutex);
4246 +       return (ESUCCESS);
4247 +}
4248 +
4249 +int 
4250 +elan_fini()
4251 +{
4252 +       kmutex_destroy(&elan_mutex);
4253 +       return (ESUCCESS);
4254 +}
4255 +
4256 +int 
4257 +elanmod_classify_cap (ELAN_POSITION *position, ELAN_CAPABILITY *cap, unsigned use)
4258 +{
4259 +       if (cap->cap_version != ELAN_CAP_VERSION_NUMBER)
4260 +       {
4261 +               ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER);
4262 +               return (-EINVAL);
4263 +       }
4264 +       
4265 +       if (cap->cap_lowcontext == ELAN_CAP_UNINITIALISED || cap->cap_highcontext == ELAN_CAP_UNINITIALISED)
4266 +       {
4267 +               ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: LowContext %d    HighContext %d MyContext %d\n",
4268 +                            cap->cap_lowcontext , cap->cap_highcontext, cap->cap_mycontext);
4269 +               return (-EINVAL);
4270 +       }
4271 +       
4272 +       if (cap->cap_lowcontext > cap->cap_highcontext)
4273 +       {
4274 +               ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext);
4275 +               return (-EINVAL);
4276 +       }
4277 +       
4278 +       
4279 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
4280 +       {
4281 +       case ELAN_CAP_TYPE_BLOCK:
4282 +       case ELAN_CAP_TYPE_CYCLIC:
4283 +               if (position->pos_mode == ELAN_POS_UNKNOWN)
4284 +               {
4285 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: Position Unknown \n");
4286 +                       return (-EAGAIN);
4287 +               }
4288 +               
4289 +               if ( ! ( ELAN_USER_CONTEXT(cap->cap_lowcontext) && ELAN_USER_CONTEXT(cap->cap_highcontext)))
4290 +               {
4291 +                       ELAN_DEBUG4 (ELAN_DBG_VP, "elanmod_classify_cap:  USER_BASE_CONTEXT %d %d %d %d \n" ,  ELAN_USER_BASE_CONTEXT_NUM,cap->cap_lowcontext, cap->cap_highcontext ,ELAN_USER_TOP_CONTEXT_NUM);
4292 +                       return (-EINVAL);
4293 +               }
4294 +               if (cap->cap_lownode == ELAN_CAP_UNINITIALISED)
4295 +                       cap->cap_lownode = position->pos_nodeid;
4296 +               if (cap->cap_highnode == ELAN_CAP_UNINITIALISED)
4297 +                       cap->cap_highnode = position->pos_nodeid;
4298 +               
4299 +               if (cap->cap_lownode < 0 || cap->cap_highnode >= position->pos_nodes || cap->cap_lownode > cap->cap_highnode)
4300 +               {
4301 +                       ELAN_DEBUG3 ( ELAN_DBG_VP,"elanmod_classify_cap: low %d high %d pos %d \n" , cap->cap_lownode  ,cap->cap_highnode, position->pos_nodes);
4302 +                       
4303 +                       return (-EINVAL);
4304 +               }
4305 +               
4306 +               if ((cap->cap_highnode < position->pos_nodeid) || (cap->cap_lownode > position->pos_nodeid))
4307 +               {
4308 +                       ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: node not i range low %d high %d this %d\n",
4309 +                                    cap->cap_lownode, cap->cap_highnode, position->pos_nodeid);
4310 +                       return (-EINVAL);
4311 +               }
4312 +
4313 +               break;
4314 +       default:
4315 +               ELAN_DEBUG1 (ELAN_DBG_VP, "elanmod_classify_cap: cant decode type %x \n", cap->cap_type & ELAN_CAP_TYPE_MASK);
4316 +               return (-EINVAL);
4317 +
4318 +       }
4319 +
4320 +       switch (use)
4321 +       {
4322 +       case ELAN_USER_ATTACH:
4323 +       case ELAN_USER_DETACH:
4324 +               if (cap->cap_mycontext == ELAN_CAP_UNINITIALISED)
4325 +               {
4326 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: cap->cap_mycontext == ELAN_CAP_UNINITIALISED");
4327 +                       return (-EINVAL);
4328 +               }
4329 +       
4330 +               if ((cap->cap_mycontext != ELAN_CAP_UNINITIALISED) && 
4331 +                   (cap->cap_mycontext < cap->cap_lowcontext || cap->cap_mycontext > cap->cap_highcontext))
4332 +               {
4333 +                       ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: cap->cap_mycontext out of range %d %d %d \n", cap->cap_lowcontext,cap->cap_mycontext,cap->cap_highcontext);
4334 +                       return (-EINVAL);
4335 +               }   
4336 +               break;
4337 +
4338 +       case ELAN_USER_P2P:
4339 +               break;
4340 +
4341 +       case ELAN_USER_BROADCAST:
4342 +               if (! (cap->cap_type & ELAN_CAP_TYPE_BROADCASTABLE)) {
4343 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: use ELAN_USER_BROADCAST but cap not ELAN_CAP_TYPE_BROADCASTABLE\n");
4344 +                       return (-EINVAL);
4345 +               }
4346 +               break;
4347 +
4348 +       default:
4349 +               ELAN_DEBUG1 (ELAN_DBG_VP, "elanmod_classify_cap: unknown use (%d)\n",use);
4350 +               return (-EINVAL);
4351 +       }
4352 +
4353 +
4354 +
4355 +       /* is any ctxt an rms one ?? */
4356 +       if (ELAN_RMS_CONTEXT(cap->cap_lowcontext) || ELAN_RMS_CONTEXT(cap->cap_highcontext))
4357 +       {
4358 +               /* so both low and high must be */
4359 +               if (!(ELAN_RMS_CONTEXT(cap->cap_lowcontext) && ELAN_RMS_CONTEXT(cap->cap_highcontext))) 
4360 +               {
4361 +                       ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: not rms ctxt %x %x\n",cap->cap_lowcontext,cap->cap_highcontext );
4362 +                       return (-EINVAL);
4363 +               }
4364 +               ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: returning ELAN_CAP_RMS\n");
4365 +               return (ELAN_CAP_RMS);
4366 +       }
4367 +
4368 +       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: returning ELAN_CAP_OK\n");
4369 +       return (ELAN_CAP_OK);
4370 +}
4371 +
4372 +/*
4373 + * Local variables:
4374 + * c-file-style: "linux"
4375 + * End:
4376 + */
4377 Index: linux-2.4.21/drivers/net/qsnet/elan/elanmod_linux.c
4378 ===================================================================
4379 --- linux-2.4.21.orig/drivers/net/qsnet/elan/elanmod_linux.c    2004-02-23 16:02:56.000000000 -0500
4380 +++ linux-2.4.21/drivers/net/qsnet/elan/elanmod_linux.c 2005-06-01 23:12:54.560445032 -0400
4381 @@ -0,0 +1,410 @@
4382 +/*
4383 + *    Copyright (c) 2003 by Quadrics Ltd.
4384 + * 
4385 + *    For licensing information please see the supplied COPYING file
4386 + *
4387 + */
4388 +
4389 +#ident "@(#)$Id: elanmod_linux.c,v 1.16 2004/06/14 15:45:37 mike Exp $"
4390 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod_linux.c,v $*/
4391 +
4392 +#include <qsnet/kernel.h>
4393 +
4394 +#include <elan/elanmod.h>
4395 +#include <elan/elanmod_linux.h>
4396 +
4397 +#include <linux/module.h>
4398 +
4399 +#include <linux/sysctl.h>
4400 +#include <linux/init.h>
4401 +
4402 +#include <qsnet/procfs_linux.h>
4403 +
4404 +MODULE_AUTHOR("Quadrics Ltd.");
4405 +MODULE_DESCRIPTION("Elan support module");
4406 +
4407 +MODULE_LICENSE("GPL");
4408 +
4409 +/* elanmod.c */
4410 +EXPORT_SYMBOL(elanmod_classify_cap);
4411 +
4412 +/* bitmap.c */
4413 +#include <elan/bitmap.h>
4414 +
4415 +EXPORT_SYMBOL(bt_freebit);
4416 +EXPORT_SYMBOL(bt_lowbit); 
4417 +EXPORT_SYMBOL(bt_nextbit);
4418 +EXPORT_SYMBOL(bt_copy);
4419 +EXPORT_SYMBOL(bt_zero); 
4420 +EXPORT_SYMBOL(bt_fill); 
4421 +EXPORT_SYMBOL(bt_cmp); 
4422 +EXPORT_SYMBOL(bt_intersect);
4423 +EXPORT_SYMBOL(bt_remove); 
4424 +EXPORT_SYMBOL(bt_add); 
4425 +EXPORT_SYMBOL(bt_spans);
4426 +EXPORT_SYMBOL(bt_subset);  
4427 +EXPORT_SYMBOL(bt_up);
4428 +EXPORT_SYMBOL(bt_down);
4429 +EXPORT_SYMBOL(bt_nbits);
4430 +
4431 +/* capability.c */
4432 +EXPORT_SYMBOL(elan_nullcap);
4433 +EXPORT_SYMBOL(elan_detach_cap);
4434 +EXPORT_SYMBOL(elan_attach_cap);
4435 +EXPORT_SYMBOL(elan_validate_map);
4436 +
4437 +/* stats.c */
4438 +EXPORT_SYMBOL(elan_stats_register);
4439 +EXPORT_SYMBOL(elan_stats_deregister);
4440 +
4441 +/* device.c */
4442 +EXPORT_SYMBOL(elan_dev_deregister);
4443 +EXPORT_SYMBOL(elan_dev_register);
4444 +
4445 +/* debug */
4446 +int  elan_debug_mode = QSNET_DEBUG_BUFFER; 
4447 +int  elan_debug_mask;
4448 +
4449 +static struct proc_dir_entry *elan_procfs_root;
4450 +
4451 +extern void elan_procfs_init(void);
4452 +extern void elan_procfs_fini(void);
4453 +
4454 +static int elan_open    (struct inode *ino, struct file *fp);
4455 +static int elan_release (struct inode *ino, struct file *fp);
4456 +static int elan_ioctl   (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
4457 +
4458 +static struct file_operations elan_fops = 
4459 +{
4460 +       ioctl:   elan_ioctl,
4461 +       open:    elan_open,
4462 +       release: elan_release,
4463 +};
4464 +
4465 +static int __init elan_start(void)
4466 +{
4467 +       int res;
4468 +
4469 +       elan_procfs_init(); 
4470 +
4471 +       if ((res = elan_init()) != ESUCCESS)
4472 +       {
4473 +               elan_procfs_fini();
4474 +               return (-res);
4475 +       }
4476 +
4477 +       return (0);
4478 +}
4479 +
4480 +static void __exit elan_exit(void)
4481 +{
4482 +       elan_fini();
4483 +       elan_procfs_fini();
4484 +}
4485 +
4486 +
4487 +/* Declare the module init and exit functions */
4488 +void
4489 +elan_procfs_init()
4490 +{
4491 +       struct proc_dir_entry  *p;
4492 +       
4493 +       elan_procfs_root = proc_mkdir("elan",   qsnet_procfs_root);
4494 +       
4495 +       qsnet_proc_register_hex(elan_procfs_root, "debug_mask", &elan_debug_mask, 0);
4496 +       qsnet_proc_register_hex(elan_procfs_root, "debug_mode", &elan_debug_mode, 0);
4497 +
4498 +       if ((p = create_proc_entry ("ioctl", 0, elan_procfs_root)) != NULL)
4499 +       {
4500 +               p->proc_fops = &elan_fops;
4501 +               p->data      = 0;
4502 +               p->owner     = THIS_MODULE;
4503 +       }   
4504 +}
4505 +
4506 +void
4507 +elan_procfs_fini()
4508 +{
4509 +       remove_proc_entry ("debug_mask", elan_procfs_root);
4510 +       remove_proc_entry ("debug_mode", elan_procfs_root);
4511 +       
4512 +       remove_proc_entry ("ioctl",   elan_procfs_root); 
4513 +       remove_proc_entry ("version", elan_procfs_root);  
4514 +       
4515 +       remove_proc_entry ("elan",   qsnet_procfs_root);
4516 +}
4517 +
4518 +module_init(elan_start);
4519 +module_exit(elan_exit);
4520 +
4521 +static int
4522 +elan_open (struct inode *inode, struct file *fp)
4523 +{
4524 +       MOD_INC_USE_COUNT;
4525 +       fp->private_data = NULL;
4526 +       return (0);
4527 +}
4528 +
4529 +static int
4530 +elan_release (struct inode *inode, struct file *fp)
4531 +{
4532 +       /* mark all caps owned by fp to be destroyed */
4533 +       elan_destroy_cap(fp,NULL);
4534 +
4535 +       MOD_DEC_USE_COUNT;
4536 +       return (0);
4537 +}
4538 +
4539 +static int 
4540 +elan_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
4541 +{
4542 +       int rep = 0;
4543 +
4544 +       switch (cmd) 
4545 +       {
4546 +       case ELANCTRL_STATS_GET_NEXT :
4547 +       {
4548 +               ELANCTRL_STATS_GET_NEXT_STRUCT args;
4549 +
4550 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_NEXT_STRUCT)))
4551 +                       return (-EFAULT); 
4552 +
4553 +               /* uses copyin/copyout */
4554 +               if (elan_stats_get_next_index(args.statidx, args.next_statidx) != 0 ) 
4555 +                       return (-EINVAL);       
4556 +
4557 +               break;
4558 +       }
4559 +       case ELANCTRL_STATS_FIND_INDEX :
4560 +       {
4561 +               ELANCTRL_STATS_FIND_INDEX_STRUCT args;
4562 +
4563 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_FIND_INDEX_STRUCT)))
4564 +                       return (-EFAULT); 
4565 +
4566 +               /* uses copyin/copyout */
4567 +               if (elan_stats_find_index(args.block_name, args.statidx, args.num_entries) != 0 ) 
4568 +                       return (-EINVAL);       
4569 +
4570 +               break;
4571 +       }
4572 +       case ELANCTRL_STATS_GET_BLOCK_INFO :
4573 +       {
4574 +               ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT args;
4575 +               
4576 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT)))
4577 +                       return (-EFAULT);
4578 +
4579 +               /* uses copyin/copyout */
4580 +               if (elan_stats_get_block_info(args.statidx, args.block_name, args.num_entries) != 0 ) 
4581 +                       return (-EINVAL);
4582 +               break;          
4583 +       }
4584 +       case ELANCTRL_STATS_GET_INDEX_NAME :
4585 +       {
4586 +               ELANCTRL_STATS_GET_INDEX_NAME_STRUCT args;
4587 +               
4588 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_INDEX_NAME_STRUCT)))
4589 +                       return (-EFAULT);
4590 +
4591 +               /* uses copyin/copyout */
4592 +               if (elan_stats_get_index_name(args.statidx, args.index, args.name) != 0 )
4593 +                       return (-EINVAL);
4594 +               break;
4595 +       }
4596 +       case ELANCTRL_STATS_CLEAR_BLOCK :
4597 +       {
4598 +               ELANCTRL_STATS_CLEAR_BLOCK_STRUCT args;
4599 +               
4600 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_CLEAR_BLOCK_STRUCT)))
4601 +                       return (-EFAULT);
4602 +
4603 +               /* statidx is not a pointer */
4604 +               if (elan_stats_clear_block(args.statidx) != 0 )
4605 +                       return (-EINVAL);
4606 +               break;
4607 +       }
4608 +       case ELANCTRL_STATS_GET_BLOCK :
4609 +       {
4610 +               ELANCTRL_STATS_GET_BLOCK_STRUCT args;
4611 +               
4612 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_BLOCK_STRUCT)))
4613 +                       return (-EFAULT);
4614 +
4615 +               /* uses copyin/copyout */
4616 +               if (elan_stats_get_block(args.statidx, args.entries, args.values) != 0 )
4617 +                       return (-EINVAL);
4618 +               break;
4619 +       }
4620 +       case ELANCTRL_GET_DEVINFO :
4621 +       {
4622 +               ELANCTRL_GET_DEVINFO_STRUCT args;
4623 +               
4624 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_DEVINFO_STRUCT)))
4625 +                       return (-EFAULT);
4626 +
4627 +               /* uses copyin/copyout */
4628 +               if (elan_get_devinfo(args.devidx, args.devinfo) != 0 )
4629 +                       return (-EINVAL);
4630 +               break;          
4631 +       }
4632 +       case ELANCTRL_GET_POSITION :
4633 +       {
4634 +               ELANCTRL_GET_POSITION_STRUCT args;
4635 +               
4636 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_POSITION_STRUCT)))
4637 +                       return (-EFAULT); 
4638 +
4639 +               /* uses copyin/copyout */
4640 +               if (elan_get_position(args.devidx, args.position) != 0 )
4641 +                       return (-EINVAL);
4642 +               break;          
4643 +       }
4644 +       case ELANCTRL_SET_POSITION :
4645 +       {
4646 +               ELANCTRL_SET_POSITION_STRUCT args;
4647 +               
4648 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_SET_POSITION_STRUCT)))
4649 +                       return (-EFAULT);
4650 +
4651 +               /* uses copyin/copyout */
4652 +               if (elan_set_position(args.devidx, args.nodeId, args.numNodes) != 0 )
4653 +                       return (-EINVAL);       
4654 +               break;          
4655 +       }
4656 +       case ELANCTRL_CREATE_CAP  :
4657 +       {
4658 +               ELANCTRL_CREATE_CAP_STRUCT *args;
4659 +
4660 +               /* get space for args */
4661 +               KMEM_ALLOC(args, ELANCTRL_CREATE_CAP_STRUCT *, sizeof(ELANCTRL_CREATE_CAP_STRUCT), 1);
4662 +               if (args == NULL)
4663 +                       return(-ENOMEM);        
4664 +
4665 +               /* copy them */
4666 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_CREATE_CAP_STRUCT)))
4667 +                       rep = (-EFAULT);
4668 +               else 
4669 +               {
4670 +                       if ((elan_validate_cap(&args->cap) != 0) || (elan_create_cap(fp,&args->cap) != 0 )) 
4671 +                               rep = (-EINVAL);
4672 +               }
4673 +
4674 +               /* free the space */
4675 +               KMEM_FREE(args, sizeof(ELANCTRL_CREATE_CAP_STRUCT));
4676 +
4677 +               break;          
4678 +       }
4679 +       case ELANCTRL_DESTROY_CAP  :
4680 +       {
4681 +               ELANCTRL_DESTROY_CAP_STRUCT *args;
4682 +
4683 +               /* get space for args */
4684 +               KMEM_ALLOC(args, ELANCTRL_DESTROY_CAP_STRUCT *, sizeof(ELANCTRL_DESTROY_CAP_STRUCT), 1);
4685 +               if (args == NULL)
4686 +                       return(-ENOMEM);        
4687 +
4688 +               /* copy them */
4689 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_DESTROY_CAP_STRUCT)))
4690 +                       rep = (-EFAULT);
4691 +               else 
4692 +               {
4693 +                       if (elan_destroy_cap(fp, &args->cap) != 0 )
4694 +                               rep = (-EINVAL);
4695 +               }
4696 +
4697 +               /* free the space */
4698 +               KMEM_FREE(args, sizeof(ELANCTRL_DESTROY_CAP_STRUCT));
4699 +
4700 +               break;          
4701 +       }
4702 +       case ELANCTRL_CREATE_VP  :
4703 +       {
4704 +               ELANCTRL_CREATE_VP_STRUCT *args;
4705 +
4706 +               /* get space for args */
4707 +               KMEM_ALLOC(args, ELANCTRL_CREATE_VP_STRUCT *, sizeof(ELANCTRL_CREATE_VP_STRUCT), 1);
4708 +               if (args == NULL)
4709 +                       return(-ENOMEM);        
4710 +
4711 +               /* copy them */
4712 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_CREATE_VP_STRUCT)))
4713 +                       rep = (-EFAULT);
4714 +               else
4715 +               {
4716 +                       if ((elan_validate_cap( &args->map) != 0) || (elan_create_vp(fp, &args->cap, &args->map) != 0 ))
4717 +                               rep = (-EINVAL);        
4718 +               }
4719 +
4720 +               KMEM_FREE(args, sizeof(ELANCTRL_CREATE_VP_STRUCT ));
4721 +
4722 +               break;          
4723 +       }
4724 +       case ELANCTRL_DESTROY_VP  :
4725 +       {
4726 +               ELANCTRL_DESTROY_VP_STRUCT *args;
4727 +
4728 +               /* get space for args */
4729 +               KMEM_ALLOC(args, ELANCTRL_DESTROY_VP_STRUCT *, sizeof(ELANCTRL_DESTROY_VP_STRUCT), 1);
4730 +               if (args == NULL)
4731 +                       return(-ENOMEM);        
4732 +               
4733 +               /* copy them */
4734 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_DESTROY_VP_STRUCT)))
4735 +                       rep = (-EFAULT);
4736 +               else 
4737 +               {
4738 +                       if (elan_destroy_vp(fp, &args->cap, &args->map) != 0 )
4739 +                               rep = (-EINVAL);        
4740 +               }
4741 +
4742 +               KMEM_FREE(args, sizeof(ELANCTRL_DESTROY_VP_STRUCT ));
4743 +
4744 +               break;          
4745 +       }
4746 +
4747 +       case ELANCTRL_GET_CAPS  :
4748 +       {
4749 +               ELANCTRL_GET_CAPS_STRUCT args;
4750 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_CAPS_STRUCT)))
4751 +                       return (-EFAULT);
4752 +
4753 +               /* uses copyin/copyout */
4754 +               if (elan_get_caps(args.number_of_results, args.array_size, args.caps) != 0 )
4755 +                       return (-EINVAL);
4756 +               break;          
4757 +       }
4758 +       case ELANCTRL_DEBUG_DUMP :
4759 +       {
4760 +               elan_cap_dump();
4761 +               elan_dev_dump();
4762 +
4763 +               break;
4764 +       }
4765 +       case ELANCTRL_DEBUG_BUFFER :
4766 +       {
4767 +               ELANCTRL_DEBUG_BUFFER_STRUCT args;
4768 +
4769 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_DEBUG_BUFFER_STRUCT)))
4770 +                       return (-EFAULT);
4771 +
4772 +               /* uses copyin/copyout */
4773 +               if ((args.size = qsnet_debug_buffer (args.buffer, args.size)) != -1 &&
4774 +                   copy_to_user ((void *) arg, &args, sizeof (ELANCTRL_DEBUG_BUFFER_STRUCT)))
4775 +                       return (-EFAULT);
4776 +               break;
4777 +       }
4778 +       default:
4779 +               return (-EINVAL);
4780 +               break;
4781 +       }
4782 +
4783 +       return (rep);
4784 +}
4785 +
4786 +
4787 +/*
4788 + * Local variables:
4789 + * c-file-style: "linux"
4790 + * End:
4791 + */
4792 Index: linux-2.4.21/drivers/net/qsnet/elan/Makefile
4793 ===================================================================
4794 --- linux-2.4.21.orig/drivers/net/qsnet/elan/Makefile   2004-02-23 16:02:56.000000000 -0500
4795 +++ linux-2.4.21/drivers/net/qsnet/elan/Makefile        2005-06-01 23:12:54.560445032 -0400
4796 @@ -0,0 +1,31 @@
4797 +#
4798 +# Makefile for Quadrics QsNet
4799 +#
4800 +# Copyright (c) 2002-2004 Quadrics Ltd
4801 +#
4802 +# File: drivers/net/qsnet/elan/Makefile
4803 +#
4804 +
4805 +
4806 +#
4807 +
4808 +#
4809 +# Makefile for Quadrics QsNet
4810 +#
4811 +# Copyright (c) 2004 Quadrics Ltd.
4812 +#
4813 +# File: driver/net/qsnet/elan/Makefile
4814 +#
4815 +
4816 +list-multi             := elan.o
4817 +elan-objs      := elanmod.o device.o stats.o devinfo.o capability.o elanmod_linux.o capability_general.o bitmap.o
4818 +export-objs            := elanmod_linux.o
4819 +obj-$(CONFIG_QSNET)    := elan.o
4820 +
4821 +elan.o : $(elan-objs)
4822 +       $(LD) -r -o $@ $(elan-objs)
4823 +
4824 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
4825 +
4826 +include $(TOPDIR)/Rules.make
4827 +
4828 Index: linux-2.4.21/drivers/net/qsnet/elan/Makefile.conf
4829 ===================================================================
4830 --- linux-2.4.21.orig/drivers/net/qsnet/elan/Makefile.conf      2004-02-23 16:02:56.000000000 -0500
4831 +++ linux-2.4.21/drivers/net/qsnet/elan/Makefile.conf   2005-06-01 23:12:54.561444880 -0400
4832 @@ -0,0 +1,10 @@
4833 +# Flags for generating QsNet Linux Kernel Makefiles
4834 +MODNAME                =       elan.o
4835 +MODULENAME     =       elan
4836 +KOBJFILES      =       elanmod.o device.o stats.o devinfo.o capability.o elanmod_linux.o capability_general.o bitmap.o
4837 +EXPORT_KOBJS   =       elanmod_linux.o 
4838 +CONFIG_NAME    =       CONFIG_QSNET
4839 +SGALFC         =       
4840 +# EXTRALINES START
4841 +
4842 +# EXTRALINES END
4843 Index: linux-2.4.21/drivers/net/qsnet/elan/quadrics_version.h
4844 ===================================================================
4845 --- linux-2.4.21.orig/drivers/net/qsnet/elan/quadrics_version.h 2004-02-23 16:02:56.000000000 -0500
4846 +++ linux-2.4.21/drivers/net/qsnet/elan/quadrics_version.h      2005-06-01 23:12:54.561444880 -0400
4847 @@ -0,0 +1 @@
4848 +#define QUADRICS_VERSION "4.30qsnet"
4849 Index: linux-2.4.21/drivers/net/qsnet/elan/stats.c
4850 ===================================================================
4851 --- linux-2.4.21.orig/drivers/net/qsnet/elan/stats.c    2004-02-23 16:02:56.000000000 -0500
4852 +++ linux-2.4.21/drivers/net/qsnet/elan/stats.c 2005-06-01 23:12:54.562444728 -0400
4853 @@ -0,0 +1,277 @@
4854 +/*
4855 + *    Copyright (c) 2003 by Quadrics Ltd.
4856 + * 
4857 + *    For licensing information please see the supplied COPYING file
4858 + *
4859 + */
4860 +
4861 +#ident "@(#)$Id: stats.c,v 1.6 2003/09/24 13:55:37 david Exp $"
4862 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/stats.c,v $*/
4863 +
4864 +#include <qsnet/kernel.h>
4865 +#include <elan/elanmod.h>
4866 +
4867 +static LIST_HEAD(elan_stats_list);
4868 +static ELAN_STATS_IDX elan_next_statidx=0;
4869 +
4870 +ELAN_STATS_STRUCT *
4871 +elan_stats_find(ELAN_STATS_IDX statidx)
4872 +{
4873 +       struct list_head     *tmp;
4874 +       ELAN_STATS_STRUCT *ptr=NULL;
4875 +
4876 +       list_for_each(tmp, &elan_stats_list) {
4877 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
4878 +               if ( ptr->statidx == statidx ) 
4879 +                       return ptr;
4880 +       }
4881 +
4882 +       ELAN_DEBUG1 (ELAN_DBG_CTRL, "elan_stats_find failed %d\n", statidx);    
4883 +       return NULL;
4884 +}
4885 +
4886 +ELAN_STATS_STRUCT *
4887 +elan_stats_find_by_name(caddr_t block_name)
4888 +{
4889 +       struct list_head     *tmp;
4890 +       ELAN_STATS_STRUCT *ptr=NULL;
4891 +
4892 +       list_for_each(tmp, &elan_stats_list)    {
4893 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
4894 +               if (!strcmp(ptr->block_name, block_name)) 
4895 +               {
4896 +                       ELAN_DEBUG3 (ELAN_DBG_CTRL, "elan_stats_find_by_name found %s (%d,%d)\n", block_name, ptr->statidx, ptr->num_entries);  
4897 +                       return ptr;
4898 +               }
4899 +       }
4900 +
4901 +       ELAN_DEBUG1 (ELAN_DBG_CTRL, "elan_stats_find_by_name failed %s\n", block_name);
4902 +       return NULL;
4903 +}
4904 +
4905 +ELAN_STATS_STRUCT *
4906 +elan_stats_find_next(ELAN_STATS_IDX statidx)
4907 +{
4908 +       struct list_head     *tmp;
4909 +       ELAN_STATS_STRUCT *ptr=NULL;
4910 +
4911 +       list_for_each(tmp, &elan_stats_list) {
4912 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
4913 +         
4914 +               if ( ptr->statidx > statidx ) 
4915 +                       return ptr;       
4916 +       }       
4917 +
4918 +       return NULL;
4919 +}
4920 +
4921 +int 
4922 +elan_stats_get_next_index (ELAN_STATS_IDX statidx, ELAN_STATS_IDX *next_block)
4923 +{
4924 +       ELAN_STATS_STRUCT *target;
4925 +       ELAN_STATS_IDX        next = 0;
4926 +
4927 +       kmutex_lock(&elan_mutex);
4928 +
4929 +       if ((target = elan_stats_find_next(statidx)) != NULL)
4930 +               next = target->statidx;
4931 +
4932 +       copyout(&next, next_block, sizeof(ELAN_STATS_IDX) );
4933 +
4934 +       kmutex_unlock(&elan_mutex);
4935 +       return 0;
4936 +}
4937 +
4938 +int 
4939 +elan_stats_find_index  (caddr_t  block_name, ELAN_STATS_IDX *statidx,  uint *num_entries)
4940 +
4941 +{
4942 +       ELAN_STATS_STRUCT *target;
4943 +       ELAN_STATS_IDX        index   = 0;
4944 +       uint                  entries = 0;
4945 +
4946 +       kmutex_lock(&elan_mutex);
4947 +
4948 +       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_find_index %s \n", block_name);
4949 +
4950 +       if ((target = elan_stats_find_by_name(block_name)) != NULL)
4951 +       {
4952 +               index   = target->statidx;
4953 +               entries = target->num_entries;
4954 +       }
4955 +
4956 +       ELAN_DEBUG3(ELAN_DBG_CTRL, "elan_stats_find_index found %d %d (target=%p)\n", index, entries, target);
4957 +
4958 +       copyout(&index,   statidx,     sizeof(ELAN_STATS_IDX));
4959 +       copyout(&entries, num_entries, sizeof(uint));
4960 +
4961 +       kmutex_unlock(&elan_mutex);
4962 +       return  ESUCCESS;
4963 +}
4964 +
4965 +int 
4966 +elan_stats_get_block_info (ELAN_STATS_IDX statidx, caddr_t  block_name, uint *num_entries)
4967 +{
4968 +       ELAN_STATS_STRUCT *target;
4969 +       int                   res=EINVAL;
4970 +
4971 +       kmutex_lock(&elan_mutex);
4972 +
4973 +       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_get_block_info statidx %d\n",statidx);
4974 +
4975 +       if ((target = elan_stats_find(statidx)) != NULL)
4976 +       {
4977 +               ELAN_DEBUG2(ELAN_DBG_CTRL, "elan_stats_get_block_info name %s entries %d\n",block_name, *num_entries);
4978 +               
4979 +               copyout( target->block_name,  block_name,  ELAN_STATS_NAME_MAX_LEN);
4980 +               copyout(&target->num_entries, num_entries, sizeof(uint));
4981 +
4982 +               res = ESUCCESS;
4983 +       }
4984 +
4985 +       kmutex_unlock(&elan_mutex);
4986 +       return res;
4987 +}
4988 +
4989 +int 
4990 +elan_stats_get_index_name (ELAN_STATS_IDX statidx, uint index, caddr_t name)
4991 +{
4992 +       ELAN_STATS_STRUCT *target;
4993 +       int                   res=EINVAL;
4994 +
4995 +       kmutex_lock(&elan_mutex);
4996 +
4997 +       ELAN_DEBUG2(ELAN_DBG_CTRL, "elan_stats_get_index_name statidx %d index %d\n",statidx, index);
4998 +
4999 +       if ((target = elan_stats_find(statidx)) != NULL)
5000 +       {
5001 +               if ( target->ops->elan_stats_get_name== NULL) 
5002 +               {
5003 +                       ELAN_DEBUG0(ELAN_DBG_CTRL, "elan_stats_get_index_name no callback\n");  
5004 +                       kmutex_unlock(&elan_mutex);
5005 +                       return  res;
5006 +               }
5007 +
5008 +               if ((res = target->ops->elan_stats_get_name(target->arg, index, name)) == 0)
5009 +                       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_get_index_name name %s\n",name); 
5010 +
5011 +       }
5012 +       kmutex_unlock(&elan_mutex);
5013 +       return  res;
5014 +}
5015 +
5016 +int 
5017 +elan_stats_get_block (ELAN_STATS_IDX statidx, uint entries, ulong *values)
5018 +{
5019 +       ELAN_STATS_STRUCT *target;
5020 +       int                   res=EINVAL;
5021 +
5022 +       kmutex_lock(&elan_mutex);
5023 +
5024 +       
5025 +       if ((target = elan_stats_find(statidx)) != NULL)
5026 +       {
5027 +               if ( target->ops->elan_stats_get_block == NULL) 
5028 +               {
5029 +                       kmutex_unlock(&elan_mutex);
5030 +                       return  res;
5031 +               }
5032 +
5033 +               res = target->ops->elan_stats_get_block(target->arg, entries, values);
5034 +       }
5035 +
5036 +       kmutex_unlock(&elan_mutex);
5037 +       return  res;
5038 +}
5039 +
5040 +int 
5041 +elan_stats_clear_block (ELAN_STATS_IDX statidx)
5042 +{
5043 +       ELAN_STATS_STRUCT *target;
5044 +       int                   res=EINVAL;
5045 +
5046 +       kmutex_lock(&elan_mutex);
5047 +
5048 +       if ((target = elan_stats_find(statidx)) != NULL)
5049 +       {
5050 +               if ( target->ops->elan_stats_clear_block == NULL) 
5051 +               {
5052 +                       kmutex_unlock(&elan_mutex);
5053 +                       return  res;
5054 +               }
5055 +       
5056 +               res = target->ops->elan_stats_clear_block(target->arg);
5057 +       }
5058 +       kmutex_unlock(&elan_mutex);
5059 +       return  res;
5060 +}
5061 +
5062 +void
5063 +elan_stats_next_statidx(void)
5064 +{
5065 +       /* XXXXX need to put not in use check here incase we loop MRH */
5066 +       /* tho its a bigish loop :)                                   */
5067 +       elan_next_statidx++;
5068 +       if (!elan_next_statidx)
5069 +               elan_next_statidx++;
5070 +}
5071 +
5072 +int 
5073 +elan_stats_register (ELAN_STATS_IDX    *statidx, 
5074 +                       char              *block_name, 
5075 +                       uint               num_entries,
5076 +                       ELAN_STATS_OPS *ops,
5077 +                       void              *arg)
5078 +{
5079 +       ELAN_STATS_STRUCT *target;
5080 +
5081 +       kmutex_lock(&elan_mutex);
5082 +
5083 +       /* create it and add */
5084 +       KMEM_ALLOC(target, ELAN_STATS_STRUCT *, sizeof(ELAN_STATS_STRUCT), 1);
5085 +       if (target == NULL)
5086 +       {
5087 +               kmutex_unlock(&elan_mutex);
5088 +               return  ENOMEM;
5089 +       }
5090 +
5091 +       elan_stats_next_statidx();
5092 +
5093 +       *statidx = elan_next_statidx;
5094 +
5095 +       target->statidx     = elan_next_statidx;
5096 +       target->num_entries = num_entries;
5097 +       target->ops         = ops;
5098 +       target->arg         = arg;
5099 +       strcpy(target->block_name, block_name);
5100 +       
5101 +       list_add_tail(&target->node, &elan_stats_list);
5102 +
5103 +       kmutex_unlock(&elan_mutex);
5104 +       return  0;
5105 +}
5106 +
5107 +int
5108 +elan_stats_deregister (ELAN_STATS_IDX statidx)
5109 +{
5110 +       ELAN_STATS_STRUCT *target;
5111 +
5112 +       kmutex_lock(&elan_mutex);
5113 +       if ((target = elan_stats_find(statidx)) != NULL)
5114 +       {
5115 +
5116 +               list_del(&target->node);
5117 +               
5118 +               /* delete target entry */
5119 +               KMEM_FREE(target, sizeof(ELAN_STATS_STRUCT));
5120 +       }
5121 +       kmutex_unlock(&elan_mutex);
5122 +
5123 +       return  target == NULL ? EINVAL : 0;
5124 +}
5125 +
5126 +/*
5127 + * Local variables:
5128 + * c-file-style: "linux"
5129 + * End:
5130 + */
5131 Index: linux-2.4.21/drivers/net/qsnet/elan3/context.c
5132 ===================================================================
5133 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/context.c 2004-02-23 16:02:56.000000000 -0500
5134 +++ linux-2.4.21/drivers/net/qsnet/elan3/context.c      2005-06-01 23:12:54.565444272 -0400
5135 @@ -0,0 +1,2101 @@
5136 +/*
5137 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
5138 + * 
5139 + *    For licensing information please see the supplied COPYING file
5140 + *
5141 + */
5142 +
5143 +#ident "@(#)$Id: context.c,v 1.116.2.1 2004/11/12 14:24:18 mike Exp $"
5144 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/context.c,v $ */
5145 +
5146 +#include <qsnet/kernel.h>
5147 +#include <qsnet/autoconf.h>
5148 +#include <elan/elanmod.h>
5149 +#include <elan3/elanregs.h>
5150 +#include <elan3/elandev.h>
5151 +#include <elan3/elanvp.h>
5152 +#include <elan3/elan3mmu.h>
5153 +#include <elan3/elanctxt.h>
5154 +#include <elan3/elan3mmu.h>
5155 +#include <elan3/elandebug.h>
5156 +#include <elan3/urom_addrs.h>
5157 +#include <elan3/thread.h>
5158 +#include <elan3/vmseg.h>
5159 +#include <elan3/elan3ops.h>
5160 +#include <elan3/elansyscall.h>
5161 +/*
5162 + * Global variables configurable from /etc/system file
5163 + *     (OR /etc/sysconfigtab on Digital UNIX)
5164 + */
5165 +int ntrapped_threads   = 64;
5166 +int ntrapped_dmas      = 64;
5167 +int ntrapped_events    = E3_NonSysCntxQueueSize + 128;
5168 +int ntrapped_commands  = 64;
5169 +int noverflow_commands = 1024;
5170 +int nswapped_threads   = 64;
5171 +int nswapped_dmas      = 64;
5172 +
5173 +#define NUM_HALTOPS    8
5174 +
5175 +void *SwapListsLockInfo;
5176 +void *CmdLockInfo;
5177 +
5178 +static void HaltSwapContext (ELAN3_DEV *dev, void *arg);
5179 +
5180 +static char *OthersStateStrings[]  = {"others_running", "others_halting", "others_swapping", 
5181 +                                     "others_halting_more", "others_swapping_more", "others_swapped"};
5182 +
5183 +ELAN3_CTXT *
5184 +elan3_alloc (ELAN3_DEV *dev, int  kernel)
5185 +{
5186 +    ELAN3_CTXT    *ctxt;
5187 +    int           i;
5188 +    unsigned long flags;
5189 +
5190 +    PRINTF1 (DBG_DEVICE, DBG_FN, "elan3_alloc: %s\n", kernel ? "kernel" : "user");
5191 +
5192 +    KMEM_ZALLOC (ctxt, ELAN3_CTXT *, sizeof (ELAN3_CTXT), TRUE);
5193 +    
5194 +    if (ctxt == NULL)
5195 +       return (NULL);
5196 +
5197 +    elan_nullcap (&ctxt->Capability);
5198 +
5199 +    ctxt->Device      = dev;
5200 +    ctxt->OthersState = CTXT_OTHERS_SWAPPED;
5201 +    ctxt->RefCnt      = 1;
5202 +    ctxt->Position    = dev->Position;
5203 +
5204 +    if (kernel)
5205 +       ctxt->Status = CTXT_DETACHED | CTXT_SWAPPED_OUT | CTXT_KERNEL;
5206 +    else
5207 +       ctxt->Status = CTXT_DETACHED | CTXT_SWAPPED_OUT | CTXT_NO_LWPS;
5208 +
5209 +    ctxt->Elan3mmu = elan3mmu_alloc (ctxt);
5210 +
5211 +    kcondvar_init (&ctxt->Wait);
5212 +    kcondvar_init (&ctxt->CommandPortWait);
5213 +    kcondvar_init (&ctxt->LwpWait);
5214 +    kcondvar_init (&ctxt->HaltWait);
5215 +
5216 +    spin_lock_init (&ctxt->InputFaultLock);
5217 +
5218 +    kmutex_init (&ctxt->SwapListsLock);
5219 +    kmutex_init (&ctxt->CmdPortLock);
5220 +    kmutex_init (&ctxt->NetworkErrorLock);
5221 +    kmutex_init (&ctxt->CmdLock);
5222 +
5223 +    krwlock_init (&ctxt->VpLock);
5224 +
5225 +    KMEM_GETPAGES (ctxt->FlagPage, ELAN3_FLAGSTATS *, 1, TRUE);
5226 +    if (!ctxt->FlagPage)
5227 +       goto error;
5228 +    bzero ((char *) ctxt->FlagPage, PAGESIZE);
5229 +
5230 +    KMEM_ZALLOC (ctxt->CommandTraps, COMMAND_TRAP *,    sizeof (COMMAND_TRAP)    * ntrapped_commands, TRUE);
5231 +    if (!ctxt->CommandTraps)
5232 +       goto error;
5233 +
5234 +    KMEM_ZALLOC (ctxt->ThreadTraps,  THREAD_TRAP *,     sizeof (THREAD_TRAP)     * ntrapped_threads,  TRUE);
5235 +    if (!ctxt->ThreadTraps)
5236 +       goto error;
5237 +
5238 +    KMEM_ZALLOC (ctxt->DmaTraps,     DMA_TRAP *,        sizeof (DMA_TRAP)        * ntrapped_dmas,     TRUE);
5239 +    if (!ctxt->DmaTraps)
5240 +       goto error;
5241 +
5242 +    KMEM_ZALLOC (ctxt->EventCookies, EVENT_COOKIE *,    sizeof (EVENT_COOKIE)    * ntrapped_events,   TRUE);
5243 +    if (!ctxt->EventCookies)
5244 +       goto error;
5245 +
5246 +    KMEM_ZALLOC (ctxt->Commands,     CProcTrapBuf_BE *, sizeof (CProcTrapBuf_BE) * noverflow_commands,TRUE);
5247 +    if (!ctxt->Commands)
5248 +       goto error;
5249 +
5250 +    KMEM_ZALLOC (ctxt->SwapThreads,  E3_Addr *,         sizeof (E3_Addr)         * nswapped_threads,  TRUE);
5251 +    if (!ctxt->SwapThreads)
5252 +       goto error;
5253 +
5254 +    KMEM_ZALLOC (ctxt->SwapDmas,     E3_DMA_BE *,       sizeof (E3_DMA_BE)       * nswapped_dmas,     TRUE);
5255 +    if (!ctxt->SwapDmas)
5256 +       goto error;
5257 +
5258 +    /*
5259 +     * "slop" is defined as follows :
5260 +     *     number of entries REQUIRED to be left spare to consume all other traps
5261 +     *     up until the time that the context can be swapped out.
5262 +     *  
5263 +     * CommandTrapQ : 1 command issued by main + 1 issued by the thread processor per elan
5264 +     * ThreadTrapQ  : 2 from command + 2 input
5265 +     * DmaTrapQ     : 2 from command + 2 input
5266 +     * EventTrapQ   : 2 from command + 1 thread + 1 dma + 2 input + E3_NonSysCntxQueueSize
5267 +     */
5268 +    spin_lock_irqsave (&dev->IntrLock, flags);
5269 +    ELAN3_QUEUE_INIT (ctxt->CommandTrapQ, ntrapped_commands,  2);
5270 +    ELAN3_QUEUE_INIT (ctxt->ThreadTrapQ,  ntrapped_threads,   4);
5271 +    ELAN3_QUEUE_INIT (ctxt->DmaTrapQ,     ntrapped_dmas,      4);
5272 +    ELAN3_QUEUE_INIT (ctxt->EventCookieQ, ntrapped_events,    MIN(E3_NonSysCntxQueueSize + 6, ntrapped_events - 6));
5273 +    ELAN3_QUEUE_INIT (ctxt->CommandQ,     noverflow_commands, 0);
5274 +    ELAN3_QUEUE_INIT (ctxt->SwapThreadQ,  nswapped_threads,   0);
5275 +    ELAN3_QUEUE_INIT (ctxt->SwapDmaQ,     nswapped_dmas,      0);
5276 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5277 +
5278 +#if defined(DIGITAL_UNIX)
5279 +    /* Allocate the segelan for the command port */
5280 +    if (! kernel && elan3_segelan3_create (ctxt) == NULL)
5281 +    {
5282 +       elan3_detach(ctxt);
5283 +       elan3_free (ctxt);
5284 +       return ((ELAN3_CTXT *) NULL);
5285 +    }
5286 +#endif
5287 +
5288 +    /*
5289 +     * Initialise the Input Fault list 
5290 +     */
5291 +    spin_lock (&ctxt->InputFaultLock);
5292 +    for (i = 0; i < NUM_INPUT_FAULT_SAVE; i++)
5293 +       ctxt->InputFaults[i].Next = (i == (NUM_INPUT_FAULT_SAVE-1)) ? NULL : &ctxt->InputFaults[i+1];
5294 +    ctxt->InputFaultList = &ctxt->InputFaults[0];
5295 +    spin_unlock (&ctxt->InputFaultLock);
5296 +
5297 +    ReserveHaltOperations (dev, NUM_HALTOPS, TRUE);
5298 +    
5299 +    if ((ctxt->RouteTable = AllocateRouteTable (ctxt->Device, ELAN3_MAX_VPS)) == NULL)
5300 +    {
5301 +       PRINTF0 (DBG_DEVICE, DBG_FN, "elan3_alloc: cannot map route table\n");
5302 +       elan3_detach(ctxt);
5303 +       elan3_free (ctxt);
5304 +       return ((ELAN3_CTXT *) NULL);
5305 +    }  
5306 +
5307 +    return (ctxt);
5308 +
5309 +
5310 + error:
5311 +
5312 +    /* NOTE: must not call elan3_free() here -- it frees ctxt and all  */
5313 +    /* of its buffers; the per-resource cleanup below frees them itself */
5314 +    if (ctxt->FlagPage)
5315 +       KMEM_FREEPAGES ((void *) ctxt->FlagPage, 1);
5316 +    if (ctxt->CommandTraps)
5317 +       KMEM_FREE ((void *) ctxt->CommandTraps, sizeof (COMMAND_TRAP)    * ntrapped_commands);
5318 +    if (ctxt->ThreadTraps)
5319 +       KMEM_FREE ((void *) ctxt->ThreadTraps,  sizeof (THREAD_TRAP)     * ntrapped_threads);
5320 +    if (ctxt->DmaTraps)
5321 +       KMEM_FREE ((void *) ctxt->DmaTraps,     sizeof (DMA_TRAP)        * ntrapped_dmas);
5322 +    if (ctxt->EventCookies)
5323 +       KMEM_FREE ((void *) ctxt->EventCookies, sizeof (EVENT_COOKIE)    * ntrapped_events);
5324 +    if (ctxt->Commands)
5325 +       KMEM_FREE ((void *) ctxt->Commands,     sizeof (CProcTrapBuf_BE) * noverflow_commands);
5326 +    if (ctxt->SwapThreads)
5327 +       KMEM_FREE ((void *) ctxt->SwapThreads,  sizeof (E3_Addr)         * nswapped_threads);
5328 +    if (ctxt->SwapDmas)
5329 +       KMEM_FREE ((void *) ctxt->SwapDmas,     sizeof (E3_DMA_BE)       * nswapped_dmas);
5330 +
5331 +    kcondvar_destroy (&ctxt->Wait);
5332 +    kcondvar_destroy (&ctxt->CommandPortWait);
5333 +    kcondvar_destroy (&ctxt->LwpWait);
5334 +    kcondvar_destroy (&ctxt->HaltWait);
5335 +
5336 +    kmutex_destroy (&ctxt->SwapListsLock);
5337 +    kmutex_destroy (&ctxt->CmdLock);
5338 +    kmutex_destroy (&ctxt->NetworkErrorLock);
5339 +    spin_lock_destroy  (&ctxt->InputFaultLock);
5340 +
5341 +    krwlock_destroy (&ctxt->VpLock);
5342 +
5343 +    KMEM_FREE (ctxt, sizeof (ELAN3_CTXT));
5344 +
5345 +    return (NULL);
5346 +}
5347 +
5348 +void
5349 +elan3_free (ELAN3_CTXT *ctxt)
5350 +{
5351 +    ELAN3_DEV     *dev = ctxt->Device;
5352 +    NETERR_FIXUP *nef;
5353 +    
5354 +    PRINTF1 (ctxt, DBG_FN, "elan3_free: %p \n", ctxt);
5355 +   
5356 +    elan3_removevp (ctxt, ELAN3_INVALID_PROCESS);                      /* Remove any virtual process mappings */
5357 +
5358 +#if defined(DIGITAL_UNIX)
5359 +    WaitForContext (ctxt);                                     /* wait for all references to this context to go away */
5360 +#endif
5361 +
5362 +    if (ctxt->RouteTable)
5363 +       FreeRouteTable (dev, ctxt->RouteTable);
5364 +    ctxt->RouteTable = NULL;
5365 +
5366 +    elan3mmu_free (ctxt->Elan3mmu);                            /* free of our Elan3mmu  */
5367 +
5368 +    if (ctxt->Private)                                         /* Call back to "user" to free off  */
5369 +       ELAN3_OP_FREE_PRIVATE (ctxt);                           /* private data */
5370 +
5371 +#if defined(DIGITAL_UNIX)
5372 +    if (! CTXT_IS_KERNEL(ctxt))
5373 +       elan3_segelan3_destroy (ctxt);                          /* Unmap the command port from the users address space. */
5374 +#endif
5375 +   
5376 +    ReleaseHaltOperations (dev, NUM_HALTOPS);
5377 +
5378 +    if (ctxt->Input0Resolver)
5379 +       CancelNetworkErrorResolver (ctxt->Input0Resolver);
5380 +
5381 +    if (ctxt->Input1Resolver)
5382 +       CancelNetworkErrorResolver (ctxt->Input1Resolver);
5383 +
5384 +    while ((nef = ctxt->NetworkErrorFixups) != NULL)
5385 +    {
5386 +       ctxt->NetworkErrorFixups = nef->Next;
5387 +
5388 +       CompleteNetworkErrorFixup (ctxt, nef, ESRCH);
5389 +    }
5390 +
5391 +    KMEM_FREEPAGES ((void *) ctxt->FlagPage, 1);
5392 +
5393 +    KMEM_FREE ((void *) ctxt->CommandTraps, sizeof (COMMAND_TRAP)    * ntrapped_commands);
5394 +    KMEM_FREE ((void *) ctxt->ThreadTraps,  sizeof (THREAD_TRAP)     * ntrapped_threads);
5395 +    KMEM_FREE ((void *) ctxt->DmaTraps,     sizeof (DMA_TRAP)        * ntrapped_dmas);
5396 +    KMEM_FREE ((void *) ctxt->EventCookies, sizeof (EVENT_COOKIE)    * ntrapped_events);
5397 +    KMEM_FREE ((void *) ctxt->Commands,     sizeof (CProcTrapBuf_BE) * noverflow_commands);
5398 +    KMEM_FREE ((void *) ctxt->SwapThreads,  sizeof (E3_Addr)         * nswapped_threads);
5399 +    KMEM_FREE ((void *) ctxt->SwapDmas,     sizeof (E3_DMA_BE)       * nswapped_dmas);
5400 +
5401 +    kcondvar_destroy (&ctxt->Wait);
5402 +    kcondvar_destroy (&ctxt->CommandPortWait);
5403 +    kcondvar_destroy (&ctxt->LwpWait);
5404 +    kcondvar_destroy (&ctxt->HaltWait);
5405 +
5406 +    kmutex_destroy (&ctxt->SwapListsLock);
5407 +    kmutex_destroy (&ctxt->CmdLock);
5408 +    kmutex_destroy (&ctxt->NetworkErrorLock);
5409 +    spin_lock_destroy  (&ctxt->InputFaultLock);
5410 +
5411 +    krwlock_destroy (&ctxt->VpLock);
5412 +
5413 +    KMEM_FREE (ctxt, sizeof (ELAN3_CTXT));
5414 +}
5415 +
5416 +int 
5417 +elan3_doattach(ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
5418 +{
5419 +    unsigned long pgnum = ((cap->cap_mycontext & MAX_ROOT_CONTEXT_MASK) * sizeof (E3_CommandPort)) / PAGE_SIZE;
5420 +    unsigned long pgoff = ((cap->cap_mycontext & MAX_ROOT_CONTEXT_MASK) * sizeof (E3_CommandPort)) & (PAGE_SIZE-1);
5421 +    ELAN3_DEV     *dev   = ctxt->Device;
5422 +    int           res   = ESUCCESS;
5423 +    unsigned long flags;
5424 +
5425 +    /* Map in the command port for this context */
5426 +    if (MapDeviceRegister (dev, ELAN3_BAR_COMMAND_PORT, &ctxt->CommandPage, pgnum * PAGE_SIZE, PAGE_SIZE, &ctxt->CommandPageHandle) != ESUCCESS)
5427 +    {
5428 +       PRINTF0 (ctxt, DBG_FN, "elan3_doattach: MapDeviceRegister failed");
5429 +       return (EINVAL);
5430 +    }
5431 +
5432 +    ctxt->CommandPort = ctxt->CommandPage + pgoff;
5433 +
5434 +    spin_lock_irqsave (&dev->IntrLock, flags);
5435 +
5436 +    res = 0;
5437 +    if (ELAN3_DEV_CTX_TABLE(dev,cap->cap_mycontext) != NULL)
5438 +       res = EBUSY;
5439 +    else
5440 +    {
5441 +       if ((res = elan3mmu_attach (ctxt->Device, cap->cap_mycontext, ctxt->Elan3mmu, 
5442 +                                   ctxt->RouteTable->Table, ctxt->RouteTable->Size-1)) == 0)
5443 +       {
5444 +           ELAN3_DEV_CTX_TABLE(dev,cap->cap_mycontext) = ctxt;
5445 +           ctxt->Capability                            = *cap;
5446 +       }
5447 +    }
5448 +
5449 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5450 +
5451 +    if (res == ESUCCESS)
5452 +       elan3_swapin (ctxt, CTXT_DETACHED);
5453 +    else 
5454 +    {
5455 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
5456 +       ctxt->CommandPage = (ioaddr_t) 0; 
5457 +       ctxt->CommandPort = (ioaddr_t) 0;
5458 +    }
5459 +
5460 +    return (res);
5461 +}
5462 +
5463 +void
5464 +elan3_destroy_callback( void * args, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
5465 +{
5466 +    if (map == NULL) 
5467 +    {
5468 +       /* the cap is being destroyed */
5469 +       PRINTF0 (NULL, DBG_VP, "elan3_destroy_callback: the cap is being destroyed \n");
5470 +    }
5471 +    else
5472 +    {
5473 +       /* the map is being destroyed */
5474 +       PRINTF0 (NULL, DBG_VP, "elan3_destroy_callback: the map is being destroyed \n");
5475 +    }
5476 +}
5477 +
5478 +int
5479 +elan3_attach (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
5480 +{
5481 +    ELAN3_DEV *dev = ctxt->Device;
5482 +    int type;
5483 +    int res;
5484 +
5485 +    switch (type = elan3_validate_cap (dev, cap, ELAN_USER_ATTACH))
5486 +    {
5487 +    case ELAN_CAP_OK:
5488 +       /* nothing */
5489 +       break;
5490 +
5491 +    case ELAN_CAP_RMS:
5492 +       if ((res = elan_attach_cap(cap, dev->Devinfo.dev_rail, ctxt, elan3_destroy_callback)) != 0)
5493 +           return res;
5494 +       break;
5495 +
5496 +    default:
5497 +       return (EINVAL);
5498 +    }
5499 +
5500 +    if (((res = elan3_doattach(ctxt,cap)) != ESUCCESS) && (type == ELAN_CAP_RMS))
5501 +       elan_detach_cap(cap, dev->Devinfo.dev_rail);
5502 +
5503 +    return res;
5504 +}
5505 +
5506 +void
5507 +elan3_detach ( ELAN3_CTXT *ctxt )
5508 +{
5509 +    ELAN3_DEV   *dev                 = ctxt->Device;
5510 +    int need_to_call_elanmod_detach = 0;
5511 +    unsigned long flags;
5512 +
5513 +    PRINTF1 (ctxt, DBG_FN, "elan3_detach: %p \n", ctxt );
5514 +    
5515 +    if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
5516 +    {
5517 +       PRINTF0 (ctxt, DBG_FN, "elan3_detach: context not attached \n");
5518 +       return ;
5519 +    }
5520 +
5521 +    /* must you be in the ctx_table ?? */
5522 +    
5523 +    switch (ctxt->Capability.cap_type & ELAN_CAP_TYPE_MASK)
5524 +    {
5525 +    case ELAN_CAP_TYPE_BLOCK:
5526 +    case ELAN_CAP_TYPE_CYCLIC:
5527 +    {
5528 +       if (ELAN3_SYSTEM_CONTEXT (ctxt->Capability.cap_mycontext))
5529 +           return ;
5530 +
5531 +       if (! (ctxt->Capability.cap_type & ELAN_CAP_TYPE_HWTEST))
5532 +           need_to_call_elanmod_detach = 1;
5533 +
5534 +       break;
5535 +    }  
5536 +    default:
5537 +       return ;
5538 +    }
5539 +
5540 +    elan3_swapout (ctxt, CTXT_DETACHED);
5541 +
5542 +    spin_lock_irqsave (&dev->IntrLock, flags);
5543 +
5544 +    elan3mmu_detach (dev, ctxt->Capability.cap_mycontext);
5545 +    ELAN3_DEV_CTX_TABLE(dev,ctxt->Capability.cap_mycontext) = NULL;
5546 +
5547 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5548 +
5549 +    if (ctxt->CommandPage)
5550 +    {
5551 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
5552 +       ctxt->CommandPage = (ioaddr_t) 0;
5553 +    }
5554 +    
5555 +    if (need_to_call_elanmod_detach) 
5556 +       elan_detach_cap(&ctxt->Capability, dev->Devinfo.dev_rail);
5557 +
5558 +    elan_nullcap (&ctxt->Capability);
5559 +
5560 +}
5561 +
5562 +void
5563 +elan3_dodetach ( ELAN3_CTXT *ctxt )
5564 +{
5565 +    ELAN3_DEV     *dev = ctxt->Device;
5566 +    unsigned long flags;
5567 +
5568 +    PRINTF1 (ctxt, DBG_FN, "elan3_dodetach: %p \n", ctxt );
5569 +    
5570 +    if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
5571 +    {
5572 +       PRINTF0 (ctxt, DBG_FN, "elan3_dodetach: context not attached \n");
5573 +       return ;
5574 +    }
5575 +
5576 +    elan3_swapout (ctxt, CTXT_DETACHED);
5577 +
5578 +    spin_lock_irqsave (&dev->IntrLock, flags);
5579 +
5580 +    elan3mmu_detach (dev, ctxt->Capability.cap_mycontext);
5581 +    ELAN3_DEV_CTX_TABLE(dev,ctxt->Capability.cap_mycontext) = NULL;
5582 +
5583 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5584 +
5585 +    if (ctxt->CommandPage)
5586 +    {
5587 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
5588 +       ctxt->CommandPage = (ioaddr_t) 0;
5589 +    }
5590 +    
5591 +    elan_nullcap (&ctxt->Capability);
5592 +}
5593 +
5594 +void
5595 +elan3_swapin (ELAN3_CTXT *ctxt, int reason)
5596 +{
5597 +    ELAN3_DEV *dev = ctxt->Device;
5598 +    unsigned long flags;
5599 +
5600 +    spin_lock_irqsave (&dev->IntrLock, flags);
5601 +
5602 +    ASSERT (ctxt->Status & CTXT_SWAPPED_REASONS);
5603 +
5604 +    PRINTF3 (ctxt, DBG_SWAP, "elan3_swapin: status %x State %s reason %x\n", 
5605 +            ctxt->Status, OthersStateStrings[ctxt->OthersState], reason);
5606 +
5607 +    while (ctxt->Status & CTXT_SWAPPING_OUT)                   /* In transition */
5608 +       kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);
5609 +
5610 +    if (reason == CTXT_NO_LWPS && ctxt->LwpCount++ != 0)       /* Added another LWP */
5611 +    {
5612 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
5613 +       return;
5614 +    }
5615 +
5616 +    if ((ctxt->Status & ~reason) & CTXT_SWAPPED_REASONS)
5617 +       ctxt->Status &= ~reason;
5618 +    else
5619 +    {
5620 +       ASSERT (ctxt->Status & CTXT_SWAPPED_OUT);
5621 +       ASSERT (ctxt->OthersState == CTXT_OTHERS_SWAPPED);
5622 +       
5623 +       /*
5624 +        * Will not be swapped out anymore, so ask the "user" to perform 
5625 +        * any swapping in it needs before letting the context run again.
5626 +        */
5627 +       
5628 +       ctxt->Status &= ~(CTXT_SWAPPED_OUT | CTXT_QUEUES_EMPTY | reason);
5629 +       ctxt->OthersState = CTXT_OTHERS_RUNNING;
5630 +
5631 +       if (ctxt->Input0Trap.State == CTXT_STATE_OK && ctxt->Input1Trap.State == CTXT_STATE_OK)
5632 +           SetInputterStateForContext (ctxt, 0, NULL);
5633 +       
5634 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
5635 +    }
5636 +
5637 +    PRINTF2 (ctxt, DBG_SWAP, "elan3_swapin: all done - status %x state %s\n",
5638 +            ctxt->Status, OthersStateStrings[ctxt->OthersState]);
5639 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5640 +}
5641 +
5642 +
5643 +void
5644 +elan3_swapout (ELAN3_CTXT *ctxt, int reason)
5645 +{
5646 +    ELAN3_DEV     *dev = ctxt->Device;
5647 +    int           cansleep;
5648 +    unsigned long flags;
5649 +
5650 +    spin_lock_irqsave (&dev->IntrLock, flags);
5651 +
5652 +    PRINTF3 (ctxt, DBG_SWAP, "elan3_swapout: status %x state %s reason %x\n", 
5653 +            ctxt->Status, OthersStateStrings[ctxt->OthersState], reason);
5654 +
5655 +    if (reason == CTXT_NO_LWPS)
5656 +    {
5657 +       if (--ctxt->LwpCount != 0)                              /* Still other LWPs running */
5658 +       {
5659 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
5660 +           return;
5661 +       }
5662 +
5663 +       kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);            /* Wakeup anyone waiting on LwpCount */
5664 +    }
5665 +    
5666 +    ctxt->Status |= reason;
5667 +    
5668 +    while (ctxt->Status & CTXT_SWAPPING_OUT)                   /* wait for someone else to finish swapping */
5669 +       kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);         /* out */
5670 +
5671 +    if (ctxt->Status & CTXT_SWAPPED_OUT)
5672 +    {
5673 +       if (reason == CTXT_NO_LWPS)                             /* Wakeup other thread waiting on LWP exit */
5674 +           kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);
5675 +       
5676 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
5677 +       return;
5678 +    }
5679 +    
5680 +    /*
5681 +     * mark the context as swapping out.
5682 +     */
5683 +    ctxt->Status |= CTXT_SWAPPING_OUT;
5684 +    
5685 +    if (reason != CTXT_FIXUP_NETERR)
5686 +    {
5687 +       /*
5688 +        * Stop all of the lwps.
5689 +        */
5690 +       while (ctxt->LwpCount)
5691 +       {
5692 +           kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);           /* Wake up any lwps */
5693 +           kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);             /* then wait for them to enter elan3_swapout */
5694 +       }
5695 +    }
5696 +    
5697 +    StartSwapoutContext (ctxt, 0, NULL);
5698 +    for (;;)
5699 +    {
5700 +       PRINTF0 (ctxt, DBG_SWAP, "elan3_swapout: HandleExceptions\n");
5701 +
5702 +       cansleep = (HandleExceptions(ctxt, &flags) == ESUCCESS);
5703 +
5704 +       PRINTF2 (ctxt, DBG_SWAP, "elan3_swapout: OthersState=%d cansleep=%d\n", ctxt->OthersState, cansleep);
5705 +
5706 +       if (ctxt->OthersState == CTXT_OTHERS_SWAPPED)
5707 +           break;
5708 +
5709 +       if (cansleep)
5710 +           kcondvar_wait (&ctxt->Wait, &dev->IntrLock, &flags);
5711 +    }
5712 +    PRINTF0 (ctxt, DBG_SWAP, "elan3_swapout: swapped out\n");
5713 +    
5714 +    ASSERT (ELAN3_QUEUE_EMPTY (ctxt->DmaTrapQ));
5715 +    ASSERT (ELAN3_QUEUE_EMPTY (ctxt->ThreadTrapQ));
5716 +
5717 +    ctxt->Status |=  CTXT_SWAPPED_OUT;
5718 +    ctxt->Status &= ~CTXT_SWAPPING_OUT;
5719 +
5720 +    kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);
5721 +
5722 +    PRINTF2 (ctxt, DBG_SWAP, "elan3_swapout: all done - status %x state %s\n",
5723 +            ctxt->Status, OthersStateStrings[ctxt->OthersState]);
5724 +
5725 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5726 +}
5727 +
5728 +int
5729 +elan3_pagefault (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSave, int npages)
5730 +{
5731 +    E3_Addr     elanAddr = FaultSave->s.FaultAddress;
5732 +    int                writeable;
5733 +    int                res;
5734 +
5735 +    PRINTF3 (ctxt, DBG_FAULT, "elan3_pagefault: elanAddr %08x FSR %08x : %s\n", elanAddr, FaultSave->s.FSR.Status,
5736 +            FaultSave->s.FSR.s.ProtFault ? "protection fault" : "pte invalid");
5737 +    
5738 +    /* Look at the FSR to determine the fault type etc */
5739 +    
5740 +    if (FaultSave->s.FSR.Status == 0)                          /* this is a target abort/parity error, so look */
5741 +    {                                                          /* at the PCI config space registers to determine  */
5742 +       ElanBusError (ctxt->Device);
5743 +       return (EFAULT);                                        
5744 +    }
5745 +    
5746 +    if (FaultSave->s.FSR.s.AlignmentErr)                       /* Alignment errors are always fatal. */
5747 +    {
5748 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: Alignment error\n");
5749 +       return (EFAULT);
5750 +    }
5751 +
5752 +    if (FaultSave->s.FSR.s.WalkBadData)                                /* Memory ECC error during a walk */
5753 +    {
5754 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: Memory ECC error during walk\n");
5755 +       return (EFAULT);
5756 +    }
5757 +
5758 +    if (!FaultSave->s.FSR.s.ProtFault &&                       /* DMA memory type changed */
5759 +       !FaultSave->s.FSR.s.Walking)
5760 +    {
5761 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: DMA memory type changed\n");
5762 +       return (EFAULT);
5763 +    }
5764 +
5765 +    ASSERT (FaultSave->s.FSR.s.ProtFault ?                     /* protection errors, should always have a valid pte */
5766 +           (!FaultSave->s.FSR.s.Walking || !(FaultSave->s.FSR.s.Level==3) ||  FaultSave->s.FSR.s.FaultPte == ELAN3_ET_PTE) : 
5767 +           FaultSave->s.FSR.s.FaultPte == ELAN3_ET_INVALID);   /* otherwise it must be an invalid pte */
5768 +
5769 +    /*
5770 +     * Determine whether to fault for a 'write' from the access permissions we need, and not
5771 +     * from the access type (WrAcc).
5772 +     */
5773 +    writeable = (FaultSave->s.FSR.s.AccTypePerm & (1 << FSR_WritePermBit));
5774 +
5775 +    /* Check that we have the right permissions for this access type. */
5776 +    if ((res = elan3mmu_checkperm (ctxt->Elan3mmu, (elanAddr&PAGEMASK), npages*PAGESIZE, FaultSave->s.FSR.s.AccTypePerm)) != 0)
5777 +    {
5778 +       PRINTF1 (ctxt, DBG_FAULT, "elan3_pagefault: %s\n", (res == ENOMEM) ? "no protection mapping" : "protection error");
5779 +       
5780 +       return (res);
5781 +    }
5782 +
5783 +    res = LoadElanTranslation (ctxt, (elanAddr&PAGEMASK), npages*PAGESIZE, FaultSave->s.FSR.s.ProtFault, writeable);
5784 +
5785 +    if (res == ESUCCESS)
5786 +    {
5787 +       BumpStat (ctxt->Device, PageFaults);
5788 +       BumpUserStat (ctxt, PageFaults);
5789 +    }
5790 +
5791 +    PRINTF1 (ctxt, DBG_FAULT, "elan3_pagefault: -> %d\n", res);
5792 +
5793 +    return (res);
5794 +}
5795 +
5796 +void
5797 +elan3_block_inputter (ELAN3_CTXT *ctxt, int block)
5798 +{
5799 +    ELAN3_DEV *dev = ctxt->Device;
5800 +    unsigned long flags;
5801 +
5802 +    spin_lock_irqsave (&dev->IntrLock, flags);
5803 +    
5804 +    if (block)
5805 +       ctxt->Status |= CTXT_USER_FILTERING;
5806 +    else
5807 +       ctxt->Status &= ~CTXT_USER_FILTERING;
5808 +
5809 +    if (ctxt->Capability.cap_mycontext != ELAN_CAP_UNINITIALISED)
5810 +       SetInputterStateForContext (ctxt, 0, NULL);
5811 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5812 +}
5813 +
5814 +int
5815 +FixupNetworkErrors (ELAN3_CTXT *ctxt, unsigned long *flags)
5816 +{
5817 +    ELAN3_DEV   *dev = ctxt->Device;
5818 +    NETERR_FIXUP *nef;
5819 +
5820 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
5821 +    
5822 +    if (ctxt->NetworkErrorFixups == NULL)
5823 +       return (ESUCCESS);
5824 +
5825 +    spin_unlock_irqrestore (&dev->IntrLock, *flags);
5826 +    
5827 +    kmutex_lock (&ctxt->NetworkErrorLock);                     /* single thread while fixing up errors */
5828 +    elan3_swapout (ctxt, CTXT_FIXUP_NETERR);
5829 +
5830 +    spin_lock_irqsave (&dev->IntrLock, *flags);
5831 +    while ((nef = ctxt->NetworkErrorFixups) != NULL)
5832 +    {
5833 +       ctxt->NetworkErrorFixups = nef->Next;
5834 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5835 +
5836 +       if (ELAN3_OP_FIXUP_NETWORK_ERROR (ctxt, nef) == OP_FAILED)
5837 +           CompleteNetworkErrorFixup (ctxt, nef, EINVAL);
5838 +
5839 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5840 +    }
5841 +    spin_unlock_irqrestore (&dev->IntrLock, *flags);
5842 +
5843 +    elan3_swapin (ctxt, CTXT_FIXUP_NETERR);
5844 +
5845 +    kmutex_unlock (&ctxt->NetworkErrorLock);
5846 +    spin_lock_irqsave (&dev->IntrLock, *flags);
5847 +    return (EAGAIN);
5848 +}
5849 +
5850 +int
5851 +CompleteNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER *rvp)
5852 +{
5853 +    int state;
5854 +
5855 +    switch (rvp->Status)
5856 +    {
5857 +    case ESUCCESS:
5858 +       /*
5859 +        * the item still existed at the source - if it's a wait for EOP transaction
5860 +        * then the source will retry - otherwise the remote event will have been
5861 +        * cleared and we should execute it
5862 +        */
5863 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: ESUCCESS zero WaitForEopTransaction %p\n", trap->WaitForEopTransaction);
5864 +
5865 +       state = trap->WaitForEopTransaction ? CTXT_STATE_OK : CTXT_STATE_NEEDS_RESTART;
5866 +
5867 +       break;
5868 +
5869 +    case ESRCH:        
5870 +       /*
5871 +        * the item was not found at the source - we should always execute the transaction
5872 +        * since it will never be resent
5873 +        */
5874 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: ESRCH execute WaitForEopTransaction %p\n", trap->WaitForEopTransaction);
5875 +       state = CTXT_STATE_NEEDS_RESTART;
5876 +       break;
5877 +
5878 +    default:                                                   /* other errors */
5879 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: %d\n", rvp->Status);
5880 +       if (ElanException (ctxt, EXCEPTION_NETWORK_ERROR, INPUT_PROC, trap, &rvp) == OP_HANDLED)
5881 +           state = CTXT_STATE_NEEDS_RESTART;
5882 +       else
5883 +           state = CTXT_STATE_OK;
5884 +       break;
5885 +    }
5886 +
5887 +    FreeNetworkErrorResolver (rvp);
5888 +
5889 +    return (state);
5890 +}
5891 +
5892 +int
5893 +HandleExceptions (ELAN3_CTXT *ctxt, unsigned long *flags)
5894 +{
5895 +    ELAN3_DEV        *dev    = ctxt->Device;
5896 +    THREAD_TRAP      tproc;
5897 +    DMA_TRAP         dproc;
5898 +    NETERR_RESOLVER *rvp;
5899 +    int                     state;
5900 +
5901 +    if (ctxt->Status & CTXT_COMMAND_OVERFLOW_ERROR)
5902 +    {
5903 +       ctxt->Status &= ~CTXT_COMMAND_OVERFLOW_ERROR;
5904 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5905 +       ElanException (ctxt, EXCEPTION_COMMAND_OVERFLOW, COMMAND_PROC, NULL);
5906 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5907 +       return (EAGAIN);
5908 +    }
5909 +    
5910 +    if (! ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ))
5911 +    {
5912 +       /* XXXX: unmap translations to the command port */
5913 +
5914 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5915 +       ResolveCProcTrap (ctxt);
5916 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5917 +       return (EAGAIN);
5918 +    }
5919 +    
5920 +    if (ctxt->Input0Trap.State == CTXT_STATE_TRAPPED)
5921 +    {
5922 +       ctxt->Input0Trap.State = CTXT_STATE_RESOLVING;
5923 +
5924 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5925 +       ResolveIProcTrap (ctxt, &ctxt->Input0Trap, &ctxt->Input0Resolver);
5926 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5927 +       return (EAGAIN);
5928 +    }
5929 +
5930 +    if (ctxt->Input1Trap.State == CTXT_STATE_TRAPPED)
5931 +    {
5932 +       ctxt->Input1Trap.State = CTXT_STATE_RESOLVING;
5933 +
5934 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5935 +       ResolveIProcTrap (ctxt, &ctxt->Input1Trap, &ctxt->Input1Resolver);
5936 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5937 +       return (EAGAIN);
5938 +    }
5939 +
5940 +    if ((rvp = ctxt->Input0Resolver) != NULL && rvp->Completed)
5941 +    {
5942 +       ASSERT (ctxt->Input0Trap.State == CTXT_STATE_NETWORK_ERROR);
5943 +
5944 +       ctxt->Input0Resolver = NULL;
5945 +       
5946 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5947 +       state = CompleteNetworkErrorResolver (ctxt, &ctxt->Input0Trap, rvp);
5948 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5949 +       ctxt->Input0Trap.State = state;
5950 +       return (EAGAIN);
5951 +    }
5952 +
5953 +    if ((rvp = ctxt->Input1Resolver) != NULL && rvp->Completed)
5954 +    {
5955 +       ASSERT (ctxt->Input1Trap.State == CTXT_STATE_NETWORK_ERROR);
5956 +
5957 +       ctxt->Input1Resolver = NULL;
5958 +       
5959 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5960 +       state = CompleteNetworkErrorResolver (ctxt,&ctxt->Input1Trap, rvp);
5961 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5962 +       ctxt->Input1Trap.State = state;
5963 +       return (EAGAIN);
5964 +    }
5965 +
5966 +    if (NextTProcTrap (ctxt, &tproc))
5967 +    {
5968 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5969 +       ResolveTProcTrap (ctxt, &tproc);
5970 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5971 +       return (EAGAIN);
5972 +    }
5973 +    ctxt->Status &= ~CTXT_THREAD_QUEUE_FULL;
5974 +
5975 +    if (NextDProcTrap (ctxt, &dproc))
5976 +    {
5977 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
5978 +       ResolveDProcTrap (ctxt, &dproc);
5979 +       spin_lock_irqsave (&dev->IntrLock, *flags);
5980 +       return (EAGAIN);
5981 +    }
5982 +    ctxt->Status &= ~CTXT_DMA_QUEUE_FULL;
5983 +
5984 +    /* Handle all event interrupts. */
5985 +    if (! ELAN3_QUEUE_EMPTY (ctxt->EventCookieQ))
5986 +    {
5987 +       while (! ELAN3_QUEUE_EMPTY (ctxt->EventCookieQ))
5988 +       {
5989 +           E3_uint32 cookie = *ELAN3_QUEUE_FRONT (ctxt->EventCookieQ, ctxt->EventCookies);
5990 +
5991 +           ELAN3_QUEUE_REMOVE (ctxt->EventCookieQ);
5992 +
5993 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
5994 +           if (ELAN3_OP_EVENT (ctxt, cookie, OP_LWP) != OP_DEFER)
5995 +               spin_lock_irqsave (&dev->IntrLock, *flags);
5996 +           else
5997 +           {
5998 +               spin_lock_irqsave (&dev->IntrLock, *flags);     /* place the cookie back on the queue. */
5999 +                                                               /* note we place it on the front to ensure  */
6000 +               ELAN3_QUEUE_ADD_FRONT (ctxt->EventCookieQ);     /* event ordering. */
6001 +               *ELAN3_QUEUE_FRONT (ctxt->EventCookieQ, ctxt->EventCookies) = cookie;
6002 +           }
6003 +       }
6004 +       return (EAGAIN);
6005 +    }
6006 +    ctxt->Status &= ~CTXT_EVENT_QUEUE_FULL;
6007 +
6008 +    if (! ELAN3_QUEUE_EMPTY (ctxt->SwapDmaQ))
6009 +    {
6010 +       while (! ELAN3_QUEUE_EMPTY (ctxt->SwapDmaQ))
6011 +       {
6012 +           E3_DMA_BE DmaDesc = *ELAN3_QUEUE_FRONT (ctxt->SwapDmaQ, ctxt->SwapDmas);
6013 +
6014 +           ELAN3_QUEUE_REMOVE (ctxt->SwapDmaQ);
6015 +
6016 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6017 +           RestartDmaDesc (ctxt, &DmaDesc);
6018 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6019 +       }
6020 +       return (EAGAIN);
6021 +    }
6022 +    
6023 +    if (! ELAN3_QUEUE_EMPTY (ctxt->SwapThreadQ))
6024 +    {
6025 +       while (! ELAN3_QUEUE_EMPTY (ctxt->SwapThreadQ))
6026 +       {
6027 +           E3_Addr StackPointer = *ELAN3_QUEUE_FRONT (ctxt->SwapThreadQ, ctxt->SwapThreads);
6028 +
6029 +           ELAN3_QUEUE_REMOVE (ctxt->SwapThreadQ);
6030 +
6031 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6032 +           ReissueStackPointer (ctxt, StackPointer);
6033 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6034 +       }
6035 +       return (EAGAIN);
6036 +    }
6037 +    
6038 +    switch (ctxt->OthersState)
6039 +    {
6040 +    case CTXT_OTHERS_SWAPPING:
6041 +       if (! (ctxt->Status & CTXT_OTHERS_REASONS))
6042 +           ctxt->OthersState = CTXT_OTHERS_RUNNING;
6043 +       else
6044 +           ctxt->OthersState = CTXT_OTHERS_SWAPPED;
6045 +
6046 +       PRINTF1 (ctxt, DBG_LWP, "HandleExceptions: OthersState : swapping -> %s\n", OthersStateStrings[ctxt->OthersState]);
6047 +           
6048 +       break;
6049 +
6050 +    case CTXT_OTHERS_SWAPPING_MORE:
6051 +       ctxt->OthersState = CTXT_OTHERS_HALTING_MORE;
6052 +       QueueHaltOperation (dev, 0, NULL, INT_DProcHalted | INT_TProcHalted, HaltSwapContext, ctxt);
6053 +
6054 +       PRINTF1 (ctxt, DBG_LWP, "HandleExceptions: OthersState : swapping_more -> %s\n", OthersStateStrings[ctxt->OthersState]);
6055 +       break;
6056 +    }
6057 +    return (ESUCCESS);
6058 +}
6059 +
6060 +int
6061 +RestartContext (ELAN3_CTXT *ctxt, unsigned long *flags)
6062 +{
6063 +    ELAN3_DEV *dev = ctxt->Device;
6064 +    int       res;
6065 +
6066 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6067 +
6068 +    PRINTF1 (ctxt, DBG_LWP, "RestartContext: status %x\n", ctxt->Status);
6069 +
6070 +    if (! (ctxt->Status & CTXT_OTHERS_REASONS))
6071 +    {
6072 +       if (! ELAN3_QUEUE_FRONT_EMPTY (ctxt->CommandTrapQ) || ! ELAN3_QUEUE_EMPTY(ctxt->CommandQ))
6073 +       {
6074 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6075 +           RestartCProcTrap (ctxt);
6076 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6077 +           return (EAGAIN);
6078 +       }
6079 +
6080 +       if (ctxt->Input0Trap.State == CTXT_STATE_NEEDS_RESTART)
6081 +       {
6082 +           ctxt->Input0Trap.State = CTXT_STATE_EXECUTING;
6083 +
6084 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6085 +           res = RestartIProcTrap (ctxt, &ctxt->Input0Trap);
6086 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6087 +           
6088 +           if (res == ESUCCESS)
6089 +               ctxt->Input0Trap.State = CTXT_STATE_OK;
6090 +           else
6091 +               ctxt->Input0Trap.State = CTXT_STATE_NEEDS_RESTART;
6092 +           return (EAGAIN);
6093 +       }
6094 +
6095 +       if (ctxt->Input1Trap.State == CTXT_STATE_NEEDS_RESTART)
6096 +       {
6097 +           ctxt->Input1Trap.State = CTXT_STATE_EXECUTING;
6098 +
6099 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6100 +           res = RestartIProcTrap (ctxt, &ctxt->Input1Trap);
6101 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6102 +
6103 +           if (res == ESUCCESS)
6104 +               ctxt->Input1Trap.State = CTXT_STATE_OK;
6105 +           else
6106 +               ctxt->Input1Trap.State = CTXT_STATE_NEEDS_RESTART;
6107 +           return (EAGAIN);
6108 +       }
6109 +
6110 +       if (SetEventsNeedRestart (ctxt))
6111 +       {
6112 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6113 +           RestartSetEvents (ctxt);
6114 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6115 +           return (EAGAIN);
6116 +       }
6117 +
6118 +       SetInputterStateForContext (ctxt, 0, NULL);
6119 +
6120 +       if (TProcNeedsRestart (ctxt))
6121 +       {
6122 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6123 +
6124 +           LoadCommandPortTranslation (ctxt);
6125 +           RestartTProcItems (ctxt);
6126 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6127 +           return (EAGAIN);
6128 +       }
6129 +
6130 +       if (DProcNeedsRestart (ctxt))
6131 +       {
6132 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6133 +           RestartDProcItems (ctxt);
6134 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6135 +           return (EAGAIN);
6136 +       }
6137 +
6138 +       if (ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ))
6139 +       {
6140 +           PRINTF1 (ctxt, DBG_LWP, "RestartContext: setting Command Flag at %p to 0\n", &ctxt->FlagPage->CommandFlag);
6141 +
6142 +           ctxt->FlagPage->CommandFlag = 0;
6143 +
6144 +           if (ctxt->Status & CTXT_WAITING_COMMAND)
6145 +           {
6146 +               PRINTF0 (ctxt, DBG_LWP, "RestartContext: waking up threads waiting for commandport\n");
6147 +               
6148 +               ctxt->Status &= ~CTXT_WAITING_COMMAND;
6149 +               
6150 +               kcondvar_wakeupall (&ctxt->CommandPortWait, &dev->IntrLock);
6151 +           }
6152 +       }
6153 +    }
6154 +
6155 +    return (ESUCCESS);
6156 +}
6157 +
6158 +static void
6159 +HaltSwapContext (ELAN3_DEV *dev, void *arg)
6160 +{
6161 +    ELAN3_CTXT        *ctxt    = (ELAN3_CTXT *) arg;
6162 +    int                      SysCntx = (ctxt->Capability.cap_mycontext & SYS_CONTEXT_BIT);
6163 +    E3_ThreadQueue_BE thread;
6164 +    E3_DMA_BE         dma;
6165 +    sdramaddr_t       FPtr, BPtr;
6166 +    sdramaddr_t              Base, Top;
6167 +    u_int           *runCount;
6168 +    unsigned long     flags;
6169 +
6170 +    spin_lock_irqsave (&dev->IntrLock, flags);
6171 +
6172 +    ASSERT (ctxt->OthersState == CTXT_OTHERS_HALTING || ctxt->OthersState == CTXT_OTHERS_HALTING_MORE);
6173 +
6174 +    PRINTF2 (ctxt, DBG_SWAP, "HaltSwapContext: status %x state %s\n", ctxt->Status, OthersStateStrings[ctxt->OthersState]);
6175 +
6176 +    if (! (ctxt->Status & CTXT_OTHERS_REASONS))
6177 +    {
6178 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING_MORE)
6179 +       {
6180 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
6181 +
6182 +           if (--(*runCount) == 0)
6183 +               SetSchedStatusRegister (dev, 0, NULL);
6184 +       }
6185 +       ctxt->OthersState = CTXT_OTHERS_RUNNING;
6186 +       
6187 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: no more reason to swap -> others_running\n");
6188 +
6189 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
6190 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
6191 +       return;
6192 +    }
6193 +
6194 +    /*
6195 +     * Capture all other processors since we're not being responsive to 
6196 +     * the command processor interrupt.
6197 +     */
6198 +    CAPTURE_CPUS();
6199 +
6200 +    if (SysCntx)
6201 +    {
6202 +       FPtr = read_reg32 (dev, TProc_SysCntx_FPtr);
6203 +       BPtr = read_reg32 (dev, TProc_SysCntx_BPtr);
6204 +       Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0]);
6205 +       Top  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[E3_SysCntxQueueSize-1]);
6206 +    }
6207 +    else
6208 +    {
6209 +       FPtr  = read_reg32 (dev, TProc_NonSysCntx_FPtr);
6210 +       BPtr  = read_reg32 (dev, TProc_NonSysCntx_BPtr);
6211 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0]);
6212 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[E3_NonSysCntxQueueSize-1]);
6213 +    }
6214 +
6215 +    while (FPtr != BPtr)
6216 +    {
6217 +       elan3_sdram_copyq_from_sdram (dev, FPtr, (void *) &thread, sizeof (E3_ThreadQueue_BE));
6218 +       
6219 +       if (thread.s.Context == ctxt->Capability.cap_mycontext)
6220 +       {
6221 +           if (ELAN3_QUEUE_FULL (ctxt->SwapThreadQ))
6222 +               break;
6223 +           
6224 +           *ELAN3_QUEUE_BACK(ctxt->SwapThreadQ, ctxt->SwapThreads) = thread.s.Thread;
6225 +           ELAN3_QUEUE_ADD (ctxt->SwapThreadQ);
6226 +           
6227 +           /*
6228 +            * Remove this entry from the queue by replacing it with 
6229 +            * the "magic" thread value.
6230 +            *
6231 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
6232 +            * to mark the approriate run queue as empty.
6233 +            * to mark the appropriate run queue as empty.
6234 +           thread.s.Context = SysCntx ? SYS_CONTEXT_BIT : 0;
6235 +           thread.s.Thread  = VanishingStackPointer;
6236 +
6237 +           elan3_sdram_copyq_to_sdram (dev, (void *) &thread, FPtr, sizeof (E3_ThreadQueue_BE));
6238 +       }
6239 +       
6240 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_ThreadQueue);
6241 +    }
6242 +
6243 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
6244 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
6245 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
6246 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
6247 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
6248 +
6249 +    if (SysCntx)
6250 +    {
6251 +       FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
6252 +       BPtr  = read_reg32 (dev, DProc_SysCntx_BPtr);
6253 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
6254 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
6255 +    }
6256 +    else
6257 +    {
6258 +       FPtr  = read_reg32 (dev, DProc_NonSysCntx_FPtr);
6259 +       BPtr  = read_reg32 (dev, DProc_NonSysCntx_BPtr);
6260 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]);
6261 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[E3_NonSysCntxQueueSize-1]);
6262 +    }
6263 +
6264 +    while (FPtr != BPtr)
6265 +    {
6266 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
6267 +           
6268 +       if (dma.s.dma_u.s.Context == ctxt->Capability.cap_mycontext)
6269 +       {
6270 +           if (ELAN3_QUEUE_FULL (ctxt->SwapDmaQ))
6271 +               break;
6272 +           
6273 +           *ELAN3_QUEUE_BACK (ctxt->SwapDmaQ, ctxt->SwapDmas) = dma;
6274 +           ELAN3_QUEUE_ADD (ctxt->SwapDmaQ);
6275 +
6276 +           /*
6277 +            * Remove the DMA from the queue by replacing it with one with
6278 +            * zero size and no events.
6279 +            *
6280 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
6281 +            * to mark the appropriate run queue as empty.
6282 +            */
6283 +           dma.s.dma_type            = ((SysCntx ? SYS_CONTEXT_BIT : 0) << 16);
6284 +           dma.s.dma_size            = 0;
6285 +           dma.s.dma_source          = (E3_Addr) 0;
6286 +           dma.s.dma_dest            = (E3_Addr) 0;
6287 +           dma.s.dma_destCookieVProc = (E3_Addr) 0;
6288 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
6289 +           dma.s.dma_srcCookieVProc  = (E3_Addr) 0;
6290 +
6291 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
6292 +       }
6293 +
6294 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
6295 +    }
6296 +
6297 +    /*
6298 +     * Release the other processors now before signalling the LWP.
6299 +     */
6300 +    RELEASE_CPUS();
6301 +
6302 +    if (! ELAN3_QUEUE_FULL (ctxt->SwapDmaQ) && !ELAN3_QUEUE_FULL (ctxt->SwapThreadQ))
6303 +    {
6304 +       /*
6305 +        * We've completely emptied the elan queues of items in this
6306 +        * context, so we now mark it as fully swapped out.
6307 +        */
6308 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING_MORE)
6309 +       {
6310 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
6311 +           
6312 +           if (--(*runCount) == 0)
6313 +               SetSchedStatusRegister (dev, 0, NULL);
6314 +           
6315 +       }
6316 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: queues emptied -> others_swapping\n");
6317 +
6318 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING;
6319 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
6320 +    }
6321 +    else
6322 +    {
6323 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING)
6324 +       {
6325 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
6326 +           
6327 +           if ((*runCount)++ == 0)
6328 +               SetSchedStatusRegister (dev, 0, NULL);
6329 +       }
6330 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: queues not emptied -> others_swapping_more\n");
6331 +
6332 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING_MORE;
6333 +       kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
6334 +    }
6335 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6336 +}
6337 +
6338 +void
6339 +UnloadCommandPageMapping (ELAN3_CTXT *ctxt)
6340 +{
6341 +    /*
6342 +     * Unload the Elan translations,  and flag the main processor to stall after 
6343 +     * issuing its next command.
6344 +     */
6345 +    if (ctxt->CommandPageMapping != NULL && (ctxt->Status & CTXT_COMMAND_MAPPED_ELAN))
6346 +    {
6347 +       ELAN3MMU_RGN *rgn = elan3mmu_rgnat_main (ctxt->Elan3mmu, ctxt->CommandPageMapping);
6348 +       
6349 +       if (rgn != NULL)
6350 +       {
6351 +           E3_Addr eaddr = rgn->rgn_ebase + (ctxt->CommandPageMapping - rgn->rgn_mbase);
6352 +           
6353 +           PRINTF1 (ctxt, DBG_INTR, "UnloadCommandPageMapping: unmapping command port at addr %08x\n", eaddr);
6354 +           
6355 +           elan3mmu_unload (ctxt->Elan3mmu, eaddr, PAGESIZE, PTE_UNLOAD);
6356 +       }
6357 +       
6358 +       ctxt->Status &= ~CTXT_COMMAND_MAPPED_ELAN;
6359 +    }
6360 +}
6361 +
6362 +void
6363 +StartSwapoutContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp)
6364 +{
6365 +    ELAN3_DEV   *dev     = ctxt->Device;
6366 +    int                SysCntx = (ctxt->Capability.cap_mycontext & SYS_CONTEXT_BIT);
6367 +    u_int      *runCount;
6368 +
6369 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6370 +
6371 +    PRINTF2 (ctxt, DBG_SWAP, "StartSwapoutContext: Status %x OthersState %s\n",
6372 +            ctxt->Status, OthersStateStrings [ctxt->OthersState]);
6373 +    /*
6374 +     * Disable the inputters,  we should already have a reason for it.
6375 +     */
6376 +    SetInputterStateForContext (ctxt, Pend, Maskp);
6377 +
6378 +    UnloadCommandPageMapping (ctxt);
6379 +
6380 +    /* 
6381 +     * Flag main processor to stall after issuing next command
6382 +     */
6383 +    PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: setting Command Flag at %p to 1\n", &ctxt->FlagPage->CommandFlag);
6384 +
6385 +    ctxt->FlagPage->CommandFlag = 1;
6386 +
6387 +    PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: OthersState=%d\n", ctxt->OthersState);
6388 +
6389 +    /*
6390 +     * And queue a haltop to stop the queues and clear it out.
6391 +     */
6392 +    switch (ctxt->OthersState)
6393 +    {
6394 +    case CTXT_OTHERS_RUNNING:
6395 +       PRINTF0 (ctxt, DBG_SWAP, "StartSwapoutContext: -> others_halting\n");
6396 +
6397 +       ctxt->OthersState = CTXT_OTHERS_HALTING;
6398 +
6399 +       QueueHaltOperation (dev, Pend, Maskp, INT_DProcHalted | INT_TProcHalted, HaltSwapContext, ctxt);
6400 +       break;
6401 +       
6402 +    case CTXT_OTHERS_SWAPPING:
6403 +       PRINTF0 (ctxt, DBG_SWAP, "StartSwapoutContext: -> others_swapping_more\n");
6404 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING_MORE;
6405 +
6406 +       runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
6407 +           
6408 +       if ((*runCount)++ == 0)
6409 +           SetSchedStatusRegister (dev, Pend, Maskp);
6410 +       break;
6411 +    default:
6412 +       PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: OthersState=%d\n", ctxt->OthersState);
6413 +       break;
6414 +    }
6415 +}
6416 +
6417 +#if defined(DIGITAL_UNIX)
6418 +/* temporary tweaks to priority bump */
6419 +int lwp_do_prio = 1;
6420 +int lwp_do_nxm = 1;
6421 +int lwp_prio = BASEPRI_USER-1;
6422 +#elif defined(LINUX)
6423 +/* This is the default nice level for the helper LWP */
6424 +int LwpNice = -1;
6425 +#endif
6426 +
6427 +int
6428 +elan3_lwp (ELAN3_CTXT *ctxt)
6429 +{
6430 +    ELAN3_DEV     *dev = ctxt->Device;
6431 +    int                  res;
6432 +    unsigned long flags;
6433 +
6434 +    PRINTF1 (ctxt, DBG_LWP, "elan3_lwp: started, context 0x%x\n", ctxt->Capability.cap_mycontext);
6435 +
6436 +#if defined(DIGITAL_UNIX)
6437 +    {
6438 +        thread_t mythread = current_thread();
6439 +        if (lwp_do_prio && (lwp_do_nxm || !IS_NXM_TASK(mythread->task)))
6440 +        {
6441 +            mythread->priority = mythread->sched_pri = lwp_prio;
6442 +            mythread->max_priority = BASEPRI_HIGHEST;
6443 +            (void) thread_priority(mythread, lwp_prio, 0, 1);
6444 +        }
6445 +    }
6446 +#elif defined(LINUX)
6447 +    {
6448 +       /* Do the priority trick for the helper LWP so that it
6449 +        * runs in preference to the user threads which may be
6450 +        * burning CPU waiting for a trap to be fixed up
6451 +        */
6452 +#ifdef NO_O1_SCHED
6453 +       if (LwpNice >= -20 && LwpNice < 20)
6454 +           current->nice = LwpNice;
6455 +#else
6456 +       set_user_nice(current, LwpNice);
6457 +#endif
6458 +    }
6459 +#endif
6460 +
6461 +    elan3_swapin (ctxt, CTXT_NO_LWPS);
6462 +
6463 +    spin_lock_irqsave (&dev->IntrLock, flags);
6464 +
6465 +    /* If we're swapped out, and not detached (or exiting) then wait until we're swapped back in */
6466 +    /* since otherwise we could "spin" forever continually calling elan3_lwp() */
6467 +    if ((ctxt->Status & CTXT_SWAPPED_REASONS) && ! (ctxt->Status & (CTXT_DETACHED|CTXT_EXITING)))
6468 +       kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags);
6469 +
6470 +    for (;;)
6471 +    {
6472 +#if defined(DIGITAL_UNIX)
6473 +        if (thread_should_halt(current_thread()) || 
6474 +            CURSIG_CHECK(task_to_proc(current_thread()->task), u.np_uthread))
6475 +       {
6476 +           PRINTF1 (ctxt, DBG_LWP, "elan3_lwp: exiting on %s\n", 
6477 +                    thread_should_halt(current_thread()) ? "halt" : "signal");
6478 +            break;
6479 +       }
6480 +#endif
6481 +
6482 +       if (ctxt->Status & CTXT_SWAPPED_REASONS)
6483 +       {
6484 +           PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting on swapped reasons\n");
6485 +           break;
6486 +       }
6487 +
6488 +       if (! (ctxt->inhibit))
6489 +       {
6490 +           if (FixupNetworkErrors (ctxt, &flags) == ESUCCESS &&
6491 +               HandleExceptions (ctxt, &flags) == ESUCCESS &&
6492 +               RestartContext (ctxt, &flags) == ESUCCESS)
6493 +               {
6494 +                   if (kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags) == 0)
6495 +                   {
6496 +                       PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting by kcondvar_wait_sig()\n");
6497 +                       break;
6498 +                   }
6499 +               }
6500 +       }
6501 +       else
6502 +       {
6503 +           printk("elan3_lwp :: skipping as inhibited\n");
6504 +           if (kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags) == 0)
6505 +           {
6506 +               PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting by kcondvar_wait_sig()\n");
6507 +               break;
6508 +           }
6509 +       }
6510 +
6511 +    }
6512 +
6513 +    /* Return EINVAL to elan3_syscall_lwp() when we want it to exit */
6514 +    res = (ctxt->Status & (CTXT_DETACHED|CTXT_EXITING)) ? EINVAL : 0;
6515 +
6516 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6517 +    
6518 +    elan3_swapout (ctxt, CTXT_NO_LWPS);
6519 +
6520 +    spin_lock_irqsave (&dev->IntrLock, flags);
6521 +    FixupNetworkErrors (ctxt, &flags);
6522 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6523 +
6524 +    return (res);
6525 +}
6526 +
6527 +void
6528 +SetInputterStateForContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp)
6529 +{
6530 +    ELAN3_DEV  *dev          = NULL;
6531 +    int        new_disabled = 0;
6532 +    int               ctxnum;
6533 +
6534 +    ASSERT (ctxt != NULL);
6535 +    dev  = ctxt->Device;
6536 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6537 +
6538 +    new_disabled = (ctxt->Input0Trap.State != CTXT_STATE_OK ||
6539 +                   ctxt->Input1Trap.State != CTXT_STATE_OK ||
6540 +                   (ctxt->Status & CTXT_INPUTTER_REASONS) != 0);
6541 +    
6542 +
6543 +    ctxnum   = ctxt->Capability.cap_mycontext;
6544 +
6545 +#ifndef __lock_lint  
6546 +    PRINTF2 (ctxt , DBG_IPROC, "SetInputterState: ctxnum %x %s attached\n", ctxnum, ctxt->Disabled ? "disabled " : "");
6547 +#endif /* __lock_lint */
6548 +        
6549 +    if (ctxt->Disabled != new_disabled)
6550 +    {
6551 +       PRINTF2 (ctxt, DBG_IPROC, "SetInputterState: ctxnum %x change %s\n", ctxnum, new_disabled ? "enabled to disabled" : "disabled to enabled");
6552 +       
6553 +       ctxt->Disabled = new_disabled;
6554 +
6555 +       /* synchronize the context filter for this context */
6556 +       elan3mmu_set_context_filter (dev, ctxnum, new_disabled, Pend, Maskp);
6557 +    }
6558 +}
6559 +
6560 +int
6561 +CheckCommandQueueFlushed (ELAN3_CTXT *ctxt, E3_uint32 cflags, int how, unsigned long *flags)
6562 +{
6563 +    ELAN3_DEV *dev    = ctxt->Device;
6564 +    int       delay  = 1;
6565 +    int i, SeenComQueueEmpty;
6566 +
6567 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6568 +    ASSERT (cflags != DmaComQueueNotEmpty || dev->HaltDmaDequeueCount != 0);
6569 +
6570 +    /*
6571 +     * Flush the command processor queues and poll the queue to see if it empties.
6572 +     */
6573 +    if (dev->FlushCommandCount++ == 0)
6574 +       SetSchedStatusRegister (dev, 0, NULL);
6575 +
6576 +    /* 
6577 +     * Ensure previous writes have been flushed through the write buffers
6578 +     */
6579 +    wmb(); mmiob();
6580 +
6581 +    /*
6582 +     * If the command processor traps,  or it's taking too long to observe
6583 +     * the queue as empty,  then we need to force the interrupt handler to 
6584 +     * run for us.  So queue a halt operation for the dma processor.
6585 +     */
6586 +    SeenComQueueEmpty = !(read_reg32 (dev, ComQueueStatus) & cflags);
6587 +    for (i = 20; i > 0 || (how & ISSUE_COMMAND_CANT_WAIT); i--)
6588 +    {
6589 +       if (SeenComQueueEmpty || (read_reg32 (dev, Exts.InterruptReg) & (INT_CProc | INT_ComQueue)))
6590 +           break;
6591 +       
6592 +       mb();
6593 +       DELAY (delay);
6594 +
6595 +       if ((delay <<= 1) == 0) delay = 1;
6596 +
6597 +       SeenComQueueEmpty = !(read_reg32 (dev, ComQueueStatus) & cflags);
6598 +    }
6599 +
6600 +    if (--dev->FlushCommandCount == 0)
6601 +       SetSchedStatusRegister (dev, 0, NULL);
6602 +
6603 +    /*
6604 +     * If we've seen the command queue that we're interested in with nothing in it
6605 +     * and the command processor has not trapped then the commands we've
6606 +     * issued have been successfully processed.
6607 +     */
6608 +    if (SeenComQueueEmpty && ! (read_reg32 (dev, Exts.InterruptReg) & (INT_CProc | INT_ComQueue)))
6609 +    {
6610 +       PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: observed dma queue empty and command proc not trapped\n");
6611 +
6612 +       if (cflags == DmaComQueueNotEmpty && --dev->HaltDmaDequeueCount == 0)
6613 +           SetSchedStatusRegister (dev, 0, NULL);
6614 +
6615 +       return (ISSUE_COMMAND_OK);
6616 +    }
6617 +
6618 +    if ((how & ISSUE_COMMAND_CANT_WAIT) != 0)
6619 +       return (ISSUE_COMMAND_WAIT);
6620 +    
6621 +    /*
6622 +     * Halt the dma processor and wait for it to halt,  if the command we've issued has
6623 +     * trapped then the interrupt handler will have moved it to the context structure.
6624 +     */
6625 +    PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: waiting for dproc to halt\n");
6626 +    QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, WakeupLwp, ctxt);
6627 +    while (! ctxt->Halted)
6628 +    {
6629 +       PRINTF1 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: waiting for Halted - %d\n", ctxt->Halted);
6630 +
6631 +       kcondvar_wait (&ctxt->HaltWait, &dev->IntrLock, flags);
6632 +
6633 +       PRINTF1 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: woken for Halted - %d\n", ctxt->Halted);
6634 +    }
6635 +    ctxt->Halted = 0;
6636 +    
6637 +    PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: dproc halted, checking for trap\n");
6638 +    
6639 +    if (cflags == DmaComQueueNotEmpty && --dev->HaltDmaDequeueCount == 0)
6640 +       SetSchedStatusRegister (dev, 0, NULL);
6641 +
6642 +    return (ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ) ? ISSUE_COMMAND_OK : ISSUE_COMMAND_TRAPPED);
6643 +}
6644 +
6645 +int
6646 +WaitForCommandPort (ELAN3_CTXT *ctxt)
6647 +{
6648 +    ELAN3_DEV     *dev = ctxt->Device;
6649 +    int                  res;
6650 +    unsigned long flags;
6651 +
6652 +    spin_lock_irqsave (&dev->IntrLock, flags);
6653 +
6654 +    if (ctxt->Status & CTXT_DETACHED)
6655 +       res = EINVAL;
6656 +    else 
6657 +    {
6658 +       if (! ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
6659 +       {
6660 +           ctxt->Status |= CTXT_WAITING_COMMAND;
6661 +           if (CTXT_IS_KERNEL(ctxt))
6662 +               kcondvar_wait (&ctxt->CommandPortWait, &dev->IntrLock, &flags);
6663 +           else 
6664 +               kcondvar_waitsig (&ctxt->CommandPortWait, &dev->IntrLock, &flags);
6665 +       }
6666 +       
6667 +       res = (!ELAN3_QUEUE_EMPTY(ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS)) ? EAGAIN : 0;
6668 +    }
6669 +       
6670 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6671 +
6672 +    return (res);
6673 +}
6674 +
6675 +static char *
6676 +CommandName (int offset)
6677 +{
6678 +    switch (offset)
6679 +    {
6680 +    case offsetof (E3_CommandPort, PutDma):    return ("PutDma");
6681 +    case offsetof (E3_CommandPort, GetDma):    return ("GetDma");
6682 +    case offsetof (E3_CommandPort, RunThread): return ("RunThread");
6683 +    case offsetof (E3_CommandPort, WaitEvent0):        return ("WaitEvent0");
6684 +    case offsetof (E3_CommandPort, WaitEvent1):        return ("WaitEvent1");
6685 +    case offsetof (E3_CommandPort, SetEvent):  return ("SetEvent");
6686 +    default:                                   return ("Bad Command");
6687 +    }
6688 +}
6689 +
6690 +int
6691 +IssueCommand (ELAN3_CTXT *ctxt, unsigned cmdoff, E3_Addr value, int cflags)
6692 +{
6693 +    ELAN3_DEV     *dev = ctxt->Device;
6694 +    int                  res;
6695 +    unsigned long flags;
6696 +
6697 +    spin_lock_irqsave (&dev->IntrLock, flags);
6698 +
6699 +    if ((! (cflags & ISSUE_COMMAND_FOR_CPROC) && !ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) || (ctxt->Status & CTXT_OTHERS_REASONS))
6700 +    {
6701 +       /*
6702 +        * Cannot issue commands for non-cproc traps if command port is trapped, 
6703 +        * nor if the dma/thread trap queues are full, or we're swapping out
6704 +        */
6705 +       PRINTF2 (ctxt, DBG_CMD, "IssueCommand: %s %08x -> ISSUE_COMMAND_RETRY\n",
6706 +                CommandName (cmdoff), value);
6707 +
6708 +       res = ISSUE_COMMAND_RETRY;
6709 +    }
6710 +    else
6711 +    {
6712 +       PRINTF2 (ctxt, DBG_CMD, "IssueCommand: %s %08x -> ISSUE_COMMAND_OK\n",
6713 +                CommandName (cmdoff), value);
6714 +
6715 +       mb();                                                   /* ensure writes to main memory completed */
6716 +       writel (value, ctxt->CommandPort + cmdoff);             /* issue command */
6717 +       mmiob();                                                /* and flush through IO writes */
6718 +
6719 +       res = ISSUE_COMMAND_OK;
6720 +    }
6721 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6722 +    
6723 +    return (res);
6724 +}
6725 +
6726 +int
6727 +IssueDmaCommand (ELAN3_CTXT *ctxt, E3_Addr value, void *item, int how)
6728 +{
6729 +    ELAN3_DEV     *dev    = ctxt->Device;
6730 +    int                  res;
6731 +    unsigned long flags;
6732 +
6733 +    /*
6734 +     * Since we may be issuing a command that could trap, and we're interested in
6735 +     * the outcome, the command port trap resolving code must be locked out.
6736 +     */
6737 +    kmutex_lock (&ctxt->CmdLock);
6738 +    spin_lock_irqsave (&dev->IntrLock, flags);
6739 +
6740 +    if ((! (how & ISSUE_COMMAND_FOR_CPROC) && !ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) || (ctxt->Status & CTXT_OTHERS_REASONS))
6741 +    {
6742 +       PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p] -> ISSUE_COMMAND_RETRY\n", value, item);
6743 +
6744 +       /*
6745 +        * Cannot issue commands for non-cproc traps if command port is trapped, 
6746 +        * nor if the dma/thread trap queues are full, or we're swapping out
6747 +        */
6748 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
6749 +       kmutex_unlock (&ctxt->CmdLock);
6750 +       return (ISSUE_COMMAND_RETRY);
6751 +    }
6752 +    
6753 +    ASSERT (item == NULL || ctxt->CommandPortItem == NULL);
6754 +
6755 +    /*
6756 +     * Stop the DMA processor from removing entries from the 
6757 +     * command port, and force the command processor to do this.
6758 +     * This means that if a trap occurs then it will be the command
6759 +     * processor that traps.
6760 +     */
6761 +    if (dev->HaltDmaDequeueCount++ == 0)
6762 +       SetSchedStatusRegister (dev, 0, NULL);
6763 +
6764 +    PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p]\n", value, item);
6765 +
6766 +    /*
6767 +     * Always issue the DMA to the 'write' command,  since we've asserted HaltDmaDequeue
6768 +     * the command processor will read the descriptor and transfer it to the run queue. 
6769 +     * The command processor looks at the dma_direction field to determine whether it is
6770 +     * a read or a write and whether to alter the dma_source of the descriptor on the run 
6771 +     * queue
6772 +     */
6773 +    mb();                                                      /* ensure writes to main memory completed */
6774 +    writel (value, ctxt->CommandPort + offsetof (E3_CommandPort, PutDma));
6775 +    mmiob();                                                   /* and flush through IO writes */
6776 +    
6777 +    res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, how, &flags);
6778 +
6779 +    if (res == ISSUE_COMMAND_TRAPPED)
6780 +    {
6781 +       PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p] -> ISSUE_COMMAND_TRAPPED\n", value, item);
6782 +       /*
6783 +        * Remember the item we're issuing so that if the command port traps the item will not
6784 +        * get freed off until the descriptor has been read after the command trap has been fixed
6785 +        * up.
6786 +        */
6787 +       if (item != NULL)
6788 +           ctxt->CommandPortItem = item;
6789 +    }
6790 +
6791 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6792 +    kmutex_unlock (&ctxt->CmdLock);
6793 +
6794 +    return (res);
6795 +}
6796 +
6797 +int
6798 +WaitForDmaCommand (ELAN3_CTXT *ctxt, void *item, int how)
6799 +{
6800 +    ELAN3_DEV     *dev = ctxt->Device;
6801 +    int           res;
6802 +    unsigned long flags;
6803 +
6804 +    spin_lock_irqsave (&dev->IntrLock, flags);
6805 +
6806 +    res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, how, &flags);
6807 +
6808 +    if (res == ISSUE_COMMAND_TRAPPED && item != NULL)
6809 +       ctxt->CommandPortItem = item;
6810 +
6811 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6812 +    
6813 +    return (res);
6814 +}
6815 +
6816 +void
6817 +FixupEventTrap (ELAN3_CTXT *ctxt, int proc, void *trap, E3_uint32 TrapType, E3_FaultSave_BE *FaultSaveArea, int flags)
6818 +{
6819 +    ASSERT (! CTXT_IS_KERNEL (ctxt));
6820 +
6821 +    /*
6822 +     * This code re-issues the part of the set event that trapped.
6823 +     */
6824 +    switch (TrapType)
6825 +    {
6826 +    case MI_ChainedEventError:
6827 +       ElanException (ctxt, EXCEPTION_CHAINED_EVENT, proc, trap, FaultSaveArea->s.EventAddress);
6828 +       break;
6829 +       
6830 +
6831 +    case MI_SetEventReadWait:
6832 +       /*
6833 +        * Fault occurred on the read for the event location. Just re-issue
6834 +        * setevent using EventAddress in E3_FaultSave
6835 +        */
6836 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_SetEventReadWait: re-issuing setevent %08x\n", 
6837 +                FaultSaveArea->s.EventAddress);
6838 +       
6839 +       ReissueEvent (ctxt, (E3_Addr) FaultSaveArea->s.EventAddress, flags);
6840 +       break;
6841 +
6842 +    case MI_DoSetEvent:
6843 +    {
6844 +       /*
6845 +        * Fault occurred because the block write of a block copy event trapped.
6846 +        * Must grab the event type, source and dest then simulate the block copy and then
6847 +        * perform the set. Once the block copy is started the event location cannot be read
6848 +        * again.
6849 +        */
6850 +       E3_Event *EventPtr  = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
6851 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
6852 +       
6853 +       /*
6854 +        * Check that the event has the block copy bit
6855 +        * set in it,  since we couldn't trap here if it
6856 +        * didn't
6857 +        */
6858 +       if ((EventType & EV_TYPE_BCOPY) != EV_TYPE_BCOPY)
6859 +       {
6860 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_DoSetEvent: Unexpected type=%x\n", EventType);
6861 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6862 +           break;
6863 +       }
6864 +       
6865 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_DoSetEvent: RunEventType %x\n", EventType);
6866 +
6867 +       if (RunEventType (ctxt, FaultSaveArea, EventType))
6868 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6869 +
6870 +       break;
6871 +    }
6872 +    
6873 +    case MI_ThreadUpdateNonSysCntxBack:
6874 +    case MI_ThreadUpdateSysCntxBack:
6875 +    {
6876 +       /*
6877 +        * Fault occurred because the block write of a block copy event trapped.
6878 +        * Must grab the event type, source and dest then simulate the block copy and then
6879 +        * run the thread. Once the block copy is started the event location cannot be read
6880 +        * again.
6881 +        */
6882 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
6883 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
6884 +
6885 +       /*
6886 +        * Check for the correct EventPtr type
6887 +        */
6888 +       if ((EventType & (EV_TYPE_MASK_THREAD|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_THREAD))
6889 +       {
6890 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_ThreadUpdateCntx0Back: Unexpected type=%x for setevent trap. Should be thread\n", EventType);
6891 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6892 +           break;
6893 +       }
6894 +       
6895 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_ThreadUpdateCntx0Back: RunEventType %x\n", EventType);
6896 +       if (RunEventType (ctxt, FaultSaveArea, EventType))
6897 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6898 +       break;
6899 +    }
6900 +    
6901 +    case MI_EventIntUpdateBPtr:
6902 +    {
6903 +       /*
6904 +        * Fault occurred because the block write of a block copy event trapped.
6905 +        * Must grab the event type, source and dest then simulate the block copy and then
6906 +        * run the dma. Once the block copy is started the event location cannot be read
6907 +        * again.
6908 +        */
6909 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
6910 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
6911 +
6912 +       /*
6913 +        * Check for the correct EventPtr type
6914 +        */
6915 +       if ((EventType & (EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_EVIRQ))
6916 +       {
6917 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_EventIntUpdateBPtr: Unexpected type=%x\n", EventType);
6918 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6919 +           break;
6920 +       }
6921 +
6922 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_EventIntUpdateBPtr: RunEventType %x\n", EventType);
6923 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
6924 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6925 +       break;
6926 +    }
6927 +    
6928 +    case MI_RunDmaDesc:
6929 +    {
6930 +       /*
6931 +        * Fault occurred because the block write of a block copy event trapped.
6932 +        * Must grab the event type, source and dest then simulate the block copy and then
6933 +        * run the dma. Once the block copy is started the event location cannot be read
6934 +        * again.
6935 +        */
6936 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
6937 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
6938 +
6939 +       /*
6940 +        * Check for the correct EventPtr type
6941 +        */
6942 +       if ((EventType & (EV_TYPE_MASK_DMA|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_DMA))
6943 +       {
6944 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_RunDmaDesc: Unexpected type=%x\n", EventType);
6945 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6946 +           break;
6947 +       }
6948 +
6949 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_RunDmaDesc: RunEventType %x\n", EventType);
6950 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
6951 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6952 +       break;
6953 +    }
6954 +    
6955 +    case MI_WaitForCntxDmaDescRead:
6956 +    case MI_WaitForNonCntxDmaDescRead:
6957 +       /*
6958 +        * Fault occurred on the read of the dma descriptor. Run dma using the
6959 +        * Fault Address in FaultSave.
6960 +        */
6961 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_WaitForCntxDmaDescRead: re-issue dma at %08x\n", FaultSaveArea->s.FaultAddress);
6962 +       
6963 +       RestartDmaPtr (ctxt, FaultSaveArea->s.FaultAddress);
6964 +       break;
6965 +    
6966 +    case MI_FinishedSetEvent:
6967 +       /*
6968 +        * Fault occurred because the block write of a block copy event trapped.
6969 +        * Simulate the block copy.
6970 +        */
6971 +       if (SimulateBlockCopy (ctxt, FaultSaveArea->s.EventAddress))
6972 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6973 +       break;
6974 +       
6975 +    case MI_BlockCopyEvent:
6976 +    case MI_BlockCopyWaitForReadData:
6977 +    {
6978 +       /*
6979 +        * Fault occurred on the read or write of the data for a block copy
6980 +        * event. Simulate the block copy using EventAddress in E3_FaultSave. Must also sample
6981 +        * the event type and then perform a run.
6982 +        */
6983 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
6984 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
6985 +
6986 +       PRINTF0 (ctxt, DBG_EVENT, "FixupEventTrap: MI_BlockCopyWaitForReadData: BCopy read fault in BCopy event. Simulating BCopy.\n");
6987 +       
6988 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
6989 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
6990 +       break;
6991 +    }
6992 +    
6993 +    case MI_EventQueueOverflow:
6994 +    case MI_ThreadQueueOverflow:
6995 +    case MI_DmaQueueOverflow:
6996 +       /* XXXX: should handle queue overflow */
6997 +       PRINTF0 (ctxt, DBG_EVENT, "FixupEventTrap: Queue overflow\n");
6998 +
6999 +       ElanException (ctxt, EXCEPTION_QUEUE_OVERFLOW, proc, trap, FaultSaveArea, TrapType);
7000 +       break;
7001 +
7002 +    default:
7003 +       ElanException (ctxt, EXCEPTION_BUS_ERROR, proc, trap, FaultSaveArea, TrapType);
7004 +       break;
7005 +    }
7006 +}
7007 +
7008 +int
7009 +SimulateBlockCopy (ELAN3_CTXT *ctxt, E3_Addr EventAddress)
7010 +{
7011 +    E3_Addr  SourcePtrElan;
7012 +    E3_Addr  DestPtrElan;
7013 +    unsigned DataType;
7014 +    int      i;
7015 +
7016 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
7017 +    {
7018 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
7019 +
7020 +       ElanException (ctxt, EXCEPTION_FAULTED, EVENT_PROC, NULL, EventAddress);
7021 +       return (TRUE);
7022 +    }
7023 +
7024 +    SourcePtrElan = ELAN3_OP_LOAD32 (ctxt, EventAddress + offsetof (E3_BlockCopyEvent, ev_Source));
7025 +    DestPtrElan   = ELAN3_OP_LOAD32 (ctxt, EventAddress + offsetof (E3_BlockCopyEvent, ev_Dest));
7026 +    DataType      = DestPtrElan & EV_BCOPY_DTYPE_MASK;
7027 +    DestPtrElan  &= ~EV_BCOPY_DTYPE_MASK;
7028 +
7029 +
7030 +    PRINTF3 (ctxt, DBG_EVENT, "SimulateBlockCopy: Event %08x SourcePtr %08x DestPtr %08x\n",
7031 +            EventAddress, SourcePtrElan, DestPtrElan);
7032 +
7033 +    if (SourcePtrElan & EV_WCOPY)
7034 +       ELAN3_OP_STORE32 (ctxt, DestPtrElan, SourcePtrElan);
7035 +    else
7036 +    {
7037 +       /*
7038 +        * NOTE: since the block copy could be to sdram, we issue the writes backwards,
7039 +        *       except we MUST ensure that the last item in the block is written last.
7040 +        */
7041 +#if defined(__LITTLE_ENDIAN__)
7042 +       /*
7043 +        * For little endian cpu's we don't need to worry about the data type.
7044 +        */
7045 +       for (i = E3_BLK_SIZE-(2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
7046 +           ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7047 +
7048 +       i = E3_BLK_SIZE - sizeof (E3_uint64);
7049 +       ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7050 +#else
7051 +       switch (DataType)
7052 +       {
7053 +       case EV_TYPE_BCOPY_BYTE:
7054 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint8)); i >= 0; i -= sizeof (E3_uint8))
7055 +               ELAN3_OP_STORE8 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD8 (ctxt, SourcePtrElan + i));
7056 +           
7057 +           i = E3_BLK_SIZE - sizeof (E3_uint8);
7058 +           ELAN3_OP_STORE8 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD8 (ctxt, SourcePtrElan + i));
7059 +           break;
7060 +
7061 +       case EV_TYPE_BCOPY_HWORD: 
7062 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint16)); i >= 0; i -= sizeof (E3_uint16))
7063 +               ELAN3_OP_STORE16 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD16 (ctxt, SourcePtrElan + i));
7064 +           
7065 +           i = E3_BLK_SIZE - sizeof (E3_uint16);
7066 +           ELAN3_OP_STORE16 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD16 (ctxt, SourcePtrElan + i));
7067 +           break;
7068 +           
7069 +       case EV_TYPE_BCOPY_WORD:  
7070 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint32)); i >= 0; i -= sizeof (E3_uint32))
7071 +               ELAN3_OP_STORE32 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD32 (ctxt, SourcePtrElan + i));
7072 +           
7073 +           i = E3_BLK_SIZE - sizeof (E3_uint32);
7074 +           ELAN3_OP_STORE32 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD32 (ctxt, SourcePtrElan + i));
7075 +           break;
7076 +           
7077 +       case EV_TYPE_BCOPY_DWORD: 
7078 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
7079 +               ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7080 +           
7081 +           i = E3_BLK_SIZE - sizeof (E3_uint64);
7082 +           ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7083 +           break;
7084 +       }
7085 +#endif
7086 +    }
7087 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
7088 +
7089 +    return (FALSE);
7090 +}
7091 +
7092 +void
7093 +ReissueEvent (ELAN3_CTXT *ctxt, E3_Addr addr, int flags)
7094 +{
7095 +    PRINTF1 (ctxt, DBG_CMD, "ReissueEvent : Event=%08x\n", addr);
7096 +
7097 +    if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), addr, flags) == ISSUE_COMMAND_RETRY)
7098 +    {
7099 +       PRINTF1 (ctxt, DBG_CMD, "ReissueEvent: queue event %08x\n", addr);
7100 +
7101 +       kmutex_lock (&ctxt->SwapListsLock);
7102 +       ctxt->ItemCount[LIST_SETEVENT]++;
7103 +       ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_SETEVENT, addr);
7104 +       kmutex_unlock (&ctxt->SwapListsLock);
7105 +    }
7106 +}
7107 +
7108 +int
7109 +SetEventsNeedRestart (ELAN3_CTXT *ctxt)
7110 +{
7111 +    return (ctxt->ItemCount[LIST_SETEVENT] != 0);
7112 +}
7113 +
7114 +void
7115 +RestartSetEvents (ELAN3_CTXT *ctxt)
7116 +{
7117 +    void     *item;
7118 +    E3_uint32 EventPointer;
7119 +
7120 +    kmutex_lock (&ctxt->SwapListsLock);
7121 +    
7122 +    while (ctxt->ItemCount[LIST_SETEVENT])
7123 +    {
7124 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_SETEVENT, &item, &EventPointer))
7125 +           ctxt->ItemCount[LIST_SETEVENT] = 0;
7126 +       else
7127 +       {
7128 +           if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), EventPointer, FALSE) == ISSUE_COMMAND_RETRY)
7129 +           {
7130 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_SETEVENT, item);
7131 +               kmutex_unlock (&ctxt->SwapListsLock);
7132 +               return;
7133 +           }
7134 +           
7135 +           ctxt->ItemCount[LIST_SETEVENT]--;
7136 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
7137 +       }
7138 +    }
7139 +    kmutex_unlock (&ctxt->SwapListsLock);
7140 +}
7141 +
7142 +int
7143 +RunEventType(ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSaveArea, E3_uint32 EventType)
7144 +{
7145 +    int failed = FALSE;
7146 +
7147 +    if ((EventType & EV_TYPE_BCOPY) != 0)
7148 +       failed = SimulateBlockCopy(ctxt, FaultSaveArea->s.EventAddress);
7149 +    
7150 +    if ((EventType & EV_TYPE_MASK) == EV_TYPE_THREAD)
7151 +       ReissueStackPointer (ctxt, EventType & ~(EV_TYPE_MASK_THREAD|EV_TYPE_MASK_BCOPY));
7152 +    else if ((EventType & EV_TYPE_MASK) == EV_TYPE_DMA)
7153 +       RestartDmaPtr (ctxt, EventType & ~(EV_TYPE_MASK_DMA|EV_TYPE_MASK_BCOPY));
7154 +    else if ((EventType & EV_TYPE_EVIRQ) != 0)
7155 +       QueueEventInterrupt (ctxt, EventType & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY));
7156 +    else /* Chained event */
7157 +    {
7158 +       if ((EventType & ~EV_TYPE_BCOPY) != 0) /* not null setevent */
7159 +           ReissueEvent (ctxt, EventType & ~(EV_TYPE_MASK_CHAIN|EV_TYPE_MASK_BCOPY), FALSE);
7160 +    }
7161 +
7162 +    return (failed);
7163 +}
7164 +
7165 +void
7166 +WakeupLwp (ELAN3_DEV *dev, void *arg)
7167 +{
7168 +    ELAN3_CTXT    *ctxt = (ELAN3_CTXT *) arg;
7169 +    unsigned long flags;
7170 +
7171 +    PRINTF1 (ctxt, DBG_INTR, "WakeupLwp: %d\n", SPINLOCK_HELD (&dev->IntrLock));
7172 +
7173 +    spin_lock_irqsave (&dev->IntrLock, flags);
7174 +    ctxt->Halted = 1;
7175 +    kcondvar_wakeupone (&ctxt->HaltWait, &dev->IntrLock);
7176 +
7177 +    PRINTF0 (ctxt, DBG_INTR, "WakeupLwp: woken up context\n");
7178 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7179 +}
7180 +
7181 +void
7182 +QueueEventInterrupt (ELAN3_CTXT *ctxt, E3_uint32 cookie)
7183 +{
7184 +    ELAN3_DEV     *dev = ctxt->Device;
7185 +    unsigned long flags;
7186 +
7187 +    PRINTF1 (ctxt, DBG_EVENT, "QueueEventInterrupt: cookie %08x\n", cookie);
7188 +
7189 +    if (ELAN3_OP_EVENT (ctxt, cookie, OP_INTR) == OP_DEFER)
7190 +    {
7191 +       spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
7192 +
7193 +       if (ELAN3_QUEUE_REALLY_FULL (ctxt->EventCookieQ))
7194 +       {
7195 +           ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
7196 +           StartSwapoutContext (ctxt, 0, NULL);
7197 +       }
7198 +       else
7199 +       {
7200 +           *(ELAN3_QUEUE_BACK (ctxt->EventCookieQ, ctxt->EventCookies)) = cookie;
7201 +           
7202 +           ELAN3_QUEUE_ADD (ctxt->EventCookieQ);
7203 +           kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
7204 +           if (ELAN3_QUEUE_FULL (ctxt->EventCookieQ))
7205 +           {
7206 +               ctxt->Status |= CTXT_EVENT_QUEUE_FULL;
7207 +               StartSwapoutContext (ctxt, 0, NULL);
7208 +           }
7209 +       }
7210 +       spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
7211 +    }
7212 +}
7213 +
7214 +int
7215 +ElanException (ELAN3_CTXT *ctxt, int type, int proc, void *trap, ...)
7216 +{
7217 +    int     res;
7218 +    va_list ap;
7219 +
7220 +    va_start (ap, trap);
7221 +
7222 +    PRINTF2 (ctxt, DBG_FN, "ElanException: proc %d type %d\n", proc, type);
7223 +
7224 +    res = ELAN3_OP_EXCEPTION (ctxt, type, proc, trap, ap);
7225 +
7226 +    va_end (ap);
7227 +    
7228 +    return (res);
7229 +}
7230 +
7231 +
7232 +/*
7233 + * Local variables:
7234 + * c-file-style: "stroustrup"
7235 + * End:
7236 + */
7237 Index: linux-2.4.21/drivers/net/qsnet/elan3/context_linux.c
7238 ===================================================================
7239 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/context_linux.c   2004-02-23 16:02:56.000000000 -0500
7240 +++ linux-2.4.21/drivers/net/qsnet/elan3/context_linux.c        2005-06-01 23:12:54.566444120 -0400
7241 @@ -0,0 +1,228 @@
7242 +/*
7243 + *    Copyright (c) 2003 by Quadrics Limited.
7244 + * 
7245 + *    For licensing information please see the supplied COPYING file
7246 + *
7247 + */
7248 +
7249 +#ident "@(#)$Id: context_linux.c,v 1.28.2.2 2004/10/28 11:54:56 david Exp $"
7250 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/context_linux.c,v $*/
7251 +
7252 +#include <qsnet/kernel.h>
7253 +#include <qsnet/kpte.h>
7254 +
7255 +#include <elan3/elanregs.h>
7256 +#include <elan3/elandev.h>
7257 +#include <elan3/elanvp.h>
7258 +#include <elan3/elan3mmu.h>
7259 +#include <elan3/elanctxt.h>
7260 +#include <elan3/elandebug.h>
7261 +#include <elan3/urom_addrs.h>
7262 +#include <elan3/thread.h>
7263 +
7264 +int
7265 +LoadElanTranslation (ELAN3_CTXT *ctxt, E3_Addr addr, int len, int protFault, int writeable)
7266 +{
7267 +    ELAN3MMU           *elan3mmu = ctxt->Elan3mmu;
7268 +    ELAN3MMU_RGN       *rgn;
7269 +    caddr_t            mainAddr;
7270 +    int                        perm;
7271 +    unsigned int        off;
7272 +    unsigned long       flags;
7273 +
7274 +    ASSERT (PAGE_ALIGNED (addr) && PAGE_ALIGNED (len));
7275 +
7276 +    PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: addr %08x len %08x%s%s\n", 
7277 +        addr, len, protFault ? " prot fault" : "", writeable ? " writeable" : "");
7278 +
7279 +    /* Ensure there's enough elan mmu tables for us to use */
7280 +    elan3mmu_expand (elan3mmu, addr, len, PTBL_LEVEL_3, 0);
7281 +
7282 +    while (len > 0) 
7283 +    {
7284 +       /*
7285 +        * Retrieve permission region and calculate main address
7286 +        */
7287 +       spin_lock (&elan3mmu->elan3mmu_lock);
7288 +
7289 +       rgn = elan3mmu_rgnat_elan (elan3mmu, addr);
7290 +       if (rgn == NULL) {
7291 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: no permission region at %lx %p\n", 
7292 +               (u_long) addr, rgn);
7293 +           spin_unlock (&elan3mmu->elan3mmu_lock);
7294 +           return (EFAULT);
7295 +       }
7296 +       mainAddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
7297 +
7298 +       ASSERT (PAGE_ALIGNED ((unsigned long)mainAddr));
7299 +
7300 +       spin_unlock (&elan3mmu->elan3mmu_lock);
7301 +
7302 +       /*
7303 +        * If we're tying to load a translation to the elan command port, 
7304 +        * then don't do it now, but mark the context to have it reloaded
7305 +        * just before we restart any threads. We do this because we don't
7306 +        * want to call into the segment driver since we could then block
7307 +        * waiting for the command port to become available.
7308 +        */
7309 +       if (mainAddr == ctxt->CommandPageMapping)
7310 +       {
7311 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: addr=%08x maps command port\n", addr);
7312 +
7313 +           spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
7314 +           UnloadCommandPageMapping (ctxt);
7315 +           spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
7316 +       }
7317 +       else 
7318 +       {
7319 +           struct vm_area_struct *area;
7320 +           struct mm_struct *mm = current->mm;
7321 +           pte_t *ptep_ptr;
7322 +           pte_t  ptep_value;
7323 +
7324 +           down_read (&current->mm->mmap_sem);
7325 +
7326 +           if ((area = find_vma_intersection(mm, (unsigned long)mainAddr, (unsigned long)mainAddr + PAGESIZE)) == NULL)
7327 +           {
7328 +               PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p no vma\n", mainAddr);
7329 +               up_read (&current->mm->mmap_sem);
7330 +               return EFAULT;
7331 +           }
7332 +
7333 +           if (writeable && !(area->vm_flags & VM_WRITE)) 
7334 +           {
7335 +               PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p not writeable\n", mainAddr);
7336 +               up_read (&current->mm->mmap_sem);
7337 +               return EFAULT;
7338 +           }
7339 +           
7340 +           spin_lock (&mm->page_table_lock);
7341 +
7342 +           /* dont deference the pointer after the unmap */
7343 +           ptep_ptr = find_pte_map (mm, (unsigned long)mainAddr);  
7344 +           if (ptep_ptr) {
7345 +               ptep_value = *ptep_ptr;
7346 +               pte_unmap(ptep_ptr);
7347 +           }
7348 +
7349 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p %s %s\n", 
7350 +                   mainAddr, writeable ? "writeable" : "readonly", 
7351 +                   !ptep_ptr ? "invalid" : pte_none(ptep_value) ? "none " : !pte_present(ptep_value) ? "swapped " : 
7352 +                   writeable && !pte_write(ptep_value) ? "COW" : "OK");
7353 +           
7354 +           if (!ptep_ptr || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value))) 
7355 +           {  
7356 +               spin_unlock (&mm->page_table_lock);
7357 +
7358 +               make_pages_present((unsigned long)mainAddr, (unsigned long)mainAddr + PAGE_SIZE);
7359 +
7360 +               spin_lock (&mm->page_table_lock);
7361 +
7362 +               /* dont deference the pointer after the unmap */
7363 +               ptep_ptr = find_pte_map (mm, (unsigned long)mainAddr);  
7364 +               if (ptep_ptr) {
7365 +                   ptep_value = *ptep_ptr;
7366 +                   pte_unmap(ptep_ptr);
7367 +               }
7368 +
7369 +               if (!ptep_ptr || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value))) 
7370 +               {
7371 +                   spin_unlock (&mm->page_table_lock);
7372 +                   up_read (&current->mm->mmap_sem);
7373 +                   return EFAULT;
7374 +               }
7375 +           } 
7376 +
7377 +           /* don't allow user write access to kernel pages if not kernel */
7378 +           if (!pte_read(ptep_value))
7379 +           {
7380 +               spin_unlock (&mm->page_table_lock);
7381 +               up_read (&current->mm->mmap_sem);
7382 +               return EFAULT;
7383 +           }
7384 +
7385 +           if (writeable)
7386 +               pte_mkdirty(ptep_value);
7387 +           pte_mkyoung (ptep_value);
7388 +
7389 +           /* now load the elan pte */
7390 +           if (writeable)
7391 +               perm  = rgn->rgn_perm;
7392 +           else
7393 +               perm = ELAN3_PERM_READONLY(rgn->rgn_perm & ELAN3_PTE_PERM_MASK) | (rgn->rgn_perm & ~ELAN3_PTE_PERM_MASK);
7394 +
7395 +           for (off = 0; off < PAGE_SIZE; off += ELAN3_PAGE_SIZE)
7396 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, addr + off, pte_phys(ptep_value) + off, perm, PTE_LOAD | PTE_NO_SLEEP);
7397 +
7398 +           spin_unlock (&mm->page_table_lock);
7399 +           up_read (&current->mm->mmap_sem);
7400 +       }
7401 +
7402 +       len -= PAGESIZE;
7403 +       addr += PAGESIZE;
7404 +    }
7405 +    return (ESUCCESS);
7406 +}
7407 +
7408 +
7409 +/*
7410 + * LoadCommandPortTranslation:
7411 + *    explicitly load an elan translation to the command port.
7412 + *    but only do it if the command port is accessible.
7413 + *
7414 + *    we call this function just after we have restarted
7415 + *    and trapped commands,  since when a command traps
7416 + *    the elan translation to the command port is unloaded.
7417 + */
7418 +void
7419 +LoadCommandPortTranslation (ELAN3_CTXT *ctxt)
7420 +{
7421 +    ELAN3MMU     *elan3mmu = ctxt->Elan3mmu;
7422 +    ELAN3MMU_RGN *rgn;
7423 +    E3_Addr       addr;
7424 +    int                  perm;
7425 +    physaddr_t    phys;
7426 +    unsigned int  off;
7427 +    unsigned long flags;
7428 +
7429 +    PRINTF (ctxt, DBG_FAULT, "LoadCommandPortTranslation: SegAddr=%p Status=%x\n", ctxt->CommandPageMapping, ctxt->Status);
7430 +
7431 +    if (ctxt->CommandPageMapping != NULL  && !(ctxt->Status & CTXT_COMMAND_MAPPED_ELAN))
7432 +    {
7433 +       spin_lock (&elan3mmu->elan3mmu_lock);
7434 +       
7435 +       rgn = elan3mmu_rgnat_main (elan3mmu, ctxt->CommandPageMapping);
7436 +       if (rgn == (ELAN3MMU_RGN *) NULL) 
7437 +       {
7438 +           PRINTF(ctxt, DBG_FAULT, "LoadCommandPortTranslation: no permission for command port\n");
7439 +           spin_unlock (&elan3mmu->elan3mmu_lock);
7440 +           return;
7441 +       }
7442 +       
7443 +       addr = rgn->rgn_ebase + (ctxt->CommandPageMapping - rgn->rgn_mbase);
7444 +       perm = rgn->rgn_perm;
7445 +       phys = kmem_to_phys((caddr_t) ctxt->CommandPage);
7446 +
7447 +       spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
7448 +       if (ELAN3_QUEUE_EMPTY(ctxt->CommandTrapQ) && !(ctxt->Status & CTXT_OTHERS_REASONS))
7449 +       {
7450 +           PRINTF(ctxt, DBG_FAULT, "LoadCommandPortTranslation: load xlation addr=%08x phys=%llx perm=%d\n", 
7451 +                  addr, (unsigned long long)phys, perm);
7452 +
7453 +           ctxt->Status |= CTXT_COMMAND_MAPPED_ELAN;
7454 +
7455 +           for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
7456 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, addr + off, phys + off, perm, PTE_LOAD | PTE_NO_SLEEP);
7457 +       }
7458 +       spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
7459 +       
7460 +       spin_unlock (&elan3mmu->elan3mmu_lock);
7461 +    }
7462 +}
7463 +
7464 +
7465 +/*
7466 + * Local variables:
7467 + * c-file-style: "stroustrup"
7468 + * End:
7469 + */
7470 Index: linux-2.4.21/drivers/net/qsnet/elan3/cproc.c
7471 ===================================================================
7472 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/cproc.c   2004-02-23 16:02:56.000000000 -0500
7473 +++ linux-2.4.21/drivers/net/qsnet/elan3/cproc.c        2005-06-01 23:12:54.567443968 -0400
7474 @@ -0,0 +1,539 @@
7475 +/*
7476 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
7477 + * 
7478 + *    For licensing information please see the supplied COPYING file
7479 + *
7480 + */
7481 +
7482 +#ident "@(#)$Id: cproc.c,v 1.46 2004/02/10 15:05:10 david Exp $"
7483 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/cproc.c,v $ */
7484 +
7485 +
7486 +#include <qsnet/kernel.h>
7487 +
7488 +#include <elan3/elanregs.h>
7489 +#include <elan3/elandev.h>
7490 +#include <elan3/elanvp.h>
7491 +#include <elan3/elan3mmu.h>
7492 +#include <elan3/elanctxt.h>
7493 +#include <elan3/elandebug.h>
7494 +#include <elan3/urom_addrs.h>
7495 +#include <elan3/vmseg.h>
7496 +
7497 +void
7498 +HandleCProcTrap (ELAN3_DEV *dev, E3_uint32 Pend, E3_uint32 *Maskp)
7499 +{
7500 +    E3_FaultSave_BE     FaultSave;
7501 +    CProcTrapBuf_BE    TrapBuf;
7502 +    COMMAND_TRAP       *trap;
7503 +    ELAN3_CTXT        *ctxt;
7504 +    sdramaddr_t         CurrTrap;
7505 +    sdramaddr_t         LastTrapAddr;
7506 +    int                NTrapEntries;
7507 +    int                        NewPend;
7508 +    unsigned long       flags;
7509 +
7510 +    /* 
7511 +     * Temporarily mask out the command processor interrupt, since
7512 +     * we may cause it be re-asserted when we re-issue the commands
7513 +     * from the overflow queue area.
7514 +     */
7515 +    DISABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
7516 +
7517 +    NewPend = read_reg32 (dev, Exts.InterruptReg);
7518 +
7519 +    do {
7520 +       if (NewPend & INT_ComQueue)
7521 +       {
7522 +           if ((read_reg32 (dev, ComQueueStatus) & ComQueueError) != 0)
7523 +           {
7524 +               printk ("elan%d: InterruptReg=%x ComQueueStatus=%x\n", dev->Instance,
7525 +                       read_reg32 (dev, Exts.InterruptReg), read_reg32 (dev, ComQueueStatus));
7526 +               panic ("elan: command queue has overflowed !!");
7527 +               /* NOTREACHED */
7528 +           }
7529 +
7530 +           BumpStat (dev, ComQueueHalfFull);
7531 +
7532 +           /*
7533 +            * Capture the other cpus and stop the threads processor then
7534 +            * allow the command processor to eagerly flush the command queue.
7535 +            */
7536 +           dev->FlushCommandCount++; dev->HaltThreadCount++;
7537 +           SetSchedStatusRegister (dev, Pend, Maskp);
7538 +
7539 +           CAPTURE_CPUS();
7540 +
7541 +           while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
7542 +               mb();
7543 +           
7544 +           /*
7545 +            * Let the threads processor run again, and release the cross call.
7546 +            */
7547 +           RELEASE_CPUS();
7548 +
7549 +           dev->FlushCommandCount--; dev->HaltThreadCount--;
7550 +           SetSchedStatusRegister (dev, Pend, Maskp);
7551 +
7552 +           /*
7553 +            * Re-sample the interrupt register to see if the command processor
7554 +            * has trapped while flushing the queue.  Preserve the INT_ComQueue
7555 +            * bit, so we can clear the ComQueueStatus register later.
7556 +            */
7557 +           NewPend = (read_reg32 (dev, Exts.InterruptReg) | INT_ComQueue);
7558 +       }
7559 +       
7560 +       CurrTrap = dev->CommandPortTraps[dev->CurrentCommandPortTrap];
7561 +       
7562 +       if (NewPend & INT_CProc)
7563 +       {
7564 +           BumpStat (dev, CProcTraps);
7565 +
7566 +           /*
7567 +            * Copy the MMU Fault Save area and zero it out for future traps.
7568 +            */
7569 +           elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), &FaultSave, sizeof (E3_FaultSave));
7570 +           elan3_sdram_zeroq_sdram      (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), sizeof (E3_FaultSave));
7571 +
7572 +           /*
7573 +            * First entry in the cproc trap save area is the value of Areg and Breg for the
7574 +            * uWord before the address fault.
7575 +            */
7576 +           TrapBuf.Align64 = elan3_sdram_readq (dev, CurrTrap); CurrTrap += sizeof (TrapBuf.Align64);
7577 +
7578 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, (TrapBuf.r.Breg >> 16));
7579 +           if (ctxt == NULL)
7580 +           {
7581 +               PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: context invalid [%08x.%08x]\n", TrapBuf.r.Areg, TrapBuf.r.Breg);
7582 +               BumpStat (dev, InvalidContext);
7583 +           }
7584 +           else
7585 +           {
7586 +               if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandTrapQ))
7587 +               {
7588 +                   if ((ctxt->Status & CTXT_COMMAND_OVERFLOW_ERROR) == 0)
7589 +                   {
7590 +                       ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
7591 +                       StartSwapoutContext (ctxt, Pend, Maskp);
7592 +                   }
7593 +               }
7594 +               else
7595 +               {
7596 +                   trap = ELAN3_QUEUE_BACK (ctxt->CommandTrapQ, ctxt->CommandTraps);
7597 +                   
7598 +                   trap->FaultSave     = FaultSave;
7599 +                   trap->Status.Status = read_reg32 (dev, Exts.CProcStatus.Status);
7600 +                   trap->TrapBuf       = TrapBuf;
7601 +                   
7602 +                   /*
7603 +                    * The command processor does not stop after it has trapped. It will continue
7604 +                    * to save commands for other contexts into the commands port save area.
7605 +                    * The valid context for the trap is held in FaultSave. As some of this
7606 +                    * trap code uses the context in the status register the local copy must be
7607 +                    * updated with the trap context.
7608 +                    */
7609 +                   trap->Status.s.Context = (TrapBuf.r.Breg >> 16);
7610 +                   
7611 +                   PRINTF4 (ctxt, DBG_INTR, "HandleCProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
7612 +                            trap->Status.s.WakeupFunction, trap->Status.s.Context,
7613 +                            trap->Status.s.SuspendAddr, MiToName(trap->Status.s.TrapType));
7614 +                   PRINTF2 (ctxt, DBG_INTR, "HandleCProcTrap: Areg=%08x Breg=%08x\n", 
7615 +                            trap->TrapBuf.r.Areg, trap->TrapBuf.r.Breg);
7616 +                   
7617 +                   if (ELAN3_OP_CPROC_TRAP (ctxt, trap) == OP_DEFER)
7618 +                   {
7619 +                       ELAN3_QUEUE_ADD (ctxt->CommandTrapQ);
7620 +                       
7621 +                       PRINTF1 (ctxt, DBG_INTR, "HandleCProcTrap: setting Command Flag at %p to 1\n", &ctxt->FlagPage->CommandFlag);
7622 +                       
7623 +                       ctxt->FlagPage->CommandFlag = 1;
7624 +                       
7625 +                       kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
7626 +                   }
7627 +               }
7628 +
7629 +               UnloadCommandPageMapping (ctxt);
7630 +           }
7631 +       }
7632 +       
7633 +       /*
7634 +        * Now change the CommandPortTrap queue.
7635 +        * Must stop the command processor, wait for it to stop, find the final
7636 +        * entry in the current cproc trap save area, reset the comm port
7637 +        * trap save address to the other queue, clear the command port interrupt and
7638 +        * set it running normally again, and then let it go again. This is not very
7639 +        * time critical but it would be a good idea to prevent a higher priority
7640 +        * interrupt from slowing down the process to prevent to fifos filling.
7641 +        */
7642 +       spin_lock_irqsave (&dev->CProcLock, flags);
7643 +
7644 +       SET_SCHED_STATUS (dev, CProcStop);
7645 +
7646 +       while ((read_reg32 (dev, Exts.SchCntReg) & CProcStopped) == 0)
7647 +       {
7648 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: waiting for command processor to stop\n");
7649 +           mb();
7650 +       }
7651 +       
7652 +       /*
7653 +        * Remember how many entries are in the saved command queue,  and 
7654 +        * re-initialise it, before restarting the command processor.
7655 +        */
7656 +       NTrapEntries = (read_reg32 (dev, CProc_TrapSave_Addr) - dev->CommandPortTraps[dev->CurrentCommandPortTrap])/sizeof (E3_uint64);
7657 +       LastTrapAddr = dev->CommandPortTraps[dev->CurrentCommandPortTrap] + NTrapEntries*sizeof (TrapBuf);
7658 +
7659 +       dev->CurrentCommandPortTrap ^= 1;
7660 +       write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[dev->CurrentCommandPortTrap]);
7661 +
7662 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: command trap queue has %d entries\n", NTrapEntries);
7663 +
7664 +       if (NTrapEntries > ELAN3_COMMAND_TRAP_SIZE/sizeof (E3_uint64))
7665 +           panic ("HandleCProcTrap: command trap queue has overflowed\n");
7666 +       
7667 +       if (NewPend & INT_CProc)
7668 +       {
7669 +           /*
7670 +            * Clear the CProc interrupt and set it running normally again. Nothing should
7671 +            * be running now that could issue commands apart from this trap handler.
7672 +            */
7673 +           PULSE_SCHED_STATUS (dev, RestartCProc);
7674 +       }
7675 +       
7676 +       if (NewPend & INT_ComQueue)
7677 +       {
7678 +           /*
7679 +            * Write any value here to clear out the half full and error bits of the command
7680 +            * overflow queues. This will also remove the overflow interrupt.
7681 +            */
7682 +           write_reg32 (dev, ComQueueStatus, 0);
7683 +       }
7684 +       
7685 +       /*
7686 +        * And let the command processor start again
7687 +        */
7688 +       CLEAR_SCHED_STATUS (dev, CProcStop);
7689 +       
7690 +       /*
7691 +        * Now re-issue all the commands that were issued after the command port trapped.
7692 +        * Should halt the dma processor and force command sto be put onto the run queues
7693 +        * to ensure that a remote re-issued command is handled correctly. NOTE it is
7694 +        * not necessary to wait for the dma processor to stop and this will reduce the
7695 +        * performance impact. As CProcHalt is asserted all commands will be flushed
7696 +        * to the queues.
7697 +        */
7698 +       dev->HaltDmaDequeueCount++; dev->FlushCommandCount++;
7699 +       SetSchedStatusRegister (dev, Pend, Maskp);
7700 +       
7701 +       /*
7702 +        * XXXX: should we do a capture/release if the trap overflow
7703 +        *       area has a "large" number of commands in it,  since
7704 +        *       we will just stuff them all back in, together with 
7705 +        *       all those issued by the other cpus/thread processors.
7706 +        */
7707 +       while (CurrTrap != LastTrapAddr)
7708 +       {
7709 +           /* Read the next saved (but not trapped) command */
7710 +           TrapBuf.Align64 = elan3_sdram_readq (dev, CurrTrap); CurrTrap += sizeof (TrapBuf);
7711 +           
7712 +
7713 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, (TrapBuf.s.ContextType >> 16));
7714 +           
7715 +           if (ctxt == NULL)
7716 +           {
7717 +               PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: context %x invalid\n", TrapBuf.s.ContextType >> 16);
7718 +               BumpStat (dev, InvalidContext);
7719 +           }
7720 +           else
7721 +           {
7722 +               if (!ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
7723 +               {
7724 +                   PRINTF3 (ctxt, DBG_INTR, "HandleCProcTrap: save command %x context %x - %08x\n",
7725 +                            (TrapBuf.s.ContextType>>3) & 0x3ff, TrapBuf.s.ContextType >> 17, TrapBuf.s.Addr);
7726 +                   
7727 +                   if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandQ))
7728 +                   {
7729 +                       ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
7730 +                       StartSwapoutContext (ctxt, Pend, Maskp);
7731 +                   }
7732 +                   else
7733 +                   {
7734 +                       *ELAN3_QUEUE_BACK(ctxt->CommandQ, ctxt->Commands) = TrapBuf;
7735 +
7736 +                       ELAN3_QUEUE_ADD (ctxt->CommandQ);
7737 +                   }
7738 +                   continue;
7739 +               }
7740 +               
7741 +               /* Reissue the command to the command port for this context */
7742 +               PRINTF2 (ctxt, DBG_INTR, "HandleCProcTrap: re-issue command %x - %08x\n",
7743 +                        (TrapBuf.s.ContextType>>5) & 0xff, TrapBuf.s.Addr);
7744 +               
7745 +               mb();
7746 +               if (ELAN3_OP_CPROC_REISSUE(ctxt, &TrapBuf) != OP_HANDLED)
7747 +                   ((E3_uint32 *) ctxt->CommandPort)[(TrapBuf.s.ContextType>>5) & 0xff] = TrapBuf.s.Addr;
7748 +               mmiob();
7749 +           }
7750 +       }
7751 +       
7752 +       while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
7753 +       {
7754 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: waiting for queues to empty after reissueing commands\n");
7755 +           mb();
7756 +       }
7757 +       
7758 +       dev->HaltDmaDequeueCount--; dev->FlushCommandCount--;
7759 +       SetSchedStatusRegister (dev, Pend, Maskp);
7760 +       
7761 +       spin_unlock_irqrestore (&dev->CProcLock, flags);
7762 +
7763 +       /*
7764 +        * Re-read the interrupt register and see if we've got another command
7765 +        * port interrupt
7766 +        */
7767 +       NewPend = read_reg32 (dev, Exts.InterruptReg);
7768 +    } while ((NewPend & (INT_CProc | INT_ComQueue)) != 0);
7769 +
7770 +
7771 +    /*
7772 +     * Re-enable the command processor interrupt as we've finished 
7773 +     * polling it.
7774 +     */
7775 +    ENABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
7776 +}
7777 +
7778 +void
7779 +ResolveCProcTrap (ELAN3_CTXT *ctxt)
7780 +{
7781 +    ELAN3_DEV     *dev = ctxt->Device;
7782 +    COMMAND_TRAP *trap;
7783 +    int                  res;
7784 +    unsigned long flags;
7785 +
7786 +    kmutex_lock (&ctxt->CmdLock);
7787 +    spin_lock_irqsave (&dev->IntrLock, flags);
7788 +
7789 +    while (! ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ))
7790 +    {
7791 +       trap = ELAN3_QUEUE_MIDDLE(ctxt->CommandTrapQ, ctxt->CommandTraps);
7792 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
7793 +
7794 +       switch (trap->Status.s.TrapType)
7795 +       {
7796 +       case MI_EventIntUpdateBPtr:
7797 +       case MI_ChainedEventError:
7798 +       case MI_EventQueueOverflow:
7799 +       case MI_ThreadQueueOverflow:
7800 +       case MI_DmaQueueOverflow:
7801 +           PRINTF1 (ctxt, DBG_CPROC, "ResolveCProcTrap: %s\n", MiToName (trap->Status.s.TrapType));
7802 +           break;
7803 +           
7804 +       default:
7805 +           /* All other traps are MMU related, we should have a fault address and FSR */
7806 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
7807 +           {
7808 +               PRINTF1 (ctxt, DBG_CPROC, "ResolveCProcTrap: elan3_pagefault failed for address %08x\n", 
7809 +                        trap->FaultSave.s.FaultAddress);
7810 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, COMMAND_PROC, trap, &trap->FaultSave, res);
7811 +               
7812 +               /* Set the trap type to 0 so the command does not get re-issued */
7813 +               trap->Status.s.TrapType = 0;
7814 +           }
7815 +           break;
7816 +       }
7817 +       
7818 +       spin_lock_irqsave (&dev->IntrLock, flags);
7819 +
7820 +       ELAN3_QUEUE_CONSUME (ctxt->CommandTrapQ);
7821 +    }
7822 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7823 +    kmutex_unlock (&ctxt->CmdLock);
7824 +}
7825 +
7826 +int
7827 +RestartCProcTrap (ELAN3_CTXT *ctxt)
7828 +{
7829 +    ELAN3_DEV     *dev      = ctxt->Device;
7830 +    COMMAND_TRAP  trap;
7831 +    void        *item;
7832 +    int                  res;
7833 +    unsigned long flags;
7834 +
7835 +    spin_lock_irqsave (&dev->IntrLock, flags);
7836 +
7837 +    while (! ELAN3_QUEUE_FRONT_EMPTY (ctxt->CommandTrapQ))
7838 +    {
7839 +       trap = (*ELAN3_QUEUE_FRONT (ctxt->CommandTrapQ, ctxt->CommandTraps));
7840 +       ELAN3_QUEUE_REMOVE (ctxt->CommandTrapQ);
7841 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
7842 +       
7843 +       BumpUserStat (ctxt, CProcTraps);
7844 +
7845 +       switch (trap.Status.s.TrapType)
7846 +       {
7847 +       case 0:
7848 +           res = ISSUE_COMMAND_OK;
7849 +           break;
7850 +           
7851 +       case MI_WaitForWaitEventDesc:
7852 +           /*
7853 +            * Fault occured on the read of wait event descriptor for wait event type 0.
7854 +            * Fault already fixed. Just re-issue the wait command. Wait event descriptor addr
7855 +            * is in the Areg save value.
7856 +            */
7857 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type0 desc read fault %08x\n", 
7858 +                    trap.TrapBuf.r.Areg);
7859 +           
7860 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent0), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
7861 +           break;
7862 +
7863 +       case MI_WaitForEventReadTy0:
7864 +           /*
7865 +            * Fault occured on the read of event location for wait event type 0.
7866 +            * Fault already fixed. Just re-issue the wait command. Wait event descriptor addr
7867 +            * is in the Areg save value.
7868 +            */
7869 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type0 event loc fault %08x\n",
7870 +                    trap.TrapBuf.r.Areg);
7871 +           
7872 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent0), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
7873 +           break;
7874 +           
7875 +       case MI_WaitForEventReadTy1:
7876 +           /*
7877 +            * Fault occured on the read of the event location for wait event type 1.
7878 +            * Areg has the original ptr and count.
7879 +            * Fault already fixed. Just re-issue the wait command using Areg and context.
7880 +            */
7881 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type1 event location read fault %08x\n",
7882 +                    trap.TrapBuf.r.Areg);
7883 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent1), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
7884 +           break;
7885 +           
7886 +       case MI_WaitForCntxDmaDescRead:
7887 +       case MI_WaitForNonCntxDmaDescRead:
7888 +           /*
7889 +            * Fault occured on the read of the dma descriptor. Run dma using the
7890 +            * Fault Address in FaultSave.
7891 +            */
7892 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: MI_WaitForCntxDmaDescRead: re-issue dma at %08x\n", 
7893 +                    trap.FaultSave.s.FaultAddress);
7894 +           
7895 +           res = IssueDmaCommand (ctxt, trap.FaultSave.s.FaultAddress, NULL, ISSUE_COMMAND_FOR_CPROC);
7896 +           break;
7897 +           
7898 +       default:
7899 +           /*
7900 +            * Assume the fault will be fixed by FixupEventTrap.
7901 +            */
7902 +           FixupEventTrap (ctxt, COMMAND_PROC, &trap, trap.Status.s.TrapType, &trap.FaultSave, ISSUE_COMMAND_FOR_CPROC);
7903 +
7904 +           res = ISSUE_COMMAND_OK;
7905 +           break;
7906 +       }
7907 +
7908 +       switch (res)
7909 +       {
7910 +       case ISSUE_COMMAND_OK:                                  /* command re-issued ok*/
7911 +           break;
7912 +
7913 +       case ISSUE_COMMAND_TRAPPED:                             /* command trapped,  it will have been copied */
7914 +           return (EAGAIN);                                    /* to the back of the trap queue */
7915 +
7916 +       case ISSUE_COMMAND_RETRY:                               /* didn't issue command, so place back at front for */
7917 +           spin_lock_irqsave (&dev->IntrLock, flags);          /* later (after resolving other traps */
7918 +
7919 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandTrapQ))
7920 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
7921 +           else
7922 +           {
7923 +               ELAN3_QUEUE_ADD_FRONT(ctxt->CommandTrapQ);
7924 +               (*ELAN3_QUEUE_FRONT (ctxt->CommandTrapQ, ctxt->CommandTraps)) = trap;
7925 +           }
7926 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
7927 +           return (EAGAIN);
7928 +
7929 +       default:
7930 +           return (EINVAL);
7931 +       }
7932 +       spin_lock_irqsave (&dev->IntrLock, flags);
7933 +    }  
7934 +
7935 +    /*
7936 +     * GNAT 5409 - if CommandPortItem was not NULL, but other reasons were set,
7937 +     *             then we'd not free the CommandPortItem even though we'd re-
7938 +     *             issued all trapped and overflowed commands.  Hence only return
7939 +     *             without clearing CommandPortItem if we will be called again as
7940 +     *             either CommandTrapQ or CommandQ is not empty.
7941 +     */
7942 +
7943 +    /* Now run the overflowed commands for this context */
7944 +    if (! ELAN3_QUEUE_EMPTY (ctxt->CommandQ))
7945 +    {
7946 +       if (! ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
7947 +       {
7948 +           PRINTF0 (ctxt, DBG_CPROC, "RestartCProcTrap: cannot issue overflowed commands\n");
7949 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
7950 +           return (EAGAIN);
7951 +       }
7952 +
7953 +       /*
7954 +        * Just re-issue the commands,  if one traps then the remainder will 
7955 +        * just get placed in the overflow queue again and the interrupt handler
7956 +        * will copy them back in here.
7957 +        *
7958 +        * Stop the dma processor from taking commands,  since one of the commands
7959 +        * could be a re-issued remote dma, which must be processed by the command
7960 +        * processor.
7961 +        */
7962 +       
7963 +       if (dev->HaltDmaDequeueCount++ == 0)
7964 +           SetSchedStatusRegister (dev, 0, NULL);
7965 +       
7966 +       while (! ELAN3_QUEUE_EMPTY (ctxt->CommandQ))
7967 +       {
7968 +           CProcTrapBuf_BE *TrapBuf = ELAN3_QUEUE_FRONT (ctxt->CommandQ, ctxt->Commands);
7969 +           
7970 +           PRINTF2 (ctxt, DBG_CPROC, "RestartCProcTrap: re-issue command %x - %08x\n",
7971 +                    (TrapBuf->s.ContextType>>5) & 0xff, TrapBuf->s.Addr);
7972 +           mb();                                                       /* ensure writes to main memory completed */
7973 +           ((E3_uint32 *) ctxt->CommandPort)[(TrapBuf->s.ContextType>>5) & 0xff] = TrapBuf->s.Addr;
7974 +           mmiob();                                            /* and flush through IO writes */
7975 +           
7976 +           ELAN3_QUEUE_REMOVE (ctxt->CommandQ);
7977 +       }
7978 +       
7979 +       /* observe the command processor having halted */
7980 +       res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, 0, &flags);
7981 +       
7982 +       if (res != ISSUE_COMMAND_OK)
7983 +       {
7984 +           PRINTF0 (ctxt, DBG_CPROC, "RestartCProcTrap: trapped after issueing overflowed commands\n");
7985 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
7986 +           return (EAGAIN);
7987 +       }
7988 +    }
7989 +
7990 +    /* remove the command port item, while holding the lock */
7991 +    item = ctxt->CommandPortItem;
7992 +    ctxt->CommandPortItem = NULL;
7993 +
7994 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7995 +       
7996 +    if (item != NULL)                                          /* Free of any item that may have been stored */
7997 +    {                                                          /* because of the commandport trap */
7998 +       PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: commandPortItem %p\n", item);
7999 +
8000 +       kmutex_lock (&ctxt->SwapListsLock);
8001 +       ELAN3_OP_FREE_BLOCK_ITEM (ctxt, item);
8002 +       kmutex_unlock (&ctxt->SwapListsLock);
8003 +    }
8004 +
8005 +    return (ESUCCESS);
8006 +}
8007 +
8008 +
8009 +/*
8010 + * Local variables:
8011 + * c-file-style: "stroustrup"
8012 + * End:
8013 + */
8014 Index: linux-2.4.21/drivers/net/qsnet/elan3/dproc.c
8015 ===================================================================
8016 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/dproc.c   2004-02-23 16:02:56.000000000 -0500
8017 +++ linux-2.4.21/drivers/net/qsnet/elan3/dproc.c        2005-06-01 23:12:54.568443816 -0400
8018 @@ -0,0 +1,553 @@
8019 +/*
8020 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
8021 + * 
8022 + *    For licensing information please see the supplied COPYING file
8023 + *
8024 + */
8025 +
8026 +#ident "@(#)$Id: dproc.c,v 1.52 2003/09/24 13:57:25 david Exp $"
8027 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/dproc.c,v $ */
8028 +
8029 +#include <qsnet/kernel.h>
8030 +
8031 +#include <elan3/elanregs.h>
8032 +#include <elan3/elandev.h>
8033 +#include <elan3/elanvp.h>
8034 +#include <elan3/elan3mmu.h>
8035 +#include <elan3/elanctxt.h>
8036 +#include <elan3/elandebug.h>
8037 +#include <elan3/urom_addrs.h>
8038 +#include <elan3/intrinsics.h>
8039 +#include <elan3/dma.h>
8040 +#include <elan3/vmseg.h>
8041 +
8042 +#define DMA_RETRY_FAIL_COUNT   8
8043 +
8044 +static void PrintUserDma (ELAN3_CTXT *ctxt, E3_Addr addr);
8045 +
8046 +int
8047 +HandleDProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits)
8048 +{
8049 +    DMA_TRAP    *trap   = dev->DmaTrap;
8050 +
8051 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
8052 +
8053 +    /* Scoop out the trap information, before restarting the Elan */
8054 +    trap->Status.Status = read_reg32 (dev, Exts.DProcStatus.Status);
8055 +    
8056 +    ASSERT(trap->Status.s.WakeupFunction == WakeupNever);
8057 +
8058 +    /* copy the normal dma access fault type */
8059 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), &trap->FaultSave, sizeof (E3_FaultSave_BE));
8060 +    
8061 +    /* copy all 4 of the dma data fault type */
8062 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), &trap->Data0, 4*sizeof (E3_FaultSave_BE));
8063 +    
8064 +    /* Copy the DMA descriptor */
8065 +    copy_dma_regs (dev, &trap->Desc);
8066 +    
8067 +    /* Copy the packet info */
8068 +    trap->PacketInfo.Value = read_reg32 (dev, Exts.Dmas.DmaRds.DMA_PacketInfo.Value);
8069 +
8070 +    /* update device statistics */
8071 +    BumpStat (dev, DProcTraps);
8072 +    switch (trap->Status.s.TrapType)
8073 +    {
8074 +    case MI_DmaPacketTimedOutOrPacketError:
8075 +       if (trap->PacketInfo.s.PacketTimeout)
8076 +           BumpStat (dev, DmaOutputTimeouts);
8077 +       else if (trap->PacketInfo.s.PacketAckValue == C_ACK_ERROR)
8078 +           BumpStat (dev, DmaPacketAckErrors);
8079 +       break;
8080 +       
8081 +    case MI_DmaFailCountError:
8082 +       BumpStat (dev, DmaRetries);
8083 +       break;
8084 +    }
8085 +
8086 +    /* Must now zero all the FSRs so that a subsequent fault can be seen */
8087 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), sizeof (E3_FaultSave));
8088 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), 4*sizeof (E3_FaultSave));
8089 +           
8090 +    *RestartBits |= RestartDProc;
8091 +    return (TRUE);
8092 +}
8093 +
8094 +void
8095 +DeliverDProcTrap (ELAN3_DEV *dev, DMA_TRAP *dmaTrap, E3_uint32 Pend)
8096 +{
8097 +    ELAN3_CTXT     *ctxt;
8098 +    E3_FaultSave_BE *FaultArea;
8099 +    DMA_TRAP       *trap;
8100 +    register int     i;
8101 +
8102 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
8103 +
8104 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, dmaTrap->Status.s.Context);
8105 +
8106 +    if (ctxt == NULL)
8107 +    {
8108 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "DeliverDProcTrap: context %x invalid\n", dmaTrap->Status.s.Context);
8109 +       BumpStat (dev, InvalidContext);
8110 +    }
8111 +    else
8112 +    {
8113 +       if (ELAN3_OP_DPROC_TRAP (ctxt, dmaTrap) == OP_DEFER)
8114 +       {
8115 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->DmaTrapQ))
8116 +           {
8117 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8118 +               StartSwapoutContext (ctxt, Pend, NULL);
8119 +           }
8120 +           else
8121 +           {
8122 +               trap = ELAN3_QUEUE_BACK (ctxt->DmaTrapQ, ctxt->DmaTraps);
8123 +               
8124 +               bcopy (dmaTrap, trap, sizeof (DMA_TRAP));
8125 +               
8126 +               PRINTF5 (ctxt, DBG_INTR, "DeliverDProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x PacketInfo=%x TrapType=%s\n",
8127 +                        trap->Status.s.WakeupFunction, trap->Status.s.Context, 
8128 +                        trap->Status.s.SuspendAddr, trap->PacketInfo.Value, MiToName (trap->Status.s.TrapType));
8129 +               PRINTF3 (ctxt, DBG_INTR, "                    FaultAddr=%x EventAddr=%x FSR=%x\n",
8130 +                        trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
8131 +                        trap->FaultSave.s.FSR.Status);
8132 +               for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
8133 +                   PRINTF4 (ctxt, DBG_INTR, "                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
8134 +                            FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
8135 +               
8136 +               PRINTF4 (ctxt, DBG_INTR, "                 type %08x size %08x source %08x dest %08x\n",
8137 +                        trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
8138 +               PRINTF2 (ctxt, DBG_INTR, "                 Dest event %08x cookie/proc %08x\n",
8139 +                        trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
8140 +               PRINTF2 (ctxt, DBG_INTR, "                 Source event %08x cookie/proc %08x\n",
8141 +                        trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
8142 +               ELAN3_QUEUE_ADD (ctxt->DmaTrapQ);
8143 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
8144 +               
8145 +               if (ELAN3_QUEUE_FULL (ctxt->DmaTrapQ))
8146 +               {
8147 +                   PRINTF0 (ctxt, DBG_INTR, "DeliverDProcTrap: dma queue full, must swap out\n");
8148 +                   ctxt->Status |= CTXT_DMA_QUEUE_FULL;
8149 +                   
8150 +                   StartSwapoutContext (ctxt, Pend, NULL);
8151 +               }
8152 +           }
8153 +       }
8154 +    }
8155 +}
8156 +
8157 +int
8158 +NextDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
8159 +{
8160 +    ELAN3_DEV *dev = ctxt->Device;
8161 +
8162 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
8163 +    
8164 +    if (ELAN3_QUEUE_EMPTY (ctxt->DmaTrapQ))
8165 +       return (0);
8166 +
8167 +    *trap = *ELAN3_QUEUE_FRONT (ctxt->DmaTrapQ, ctxt->DmaTraps);
8168 +    ELAN3_QUEUE_REMOVE (ctxt->DmaTrapQ);
8169 +    
8170 +    return (1);
8171 +}
8172 +
8173 +void
8174 +ResolveDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
8175 +{
8176 +    E3_FaultSave_BE *FaultArea;
8177 +    int                     FaultHandled = 0;
8178 +    int                     res;
8179 +    register int     i;
8180 +    
8181 +    PRINTF4 (ctxt, DBG_DPROC, "ResolveDProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
8182 +            trap->Status.s.WakeupFunction, trap->Status.s.Context, 
8183 +            trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
8184 +    PRINTF3 (ctxt, DBG_DPROC, "                    FaultAddr=%x EventAddr=%x FSR=%x\n",
8185 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
8186 +            trap->FaultSave.s.FSR.Status);
8187 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
8188 +       PRINTF4 (ctxt, DBG_DPROC, "                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
8189 +                FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
8190 +
8191 +    PRINTF4 (ctxt, DBG_DPROC, "                  type %08x size %08x source %08x dest %08x\n",
8192 +            trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
8193 +    PRINTF2 (ctxt, DBG_DPROC, "                  Dest event %08x cookie/proc %08x\n",
8194 +            trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
8195 +    PRINTF2 (ctxt, DBG_DPROC, "                  Source event %08x cookie/proc %08x\n",
8196 +            trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
8197 +    
8198 +    BumpUserStat (ctxt, DProcTraps);
8199 +
8200 +    switch (trap->Status.s.TrapType)
8201 +    {
8202 +    case MI_DmaPacketTimedOutOrPacketError:
8203 +       /*
8204 +        * Faulted due to packet timeout or a PAckError.
8205 +        * Reset fail count and reissue the same desc.
8206 +        */
8207 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: got a PAckError or the output timed out. Rescheduling dma.\n");
8208 +       if (ElanException (ctxt, EXCEPTION_PACKET_TIMEOUT, DMA_PROC, trap) == OP_IGNORE)
8209 +       {
8210 +           BumpUserStat (ctxt, DmaRetries);
8211 +
8212 +           trap->Desc.s.dma_failCount = DMA_RETRY_FAIL_COUNT;
8213 +
8214 +           RestartDmaTrap (ctxt, trap);
8215 +       }
8216 +       return;
8217 +
8218 +    case MI_DmaFailCountError:
8219 +       /*
8220 +        * Faulted due to dma fail count.
8221 +        * Reset fail count and reissue the same desc.
8222 +        */
8223 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: Reset dma fail count to %d\n", DMA_RETRY_FAIL_COUNT);
8224 +       
8225 +       if (ElanException (ctxt, EXCEPTION_DMA_RETRY_FAIL, DMA_PROC, trap) == OP_IGNORE)
8226 +       {
8227 +           BumpUserStat (ctxt, DmaRetries);
8228 +
8229 +           trap->Desc.s.dma_failCount = DMA_RETRY_FAIL_COUNT;
8230 +
8231 +           RestartDmaTrap (ctxt, trap);
8232 +       }
8233 +       return;
8234 +
8235 +    case MI_TimesliceDmaQueueOverflow:
8236 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: dma timeslice queue overflow\n");
8237 +       RestartDmaTrap (ctxt, trap);
8238 +       return;
8239 +       
8240 +    case MI_UnimplementedError:
8241 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: unimplemented dma trap\n");
8242 +       if (ElanException (ctxt, EXCEPTION_UNIMPLEMENTED, DMA_PROC, trap) == OP_IGNORE)
8243 +           RestartDmaTrap (ctxt, trap);
8244 +       return;
8245 +
8246 +    case MI_EventQueueOverflow:
8247 +    case MI_ThreadQueueOverflow:
8248 +    case MI_DmaQueueOverflow:
8249 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped on a write set event.\n");
8250 +       FixupEventTrap (ctxt, DMA_PROC, trap, trap->Status.s.TrapType, &trap->FaultSave, 0);
8251 +       return;
8252 +
8253 +    case MI_RemoteDmaCommand:
8254 +    case MI_RunDmaCommand:
8255 +    case MI_DequeueNonSysCntxDma:
8256 +    case MI_DequeueSysCntxDma:
8257 +       /*
8258 +        * The DMA processor has trapped due to outstanding prefetches from the previous 
8259 +        * dma.  The "current" dma has not been consumed, so we just ignore the trap
8260 +        */
8261 +       return;
8262 +
8263 +    case MI_WaitForRemoteDescRead2:
8264 +    case MI_ExecuteDmaDescriptorForRun:
8265 +       /*
8266 +        * The DMA processor has trapped while fetching the dma descriptor, so
8267 +        * zero it out to not confuse the user on an error
8268 +        */
8269 +       bzero (&trap->Desc, sizeof (trap->Desc));
8270 +       break;
8271 +    }
8272 +
8273 +    /*
8274 +     * All other uWords will have updated one of the fault areas,  so fix
8275 +     * any faults found in them.  If there were no faults found then it 
8276 +     * must have been a bus error
8277 +     */
8278 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
8279 +    {
8280 +       if (FaultArea->s.FSR.Status != 0)
8281 +       {
8282 +           FaultHandled++;
8283 +
8284 +           ASSERT ((FaultArea->s.FSR.Status & FSR_SizeMask) == FSR_Block64 ||
8285 +                   (FaultArea->s.FSR.Status & FSR_SizeMask) == FSR_Block32);
8286 +           
8287 +           ASSERT (FaultArea->s.FaultContext == trap->Status.s.Context);
8288 +           
8289 +           if (((trap->Desc.s.dma_source & PAGEOFFSET) >= (PAGESIZE-E3_BLK_SIZE)) &&
8290 +               ((trap->Desc.s.dma_source & PAGEMASK) != ((trap->Desc.s.dma_source + trap->Desc.s.dma_size-1) & PAGEMASK)))
8291 +           {
8292 +               /* XXXX: dma started within last 64 bytes of the page
8293 +                *       terminate the process if it has pagefaulted */
8294 +               if (FaultArea->s.FaultAddress == (trap->Desc.s.dma_source & ~(E3_BLK_SIZE-1)))
8295 +               {
8296 +                   printk ("elan%d: invalid dma - context=%x source=%x\n", ctxt->Device->Instance, 
8297 +                           ctxt->Capability.cap_mycontext, trap->Desc.s.dma_source);
8298 +                   
8299 +                   if (ElanException (ctxt, EXCEPTION_BAD_DMA, DMA_PROC, trap, NULL, 0) != OP_IGNORE)
8300 +                       return;
8301 +               }
8302 +           }
8303 +
8304 +           if (trap->Desc.s.dma_size != 0 && (res = elan3_pagefault (ctxt, FaultArea, 1)) != ESUCCESS)
8305 +           {
8306 +               /* XXXX: Rev B Elans can prefetch data passed the end of the dma descriptor */
8307 +               /*       if the fault relates to this, then just ignore it */
8308 +               if (FaultArea->s.FaultAddress < (trap->Desc.s.dma_source+trap->Desc.s.dma_size) ||
8309 +                   FaultArea->s.FaultAddress > (trap->Desc.s.dma_source+trap->Desc.s.dma_size+E3_BLK_SIZE*2))
8310 +               {
8311 +                   PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: elan3_pagefault failed for address %x\n",
8312 +                            FaultArea->s.FaultAddress);
8313 +                   
8314 +                   if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, FaultArea, res) != OP_IGNORE)
8315 +                       return;
8316 +               }
8317 +           }
8318 +       }
8319 +    }
8320 +    
8321 +    if (trap->FaultSave.s.FSR.Status != 0)
8322 +    {
8323 +       FaultHandled++;
8324 +
8325 +       ASSERT (trap->FaultSave.s.FaultContext == trap->Status.s.Context);
8326 +
8327 +       if ((trap->FaultSave.s.FSR.Status & FSR_SizeMask) == FSR_RouteFetch)
8328 +       {
8329 +           res = ResolveVirtualProcess (ctxt, trap->FaultSave.s.FaultAddress & 0xffff); /* mask out cookie */
8330 +
8331 +           switch (res)
8332 +           {
8333 +           default:
8334 +               if (ElanException (ctxt, EXCEPTION_INVALID_PROCESS, DMA_PROC, trap, trap->FaultSave.s.FaultAddress, res) != OP_IGNORE)
8335 +                   return;
8336 +               
8337 +           case EAGAIN:
8338 +               /* XXXX; wait on trail blazing code */
8339 +
8340 +           case 0:
8341 +               break;
8342 +           }
8343 +       }
8344 +       else
8345 +       {
8346 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
8347 +           {
8348 +               PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: elan3_pagefault failed for address %x\n",
8349 +                        trap->FaultSave.s.FaultAddress);
8350 +
8351 +               if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, &trap->FaultSave, res) != OP_IGNORE)
8352 +                   return;
8353 +           }
8354 +       }
8355 +    }
8356 +
8357 +    if (! FaultHandled)
8358 +    {
8359 +       ElanBusError (ctxt->Device);
8360 +
8361 +       if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, &trap->FaultSave, EFAULT) != OP_IGNORE)
8362 +           return;
8363 +    }
8364 +
8365 +    switch (trap->Status.s.TrapType)
8366 +    {
8367 +    case MI_WaitForRemoteDescRead2:
8368 +       /*
8369 +        * Faulted while trying to read the dma descriptor for a read dma.
8370 +        * Fix fault and re-issue using FaultAddress.
8371 +        */
8372 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a remote dma descriptor at %x.\n",
8373 +                trap->FaultSave.s.FaultAddress);
8374 +       
8375 +       RestartDmaPtr (ctxt, trap->FaultSave.s.FaultAddress);
8376 +       break;
8377 +       
8378 +    case MI_ExecuteDmaDescriptorForRun:
8379 +       /*
8380 +        * Faulted while trying to read the dma descriptor for a write dma.
8381 +        * Fix fault and re-issue using FaultAddress.
8382 +        */
8383 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a write dma descriptor at %x.\n", 
8384 +                trap->FaultSave.s.FaultAddress);
8385 +       
8386 +       RestartDmaPtr (ctxt, trap->FaultSave.s.FaultAddress);
8387 +       break;
8388 +       
8389 +    case MI_WaitForRemoteRoutes1:
8390 +    case MI_WaitForRemoteRoutes2:
8391 +    case MI_SendRemoteDmaDesc:
8392 +    case MI_SendDmaIdentify:
8393 +    case MI_SendRemoteDmaRoutes2:
8394 +    case MI_WaitForDmaRoutes1:
8395 +    case MI_DmaLoop:
8396 +    case MI_ExitDmaLoop:
8397 +    case MI_GetDestEventValue:
8398 +    case MI_SendFinalUnlockTrans:
8399 +    case MI_SendNullSetEvent:
8400 +    case MI_SendFinalSetEvent:
8401 +    case MI_SendDmaEOP:
8402 +       /*
8403 +        * Faulted either fetching routes or fetching dma data.
8404 +        * Fix fault and re-issue using FaultAddress.
8405 +        */
8406 +
8407 +    case MI_SendEOPforRemoteDma:
8408 +    case MI_LookAtRemoteAck:
8409 +    case MI_FailedAckIfCCis0:
8410 +       /*
8411 +        * Possible fault when reading the remote desc into the dma data buffers
8412 +        */
8413 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap:  trapped reading a dma data or fetching a route\n");
8414 +       RestartDmaTrap (ctxt, trap);
8415 +       break;
8416 +       
8417 +    case MI_DequeueSysCntxDma:
8418 +    case MI_DequeueNonSysCntxDma:
8419 +    case MI_RemoteDmaCommand:
8420 +    case MI_RunDmaCommand:
8421 +       /*
8422 +        * It is possible that a dma can get back onto the queue while outstanding dma
8423 +        * have not finished trapping. In this case the trap can be ignored as the dma
8424 +        * state has been saved. It might trap again the next time it comes to the front
8425 +        * of the queue and be fixed then.
8426 +        */
8427 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trap after dma has finished. ignored\n");
8428 +       break;
8429 +       
8430 +    default:
8431 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped on a write set event.\n");
8432 +       FixupEventTrap (ctxt, DMA_PROC, trap, trap->Status.s.TrapType, &trap->FaultSave, 0);
8433 +       break;
8434 +    }
8435 +}
8436 +
8437 +int
8438 +DProcNeedsRestart (ELAN3_CTXT *ctxt)
8439 +{
8440 +    return (ctxt->ItemCount[LIST_DMA_PTR] != 0 ||
8441 +           ctxt->ItemCount[LIST_DMA_DESC] != 0);
8442 +}
8443 +
8444 +void
8445 +RestartDProcItems (ELAN3_CTXT *ctxt)
8446 +{
8447 +    void      *item;
8448 +    E3_Addr    value;
8449 +    int               res;
8450 +    
8451 +    kmutex_lock (&ctxt->SwapListsLock);
8452 +    while (ctxt->ItemCount[LIST_DMA_PTR])
8453 +    {
8454 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_DMA_PTR, &item, &value))
8455 +           ctxt->ItemCount[LIST_DMA_PTR] = 0;
8456 +       else
8457 +       {
8458 +           PRINTF1 (ctxt, DBG_DPROC, "RestartDProc: issue write dma at %x\n", value);
8459 +           PrintUserDma (ctxt, value);
8460 +
8461 +           res = IssueDmaCommand (ctxt, value, NULL, 0);
8462 +           
8463 +           if (res == ISSUE_COMMAND_RETRY)
8464 +           {
8465 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_DMA_PTR, item);
8466 +               kmutex_unlock (&ctxt->SwapListsLock);
8467 +               return;
8468 +           }
8469 +           
8470 +           ctxt->ItemCount[LIST_DMA_PTR]--;
8471 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
8472 +       }
8473 +    }
8474 +    
8475 +    while (ctxt->ItemCount[LIST_DMA_DESC])
8476 +    {
8477 +       if (! ELAN3_OP_GET_BLOCK_ITEM (ctxt, LIST_DMA_DESC, &item, &value))
8478 +           ctxt->ItemCount[LIST_DMA_DESC] = 0;
8479 +       else
8480 +       {
8481 +           PRINTF1 (ctxt, DBG_DPROC, "RestartDProc: issue dma desc at %x\n", value);
8482 +           PrintUserDma (ctxt, value);
8483 +
8484 +           res = IssueDmaCommand (ctxt, value, item, 0);
8485 +
8486 +           switch (res)
8487 +           {
8488 +           case ISSUE_COMMAND_OK:
8489 +               ctxt->ItemCount[LIST_DMA_DESC]--;
8490 +               ELAN3_OP_FREE_BLOCK_ITEM (ctxt, item);
8491 +               break;
8492 +               
8493 +           case ISSUE_COMMAND_RETRY:
8494 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_DMA_DESC, item);
8495 +               kmutex_unlock (&ctxt->SwapListsLock);
8496 +               return;
8497 +               
8498 +           case ISSUE_COMMAND_TRAPPED:
8499 +               ctxt->ItemCount[LIST_DMA_DESC]--;
8500 +               /* The item will be freed off when the command port trap */
8501 +               /* fixed up and the command successfully re-issued */
8502 +               break;
8503 +           }
8504 +       }
8505 +    }
8506 +
8507 +    kmutex_unlock (&ctxt->SwapListsLock);
8508 +}
8509 +
8510 +void
8511 +RestartDmaDesc(ELAN3_CTXT *ctxt, E3_DMA_BE *desc)
8512 +{
8513 +    kmutex_lock (&ctxt->SwapListsLock);
8514 +    if (desc->s.dma_direction != DMA_WRITE)
8515 +       desc->s.dma_direction = (desc->s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
8516 +
8517 +    ELAN3_OP_PUT_BLOCK_ITEM (ctxt, LIST_DMA_DESC, (E3_uint32 *) desc);
8518 +    ctxt->ItemCount[LIST_DMA_DESC]++;
8519 +
8520 +    kmutex_unlock (&ctxt->SwapListsLock);
8521 +}
8522 +
8523 +void
8524 +RestartDmaTrap(ELAN3_CTXT *ctxt, DMA_TRAP *trap)
8525 +{
8526 +    /* Negative length DMAs are illegal, since they hangup the dma processor,
8527 +     * if they got generated then they will have been spotted by PollForDmahungup,
8528 +     * and delivered to us with a Dequeue  suspend address,
8529 +     *
8530 +     * GNAT sw-elan3/3908: Moved this check into this new function to avoid
8531 +     * it sampling old or invalid register state
8532 +     */
8533 +    if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
8534 +       ElanException (ctxt, EXCEPTION_BAD_DMA, DMA_PROC, trap, NULL, 0);
8535 +    else
8536 +       RestartDmaDesc (ctxt, &trap->Desc);
8537 +}
8538 +
8539 +void
8540 +RestartDmaPtr (ELAN3_CTXT *ctxt, E3_Addr ptr)
8541 +{
8542 +    kmutex_lock (&ctxt->SwapListsLock);
8543 +    ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_DMA_PTR, ptr);
8544 +    ctxt->ItemCount[LIST_DMA_PTR]++;
8545 +    kmutex_unlock (&ctxt->SwapListsLock);
8546 +}
8547 +
8548 +static void
8549 +PrintUserDma (ELAN3_CTXT *ctxt, E3_Addr addr)
8550 +{
8551 +    E3_DMA *dma;
8552 +
8553 +    /* Dont call a function which takes locks unless we need to */
8554 +    if (!(elan3_debug & DBG_DPROC))
8555 +        return;
8556 +
8557 +    dma = (E3_DMA *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
8558 +
8559 +    PRINTF4 (ctxt, DBG_DPROC, "DMA: type %08x size %08x source %08x dest %08x\n",
8560 +            fuword ((int *) &dma->dma_type), fuword ((int *) &dma->dma_size), 
8561 +            fuword ((int *) &dma->dma_source), fuword ((int *) &dma->dma_dest));
8562 +    PRINTF4 (ctxt, DBG_DPROC, "DMA: Dest %08x %08x  Local %08x %08x\n",
8563 +            fuword ((int *) &dma->dma_destEvent), fuword ((int *) &dma->dma_destCookieProc), 
8564 +            fuword ((int *) &dma->dma_srcEvent), fuword ((int *) &dma->dma_srcCookieProc));
8565 +}
8566 +
8567 +/*
8568 + * Local variables:
8569 + * c-file-style: "stroustrup"
8570 + * End:
8571 + */
8572 Index: linux-2.4.21/drivers/net/qsnet/elan3/elan3mmu_generic.c
8573 ===================================================================
8574 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/elan3mmu_generic.c        2004-02-23 16:02:56.000000000 -0500
8575 +++ linux-2.4.21/drivers/net/qsnet/elan3/elan3mmu_generic.c     2005-06-01 23:12:54.573443056 -0400
8576 @@ -0,0 +1,3255 @@
8577 +/*
8578 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
8579 + *
8580 + *    For licensing information please see the supplied COPYING file
8581 + *
8582 + */
8583 +
8584 +#ident "@(#)$Id: elan3mmu_generic.c,v 1.75.2.1 2004/12/14 10:19:51 mike Exp $"
8585 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/vm/elan3mmu_generic.c,v $*/
8586 +
8587 +#include <qsnet/kernel.h>
8588 +
8589 +#include <elan3/elanregs.h>
8590 +#include <elan3/elandev.h>
8591 +#include <elan3/elanvp.h>
8592 +#include <elan3/elan3mmu.h>
8593 +#include <elan3/elanctxt.h>
8594 +#include <elan3/elandebug.h>
8595 +#include <elan3/urom_addrs.h>
8596 +#include <elan3/thread.h>
8597 +
8598 +#ifdef CONFIG_MPSAS
8599 +#  define zero_all_ptbls
8600 +#endif
8601 +
8602 +/*
8603 + * Debugging
8604 + */
8605 +int    elan3mmu_debug = 0;
8606 +
8607 +#define        N_L3PTBL_MTX    (0x20)
8608 +#define        N_L2PTBL_MTX    (0x40)
8609 +#define        N_L1PTBL_MTX    (0x20)
8610 +
8611 +#define        L3PTBL_MTX_HASH(p) \
8612 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L3PTBL_MTX - 1))
8613 +static spinlock_t l3ptbl_lock[N_L3PTBL_MTX];
8614 +
8615 +#define        L2PTBL_MTX_HASH(p)   \
8616 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L2PTBL_MTX - 1))
8617 +static spinlock_t l2ptbl_lock[N_L2PTBL_MTX];
8618 +
8619 +#define        L1PTBL_MTX_HASH(p)   \
8620 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L1PTBL_MTX - 1))
8621 +static spinlock_t l1ptbl_lock[N_L1PTBL_MTX];
8622 +
8623 +
8624 +#define        BASE2VA(p)      ((E3_Addr)((p)->ptbl_base << 16))
8625 +#define        VA2BASE(v)      ((u_short)(((uintptr_t)(v)) >> 16))
8626 +
8627 +ELAN3MMU_GLOBAL_STATS  elan3mmu_global_stats;
8628 +
8629 +static void          elan3mmu_flush_context_filter (ELAN3_DEV *dev, void *);
8630 +static void          elan3mmu_unload_loop (ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl, int first_valid, int nptes, int flags);
8631 +
8632 +static ELAN3_PTBL    *elan3mmu_create_ptbls (ELAN3_DEV *dev, int level, int attr, int keep);
8633 +static ELAN3_PTBL    *elan3mmu_ta_to_ptbl (ELAN3MMU *elan3mmu, ELAN3_PTP *ptp);
8634 +
8635 +static ELAN3_PTBL    *elan3mmu_alloc_pte    (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, int *idx);
8636 +void                 elan3mmu_free_lXptbl  (ELAN3_DEV *dev, ELAN3_PTBL *ptbl);
8637 +
8638 +void                 elan3mmu_free_pte  (ELAN3_DEV *dev,  ELAN3MMU *elan3mmu,  ELAN3_PTBL *ptbl_ptr, int idx);
8639 +
8640 +static ELAN3_PTBL    *elan3mmu_alloc_l1ptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu);
8641 +static ELAN3_PTBL    *elan3mmu_alloc_l2ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu,
8642 +                                           E3_Addr base, spinlock_t **plock, unsigned long *flags);
8643 +static ELAN3_PTBL    *elan3mmu_alloc_l3ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu,
8644 +                                           E3_Addr base, spinlock_t **plock, unsigned long *flags);
8645 +
8646 +static int          elan3mmu_steal_this_ptbl (ELAN3_DEV *dev, ELAN3_PTBL *l3ptbl);
8647 +static ELAN3_PTBL    *elan3mmu_steal_l3ptbl (ELAN3_DEV *dev, int attr);
8648 +
8649 +static spinlock_t   *elan3mmu_ptbl_to_lock (int level, ELAN3_PTBL *ptbl);
8650 +
8651 +/*
8652 + * Encoding of MMU permissions against access type,
8653 + * to allow quick permission checking against access 
8654 + * type.
8655 + */
8656 +u_char elan3mmu_permissionTable[] =
8657 +{
8658 +    0xcc,      /* 11001100 ELAN3_PERM_NULL        */
8659 +    0x01,      /* 00000001 ELAN3_PERM_LOCALREAD   */
8660 +    0x05,      /* 00000101 ELAN3_PERM_READ        */
8661 +    0x33,      /* 00110011 ELAN3_PERM_NOREMOTE    */
8662 +    0x37,      /* 00110111 ELAN3_PERM_REMOTEREAD  */
8663 +    0x3f,      /* 00111111 ELAN3_PERM_REMOTEWRITE */
8664 +    0xf7,      /* 11110111 ELAN3_PERM_REMOTEEVENT */
8665 +    0xff,      /* 11111111 ELAN3_PERM_REMOTEALL          */
8666 +} ;
8667 +
8668 +void
8669 +elan3mmu_init()
8670 +{
8671 +    register int i;
8672 +
8673 +    HAT_PRINTF0 (1, "elan3mmu_init: initialising elan mmu\n");
8674 +
8675 +    for (i = 0; i < N_L1PTBL_MTX; i++)
8676 +       spin_lock_init (&l1ptbl_lock[i]);
8677 +
8678 +    for (i = 0; i < N_L2PTBL_MTX; i++)
8679 +       spin_lock_init (&l2ptbl_lock[i]);
8680 +
8681 +    for (i = 0; i < N_L3PTBL_MTX; i++)
8682 +       spin_lock_init (&l3ptbl_lock[i]);
8683 +
8684 +    elan3mmu_global_stats.version = ELAN3MMU_STATS_VERSION;
8685 +
8686 +    elan3mmu_init_osdep();
8687 +}
8688 +
8689 +void
8690 +elan3mmu_fini()
8691 +{
8692 +    register int i;
8693 +
8694 +    HAT_PRINTF0 (1, "elan3mmu_fini: finalising elan mmu\n");
8695 +
8696 +    for (i = 0; i < N_L1PTBL_MTX; i++)
8697 +       spin_lock_destroy (&l1ptbl_lock[i]);
8698 +
8699 +    for (i = 0; i < N_L2PTBL_MTX; i++)
8700 +       spin_lock_destroy (&l2ptbl_lock[i]);
8701 +
8702 +    for (i = 0; i < N_L3PTBL_MTX; i++)
8703 +       spin_lock_destroy (&l3ptbl_lock[i]);
8704 +
8705 +    elan3mmu_fini_osdep();
8706 +}
8707 +
8708 +ELAN3MMU *
8709 +elan3mmu_alloc (ELAN3_CTXT *ctxt)
8710 +{
8711 +    ELAN3MMU  *elan3mmu;
8712 +    ELAN3_PTBL *l1ptbl;
8713 +
8714 +    ALLOC_ELAN3MMU (elan3mmu, TRUE);
8715 +    
8716 +    spin_lock_init (&elan3mmu->elan3mmu_lock);
8717 +
8718 +    spin_lock (&elan3mmu->elan3mmu_lock);                      /* lock_lint */
8719 +
8720 +    elan3mmu->elan3mmu_ergns    = NULL;
8721 +    elan3mmu->elan3mmu_etail    = NULL;
8722 +    elan3mmu->elan3mmu_ergnlast = NULL;
8723 +    elan3mmu->elan3mmu_mrgns    = NULL;
8724 +    elan3mmu->elan3mmu_mtail    = NULL;
8725 +    elan3mmu->elan3mmu_mrgnlast = NULL;
8726 +    elan3mmu->elan3mmu_ctxt     = ctxt;
8727 +
8728 +    spin_lock_init (&elan3mmu->elan3mmu_lXptbl_lock);
8729 +    elan3mmu->elan3mmu_lXptbl   = NULL;
8730 +
8731 +    spin_unlock (&elan3mmu->elan3mmu_lock);                    /* lock_lint */
8732 +
8733 +    l1ptbl = elan3mmu_alloc_l1ptbl(ctxt->Device, 0, elan3mmu);
8734 +
8735 +    elan3mmu->elan3mmu_ctp      = (sdramaddr_t) 0;
8736 +    elan3mmu->elan3mmu_dev      = ctxt->Device;
8737 +    elan3mmu->elan3mmu_l1ptbl   = l1ptbl;
8738 +
8739 +    /* Ensure that there are at least some level 3 page tables,  since if a level 2 and */
8740 +    /* a level 3 table are allocated together, then the level 3 is allocated with the NO_ALLOC */
8741 +    /* flag,  thus there MUST be at least one that can be stolen or on the free list */
8742 +    if (elan3mmu->elan3mmu_dev->Level[PTBL_LEVEL_3].PtblFreeList == NULL)
8743 +       elan3mmu_create_ptbls (elan3mmu->elan3mmu_dev, PTBL_LEVEL_3, 0, 0);
8744 +
8745 +    HAT_PRINTF1 (1, "elan3mmu_alloc: elan3mmu %p\n", elan3mmu);
8746 +
8747 +    elan3mmu_alloc_osdep (elan3mmu);
8748 +
8749 +    return (elan3mmu);
8750 +}
8751 +
8752 +void 
8753 +elan3mmu_free (ELAN3MMU *elan3mmu)
8754 +{
8755 +    ELAN3MMU_RGN   *rgn;
8756 +    ELAN3_PTBL    *l1ptbl;
8757 +    spinlock_t    *l1lock;
8758 +    unsigned long   l1flags;
8759 +    unsigned long   flags;
8760 +
8761 +    HAT_PRINTF1 (1, "elan3mmu_free : elan3mmu %p\n", elan3mmu);
8762 +    
8763 +    /*
8764 +     * Invalidate the level1 page table,  since it's already removed
8765 +     * from the context table, there is no need to flush the tlb.
8766 +     */
8767 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
8768 +    elan3mmu->elan3mmu_l1ptbl = NULL;
8769 +    
8770 +    if (elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, (E3_Addr) 0, PTBL_LEVEL_1, &l1lock, &l1flags) == LK_PTBL_OK)
8771 +    {
8772 +       elan3mmu_l1inval (elan3mmu, l1ptbl, PTE_UNLOAD_NOFLUSH);
8773 +       elan3mmu_free_l1ptbl (elan3mmu->elan3mmu_dev, l1ptbl, l1lock, l1flags);
8774 +    }
8775 +
8776 +    /*
8777 +     * Free of any permission regions.
8778 +     */
8779 +    spin_lock (&elan3mmu->elan3mmu_lock);                                      /* lock_lint */
8780 +    while ((rgn = elan3mmu->elan3mmu_mrgns) != NULL)
8781 +    {
8782 +       spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);           /* lock_lint */
8783 +       elan3mmu_removergn_elan (elan3mmu, rgn->rgn_ebase);
8784 +       elan3mmu_removergn_main (elan3mmu, rgn->rgn_mbase);
8785 +       spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);      /* lock_lint */
8786 +       
8787 +       FREE_ELAN3MMU_RGN (rgn);
8788 +    }
8789 +    elan3mmu->elan3mmu_mrgnlast = NULL;
8790 +    elan3mmu->elan3mmu_ergnlast = NULL;
8791 +
8792 +    /* 
8793 +     * Free the lXptbl list
8794 +     */
8795 +    ASSERT (elan3mmu->elan3mmu_lXptbl == NULL); /* XXXX MRH need to add list removal */
8796
8797 +    elan3mmu->elan3mmu_lXptbl = NULL;
8798 +    spin_lock_destroy (&elan3mmu->elan3mmu_lXptbl_lock);
8799 +
8800 +
8801 +    spin_unlock (&elan3mmu->elan3mmu_lock);                                    /* lock_lint */
8802 +
8803 +    spin_lock_destroy (&elan3mmu->elan3mmu_lock);
8804 +
8805 +    FREE_ELAN3MMU (elan3mmu);
8806 +}
8807 +
8808 +/*================================================================================*/
8809 +/* Interface routines to device driver */
8810 +static void
8811 +elan3mmu_flush_context_filter (ELAN3_DEV *dev, void *arg)
8812 +{
8813 +    unsigned long flags;
8814 +
8815 +    spin_lock_irqsave (&dev->IntrLock, flags);
8816 +    ASSERT ((read_reg32 (dev, Exts.InterruptReg) & (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)) == 
8817 +           (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx));
8818 +
8819 +    dev->FilterHaltQueued = 0;
8820 +
8821 +    write_reg32 (dev, Input_Context_Fil_Flush, 0);
8822 +
8823 +    HAT_PRINTF0 (1, "elan3mmu_flush_context_filter completed\n");
8824 +
8825 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
8826 +}
8827 +
8828 +void
8829 +elan3mmu_set_context_filter (ELAN3_DEV *dev, int ctx, int disabled, E3_uint32 Pend, E3_uint32 *Maskp)
8830 +{
8831 +    int         mctx = ctx & MAX_ROOT_CONTEXT_MASK;
8832 +    sdramaddr_t ctp  = dev->ContextTable + mctx * sizeof (E3_ContextControlBlock);
8833 +
8834 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
8835 +
8836 +    ASSERT ((mctx < 32 || mctx >= ELAN3_KCOMM_BASE_CONTEXT_NUM) ? (ctx & SYS_CONTEXT_BIT) : ! (ctx & SYS_CONTEXT_BIT));
8837 +
8838 +    elan3_sdram_writel (dev, ctp + offsetof (E3_ContextControlBlock, filter), 
8839 +                 ((ctx & SYS_CONTEXT_BIT) ? E3_CCB_CNTX0 : 0) | (disabled ? E3_CCB_DISCARD_ALL : 0));
8840 +
8841 +    HAT_PRINTF4 (1, "elan3mmu_set_context_filter: ctx %x [%lx] -> %s (%x)\n", ctx, ctp,
8842 +                disabled ? "up" : "down", elan3_sdram_readl (dev, ctp + offsetof (E3_ContextControlBlock, filter)));
8843 +
8844 +    /* queue a halt operation to flush the context filter while the inputter is halted */
8845 +    if (dev->FilterHaltQueued == 0)
8846 +    {
8847 +       dev->FilterHaltQueued = 1;
8848 +       QueueHaltOperation (dev, Pend, Maskp, INT_DiscardingSysCntx | INT_DiscardingNonSysCntx, 
8849 +                           elan3mmu_flush_context_filter, NULL);
8850 +    }
8851 +}
8852 +
8853 +int
8854 +elan3mmu_attach (ELAN3_DEV *dev, int ctx, ELAN3MMU *elan3mmu, sdramaddr_t routeTable, E3_uint32 routeMask)
8855 +{
8856 +    sdramaddr_t ctp;
8857 +    ELAN3_PTP    trootptp;
8858 +
8859 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
8860 +
8861 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
8862 +    
8863 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
8864 +       return (EINVAL);
8865 +
8866 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
8867 +    
8868 +    trootptp = elan3_readptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP));
8869 +    
8870 +    if (ELAN3_PTP_TYPE(trootptp) != ELAN3_ET_INVALID)
8871 +       return (EBUSY);
8872 +
8873 +    elan3mmu->elan3mmu_ctp = ctp;
8874 +    
8875 +    trootptp = PTBL_TO_PTADDR (elan3mmu->elan3mmu_l1ptbl) | ELAN3_ET_PTP;
8876 +    
8877 +    HAT_PRINTF4 (1, "elan3mmu_attach: ctp at %08lx : trootptp=%08x VPT_ptr=%08lx VPT_mask=%08x\n",
8878 +                ctp, trootptp, routeTable, routeMask);
8879 +    
8880 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP), trootptp);
8881 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_ptr), routeTable);
8882 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_mask), routeMask);
8883 +    
8884 +    return (ESUCCESS);
8885 +}
8886 +
8887 +void
8888 +elan3mmu_detach (ELAN3_DEV *dev, int ctx)
8889 +{
8890 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
8891 +    sdramaddr_t ctp;
8892 +    
8893 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
8894 +    
8895 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
8896 +       return;
8897 +    
8898 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
8899 +    
8900 +    HAT_PRINTF1 (1, "elan3mmu_detach: clearing ptp at %lx\n", ctp);
8901 +    
8902 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP), invalidptp);
8903 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_mask), 0);
8904 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_ptr), 0);
8905 +    
8906 +    ElanFlushTlb (dev);
8907 +}
8908 +
8909 +int
8910 +elan3mmu_reference (ELAN3MMU *elan3mmu, int ctx)
8911 +{
8912 +    ELAN3_DEV              *dev = elan3mmu->elan3mmu_dev;
8913 +    sdramaddr_t            ctp;
8914 +    E3_ContextControlBlock ccb;
8915 +    ELAN3_PTP               trootptp;
8916 +
8917 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
8918 +    
8919 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
8920 +       return (EINVAL);
8921 +
8922 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
8923 +
8924 +    trootptp = elan3_readptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP));
8925 +    
8926 +    if (ELAN3_PTP_TYPE(trootptp) != ELAN3_ET_INVALID)
8927 +       return (EBUSY);
8928 +    
8929 +    elan3_sdram_copyl_from_sdram (dev, elan3mmu->elan3mmu_ctp, &ccb, sizeof (E3_ContextControlBlock));
8930 +    elan3_sdram_copyl_to_sdram (dev, &ccb, ctp, sizeof (E3_ContextControlBlock));
8931 +    
8932 +    return (ESUCCESS);
8933 +    
8934 +}
8935 +/*================================================================================*/
8936 +/* Elan permission regions. */
8937 +
8938 +/* elan address region management */
8939 +ELAN3MMU_RGN *
8940 +elan3mmu_findrgn_elan (ELAN3MMU *elan3mmu,
8941 +                      E3_Addr addr, int tail)
8942 +{
8943 +    ELAN3MMU_RGN *next = NULL;
8944 +    ELAN3MMU_RGN *rgn;
8945 +    ELAN3MMU_RGN *hirgn;
8946 +    ELAN3MMU_RGN *lorgn;
8947 +    E3_Addr       base;
8948 +    E3_Addr       lastaddr;
8949 +    int                  forward;
8950 +
8951 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) || SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
8952 +
8953 +    if (elan3mmu->elan3mmu_ergns == NULL)
8954 +       return (NULL);
8955 +
8956 +    rgn = elan3mmu->elan3mmu_ergnlast;
8957 +    if (rgn == NULL)
8958 +       rgn = elan3mmu->elan3mmu_ergns;
8959 +
8960 +    forward = 0;
8961 +    if ((u_long) (base = rgn->rgn_ebase) < (u_long)addr)
8962 +    {
8963 +       if ((u_long)addr <= ((u_long) base + rgn->rgn_len - 1))
8964 +           return (rgn);                                       /* ergnlast contained addr */
8965 +
8966 +       hirgn = elan3mmu->elan3mmu_etail;
8967 +
8968 +       if ((u_long) (lastaddr = (hirgn->rgn_ebase + hirgn->rgn_len - 1)) < (u_long) addr)
8969 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
8970 +       
8971 +       if ((u_long) (addr - base) > (u_long) (lastaddr - addr))
8972 +           rgn = hirgn;
8973 +       else
8974 +       {
8975 +           rgn = rgn->rgn_enext;
8976 +           forward++;
8977 +       }
8978 +    }
8979 +    else
8980 +    {
8981 +       lorgn = elan3mmu->elan3mmu_ergns;
8982 +
8983 +       if ((u_long)lorgn->rgn_ebase > (u_long) addr)
8984 +           return (lorgn);                                     /* lowest regions is higher than addr */
8985 +       if ((u_long)(addr - lorgn->rgn_ebase) < (u_long) (base - addr))
8986 +       {
8987 +           rgn = lorgn;                                        /* search forward from head */
8988 +           forward++;
8989 +       }
8990 +    }
8991 +    if (forward)
8992 +    {
8993 +       while ((u_long)(rgn->rgn_ebase + rgn->rgn_len - 1) < (u_long)addr)
8994 +           rgn = rgn->rgn_enext;
8995 +
8996 +       if ((u_long)rgn->rgn_ebase <= (u_long)addr)
8997 +           elan3mmu->elan3mmu_ergnlast = rgn;
8998 +       return (rgn);
8999 +    }
9000 +    else
9001 +    {
9002 +       while ((u_long)rgn->rgn_ebase > (u_long)addr)
9003 +       {
9004 +           next = rgn;
9005 +           rgn = rgn->rgn_eprev;
9006 +       }
9007 +
9008 +       if ((u_long) (rgn->rgn_ebase + rgn->rgn_len - 1) < (u_long)addr)
9009 +           return (next);
9010 +       else
9011 +       {
9012 +           elan3mmu->elan3mmu_ergnlast = rgn;
9013 +           return (rgn);
9014 +       }
9015 +    }
9016 +}
9017 +
9018 +int
9019 +elan3mmu_addrgn_elan (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn)
9020 +{
9021 +    ELAN3MMU_RGN *rgn   = elan3mmu_findrgn_elan (elan3mmu, nrgn->rgn_ebase, 1);
9022 +    E3_Addr       nbase = nrgn->rgn_ebase;
9023 +    E3_Addr      ntop  = nbase + nrgn->rgn_len - 1; /* avoid wrap */
9024 +    E3_Addr      base;
9025 +
9026 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9027 +
9028 +    if (rgn == NULL)
9029 +    {
9030 +       elan3mmu->elan3mmu_ergns = elan3mmu->elan3mmu_etail = nrgn;
9031 +       nrgn->rgn_enext = nrgn->rgn_eprev = NULL;
9032 +    }
9033 +    else
9034 +    {
9035 +       base = rgn->rgn_ebase;
9036 +
9037 +       if ((u_long)(base + rgn->rgn_len - 1) < (u_long)nbase)  /* top of region below requested address */
9038 +       {                                                       /* so insert after region (and hence at end */
9039 +           nrgn->rgn_eprev = rgn;                              /* of list */
9040 +           nrgn->rgn_enext = NULL;
9041 +           rgn->rgn_enext = elan3mmu->elan3mmu_etail = nrgn;
9042 +       }
9043 +       else
9044 +       {
9045 +           if ((u_long)nbase >= (u_long)base || (u_long)ntop >= (u_long)base)
9046 +               return (-1);                                    /* overlapping region */
9047 +
9048 +           nrgn->rgn_enext = rgn;                              /* insert before region */
9049 +           nrgn->rgn_eprev = rgn->rgn_eprev;
9050 +           rgn->rgn_eprev  = nrgn;
9051 +           if (elan3mmu->elan3mmu_ergns == rgn)
9052 +               elan3mmu->elan3mmu_ergns = nrgn;
9053 +           else
9054 +               nrgn->rgn_eprev->rgn_enext = nrgn;
9055 +       }
9056 +    }
9057 +    elan3mmu->elan3mmu_ergnlast = nrgn;
9058 +    
9059 +    return (0);
9060 +}
9061 +
9062 +ELAN3MMU_RGN *
9063 +elan3mmu_removergn_elan (ELAN3MMU *elan3mmu, E3_Addr addr)
9064 +{
9065 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0);
9066 +    
9067 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9068 +
9069 +    if (rgn == NULL || rgn->rgn_ebase != addr)
9070 +       return (NULL);
9071 +    
9072 +    elan3mmu->elan3mmu_ergnlast = rgn->rgn_enext;
9073 +    if (rgn == elan3mmu->elan3mmu_etail)
9074 +       elan3mmu->elan3mmu_etail = rgn->rgn_eprev;
9075 +    else
9076 +       rgn->rgn_enext->rgn_eprev = rgn->rgn_eprev;
9077 +    
9078 +    if (rgn == elan3mmu->elan3mmu_ergns)
9079 +       elan3mmu->elan3mmu_ergns = rgn->rgn_enext;
9080 +    else
9081 +       rgn->rgn_eprev->rgn_enext = rgn->rgn_enext;
9082 +
9083 +    return (rgn);
9084 +}
9085 +
9086 +ELAN3MMU_RGN *
9087 +elan3mmu_rgnat_elan (ELAN3MMU *elan3mmu, E3_Addr addr)
9088 +{
9089 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0);
9090 +    E3_Addr       base;
9091 +
9092 +    if (rgn != NULL && (u_long)(base = rgn->rgn_ebase) <= (u_long)addr && (u_long)addr <= (u_long)(base + rgn->rgn_len - 1))
9093 +       return (rgn);
9094 +    return (NULL);
9095 +}
9096 +
9097 +/* main address region management */
9098 +ELAN3MMU_RGN *
9099 +elan3mmu_findrgn_main (ELAN3MMU *elan3mmu,
9100 +                      caddr_t addr, int tail)
9101 +{
9102 +    ELAN3MMU_RGN *next = NULL;
9103 +    ELAN3MMU_RGN *rgn;
9104 +    ELAN3MMU_RGN *hirgn;
9105 +    ELAN3MMU_RGN *lorgn;
9106 +    caddr_t       lastaddr;
9107 +    caddr_t       base;
9108 +    int                  forward;
9109 +
9110 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) || SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9111 +
9112 +    if (elan3mmu->elan3mmu_mrgns == NULL)
9113 +       return (NULL);
9114 +
9115 +    rgn = elan3mmu->elan3mmu_mrgnlast;
9116 +    if (rgn == NULL)
9117 +       rgn = elan3mmu->elan3mmu_mrgns;
9118 +
9119 +    forward = 0;
9120 +    if ((base = rgn->rgn_mbase) < addr)
9121 +    {
9122 +       if (addr <= (base + rgn->rgn_len - 1))
9123 +           return (rgn);                                       /* ergnlast contained addr */
9124 +
9125 +       hirgn = elan3mmu->elan3mmu_mtail;
9126 +       if ((lastaddr = hirgn->rgn_mbase + hirgn->rgn_len - 1) < addr)
9127 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
9128 +       
9129 +       if ((addr - base) > (lastaddr - addr))
9130 +           rgn = hirgn;
9131 +       else
9132 +       {
9133 +           rgn = rgn->rgn_mnext;
9134 +           forward++;
9135 +       }
9136 +    }
9137 +    else
9138 +    {
9139 +       lorgn = elan3mmu->elan3mmu_mrgns;
9140 +       if (lorgn->rgn_mbase > addr)
9141 +           return (lorgn);                                     /* lowest regions is higher than addr */
9142 +       if ((addr - lorgn->rgn_mbase) < (base - addr))
9143 +       {
9144 +           rgn = lorgn;                                        /* search forward from head */
9145 +           forward++;
9146 +       }
9147 +    }
9148 +    if (forward)
9149 +    {
9150 +       while ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
9151 +           rgn = rgn->rgn_mnext;
9152 +
9153 +       if (rgn->rgn_mbase <= addr)
9154 +           elan3mmu->elan3mmu_mrgnlast = rgn;
9155 +       return (rgn);
9156 +    }
9157 +    else
9158 +    {
9159 +       while (rgn->rgn_mbase > addr)
9160 +       {
9161 +           next = rgn;
9162 +           rgn = rgn->rgn_mprev;
9163 +       }
9164 +       if ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
9165 +           return (next);
9166 +       else
9167 +       {
9168 +           elan3mmu->elan3mmu_mrgnlast = rgn;
9169 +           return (rgn);
9170 +       }
9171 +    }
9172 +}
9173 +
9174 +int
9175 +elan3mmu_addrgn_main (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn)
9176 +{
9177 +    ELAN3MMU_RGN *rgn   = elan3mmu_findrgn_main (elan3mmu, nrgn->rgn_mbase, 1);
9178 +    caddr_t       nbase = nrgn->rgn_mbase;
9179 +    caddr_t      ntop  = nbase + nrgn->rgn_len - 1;
9180 +    caddr_t      base;
9181 +
9182 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9183 +
9184 +    if (rgn == NULL)
9185 +    {
9186 +       elan3mmu->elan3mmu_mrgns = elan3mmu->elan3mmu_mtail = nrgn;
9187 +       nrgn->rgn_mnext = nrgn->rgn_mprev = NULL;
9188 +    }
9189 +    else
9190 +    {
9191 +       base = rgn->rgn_mbase;
9192 +
9193 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
9194 +       {                                                       /* so insert after region (and hence at end */
9195 +           nrgn->rgn_mprev = rgn;                              /* of list */
9196 +           nrgn->rgn_mnext = NULL;
9197 +           rgn->rgn_mnext = elan3mmu->elan3mmu_mtail = nrgn;
9198 +       }
9199 +       else
9200 +       {
9201 +           if (nbase >= base || ntop >= base)
9202 +               return (-1);                                    /* overlapping region */
9203 +
9204 +           nrgn->rgn_mnext = rgn;                              /* insert before region */
9205 +           nrgn->rgn_mprev = rgn->rgn_mprev;
9206 +           rgn->rgn_mprev  = nrgn;
9207 +           if (elan3mmu->elan3mmu_mrgns == rgn)
9208 +               elan3mmu->elan3mmu_mrgns = nrgn;
9209 +           else
9210 +               nrgn->rgn_mprev->rgn_mnext = nrgn;
9211 +       }
9212 +    }
9213 +    elan3mmu->elan3mmu_mrgnlast = nrgn;
9214 +    
9215 +    return (0);
9216 +}
9217 +
9218 +ELAN3MMU_RGN *
9219 +elan3mmu_removergn_main (ELAN3MMU *elan3mmu, caddr_t addr)
9220 +{
9221 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
9222 +    
9223 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9224 +
9225 +    if (rgn == NULL || rgn->rgn_mbase != addr)
9226 +       return (NULL);
9227 +    
9228 +    elan3mmu->elan3mmu_mrgnlast = rgn->rgn_mnext;
9229 +    if (rgn == elan3mmu->elan3mmu_mtail)
9230 +       elan3mmu->elan3mmu_mtail = rgn->rgn_mprev;
9231 +    else
9232 +       rgn->rgn_mnext->rgn_mprev = rgn->rgn_mprev;
9233 +    
9234 +    if (rgn == elan3mmu->elan3mmu_mrgns)
9235 +       elan3mmu->elan3mmu_mrgns = rgn->rgn_mnext;
9236 +    else
9237 +       rgn->rgn_mprev->rgn_mnext = rgn->rgn_mnext;
9238 +
9239 +    return (rgn);
9240 +}
9241 +
9242 +ELAN3MMU_RGN *
9243 +elan3mmu_rgnat_main (ELAN3MMU *elan3mmu, caddr_t addr)
9244 +{
9245 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
9246 +    caddr_t       base;
9247 +
9248 +    if (rgn != NULL && (base = rgn->rgn_mbase) <= addr && addr <= (base + rgn->rgn_len - 1))
9249 +       return (rgn);
9250 +    return (NULL);
9251 +}
9252 +
9253 +int
9254 +elan3mmu_setperm (ELAN3MMU *elan3mmu,
9255 +                 caddr_t   maddr,
9256 +                 E3_Addr   eaddr,
9257 +                 u_int     len,
9258 +                 u_int     perm)
9259 +{
9260 +    ELAN3_DEV     *dev = elan3mmu->elan3mmu_dev;
9261 +    ELAN3MMU_RGN *nrgn;
9262 +    unsigned long  flags;
9263 +
9264 +    HAT_PRINTF4 (1, "elan3mmu_setperm: user %p elan %08x len %x perm %x\n", maddr, eaddr, len, perm);
9265 +
9266 +    if ((((uintptr_t) maddr) & PAGEOFFSET) || (eaddr & PAGEOFFSET) || (len & PAGEOFFSET)) 
9267 +    {
9268 +        HAT_PRINTF0 (1, "elan3mmu_setperm:  alignment failure\n");
9269 +       return (EINVAL);
9270 +    }
9271 +
9272 +    if (((uintptr_t) maddr + len - 1) < (uintptr_t) maddr || ((u_long)eaddr + len - 1) < (u_long)eaddr) 
9273 +    {
9274 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  range failure\n");
9275 +       return (EINVAL);
9276 +    }
9277 +
9278 +    ALLOC_ELAN3MMU_RGN(nrgn, TRUE);
9279 +    
9280 +    spin_lock (&elan3mmu->elan3mmu_lock);
9281 +    nrgn->rgn_mbase = maddr;
9282 +    nrgn->rgn_ebase = eaddr;
9283 +    nrgn->rgn_len   = len;
9284 +    nrgn->rgn_perm  = perm;
9285 +
9286 +    spin_lock_irqsave (&dev->IntrLock, flags);
9287 +    if (elan3mmu_addrgn_elan (elan3mmu, nrgn) < 0)
9288 +    {
9289 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  elan address exists\n");
9290 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
9291 +       spin_unlock (&elan3mmu->elan3mmu_lock);
9292 +
9293 +       FREE_ELAN3MMU_RGN (nrgn);
9294 +       return (EINVAL);
9295 +    }
9296 +    
9297 +    if (elan3mmu_addrgn_main (elan3mmu, nrgn) < 0)
9298 +    {
9299 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  main address exists\n");
9300 +       elan3mmu_removergn_elan (elan3mmu, eaddr);
9301 +
9302 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
9303 +       spin_unlock (&elan3mmu->elan3mmu_lock);
9304 +
9305 +       FREE_ELAN3MMU_RGN (nrgn);
9306 +       return (EINVAL);
9307 +    }
9308 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
9309 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9310 +
9311 +    return (ESUCCESS);
9312 +}
9313 +
9314 +void
9315 +elan3mmu_clrperm (ELAN3MMU *elan3mmu,
9316 +                 E3_Addr   addr,
9317 +                 u_int     len)
9318 +{
9319 +    E3_Addr       raddr;
9320 +    E3_Addr       rtop;
9321 +    ELAN3MMU_RGN *nrgn;
9322 +    ELAN3MMU_RGN *rgn;
9323 +    ELAN3MMU_RGN *rgn_next;
9324 +    u_int        ssize;
9325 +    unsigned long flags;
9326 +    int                  res;
9327 +
9328 +    HAT_PRINTF2 (1, "elan3mmu_clrperm: elan %08x len %x\n", addr, len);
9329 +
9330 +    raddr = (addr & PAGEMASK);
9331 +    rtop = ((addr + len - 1) & PAGEMASK) + PAGEOFFSET;
9332 +
9333 +    ALLOC_ELAN3MMU_RGN (nrgn, TRUE);
9334 +
9335 +    spin_lock (&elan3mmu->elan3mmu_lock);
9336 +    
9337 +    for (rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0); rgn != NULL; rgn = rgn_next)
9338 +    {
9339 +       if (rtop < rgn->rgn_ebase)                              /* rtop was in a gap */
9340 +           break;
9341 +       
9342 +       rgn_next = rgn->rgn_enext;                              /* Save next region pointer */
9343 +       
9344 +       if (raddr <= rgn->rgn_ebase && rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1)) 
9345 +       {
9346 +           /* whole region is cleared */
9347 +           elan3mmu_unload (elan3mmu, rgn->rgn_ebase, rgn->rgn_len, PTE_UNLOAD);
9348 +           
9349 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9350 +           elan3mmu_removergn_elan (elan3mmu, rgn->rgn_ebase);
9351 +           elan3mmu_removergn_main (elan3mmu, rgn->rgn_mbase);
9352 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9353 +
9354 +           FREE_ELAN3MMU_RGN (rgn);
9355 +       }
9356 +       else if (raddr <= rgn->rgn_ebase)
9357 +       {
9358 +           /* clearing at beginning, so shrink size and increment base ptrs */
9359 +           ssize = rtop - rgn->rgn_ebase + 1;
9360 +
9361 +           elan3mmu_unload (elan3mmu, rgn->rgn_ebase, ssize, PTE_UNLOAD);
9362 +           
9363 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9364 +           rgn->rgn_mbase += ssize;
9365 +           rgn->rgn_ebase += ssize;
9366 +           rgn->rgn_len   -= ssize;
9367 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9368 +           
9369 +       }
9370 +       else if (rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
9371 +       {
9372 +           /* clearing at end, so just shrink length of region */
9373 +           ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
9374 +
9375 +           elan3mmu_unload (elan3mmu, raddr, ssize, PTE_UNLOAD);
9376 +
9377 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9378 +           rgn->rgn_len -= ssize;
9379 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9380 +       }
9381 +       else
9382 +       {
9383 +           /* the section to go is in the middle,  so need to  */
9384 +           /* split it into two regions */
9385 +           elan3mmu_unload (elan3mmu, raddr, rtop - raddr + 1, PTE_UNLOAD);
9386 +
9387 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9388 +
9389 +           ASSERT (nrgn != NULL);
9390 +
9391 +           nrgn->rgn_mbase = rgn->rgn_mbase + (rtop - rgn->rgn_ebase + 1);;
9392 +           nrgn->rgn_ebase = rtop + 1;
9393 +           nrgn->rgn_len   = ((rgn->rgn_ebase + rgn->rgn_len - 1) - rtop);
9394 +           nrgn->rgn_perm  = rgn->rgn_perm;
9395 +
9396 +           rgn->rgn_len = (raddr - rgn->rgn_ebase);            /* shrink original region */
9397 +
9398 +           res = elan3mmu_addrgn_elan (elan3mmu, nrgn);        /* insert new region */
9399 +           ASSERT (res == 0);                                  /* which cannot fail */
9400 +
9401 +           res = elan3mmu_addrgn_main (elan3mmu, nrgn);        
9402 +           ASSERT (res == 0);
9403 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9404 +
9405 +           nrgn = NULL;
9406 +       }
9407 +    }
9408 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9409 +
9410 +    if (nrgn != NULL)
9411 +       FREE_ELAN3MMU_RGN (nrgn);
9412 +}
9413 +
9414 +int
9415 +elan3mmu_checkperm (ELAN3MMU *elan3mmu,
9416 +                   E3_Addr   addr,
9417 +                   u_int     len,
9418 +                   u_int     access)
9419 +{
9420 +    E3_Addr     raddr = (((E3_Addr) addr) & PAGEMASK);
9421 +    u_int        rtop = ((addr + len - 1) & PAGEMASK) + PAGEOFFSET;
9422 +    u_int       rsize = rtop - raddr + 1;
9423 +    ELAN3MMU_RGN *rgn;
9424 +
9425 +    HAT_PRINTF3 (1, "elan3mmu_checkperm: user %08x len %x access %x\n", addr, len, access);
9426 +    
9427 +    
9428 +    if ((raddr + rsize - 1) < raddr)
9429 +       return (ENOMEM);
9430 +    
9431 +    spin_lock (&elan3mmu->elan3mmu_lock);
9432 +    if ((rgn = elan3mmu_rgnat_elan (elan3mmu, raddr)) == (ELAN3MMU_RGN *) NULL)
9433 +    {
9434 +       spin_unlock (&elan3mmu->elan3mmu_lock);
9435 +       return (ENOMEM);
9436 +    }
9437 +    else
9438 +    {
9439 +       register int ssize;
9440 +       
9441 +       for (; rsize != 0; rsize -= ssize, raddr += ssize)
9442 +       {
9443 +           if (raddr > (rgn->rgn_ebase + rgn->rgn_len - 1))
9444 +           {
9445 +               rgn  = rgn->rgn_enext;
9446 +               
9447 +               if (rgn == NULL || raddr != rgn->rgn_ebase)
9448 +               {
9449 +                   spin_unlock (&elan3mmu->elan3mmu_lock);
9450 +                   return (ENOMEM);
9451 +               }
9452 +           }
9453 +           if ((raddr + rsize - 1) > (rgn->rgn_ebase + rgn->rgn_len - 1))
9454 +               ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
9455 +           else
9456 +               ssize = rsize;
9457 +           
9458 +           HAT_PRINTF4 (1, "elan3mmu_checkperm : rgn %x -> %x perm %x access %x\n",
9459 +                        rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len, rgn->rgn_perm, access);
9460 +
9461 +           if (ELAN3_INCOMPAT_ACCESS (rgn->rgn_perm, access))
9462 +           {
9463 +               spin_unlock (&elan3mmu->elan3mmu_lock);
9464 +               return (EACCES);
9465 +           }
9466 +       }
9467 +    }
9468 +    
9469 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9470 +    
9471 +    return (ESUCCESS);
9472 +}
9473 +
9474 +caddr_t
9475 +elan3mmu_mainaddr (ELAN3MMU *elan3mmu, E3_Addr addr)
9476 +{
9477 +    ELAN3MMU_RGN *rgn;
9478 +    caddr_t      raddr;
9479 +    
9480 +    spin_lock (&elan3mmu->elan3mmu_lock);
9481 +    if ((rgn = elan3mmu_rgnat_elan (elan3mmu, addr)) == (ELAN3MMU_RGN *) NULL)
9482 +       raddr = NULL;
9483 +    else
9484 +       raddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
9485 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9486 +
9487 +    return (raddr);
9488 +}
9489 +
9490 +E3_Addr
9491 +elan3mmu_elanaddr (ELAN3MMU *elan3mmu, caddr_t addr)
9492 +{
9493 +    ELAN3MMU_RGN *rgn;
9494 +    E3_Addr       raddr;
9495 +
9496 +    spin_lock (&elan3mmu->elan3mmu_lock);
9497 +    if ((rgn = elan3mmu_rgnat_main (elan3mmu, addr)) == (ELAN3MMU_RGN *) NULL)
9498 +       raddr = (E3_Addr) 0;
9499 +    else
9500 +       raddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
9501 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9502 +
9503 +    return (raddr);
9504 +}
9505 +
9506 +void
9507 +elan3mmu_displayrgns(ELAN3MMU *elan3mmu)
9508 +{
9509 +    ELAN3MMU_RGN *rgn;
9510 +
9511 +    spin_lock (&elan3mmu->elan3mmu_lock);
9512 +    HAT_PRINTF0 (1, "elan3mmu_displayrgns: main regions\n");
9513 +    for (rgn = elan3mmu->elan3mmu_mrgns; rgn; rgn = (rgn->rgn_mnext == elan3mmu->elan3mmu_mrgns) ? NULL : rgn->rgn_mnext)
9514 +       HAT_PRINTF5 (1, "    RGN %p ebase %08x mbase %p len %08x perm %08x\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len, rgn->rgn_perm);
9515 +    HAT_PRINTF0 (1, "elan3mmu_displayrgns: elan regions\n");
9516 +    for (rgn = elan3mmu->elan3mmu_ergns; rgn; rgn = (rgn->rgn_enext == elan3mmu->elan3mmu_ergns) ? NULL : rgn->rgn_enext)
9517 +       HAT_PRINTF5 (1, "    RGN %p ebase %08x mbase %p len %08x perm %08x\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len, rgn->rgn_perm);
9518 +
9519 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9520 +}
9521 +
9522 +/*============================================================================*/
9523 +/* Private functions */
9524 +#define ELAN3_PTE_IS_VALID(ptbl, pte)  \
9525 +          ((ptbl->ptbl_flags & PTBL_KERNEL) ? \
9526 +          (pte&(~ELAN3_PTE_REF)) != elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu) : \
9527 +          ELAN3_PTE_VALID(pte))
9528 +
9529 +void
9530 +elan3mmu_expand (ELAN3MMU *elan3mmu, E3_Addr addr, int len, int level, int attr)
9531 +{
9532 +    ELAN3_PTBL          *ptbl;
9533 +    sdramaddr_t                pte;
9534 +    spinlock_t        *lock;
9535 +    u_int              span;
9536 +    unsigned long       flags;
9537 +
9538 +    HAT_PRINTF3 (1, "elan3mmu_expand: elan3mmu %p %08x to %08x\n", elan3mmu, 
9539 +                addr, addr + len);
9540 +
9541 +    for ( ; len != 0; addr += span, len -= span)
9542 +    {
9543 +       /* allocate the pte/ptp for this address at the requested level */
9544 +       pte = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
9545 +
9546 +       switch (level)
9547 +       {
9548 +       case PTBL_LEVEL_3:
9549 +           span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
9550 +           break;
9551 +       case PTBL_LEVEL_2:
9552 +           span = MIN(len, ELAN3_L2_PTSIZE - ((E3_Addr) addr & ELAN3_L2_PTOFFSET));
9553 +           break;
9554 +       default:
9555 +           span = len;
9556 +           break;
9557 +       }
9558 +       
9559 +       if (pte != (sdramaddr_t) 0)
9560 +           elan3mmu_unlock_ptbl (ptbl, lock, flags);
9561 +    }
9562 +}
9563 +
9564 +void
9565 +elan3mmu_reserve (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *ptes)
9566 +{
9567 +    ELAN3_PTBL          *ptbl;
9568 +    sdramaddr_t                pte;
9569 +    spinlock_t        *lock;
9570 +    u_int              span;
9571 +    int                        len;
9572 +    int                        i;
9573 +    unsigned long       flags;
9574 +
9575 +    HAT_PRINTF3 (1, "elan3mmu_reserve: elan3mmu %p %08x to %08x\n", elan3mmu, 
9576 +                addr, addr + (npages << ELAN3_PAGE_SHIFT));
9577 +
9578 +    for (len = (npages << ELAN3_PAGE_SHIFT); len != 0; addr += span, len -= span)
9579 +    {
9580 +       /* as we asked for level 3 we know it's a pte */
9581 +       pte = elan3mmu_ptealloc (elan3mmu, addr, 3, &ptbl, &lock, 0, &flags);
9582 +
9583 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
9584 +       
9585 +       if (ptes != NULL)
9586 +       {
9587 +           for (i = 0; i < span; i += ELAN3_PAGE_SIZE, pte += ELAN3_PTE_SIZE)
9588 +               *ptes++ = pte;
9589 +           ptbl->ptbl_valid += (span >> ELAN3_PAGE_SHIFT);
9590 +
9591 +           HAT_PRINTF4 (2, "elan3mmu_reserve: inc valid for level %d ptbl %p to %d   (%d)\n", 
9592 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid, (span >> ELAN3_PAGE_SHIFT));
9593 +
9594 +       }
9595 +
9596 +       elan3mmu_unlock_ptbl (ptbl, lock, flags);
9597 +    }
9598 +}
9599 +
9600 +void
9601 +elan3mmu_release (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *ptes)
9602 +{
9603 +    ELAN3_DEV           *dev = elan3mmu->elan3mmu_dev;
9604 +    ELAN3_PTBL          *ptbl;
9605 +    sdramaddr_t                pte;
9606 +    ELAN3_PTE          tpte;
9607 +    spinlock_t        *lock;
9608 +    u_int              span;
9609 +    int                        len;
9610 +    int                        i;
9611 +    int                        level;
9612 +    unsigned long       flags;
9613 +    
9614 +    HAT_PRINTF3 (1, "elan3mmu_release: elan3mmu %p %08x to %08x\n", elan3mmu, 
9615 +                addr, addr + (npages << ELAN3_PAGE_SHIFT));
9616 +
9617 +    if (ptes == NULL)
9618 +       return;
9619 +
9620 +    tpte = elan3mmu_kernel_invalid_pte (elan3mmu);
9621 +
9622 +    for (len = (npages << ELAN3_PAGE_SHIFT); len != 0; addr += span, len -= span)
9623 +    {
9624 +       /* as we asked for level 3 we know it's a pte */
9625 +       pte = elan3mmu_ptefind(elan3mmu, addr, &level, &ptbl, &lock, &flags);
9626 +       ASSERT (level == PTBL_LEVEL_3);
9627 +
9628 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
9629 +
9630 +
9631 +       for (i = 0 ; i < span; i += ELAN3_PAGE_SIZE, pte += ELAN3_PTE_SIZE)
9632 +           elan3_writepte (dev, pte, tpte);
9633 +       ptbl->ptbl_valid -= (span >> ELAN3_PAGE_SHIFT);
9634 +
9635 +       HAT_PRINTF3 (2, "elan3mmu_release: inc valid for level %d ptbl %p to %d\n", 
9636 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
9637 +
9638 +       elan3mmu_unlock_ptbl (ptbl, lock, flags);
9639 +    }
9640 +    ElanFlushTlb (elan3mmu->elan3mmu_dev);
9641 +}
9642 +
9643 +void
9644 +elan3mmu_pteload (ELAN3MMU *elan3mmu, int level, E3_Addr addr, physaddr_t paddr, int perm, int attr)
9645 +    
9646 +{
9647 +    ELAN3_DEV     *dev;
9648 +    ELAN3_PTBL    *ptbl;
9649 +    spinlock_t   *lock;
9650 +    unsigned long flags;
9651 +    ELAN3_PTE      newpte;
9652 +    ELAN3_PTE      oldpte;
9653 +    sdramaddr_t   pte;
9654 +
9655 +    ASSERT((level == PTBL_LEVEL_2) || (level == PTBL_LEVEL_3));
9656 +
9657 +    /* Generate the new pte which we're going to load */
9658 +    dev = elan3mmu->elan3mmu_dev;
9659 +
9660 +    newpte = elan3mmu_phys_to_pte (dev, paddr, perm);
9661 +    
9662 +    if (attr & PTE_LOAD_BIG_ENDIAN)
9663 +       newpte |= ELAN3_PTE_BIG_ENDIAN;
9664 +
9665 +    HAT_PRINTF4 (1, "elan3mmu_pteload: elan3mmu %p level %d addr %x pte %llx\n", elan3mmu, level, addr, (long long) newpte);
9666 +    HAT_PRINTF5 (1, "elan3mmu_pteload:%s%s%s perm=%d phys=%llx\n",
9667 +                (newpte & ELAN3_PTE_LOCAL)  ? " local" : "",
9668 +                (newpte & ELAN3_PTE_64_BIT)     ? " 64 bit" : "",
9669 +                (newpte & ELAN3_PTE_BIG_ENDIAN) ? " big-endian" : " little-endian",
9670 +                (u_int) (newpte & ELAN3_PTE_PERM_MASK) >> ELAN3_PTE_PERM_SHIFT,
9671 +                (unsigned long long) (newpte & ELAN3_PTE_PFN_MASK));
9672 +                 
9673 +    if (level == PTBL_LEVEL_3)
9674 +       pte = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
9675 +    else
9676 +    {
9677 +       sdramaddr_t ptp = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
9678 +
9679 +       pte = elan3mmu_ptp2pte (elan3mmu, ptp, level);
9680 +
9681 +       HAT_PRINTF3 (2, "elan3mmu_pteload: level %d ptp at %lx => pte at %lx\n", level, ptp, pte);
9682 +    }
9683 +
9684 +    if (pte == (sdramaddr_t) 0)
9685 +    {
9686 +       ASSERT (level == PTBL_LEVEL_3 && (attr & (PTE_NO_SLEEP | PTE_NO_STEAL)) == (PTE_NO_SLEEP | PTE_NO_STEAL));
9687 +       return;
9688 +    }
9689 +
9690 +    ASSERT (ptbl->ptbl_elan3mmu == elan3mmu);
9691 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == level);
9692 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
9693 +    
9694 +    oldpte = elan3_readpte (dev, pte);
9695 +
9696 +    HAT_PRINTF3 (2, "elan3mmu_pteload: modify pte at %lx from %llx to %llx\n", pte, (long long) oldpte, (long long) newpte);
9697 +
9698 +    if (ELAN3_PTE_IS_VALID(ptbl, oldpte))
9699 +    {
9700 +       ELAN3MMU_STAT(ptereload);
9701 +
9702 +       ASSERT ((newpte & ~((E3_uint64)ELAN3_PTE_PERM_MASK | ELAN3_RM_MASK)) == (oldpte & ~((E3_uint64)ELAN3_PTE_PERM_MASK | ELAN3_RM_MASK)));
9703 +       
9704 +       if ((newpte & ~ELAN3_RM_MASK) != (oldpte & ~ELAN3_RM_MASK))
9705 +       {
9706 +           /* We're modifying a valid translation, it must be mapping the same page */
9707 +           /* so we use elan3_modifypte to not affect the referenced and modified bits */
9708 +           elan3_modifypte (dev, pte, newpte);
9709 +
9710 +
9711 +           ElanFlushTlb (elan3mmu->elan3mmu_dev);
9712 +       }
9713 +    }
9714 +    else
9715 +    {
9716 +       ELAN3MMU_STAT(pteload);
9717 +
9718 +       ptbl->ptbl_valid++;
9719 +
9720 +       HAT_PRINTF3 (2, "elan3mmu_pteload: inc valid for level %d ptbl %p to %d\n", 
9721 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
9722 +
9723 +       HAT_PRINTF2 (2, "elan3mmu_pteload: write pte %lx to %llx\n", pte, (long long) newpte);
9724 +
9725 +       elan3_writepte (dev, pte, newpte);
9726 +
9727 +       if (ptbl->ptbl_flags & PTBL_KERNEL)
9728 +           ElanFlushTlb (elan3mmu->elan3mmu_dev);
9729 +
9730 +    }
9731 +
9732 +    elan3mmu_unlock_ptbl (ptbl, lock, flags);
9733 +}
9734 +
9735 +void
9736 +elan3mmu_unload (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, int attr)
9737 +{
9738 +    ELAN3_PTBL          *ptbl;
9739 +    sdramaddr_t         ptp;
9740 +    spinlock_t        *lock;
9741 +    int                        level;
9742 +    u_int              span;
9743 +    unsigned long      flags;
9744 +
9745 +    HAT_PRINTF3(1, "elan3mmu_unload (elan3mmu %p addr %x -> %x)\n", elan3mmu, addr, addr+len-1);
9746 +
9747 +    for (; len != 0; addr += span, len -= span)
9748 +    {
9749 +       ptp  = elan3mmu_ptefind(elan3mmu, addr, &level, &ptbl, &lock, &flags);
9750 +
9751 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
9752 +
9753 +       if (ptp != (sdramaddr_t) 0)
9754 +       {
9755 +           HAT_PRINTF2 (2, "elan3mmu_unload: unload [%x,%x]\n", addr, addr + span);
9756 +           
9757 +           if ( level ==  PTBL_LEVEL_3 ) 
9758 +               elan3mmu_unload_loop (elan3mmu, ptbl, ptp - PTBL_TO_PTADDR(ptbl), span >> ELAN3_PAGE_SHIFT, attr);
9759 +           else
9760 +           {
9761 +               ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
9762 +               ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
9763 +               ELAN3_PTBL  *lXptbl;
9764 +               ELAN3_PTP    tptp;
9765 +               int         idx;
9766 +
9767 +               tptp = elan3_readptp (elan3mmu->elan3mmu_dev, ptp);
9768 +
9769 +               ASSERT (ELAN3_PTP_TYPE(tptp) == ELAN3_ET_PTE);
9770 +
9771 +               lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tptp);
9772 +               idx    = (PTP_TO_PT_PADDR(tptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;
9773 +
9774 +               if ( level == PTBL_LEVEL_1) 
9775 +                   span = MIN(len, ELAN3_L2_PTSIZE - ((E3_Addr) addr & ELAN3_L2_PTOFFSET));
9776 +               else
9777 +                   span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
9778 +
9779 +               /* invalidate the ptp. */
9780 +               elan3_writeptp (dev, ptp, invalidptp);
9781 +               if (! (attr & PTE_UNLOAD_NOFLUSH))
9782 +                   ElanFlushTlb (dev);     
9783 +    
9784 +               elan3mmu_free_pte ( dev, elan3mmu, lXptbl, idx); 
9785 +
9786 +               ptbl->ptbl_valid--;
9787 +
9788 +               HAT_PRINTF3 (2, "elan3mmu_unload: dec valid for level %d ptbl %p to %d\n", 
9789 +                            PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);     
9790 +
9791 +           }
9792 +           elan3mmu_unlock_ptbl (ptbl, lock, flags);
9793 +       }
9794 +    }
9795 +}
9796 +
9797 +static void
9798 +elan3mmu_unload_loop (ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl, int first_valid, int nptes, int flags)
9799 +{
9800 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
9801 +    sdramaddr_t pte;
9802 +    ELAN3_PTE    tpte;
9803 +    int         last_valid = first_valid + nptes;
9804 +    int                i;
9805 +    
9806 +    HAT_PRINTF3 (1, "elan3mmu_unloadloop: ptbl %p entries [%d->%d]\n", ptbl, first_valid, last_valid);
9807 +
9808 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
9809 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_3);
9810 +    
9811 +    pte = PTBL_TO_PTADDR(ptbl) + first_valid;
9812 +    
9813 +    for (i = first_valid; i < last_valid; i++, pte += ELAN3_PTE_SIZE)
9814 +    {
9815 +       if (ptbl->ptbl_valid == 0)
9816 +           break;
9817 +
9818 +       tpte = elan3_readpte (dev, pte);
9819 +       if (! ELAN3_PTE_IS_VALID(ptbl, tpte))
9820 +           continue;
9821 +       
9822 +       elan3mmu_pteunload (ptbl, pte, flags, NO_MLIST_LOCK);
9823 +    }
9824 +}
9825 +
9826 +void
9827 +elan3mmu_pteunload (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock)
9828 +{
9829 +    ELAN3_DEV   *dev = ptbl->ptbl_elan3mmu->elan3mmu_dev;
9830 +    ELAN3_PTE    tpte;
9831 +
9832 +    ASSERT (PTBL_LEVEL (ptbl->ptbl_flags) == PTBL_LEVEL_3);
9833 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
9834 +
9835 +    HAT_PRINTF2 (1, "elan3mmu_pteunload: ptbl %p pte %lx\n", ptbl, pte);
9836 +
9837 +    ELAN3MMU_STAT (pteunload);
9838 +
9839 +    elan3_invalidatepte (dev, pte);
9840 +
9841 +    if (! (flags & PTE_UNLOAD_NOFLUSH))
9842 +       ElanFlushTlb (dev);
9843 +    
9844 +    tpte = ELAN3_INVALID_PTE;
9845 +    elan3_writepte (dev, pte, tpte);
9846 +    
9847 +    if (ptbl->ptbl_flags & PTBL_KERNEL)
9848 +    {
9849 +       tpte = elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu);
9850 +
9851 +       elan3_writepte (dev, pte, tpte);
9852 +    }
9853 +
9854 +    ptbl->ptbl_valid--;
9855 +
9856 +    HAT_PRINTF3 (2, "elan3mmu_pteunload: dec valid for level %d ptbl %p to %d\n", 
9857 +                PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
9858 +
9859 +}
9860 +
9861 +void
9862 +elan3mmu_ptesync (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock)
9863 +{
9864 +
9865 +}
9866 +
9867 +/*
9868 + * Create more page tables at a given level for this Elan.
9869 + */
9870 +static ELAN3_PTBL *
9871 +elan3mmu_create_ptbls (ELAN3_DEV *dev, int level, int attr, int keep)
9872 +{
9873 +    sdramaddr_t          pts;
9874 +    ELAN3_PTBL    *ptbl;
9875 +    ELAN3_PTBL    *first;
9876 +    ELAN3_PTBL    *last;
9877 +    ELAN3_PTBL_GR *ptg;
9878 +    register int  i;
9879 +    register int  inc;
9880 +    
9881 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: create level %d ptbls\n", level);
9882 +
9883 +    pts = elan3_sdram_alloc (dev, PTBL_GROUP_SIZE);
9884 +    if (pts == (sdramaddr_t) 0)
9885 +    {
9886 +       HAT_PRINTF0 (2, "elan3mmu_create_ptbls: cannot map elan pages\n");
9887 +
9888 +       ELAN3MMU_STAT (create_ptbl_failed);
9889 +       return (NULL);
9890 +    }
9891 +    
9892 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: pts at %lx\n", pts);
9893 +    
9894 +    ALLOC_PTBL_GR (ptg, !(attr & PTE_NO_SLEEP));               /* Allocate the group of page tables */
9895 +    if (ptg == NULL)                                           /* for this page */
9896 +    {
9897 +       HAT_PRINTF0 (2, "elan3mmu_create_ptbls: cannot allocate page table group\n");
9898 +
9899 +       elan3_sdram_free (dev, pts, PTBL_GROUP_SIZE);
9900 +
9901 +       ELAN3MMU_STAT (create_ptbl_failed);
9902 +       return (NULL);
9903 +    }
9904 +
9905 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: ptg is %p\n", ptg);
9906 +    
9907 +    ElanSetPtblGr (dev, pts, ptg);
9908 +    
9909 +    HAT_PRINTF4 (2, "elan3mmu_create_ptbls: zeroing %d bytes at %lx, %d bytes at %p\n",
9910 +                PTBL_GROUP_SIZE, pts, (int) sizeof (ELAN3_PTBL_GR), ptg);
9911 +
9912 +#ifndef zero_all_ptbls
9913 +    elan3_sdram_zeroq_sdram (dev, pts, PTBL_GROUP_SIZE);               /* Ensure that all PTEs/PTPs are invalid */
9914 +#endif
9915 +    bzero ((caddr_t) ptg, sizeof (ELAN3_PTBL_GR));
9916 +    
9917 +    ptg->pg_addr  = pts;
9918 +    ptg->pg_level = level;
9919 +
9920 +    ptbl = ptg->pg_ptbls;                                      /* Initialise the index in all page tables */
9921 +    for (i = 0; i < PTBLS_PER_GROUP_MAX; i++)
9922 +    {
9923 +       ptbl->ptbl_index = (u_char) i;
9924 +       ptbl->ptbl_next  = (ELAN3_PTBL *) 0xdeaddead;
9925 +       ptbl++;
9926 +    }
9927 +    
9928 +    switch (level)                                             /* Determine the number of ptbls we can  */
9929 +    {                                                          /* allocate from this page, by jumping  */
9930 +    case PTBL_LEVEL_X: inc = PTBLS_PER_PTBL_LX; break;         /* multiples of the smallest. */
9931 +    case PTBL_LEVEL_1: inc = PTBLS_PER_PTBL_L1; break;
9932 +    case PTBL_LEVEL_2: inc = PTBLS_PER_PTBL_L2; break;
9933 +    case PTBL_LEVEL_3: inc = PTBLS_PER_PTBL_L3; break;
9934 +    default:           inc = PTBLS_PER_PTBL_L3; break;
9935 +    }
9936 +
9937 +    ptbl = ptg->pg_ptbls;                                      /* Chain them together */
9938 +    for (i = 0; i < PTBLS_PER_GROUP_MAX; i += inc, ptbl += inc)
9939 +       ptbl->ptbl_next = ptbl + inc;
9940 +
9941 +    first = ptg->pg_ptbls;                                     /* Determine list of */
9942 +    last  = first + PTBLS_PER_GROUP_MAX - inc;                 /* ptbls to add to free list */
9943 +    if (! keep)
9944 +       ptbl = NULL;
9945 +    else
9946 +    {
9947 +       ptbl  = first;
9948 +       first = first->ptbl_next;
9949 +    }
9950 +    
9951 +    spin_lock (&dev->Level[level].PtblLock);
9952 +    dev->Level[level].PtblTotal     += PTBLS_PER_GROUP_MAX/inc;                /* Increment the counts */
9953 +    dev->Level[level].PtblFreeCount += PTBLS_PER_GROUP_MAX/inc;
9954 +
9955 +    ELAN3MMU_SET_STAT (num_ptbl_level[level], dev->Level[level].PtblTotal);
9956 +
9957 +    if (keep)
9958 +       dev->Level[level].PtblFreeCount--;
9959 +    
9960 +    last->ptbl_next = dev->Level[level].PtblFreeList;                  /* And add to free list */
9961 +    dev->Level[level].PtblFreeList = first;
9962 +    spin_unlock (&dev->Level[level].PtblLock);
9963 +    
9964 +    spin_lock (&dev->PtblGroupLock);
9965 +    ptg->pg_next = dev->Level[level].PtblGroupList;
9966 +    dev->Level[level].PtblGroupList = ptg;
9967 +    spin_unlock (&dev->PtblGroupLock);
9968 +
9969 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: returning ptbl %p\n", ptbl);
9970 +    
9971 +    return (ptbl);
9972 +}
9973 +
9974 +static ELAN3_PTBL *
9975 +elan3mmu_ta_to_ptbl (ELAN3MMU *elan3mmu, ELAN3_PTP *ptp)
9976 +{
9977 +    E3_Addr      ptpa  = PTP_TO_PT_PADDR(*ptp);
9978 +    ELAN3_PTBL_GR *pg    = ElanGetPtblGr (elan3mmu->elan3mmu_dev, (sdramaddr_t)ptpa & ~(PTBL_GROUP_SIZE-1));
9979 +    
9980 +    return (pg->pg_ptbls + ((ptpa - pg->pg_addr) >> ELAN3_PT_SHIFT));
9981 +}
9982 +
9983 +static ELAN3_PTBL *
9984 +elan3mmu_alloc_lXptbl (ELAN3_DEV *dev, int attr,  ELAN3MMU *elan3mmu)
9985 +{
9986 +    ELAN3_PTBL *ptbl = NULL;
9987 +
9988 +    spin_lock (&dev->Level[PTBL_LEVEL_X].PtblLock);
9989 +    if (dev->Level[PTBL_LEVEL_X].PtblFreeList)
9990 +    {
9991 +       ptbl = dev->Level[PTBL_LEVEL_X].PtblFreeList;
9992 +
9993 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: found ptbl %p on free list\n", ptbl);
9994 +
9995 +       dev->Level[PTBL_LEVEL_X].PtblFreeList = ptbl->ptbl_next;
9996 +       dev->Level[PTBL_LEVEL_X].PtblFreeCount--;
9997 +    }
9998 +    spin_unlock (&dev->Level[PTBL_LEVEL_X].PtblLock);
9999 +    
10000 +    if (ptbl == NULL) 
10001 +    {
10002 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_X, attr, 1);
10003 +
10004 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: created level X ptbl %p\n", ptbl);
10005 +    }
10006 +
10007 +    if (ptbl == NULL)
10008 +    {
10009 +       if ((attr & PTE_NO_STEAL))
10010 +       {
10011 +           HAT_PRINTF0 (2, "elan3mmu_alloc_lXptbl: not allowed to steal ptbl for use at level 2\n");
10012 +           return NULL;
10013 +       }
10014 +
10015 +       ELAN3MMU_STAT(lX_alloc_l3);
10016 +
10017 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
10018 +       
10019 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: stolen level3 ptbl %p used as level 2\n", ptbl);
10020 +    }
10021 +
10022 +    ptbl->ptbl_elan3mmu = elan3mmu;
10023 +    ptbl->ptbl_base     = 0;
10024 +    ptbl->ptbl_parent   = 0;
10025 +    ptbl->ptbl_flags    = PTBL_LEVEL_X | PTBL_ALLOCED;
10026 +    
10027 +    HAT_PRINTF2 (2, "elan3mmu_alloc_lXptbl: ptbl %p dev %p\n", ptbl, dev);
10028 +
10029 +#ifdef zero_all_ptbls
10030 +    elan3_sdram_zero_sdarm (dev, PTBL_TO_PTADDR(ptbl), ELAN3_LX_ENTRIES*ELAN3_PTE_SIZE);
10031 +#endif
10032 +
10033 +    return (ptbl);
10034 +}
10035 +
10036 +static ELAN3_PTBL *
10037 +elan3mmu_alloc_pte (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, int *idx)
10038 +{
10039 +    ELAN3_PTBL   * ptbl_ptr;
10040 +    int           index;
10041 +
10042 +    /* lock whilst looking for space */
10043 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
10044 +    
10045 +    /* walk the lXptbl list */
10046 +    ptbl_ptr = elan3mmu->elan3mmu_lXptbl;
10047 +    while ( ptbl_ptr != NULL ) 
10048 +    {
10049 +       /* does this ptbl have any free ones */
10050 +       if (  (index = ptbl_ptr->ptbl_valid) < ELAN3_LX_ENTRIES) 
10051 +       {
10052 +           /* better to search from valid count as it's likely to be free */
10053 +           index = ptbl_ptr->ptbl_valid; 
10054 +           do {
10055 +               if ((ptbl_ptr->ptbl_base & (1 << index)) == 0)
10056 +                   goto found;
10057 +
10058 +               /* move index on and wrap back to start if needed */
10059 +               if ((++index) == ELAN3_LX_ENTRIES) 
10060 +                   index = 0;
10061 +           } while (index != ptbl_ptr->ptbl_valid);
10062 +
10063 +           panic ("elan3mmu_alloc_pte: has ptbl valid < 32 when but no free pte's");
10064 +       }
10065 +       ptbl_ptr = ptbl_ptr->ptbl_parent;
10066 +    }
10067 +       
10068 +    /* unlock so we can create space */
10069 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); 
10070 +
10071 +    /* if not, create some more */
10072 +    ptbl_ptr = elan3mmu_alloc_lXptbl(dev, 0, elan3mmu);
10073 +
10074 +    /* get the lock again */
10075 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
10076 +       
10077 +    /* add to front of list as its obviously got free ones on it */
10078 +    ptbl_ptr->ptbl_parent     = elan3mmu->elan3mmu_lXptbl;
10079 +    elan3mmu->elan3mmu_lXptbl = ptbl_ptr;
10080 +
10081 +    /* grab the first one */
10082 +    index = 0;
10083 +    
10084 + found:
10085 +    ptbl_ptr->ptbl_base |= (1 << index);
10086 +    ptbl_ptr->ptbl_valid++;
10087 +
10088 +    HAT_PRINTF3 (2, "elan3mmu_alloc_pte: inc valid for level %d ptbl %p to %d\n", 
10089 +                PTBL_LEVEL(ptbl_ptr->ptbl_flags), ptbl_ptr, ptbl_ptr->ptbl_valid);
10090 +
10091 +    /* release the lock and return it */
10092 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); 
10093 +
10094 +    *idx = index;
10095 +    return (ptbl_ptr);
10096 +}
10097 +
10098 +static ELAN3_PTBL *
10099 +elan3mmu_alloc_l1ptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu)
10100 +{
10101 +    ELAN3_PTBL *ptbl = NULL;
10102 +    ELAN3_PTBL *p;
10103 +    int i,j;
10104 +    
10105 +    spin_lock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10106 +    if (dev->Level[PTBL_LEVEL_1].PtblFreeList)
10107 +    {
10108 +       ptbl = dev->Level[PTBL_LEVEL_1].PtblFreeList;
10109 +       dev->Level[PTBL_LEVEL_1].PtblFreeList = ptbl->ptbl_next;
10110 +       dev->Level[PTBL_LEVEL_1].PtblFreeCount--;
10111 +    }
10112 +    spin_unlock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10113 +    
10114 +    if (ptbl == NULL)
10115 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_1, attr, 1);
10116 +    
10117 +    if (ptbl == NULL)
10118 +       panic ("elan3mmu_alloc_l1ptbl: cannot alloc ptbl");
10119 +    
10120 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L1; i++, p++)
10121 +    {
10122 +       p->ptbl_elan3mmu = elan3mmu;
10123 +       p->ptbl_base     = VA2BASE (j);
10124 +       p->ptbl_flags    = PTBL_LEVEL_1 | PTBL_GROUPED;
10125 +       p->ptbl_parent   = NULL;
10126 +       
10127 +       j += L1_VA_PER_PTBL;
10128 +    }
10129 +    
10130 +    /* Now mark the real page table as allocated */
10131 +    /* level 1 ptbls are returned unlocked */
10132 +    ptbl->ptbl_flags = PTBL_LEVEL_1 | PTBL_ALLOCED;
10133 +    
10134 +    HAT_PRINTF2 (2, "elan3mmu_alloc_l1ptbl: ptbl %p dev %p\n", ptbl, dev);
10135 +
10136 +#ifdef zero_all_ptbls
10137 +    elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L1_ENTRIES*ELAN3_PTP_SIZE);
10138 +#endif
10139 +
10140 +    return (ptbl);
10141 +}
10142 +
10143 +static ELAN3_PTBL *
10144 +elan3mmu_alloc_l2ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, E3_Addr base, spinlock_t **plock, unsigned long *flags)
10145 +{
10146 +    ELAN3_PTBL *ptbl = NULL;
10147 +    ELAN3_PTBL *p;
10148 +    int        i;
10149 +    int        j;
10150 +    unsigned long ptbl_flags;
10151 +
10152 +    spin_lock_irqsave (&dev->Level[PTBL_LEVEL_2].PtblLock, ptbl_flags);
10153 +    if (dev->Level[PTBL_LEVEL_2].PtblFreeList)
10154 +    {
10155 +       ptbl = dev->Level[PTBL_LEVEL_2].PtblFreeList;
10156 +
10157 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: found ptbl %p on free list\n", ptbl);
10158 +
10159 +       dev->Level[PTBL_LEVEL_2].PtblFreeList = ptbl->ptbl_next;
10160 +       dev->Level[PTBL_LEVEL_2].PtblFreeCount--;
10161 +    }
10162 +    spin_unlock_irqrestore (&dev->Level[PTBL_LEVEL_2].PtblLock, ptbl_flags);
10163 +    
10164 +    if (ptbl == NULL) 
10165 +    {
10166 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_2, attr, 1);
10167 +
10168 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: created level 2 ptbl %p\n", ptbl);
10169 +    }
10170 +
10171 +    if (ptbl == NULL)
10172 +    {
10173 +       if ((attr & PTE_NO_STEAL))
10174 +       {
10175 +           HAT_PRINTF0 (2, "elan3mmu_alloc_l2ptbl: not allowted to steal ptbl for use at level 2\n");
10176 +           return (NULL);
10177 +       }
10178 +
10179 +       ELAN3MMU_STAT(l2_alloc_l3);
10180 +
10181 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
10182 +       
10183 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: stolen level3 ptbl %p used as level 2\n", ptbl);
10184 +    }
10185 +    
10186 +    *plock = elan3mmu_ptbl_to_lock (PTBL_LEVEL_2, ptbl);
10187 +    spin_lock_irqsave (*plock, *flags);
10188 +    
10189 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L2; i++, p++)
10190 +    {
10191 +       p->ptbl_elan3mmu = elan3mmu;
10192 +       p->ptbl_base     = VA2BASE (base + j);
10193 +       p->ptbl_flags    = PTBL_LEVEL_2 | PTBL_GROUPED;
10194 +       p->ptbl_parent   = parent;
10195 +       
10196 +       j += L2_VA_PER_PTBL;
10197 +    }
10198 +    
10199 +    ptbl->ptbl_flags  = PTBL_LEVEL_2 | PTBL_ALLOCED | PTBL_LOCKED;
10200 +    
10201 +    HAT_PRINTF3 (2, "elan3mmu_alloc_l2ptbl: ptbl %p dev %p base %x\n", ptbl, dev, base);
10202 +
10203 +#ifdef zero_all_ptbls
10204 +    elan3_sdram_zero_sdarm (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L2_ENTRIES*ELAN3_PTP_SIZE);
10205 +#endif
10206 +
10207 +    return (ptbl);
10208 +}
10209 +
10210 +static ELAN3_PTBL *
10211 +elan3mmu_alloc_l3ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, E3_Addr base, spinlock_t **plock, unsigned long *flags)
10212 +{
10213 +    ELAN3_PTBL *ptbl = NULL;
10214 +    ELAN3_PTBL *p;
10215 +    int               i;
10216 +    int               j;
10217 +    unsigned long ptbl_flags;
10218 +
10219 +    spin_lock_irqsave (&dev->Level[PTBL_LEVEL_3].PtblLock, ptbl_flags);
10220 +    if (dev->Level[PTBL_LEVEL_3].PtblFreeList)
10221 +    {
10222 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: found ptbl %p on free list\n", ptbl);
10223 +
10224 +       ptbl = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10225 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl->ptbl_next;
10226 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount--;
10227 +    }
10228 +    spin_unlock_irqrestore (&dev->Level[PTBL_LEVEL_3].PtblLock, ptbl_flags);
10229 +    
10230 +    if (ptbl == NULL)
10231 +    {
10232 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_3, attr, 1);
10233 +
10234 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: created level 3 ptbl %p\n", ptbl);
10235 +    }
10236 +
10237 +    if (ptbl == NULL)
10238 +    {
10239 +       if ((attr & PTE_NO_STEAL))
10240 +       {
10241 +           HAT_PRINTF0 (2, "elan3mmu_alloc_l3ptbl: not allowed to steal ptbl for use at level 3\n");
10242 +           return (NULL);
10243 +       }
10244 +
10245 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
10246 +
10247 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: stolen level3 ptbl %p\n", ptbl);
10248 +    }
10249 +    
10250 +    *plock = elan3mmu_ptbl_to_lock (PTBL_LEVEL_3, ptbl);
10251 +    spin_lock_irqsave (*plock,*flags);
10252 +    
10253 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L3; i++, p++)
10254 +    {
10255 +       p->ptbl_elan3mmu = elan3mmu;
10256 +       p->ptbl_base     = VA2BASE (base + j);
10257 +       p->ptbl_flags    = PTBL_LEVEL_3 | PTBL_GROUPED;
10258 +       p->ptbl_parent   = parent;
10259 +       
10260 +       j += L3_VA_PER_PTBL;
10261 +    }
10262 +    
10263 +    ptbl->ptbl_flags = PTBL_LEVEL_3 | PTBL_ALLOCED | PTBL_LOCKED;
10264 +    
10265 +    HAT_PRINTF3 (2, "elan3mmu_alloc_l3ptbl: ptbl %p dev %p base %x\n", ptbl, dev, base);
10266 +
10267 +#ifdef zero_all_ptbls
10268 +    elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L3_ENTRIES*ELAN3_PTE_SIZE);
10269 +#endif
10270 +
10271 +    return (ptbl);
10272 +}
10273 +
10274 +void 
10275 +elan3mmu_free_pte  (ELAN3_DEV *dev,  ELAN3MMU *elan3mmu,  ELAN3_PTBL *ptbl_ptr, int idx)
10276 +{  
10277 +    sdramaddr_t pte  = PTBL_TO_PTADDR (ptbl_ptr) | (idx * sizeof (ELAN3_PTE));
10278 +    ELAN3_PTE    tpte = ELAN3_INVALID_PTE;
10279 +    ELAN3_PTBL *prev;
10280 +
10281 +    /* ensure that the pte is invalid when free */
10282 +    elan3_writepte (dev, pte, tpte);
10283 +
10284 +    /* lock whilst removing */
10285 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
10286 +
10287 +    HAT_PRINTF4 (2, "elan3mmu_free_pte idx %d   ptbl_ptr %p ptbl_base  %x  ptbl_ptr->ptbl_valid %d \n", 
10288 +                idx, ptbl_ptr, ptbl_ptr->ptbl_base, ptbl_ptr->ptbl_valid);
10289 +    /* make sure it was set */
10290 +    ASSERT ( ptbl_ptr->ptbl_base & (1 << idx) ); 
10291 +    ASSERT ( ptbl_ptr->ptbl_valid > 0  );
10292 +
10293 +    ptbl_ptr->ptbl_base &= ~(1 << idx);
10294 +    ptbl_ptr->ptbl_valid--;
10295 +
10296 +    HAT_PRINTF3 (2, "elan3mmu_free_pte: dec valid for level %d ptbl %p to %d\n", 
10297 +                PTBL_LEVEL(ptbl_ptr->ptbl_flags), ptbl_ptr, ptbl_ptr->ptbl_valid); 
10298 +
10299 +    /* was that the last one on this page */
10300 +    if ( ! ptbl_ptr->ptbl_valid ) 
10301 +    {
10302 +       /* so no bits should be set then */
10303 +       ASSERT ( ptbl_ptr->ptbl_base == 0 );
10304 +
10305 +       /* is this the first page ?? */
10306 +       if ( elan3mmu->elan3mmu_lXptbl == ptbl_ptr ) 
10307 +       {
10308 +           /* make the list start at the second element */
10309 +            elan3mmu->elan3mmu_lXptbl = ptbl_ptr->ptbl_parent;
10310 +
10311 +            /* put ptbl back on free list */
10312 +            elan3mmu_free_lXptbl(dev, ptbl_ptr);
10313 +
10314 +            /* unlock and return */
10315 +            spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
10316 +            return ;
10317 +       }
10318 +
10319 +       /* scan thro list looking for this page */
10320 +       prev = elan3mmu->elan3mmu_lXptbl;
10321 +       while ( prev->ptbl_parent != NULL ) 
10322 +       {
10323 +           if ( prev->ptbl_parent == ptbl_ptr ) /* its the next one */
10324 +           {
10325 +               /* remove element from chain */
10326 +               prev->ptbl_parent =  ptbl_ptr->ptbl_parent;
10327 +
10328 +               /* put ptbl back on free list */
10329 +               elan3mmu_free_lXptbl(dev, ptbl_ptr);
10330 +
10331 +               /* unlock and return */
10332 +               spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
10333 +               return ;
10334 +           }           
10335 +           prev = prev->ptbl_parent;
10336 +       }
10337 +       
10338 +               panic ("elan3mmu_free_pte: failed to find ptbl in chain");
10339 +       /* NOTREACHED */
10340 +    }
10341 +    
10342 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
10343 +}
10344 +
10345 +void
10346 +elan3mmu_free_lXptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl)
10347 +{
10348 +    ELAN3_PTBL_GR *ptg;
10349 +
10350 +    HAT_PRINTF2 (2, "elan3mmu_free_lXptbl: dev %p ptbl %p\n", dev, ptbl);
10351 +
10352 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
10353 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
10354 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_X);
10355 +    ASSERT (ptbl->ptbl_valid == 0);
10356 +   
10357 +    ptbl->ptbl_flags = 0;
10358 +
10359 +    ptg = PTBL_TO_GR(ptbl);
10360 +
10361 +    if (ptg->pg_level == PTBL_LEVEL_3)
10362 +    {
10363 +       ELAN3MMU_STAT(lX_freed_l3);
10364 +
10365 +       HAT_PRINTF1 (2, "elan3mmu_free_lXptbl: freeing stolen level 3 ptbl %p\n", ptbl);
10366 +
10367 +       /* this was really a level 3 ptbl which we had to steal */
10368 +       spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10369 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10370 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
10371 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
10372 +       spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10373 +    }
10374 +    else
10375 +    {
10376 +       spin_lock (&dev->Level[PTBL_LEVEL_X].PtblLock);
10377 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_X].PtblFreeList;
10378 +       dev->Level[PTBL_LEVEL_X].PtblFreeList = ptbl;
10379 +       dev->Level[PTBL_LEVEL_X].PtblFreeCount++;
10380 +       spin_unlock (&dev->Level[PTBL_LEVEL_X].PtblLock);
10381 +    }
10382 +}
10383 +
10384 +void
10385 +elan3mmu_free_l1ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)  /* return an empty, locked level 1 ptbl to the free list */
10386 +{
10387 +    HAT_PRINTF3 (2, "elan3mmu_free_l1ptbl: dev %p ptbl %p ptbl->ptbl_valid %x \n", dev, ptbl, ptbl->ptbl_valid);
10388 +
10389 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
10390 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
10391 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_1);
10392 +    ASSERT (ptbl->ptbl_valid == 0);                            /* caller must have invalidated every entry first */
10393 +    
10394 +    HAT_PRINTF2 (2, "elan3mmu_free_l1ptbl: dev %p ptbl %p\n", dev, ptbl);
10395 +
10396 +    ptbl->ptbl_flags = 0;                                      /* clear flags while the ptbl lock is still held */
10397 +    spin_unlock (lock);                                        /* drop caller's ptbl lock; irq state restored below */
10398 +    
10399 +    spin_lock (&dev->Level[PTBL_LEVEL_1].PtblLock);            /* push onto the level 1 free list */
10400 +    ptbl->ptbl_next = dev->Level[PTBL_LEVEL_1].PtblFreeList;
10401 +    dev->Level[PTBL_LEVEL_1].PtblFreeList = ptbl;
10402 +    dev->Level[PTBL_LEVEL_1].PtblFreeCount++;
10403 +    spin_unlock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10404 +
10405 +    local_irq_restore (flags);                                 /* restore the irq state the caller saved with 'flags' */
10406 +}
10407 +
10408 +void
10409 +elan3mmu_free_l2ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
10410 +{
10411 +    ELAN3_PTBL_GR *ptg;
10412 +
10413 +    HAT_PRINTF2 (2, "elan3mmu_free_l2ptbl: dev %p ptbl %p\n", dev, ptbl);
10414 +
10415 +    ASSERT (PTBL_IS_LOCKED(ptbl->ptbl_flags));
10416 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
10417 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
10418 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_2);
10419 +    ASSERT (ptbl->ptbl_valid == 0);
10420 +   
10421 +    ptbl->ptbl_flags = 0;
10422 +    spin_unlock (lock);
10423 +
10424 +    ptg = PTBL_TO_GR(ptbl);
10425 +
10426 +    if (ptg->pg_level == PTBL_LEVEL_3)
10427 +    {
10428 +       ELAN3MMU_STAT(l2_freed_l3);
10429 +
10430 +       HAT_PRINTF1 (2, "elan3mmu_free_l2ptbl: freeing stolen level 3 ptbl %p\n", ptbl);
10431 +
10432 +       /* this was really a level 3 ptbl which we had to steal */
10433 +       spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10434 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10435 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
10436 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
10437 +       spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10438 +    }
10439 +    else
10440 +    {
10441 +       spin_lock (&dev->Level[PTBL_LEVEL_2].PtblLock);
10442 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_2].PtblFreeList;
10443 +       dev->Level[PTBL_LEVEL_2].PtblFreeList = ptbl;
10444 +       dev->Level[PTBL_LEVEL_2].PtblFreeCount++;
10445 +       spin_unlock (&dev->Level[PTBL_LEVEL_2].PtblLock);
10446 +    }  
10447 +    local_irq_restore (flags);
10448 +}
10449 +
10450 +void
10451 +elan3mmu_free_l3ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)  /* return an empty, locked level 3 ptbl to the free list */
10452 +{
10453 +    ASSERT (PTBL_IS_LOCKED(ptbl->ptbl_flags));
10454 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
10455 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
10456 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_3);
10457 +    ASSERT (ptbl->ptbl_valid == 0);                            /* caller must have unloaded every pte first */
10458 +    
10459 +    HAT_PRINTF2 (2, "elan3mmu_free_l3ptbl: dev %p ptbl %p\n", dev, ptbl);
10460 +
10461 +    if (ptbl->ptbl_flags & PTBL_KERNEL)                                /* if the ptbl has been used by the kernel */
10462 +    {                                                          /* then zero all the pte's, since they will */
10463 +       elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L3_ENTRIES*ELAN3_PTE_SIZE);  /* have been filled by elan3mmu_kernel_l3ptbl */
10464 +    }
10465 +
10466 +    ptbl->ptbl_flags = 0;                                      /* clear flags while the ptbl lock is still held */
10467 +    spin_unlock (lock);
10468 +    
10469 +    spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);            /* push onto the level 3 free list */
10470 +    ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10471 +    dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
10472 +    dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
10473 +    spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10474 +
10475 +    local_irq_restore (flags);                                 /* restore the irq state the caller saved with 'flags' */
10476 +}
10477 +
10478 +void
10479 +elan3mmu_kernel_l3ptbl (ELAN3_PTBL *ptbl)                      /* mark a level 3 ptbl kernel-owned and fill every slot with the kernel invalid pte */
10480 +{
10481 +    ELAN3_DEV   *dev  = ptbl->ptbl_elan3mmu->elan3mmu_dev;
10482 +    sdramaddr_t pte  = PTBL_TO_PTADDR(ptbl);                   /* sdram address of the first pte in this ptbl */
10483 +    ELAN3_PTE    tpte = elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu);
10484 +    int                i;
10485 +
10486 +    ptbl->ptbl_flags |= PTBL_KERNEL;                           /* elan3mmu_free_l3ptbl re-zeroes PTBL_KERNEL ptbls on free */
10487 +    for (i = 0; i < ELAN3_L3_ENTRIES; i++, pte += ELAN3_PTE_SIZE)
10488 +    {
10489 +       elan3_writepte (dev, pte, tpte);                        /* write the invalid pte into each of the ELAN3_L3_ENTRIES slots */
10490 +    }
10491 +}
10492 +       
10493 +#define PTBL_CAN_STEAL(flag)   (((flag) & (PTBL_KERNEL|PTBL_KEEP)) == 0 && (((flag) & PTBL_ALLOCED) && PTBL_LEVEL(flag) == PTBL_LEVEL_3))
10494 +#define PTBL_MAY_STEAL(flag)   (((flag) & (PTBL_KERNEL|PTBL_KEEP|PTBL_LOCKED)) == 0 && (((flag) & PTBL_ALLOCED) && PTBL_LEVEL(flag) == PTBL_LEVEL_3))
10495 +
10496 +static int
10497 +elan3mmu_steal_this_ptbl (ELAN3_DEV *dev, ELAN3_PTBL *l3ptbl)
10498 +{
10499 +    ELAN3_PTBL  *l2ptbl     = l3ptbl->ptbl_parent;
10500 +    E3_Addr     l2addr     = BASE2VA(l2ptbl);
10501 +    E3_Addr     l3addr     = BASE2VA(l3ptbl);
10502 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
10503 +    sdramaddr_t l2ptp;
10504 +    spinlock_t *l2lock;
10505 +    unsigned long l2flags;
10506 +
10507 +    HAT_PRINTF5 (1, "elan3mmu_steal_this_ptbl: l3ptbl %p (%x) l2ptbl %p (%x) l2addr %x\n",
10508 +                l3ptbl, l3ptbl->ptbl_flags, l2ptbl, l2ptbl->ptbl_flags, l2addr);
10509 +
10510 +    if (PTBL_CAN_STEAL (l3ptbl->ptbl_flags) &&
10511 +       elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_NOWAIT, l3ptbl->ptbl_elan3mmu, l2addr, PTBL_LEVEL_2, &l2lock, &l2flags) == LK_PTBL_OK)
10512 +    {
10513 +       ELAN3MMU_STAT(stolen_ptbls);
10514 +
10515 +       /* Locked both L3 and L2 page tables. */
10516 +       l2ptp = PTBL_TO_PTADDR (l2ptbl) + ELAN3_L2_INDEX(l3addr)*ELAN3_PTP_SIZE;
10517 +       
10518 +       /* detach the level 3 page table */
10519 +       elan3_writeptp (dev, l2ptp, invalidptp);
10520 +       ElanFlushTlb (dev);
10521 +
10522 +       l2ptbl->ptbl_valid--;
10523 +
10524 +       HAT_PRINTF3 (2, "elan3mmu_steal_this_ptbl: dec valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
10525 +
10526 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
10527 +
10528 +       elan3mmu_unload_loop (l3ptbl->ptbl_elan3mmu, l3ptbl, 0, ELAN3_L3_ENTRIES, PTE_UNLOAD_NOFLUSH);
10529 +
10530 +       ASSERT (l3ptbl->ptbl_valid == 0);
10531 +
10532 +       l3ptbl->ptbl_flags = 0;
10533 +       return (1);
10534 +    }
10535 +    return (0);
10536 +}
10537 +
10538 +static ELAN3_PTBL *
10539 +elan3mmu_steal_l3ptbl (ELAN3_DEV *dev, int attr)
10540 +{
10541 +    ELAN3_PTBL_GR      *ptg;
10542 +    ELAN3_PTBL         *ptbl;
10543 +    spinlock_t         *lock;
10544 +    unsigned long        group_flags;
10545 +    unsigned long        ptbl_flags;
10546 +    register int        i;
10547 +
10548 +    HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: attr %x\n", attr);
10549 +
10550 +    spin_lock_irqsave (&dev->PtblGroupLock, group_flags);
10551 +
10552 +    ptg = dev->Level3PtblGroupHand;
10553 +
10554 +    if (ptg == NULL)
10555 +       ptg = dev->Level[PTBL_LEVEL_3].PtblGroupList;
10556 +    
10557 +    for (;;)
10558 +    {
10559 +       while (ptg)
10560 +       {
10561 +           for (i = 0, ptbl = ptg->pg_ptbls; i < PTBLS_PER_GROUP_MAX; i++, ptbl++)
10562 +           {
10563 +               if (PTBL_MAY_STEAL (ptbl->ptbl_flags) &&
10564 +                   elan3mmu_lock_this_ptbl (ptbl, LK_PTBL_NOWAIT, &lock, &ptbl_flags) == LK_PTBL_OK)
10565 +               {
10566 +                   if (elan3mmu_steal_this_ptbl (dev, ptbl ))
10567 +                   {
10568 +                       HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: stolen ptbl %p\n", ptbl);
10569 +
10570 +                       elan3mmu_unlock_ptbl (ptbl, lock,ptbl_flags);
10571 +
10572 +                       dev->Level3PtblGroupHand = ptg->pg_next;
10573 +
10574 +                       spin_unlock_irqrestore (&dev->PtblGroupLock, group_flags);
10575 +
10576 +                       return (ptbl);
10577 +                   }
10578 +                   elan3mmu_unlock_ptbl (ptbl, lock, ptbl_flags);
10579 +               }
10580 +           }
10581 +           ptg = ptg->pg_next;
10582 +       }
10583 +       
10584 +       if (dev->Level[PTBL_LEVEL_3].PtblFreeList)
10585 +       {
10586 +           spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10587 +           ptbl = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10588 +           if (ptbl != NULL)
10589 +           {
10590 +               dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl->ptbl_next;
10591 +               dev->Level[PTBL_LEVEL_3].PtblFreeCount--;
10592 +           }
10593 +           spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10594 +
10595 +           if (ptbl != NULL)
10596 +           {
10597 +               HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: found ptbl %p on free list\n", ptbl);
10598 +               break;
10599 +           }
10600 +       }
10601 +
10602 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_3, attr, 1);
10603 +
10604 +       if (ptbl != NULL)
10605 +       {
10606 +           HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: created new ptbl %p\n", ptbl);
10607 +           break;
10608 +       }
10609 +       
10610 +       HAT_PRINTF0 (1, "elan3mmu_steal_l3ptbl: cannot find a ptbl, retrying\n");
10611 +       ptg = dev->Level[PTBL_LEVEL_3].PtblGroupList;
10612 +    }
10613 +
10614 +    spin_unlock_irqrestore (&dev->PtblGroupLock, group_flags);
10615 +    return (ptbl);
10616 +}
10617 +
10618 +sdramaddr_t
10619 +elan3mmu_ptefind (ELAN3MMU *elan3mmu, E3_Addr addr, int *level, 
10620 +                 ELAN3_PTBL **pptbl, spinlock_t **plock, unsigned long *flags)
10621 +{
10622 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
10623 +    ELAN3_PTBL  *l1ptbl;
10624 +    sdramaddr_t l1ptp;
10625 +    ELAN3_PTP    tl1ptp;
10626 +    E3_Addr     l1base;
10627 +    ELAN3_PTBL  *l2ptbl;
10628 +    sdramaddr_t l2ptp;
10629 +    ELAN3_PTP    tl2ptp;
10630 +    E3_Addr     l2base;
10631 +    ELAN3_PTBL  *l3ptbl;
10632 +    sdramaddr_t l3pte;
10633 +    spinlock_t *l1lock;
10634 +    spinlock_t *l2lock;
10635 +    spinlock_t *l3lock;
10636 +    unsigned long l1flags;
10637 +    unsigned long l2flags;
10638 +    unsigned long l3flags;
10639 +
10640 +    HAT_PRINTF2 (2, "elan3mmu_ptefind: elan3mmu %p addr %x\n", elan3mmu, addr);
10641 +
10642 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
10643 +    *level = 0;
10644 +
10645 +    if (l1ptbl == NULL)
10646 +       return ((sdramaddr_t) NULL);
10647 +
10648 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
10649 +    l1base = ELAN3_L1_BASE(addr);
10650 +    
10651 +retryl1:
10652 +    tl1ptp = elan3_readptp (dev, l1ptp);
10653 +    
10654 +    HAT_PRINTF4 (2, "elan3mmu_ptefind: l1ptbl %p l1ptp %lx l1base %x : tl1ptp %x\n", l1ptbl, l1ptp, l1base, tl1ptp);
10655 +
10656 +    switch (ELAN3_PTP_TYPE(tl1ptp))
10657 +    {
10658 +    case ELAN3_ET_PTE:
10659 +       elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
10660 +
10661 +       tl1ptp = elan3_readptp (dev, l1ptp);
10662 +       if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_PTE)
10663 +       {
10664 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
10665 +           goto retryl1;
10666 +       }
10667 +       
10668 +       *level = 1;
10669 +       *pptbl = l1ptbl;
10670 +       *plock = l1lock;
10671 +       *flags = l1flags;
10672 +       
10673 +       /* return with l1lock */
10674 +       return (l1ptp);  
10675 +
10676 +    case ELAN3_ET_INVALID:
10677 +       return ((sdramaddr_t) 0);
10678 +       
10679 +    case ELAN3_ET_PTP:
10680 +       break;
10681 +
10682 +    default:
10683 +       panic ("elan3mmu_ptefind: found bad entry in level 1 page table");
10684 +       /* NOTREACHED */
10685 +    }
10686 +    
10687 +    HAT_PRINTF1 (2, "elan3mmu_ptefind: chain to level 2 ptbl from ptp %x\n", tl1ptp);
10688 +
10689 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
10690 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
10691 +    l2base = ELAN3_L2_BASE(addr);
10692 +    
10693 +    tl2ptp = elan3_readptp (dev, l2ptp);
10694 +    
10695 +    HAT_PRINTF4 (2, "elan3mmu_ptefind: l2ptbl %p l2ptp %lx l2base %x : tl2ptp %x\n", l2ptbl, l2ptp, l2base, tl2ptp);
10696 +
10697 +    switch (ELAN3_PTP_TYPE(tl2ptp))
10698 +    {
10699 +    case ELAN3_ET_PTE:
10700 +       switch (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
10701 +       {
10702 +       case LK_PTBL_OK:
10703 +           tl2ptp = elan3_readptp (dev, l2ptp);
10704 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_PTE)
10705 +           {
10706 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
10707 +               goto retryl1;
10708 +           }
10709 +           
10710 +           *level = 2;
10711 +           *pptbl = l2ptbl;
10712 +           *plock = l2lock;
10713 +           *flags = l2flags;
10714 +           
10715 +           /* return with l2lock */
10716 +           return (l2ptp); 
10717 +           
10718 +       case LK_PTBL_MISMATCH:
10719 +           HAT_PRINTF6 (2, "elan3mmu_ptefind: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
10720 +                        l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
10721 +           
10722 +           /*
10723 +            * We've trogged down to this ptbl,  but someone has just
10724 +            * stolen it,  so try all over again.
10725 +            */
10726 +           goto retryl1;
10727 +           
10728 +       default:
10729 +           panic ("elan3mmu_ptefind: elan3mmu_lock_ptbl returned bad value");
10730 +           /* NOTREACHED */
10731 +       }
10732 +    case ELAN3_ET_INVALID:
10733 +       return ((sdramaddr_t) 0);
10734 +       
10735 +    case ELAN3_ET_PTP:
10736 +       break;
10737 +    default:
10738 +       panic ("elan3mmu_ptefind: found bad entry in level 2 page table");
10739 +       /* NOTREACHED */
10740 +    }
10741 +    
10742 +    HAT_PRINTF1 (2, "elan3mmu_ptefind: chain to level 3 page table from ptp %x\n", tl2ptp);
10743 +
10744 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
10745 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
10746 +    
10747 +    HAT_PRINTF2 (2, "elan3mmu_ptefind: l3ptbl %p l3pte %lx\n", l3ptbl, l3pte);
10748 +                
10749 +    switch (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags))
10750 +    {
10751 +    case LK_PTBL_OK:
10752 +       *level = 3;
10753 +       *plock = l3lock;
10754 +       *pptbl = l3ptbl;
10755 +       *flags = l3flags;
10756 +
10757 +       return (l3pte);
10758 +       
10759 +    case LK_PTBL_FAILED:
10760 +       panic ("elan3mmu_ptefind: l3 lock failed");
10761 +       /* NOTREACHED */
10762 +
10763 +    case LK_PTBL_MISMATCH:
10764 +       HAT_PRINTF6 (2, "elan3mmu_ptefind: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
10765 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_elan3mmu, l3ptbl->ptbl_base, elan3mmu, addr);
10766 +                    
10767 +       /*
10768 +        * We've trogged down to this ptbl,  but someone has just
10769 +        * stolen it,  so try all over again.
10770 +        */
10771 +       goto retryl1;
10772 +       
10773 +    default:
10774 +       panic ("elan3mmu_ptefind: elan3mmu_lock_ptbl returned bad value");
10775 +       /* NOTREACHED */
10776 +    }
10777 +    /* NOTREACHED */
10778 +    return ((sdramaddr_t) 0);
10779 +}
10780 +
10781 +sdramaddr_t 
10782 +elan3mmu_ptp2pte (ELAN3MMU *elan3mmu, sdramaddr_t ptp, int level)      /* follow a ptp holding an ET_PTE; returns the pte's sdram address */
10783 +{
10784 +    ELAN3_PTP tptp = elan3_readptp (elan3mmu->elan3mmu_dev, ptp);      /* fetch the ptp from elan sdram */
10785 +
10786 +    ASSERT (level != 3 && ELAN3_PTP_TYPE(tptp) == ELAN3_ET_PTE);       /* only level 1/2 ptps may point directly at a pte */
10787 +
10788 +    return PTP_TO_PT_PADDR(tptp);                                      /* strip the type bits, leaving the pte address */
10789 +}
10790 +
10791 +sdramaddr_t
10792 +elan3mmu_ptealloc (ELAN3MMU *elan3mmu, E3_Addr addr, int level, 
10793 +                  ELAN3_PTBL **pptbl, spinlock_t **plock, int attr, unsigned long *flags)
10794 +{
10795 +    ELAN3_DEV   *dev     = elan3mmu->elan3mmu_dev;
10796 +    ELAN3_PTBL  *l1ptbl;
10797 +    ELAN3_PTBL  *lXptbl;
10798 +    int         idx;
10799 +    sdramaddr_t l1ptp;
10800 +    ELAN3_PTP    tl1ptp;
10801 +    E3_Addr     l1base;
10802 +    spinlock_t *l1lock;
10803 +    ELAN3_PTBL  *l2ptbl;
10804 +    sdramaddr_t l2ptp;
10805 +    ELAN3_PTP    tl2ptp;
10806 +    E3_Addr     l2base;
10807 +    spinlock_t *l2lock;
10808 +    ELAN3_PTBL  *l3ptbl;
10809 +    sdramaddr_t l3pte;
10810 +    E3_Addr     l3base;
10811 +    spinlock_t *l3lock;
10812 +
10813 +    unsigned long l1flags;
10814 +    unsigned long l2flags;
10815 +    unsigned long l3flags;
10816 +
10817 +    HAT_PRINTF2 (2, "elan3mmu_ptealloc: elan3mmu %p addr %x\n", elan3mmu, addr);
10818 +
10819 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
10820 +    if (l1ptbl == NULL)
10821 +       return ((sdramaddr_t) 0);
10822 +
10823 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
10824 +    l1base = ELAN3_L1_BASE(addr);
10825 +               
10826 +retryl1:
10827 +    tl1ptp = elan3_readptp (dev, l1ptp);
10828 +
10829 +    HAT_PRINTF5 (2, "elan3mmu_ptealloc: l1ptbl %p 1ptp %lx l1base %x (%x) : tl1ptp %x\n",
10830 +                l1ptbl, l1ptp, l1base, l1ptbl->ptbl_base, tl1ptp);
10831 +
10832 +    switch (ELAN3_PTP_TYPE(tl1ptp))
10833 +    {
10834 +    case ELAN3_ET_PTE:
10835 +       if (level == PTBL_LEVEL_1)
10836 +       {
10837 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
10838 +
10839 +           tl1ptp = elan3_readptp (dev, l1ptp);
10840 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_PTE)
10841 +           {
10842 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
10843 +               goto retryl1;
10844 +           }
10845 +           
10846 +           *pptbl = l1ptbl;
10847 +           *plock = l1lock;
10848 +           *flags = l1flags;
10849 +
10850 +           /* return holding l1lock */
10851 +           return (l1ptp);
10852 +       }
10853 +       panic ("elan3mmu_ptealloc: found pte in level 1 page table");
10854 +       /* NOTREACHED */
10855 +
10856 +    case ELAN3_ET_PTP:
10857 +       if (level == PTBL_LEVEL_1)
10858 +           panic ("elan3mmu_ptealloc: found PTP when loading a level 1 PTE\n");
10859 +       break;
10860 +
10861 +    case ELAN3_ET_INVALID:
10862 +       if (level == PTBL_LEVEL_1)
10863 +       {
10864 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu,  &idx)) == NULL)
10865 +               return ((sdramaddr_t) 0);
10866 +
10867 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
10868 +
10869 +           tl1ptp = elan3_readptp (dev, l1ptp);
10870 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
10871 +           {
10872 +               /* raced with someone else, whose got there first */
10873 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
10874 +
10875 +               /* drop the l1lock and retry */
10876 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
10877 +               goto retryl1;
10878 +           }
10879 +           
10880 +           tl1ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
10881 +           
10882 +           elan3_writeptp (dev, l1ptp, tl1ptp);
10883 +
10884 +           *pptbl = l1ptbl;
10885 +           *plock = l1lock;
10886 +           *flags = l1flags;
10887 +
10888 +           /* return holding l1lock */
10889 +           return (l1ptp);
10890 +       }
10891 +
10892 +       if (level == PTBL_LEVEL_2)
10893 +       {
10894 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
10895 +               return ((sdramaddr_t) 0);
10896 +
10897 +           if ((l2ptbl = elan3mmu_alloc_l2ptbl (dev, attr, l1ptbl, elan3mmu, ELAN3_L2_BASE(addr), &l2lock, &l2flags)) == NULL)
10898 +           {
10899 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx); 
10900 +               return ((sdramaddr_t) 0);
10901 +           }
10902 +
10903 +           /* Connect l2ptbl to the new LX pte */
10904 +           l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr) * ELAN3_PTP_SIZE;
10905 +           tl2ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
10906 +
10907 +           elan3_writeptp (dev, l2ptp, tl2ptp);
10908 +
10909 +           /* Now need to lock the l1 ptbl */
10910 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
10911 +
10912 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
10913 +           elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags);
10914 +
10915 +           tl1ptp = elan3_readptp (dev, l1ptp);
10916 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
10917 +           {
10918 +               HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free l2 ptbl/lx pte\n");
10919 +               
10920 +               tl2ptp = ELAN3_INVALID_PTP;
10921 +               elan3_writeptp (dev, l2ptp, tl2ptp);
10922 +               
10923 +               HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
10924 +               HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
10925 +               
10926 +               elan3mmu_free_l2ptbl (dev, l2ptbl, l2lock, l2flags);
10927 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
10928 +
10929 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
10930 +
10931 +               goto retryl1;
10932 +           }
10933 +           
10934 +           /* Now have L1 locked,  so install the L2 ptbl */
10935 +           l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
10936 +           tl1ptp = PTBL_TO_PTADDR(l2ptbl) | ELAN3_ET_PTP;
10937 +           l1ptbl->ptbl_valid++;
10938 +
10939 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
10940 +                        PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
10941 +           
10942 +           elan3_writeptp (dev, l1ptp, tl1ptp);
10943 +           
10944 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l1ptp %lx to %x\n", l1ptp, tl1ptp);
10945 +
10946 +           /* unordered unlock - lock l1ptbl, lock l2ptbl, unlock l1ptbl */
10947 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l2flags); /* need to unlock with the l2flags to keep irq order correct */
10948 +
10949 +           *pptbl = l2ptbl;
10950 +           *plock = l2lock;
10951 +           *flags = l1flags; /* return the l1flags here as we have released the l2flags already to keep order */
10952 +
10953 +           /* return holding l2lock */
10954 +           return (l2ptp);
10955 +       }
10956 +
10957 +       HAT_PRINTF0 (2, "elan3mmu_ptealloc: allocating level 2 and level 3 page tables\n");
10958 +
10959 +       /* Allocate a level 2 and level 3 page table and link them together */
10960 +       if ((l2ptbl = elan3mmu_alloc_l2ptbl (dev, attr, l1ptbl, elan3mmu, ELAN3_L2_BASE(addr), &l2lock, &l2flags)) == NULL)
10961 +           return ((sdramaddr_t) 0);
10962 +
10963 +       if ((l3ptbl = elan3mmu_alloc_l3ptbl (dev, attr | PTE_NO_SLEEP, l2ptbl, elan3mmu, ELAN3_L3_BASE(addr), &l3lock, &l3flags)) == NULL)
10964 +       {
10965 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
10966 +           return ((sdramaddr_t) 0);
10967 +       }
10968 +
10969 +       ASSERT (PTBL_IS_LOCKED (l2ptbl->ptbl_flags));
10970 +       ASSERT (PTBL_LEVEL (l2ptbl->ptbl_flags) == PTBL_LEVEL_2);
10971 +       ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_flags));
10972 +       ASSERT (PTBL_LEVEL (l3ptbl->ptbl_flags) == PTBL_LEVEL_3);
10973 +
10974 +       HAT_PRINTF6 (2, "elan3mmu_ptealloc: l2ptbl %p (%x,%x) l3ptbl %p (%x,%x)\n",
10975 +                    l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_base,
10976 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_base);
10977 +
10978 +       if (CTXT_IS_KERNEL (elan3mmu->elan3mmu_ctxt))
10979 +       {
10980 +           l2ptbl->ptbl_flags |= PTBL_KERNEL;
10981 +           elan3mmu_kernel_l3ptbl (l3ptbl);
10982 +       }
10983 +       
10984 +       /*
10985 +        * Connect L3 ptbl to the new L2 ptbl.
10986 +        */
10987 +       l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr) * ELAN3_PTP_SIZE;
10988 +       tl2ptp = PTBL_TO_PTADDR(l3ptbl) | ELAN3_ET_PTP;
10989 +
10990 +       l2ptbl->ptbl_valid = 1;
10991 +
10992 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: set valid for level %d ptbl %p to %d\n", 
10993 +                    PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
10994 +
10995 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
10996 +
10997 +       elan3_writeptp (dev, l2ptp, tl2ptp);
10998 +
10999 +       /* 
11000 +        * Now need to lock the l1 ptbl - to maintain lock ordering
11001 +        * we set the PTBL_KEEP bit to stop the l3 ptbl from being 
11002 +        * stolen and drop the locks in the order we aquired them
11003 +        */
11004 +       l3ptbl->ptbl_flags |= PTBL_KEEP;
11005 +
11006 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
11007 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11008 +
11009 +       elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11010 +       elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
11011 +           
11012 +       l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11013 +          
11014 +       /* Now have l1 and l3 ptbls locked,  so install the new l2 ptbl into the l1. */
11015 +       tl1ptp = elan3_readptp (dev, l1ptp);
11016 +
11017 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: l1ptp %lx is %x\n", l1ptp, tl1ptp);
11018 +
11019 +       if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
11020 +       {
11021 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free l2/l3 ptbls\n");
11022 +
11023 +           /* free off the level 3 page table */
11024 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l3 ptbl %p (%x)\n", l3ptbl, l3ptbl->ptbl_flags);
11025 +
11026 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11027 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
11028 +
11029 +           /* and unlock the level 1 ptbl */
11030 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11031 +           
11032 +           /* lock the level 2 page table, and clear out the PTP, then free it */
11033 +           (void) elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags);
11034 +
11035 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: locked l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
11036 +           
11037 +           tl2ptp = ELAN3_INVALID_PTP;
11038 +           elan3_writeptp (dev, l2ptp, tl2ptp);
11039 +           l2ptbl->ptbl_valid = 0;
11040 +
11041 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: set to 0 valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
11042 +
11043 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11044 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
11045 +
11046 +           elan3mmu_free_l2ptbl (dev, l2ptbl, l2lock, l2flags);
11047 +
11048 +           goto retryl1;
11049 +       }
11050 +       
11051 +       HAT_PRINTF4 (2, "elan3mmu_ptealloc: l1ptbl is %p (%x), l3ptbl is %p (%x)\n", 
11052 +                    l1ptbl, l1ptbl->ptbl_flags, l3ptbl, l3ptbl->ptbl_flags);
11053 +
11054 +       /* Now have L1 and L3 locked,  so install the L2 ptbl */
11055 +       l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11056 +       tl1ptp = PTBL_TO_PTADDR(l2ptbl) | ELAN3_ET_PTP;
11057 +       l1ptbl->ptbl_valid++;
11058 +
11059 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
11060 +                    PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11061 +
11062 +       elan3_writeptp (dev, l1ptp, tl1ptp);
11063 +
11064 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l1ptp %lx to %x\n", l1ptp, tl1ptp);
11065 +
11066 +       /* unordered unlock - lock l1ptbl, lock l3ptbl, unlock l1ptbl */
11067 +       elan3mmu_unlock_ptbl (l1ptbl, l1lock, l3flags); /* free using l3flags to keep irq ordering */
11068 +
11069 +       l3pte = PTBL_TO_PTADDR (l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11070 +
11071 +       /* Level 3 ptbl is already locked,  so just return the pte */
11072 +       *pptbl = l3ptbl;
11073 +       *plock = l3lock;
11074 +       *flags = l1flags; /* return l1flags to keep irq ordering */
11075 +
11076 +       return (l3pte);
11077 +
11078 +    default:
11079 +       panic ("elan3mmu_ptealloc: found bad entry in level 1 page table");
11080 +       /* NOTREACHED */
11081 +    }
11082 +
11083 +    HAT_PRINTF1 (2, "elan3mmu_ptealloc: chain to level 2 ptbl from ptp %x\n", tl1ptp);
11084 +
11085 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11086 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
11087 +    l2base = ELAN3_L2_BASE(addr);
11088 +
11089 +    tl2ptp = elan3_readptp (dev, l2ptp);
11090 +
11091 +    HAT_PRINTF5 (2, "elan3mmu_ptealloc: l2ptbl %p l2ptp %lx l2base %x (%x) : tl2ptp %x\n",
11092 +                l2ptbl, l2ptp, l2base, l2ptbl->ptbl_base, tl2ptp);
11093 +
11094 +    switch (ELAN3_PTP_TYPE(tl2ptp))
11095 +    {
11096 +    case ELAN3_ET_PTE:
11097 +       if (level == PTBL_LEVEL_2) {
11098 +           /* this is a pointer to a pte,  we should just return it */
11099 +
11100 +           switch (elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
11101 +           {
11102 +           case LK_PTBL_OK:
11103 +               break;
11104 +       
11105 +           case LK_PTBL_FAILED:
11106 +               panic ("elan3mmu_ptealloc: l2 lock failed");
11107 +               /* NOTREACHED */
11108 +               
11109 +           case LK_PTBL_MISMATCH:
11110 +               HAT_PRINTF6 (2, "elan3mmu_ptealloc: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11111 +                            l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
11112 +               
11113 +               /*
11114 +                * We've trogged down to this ptbl,  but someone has just
11115 +                * stolen it,  so try all over again.
11116 +                */
11117 +               goto retryl1;
11118 +               
11119 +           default:
11120 +               panic ("elan3mmu_ptealloc: elan3mmu_lock_ptbl returned bad value");
11121 +               /* NOTREACHED */
11122 +           }
11123 +
11124 +
11125 +           tl2ptp = elan3_readptp (dev, l2ptp);
11126 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_PTE)
11127 +           {
11128 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11129 +               goto retryl1;
11130 +           }
11131 +
11132 +           *pptbl = l2ptbl;
11133 +           *plock = l2lock;
11134 +           *flags = l2flags;
11135 +
11136 +           /* return holding l2lock */
11137 +           return (l2ptp);
11138 +       }
11139 +       panic ("elan3mmu: found pte in level 2 page table");
11140 +       /* NOTREACHED */
11141 +
11142 +    case ELAN3_ET_PTP:
11143 +       break;
11144 +
11145 +    case ELAN3_ET_INVALID:
11146 +       if (level == PTBL_LEVEL_2) 
11147 +       {
11148 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
11149 +               return ((sdramaddr_t) 0);
11150 +
11151 +           switch (elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
11152 +           {
11153 +           case LK_PTBL_OK:
11154 +               break;
11155 +       
11156 +           case LK_PTBL_FAILED:
11157 +               panic ("elan3mmu_ptealloc: l2 lock failed");
11158 +               /* NOTREACHED */
11159 +               
11160 +           case LK_PTBL_MISMATCH:
11161 +               HAT_PRINTF6 (2, "elan3mmu_ptealloc: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11162 +                            l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
11163 +               
11164 +               /*
11165 +                * We've trogged down to this ptbl,  but someone has just
11166 +                * stolen it,  so try all over again.
11167 +                */
11168 +               goto retryl1;
11169 +               
11170 +           default:
11171 +               panic ("elan3mmu_ptealloc: elan3mmu_lock_ptbl returned bad value");
11172 +               /* NOTREACHED */
11173 +           }
11174 +
11175 +           tl2ptp = elan3_readptp (dev, l2ptp);
11176 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_INVALID)
11177 +           {
11178 +               HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free lx pte\n");
11179 +
11180 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
11181 +
11182 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11183 +               goto retryl1;
11184 +           }
11185 +
11186 +           /* Connect l2ptbl to the new LX pte */
11187 +           tl2ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
11188 +                  
11189 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
11190 +                        PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11191 +           
11192 +           elan3_writeptp (dev, l2ptp, tl2ptp);
11193 +           
11194 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l2ptp %lx to %x\n", l2ptp, tl2ptp);
11195 +
11196 +           *pptbl = l2ptbl;
11197 +           *plock = l2lock;
11198 +           *flags = l2flags;
11199 +
11200 +           /* return holding l2lock */
11201 +           return (l2ptp);
11202 +       }
11203 +       HAT_PRINTF0 (2, "elan3mmu_ptealloc: allocate level 3 page table\n");
11204 +
11205 +       if ((l3ptbl = elan3mmu_alloc_l3ptbl (dev, attr, l2ptbl, elan3mmu, ELAN3_L3_BASE(addr), &l3lock, &l3flags)) == NULL)
11206 +           return ((sdramaddr_t) 0);
11207 +
11208 +       if (CTXT_IS_KERNEL (elan3mmu->elan3mmu_ctxt))
11209 +           elan3mmu_kernel_l3ptbl (l3ptbl);
11210 +
11211 +       /* 
11212 +        * Now need to lock the l2 ptbl - to maintain lock ordering
11213 +        * we set the PTBL_KEEP bit to stop the l3 ptbl from being 
11214 +        * stolen and drop the locks in the order we acquired them
11215 +        */
11216 +       l3ptbl->ptbl_flags |= PTBL_KEEP;
11217 +
11218 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
11219 +
11220 +       if (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags) == LK_PTBL_MISMATCH)
11221 +       {
11222 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: l2ptbl freed, free l3 ptbl and try again\n");
11223 +             
11224 +           elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
11225 +
11226 +           /* free off the level 3 page table, and try again */
11227 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11228 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
11229 +           
11230 +           goto retryl1;
11231 +       }
11232 +
11233 +       elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
11234 +
11235 +       l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11236 +
11237 +       /* Now have L2 and L3 ptbls locked, see if someone has beaten us to it. */
11238 +       tl2ptp = elan3_readptp (dev, l2ptp);
11239 +
11240 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: l2ptp at %lx is %x\n", l2ptp, tl2ptp);
11241 +
11242 +       if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_INVALID)
11243 +       {
11244 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it, free l3 ptbl and try again\n");
11245 +
11246 +           /* free off the level 3 page table, and try again */
11247 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11248 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
11249 +           
11250 +           /* Someone has allocated the ptbl before us */
11251 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11252 +           
11253 +           goto retryl1;
11254 +       }
11255 +
11256 +       ASSERT (PTBL_IS_LOCKED (l2ptbl->ptbl_flags));
11257 +
11258 +       /* Install the L3 ptbl into the L2 one */
11259 +       l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
11260 +       tl2ptp = PTBL_TO_PTADDR(l3ptbl) | ELAN3_ET_PTP;
11261 +       l2ptbl->ptbl_valid++;
11262 +
11263 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n",
11264 +                    PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11265 +
11266 +       elan3_writeptp (dev, l2ptp, tl2ptp);
11267 +
11268 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11269 +
11270 +       /* unordered unlock - lock l2ptbl, lock l3ptbl, unlock l2ptbl */
11271 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l3flags); /* free with the l3flags to keep irq ordering */
11272 +
11273 +       l3pte = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11274 +       
11275 +       /* Level 3 ptbl is already locked, so just return the pte */
11276 +       *pptbl = l3ptbl;
11277 +       *plock = l3lock;
11278 +       *flags = l2flags; /* return l2flags to keep irq ordering */
11279 +
11280 +       return (l3pte);
11281 +
11282 +    default:
11283 +       panic ("elan3mmu_ptealloc: found bad entry in level 2 page table");
11284 +       /* NOTREACHED */
11285 +    }
11286 +
11287 +    HAT_PRINTF1 (2, "elan3mmu_ptealloc: chain to level 3 page table from ptp %x\n", tl2ptp);
11288 +
11289 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11290 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11291 +    l3base = ELAN3_L3_BASE(addr);
11292 +
11293 +    HAT_PRINTF4 (2, "elan3mmu_ptealloc: l3ptbl %p 3pte %lx l3base %x (%x)\n",
11294 +                l3ptbl, l3pte, l3base, l3ptbl->ptbl_base);
11295 +                
11296 +    if (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags) == LK_PTBL_OK)
11297 +    {
11298 +       *pptbl = l3ptbl;
11299 +       *plock = l3lock;
11300 +       *flags = l3flags;
11301 +
11302 +       return (l3pte);
11303 +    }
11304 +
11305 +    /* got all the way down here,  but its been nicked before we could lock it */
11306 +    /* so try all over again */
11307 +    goto retryl1;
11308 +}
11309 +
11310 +void
11311 +elan3mmu_l1inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l1ptbl, int attr)
11312 +{
11313 +    ELAN3_DEV     *dev = elan3mmu->elan3mmu_dev;
11314 +    ELAN3_PTP      invalidptp = ELAN3_INVALID_PTP;
11315 +    ELAN3_PTP      tl1ptp;
11316 +    sdramaddr_t   l1ptp;
11317 +    E3_Addr       addr;
11318 +    spinlock_t   *l2lock;
11319 +    ELAN3_PTBL    *l2ptbl;
11320 +    ELAN3_PTBL    *lXptbl;
11321 +    int           idx;
11322 +    int                  i;
11323 +    int                  ret;
11324 +    unsigned long flags;
11325 +
11326 +    l1ptp = PTBL_TO_PTADDR(l1ptbl);
11327 +
11328 +    HAT_PRINTF2 (1, "elan3mmu_l1inval: l1ptbl %p l1ptp %lx\n", l1ptbl, l1ptp);
11329 +
11330 +    for (i = 0, addr = 0; i < ELAN3_L1_ENTRIES; i++, l1ptp += ELAN3_PTP_SIZE)
11331 +    {
11332 +       tl1ptp = elan3_readptp (dev, l1ptp);
11333 +       switch (ELAN3_PTP_TYPE(tl1ptp))
11334 +       {
11335 +       case ELAN3_ET_PTE:
11336 +           lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11337 +           idx    = (PTP_TO_PT_PADDR(tl1ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
11338 +
11339 +           HAT_PRINTF3 (2, "elan3mmu_l1inval: l1ptbl %p : lXptbl %p idx %d\n",
11340 +                        l1ptbl, lXptbl, idx);
11341 +
11342 +           /* invalidate the L1 pte. */
11343 +           elan3_writeptp (dev, l1ptp, invalidptp);
11344 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
11345 +               ElanFlushTlb (dev);         
11346 +
11347 +           l1ptbl->ptbl_valid--;
11348 +           elan3mmu_free_pte ( dev, elan3mmu,  lXptbl, idx); 
11349 +
11350 +           HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n",
11351 +                    PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11352 +           
11353 +           break;
11354 +
11355 +       case ELAN3_ET_PTP:
11356 +           HAT_PRINTF5 (2, "elan3mmu_l1inval: l1ptbl %p : ptp %lx (%x) addr %x (%d)\n",
11357 +                        l1ptbl, l1ptp, tl1ptp, addr, i);
11358 +
11359 +           /* invalidate the L1 ptp. */
11360 +           elan3_writeptp (dev, l1ptp, invalidptp);
11361 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
11362 +               ElanFlushTlb (dev);
11363 +
11364 +           /* invalidate the level 2 page table */
11365 +           l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11366 +           ret    = elan3mmu_l2inval (elan3mmu, l2ptbl, attr | PTE_UNLOAD_NOFLUSH, addr, &l2lock, &flags);
11367 +
11368 +           ASSERT ((l2ptbl->ptbl_flags & PTBL_KEEP) == 0);
11369 +           
11370 +           if (ret == LK_PTBL_OK)
11371 +           {
11372 +               if (((l2ptbl->ptbl_flags & PTBL_KEEP) == 0) && l2ptbl->ptbl_valid == 0)
11373 +               {
11374 +                   HAT_PRINTF1 (2, "elan3mmu_l1inval: free l2ptbl %p\n", l2ptbl);
11375 +                   
11376 +                   l1ptbl->ptbl_valid--;
11377 +                   elan3mmu_free_l2ptbl (elan3mmu->elan3mmu_dev, l2ptbl, l2lock, flags);
11378 +
11379 +                   HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n", 
11380 +                                PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11381 +               }
11382 +               else
11383 +               {
11384 +                   /* need to keep this page table,  so even though its now empty, */
11385 +                   /* chain it back in */
11386 +                   HAT_PRINTF1 (2, "elan3mmu_l1inval: keep l2ptbl %p\n", l2ptbl);
11387 +
11388 +                   elan3_writeptp (dev, l1ptp, tl1ptp);
11389 +                   elan3mmu_unlock_ptbl (l2ptbl, l2lock, flags);
11390 +               }    
11391 +           }
11392 +           else
11393 +           {
11394 +               l1ptbl->ptbl_valid--;
11395 +
11396 +               HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n", 
11397 +                            PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11398 +           }
11399 +           break;
11400 +           
11401 +       case ELAN3_ET_INVALID:
11402 +           break;
11403 +
11404 +       default:
11405 +           panic ("elan3mmu_l1inval: found invalid entry in level 1 page table");
11406 +           /* NOTREACHED */
11407 +       }
11408 +
11409 +       if (l1ptbl->ptbl_valid == 0)
11410 +           break;
11411 +
11412 +       addr += ELAN3_L1_SIZE;
11413 +    }
11414 +}
11415 +
11416 +int
11417 +elan3mmu_l2inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l2ptbl, int attr, E3_Addr addr, spinlock_t **pl2lock, unsigned long *flags)
11418 +{
11419 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
11420 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
11421 +    ELAN3_PTP    tl2ptp;
11422 +    sdramaddr_t l2ptp;
11423 +    spinlock_t *l3lock;
11424 +    unsigned long l3flags;
11425 +    ELAN3_PTBL  *l3ptbl;
11426 +    ELAN3_PTBL  *lXptbl;
11427 +    int         idx;
11428 +    int                i;
11429 +    int                ret;
11430 +
11431 +    HAT_PRINTF2 (1, "elan3mmu_l2inval: l2ptbl %p addr %x\n", l2ptbl, addr);
11432 +
11433 +    ASSERT (PTBL_LEVEL (l2ptbl->ptbl_flags) == PTBL_LEVEL_2);
11434 +    ASSERT (PTBL_LEVEL (l2ptbl->ptbl_parent->ptbl_flags) == PTBL_LEVEL_1);
11435 +
11436 +    ret = elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, pl2lock, flags);
11437 +
11438 +    ASSERT (ret == LK_PTBL_OK);
11439 +    ASSERT (l2ptbl->ptbl_elan3mmu == elan3mmu);
11440 +    ASSERT (l2ptbl->ptbl_parent->ptbl_elan3mmu == elan3mmu);
11441 +
11442 +    l2ptp = PTBL_TO_PTADDR(l2ptbl);
11443 +
11444 +    for (i = 0; i < ELAN3_L2_ENTRIES; i++, l2ptp += ELAN3_PTP_SIZE)
11445 +    {
11446 +       tl2ptp = elan3_readptp (dev, l2ptp);
11447 +       switch (ELAN3_PTP_TYPE(tl2ptp))
11448 +       {
11449 +       case ELAN3_ET_PTE:
11450 +           lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11451 +           idx    = (PTP_TO_PT_PADDR(tl2ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
11452 +
11453 +           HAT_PRINTF3 (2, "elan3mmu_l2inval: l2ptbl %p : lXptbl %p idx %d\n",
11454 +                        l2ptbl, lXptbl, idx);
11455 +
11456 +           /* invalidate the L2 pte. */
11457 +           elan3_writeptp (dev, l2ptp, invalidptp);
11458 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
11459 +               ElanFlushTlb (dev);
11460 +
11461 +           l2ptbl->ptbl_valid--;
11462 +           elan3mmu_free_pte ( dev, elan3mmu, lXptbl, idx); 
11463 +
11464 +           HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
11465 +
11466 +           break;
11467 +           
11468 +       case ELAN3_ET_PTP:
11469 +           HAT_PRINTF5 (2, "elan3mmu_l2inval: l2ptbl %p : ptp %lx (%x) addr %x (%d)\n",
11470 +                        l2ptbl, l2ptp, tl2ptp, addr, i);
11471 +
11472 +           /* invalidate the L2 ptp. */
11473 +           elan3_writeptp (dev, l2ptp, invalidptp);
11474 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
11475 +               ElanFlushTlb (dev);
11476 +           
11477 +           /* unload the level 3 page table */
11478 +           l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11479 +           ret = elan3mmu_l3inval (elan3mmu, l3ptbl, attr | PTE_UNLOAD_NOFLUSH, addr, &l3lock, &l3flags);
11480 +           
11481 +           if (ret == LK_PTBL_OK)
11482 +           {
11483 +               if ((l3ptbl->ptbl_flags & PTBL_KEEP) == 0 && l3ptbl->ptbl_valid == 0)
11484 +               {
11485 +                   /* decrement the valid count of the level 2 page table, and */
11486 +                   /* free off the level 3 page table */
11487 +                   HAT_PRINTF1 (2, "elan3mmu_l2inval: free l3ptbl %p\n", l3ptbl);
11488 +
11489 +                   l2ptbl->ptbl_valid--;
11490 +                   elan3mmu_free_l3ptbl (elan3mmu->elan3mmu_dev, l3ptbl, l3lock, l3flags);
11491 +
11492 +                   HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", 
11493 +                                PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11494 +               }
11495 +               else
11496 +               {
11497 +                   /* need to keep this page table,  so even though its now empty, */
11498 +                   /* chain it back in */
11499 +                   HAT_PRINTF1 (2, "elan3mmu_l2inval: keep l3ptbl %p\n", l3ptbl);
11500 +
11501 +                   elan3_writeptp (dev, l2ptp, tl2ptp);
11502 +                   elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
11503 +               }
11504 +           }
11505 +           else
11506 +           {
11507 +               l2ptbl->ptbl_valid--;
11508 +
11509 +               HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", 
11510 +                            PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11511 +           }
11512 +           break;
11513 +           
11514 +       case ELAN3_ET_INVALID:
11515 +           break;
11516 +
11517 +       default:
11518 +           panic ("elan3mmu_l2inval: found pte in level 2 page table");
11519 +           /* NOTREACHED */
11520 +       }
11521 +
11522 +       if (l2ptbl->ptbl_valid == 0)
11523 +           break;
11524 +
11525 +       addr += ELAN3_L2_SIZE;
11526 +    }
11527 +
11528 +    ASSERT (PTBL_IS_LOCKED(l2ptbl->ptbl_flags));
11529 +
11530 +    return (ret);
11531 +}
11532 +
11533 +int 
11534 +elan3mmu_l3inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l3ptbl, int attr, E3_Addr addr, spinlock_t **pl3lock, unsigned long *flags)
11535 +{
11536 +    int ret;
11537 +
11538 +    HAT_PRINTF3 (2, "elan3mmu_l3inval: l3ptbl %p parent %p addr %x\n", l3ptbl, l3ptbl->ptbl_parent, addr);
11539 +
11540 +    ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_parent->ptbl_flags));
11541 +    ASSERT (PTBL_LEVEL (l3ptbl->ptbl_parent->ptbl_flags) == PTBL_LEVEL_2);
11542 +    ASSERT (l3ptbl->ptbl_parent->ptbl_elan3mmu == elan3mmu);
11543 +    ASSERT (l3ptbl->ptbl_parent->ptbl_base == VA2BASE (ELAN3_L2_BASE(addr)));
11544 +    
11545 +    ret = elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, pl3lock, flags);
11546 +
11547 +    ASSERT (ret == LK_PTBL_OK);
11548 +    ASSERT (PTBL_LEVEL (l3ptbl->ptbl_flags) == PTBL_LEVEL_3);
11549 +
11550 +    elan3mmu_unload_loop (elan3mmu, l3ptbl, 0, ELAN3_L3_ENTRIES, attr);
11551 +
11552 +    ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_flags));
11553 +
11554 +    return (ret);
11555 + }
11556 +
11557 +int
11558 +elan3mmu_lock_this_ptbl (ELAN3_PTBL *ptbl, int flag, spinlock_t **plock, unsigned long *flags)
11559 +{
11560 +    int         level = PTBL_LEVEL (ptbl->ptbl_flags);
11561 +    spinlock_t *lock  = elan3mmu_ptbl_to_lock (level, ptbl);
11562 +
11563 +    local_irq_save (*flags);
11564 +
11565 +    if ((flag & LK_PTBL_NOWAIT) == 0)
11566 +       spin_lock (lock);
11567 +    else if (! spin_trylock (lock)) {
11568 +       local_irq_restore (*flags);
11569 +       return (LK_PTBL_FAILED);
11570 +    }
11571 +    
11572 +    if (level != PTBL_LEVEL (ptbl->ptbl_flags))
11573 +    {
11574 +       spin_unlock (lock);     
11575 +       local_irq_restore (*flags);
11576 +       return (LK_PTBL_MISMATCH);
11577 +    }
11578 +
11579 +    ptbl->ptbl_flags |= PTBL_LOCKED;
11580 +    *plock = lock;
11581 +    return (LK_PTBL_OK);
11582 +}
11583 +
11584 +int
11585 +elan3mmu_lock_ptbl (ELAN3_PTBL *ptbl, u_int flag, ELAN3MMU *elan3mmu, E3_Addr va, int level, spinlock_t **plock, unsigned long *flags)
11586 +{
11587 +    spinlock_t *lock = elan3mmu_ptbl_to_lock (level, ptbl);
11588 +    int         res  = LK_PTBL_MISMATCH;
11589 +
11590 +    local_irq_save (*flags);
11591 +    
11592 +    if ((flag & LK_PTBL_NOWAIT) == 0)
11593 +       spin_lock (lock);
11594 +    else if (spin_trylock (lock) == 0) {
11595 +       local_irq_restore(*flags);
11596 +       return (LK_PTBL_FAILED);
11597 +    }
11598 +    
11599 +    if (PTBL_LEVEL (ptbl->ptbl_flags) != level)
11600 +    {
11601 +       res = LK_PTBL_MISMATCH;
11602 +       goto mismatch;
11603 +    }
11604 +    
11605 +    /* We have the right mutex,  so check that its the ptbl we want. */
11606 +    switch (level)
11607 +    {
11608 +    case PTBL_LEVEL_1: va = ELAN3_L1_BASE(va); break;
11609 +    case PTBL_LEVEL_2: va = ELAN3_L2_BASE(va); break;
11610 +    case PTBL_LEVEL_3: va = ELAN3_L3_BASE(va); break;
11611 +    }
11612 +
11613 +    if (ptbl->ptbl_elan3mmu != elan3mmu || ptbl->ptbl_base != VA2BASE(va))
11614 +    {
11615 +       res = LK_PTBL_MISMATCH;
11616 +       goto mismatch;
11617 +    }
11618 +
11619 +    ASSERT ((ptbl->ptbl_flags & PTBL_LOCKED) == 0);
11620 +    ptbl->ptbl_flags |= PTBL_LOCKED;
11621 +
11622 +    *plock = lock;
11623 +    return (LK_PTBL_OK);
11624 +
11625 +mismatch:
11626 +    if (! (flag & LK_PTBL_FAILOK))
11627 +       panic ("elan3mmu: failed to lock ptbl\n");
11628 +       
11629 +    spin_unlock (lock);
11630 +    local_irq_restore(*flags);
11631 +    return (res);
11632 +}
11633 +
11634 +void
11635 +elan3mmu_unlock_ptbl (ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
11636 +{
11637 +    ptbl->ptbl_flags &= ~PTBL_LOCKED;
11638 +    spin_unlock_irqrestore (lock,flags);
11639 +}
11640 +
11641 +static spinlock_t *
11642 +elan3mmu_ptbl_to_lock (int level, ELAN3_PTBL *ptbl)
11643 +{
11644 +    switch (level)
11645 +    {
11646 +    case PTBL_LEVEL_3: return (&l3ptbl_lock[L3PTBL_MTX_HASH(ptbl)]);
11647 +    case PTBL_LEVEL_2: return (&l2ptbl_lock[L2PTBL_MTX_HASH(ptbl)]);
11648 +    case PTBL_LEVEL_1: return (&l1ptbl_lock[L1PTBL_MTX_HASH(ptbl)]);
11649 +    case PTBL_LEVEL_X: 
11650 +       panic ("elan3mmu: ptbl_to_lock, bad level X");
11651 +    default:
11652 +       panic ("elan3mmu: ptbl_to_lock, bad level");
11653 +       /* NOTREACHED */
11654 +    }
11655 +    return (NULL);
11656 +}
11657 +
11658 +void
11659 +elan3mmu_display (ELAN3MMU *elan3mmu, E3_Addr addr)
11660 +{
11661 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
11662 +    ELAN3_PTBL  *l1ptbl;
11663 +    sdramaddr_t l1ptp;
11664 +    spinlock_t *l1lock;
11665 +    ELAN3_PTE    tl1pte;
11666 +    ELAN3_PTP    tl1ptp;
11667 +    E3_Addr     l1base;
11668 +    ELAN3_PTBL  *l2ptbl;
11669 +    sdramaddr_t l2ptp;
11670 +    ELAN3_PTE    tl2pte;
11671 +    spinlock_t *l2lock;
11672 +    ELAN3_PTP    tl2ptp;
11673 +    E3_Addr     l2base;
11674 +    ELAN3_PTBL  *l3ptbl;
11675 +    sdramaddr_t l3pte;
11676 +    ELAN3_PTE    tl3pte;
11677 +    spinlock_t *l3lock;
11678 +    ELAN3_PTBL  *lXptbl;
11679 +    int         idx;
11680 +    unsigned long flags;
11681 +
11682 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: elan3mmu %p addr %x\n", elan3mmu, addr);
11683 +
11684 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
11685 +
11686 +    if (l1ptbl == NULL)
11687 +       return;
11688 +
11689 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11690 +    l1base = ELAN3_L1_BASE(addr);
11691 +    
11692 +    tl1ptp = elan3_readptp (dev, l1ptp);
11693 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l1ptbl %p l1ptp %lx l1base %x : tl1ptp %x\n", l1ptbl, l1ptp, l1base, tl1ptp);
11694 +    
11695 +    switch (ELAN3_PTP_TYPE(tl1ptp))
11696 +    {
11697 +    case ELAN3_ET_PTE:
11698 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: level 1 page table for pte %x\n", tl1ptp);
11699 +    
11700 +       lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11701 +       idx    = (PTP_TO_PT_PADDR(tl1ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
11702 +       
11703 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lXptbl %p idx %d\n",lXptbl, idx);
11704 +
11705 +       tl1pte = elan3_readpte (dev,(PTBL_TO_PTADDR (lXptbl) + idx * ELAN3_PTE_SIZE));
11706 +
11707 +       switch (elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &flags))
11708 +       {
11709 +       case LK_PTBL_OK:
11710 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, flags);
11711 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lvl 1 l1pte matches value %llx\n", (long long) tl1pte);
11712 +           break;
11713 +           
11714 +       case LK_PTBL_FAILED:
11715 +           panic ("elan3mmu_display: l1 lock failed");
11716 +           /* NOTREACHED */
11717 +           
11718 +       case LK_PTBL_MISMATCH:
11719 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : lvl 1 ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
11720 +                         l1ptbl, l1ptbl->ptbl_flags, l1ptbl->ptbl_elan3mmu, l1ptbl->ptbl_base, elan3mmu, addr, (long long)tl1pte);
11721 +           
11722 +           break;
11723 +       default:
11724 +           panic ("elan3mmu_display: lvl 1 elan3mmu_lock_ptbl returned bad value");
11725 +           /* NOTREACHED */
11726 +       }
11727 +       return;
11728 +       
11729 +    case ELAN3_ET_INVALID:
11730 +       return;
11731 +       
11732 +    case ELAN3_ET_PTP:
11733 +       break;
11734 +       
11735 +    default:
11736 +       panic ("elan3mmu_display: found bad entry in level 1 page table");
11737 +       /* NOTREACHED */
11738 +    }
11739 +    
11740 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: chain to level 2 ptbl from ptp %x\n", tl1ptp);
11741 +    
11742 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11743 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
11744 +    l2base = ELAN3_L2_BASE(addr);
11745 +    
11746 +    tl2ptp = elan3_readptp (dev, l2ptp);
11747 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l2ptbl %p l2ptp %lx l2base %x : tl2ptp %x\n",
11748 +                l2ptbl, l2ptp, l2base, tl2ptp);
11749 +    
11750 +    switch (ELAN3_PTP_TYPE(tl2ptp))
11751 +    {
11752 +    case ELAN3_ET_PTE:
11753 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: level 2 page table for pte %x\n", tl2ptp);
11754 +    
11755 +       lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11756 +       idx    = (PTP_TO_PT_PADDR(tl2ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
11757 +       
11758 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lXptbl %p idx %d\n",lXptbl, idx);
11759 +
11760 +       tl2pte = elan3_readpte (dev,(PTBL_TO_PTADDR (lXptbl) + idx * ELAN3_PTE_SIZE));
11761 +
11762 +       switch (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &flags))
11763 +       {
11764 +       case LK_PTBL_OK:
11765 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, flags);
11766 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lvl 2 l1pte matches value %llx\n", (long long)tl2pte);
11767 +           break;
11768 +           
11769 +       case LK_PTBL_FAILED:
11770 +           panic ("elan3mmu_display: l2 lock failed");
11771 +           /* NOTREACHED */
11772 +           
11773 +       case LK_PTBL_MISMATCH:
11774 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : lvl 2 ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
11775 +                         l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr, (long long) tl2pte);
11776 +           
11777 +           break;
11778 +       default:
11779 +           panic ("elan3mmu_display: lvl 2 elan3mmu_lock_ptbl returned bad value");
11780 +           /* NOTREACHED */
11781 +       }
11782 +       return;
11783 +       
11784 +    case ELAN3_ET_INVALID:
11785 +       return;
11786 +       
11787 +    case ELAN3_ET_PTP:
11788 +       break;
11789 +
11790 +    default:
11791 +       panic ("elan3mmu_display: found bad entry in level 2 page table");
11792 +       /* NOTREACHED */
11793 +    }
11794 +    
11795 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: chain to level 3 page table from ptp %x\n", tl2ptp);
11796 +    
11797 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11798 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11799 +    
11800 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l3ptbl %p l3pte %lx\n",l3ptbl, l3pte);
11801 +    
11802 +    tl3pte = elan3_readpte (dev, l3pte);
11803 +    switch (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &flags))
11804 +    {
11805 +    case LK_PTBL_OK:
11806 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, flags);
11807 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l3pte matches value %llx\n", (long long) tl3pte);
11808 +       break;
11809 +       
11810 +    case LK_PTBL_FAILED:
11811 +       panic ("elan3mmu_display: l3 lock failed");
11812 +       /* NOTREACHED */
11813 +       
11814 +    case LK_PTBL_MISMATCH:
11815 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
11816 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_elan3mmu, l3ptbl->ptbl_base, elan3mmu, addr, (long long) tl3pte);
11817 +       
11818 +       break;
11819 +       
11820 +    default:
11821 +       panic ("elan3mmu_display: elan3mmu_lock_ptbl returned bad value");
11822 +       /* NOTREACHED */
11823 +    }
11824 +}
11825 +
11826 +
11827 +/*
11828 + * Local variables:
11829 + * c-file-style: "stroustrup"
11830 + * End:
11831 + */
11832 Index: linux-2.4.21/drivers/net/qsnet/elan3/elan3mmu_linux.c
11833 ===================================================================
11834 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/elan3mmu_linux.c  2004-02-23 16:02:56.000000000 -0500
11835 +++ linux-2.4.21/drivers/net/qsnet/elan3/elan3mmu_linux.c       2005-06-01 23:12:54.574442904 -0400
11836 @@ -0,0 +1,284 @@
11837 +/*
11838 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
11839 + *
11840 + *    For licensing information please see the supplied COPYING file
11841 + *
11842 + */
11843 +
11844 +#ident "@(#)$Id: elan3mmu_linux.c,v 1.50.2.3 2004/12/14 10:19:51 mike Exp $"
11845 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/vm/elan3mmu_linux.c,v $*/
11846 +
11847 +#include <qsnet/kernel.h>
11848 +#include <qsnet/kpte.h>
11849 +
11850 +#include <elan3/elanregs.h>
11851 +#include <elan3/elandev.h>
11852 +#include <elan3/elanvp.h>
11853 +#include <elan3/elan3mmu.h>
11854 +#include <elan3/elanctxt.h>
11855 +#include <elan3/elandebug.h>
11856 +#include <elan3/urom_addrs.h>
11857 +#include <elan3/thread.h>
11858 +
11859 +/*
11860 + * Strategy for syncing main <-> elan pte's:
11861 + * 
11862 + *   Install callbacks for linux flush_tlb_page(), flush_tlb_range(),
11863 + *   flush_tlb_all(), and flush_tlb_mm() so when a main PTE changes,
11864 + *   the elan translations, if any, are invalidated.  They can then be
11865 + *   faulted in again with the correct physical page, perms, etc., on demand. 
11866 + * 
11867 + *   Callbacks are stacked on the mm_struct, one per context.  We also stack
11868 + *   a ctxt pointer so we don't have to do lookups on every call.
11869 + *
11870 + *   Sanity check -- we clearly want to flush the elan PTEs in these 
11871 + *   situations, all of which are covered by tlb_flush_{page,range}()
11872 + *
11873 + *     1) kernel/vmscan.c::try_to_swap_out() swaps out a page
11874 + *
11875 + *     2) kernel/mremap.c::copy_one_pte() moves a page as a result of the 
11876 + *     mremap system call
11877 + * 
11878 + *     3) kernel/mprotect.c::change_pte_range() changes the permissions of a 
11879 + *     page as the result of the mprotect system call
11880 + *
11881 + * Other Notes: 
11882 + * 
11883 + *   Dirty a page in the mains page tables when it is faulted into the elan.
11884 + *   This way it will not be thrown away by the swapper.
11885 + * 
11886 + *   Pages write protected for COW are copied by elan3mmu_main_pagefault()
11887 + *   when a writeable translation is loaded into the elan.
11888 + */
11889 +
11890 +caddr_t             elan3mmu_kernel_invalid_space;
11891 +ELAN3_PTE     elan3mmu_kernel_invalid_pte_val;
11892 +
11893 +void
11894 +elan3mmu_init_osdep (void)
11895 +{
11896 +    pte_t *pte;
11897 +
11898 +    KMEM_GETPAGES (elan3mmu_kernel_invalid_space, caddr_t, 1, TRUE);
11899 +
11900 +    ASSERT(elan3mmu_kernel_invalid_space != NULL);
11901 +
11902 +    pte = find_pte_kernel ((unsigned long) elan3mmu_kernel_invalid_space);
11903 +
11904 +    elan3mmu_kernel_invalid_pte_val = ELAN3_PTE_64_BIT | (pte_phys(*pte) & ELAN3_PTE_PFN_MASK) | ELAN3_PERM_REMOTEREAD | ELAN3_ET_PTE;
11905 +
11906 +#ifdef __alpha
11907 +    /*
11908 +     * NOTE: Elan sign-extends bit 48 of the physical address, so if we need to
11909 +     *       set any of bits 63:48, then we will set them all by setting bit 48/
11910 +     */
11911 +    if (alpha_mv.pci_dac_offset & 0xFFFF000000000000ull)
11912 +        elan3mmu_kernel_invalid_pte_val |= (1ull << 48);
11913 +    else
11914 +       elan3mmu_kernel_invalid_pte_val |= alpha_mv.pci_dac_offset;
11915 +#endif
11916 +
11917 +    HAT_PRINTF(0x10, "elan3mmu_invalid_space at %p phys=%llx pte=%llx\n", elan3mmu_kernel_invalid_space, 
11918 +              (unsigned long long) pte_phys(*pte), (unsigned long long) elan3mmu_kernel_invalid_pte_val);
11919 +}
11920 +
11921 +void
11922 +elan3mmu_fini_osdep()
11923 +{
11924 +    KMEM_FREEPAGES (elan3mmu_kernel_invalid_space, 1);
11925 +}
11926 +
11927 +void
11928 +elan3mmu_alloc_osdep (ELAN3MMU *elan3mmu)
11929 +{
11930 +    elan3mmu->elan3mmu_coproc_mm = current->mm;
11931 +}
11932 +
11933 +/*
11934 + * Convert physical page frame number to elan pte.
11935 + */
11936 +ELAN3_PTE
11937 +elan3mmu_phys_to_pte (ELAN3_DEV *dev, physaddr_t paddr, int perm)
11938 +{
11939 +    ELAN3_PTE newpte;
11940 +    
11941 +    ASSERT (paddr != 0);
11942 +    
11943 +    if ((paddr & dev->SdramPhysMask) == dev->SdramPhysBase)            /* SDRAM, turn on PTE_LOCAL bit */
11944 +    {
11945 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx SDRAM\n", (unsigned long long) paddr);
11946 +       
11947 +       newpte = ELAN3_PTE_LOCAL | (paddr & ELAN3_PTE_PFN_MASK & ~dev->SdramPhysMask) | perm | ELAN3_ET_PTE;
11948 +    }
11949 +#if defined(LINUX_ALPHA)
11950 +    else if ((paddr & dev->PciPhysMask) == dev->PciPhysBase)
11951 +    {
11952 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx PCI\n", (unsigned long long) paddr);
11953 +       newpte = ELAN3_PTE_64_BIT | (paddr & ELAN3_PTE_PFN_MASK & ~dev->PciPhysMask) | perm | ELAN3_ET_PTE;
11954 +    }
11955 +#endif
11956 +    else                                               /* main memory, must convert to PCI view */
11957 +    {
11958 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx is main memory\n", (unsigned long long) paddr);
11959 +
11960 +       /* main memory, just set the architecture specific PTE_BYPASS bit */
11961 +       /* This requires the Tsunami chipset being programmed to support
11962 +        * the monster window option. This is in linux-2.4.5 and later kernels 
11963 +        * and is also patched into the RH 7.1/2.4.3-12 Alpha kernel
11964 +        */
11965 +       newpte = ELAN3_PTE_64_BIT | (paddr & ELAN3_PTE_PFN_MASK) | perm | ELAN3_ET_PTE;
11966 +
11967 +#ifdef __alpha
11968 +       /*
11969 +        * NOTE: Elan sign-extends bit 48 of the physical address, so if we need to
11970 +        *       set any of bits 63:48, then we will set them all by setting bit 48/
11971 +        */
11972 +       if (alpha_mv.pci_dac_offset & 0xFFFF000000000000ull)
11973 +            newpte |= (1ull << 48);
11974 +        else
11975 +           newpte |= alpha_mv.pci_dac_offset;
11976 +#endif
11977 +    }
11978 +
11979 +    if ( ELAN3_PERM_WRITEABLE( perm )) 
11980 +       newpte |= ( ELAN3_PTE_MOD | ELAN3_PTE_REF );
11981 +    else
11982 +       newpte |= ( ELAN3_PTE_REF ) ;
11983 +
11984 +    return (newpte);
11985 +}
11986 +
11987 +ELAN3_PTE
11988 +elan3mmu_kernel_invalid_pte (ELAN3MMU *elan3mmu)
11989 +{
11990 +    if (elan3mmu->elan3mmu_dev->Devinfo.dev_revision_id == PCI_REVISION_ID_ELAN3_REVB)
11991 +       return (elan3mmu_kernel_invalid_pte_val);
11992 +    return (ELAN3_INVALID_PTE);
11993 +}
11994 +
11995 +/* 
11996 + * Invalidate a range of addresses for specified context.
11997 + */
11998 +void
11999 +elan3mmu_pte_range_unload (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len)
12000 +{
12001 +    E3_Addr       eaddr;
12002 +    ELAN3MMU_RGN *rgn;
12003 +    unsigned long span;
12004 +
12005 +    spin_lock (&elan3mmu->elan3mmu_lock);
12006 +
12007 +    for (; len; len -= span, addr += span)
12008 +    {
12009 +       rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
12010 +
12011 +       if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < addr)
12012 +           span = len;
12013 +       else if (rgn->rgn_mbase > addr)
12014 +           span = MIN(len, rgn->rgn_mbase - addr);
12015 +       else
12016 +       {
12017 +           span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - addr);
12018 +           eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
12019 +
12020 +            HAT_PRINTF(0x10, "  unloading eaddr %x main %p (%ld pages)\n", 
12021 +             eaddr, addr, btopr(span));
12022 +           elan3mmu_unload (elan3mmu, eaddr, span, PTE_UNLOAD);
12023 +       }                       /* takes care of elan tlb flush also */
12024 +    }
12025 +
12026 +    spin_unlock (&elan3mmu->elan3mmu_lock);
12027 +}
12028 +
12029 +/*
12030 + *
12031 + */
12032 +void
12033 +elan3mmu_update_range (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t vaddr, E3_Addr eaddr, u_int len, u_int perm)
12034 +{
12035 +    u_int roperm = ELAN3_PERM_READONLY(perm & ELAN3_PTE_PERM_MASK) | (perm & ~ELAN3_PTE_PERM_MASK);
12036 +    u_int off;
12037 +
12038 +    HAT_PRINTF3(1, "elan3mmu_update_range (elan3mmu %p addr %p -> %p)\n", elan3mmu, vaddr, vaddr+len-1);
12039 +
12040 +    while (len > 0)
12041 +    {
12042 +       pte_t *pte_ptr;
12043 +       pte_t  pte_value;
12044 +
12045 +       pte_ptr = find_pte_map(mm, (unsigned long)vaddr);
12046 +       if (pte_ptr) {
12047 +           pte_value = *pte_ptr;
12048 +           pte_unmap(pte_ptr);
12049 +       }
12050 +
12051 +       HAT_PRINTF(0x10, "  elan3mmu_update_range %x (%p) %s\n", eaddr, vaddr, 
12052 +               !pte_ptr ? "invalid" : pte_none(pte_value) ? "none " : !pte_present(pte_value) ? "swapped " : 
12053 +               !pte_write(pte_value) ? "RO/COW" : "OK");
12054 +       
12055 +       if (pte_ptr && !pte_none(pte_value) && pte_present(pte_value))
12056 +           for (off = 0; off < PAGE_SIZE; off += ELAN3_PAGE_SIZE)
12057 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, eaddr + off, pte_phys(pte_value) + off, pte_write(pte_value) ? perm : roperm, PTE_LOAD|PTE_NO_SLEEP|PTE_NO_STEAL);
12058 +       vaddr += PAGESIZE;
12059 +       eaddr += PAGESIZE;
12060 +       len   -= PAGESIZE;
12061 +    }
12062 +}
12063 +
12064 +/* 
12065 + * Update a range of addresses for specified context.
12066 + */
12067 +void
12068 +elan3mmu_pte_range_update (ELAN3MMU *elan3mmu, struct mm_struct *mm,caddr_t vaddr, unsigned long len)
12069 +{
12070 +    E3_Addr       eaddr;
12071 +    ELAN3MMU_RGN *rgn;
12072 +    unsigned long span;
12073 +
12074 +    spin_lock (&elan3mmu->elan3mmu_lock);
12075 +
12076 +    for (; len; len -= span, vaddr += span)
12077 +    {
12078 +       rgn = elan3mmu_findrgn_main (elan3mmu, vaddr, 0);
12079 +
12080 +       if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < vaddr)
12081 +           span = len;
12082 +       else if (rgn->rgn_mbase > vaddr)
12083 +           span = MIN(len, rgn->rgn_mbase - vaddr);
12084 +       else
12085 +       {
12086 +           span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - vaddr);
12087 +           eaddr = rgn->rgn_ebase + (vaddr - rgn->rgn_mbase);
12088 +
12089 +            HAT_PRINTF(0x10, "  updating eaddr %u main %p (%ld pages)\n", 
12090 +             eaddr, vaddr, btopr(span));
12091 +           
12092 +           elan3mmu_update_range(elan3mmu, mm, vaddr, eaddr, span, rgn->rgn_perm);
12093 +       }                       
12094 +    }
12095 +
12096 +    spin_unlock (&elan3mmu->elan3mmu_lock);
12097 +}
12098 +
12099 +/* 
12100 + * Invalidate all ptes for the given context.
12101 + */
12102 +void
12103 +elan3mmu_pte_ctxt_unload(ELAN3MMU *elan3mmu)
12104 +{
12105 +    ELAN3_PTBL  *l1ptbl   = (elan3mmu ? elan3mmu->elan3mmu_l1ptbl : NULL);
12106 +    spinlock_t *l1mtx;
12107 +    unsigned long flags;
12108 +
12109 +    if (l1ptbl && elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, (E3_Addr) 0, 1, &l1mtx, &flags) == LK_PTBL_OK)
12110 +    {
12111 +       elan3mmu_l1inval(elan3mmu, elan3mmu->elan3mmu_l1ptbl, 0);
12112 +       elan3mmu_unlock_ptbl (l1ptbl, l1mtx, flags);
12113 +    }
12114 +}
12115 +
12116 +/*
12117 + * Local variables:
12118 + * c-file-style: "stroustrup"
12119 + * End:
12120 + */
12121 Index: linux-2.4.21/drivers/net/qsnet/elan3/elan3ops.c
12122 ===================================================================
12123 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/elan3ops.c        2004-02-23 16:02:56.000000000 -0500
12124 +++ linux-2.4.21/drivers/net/qsnet/elan3/elan3ops.c     2005-06-01 23:12:54.575442752 -0400
12125 @@ -0,0 +1,170 @@
12126 +/*
12127 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12128 + *
12129 + *    For licensing information please see the supplied COPYING file
12130 + *
12131 + */
12132 +
12133 +#ident "@(#)$Id: elan3ops.c,v 1.4 2003/09/24 13:57:25 david Exp $"
12134 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elan3ops.c,v $*/
12135 +
12136 +#include <qsnet/kernel.h>
12137 +#include <elan/elanmod.h>
12138 +
12139 +#include <elan3/elanregs.h>
12140 +#include <elan3/elandev.h>
12141 +#include <elan3/elan3ops.h>
12142 +
12143 +extern ELAN_STATS_OPS elan3_device_stats_ops;
12144 +
12145 +ELAN_DEV_OPS elan3_dev_ops = {
12146 +
12147 +       get_position,
12148 +       set_position,   
12149 +
12150 +       ELAN_DEV_OPS_VERSION
12151 +};
12152 +
12153 +ELAN_STATS_OPS elan3_device_stats_ops = {
12154 +       ELAN_STATS_OPS_VERSION,
12155 +
12156 +       stats_get_index_name,
12157 +       stats_get_block,
12158 +       stats_clear_block
12159 +};
12160 +
12161 +static char *elan3_device_stats_names[ELAN3_NUM_STATS] = 
12162 +{
12163 +               "version field",                 /* not cleared */
12164 +               "elan interrupts",
12165 +               "tlb flushes",
12166 +               "traps with invalid context",
12167 +               "interrupts com queue half full",
12168 +               "cproc traps",
12169 +               "dproc traps",
12170 +               "tproc traps",
12171 +               "iproc traps",
12172 +               "event interrupts",
12173 +               "elan page faults",
12174 +               "EopBadAcks",
12175 +               "EopResets",
12176 +               "InputterBadLength",
12177 +               "InputterCRCDiscards",
12178 +               "InputterCRCErrors",
12179 +               "InputterCRCBad",       
12180 +               "errors in dma data",
12181 +               "errors after dma identify",
12182 +               "errors after thread identify",
12183 +               "dma retries",
12184 +               "dma output timeouts",
12185 +               "dma packet ack errors",
12186 +               "forced tproc traps",
12187 +               "too many instruction traps",
12188 +               "output timeouts",
12189 +               "packet ack errors",
12190 +               "LockError",
12191 +               "DeskewError",
12192 +               "PhaseError",
12193 +               "DataError",
12194 +               "FifoOvFlow0",
12195 +               "FifoOvFlow1",
12196 +               "link error value on data error",
12197 +               "correctable ecc errors",
12198 +               "uncorrectable ecc errors",
12199 +               "multiple ecc errors",
12200 +               "sdram bytes free",              /* not cleared */
12201 +               "longest interrupt in ticks",
12202 +               "punts of event int's to thread",
12203 +               "reschedules of event int's thread"
12204 +};
12205 +
12206 +int 
12207 +stats_get_index_name (void *arg, uint  index, caddr_t name)
12208 +{
12209 +       copyout (elan3_device_stats_names[index], name, strlen (elan3_device_stats_names[index]) + 1  /* with \0 */);
12210 +
12211 +       return (0);
12212 +}
12213 +
12214 +int
12215 +stats_get_block (void *arg, uint entries, ulong *value)
12216 +{
12217 +       ELAN3_DEV *dev = (ELAN3_DEV *) arg;
12218 +
12219 +       if ( entries >  ELAN3_NUM_STATS ) /* if space too big only send valid portion */
12220 +               entries = ELAN3_NUM_STATS;
12221 +       
12222 +       copyout(&dev->Stats, value, sizeof(ulong) * entries);
12223 +
12224 +       return (0);
12225 +}
12226 +
12227 +int 
12228 +stats_clear_block (void *arg)
12229 +{
12230 +       ELAN3_DEV *dev = (ELAN3_DEV *) arg;
12231 +       u_long   *ptr = (u_long *) &dev->Stats;
12232 +       int                n;
12233 +       
12234 +       for (n = 0; n < ELAN3_NUM_STATS; n++)
12235 +       {
12236 +               switch (n) 
12237 +               {
12238 +               case offsetof (ELAN3_STATS, Version)/sizeof(u_long):
12239 +               case offsetof (ELAN3_STATS, SdramBytesFree)/sizeof(u_long):
12240 +                       break;
12241 +               default:
12242 +                       ptr[n] = (ulong)0;
12243 +               }
12244 +       }
12245 +       return (0);
12246 +}
12247 +
12248 +int 
12249 +get_position (void *user_data, ELAN_POSITION *position)
12250 +{
12251 +       ELAN3_DEV *dev = (ELAN3_DEV *)user_data;
12252 +
12253 +       copyout(&dev->Position, position, sizeof(ELAN_POSITION));
12254 +
12255 +       return (0);     
12256 +}
12257 +
12258 +int 
12259 +set_position (void *user_data, unsigned short nodeId, unsigned short numNodes)
12260 +{
12261 +       ELAN3_DEV *dev = (ELAN3_DEV *)user_data;
12262 +
12263 +       if (ComputePosition (&dev->Position, nodeId, numNodes, dev->Devinfo.dev_num_down_links_value) != 0)
12264 +               return (EINVAL);
12265 +       
12266 +       return (0);     
12267 +}
12268 +
12269 +int
12270 +elan3_register_dev_stats(ELAN3_DEV * dev) 
12271 +{
12272 +       char name[ELAN_STATS_NAME_MAX_LEN+1];
12273 +
12274 +       sprintf (name, ELAN3_STATS_DEV_FMT, dev->Instance);
12275 +
12276 +       elan_stats_register(&dev->StatsIndex,
12277 +                              name,
12278 +                              sizeof (elan3_device_stats_names)/sizeof (elan3_device_stats_names[0]),
12279 +                              &elan3_device_stats_ops,
12280 +                              (void *)dev);
12281 +
12282 +       return (0);
12283 +}
12284 +
12285 +void
12286 +elan3_deregister_dev_stats(ELAN3_DEV * dev) 
12287 +{
12288 +       elan_stats_deregister(dev->StatsIndex);
12289 +}
12290 +
12291 +/*
12292 + * Local variables:
12293 + * c-file-style: "linux"
12294 + * End:
12295 + */
12296 Index: linux-2.4.21/drivers/net/qsnet/elan3/elandebug.c
12297 ===================================================================
12298 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/elandebug.c       2004-02-23 16:02:56.000000000 -0500
12299 +++ linux-2.4.21/drivers/net/qsnet/elan3/elandebug.c    2005-06-01 23:12:54.575442752 -0400
12300 @@ -0,0 +1,151 @@
12301 +/*
12302 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12303 + * 
12304 + *    For licensing information please see the supplied COPYING file
12305 + *
12306 + */
12307 +
12308 +#ident "@(#)$Id: elandebug.c,v 1.25 2003/09/24 13:57:25 david Exp $"
12309 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandebug.c,v $*/
12310 +
12311 +#include <qsnet/kernel.h>
12312 +#include <elan3/elanregs.h>
12313 +#include <elan3/elandev.h>
12314 +#include <elan3/elanvp.h>
12315 +#include <elan3/elan3mmu.h>
12316 +#include <elan3/elanctxt.h>
12317 +#include <elan3/elandebug.h>
12318 +
12319 +
12320 +void
12321 +elan3_debugf (void *p, unsigned int mode, char *fmt,...)
12322 +{
12323 +    char prefix[128];
12324 +
12325 +#if defined (DIGITAL_UNIX)
12326 +#define PREFIX_FMT     "[%lx.%08x]"
12327 +#define PREFIX_VAL     (int)CURTHREAD()
12328 +#else
12329 +#define PREFIX_FMT     "[%lx.%04d]"
12330 +#define PREFIX_VAL     (current->pid)
12331 +#endif
12332 +
12333 +    if ((unsigned long) p > DBG_NTYPES)
12334 +    {
12335 +       ELAN3_CTXT *ctxt = (ELAN3_CTXT *) p;
12336 +
12337 +        if (elan3_debug_display_ctxt && (ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK) != elan3_debug_display_ctxt)
12338 +            return;
12339 +        if (elan3_debug_ignore_ctxt  && (ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK) == elan3_debug_ignore_ctxt)
12340 +            return;
12341 +
12342 +       if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
12343 +           sprintf (prefix, PREFIX_FMT " (XXX) ", lbolt, PREFIX_VAL);
12344 +       else
12345 +           sprintf (prefix, PREFIX_FMT " (%03x) ", lbolt, PREFIX_VAL,
12346 +                    ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK);
12347 +    }
12348 +    else
12349 +    {
12350 +       char *what;
12351 +
12352 +       if (elan3_debug_ignore_dev & (1 << ((unsigned long) p)))
12353 +           return;
12354 +
12355 +       switch ((unsigned long) p)
12356 +       {
12357 +       case (int) DBG_DEVICE: what = "dev"; break;
12358 +       case (int) DBG_KCOMM:  what = "kcm"; break;
12359 +       case (int) DBG_ICS:    what = "ics"; break;
12360 +       case (int) DBG_USER:   what = "usr"; break;
12361 +       default:               what = NULL; break;
12362 +       }
12363 +           
12364 +       if (what)
12365 +           sprintf (prefix, PREFIX_FMT " [%s] ", lbolt,  PREFIX_VAL, what);
12366 +       else
12367 +           sprintf (prefix, PREFIX_FMT " [%3d] ", lbolt,  PREFIX_VAL, (int)(long)what);
12368 +    }
12369 +
12370 +    {
12371 +       va_list       ap;
12372 +
12373 +       va_start (ap, fmt);
12374 +       qsnet_vdebugf ((((mode & elan3_debug_buffer)?QSNET_DEBUG_BUFFER:0)|((mode & elan3_debug_console)?QSNET_DEBUG_CONSOLE:0)) , prefix, fmt, ap);
12375 +       va_end (ap);
12376 +    }
12377 +}
12378 +
12379 +
12380 +void
12381 +elan3_alloc_panicstate (ELAN3_DEV *dev, int allocsdram)
12382 +{
12383 +    register int bank;
12384 +
12385 +    if (dev->PanicState.RegPtr == NULL)
12386 +       KMEM_ZALLOC (dev->PanicState.RegPtr, E3_Regs *, sizeof (E3_Regs), 1);
12387 +
12388 +    if (allocsdram)
12389 +       for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
12390 +           if (dev->PanicState.Sdram[bank] == NULL && dev->SdramBanks[bank].Size)
12391 +               KMEM_ZALLOC (dev->PanicState.Sdram[bank], char *, dev->SdramBanks[bank].Size, 1);
12392 +}
12393 +
12394 +void
12395 +elan3_free_panicstate (ELAN3_DEV *dev)
12396 +{
12397 +    register int bank;
12398 +
12399 +    if (dev->PanicState.RegPtr != NULL)
12400 +       KMEM_FREE (dev->PanicState.RegPtr, sizeof (E3_Regs));
12401 +
12402 +    for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
12403 +       if (dev->PanicState.Sdram[bank] != NULL && dev->SdramBanks[bank].Size)
12404 +           KMEM_FREE (dev->PanicState.Sdram[bank], dev->SdramBanks[bank].Size);
12405 +
12406 +    bzero (&dev->PanicState, sizeof (dev->PanicState));
12407 +}
12408 +
12409 +void
12410 +elan3_save_panicstate (ELAN3_DEV *dev)
12411 +{
12412 +    register int bank;
12413 +    
12414 +    if (dev->PanicState.RegPtr)
12415 +    {
12416 +       printk ("elan%d: saving state on panic .....\n", dev->Devinfo.dev_instance);
12417 +
12418 +       bcopy ((void *) dev->RegPtr, (void *) dev->PanicState.RegPtr, sizeof (E3_Regs));
12419 +       
12420 +       for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
12421 +           if (dev->SdramBanks[bank].Size && dev->PanicState.Sdram[bank])
12422 +               elan3_sdram_copyq_from_sdram (dev, (bank << ELAN3_SDRAM_BANK_SHIFT), dev->PanicState.Sdram[bank], dev->SdramBanks[bank].Size);
12423 +       
12424 +    }
12425 +}
12426 +
12427 +int
12428 +elan3_assfail (ELAN3_DEV *dev, char *string, char *file, int line)
12429 +{
12430 +    if (panicstr)
12431 +       return (0);
12432 +
12433 +    printk ("elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
12434 +
12435 +#if defined(LINUX)
12436 +    elan3_save_panicstate (dev);
12437 +
12438 +    panic ("elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
12439 +#else
12440 +    cmn_err (CE_PANIC, "elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
12441 +#endif
12442 +    /*NOTREACHED*/
12443 +    return (0);
12444 +}
12445 +
12446 +
12447 +/*
12448 + * Local variables:
12449 + * c-file-style: "stroustrup"
12450 + * End:
12451 + */
12452 Index: linux-2.4.21/drivers/net/qsnet/elan3/elandev_generic.c
12453 ===================================================================
12454 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/elandev_generic.c 2004-02-23 16:02:56.000000000 -0500
12455 +++ linux-2.4.21/drivers/net/qsnet/elan3/elandev_generic.c      2005-06-01 23:12:54.578442296 -0400
12456 @@ -0,0 +1,1862 @@
12457 +/*
12458 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12459 + * 
12460 + *    For licensing information please see the supplied COPYING file
12461 + *
12462 + */
12463 +
12464 +#ident "@(#)$Id: elandev_generic.c,v 1.111.2.3 2004/11/15 11:12:36 mike Exp $"
12465 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandev_generic.c,v $*/
12466 +
12467 +#include <qsnet/kernel.h>
12468 +#include <qsnet/kthread.h>
12469 +
12470 +#include <elan3/dma.h>
12471 +#include <elan3/elanregs.h>
12472 +#include <elan3/elandev.h>
12473 +#include <elan3/elanvp.h>
12474 +#include <elan3/elan3mmu.h>
12475 +#include <elan3/elanctxt.h>
12476 +#include <elan3/elandebug.h>
12477 +#include <elan3/elansyscall.h>
12478 +#include <elan3/urom_addrs.h>
12479 +#include <elan3/elan3ops.h>
12480 +
12481 +/*
12482 + * Module globals, configurable from system file.
12483 + */
12484 +u_int  elan3_debug                  = 0;
12485 +u_int  elan3_debug_console       = 0;
12486 +u_int  elan3_debug_buffer           = -1;
12487 +u_int  elan3_debug_ignore_dev       = 0;
12488 +u_int  elan3_debug_ignore_kcomm     = 0;
12489 +u_int  elan3_debug_ignore_ctxt      = 0;
12490 +u_int  elan3_debug_display_ctxt     = 0;
12491 +
12492 +int    eventint_punt_loops;
12493 +int    eventint_punt_ticks;
12494 +int    eventint_resched_ticks;
12495 +
12496 +static void InitialiseDmaBuffers (ELAN3_DEV *dev, ioaddr_t CmdPort);
12497 +static int  ProbeSdram (ELAN3_DEV *dev);
12498 +static void InitialiseSdram (ELAN3_DEV *dev);
12499 +static void ReEnableErrorInterrupts (void *arg);
12500 +void        PollForDmaHungup (void *arg);
12501 +static void elan3_event_interrupt (ELAN3_DEV *dev);
12502 +
12503 +/*
12504 + * BaseAddr is ptr to the start of a table aligned on a power of two byte address.
12505 + * SizePower must be in the range of 6 to 12. It defines the number of valid contexts as
12506 + * shown below.
12507 + *
12508 + * SizePower   Valid Contexts  Table size in bytes.
12509 + *     6            64               1k
12510 + *     7           128               2k
12511 + *     8           256               4K
12512 + *     9           512               8k
12513 + *    10          1024              16k
12514 + *    11          2048              32k
12515 + *    12          4096              64k
12516 + */
12517 +#define GEN_CONTEXT_PTR(BaseAddr, SizePower) (((E3_uint32) BaseAddr) | \
12518 +                                             (~((1 << ((SizePower) - 6)) - 1) & 0x3f))
12519 +
12520 +int
12521 +InitialiseElan (ELAN3_DEV *dev, ioaddr_t CmdPort)
12522 +{
12523 +    E3_IprocTrapHeader_BE   TrapCleanup[4];
12524 +    E3_ContextControlBlock  ContextControlBlock;
12525 +    sdramaddr_t             ptr;
12526 +    int                            res;
12527 +    int                            i;
12528 +
12529 +    eventint_punt_loops    = 100;
12530 +    eventint_punt_ticks    = (hz/100);
12531 +    eventint_resched_ticks = (hz/4);
12532 +    
12533 +    dev->Stats.Version     = ELAN3_STATS_VERSION;
12534 +    dev->Position.pos_mode = ELAN_POS_UNKNOWN;
12535 +
12536 +    /*
12537 +     * The elan should have already been reset, so the interrupt mask
12538 +     * should be 0 and the schedule status register should be set to
12539 +     * its initial state
12540 +     */
12541 +    ASSERT (dev->InterruptMask == 0);
12542 +    ASSERT ((read_reg32 (dev, Exts.SchCntReg) & HaltStopAndExtTestMask) == Sched_Initial_Value);
12543 +
12544 +    /*
12545 +     * Write any value here to clear out the half full and error bits of the command
12546 +     * overflow queues.
12547 +     */
12548 +    write_reg32 (dev, ComQueueStatus, 0);
12549 +
12550 +    /* Initialise the cache tags before touching the SDRAM */
12551 +    /* we initialise them to "map" the bottom of SDRAM */
12552 +    for (i = 0; i < E3_NumCacheLines; i++)
12553 +    {
12554 +       write_cache_tag (dev, Tags[i][0].Value, 0x0000000000000000ULL);
12555 +       write_cache_tag (dev, Tags[i][1].Value, 0x0000080000000000ULL);
12556 +       write_cache_tag (dev, Tags[i][2].Value, 0x0000100000000000ULL);
12557 +       write_cache_tag (dev, Tags[i][3].Value, 0x0000180000000000ULL);
12558 +    }
12559 +
12560 +#ifndef CONFIG_MPSAS
12561 +    for (i = 0; i < E3_NumCacheLines*(E3_CACHELINE_SIZE/sizeof(E3_uint64)); i++)
12562 +    {
12563 +       write_cache_set (dev, Set0[i], 0xcac1ecac1ecac1e0ULL);
12564 +       write_cache_set (dev, Set1[i], 0xcac1ecac1ecac1e1ULL);
12565 +       write_cache_set (dev, Set2[i], 0xcac1ecac1ecac1e2ULL);
12566 +       write_cache_set (dev, Set3[i], 0xcac1ecac1ecac1e3ULL);
12567 +    }
12568 +#endif
12569 +
12570 +    if ((res = ProbeSdram(dev)) != ESUCCESS)
12571 +       return (res);
12572 +
12573 +    /* Enable all cache sets before initialising the sdram allocators */
12574 +    write_reg32 (dev, Cache_Control_Reg.ContReg, (dev->Cache_Control_Reg |= CONT_EN_ALL_SETS));
12575 +
12576 +    InitialiseSdram (dev);
12577 +
12578 +    dev->TAndQBase              = elan3_sdram_alloc (dev, ELAN3_TANDQ_SIZE);
12579 +    dev->ContextTable           = elan3_sdram_alloc (dev, ELAN3_CONTEXT_SIZE);
12580 +    dev->ContextTableSize       = ELAN3_NUM_CONTEXTS;
12581 +    dev->CommandPortTraps[0]    = elan3_sdram_alloc (dev, ELAN3_COMMAND_TRAP_SIZE);
12582 +    dev->CommandPortTraps[1]    = elan3_sdram_alloc (dev, ELAN3_COMMAND_TRAP_SIZE);
12583 +    dev->CurrentCommandPortTrap = 0;
12584 +
12585 +    PRINTF3 (DBG_DEVICE, DBG_CONFIG, "InitialiseElan: ContextTable %08lx TAndQ %08lx CommandPortTrap %08lx\n",
12586 +            dev->ContextTable, dev->TAndQBase, dev->CommandPortTraps[0]);
12587 +
12588 +    /* Allocate the thread amd dma trap areas */
12589 +    KMEM_ZALLOC (dev->ThreadTrap, THREAD_TRAP *, sizeof (THREAD_TRAP), TRUE);
12590 +    KMEM_ZALLOC (dev->DmaTrap, DMA_TRAP *, sizeof (DMA_TRAP), TRUE);
12591 +
12592 +    /* Allocate the ctxt table */
12593 +    KMEM_ZALLOC (dev->CtxtTable,  ELAN3_CTXT **, dev->ContextTableSize * sizeof ( ELAN3_CTXT *), TRUE);
12594 +
12595 +    /* Initialise halt queue list */
12596 +    dev->HaltOperationsTailpp   = &dev->HaltOperations;
12597 +
12598 +    /* From elan3/code/harness/elanstuff.c */
12599 +    /* Init the clock. */
12600 +    write_ureg64 (dev, Clock.NanoSecClock, 0);
12601 +    
12602 +    /* Init the instruction count reg. */
12603 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
12604 +    
12605 +    /* Init the stats control reg. Must be done before the count regs.*/
12606 +    write_ureg32 (dev, StatCont.StatsControl, 0);
12607 +    
12608 +    /* Init the stats count regs. */
12609 +    write_ureg32 (dev, StatCounts[0].s.StatsCount, 0);
12610 +    write_ureg32 (dev, StatCounts[1].s.StatsCount, 0);
12611 +    write_ureg32 (dev, StatCounts[2].s.StatsCount, 0);
12612 +    write_ureg32 (dev, StatCounts[3].s.StatsCount, 0);
12613 +    write_ureg32 (dev, StatCounts[4].s.StatsCount, 0);
12614 +    write_ureg32 (dev, StatCounts[5].s.StatsCount, 0);
12615 +    write_ureg32 (dev, StatCounts[6].s.StatsCount, 0);
12616 +    write_ureg32 (dev, StatCounts[7].s.StatsCount, 0);
12617 +    
12618 +    /*
12619 +     * Initialise the Context_Ptr and Fault_Base_Ptr
12620 +     */
12621 +    write_reg32 (dev, Fault_Base_Ptr, dev->TAndQBase + offsetof(E3_TrapAndQueue, IProcSysCntx));
12622 +    write_reg32 (dev, Context_Ptr, GEN_CONTEXT_PTR (dev->ContextTable, ELAN3_LN2_NUM_CONTEXTS));
12623 +
12624 +    /* scrub the TProc Registers */
12625 +    for (i = 0; i < 8; i++)
12626 +       write_reg32 (dev, Globals[i], 0xdeadbabe);
12627 +    for (i = 0; i < 8; i++)
12628 +       write_reg32 (dev, Outs[i], 0xdeadbabe);
12629 +    for (i = 0; i < 8; i++)
12630 +       write_reg32 (dev, Locals[i], 0xdeadbabe);
12631 +    for (i = 0; i < 8; i++)
12632 +       write_reg32 (dev, Ins[i], 0xdeadbabe);
12633 +
12634 +    /*
12635 +     * Initialise the Queue pointers.  Arrange them so that the starting positions are
12636 +     * farthest apart in one set of the cache. Thus 512 bytes apart,  but with cntx0
12637 +     * thread the same as the interrupt queue.
12638 +     */
12639 +    write_reg32 (dev, TProc_NonSysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0xc0]));
12640 +    write_reg32 (dev, TProc_NonSysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0xc0]));
12641 +    write_reg32 (dev, TProc_SysCntx_FPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0x80]));
12642 +    write_reg32 (dev, TProc_SysCntx_BPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0x80]));
12643 +    
12644 +    write_reg32 (dev, DProc_NonSysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]));
12645 +    write_reg32 (dev, DProc_NonSysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]));
12646 +    write_reg32 (dev, DProc_SysCntx_FPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0x10]));
12647 +    write_reg32 (dev, DProc_SysCntx_BPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0x10]));
12648 +    
12649 +    dev->Event_Int_Queue_FPtr = dev->TAndQBase + offsetof (E3_TrapAndQueue, EventIntQueue[0x80]);
12650 +    write_reg32 (dev, Event_Int_Queue_FPtr, dev->Event_Int_Queue_FPtr);
12651 +    write_reg32 (dev, Event_Int_Queue_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, EventIntQueue[0x80]));
12652 +    
12653 +    
12654 +    /* Initialise Input_Trap_Base to last 8 Kbytes of trap area, uCode adds the right offset */
12655 +    write_reg32 (dev, Input_Trap_Base, dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0]));
12656 +    
12657 +    /* Ptr to word used to save the SP to when a thread deschedules */
12658 +    write_reg32 (dev, Thread_SP_Save_Ptr, dev->TAndQBase + offsetof (E3_TrapAndQueue, Thread_SP_Save));
12659 +    
12660 +    /* Initialise the command trap base */
12661 +    write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[0]);
12662 +    
12663 +    /* Initialise the set event tracing registers */
12664 +    write_reg32 (dev, Event_Trace_Ptr, 0);
12665 +    write_reg32 (dev, Event_Trace_Mask, 0);
12666 +    
12667 +    /* Initialise Tlb_Line_Value to zero. The TLB cannot be read while either the */
12668 +    /* uCode or thread proc might be running. Must be set to 0. */
12669 +    write_reg64 (dev, Tlb_Line_Value, 0);
12670 +
12671 +    /* Control register. Cache everything, Enable MMU, RefreshRate=3, CasLatency=1, StartSDR */
12672 +    dev->Cache_Control_Reg |= CONT_MMU_ENABLE | CONT_EN_ALL_SETS | CONT_CACHE_ALL | CONT_ENABLE_ECC;
12673 +
12674 +#if ELAN3_PAGE_SHIFT == 13
12675 +    dev->Cache_Control_Reg |= CONT_ENABLE_8K_PAGES;
12676 +#endif
12677 +
12678 +    write_reg32 (dev, Cache_Control_Reg.ContReg,  dev->Cache_Control_Reg);
12679 +
12680 +    /*
12681 +     * Initialise the context table to be discard for all contexts
12682 +     */
12683 +    ContextControlBlock.rootPTP  = 0;
12684 +    ContextControlBlock.filter   = E3_CCB_DISCARD_ALL;
12685 +    ContextControlBlock.VPT_mask = 0;
12686 +    ContextControlBlock.VPT_ptr  = 0;
12687 +
12688 +    for (i = 0, ptr = dev->ContextTable; i < ELAN3_NUM_CONTEXTS; i++, ptr += sizeof (E3_ContextControlBlock))
12689 +       elan3_sdram_copyl_to_sdram (dev, &ContextControlBlock, ptr, sizeof (E3_ContextControlBlock));
12690 +
12691 +    /* From elan3/code/trap_handler/init.c */
12692 +    /*
12693 +     * Initialise the Trap And Queue area in Elan SDRAM.
12694 +     */
12695 +    TrapCleanup[0].s.TrTypeCntx.TypeContext = 0;
12696 +    TrapCleanup[0].s.TrAddr                = 0;
12697 +    TrapCleanup[0].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
12698 +    TrapCleanup[0].s.TrData0               = 0;
12699 +    TrapCleanup[1].s.TrTypeCntx.TypeContext = 0;
12700 +    TrapCleanup[1].s.TrAddr                = 0;
12701 +    TrapCleanup[1].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
12702 +    TrapCleanup[1].s.TrData0               = 0;
12703 +    TrapCleanup[2].s.TrTypeCntx.TypeContext = 0;
12704 +    TrapCleanup[2].s.TrAddr                = 0;
12705 +    TrapCleanup[2].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
12706 +    TrapCleanup[2].s.TrData0               = 0;
12707 +    TrapCleanup[3].s.TrTypeCntx.TypeContext = 0;
12708 +    TrapCleanup[3].s.TrAddr                = 0;
12709 +    TrapCleanup[3].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
12710 +    TrapCleanup[3].s.TrData0               = 0;
12711 +
12712 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx.s.FaultContext),  0);
12713 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx.s.FSR.Status), 0);
12714 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx.s.FaultContext), 0);
12715 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx.s.FSR.Status), 0);
12716 +    
12717 +    /* Must now zero all the FSRs so that a subsequent Fault can be seen */ 
12718 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), 16);
12719 +
12720 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), 16);
12721 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), 64);
12722 +    
12723 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), 16);
12724 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), 16);
12725 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), 16);
12726 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), 16);
12727 +
12728 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrHead[0]), 64);
12729 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrHead[0]), 64);
12730 +
12731 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrHead[0]), 64);
12732 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrHead[0]), 64);
12733 +
12734 +    InitialiseDmaBuffers(dev, CmdPort);
12735 +
12736 +    /* reserve a halt operation for flushing the context filter */
12737 +    ReserveHaltOperations (dev, 1, TRUE);
12738 +
12739 +    /* Allow the Thread/Dma to run */
12740 +    CLEAR_SCHED_STATUS (dev, HaltThread | HaltDmas);
12741 +
12742 +    /* Enable All Interrupts */
12743 +    SET_INT_MASK (dev, (INT_PciMemErr | INT_SDRamInt | INT_EventInterrupt | INT_LinkError | INT_ComQueue |
12744 +                       INT_TProc | INT_CProc | INT_DProc | INT_IProcCh1NonSysCntx | 
12745 +                       INT_IProcCh1SysCntx | INT_IProcCh0NonSysCntx | INT_IProcCh0SysCntx));
12746 +
12747 +    /* Take the link out of boundary scan */
12748 +    SET_SCHED_LINK_VALUE (dev, 0, 0);
12749 +    
12750 +    /* And clear any link errors */
12751 +    PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
12752 +
12753 +    /* XXXX: clear discard context 0,  AFTER setting up the kernel comms */
12754 +    CLEAR_SCHED_STATUS (dev, DiscardSysCntxIn | DiscardNonSysCntxIn);
12755 +
12756 +    /* Start a thread to handle excessive Event Interrupts */
12757 +    if (kernel_thread_create (elan3_event_interrupt, (caddr_t) dev) == NULL)
12758 +    {
12759 +       panic ("InitialiseElan: cannot start elan3_event_interrupt\n");
12760 +       return (EFAIL);
12761 +    }
12762 +    dev->EventInterruptThreadStarted = 1;
12763 +
12764 +    ReserveHaltOperations (dev, 1, TRUE);
12765 +
12766 +    PollForDmaHungup (dev);
12767 +
12768 +    /* register the device and stats with elanmod for RMS */
12769 +    dev->DeviceIdx = elan_dev_register(&dev->Devinfo, &elan3_dev_ops, (void *) dev);
12770 +    
12771 +    elan3_register_dev_stats(dev);
12772 +
12773 +    return (ESUCCESS);
12774 +}
12775 +
12776 +static void
12777 +InitialiseDmaBuffers(ELAN3_DEV *dev, ioaddr_t CmdPort)
12778 +{
12779 +   register int i;
12780 +
12781 +   /* GNAT sw-elan3/3908:
12782 +    * Clear down the power on state of the Dma_Desc registers to make sure we don't
12783 +    * try and interpret them when a trap happens.
12784 +    */
12785 +   write_reg32 (dev, Dma_Desc.dma_type,            0);
12786 +   write_reg32 (dev, Dma_Desc.dma_size,            0);
12787 +   write_reg32 (dev, Dma_Desc.dma_source,          0);
12788 +   write_reg32 (dev, Dma_Desc.dma_dest,            0);
12789 +   write_reg32 (dev, Dma_Desc.dma_destEvent,       0);
12790 +   write_reg32 (dev, Dma_Desc.dma_destCookieVProc, 0);
12791 +   write_reg32 (dev, Dma_Desc.dma_srcEvent,        0);
12792 +   write_reg32 (dev, Dma_Desc.dma_srcCookieVProc,  0);
12793 +   
12794 +   /*
12795 +    * The following is a sequence of writes to remove X's from the dma buffers and 
12796 +    * registers. It is only safe to write these registers after reset and before any
12797 +    * dma's have been issued. The chip will NOT function correctly if they are written at
12798 +    * any other time or in a different order.
12799 +    */
12800 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
12801 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdDmaType, 0);
12802 +   write_reg64 (dev, Exts.Dmas.DmaWrs.ResetAckNLdBytesToWr, ((u_longlong_t)0x1000) << 32);
12803 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdBytesToRd, ((u_longlong_t)0x100) << 32);
12804 +
12805 +   for (i=0;i<(4*8);i++)
12806 +       write_reg64 (dev, Dma_Alignment_Port[0], 0);
12807 +
12808 +   /*
12809 +    * This is used to clear out X's from some of the trap registers. This is required to
12810 +    * prevent the first traps from possibly writing X's into the SDram and upsetting the
12811 +    * ECC value. It requires that the trap save area registers have been set up but does
12812 +    * not require any translations to be ready.
12813 +    */
12814 +   writel (-1, CmdPort + offsetof (E3_CommandPort, SetEvent));
12815 +   while ((read_reg32 (dev, Exts.InterruptReg) & INT_CProc) == 0)
12816 +   {
12817 +       mb();
12818 +       DELAY (1);
12819 +   }
12820 +
12821 +   write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[dev->CurrentCommandPortTrap]);
12822 +   
12823 +   PULSE_SCHED_STATUS(dev, RestartCProc);
12824 +}
12825 +
12826 +void
12827 +FinaliseElan (ELAN3_DEV *dev)
12828 +{
12829 +    ELAN3_PTBL_GR *ptg;
12830 +    ELAN3_HALTOP  *op;
12831 +    ELAN3_HALTOP  *chain = NULL;
12832 +    int           bank;
12833 +    int                  indx;
12834 +    int                  size;
12835 +    unsigned long flags;
12836 +    int           level;
12837 +
12838 +    elan_stats_deregister (dev->StatsIndex);
12839 +    elan_dev_deregister(&dev->Devinfo);
12840 +
12841 +    /* Cancel the dma poller */
12842 +    cancel_timer_fn (&dev->DmaPollTimeoutId);
12843 +
12844 +    /* release its halt operation */
12845 +    ReleaseHaltOperations (dev, 1);
12846 +
12847 +    /* stop all kernel threads */
12848 +    dev->ThreadsShouldStop = 1;
12849 +
12850 +    spin_lock_irqsave (&dev->IntrLock, flags);
12851 +    while (dev->EventInterruptThreadStarted && !dev->EventInterruptThreadStopped)
12852 +    {
12853 +       kcondvar_wakeupall (&dev->IntrWait, &dev->IntrLock);
12854 +       kcondvar_wait (&dev->IntrWait, &dev->IntrLock, &flags);
12855 +    }
12856 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
12857 +
12858 +    /* Set the interrupt mask to 0 and the schedule control register to run nothing */
12859 +    SET_INT_MASK (dev, 0);
12860 +    SET_SCHED_STATUS (dev, DiscardNonSysCntxIn | DiscardSysCntxIn | HaltThread | HaltDmas);
12861 +
12862 +    /* Cancel any link error timeout */
12863 +    if (timer_fn_queued(&dev->ErrorTimeoutId))
12864 +       cancel_timer_fn (&dev->ErrorTimeoutId);
12865 +
12866 +    /* Free of any page tables that have been allocated */
12867 +    spin_lock (&dev->PtblGroupLock);
12868 +    for(level=0; level<4; level++) 
12869 +    {
12870 +       while ((ptg = dev->Level[level].PtblGroupList) != NULL)
12871 +       {
12872 +           dev->Level[level].PtblGroupList = ptg->pg_next;
12873 +
12874 +           elan3_sdram_free (dev, ptg->pg_addr, PTBL_GROUP_SIZE);
12875 +           FREE_PTBL_GR(ptg);
12876 +       }
12877 +    }
12878
12879 +    spin_unlock (&dev->PtblGroupLock);
12880 +
12881 +    /* Free of all halt operations */
12882 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
12883 +    while ((op = dev->FreeHaltOperations) != NULL)
12884 +    {
12885 +       dev->FreeHaltOperations = op->Next;
12886 +
12887 +       /* Keep a list of 'freed' ops for later KMEM_FREE call */
12888 +       op->Next = chain;
12889 +       chain = op;
12890 +    }
12891 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
12892 +
12893 +    /* Have now dropped the spinlock - can call KMEM_FREE */
12894 +    while ((op = chain) != NULL)
12895 +    {
12896 +       chain = op->Next;
12897 +
12898 +       KMEM_FREE (op, sizeof (ELAN3_HALTOP));
12899 +    }
12900 +       
12901 +    /* Free of the ctxt table */
12902 +    KMEM_FREE (dev->CtxtTable,  dev->ContextTableSize * sizeof (ELAN3_CTXT *));
12903 +
12904 +    /* Free of the thread and dma trap areas */
12905 +    KMEM_FREE (dev->ThreadTrap, sizeof (THREAD_TRAP));
12906 +    KMEM_FREE (dev->DmaTrap, sizeof (DMA_TRAP));
12907 +
12908 +    /* Free of the memsegs and pages */
12909 +    for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
12910 +    {
12911 +       if (dev->SdramBanks[bank].Size)
12912 +       {
12913 +           UnmapDeviceRegister (dev, &dev->SdramBanks[bank].Handle);
12914 +
12915 +           KMEM_FREE (dev->SdramBanks[bank].PtblGroups, sizeof (ELAN3_PTBL_GR *) * (dev->SdramBanks[bank].Size / PTBL_GROUP_SIZE));
12916 +
12917 +           for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= dev->SdramBanks[bank].Size; indx++, size <<= 1)
12918 +               KMEM_FREE (dev->SdramBanks[bank].Bitmaps[indx], sizeof (bitmap_t)*BT_BITOUL(dev->SdramBanks[bank].Size/size));
12919 +       }
12920 +    }
12921 +    elan3_sdram_fini (dev);
12922 +}
12923 +
12924 +#define INIT_PATTERN(offset)   (0xBEEC000000000011ull | ((u_longlong_t)(offset)) << 16)
12925 +#define FREE_PATTERN(offset)   (0xBEEC000000000022ull | ((u_longlong_t)(offset)) << 16)
12926 +
12927 +static int
12928 +ProbeSdram (ELAN3_DEV *dev)
12929 +{
12930 +    int                          Instance;
12931 +    u_int                Bank;
12932 +    int                          MemSpaceSize;
12933 +    int                          BankMaxSize;
12934 +    int                          BankOffset;
12935 +    int                          BankSize;
12936 +    ioaddr_t             BankBase;
12937 +    ioaddr_t             PageBase;
12938 +    ioaddr_t             PageBase1;
12939 +    ioaddr_t             PageBase2;
12940 +    DeviceMappingHandle   BankHandle;
12941 +    DeviceMappingHandle   PageHandle;
12942 +    DeviceMappingHandle   PageHandle1;
12943 +    DeviceMappingHandle   PageHandle2;
12944 +    register int          i;
12945 +    u_longlong_t         value;
12946 +    extern int            sdram_bank_limit;
12947 +
12948 +    /* NOTE: The Cache control register is set to only enable cache set 0 */
12949 +    /*       and has ECC disabled */
12950 +    Instance = dev->Instance;
12951 +    
12952 +    /* Determine the size of the SDRAM from the BAR register */
12953 +    if (DeviceRegisterSize (dev, ELAN3_BAR_SDRAM, &MemSpaceSize) != ESUCCESS)
12954 +    {
12955 +       printk ("elan%d: cannot determine SDRAM size\n", Instance);
12956 +       return (EFAIL);
12957 +    }
12958 +
12959 +    elan3_sdram_init (dev);
12960 +
12961 +    BankMaxSize = MemSpaceSize / ELAN3_SDRAM_NUM_BANKS;
12962 +
12963 +    for (Bank = 0; Bank < ELAN3_SDRAM_NUM_BANKS; Bank++)
12964 +    {
12965 +       BankOffset = Bank * BankMaxSize;
12966 +       
12967 +       PRINTF3 (DBG_DEVICE, DBG_CONFIG, "elan%d: Probing RAM Bank %d (max size %08x)\n", Instance, Bank, BankMaxSize);
12968 +       
12969 +       /* Probe the memory bank by mapping two pages that are the size of the cache apart */
12970 +       /* this guarantees that when we store the second pattern we displace the first pattern */
12971 +       /* from the cache, also store the second pattern again the size of the cache up again */
12972 +       /* to ensure that the SDRAM wires don't stay floating at pattern1 */
12973 +
12974 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &BankBase, BankOffset, PAGESIZE, &BankHandle) != ESUCCESS)
12975 +       {
12976 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
12977 +           continue;
12978 +       }
12979 +       
12980 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase1, BankOffset + ELAN3_MAX_CACHE_SIZE, PAGESIZE, &PageHandle1) != ESUCCESS)
12981 +       {
12982 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
12983 +           UnmapDeviceRegister (dev, &BankHandle);
12984 +           continue;
12985 +       }
12986 +
12987 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase2, BankOffset + 2*ELAN3_MAX_CACHE_SIZE, PAGESIZE, &PageHandle2) != ESUCCESS)
12988 +       {
12989 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
12990 +           UnmapDeviceRegister (dev, &BankHandle);
12991 +           UnmapDeviceRegister (dev, &PageHandle1);
12992 +           continue;
12993 +       }
12994 +
12995 +#define PATTERN0       (0x5555555555555555L)
12996 +#define PATTERN1       (0xAAAAAAAAAAAAAAAAL)
12997 +       writeq (PATTERN0, (u_longlong_t *) BankBase);
12998 +       writeq (PATTERN1, (u_longlong_t *) PageBase1);
12999 +       writeq (PATTERN1, (u_longlong_t *) PageBase2);
13000 +
13001 +       mmiob();
13002 +
13003 +       value = readq ((u_longlong_t *) BankBase);
13004 +
13005 +       if (value != PATTERN0)
13006 +       {
13007 +           UnmapDeviceRegister (dev, &BankHandle);
13008 +           UnmapDeviceRegister (dev, &PageHandle1);
13009 +           UnmapDeviceRegister (dev, &PageHandle2);
13010 +           continue;
13011 +       }
13012 +
13013 +       writeq (PATTERN1, (u_longlong_t *) BankBase);
13014 +       writeq (PATTERN0, (u_longlong_t *) PageBase1);
13015 +       writeq (PATTERN0, (u_longlong_t *) PageBase2);
13016 +
13017 +       mmiob();
13018 +       
13019 +       value = readq ((u_longlong_t *) BankBase);
13020 +       if (value != PATTERN1)
13021 +       {
13022 +           UnmapDeviceRegister (dev, &BankHandle);
13023 +           UnmapDeviceRegister (dev, &PageHandle1);
13024 +           UnmapDeviceRegister (dev, &PageHandle2);
13025 +           continue;
13026 +       }
13027 +       UnmapDeviceRegister (dev, &PageHandle1);
13028 +       UnmapDeviceRegister (dev, &PageHandle2);
13029 +
13030 +       /* Bank is present, so work out its size,  we store the maximum size at the base */
13031 +       /* and then store the address at each address  on every power of two address until */
13032 +       /* we reach the minimum mappable size (PAGESIZE), we then read back the value at the */
13033 +       /* base to determine the bank size */
13034 +       writeq ((u_longlong_t) BankMaxSize, (u_longlong_t *) BankBase);
13035 +
13036 +       for (BankSize = (BankMaxSize>>1); BankSize > PAGESIZE; BankSize >>= 1)
13037 +       {
13038 +           if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase, BankOffset + BankSize, PAGESIZE, &PageHandle) == ESUCCESS)
13039 +           {
13040 +               writeq (BankSize, (u_longlong_t *) PageBase);
13041 +               UnmapDeviceRegister (dev, &PageHandle);
13042 +           }
13043 +       }
13044 +       mmiob();
13045 +
13046 +       BankSize = (u_long) readq ((u_longlong_t *) BankBase);
13047 +       
13048 +       if (sdram_bank_limit == 0 || BankSize <= (sdram_bank_limit * 1024 * 1024))
13049 +           printk ("elan%d: memory bank %d is %dK\n", Instance, Bank, BankSize / 1024);
13050 +       else
13051 +       {
13052 +           BankSize = (sdram_bank_limit * 1024 * 1024);
13053 +           printk ("elan%d: limit memory bank %d to %dK\n", Instance, Bank, BankSize / 1024);
13054 +       }
13055 +
13056 +       UnmapDeviceRegister (dev, &BankHandle);
13057 +       
13058 +       /* Now map all of this bank into the kernel */
13059 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &BankBase, BankOffset, BankSize, &BankHandle) != ESUCCESS)
13060 +       {
13061 +           printk ("elan%d: Cannot initialise memory bank %d\n", Instance, Bank);
13062 +           continue;
13063 +       }
13064 +       
13065 +       dev->SdramBanks[Bank].Size    = BankSize;
13066 +       dev->SdramBanks[Bank].Mapping = BankBase;
13067 +       dev->SdramBanks[Bank].Handle  = BankHandle;
13068 +
13069 +#ifndef CONFIG_MPSAS
13070 +       /* Initialise it for ECC */
13071 +       preemptable_start {
13072 +           for (i = 0; i < BankSize; i += 8)
13073 +           {
13074 +               elan3_sdram_writeq (dev, (Bank << ELAN3_SDRAM_BANK_SHIFT) | i, INIT_PATTERN(BankOffset+i));
13075 +
13076 +               preemptable_check();
13077 +           }
13078 +       } preemptable_end;
13079 +#endif
13080 +    }
13081 +    
13082 +    return (ESUCCESS);
13083 +}
13084 +
13085 +static void
13086 +InitialiseSdram (ELAN3_DEV *dev)
13087 +{
13088 +    int indx, size, b;
13089 +
13090 +    for (b = 0; b < ELAN3_SDRAM_NUM_BANKS; b++)
13091 +    {
13092 +       ELAN3_SDRAM_BANK *bank = &dev->SdramBanks[b];
13093 +
13094 +       if (bank->Size == 0)
13095 +           continue;
13096 +
13097 +       /* allocate a ptbl group pointer for each possible ptbl group in this bank */
13098 +       KMEM_ZALLOC (bank->PtblGroups, ELAN3_PTBL_GR **, sizeof (ELAN3_PTBL_GR *) * bank->Size/PTBL_GROUP_SIZE, TRUE);
13099 +           
13100 +       /* allocate the buddy allocator bitmaps */
13101 +       for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->Size; indx++, size <<= 1)
13102 +           KMEM_ZALLOC (bank->Bitmaps[indx], bitmap_t *, sizeof (bitmap_t)*BT_BITOUL(bank->Size/size), TRUE);
13103 +           
13104 +       /* and add it to the sdram buddy allocator */
13105 +       elan3_sdram_add (dev, (b << ELAN3_SDRAM_BANK_SHIFT), (b << ELAN3_SDRAM_BANK_SHIFT) + bank->Size);
13106 +    }
13107 +}
13108 +
13109 +#include <elan3/vpd.h>
13110 +
13111 +int
13112 +ReadVitalProductData (ELAN3_DEV *dev, int *CasLatency)
13113 +{
13114 +    DeviceMappingHandle RomHandle;
13115 +    unsigned char      *RomBase;
13116 +    unsigned char      *PCIDataPtr;
13117 +    unsigned char      *VPDPtr;
13118 +    unsigned char      *lim;
13119 +    int                        type;
13120 +    int                        i, len, len2;
13121 +    char               name[3] = "XX";
13122 +    char               value[256];
13123 +    int                        finished = 0;
13124 +
13125 +    
13126 +    /* default value for CAS latency is 3 */
13127 +    (*CasLatency) = CAS_LATENCY_3;
13128 +
13129 +    if (MapDeviceRegister (dev, ELAN3_BAR_EBUS, (ioaddr_t *) &RomBase, ELAN3_EBUS_ROM_OFFSET, ELAN3_EBUS_ROM_SIZE, &RomHandle) != ESUCCESS)
13130 +    {
13131 +       printk ("elan%d: Cannot map ROM\n", dev->Instance);
13132 +       return (EFAIL);
13133 +    }
13134 +    
13135 +    /* Check the ROM signature */
13136 +    if (RomBase[0] != 0x55 || RomBase[1] != 0xAA)
13137 +    {
13138 +       printk ("elan%d: Invalid ROM signature %02x %02x\n", dev->Instance, RomBase[0], RomBase[1]);
13139 +       return (ESUCCESS);
13140 +    }
13141 +    
13142 +    PCIDataPtr = RomBase + ((RomBase[0x19] << 8) | RomBase[0x18]);
13143 +
13144 +    /* check the pci data structure */
13145 +    if (PCIDataPtr[0] != 'P' || PCIDataPtr[1] != 'C' || PCIDataPtr[2] != 'I' || PCIDataPtr[3] != 'R')
13146 +    {
13147 +       printk ("elan%d: Invalid PCI Data structure\n", dev->Instance);
13148 +       return (ESUCCESS);
13149 +    }
13150 +    
13151 +    /* Extract the VPD pointer */
13152 +    VPDPtr = RomBase + ((PCIDataPtr[9] << 8) | PCIDataPtr[8]);
13153 +
13154 +    if (VPDPtr == RomBase)
13155 +    {
13156 +       printk ("elan%d: No Vital Product Data\n", dev->Instance);
13157 +       return (ESUCCESS);
13158 +    }
13159 +
13160 +    while (! finished)
13161 +    {
13162 +       type = *VPDPtr++;
13163 +       
13164 +       if (type & LARGE_RESOURCE_BIT)
13165 +       {
13166 +           len = *(VPDPtr++);
13167 +           len += *(VPDPtr++) << 8;
13168 +
13169 +           switch (type & ~LARGE_RESOURCE_BIT)
13170 +           {
13171 +           case LARGE_RESOURCE_STRING:
13172 +               printk ("elan%d: ", dev->Instance);
13173 +               for (i = 0; i < len; i++)
13174 +                   printk ("%c", *VPDPtr++);
13175 +               printk ("\n");
13176 +               break;
13177 +               
13178 +           case LARGE_RESOURCE_VENDOR_DEFINED:
13179 +               VPDPtr += len;
13180 +               break;
13181 +               
13182 +           case LARGE_RESOURCE_VITAL_PRODUCT_DATA:
13183 +               for (lim = VPDPtr + len; VPDPtr < lim; )
13184 +               {
13185 +                   name[0] = *VPDPtr++;
13186 +                   name[1] = *VPDPtr++;
13187 +                   len2    = *VPDPtr++;
13188 +
13189 +                   for (i = 0; i < len2 && VPDPtr < lim; i++)
13190 +                       value[i] = *VPDPtr++;
13191 +                   value[i] = '\0';
13192 +
13193 +                   if (! strcmp (name, "SN"))
13194 +                       printk ("elan%d: Serial Number - %s\n", dev->Instance, value);
13195 +
13196 +                   if (! strcmp (name, "Z0"))
13197 +                       (*CasLatency) = (strcmp (value, "CAS_LATENCY_2") ? CAS_LATENCY_3 : CAS_LATENCY_2);
13198 +               }
13199 +               break;
13200 +               
13201 +           default:
13202 +               printk ("elan%d: unknown large resource %x\n", dev->Instance, type);
13203 +               finished = 1;
13204 +               break;
13205 +           }
13206 +       }
13207 +       else
13208 +       {
13209 +           len = type & 0x7;
13210 +
13211 +           switch (type >> 3)
13212 +           {
13213 +           case SMALL_RESOURCE_COMPATIBLE_DEVICE_ID:
13214 +               VPDPtr += len;
13215 +               break;
13216 +
13217 +           case SMALL_RESOURCE_VENDOR_DEFINED:
13218 +               VPDPtr += len;
13219 +               break;
13220 +               
13221 +           case SMALL_RESOURCE_END_TAG:
13222 +               finished = 1;
13223 +               break;
13224 +               
13225 +           default:
13226 +               printk ("elan%d: unknown small resource %x\n", dev->Instance, type >> 3);
13227 +               finished = 1;
13228 +               break;
13229 +           }
13230 +       }
13231 +    }
13232 +    
13233 +    UnmapDeviceRegister (dev, &RomHandle);
13234 +    return (ESUCCESS);
13235 +}
13236 +
13237 +void
13238 +ElanSetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset, ELAN3_PTBL_GR *ptg)
13239 +{
13240 +    int bank = offset >> ELAN3_SDRAM_BANK_SHIFT;
13241 +    
13242 +    dev->SdramBanks[bank].PtblGroups[(offset & (ELAN3_SDRAM_BANK_SIZE-1)) / PTBL_GROUP_SIZE] = ptg;
13243 +}
13244 +
13245 +ELAN3_PTBL_GR *
13246 +ElanGetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset)
13247 +{
13248 +    int bank = offset >> ELAN3_SDRAM_BANK_SHIFT;
13249 +    
13250 +    return (dev->SdramBanks[bank].PtblGroups[(offset & (ELAN3_SDRAM_BANK_SIZE-1)) / PTBL_GROUP_SIZE]);
13251 +}
13252 +
13253 +void
13254 +ElanFlushTlb (ELAN3_DEV *dev)
13255 +{
13256 +    unsigned long flags;
13257 +
13258 +    spin_lock_irqsave (&dev->TlbLock, flags);
13259 +    BumpStat (dev, TlbFlushes);
13260 +
13261 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | MMU_FLUSH);
13262 +    mmiob();
13263 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
13264 +
13265 +    while (! (read_reg32 (dev, Cache_Control_Reg.ContReg) & MMU_FLUSHED))
13266 +       mb();
13267 +}
13268 +
13269 +void
13270 +KillNegativeDma (ELAN3_DEV *dev, void *arg)
13271 +{
13272 +    DMA_TRAP     *trap    = dev->DmaTrap;
13273 +    E3_Status_Reg status;
13274 +    sdramaddr_t   FPtr, BPtr;
13275 +    sdramaddr_t   Base, Top;
13276 +    unsigned long flags;
13277 +
13278 +    spin_lock_irqsave (&dev->IntrLock, flags);
13279 +
13280 +    ASSERT (read_reg32 (dev, Exts.InterruptReg) & INT_DProcHalted);
13281 +
13282 +    /* Initialise the trap to deliver to the offending user process */
13283 +    trap->Status.Status   = read_reg32 (dev, Exts.DProcStatus.Status);
13284 +    trap->PacketInfo.Value = 0;
13285 +    
13286 +    bzero (&trap->FaultSave, sizeof (trap->FaultSave));
13287 +    bzero (&trap->Data0, sizeof (trap->Data0));
13288 +    bzero (&trap->Data1, sizeof (trap->Data1));
13289 +    bzero (&trap->Data2, sizeof (trap->Data2));
13290 +    bzero (&trap->Data3, sizeof (trap->Data3));
13291 +
13292 +    /* run down the kernel dma run queue and panic on a -ve length dma */
13293 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
13294 +    BPtr  = read_reg32 (dev, DProc_SysCntx_BPtr);
13295 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
13296 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
13297 +    
13298 +    while (FPtr != BPtr)
13299 +    {
13300 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &trap->Desc, sizeof (E3_DMA_BE));
13301 +       
13302 +       if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
13303 +           panic ("KillNegativeDma: -ve sized kernel dma\n");
13304 +
13305 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
13306 +    }
13307 +
13308 +    /* run down the user dma run queue and "remove" any -ve length dma's */
13309 +    FPtr  = read_reg32 (dev, DProc_NonSysCntx_FPtr);
13310 +    BPtr  = read_reg32 (dev, DProc_NonSysCntx_BPtr);
13311 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]);
13312 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[E3_NonSysCntxQueueSize-1]);
13313 +    
13314 +    while (FPtr != BPtr)
13315 +    {
13316 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &trap->Desc, sizeof (E3_DMA_BE));
13317 +       
13318 +       if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
13319 +       {
13320 +           PRINTF3 (NULL, DBG_INTR, "KillNegativeDma: remove dma - context %d size %d SuspendAddr %x\n", 
13321 +                    trap->Desc.s.dma_u.s.Context, trap->Desc.s.dma_size, trap->Status.s.SuspendAddr);
13322 +
13323 +           trap->Status.s.TrapType = trap->Status.s.SuspendAddr;
13324 +           trap->Status.s.Context  = trap->Desc.s.dma_u.s.Context;
13325 +
13326 +           DeliverDProcTrap (dev, trap, 0);
13327 +
13328 +           /*
13329 +            * Remove the DMA from the queue by replacing it with one with
13330 +            * zero size and no events.
13331 +            *
13332 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
13333 +            * to mark the approriate run queue as empty.
13334 +            */
13335 +           trap->Desc.s.dma_type            = 0;
13336 +           trap->Desc.s.dma_size            = 0;
13337 +           trap->Desc.s.dma_source          = (E3_Addr) 0;
13338 +           trap->Desc.s.dma_dest            = (E3_Addr) 0;
13339 +           trap->Desc.s.dma_destCookieVProc = (E3_Addr) 0;
13340 +           trap->Desc.s.dma_srcEvent        = (E3_Addr) 0;
13341 +           trap->Desc.s.dma_srcCookieVProc  = (E3_Addr) 0;
13342 +
13343 +           elan3_sdram_copyq_to_sdram (dev, &trap->Desc, FPtr, sizeof (E3_DMA_BE));
13344 +       }
13345 +
13346 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
13347 +    }
13348 +
13349 +    status.Status = read_reg32 (dev, Exts.DProcStatus.Status);
13350 +
13351 +    if (status.s.SuspendAddr == MI_DequeueNonSysCntxDma || 
13352 +       status.s.SuspendAddr == MI_DequeueSysCntxDma ||
13353 +       status.s.SuspendAddr == MI_DmaLoop)
13354 +    {
13355 +       PRINTF0 (NULL, DBG_INTR, "KillNegativeDma: unlock dma processor\n");
13356 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
13357 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdDmaType,   0);
13358 +       mmiob();
13359 +       
13360 +       DELAY (10);
13361 +       
13362 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
13363 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdDmaType,   0);
13364 +       mmiob();
13365 +    }
13366 +
13367 +    PRINTF0 (NULL, DBG_INTR, "KillNegativeDma: dma processor restarted\n");
13368 +
13369 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13370 +
13371 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 1);
13372 +}
13373 +
13374 +void
13375 +ForceTProcTrap (ELAN3_DEV *dev, void *arg)
13376 +{
13377 +    printk ("elan%d: forced tproc trap .....\n", dev->Instance);
13378 +
13379 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 1);
13380 +}
13381 +
13382 +void
13383 +PollForDmaHungup (void *arg)
13384 +{
13385 +    ELAN3_DEV     *dev   = (ELAN3_DEV *) arg;
13386 +    unsigned long flags;
13387 +    E3_Status_Reg status;
13388 +    E3_uint32     insn1, insn3;
13389 +    register int  i;
13390 +
13391 +    if (read_reg32 (dev, Dma_Desc.dma_size) > E3_MAX_DMA_SIZE)
13392 +    {
13393 +       status.Status = read_reg32 (dev, Exts.DProcStatus);
13394 +
13395 +       PRINTF2 (NULL, DBG_INTR, "PollForDmaHungup: size %x SuspendAddr %x\n", read_reg32 (dev, Dma_Desc.dma_size), status.s.SuspendAddr);
13396 +
13397 +       if (status.s.SuspendAddr == MI_DequeueNonSysCntxDma || 
13398 +           status.s.SuspendAddr == MI_DequeueSysCntxDma ||
13399 +           status.s.SuspendAddr == MI_DmaLoop)
13400 +       {
13401 +           printk ("elan%d: PollForDmaHungup: size %x context %d SuspendAddr %x\n", 
13402 +                   dev->Instance, read_reg32 (dev, Dma_Desc.dma_size),
13403 +                   status.s.Context, status.s.SuspendAddr);
13404 +       
13405 +           PRINTF2 (NULL, DBG_INTR, "PollForDmaHungup: dma_size %x status %x\n",
13406 +                    read_reg32 (dev, Dma_Desc.dma_size), status.Status);
13407 +           
13408 +           spin_lock_irqsave (&dev->IntrLock, flags);
13409 +           QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, KillNegativeDma, NULL);
13410 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
13411 +           
13412 +           return;
13413 +       }
13414 +    }
13415 +
13416 +    status.Status = read_reg32 (dev, Exts.TProcStatus);
13417 +    if (status.s.WakeupFunction == WakeupStopped)
13418 +    {
13419 +       E3_uint32 PC = read_reg32 (dev, ExecutePC);
13420 +
13421 +       /* See if it's likely that the thread is really "stuck" on a waitevent/break 
13422 +        * instruction ......... */
13423 +       for (i = 0; i < 10; i++)
13424 +       {
13425 +           status.Status = read_reg32 (dev, Exts.TProcStatus);
13426 +           insn1         = read_reg32 (dev, IBufferReg[1]);
13427 +           insn3         = read_reg32 (dev, IBufferReg[3]);
13428 +           
13429 +           if (! (status.s.WakeupFunction == WakeupStopped && read_reg32 (dev, ExecutePC) == PC &&     /* stopping and it could be a break/waitevent */
13430 +                  (insn1 == 0x81a00000 || insn3 == 0x81a00000 ||                                       /* break instruction */
13431 +                   insn1 == 0x81b00000 || insn3 == 0x81b00000)))                                       /* waitevent instruction  */
13432 +               break;
13433 +       }
13434 +
13435 +       if (i == 10)
13436 +       {
13437 +           printk ("elan%d: forcing tproc trap from %s instruction at pc %x\n", dev->Instance, 
13438 +                   (insn1 == 0x81a00000 || insn3 == 0x81a00000) ? "break" : "waitevent", PC);
13439 +
13440 +           spin_lock_irqsave (&dev->IntrLock, flags);
13441 +           QueueHaltOperation (dev, 0, NULL, INT_TProcHalted, ForceTProcTrap, NULL);
13442 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
13443 +           return;
13444 +       }
13445 +    }
13446 +
13447 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 10);
13448 +}
13449 +
13450 +/*=======================================================================================*/
13451 +/*
13452 + * Interrupt handler.
13453 + */
13454 +static void
13455 +ReEnableErrorInterrupts (void *arg)
13456 +{
13457 +    ELAN3_DEV     *dev = (ELAN3_DEV *) arg;
13458 +    unsigned long flags;
13459 +
13460 +    spin_lock_irqsave (&dev->IntrLock, flags);
13461 +
13462 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
13463 +       ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
13464 +
13465 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "ReEnableErrorInterrupts: IntMask=%x\n", read_reg32 (dev, Exts.InterruptMask));
13466 +
13467 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13468 +}
13469 +
13470 +void
13471 +CheckForExcessiveErrorRate (ELAN3_DEV *dev)
13472 +{
13473 +    if (dev->ErrorTime == (lbolt/hz))
13474 +    {
13475 +       if (dev->ErrorsPerTick++ > 100)
13476 +       {
13477 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "CheckForExcessiveErrorRate: too many links errors, disabling interrupt\n");
13478 +
13479 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
13480 +
13481 +           schedule_timer_fn (&dev->ErrorTimeoutId, ReEnableErrorInterrupts, (void *) dev, hz);
13482 +       }
13483 +    }
13484 +    else
13485 +    {
13486 +       dev->ErrorTime     = (lbolt/hz);
13487 +       dev->ErrorsPerTick = 0;
13488 +    }
13489 +}
13490 +/*=======================================================================================*/
13491 +/*
13492 + * Interrupt handler.
13493 + */
13494 +static void
13495 +HandlePciMemErr (ELAN3_DEV *dev)
13496 +{
13497 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "HandlePciMemErr : masking out interrupt\n");
13498 +    
13499 +    ElanBusError (dev);
13500 +    panic ("elan pci memory error\n");
13501 +}
13502 +
13503 +static void
13504 +HandleSDRamInterrupt (ELAN3_DEV *dev)
13505 +{
13506 +    E3_uint32     EccStatus0 = read_reg32 (dev, ECC_STATUS0);
13507 +    E3_uint32     EccStatus1 = read_reg32 (dev, ECC_STATUS1);
13508 +    unsigned long flags;
13509 +
13510 +    PRINTF5 (DBG_DEVICE, DBG_INTR, "elan: ECC error - Addr=%x UE=%x CE=%x ME=%x Syn=%x\n",
13511 +            EccStatus0 & ECC_ADDR_MASK, EccStatus0 & ECC_UE_MASK, 
13512 +            EccStatus0 & ECC_CE_MASK, EccStatus0 & ECC_ME_MASK, 
13513 +            EccStatus1 & ECC_SYN_MASK);
13514 +
13515 +    if (EccStatus0 & (ECC_UE_MASK|ECC_CE_MASK))
13516 +    {
13517 +       printk ("elan%d: ECC memory error (Address=%08x Syndrome=%02x %s%s%s)\n",
13518 +               dev->Instance, 
13519 +               (EccStatus0 & ECC_ADDR_MASK), (EccStatus1 & ECC_SYN_MASK), 
13520 +               (EccStatus0 & ECC_UE_MASK) ? "Uncorrectable "   : "",
13521 +               (EccStatus0 & ECC_CE_MASK) ? "Correctable "     : "",
13522 +               (EccStatus0 & ECC_ME_MASK) ? "Multiple Errors " : "");
13523 +    }
13524 +
13525 +    if (EccStatus0 & ECC_UE_MASK)
13526 +       panic ("elan: Uncorrectable ECC memory error");
13527 +    if (EccStatus0 & ECC_CE_MASK)
13528 +       BumpStat (dev, CorrectableErrors);
13529 +    if (EccStatus0 & ECC_ME_MASK)
13530 +       BumpStat (dev, MultipleErrors);
13531 +
13532 +    /*
13533 +     * Clear the interrupt and reset the error flags.
13534 +     * Note. Might loose an UE or CE if it occurs between reading the status and
13535 +     *       clearing the interrupt. I don't think this matters very much as the
13536 +     *   status reg will only be used to identify a bad simm.
13537 +     */
13538 +
13539 +    spin_lock_irqsave (&dev->TlbLock, flags);
13540 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | CLEAR_SDRAM_ERROR);
13541 +    mmiob();
13542 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
13543 +
13544 +    CheckForExcessiveErrorRate (dev);
13545 +}
13546 +
13547 +static int
13548 +HandleEventInterrupt (ELAN3_DEV *dev, int nticks, unsigned long *flags)
13549 +{
13550 +    E3_uint32 Fptr  = dev->Event_Int_Queue_FPtr;
13551 +    E3_uint32 Bptr  = read_reg32 (dev, Event_Int_Queue_BPtr);                                          /* PCI read */
13552 +    long      tlim  = lbolt + nticks;
13553 +    long      count = 0;
13554 +    ELAN3_CTXT *ctxt;
13555 +
13556 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
13557 +    ASSERT ((dev->InterruptMask & INT_EventInterrupt) == 0);
13558 +           
13559 +    while (Fptr != Bptr)
13560 +    {
13561 +       while (Fptr != Bptr)
13562 +       {
13563 +           E3_EventInt_BE  EvInt;
13564 +           E3_uint32       Context;
13565 +
13566 +           /* If we're running in the interrupt handler and have seen a high
13567 +            * rate of event interrupts then punt to the thread  - however on 
13568 +            * Linux the elan interrupt handler can block the timer interrupt,
13569 +            * and so lbolt (jiffies) is not incremented, hence we punt after
13570 +            a number of loops instead */
13571 +#if defined(LINUX)
13572 +           if (in_interrupt() && ++count > eventint_punt_loops)
13573 +               return (EAGAIN);
13574 +#endif
13575 +
13576 +           if (nticks && ((int) (lbolt - tlim)) > 0)
13577 +           {
13578 +               PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Fptr %x Bptr %x punting to thread\n", Fptr, Bptr);
13579 +               return (EAGAIN);
13580 +           }
13581 +
13582 +           elan3_sdram_copyq_from_sdram (dev, Fptr, (void *) &EvInt, 8);                               /* PCI read */
13583 +           
13584 +           /* The context number is held in the top 16 bits of the EventContext */
13585 +           Context = (EvInt.s.EventContext >> 16) & MAX_ROOT_CONTEXT_MASK;
13586 +           
13587 +           PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Context %d : Cookie %x\n", Context, EvInt.s.IntCookie);
13588 +           
13589 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, Context);
13590 +
13591 +           /* Work out new fptr, and store it in the device, since we'll be dropping the IntrLock */
13592 +           Fptr = E3_EVENT_INTQ_NEXT(Fptr);
13593 +           dev->Event_Int_Queue_FPtr = Fptr;
13594 +
13595 +           if (ctxt == NULL)
13596 +           {
13597 +               PRINTF3 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Fptr %x Bptr %x context %d invalid\n",
13598 +                        Fptr, Bptr, Context);
13599 +               BumpStat (dev, InvalidContext);
13600 +           }
13601 +           else
13602 +           {
13603 +               BumpStat (dev, EventInterrupts);
13604 +               
13605 +               spin_unlock_irqrestore (&dev->IntrLock, *flags);
13606 +               QueueEventInterrupt (ctxt, EvInt.s.IntCookie);
13607 +               spin_lock_irqsave (&dev->IntrLock, *flags);
13608 +           }
13609 +           
13610 +           /* Re-read the FPtr, since we've dropped the IntrLock */
13611 +           Fptr = dev->Event_Int_Queue_FPtr;
13612 +           
13613 +           /* Store the new FPtr to the elan, this also clears the interrupt. */
13614 +           write_reg32 (dev, Event_Int_Queue_FPtr, Fptr);                                      /* PCI write */
13615 +
13616 +           mmiob();
13617 +       }
13618 +
13619 +       mb();
13620 +       Bptr = read_reg32 (dev, Event_Int_Queue_BPtr);                                          /* PCI read */
13621 +    }
13622 +
13623 +    return (ESUCCESS);
13624 +}
13625 +
13626 +int
13627 +SetLinkBoundaryScan (ELAN3_DEV *dev)
13628 +{
13629 +    int           res = ESUCCESS;
13630 +    unsigned long flags;
13631 +
13632 +    spin_lock_irqsave (&dev->IntrLock, flags);
13633 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
13634 +       res = EAGAIN;
13635 +    else
13636 +    {
13637 +       PRINTF0 (DBG_DEVICE, DBG_BSCAN, "SetLinkBoundaryScan: setting link into boundary scan mode\n");
13638 +
13639 +       /*
13640 +        * We're going to set the link into boundary scan mode,  so firstly
13641 +        * set the inputters to discard everything.
13642 +        */
13643 +       if (dev->DiscardAllCount++ == 0)
13644 +           SetSchedStatusRegister (dev, read_reg32 (dev, Exts.InterruptReg), NULL);
13645 +
13646 +       /*
13647 +        * Now disable the error interrupts
13648 +        */
13649 +       DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
13650 +       
13651 +       /*
13652 +        * And set the link into boundary scan mode, and drive
13653 +        * a reset token onto the link.
13654 +        */
13655 +       SET_SCHED_LINK_VALUE (dev, 1, LinkResetToken);
13656 +    }
13657 +
13658 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13659 +
13660 +    return (res);
13661 +}
13662 +
13663 +void
13664 +ClearLinkBoundaryScan (ELAN3_DEV *dev)
13665 +{
13666 +    unsigned long flags;
13667 +
13668 +    spin_lock_irqsave (&dev->IntrLock, flags);
13669 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
13670 +    {
13671 +       PRINTF0 (DBG_DEVICE, DBG_BSCAN, "ClearLinkBoundaryScan: taking link out of boundary scan mode\n");
13672 +
13673 +       /*
13674 +        * Take the link out of boundary scan 
13675 +        */
13676 +       SET_SCHED_LINK_VALUE (dev, 0, 0);
13677 +
13678 +       /*
13679 +        * Clear any link errors.
13680 +        */
13681 +       PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
13682 +
13683 +       /*
13684 +        * Re-enable the error interrupts.
13685 +        */
13686 +       if (! timer_fn_queued(&dev->ErrorTimeoutId))
13687 +           ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
13688 +
13689 +       /*
13690 +        * And stop the inputter from discarding all packets.
13691 +        */
13692 +       if (--dev->DiscardAllCount == 0)
13693 +           SetSchedStatusRegister (dev, 0, NULL);
13694 +    }
13695 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13696 +}
13697 +
13698 +int
13699 +WriteBoundaryScanValue (ELAN3_DEV *dev, int value)
13700 +{
13701 +    int           res = 0;
13702 +    unsigned long flags;
13703 +
13704 +    spin_lock_irqsave (&dev->IntrLock, flags);
13705 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
13706 +    {
13707 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "WriteBoundaryScanValue: driving value 0x%x onto link\n", value);
13708 +       SET_SCHED_LINK_VALUE (dev, 1, value);
13709 +
13710 +       res = read_reg32 (dev, Exts.LinkState);
13711 +
13712 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "WriteBoundaryScanValue: return 0x%x\n", res);
13713 +    }
13714 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13715 +
13716 +    return (res);
13717 +}
13718 +
13719 +int
13720 +ReadBoundaryScanValue(ELAN3_DEV *dev, int link)
13721 +{
13722 +    int           res;
13723 +    unsigned long flags;
13724 +
13725 +    spin_lock_irqsave (&dev->IntrLock, flags);
13726 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
13727 +    {
13728 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "ReadBoundaryScanValue: set linkval 0x%x\n",  link);
13729 +       SET_SCHED_LINK_VALUE (dev, 0, link);
13730 +    }
13731 +    res = read_reg32 (dev, Exts.LinkState);
13732 +    PRINTF1 (DBG_DEVICE, DBG_BSCAN, "ReadBoundaryScanValue: return 0x%x\n", res);
13733 +
13734 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13735 +
13736 +    return (res);
13737 +}
13738 +
13739 +static int
13740 +ReadLinkVal (ELAN3_DEV *dev, int link)
13741 +{
13742 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
13743 +       SET_SCHED_LINK_VALUE (dev, 0, link);
13744 +    
13745 +    return (read_reg32 (dev, Exts.LinkState));
13746 +}
13747 +
13748 +static void
13749 +HandleLinkError (ELAN3_DEV *dev)
13750 +{
13751 +    E3_uint32 value = read_reg32 (dev, Exts.LinkErrorTypes);
13752 +
13753 +    PRINTF1 (DBG_DEVICE, DBG_LINKERR, "HandleLinkError: LinkErrorTypes %08x - clearing\n", value);
13754 +    
13755 +    if (value & LS_LockError)   BumpStat (dev, LockError);
13756 +    if (value & LS_DeskewError) BumpStat (dev, DeskewError);
13757 +    if (value & LS_PhaseError)  BumpStat (dev, PhaseError);
13758 +    if (value & LS_DataError)   BumpStat (dev, DataError);
13759 +    if (value & LS_FifoOvFlow0) BumpStat (dev, FifoOvFlow0);
13760 +    if (value & LS_FifoOvFlow1) BumpStat (dev, FifoOvFlow1);
13761 +
13762 +    if (value & LS_DataError)
13763 +       dev->Stats.LinkErrorValue = ReadLinkVal (dev, 12) | (ReadLinkVal (dev, 13) << 9);
13764 +
13765 +    PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
13766 +
13767 +    CheckForExcessiveErrorRate (dev);
13768 +}
13769 +
13770 +static void
13771 +HandleErrorInterrupt (ELAN3_DEV *dev, E3_uint32 Pend)
13772 +{
13773 +    if (Pend & INT_PciMemErr)
13774 +       HandlePciMemErr (dev);
13775 +    
13776 +    if (Pend & INT_SDRamInt)
13777 +       HandleSDRamInterrupt (dev);
13778 +    
13779 +    if (Pend & INT_LinkError)
13780 +       HandleLinkError (dev);
13781 +}
13782 +       
13783 +static void
13784 +HandleAnyIProcTraps (ELAN3_DEV *dev, E3_uint32 Pend)
13785 +{
13786 +    E3_uint32       RestartBits = 0;
13787 +    
13788 +    if (Pend & INT_IProcCh0SysCntx)
13789 +    {
13790 +       HandleIProcTrap (dev, 0, Pend,
13791 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx),
13792 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrHead[0]),
13793 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrData[0]));
13794 +                        
13795 +       RestartBits |= RestartCh0SysCntx;
13796 +    }
13797 +    
13798 +    if (Pend & INT_IProcCh1SysCntx)
13799 +    {
13800 +       HandleIProcTrap (dev, 1, Pend,
13801 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx),
13802 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrHead[0]),
13803 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrData[0]));
13804 +                        
13805 +       RestartBits |= RestartCh1SysCntx;
13806 +    }
13807 +
13808 +    if (Pend & INT_IProcCh0NonSysCntx)
13809 +    {
13810 +       HandleIProcTrap (dev, 0, Pend,
13811 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx),
13812 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrHead[0]),
13813 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrData[0]));
13814 +
13815 +       RestartBits |= RestartCh0NonSysCntx;
13816 +    }
13817 +    
13818 +
13819 +    if (Pend & INT_IProcCh1NonSysCntx)
13820 +    {
13821 +       HandleIProcTrap (dev, 1, Pend,
13822 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx),
13823 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrHead[0]),
13824 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrData[0]));
13825 +       RestartBits |= RestartCh1NonSysCntx;
13826 +    }
13827 +
13828 +    PULSE_SCHED_STATUS (dev, RestartBits);
13829 +}
13830 +
13831 +static void
13832 +elan3_event_interrupt (ELAN3_DEV *dev)
13833 +{
13834 +    unsigned long flags;
13835 +
13836 +    kernel_thread_init("elan3_event_int");
13837 +
13838 +    spin_lock_irqsave (&dev->IntrLock, flags);
13839 +    for (;;)
13840 +    {
13841 +       /* Make sure we never sleep with the EventInterrupt disabled */
13842 +       if (! (dev->InterruptMask & INT_EventInterrupt))
13843 +       {
13844 +           if (HandleEventInterrupt (dev, eventint_resched_ticks, &flags) != ESUCCESS)
13845 +               BumpStat (dev, EventRescheds);
13846 +           
13847 +           ENABLE_INT_MASK (dev, INT_EventInterrupt);
13848 +       }
13849 +
13850 +       if (dev->ThreadsShouldStop)
13851 +           break;
13852 +
13853 +       kcondvar_wait (&dev->IntrWait, &dev->IntrLock, &flags);
13854 +    }
13855 +    
13856 +    dev->EventInterruptThreadStopped = 1;
13857 +    kcondvar_wakeupall (&dev->IntrWait, &dev->IntrLock);
13858 +
13859 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13860 +
13861 +    kernel_thread_exit ();
13862 +}
13863 +
13864 +int
13865 +InterruptHandler (ELAN3_DEV *dev)
13866 +{
13867 +    E3_uint32     Mask;
13868 +    E3_uint32     Pend;
13869 +    E3_uint32     RestartBits;
13870 +    int           deliverDProcTrap;
13871 +    int                  deliverTProcTrap;
13872 +    static long   lboltsave;
13873 +    int           loop_count = 0; 
13874 +    unsigned long flags;
13875 +    int  tproc_delivered;
13876 +
13877 +    spin_lock_irqsave (&dev->IntrLock, flags);
13878 +
13879 +    BumpStat (dev, Interrupts);
13880 +
13881 +    Mask = dev->InterruptMask;
13882 +    Pend = read_reg32 (dev, Exts.InterruptReg);                                                /* PCI read */
13883 +
13884 +    /* Save the lbolt so we know how long in do loop or in event handling */
13885 +    lboltsave = lbolt;
13886 +
13887 +    if ((Pend & Mask) == INT_EventInterrupt)
13888 +    {
13889 +       DISABLE_INT_MASK (dev, INT_EventInterrupt);
13890 +
13891 +       if (HandleEventInterrupt (dev, eventint_punt_ticks, &flags) == ESUCCESS)
13892 +           ENABLE_INT_MASK (dev, INT_EventInterrupt);
13893 +       else
13894 +       {
13895 +           BumpStat (dev, EventPunts);
13896 +
13897 +           kcondvar_wakeupone (&dev->IntrWait, &dev->IntrLock);
13898 +       }
13899 +
13900 +        if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
13901 +            dev->Stats.LongestInterrupt = (lbolt - lboltsave);
13902 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
13903 +       return (ESUCCESS);
13904 +    }
13905 +
13906 +    if ((Pend & Mask) == 0)
13907 +    {
13908 +       PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Spurious Pend %x Mask %x SchedStatus %x\n", 
13909 +                Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
13910 +
13911 +        if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
13912 +            dev->Stats.LongestInterrupt = (lbolt - lboltsave);
13913 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
13914 +       return (EFAIL);
13915 +    }
13916 +
13917 +    PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Pend %x Mask %08x SchedStatus %x\n", 
13918 +            Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
13919 +
13920 +    do {
13921 +       loop_count++;
13922 +       RestartBits = 0;
13923 +
13924 +       if (Pend & Mask & (INT_CProc | INT_ComQueue))
13925 +           HandleCProcTrap (dev, Pend, &Mask);
13926 +
13927 +       tproc_delivered = 0;
13928 +
13929 +       if (Pend & Mask & INT_TProc) {
13930 +           ELAN_REG_REC(Pend);
13931 +           tproc_delivered = 1;
13932 +           deliverTProcTrap = HandleTProcTrap (dev, &RestartBits);
13933 +       }
13934 +       else
13935 +           deliverTProcTrap = 0;
13936 +
13937 +       if (Pend & Mask & INT_DProc)
13938 +           deliverDProcTrap = HandleDProcTrap (dev, &RestartBits);
13939 +       else
13940 +           deliverDProcTrap = 0;
13941 +
13942 +       ASSERT ((RestartBits & RestartDProc) == 0 || (read_reg32 (dev, Exts.DProcStatus.Status) >> 29) == 4);
13943 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR.Status))      == 0);
13944 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
13945 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
13946 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
13947 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
13948 +
13949 +       PULSE_SCHED_STATUS (dev, RestartBits);          /* Restart any processors which had trapped. */
13950 +       SET_INT_MASK (dev, Mask);                       /* And install the new interrupt mask */
13951 +
13952 +       if ((Pend & Mask & INT_TProc) && deliverTProcTrap)
13953 +           DeliverTProcTrap (dev, dev->ThreadTrap, Pend);
13954 +
13955 +       if ((Pend & Mask & INT_DProc) && deliverDProcTrap)
13956 +           DeliverDProcTrap (dev, dev->DmaTrap, Pend);
13957 +
13958 +       if (Pend & Mask & INT_Inputters)
13959 +           HandleAnyIProcTraps (dev, Pend);
13960 +       
13961 +       if (Pend & Mask & INT_EventInterrupt)
13962 +       {
13963 +           DISABLE_INT_MASK (dev, INT_EventInterrupt);
13964 +           
13965 +           if (loop_count == 1 && HandleEventInterrupt (dev, eventint_punt_ticks, &flags) == ESUCCESS) /* always punt to the thread if we've */
13966 +               ENABLE_INT_MASK (dev, INT_EventInterrupt);                                              /* been round the loop once */
13967 +           else
13968 +           {
13969 +               BumpStat (dev, EventPunts);
13970 +
13971 +               kcondvar_wakeupone (&dev->IntrWait, &dev->IntrLock);
13972 +           }
13973 +       }
13974 +
13975 +       if (Pend & (INT_Halted | INT_Discarding))
13976 +           ProcessHaltOperations (dev, Pend);
13977 +
13978 +       if (Pend & Mask & INT_ErrorInterrupts)
13979 +           HandleErrorInterrupt (dev, Pend);
13980 +
13981 +       Mask = dev->InterruptMask;
13982 +       Pend = read_reg32 (dev, Exts.InterruptReg);     /* PCI read */
13983 +       
13984 +       if (tproc_delivered)
13985 +           ELAN_REG_REC(Pend);
13986 +
13987 +       PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Pend %x Mask %08x SchedStatus %x\n", 
13988 +                Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
13989 +    }  while ((Pend & Mask) != 0);
13990 +
13991 +    if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
13992 +        dev->Stats.LongestInterrupt = (lbolt - lboltsave);
13993 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13994 +
13995 +    PRINTF2 (DBG_DEVICE, DBG_INTR, "InterruptHandler: lbolt is %lx; start lbolt is %lx\n", 
13996 +            lbolt, lboltsave);
13997 +
13998 +    return (ESUCCESS);
13999 +}
14000 +
14001 +void
14002 +SetSchedStatusRegister (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp)
14003 +{
14004 +    E3_uint32 HaltMask  = dev->HaltOperationsMask;
14005 +    E3_uint32 Mask      = Maskp ? *Maskp : dev->InterruptMask;
14006 +    E3_uint32 ClearBits = 0;
14007 +    E3_uint32 SetBits   = 0;
14008 +
14009 +    PRINTF5 (DBG_DEVICE, DBG_INTR, "SetSchedStatusRegister: HaltOperationsMask=%x HaltAll=%d HaltDmaDequeue=%d HaltThread=%d DiscardAll=%d\n",
14010 +            HaltMask, dev->HaltAllCount, dev->HaltDmaDequeueCount, dev->HaltThreadCount, dev->DiscardAllCount);
14011 +
14012 +    if (dev->FlushCommandCount)
14013 +       SetBits |= FlushCommandQueues;
14014 +    
14015 +    if ((HaltMask & INT_DProcHalted) || dev->HaltAllCount)
14016 +    {
14017 +       SetBits |= HaltDmas | HaltDmaDequeue;
14018 +       if (Pend & INT_DProcHalted)
14019 +           Mask &= ~INT_DProcHalted;
14020 +       else
14021 +           Mask |= INT_DProcHalted;
14022 +    }
14023 +
14024 +    if (dev->HaltDmaDequeueCount)
14025 +    {
14026 +       SetBits |= HaltDmaDequeue;
14027 +       if (Pend & INT_DProcHalted)
14028 +           Mask &= ~INT_DProcHalted;
14029 +       else
14030 +           Mask |= INT_DProcHalted;
14031 +    }
14032 +
14033 +    if ((HaltMask & INT_TProcHalted) || dev->HaltAllCount || dev->HaltThreadCount)
14034 +    {
14035 +       SetBits |= HaltThread;
14036 +       if (Pend & INT_TProcHalted)
14037 +           Mask &= ~INT_TProcHalted;
14038 +       else
14039 +           Mask |= INT_TProcHalted;
14040 +    }
14041 +
14042 +    if ((HaltMask & INT_DiscardingSysCntx) || dev->DiscardAllCount)
14043 +    {
14044 +       SetBits |= DiscardSysCntxIn;
14045 +       if (Pend & INT_DiscardingSysCntx)
14046 +           Mask &= ~INT_DiscardingSysCntx;
14047 +       else
14048 +           Mask |= INT_DiscardingSysCntx;
14049 +    }
14050 +
14051 +    if ((HaltMask & INT_DiscardingNonSysCntx) || dev->DiscardNonContext0Count || dev->DiscardAllCount)
14052 +    {
14053 +       SetBits |= DiscardNonSysCntxIn;
14054 +       if (Pend & INT_DiscardingNonSysCntx)
14055 +           Mask &= ~INT_DiscardingNonSysCntx;
14056 +       else
14057 +           Mask |= INT_DiscardingNonSysCntx;
14058 +    }
14059 +
14060 +    if (dev->HaltNonContext0Count)
14061 +       SetBits |= StopNonSysCntxs;
14062 +
14063 +    ClearBits = SetBits ^ (FlushCommandQueues | HaltDmas | HaltDmaDequeue | HaltThread |
14064 +                          DiscardSysCntxIn | DiscardNonSysCntxIn | StopNonSysCntxs);
14065 +
14066 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "SetSchedStatusRegister: SetBits=%x InterruptMask=%x InterruptReg=%x Mask=%x\n",
14067 +            SetBits, dev->InterruptMask, read_reg32 (dev, Exts.InterruptReg), Mask);
14068 +
14069 +    MODIFY_SCHED_STATUS (dev, SetBits, ClearBits);
14070 +
14071 +    if (Maskp)
14072 +       *Maskp = Mask;                                          /* copyback new interrupt mask */
14073 +    else
14074 +       SET_INT_MASK(dev, Mask);
14075 +}
14076 +
14077 +void
14078 +FreeHaltOperation (ELAN3_DEV *dev, ELAN3_HALTOP *op)
14079 +{
14080 +    unsigned long flags;
14081 +
14082 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14083 +    op->Next = dev->FreeHaltOperations;
14084 +    dev->FreeHaltOperations = op;
14085 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14086 +}
14087 +
14088 +int
14089 +ReserveHaltOperations (ELAN3_DEV *dev, int count, int cansleep)
14090 +{
14091 +    ELAN3_HALTOP   *op;
14092 +    unsigned long flags;
14093 +
14094 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14095 +    while ((dev->NumHaltOperations - dev->ReservedHaltOperations) < count)
14096 +    {
14097 +       spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14098 +
14099 +       KMEM_ZALLOC (op, ELAN3_HALTOP *, sizeof (ELAN3_HALTOP), cansleep);
14100 +
14101 +       if (op == NULL)
14102 +           return (FALSE);
14103 +
14104 +       spin_lock_irqsave (&dev->FreeHaltLock, flags);
14105 +
14106 +       dev->NumHaltOperations++;
14107 +
14108 +       op->Next = dev->FreeHaltOperations;
14109 +       dev->FreeHaltOperations = op;
14110 +    }
14111 +                   
14112 +    dev->ReservedHaltOperations += count;
14113 +    
14114 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14115 +
14116 +    return (TRUE);
14117 +}
14118 +
14119 +void
14120 +ReleaseHaltOperations (ELAN3_DEV *dev, int count)
14121 +{
14122 +    unsigned long flags;
14123 +
14124 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14125 +    dev->ReservedHaltOperations -= count;
14126 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14127 +}
14128 +
14129 +void
14130 +QueueHaltOperation (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp, 
14131 +                   E3_uint32 ReqMask, void (*Function)(ELAN3_DEV *, void *), void *Arguement)
14132 +{
14133 +    ELAN3_HALTOP *op;
14134 +
14135 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
14136 +    
14137 +    spin_lock (&dev->FreeHaltLock);
14138 +    op = dev->FreeHaltOperations;
14139 +
14140 +    ASSERT (op != NULL);
14141 +
14142 +    dev->FreeHaltOperations = op->Next;
14143 +    spin_unlock (&dev->FreeHaltLock);
14144 +
14145 +    op->Mask      = ReqMask;
14146 +    op->Function  = (void (*)(void *, void *))Function;
14147 +    op->Arguement = Arguement;
14148 +
14149 +    dev->HaltOperationsMask |= ReqMask;                                /* Add our bits to the global bits needed. */
14150 +    SetSchedStatusRegister (dev, Pend, Maskp);                 /* Set the control register and the interrupt mask */
14151 +
14152 +    /*
14153 +     * If the condition is already satisfied, then SetSchedStatusRegister will
14154 +     * have masked out the interrupt, so re-enable it now to take it straight
14155 +     * away
14156 +     */
14157 +    if (Maskp == NULL)
14158 +    {
14159 +       if ((read_reg32 (dev, Exts.InterruptReg) & ReqMask) == ReqMask)
14160 +           ENABLE_INT_MASK (dev, ReqMask);
14161 +    }
14162 +    else
14163 +    {
14164 +       if ((Pend & ReqMask) == ReqMask)
14165 +           *Maskp |= ReqMask;
14166 +    }
14167 +
14168 +    *dev->HaltOperationsTailpp = op;                           /* Queue at end of list, since ProcessHaltOperations */
14169 +    dev->HaltOperationsTailpp = &op->Next;                     /* drops the IntrLock while running down the list */
14170 +    op->Next = NULL;
14171 +}
14172 +                   
14173 +void
14174 +ProcessHaltOperations (ELAN3_DEV *dev, E3_uint32 Pend)
14175 +{
14176 +    E3_uint32     Mask;
14177 +    ELAN3_HALTOP  *op;
14178 +    ELAN3_HALTOP **prevp;
14179 +    E3_uint32     haltMask;
14180 +    ELAN3_HALTOP  *next;
14181 +
14182 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: Pend %x\n", Pend);
14183 +
14184 +    for (;;)
14185 +    {
14186 +       ELAN3_HALTOP  *head = NULL;
14187 +       ELAN3_HALTOP **tailp = &head;
14188 +
14189 +       /*
14190 +        * Generate a list of halt operations which can be called now.
14191 +        */
14192 +       for (haltMask = 0, prevp = &dev->HaltOperations; (op = *prevp) != NULL; )
14193 +       {
14194 +           if ((Pend & op->Mask) != op->Mask)
14195 +           {
14196 +               haltMask |= op->Mask;
14197 +               prevp = &op->Next;
14198 +           }
14199 +           else
14200 +           {
14201 +               *prevp = op->Next;                              /* remove from list */
14202 +               if (op->Next == NULL)
14203 +                   dev->HaltOperationsTailpp = prevp;
14204 +               
14205 +               *tailp = op;                                    /* add to local list */
14206 +               op->Next = NULL;
14207 +               tailp = &op->Next;
14208 +           }
14209 +       }
14210 +
14211 +       if (head == NULL)                                       /* nothing to do, so update */
14212 +       {                                                       /* the schedule status register */
14213 +           dev->HaltOperationsMask = haltMask;                 /* and the interrupt mask */
14214 +           SetSchedStatusRegister (dev, Pend, NULL);
14215 +           return;
14216 +       }
14217 +
14218 +       /*
14219 +        * flush the command queues, before calling any operations
14220 +        */
14221 +       Mask = dev->InterruptMask;
14222 +       
14223 +       if (dev->FlushCommandCount++ == 0)
14224 +           SetSchedStatusRegister (dev, Pend, &Mask);
14225 +       
14226 +       if ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
14227 +       {
14228 +           if (dev->HaltThreadCount++ == 0)
14229 +               SetSchedStatusRegister (dev, Pend, &Mask);
14230 +
14231 +           CAPTURE_CPUS();
14232 +
14233 +           while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
14234 +               mb();
14235 +
14236 +           RELEASE_CPUS();
14237 +                   
14238 +           if (--dev->HaltThreadCount == 0)
14239 +               SetSchedStatusRegister (dev, Pend, &Mask);
14240 +       }
14241 +               
14242 +       if (read_reg32 (dev, Exts.InterruptReg) & INT_CProc)
14243 +       {
14244 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: command processor has trapped\n");
14245 +           HandleCProcTrap (dev, Pend, &Mask);
14246 +       }
14247 +       
14248 +       if (--dev->FlushCommandCount == 0)
14249 +           SetSchedStatusRegister (dev, Pend, &Mask);
14250 +       
14251 +       PRINTF2 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: interrupt mask %08x -> %08x\n", 
14252 +                dev->InterruptMask, Mask);
14253 +       
14254 +       SET_INT_MASK (dev, Mask);
14255 +       spin_unlock (&dev->IntrLock);
14256 +
14257 +       /*
14258 +        * now process the list of operations
14259 +        * we have
14260 +        */
14261 +       for (op = head; op != NULL; op = next)
14262 +       {
14263 +           next = op->Next;
14264 +
14265 +           op->Function (dev, op->Arguement);
14266 +           
14267 +           FreeHaltOperation (dev, op);
14268 +       }
14269 +
14270 +       spin_lock (&dev->IntrLock);
14271 +    }
14272 +}
14273 +
14274 +int
14275 +ComputePosition (ELAN_POSITION *pos, unsigned nodeId, unsigned numNodes, unsigned numDownLinksVal)
14276 +{
14277 +    int i, lvl, n;
14278 +    char numDownLinks[ELAN_MAX_LEVELS];
14279 +
14280 +    if (nodeId >= numNodes)
14281 +       return (EINVAL);
14282 +
14283 +    for (i = 0; i < ELAN_MAX_LEVELS; i++, numDownLinksVal >>= 4)
14284 +       numDownLinks[i] = numDownLinksVal & 7;
14285 +    
14286 +    for (lvl = 0, n = numNodes; n > ((lvl % 3) == 2 ? 8 : 4) && lvl < ELAN_MAX_LEVELS; lvl++)
14287 +    {
14288 +       if (numDownLinks[lvl] == 0)
14289 +           numDownLinks[lvl] = 4;
14290 +       
14291 +       if ((n % numDownLinks[lvl]) != 0)
14292 +           return (EINVAL);
14293 +       
14294 +       n /= numDownLinks[lvl];
14295 +    }
14296 +
14297 +    if (numDownLinks[lvl] == 0)
14298 +       numDownLinks[lvl] = n;
14299 +
14300 +    if (numDownLinks[lvl] != n)
14301 +       return (EINVAL);
14302 +
14303 +    for (i = 0; i <= lvl; i++)
14304 +       pos->pos_arity[i] = numDownLinks[lvl - i];
14305 +
14306 +    pos->pos_nodes  = numNodes;
14307 +    pos->pos_levels = lvl + 1;
14308 +    pos->pos_nodeid = nodeId;
14309 +    pos->pos_mode   = ELAN_POS_MODE_SWITCHED;
14310 +
14311 +    return (0);
14312 +}
14313 +
14314 +/*
14315 + * Local variables:
14316 + * c-file-style: "stroustrup"
14317 + * End:
14318 + */
14319 Index: linux-2.4.21/drivers/net/qsnet/elan3/elandev_linux.c
14320 ===================================================================
14321 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/elandev_linux.c   2004-02-23 16:02:56.000000000 -0500
14322 +++ linux-2.4.21/drivers/net/qsnet/elan3/elandev_linux.c        2005-06-01 23:12:54.582441688 -0400
14323 @@ -0,0 +1,2302 @@
14324 +/*
14325 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
14326 + *
14327 + *    For licensing information please see the supplied COPYING file
14328 + *
14329 + */
14330 +
14331 +#ident "$Id: elandev_linux.c,v 1.102.2.4 2004/12/20 16:55:17 mike Exp $"
14332 +/*     $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandev_linux.c,v $*/
14333 +
14334 +#include <qsnet/kernel.h>
14335 +#include <qsnet/kpte.h>
14336 +
14337 +#include <linux/config.h>
14338 +#include <linux/mm.h>
14339 +#include <linux/pci.h>
14340 +#include <linux/reboot.h>
14341 +#include <linux/notifier.h>
14342 +
14343 +#include <linux/init.h>
14344 +#include <linux/module.h>
14345 +
14346 +#include <linux/pci.h>
14347 +#include <linux/ptrack.h>
14348 +
14349 +#include <asm/uaccess.h>
14350 +#include <asm/io.h>
14351 +#include <asm/pgalloc.h>
14352 +#include <asm/pgtable.h>
14353 +
14354 +#include <elan/devinfo.h>
14355 +#include <elan/elanmod.h>
14356 +
14357 +#include <elan3/elanregs.h>
14358 +#include <elan3/elandev.h>
14359 +#include <elan3/elanvp.h>
14360 +#include <elan3/elanio.h>
14361 +#include <elan3/elan3mmu.h>
14362 +#include <elan3/elanctxt.h>
14363 +#include <elan3/elandebug.h>
14364 +#include <elan3/elansyscall.h>
14365 +
14366 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0)
14367 +#error please use a 2.2 series kernel or newer
14368 +#endif
14369 +
14370 +/* Minor numbers encoded as :
14371 + *   [5:0]     device number
14372 + *   [15:6]    function number
14373 + */
14374 +#define ELAN3_DEVICE_MASK          0x3F
14375 +
14376 +#define ELAN3_MINOR_CONTROL      0
14377 +#define ELAN3_MINOR_MEM          1
14378 +#define ELAN3_MINOR_USER        2
14379 +#define ELAN3_MINOR_SHIFT        6
14380 +
14381 +#define ELAN3_DEVICE(inode)    (MINOR(inode->i_rdev) & ELAN3_DEVICE_MASK)
14382 +#define ELAN3_MINOR(inode)     (MINOR(inode->i_rdev) >> ELAN3_MINOR_SHIFT)
14383 +
14384 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
14385 +#      define SetPageReserved(page)    set_bit(PG_reserved, &(page)->flags)
14386 +#      define ClearPageReserved(page)  clear_bit(PG_reserved, &(page)->flags)
14387 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23)
14388 +typedef void irqreturn_t;
14389 +#endif
14390 +#       define IRQ_NONE
14391 +#       define IRQ_HANDLED
14392 +#       define IRQ_RETVAL(x)
14393 +#endif
14394 +
14395 +
14396 +/*
14397 + * Function prototypes.
14398 + */
14399 +static int     elanattach(int instance, struct pci_dev *pcidev);
14400 +static int     elandetach(int instance);
14401 +
14402 +static int     elan3_open (struct inode *inode, struct file *file);
14403 +static int     elan3_ioctl (struct inode *inode, struct file *file, 
14404 +                            unsigned int cmd, unsigned long arg);
14405 +static int     elan3_mmap (struct file *file, struct vm_area_struct *vm_area);
14406 +static int     elan3_release (struct inode *inode, struct file *file);
14407 +
14408 +static int      elan3_reboot_event (struct notifier_block *self, unsigned long event, void *buffer);
14409 +static int      elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer);
14410 +
14411 +static irqreturn_t InterruptHandlerWrapper(int irq, void *dev_id, struct pt_regs *regs);
14412 +
14413 +static int     ConfigurePci(ELAN3_DEV *dev);
14414 +static int     ResetElan(ELAN3_DEV *dev, ioaddr_t intPalAddr);
14415 +
14416 +static void     elan3_shutdown_devices(int panicing);
14417 +
14418 +/*
14419 + * Globals. 
14420 + */
14421 +static ELAN3_DEV       *elan3_devices[ELAN3_MAX_CONTROLLER];
14422 +static int       NodeId = ELAN3_INVALID_NODE;
14423 +static int       NumNodes;
14424 +static int       DownLinks;
14425 +static int       RandomRoutingDisabled;
14426 +int              BackToBackMaster;
14427 +int              BackToBackSlave;
14428 +int              enable_sdram_writecombining;
14429 +int             sdram_bank_limit;
14430 +extern int       LwpNice;
14431 +
14432 +char *    elan_reg_rec_file [ELAN_REG_REC_MAX];
14433 +int       elan_reg_rec_line [ELAN_REG_REC_MAX];
14434 +long      elan_reg_rec_lbolt[ELAN_REG_REC_MAX];
14435 +int       elan_reg_rec_cpu  [ELAN_REG_REC_MAX];
14436 +E3_uint32 elan_reg_rec_reg  [ELAN_REG_REC_MAX];
14437 +int       elan_reg_rec_index;
14438 +
14439 +MODULE_AUTHOR("Quadrics Ltd.");
14440 +MODULE_DESCRIPTION("Elan3 Device Driver");
14441 +
14442 +MODULE_LICENSE("GPL");
14443 +
14444 +MODULE_PARM(NodeId,"i");
14445 +MODULE_PARM(NumNodes,"i");
14446 +MODULE_PARM(RandomRoutingDisabled,"i");
14447 +MODULE_PARM(DownLinks,"i");
14448 +MODULE_PARM(BackToBackMaster,"i");
14449 +MODULE_PARM(BackToBackSlave,"i");
14450 +MODULE_PARM(LwpNice, "i");
14451 +MODULE_PARM(elan3_debug, "i");
14452 +MODULE_PARM(elan3_debug_console, "i");
14453 +MODULE_PARM(elan3_debug_buffer, "i");
14454 +MODULE_PARM(elan3mmu_debug, "i");
14455 +MODULE_PARM(sdram_bank_limit, "i");
14456 +
14457 +/* elan3/os/context.c */
14458 +EXPORT_SYMBOL(elan3_alloc);
14459 +EXPORT_SYMBOL(elan3_attach);
14460 +EXPORT_SYMBOL(elan3_doattach);
14461 +EXPORT_SYMBOL(elan3_free);
14462 +EXPORT_SYMBOL(elan3_detach);
14463 +EXPORT_SYMBOL(elan3_dodetach);
14464 +EXPORT_SYMBOL(elan3_block_inputter);
14465 +EXPORT_SYMBOL(CheckCommandQueueFlushed);
14466 +
14467 +/* elan3/os/sdram.c */
14468 +EXPORT_SYMBOL(elan3_sdram_alloc);
14469 +EXPORT_SYMBOL(elan3_sdram_free);
14470 +EXPORT_SYMBOL(elan3_sdram_to_phys);
14471 +EXPORT_SYMBOL(elan3_sdram_writeb);
14472 +EXPORT_SYMBOL(elan3_sdram_writew);
14473 +EXPORT_SYMBOL(elan3_sdram_writel);
14474 +EXPORT_SYMBOL(elan3_sdram_writeq);
14475 +EXPORT_SYMBOL(elan3_sdram_readb);
14476 +EXPORT_SYMBOL(elan3_sdram_readw);
14477 +EXPORT_SYMBOL(elan3_sdram_readl);
14478 +EXPORT_SYMBOL(elan3_sdram_readq);
14479 +EXPORT_SYMBOL(elan3_sdram_zerob_sdram);
14480 +EXPORT_SYMBOL(elan3_sdram_zerow_sdram);
14481 +EXPORT_SYMBOL(elan3_sdram_zerol_sdram);
14482 +EXPORT_SYMBOL(elan3_sdram_zeroq_sdram);
14483 +EXPORT_SYMBOL(elan3_sdram_copyb_to_sdram);
14484 +EXPORT_SYMBOL(elan3_sdram_copyw_to_sdram);
14485 +EXPORT_SYMBOL(elan3_sdram_copyl_to_sdram);
14486 +EXPORT_SYMBOL(elan3_sdram_copyq_to_sdram);
14487 +EXPORT_SYMBOL(elan3_sdram_copyb_from_sdram);
14488 +EXPORT_SYMBOL(elan3_sdram_copyw_from_sdram);
14489 +EXPORT_SYMBOL(elan3_sdram_copyl_from_sdram);
14490 +EXPORT_SYMBOL(elan3_sdram_copyq_from_sdram);
14491 +
14492 +/* elan3/os/tproc.c */
14493 +EXPORT_SYMBOL(DeliverTProcTrap);
14494 +EXPORT_SYMBOL(HandleTProcTrap);
14495 +EXPORT_SYMBOL(SaveThreadToStack);
14496 +
14497 +/* elan3/os/tprocinsts.c */
14498 +EXPORT_SYMBOL(RollThreadToClose);
14499 +
14500 +/* elan3/os/iproc.c */
14501 +EXPORT_SYMBOL(InspectIProcTrap);
14502 +EXPORT_SYMBOL(IProcTrapString);
14503 +EXPORT_SYMBOL(SimulateUnlockQueue);
14504 +
14505 +/* elan3/os/cproc.c */
14506 +EXPORT_SYMBOL(HandleCProcTrap);
14507 +
14508 +/* elan3/os/route_table.c */
14509 +EXPORT_SYMBOL(GenerateRoute);
14510 +EXPORT_SYMBOL(LoadRoute);
14511 +EXPORT_SYMBOL(InvalidateRoute);
14512 +EXPORT_SYMBOL(ValidateRoute);
14513 +EXPORT_SYMBOL(ClearRoute);
14514 +EXPORT_SYMBOL(GenerateProbeRoute);
14515 +EXPORT_SYMBOL(GenerateCheckRoute);
14516 +
14517 +/* elan3/os/elandev_generic.c */
14518 +EXPORT_SYMBOL(elan3_debug);
14519 +EXPORT_SYMBOL(QueueHaltOperation);
14520 +EXPORT_SYMBOL(ReleaseHaltOperations);
14521 +EXPORT_SYMBOL(ReserveHaltOperations);
14522 +
14523 +/* elan3/vm/elan3mmu_generic.c */
14524 +EXPORT_SYMBOL(elan3mmu_pteload);
14525 +EXPORT_SYMBOL(elan3mmu_unload);
14526 +EXPORT_SYMBOL(elan3mmu_set_context_filter);
14527 +EXPORT_SYMBOL(elan3mmu_reserve);
14528 +EXPORT_SYMBOL(elan3mmu_attach);
14529 +EXPORT_SYMBOL(elan3mmu_detach);
14530 +EXPORT_SYMBOL(elan3mmu_release);
14531 +/* elan3/vm/elan3mmu_linux.c */
14532 +EXPORT_SYMBOL(elan3mmu_phys_to_pte);
14533 +EXPORT_SYMBOL(elan3mmu_kernel_invalid_pte);
14534 +
14535 +/* elan3/os/elan3_debug.c */
14536 +EXPORT_SYMBOL(elan3_debugf);
14537 +
14538 +/* elan3/os/minames.c */
14539 +EXPORT_SYMBOL(MiToName);
14540 +
14541 +/* elan3/os/elandev_generic.c */
14542 +EXPORT_SYMBOL(MapDeviceRegister);
14543 +EXPORT_SYMBOL(UnmapDeviceRegister);
14544 +
14545 +EXPORT_SYMBOL(elan_reg_rec_lbolt);
14546 +EXPORT_SYMBOL(elan_reg_rec_file);
14547 +EXPORT_SYMBOL(elan_reg_rec_index);
14548 +EXPORT_SYMBOL(elan_reg_rec_cpu);
14549 +EXPORT_SYMBOL(elan_reg_rec_reg);
14550 +EXPORT_SYMBOL(elan_reg_rec_line);
14551 +
14552 +/*
14553 + * Standard device entry points.
14554 + */
14555 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
14556 +
14557 +#include <linux/dump.h>
14558 +
14559 +static int      elan3_dump_event (struct notifier_block *self, unsigned long event, void *buffer);
14560 +
14561 +static struct notifier_block elan3_dump_notifier = 
14562 +{
14563 +    notifier_call:     elan3_dump_event,
14564 +    priority:          0,
14565 +};
14566 +
14567 +static int
14568 +elan3_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
14569 +{
14570 +    if ( event == DUMP_BEGIN )
14571 +       elan3_shutdown_devices (FALSE);
14572 +
14573 +    return (NOTIFY_DONE);
14574 +}
14575 +
14576 +#endif
14577 +
14578 +static struct file_operations elan3_fops = {
14579 +        ioctl:   elan3_ioctl,          /* ioctl */
14580 +        mmap:    elan3_mmap,           /* mmap */
14581 +        open:    elan3_open,           /* open */
14582 +        release: elan3_release,                /* release */
14583 +};
14584 +
14585 +static struct notifier_block elan3_reboot_notifier = 
14586 +{
14587 +    notifier_call:     elan3_reboot_event,
14588 +    priority:          0,
14589 +};
14590 +
14591 +static struct notifier_block elan3_panic_notifier = 
14592 +{
14593 +    notifier_call:     elan3_panic_event,
14594 +    priority:          0,
14595 +};
14596 +
14597 +ELAN3_DEV *
14598 +elan3_device (int instance)
14599 +{
14600 +       if (instance < 0 || instance >= ELAN3_MAX_CONTROLLER)
14601 +           return ((ELAN3_DEV *) NULL);
14602 +       return elan3_devices[instance];
14603 +}
14604 +EXPORT_SYMBOL(elan3_device);
14605 +
14606 +/*
14607 + * Called at rmmod time.  elandetach() for each card + general cleanup.
14608 + */
14609 +#ifdef MODULE
14610 +static void __exit elan3_exit(void)
14611 +{
14612 +       int i;
14613 +
14614 +       printk("elan: preparing to remove module\n");
14615 +
14616 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
14617 +       unregister_dump_notifier (&elan3_dump_notifier);
14618 +#endif
14619 +       unregister_reboot_notifier (&elan3_reboot_notifier);
14620 +       notifier_chain_unregister (&panic_notifier_list, &elan3_panic_notifier);
14621 +
14622 +       /* call elandetach() for each device configured. */
14623 +       for (i = 0; i < ELAN3_MAX_CONTROLLER; i++)
14624 +               if (elan3_devices[i] != NULL)
14625 +                       elandetach(i);
14626 +
14627 +       FinaliseNetworkErrorResolver();
14628 +       elan3mmu_fini();
14629 +
14630 +       cookie_fini();
14631 +       unregister_chrdev(ELAN3_MAJOR, ELAN3_NAME);
14632 +
14633 +       elan3_procfs_fini();
14634 +
14635 +       printk("elan: module removed\n");
14636 +}
14637 +
14638 +/*
14639 + * Called at insmod time.  First we perform general driver initialization,
14640 + * then call elanattach() for each card.
14641 + */
14642 +#ifdef MODULE
14643 +static int __init elan3_init(void)
14644 +#else
14645 +__initfunc(int elan3_init(void))
14646 +#endif
14647 +{
14648 +       int e;
14649 +       int boards;
14650 +       struct pci_dev *dev;
14651 +       char revid;
14652 +
14653 +       elan_reg_rec_index=0;
14654 +       {
14655 +           int i;
14656 +           for(i=0;i<ELAN_REG_REC_MAX;i++)
14657 +               elan_reg_rec_file[i] = NULL;
14658 +       }       
14659 +
14660 +       /* register major/minor num */
14661 +       e = register_chrdev(ELAN3_MAJOR, ELAN3_NAME, &elan3_fops);
14662 +       if (e < 0)
14663 +               return e;
14664 +
14665 +       elan3_procfs_init ();
14666 +
14667 +       cookie_init();
14668 +       elan3mmu_init();
14669 +       InitialiseNetworkErrorResolver();
14670 +
14671 +       /* call elanattach() for each device found on PCI */
14672 +       memset(elan3_devices, 0, sizeof(elan3_devices));
14673 +       boards = 0;
14674 +       for (dev = NULL; (dev = pci_find_device(PCI_VENDOR_ID_QUADRICS, PCI_DEVICE_ID_ELAN3, dev)) != NULL ;) 
14675 +       {
14676 +           pci_read_config_byte (dev, PCI_REVISION_ID, &revid);
14677 +
14678 +           if (revid == PCI_REVISION_ID_ELAN3_REVA)
14679 +               printk ("elan at pci %s - RevA device not supported\n", dev->slot_name);
14680 +           else
14681 +           {
14682 +               if (boards < ELAN3_MAX_CONTROLLER)
14683 +                       /* Count successfully attached devices */ 
14684 +                       boards += ((elanattach(boards, dev) == 0) ? 1 : 0);
14685 +               else
14686 +               {
14687 +                   printk ("elan: max controllers = %d\n", ELAN3_MAX_CONTROLLER);
14688 +                   break;
14689 +               }
14690 +           }
14691 +       }
14692 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
14693 +       register_dump_notifier (&elan3_dump_notifier);
14694 +#endif
14695 +       register_reboot_notifier (&elan3_reboot_notifier);
14696 +       notifier_chain_register (&panic_notifier_list, &elan3_panic_notifier);
14697 +
14698 +       return 0;
14699 +}
14700 +
14701 +/* Declare the module init and exit functions */
14702 +module_init(elan3_init);
14703 +module_exit(elan3_exit);
14704 +
14705 +#endif
14706 +
14707 +static void
14708 +elan3_shutdown_devices(int panicing)
14709 +{
14710 +    ELAN3_DEV *dev;
14711 +    unsigned long flags;
14712 +    register int i;
14713 +
14714 +    local_irq_save (flags);
14715 +    for (i = 0; i < ELAN3_MAX_CONTROLLER; i++)
14716 +    {
14717 +       if ((dev = elan3_devices[i]) != NULL)
14718 +       {
14719 +           if (! panicing) spin_lock (&dev->IntrLock);
14720 +           
14721 +           printk(KERN_INFO "elan%d: forcing link into reset\n", dev->Instance);
14722 +
14723 +           /*
14724 +            * We're going to set the link into boundary scan mode,  so firstly
14725 +            * set the inputters to discard everything.
14726 +            */
14727 +           if (dev->DiscardAllCount++ == 0)
14728 +               SetSchedStatusRegister (dev, read_reg32 (dev, Exts.InterruptReg), NULL);
14729 +
14730 +           dev->LinkShutdown = 1;
14731 +           
14732 +           /*
14733 +            * Now disable the error interrupts
14734 +            */
14735 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
14736 +           
14737 +           /*
14738 +            * And set the link into boundary scan mode, and drive
14739 +            * a reset token onto the link.
14740 +            */
14741 +           SET_SCHED_LINK_VALUE (dev, 1, LinkResetToken);
14742 +
14743 +           if (! panicing) spin_unlock (&dev->IntrLock);
14744 +       }
14745 +    }
14746 +    local_irq_restore (flags);
14747 +}
14748 +
14749 +static int
14750 +elan3_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
14751 +{
14752 +    if (! (event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
14753 +       return (NOTIFY_DONE);
14754 +
14755 +    elan3_shutdown_devices (FALSE);
14756 +
14757 +    return (NOTIFY_DONE);
14758 +}
14759 +
14760 +static int
14761 +elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
14762 +{
14763 +    elan3_shutdown_devices (TRUE);
14764 +
14765 +    return (NOTIFY_DONE);
14766 +}
14767 +
14768 +#include <elan3/elan3ops.h>
14769 +/*
14770 + * Called by init_module() for each card discovered on PCI.
14771 + */
14772 +static int
14773 +elanattach(int instance, struct pci_dev *pcidev)
14774 +{
14775 +       ELAN3_DEV *dev;
14776 +       int ramSize;
14777 +       int level;
14778 +       ioaddr_t sdramAddr, cmdPortAddr, intPalAddr;
14779 +       DeviceMappingHandle handle;
14780 +
14781 +       printk("elan%d: attach, irq=%d\n", instance, pcidev->irq);
14782 +
14783 +       /*
14784 +        * Allocate the ELAN3_DEV structure.
14785 +        */
14786 +       KMEM_ZALLOC(dev, ELAN3_DEV *, sizeof(ELAN3_DEV), TRUE);
14787 +       if (dev == NULL) {
14788 +               printk ("elan%d: KMEM_ALLOC failed\n", instance);
14789 +               return (-ENOMEM);
14790 +       }
14791 +       elan3_devices[instance] = dev;
14792 +       dev->Osdep.pci = pcidev;
14793 +
14794 +       dev->Instance = instance;
14795 +
14796 +       /* Initialise the device information */
14797 +       pci_read_config_word (pcidev, PCI_VENDOR_ID,   &dev->Devinfo.dev_vendor_id);
14798 +       pci_read_config_word (pcidev, PCI_DEVICE_ID,   &dev->Devinfo.dev_device_id);
14799 +       pci_read_config_byte (pcidev, PCI_REVISION_ID, &dev->Devinfo.dev_revision_id);
14800 +
14801 +       dev->Devinfo.dev_instance             = instance;
14802 +       dev->Devinfo.dev_rail                 = instance;
14803 +       dev->Devinfo.dev_driver_version       = 0;
14804 +       dev->Devinfo.dev_num_down_links_value = DownLinks;
14805 +
14806 +       dev->Position.pos_mode                = ELAN_POS_UNKNOWN;
14807 +       dev->Position.pos_random_disabled     = RandomRoutingDisabled;
14808 +       
14809 +       /*
14810 +        * Set up PCI config regs.
14811 +        */
14812 +       if (ConfigurePci(dev) != ESUCCESS)
14813 +           goto fail0;
14814 +
14815 +       /*
14816 +        * Determine the PFnums of the SDRAM and command port
14817 +        */
14818 +       if (MapDeviceRegister(dev, ELAN3_BAR_SDRAM, &sdramAddr, 0, PAGESIZE, &handle) != ESUCCESS)
14819 +           goto fail1;
14820 +
14821 +       DeviceRegisterSize(dev, ELAN3_BAR_SDRAM, &ramSize);
14822 +       
14823 +       dev->SdramPhysMask = ~((physaddr_t) ramSize - 1);
14824 +       dev->SdramPhysBase = kmem_to_phys((void *) sdramAddr);
14825 +
14826 +       UnmapDeviceRegister (dev, &handle);
14827 +
14828 +#if defined(LINUX_ALPHA)
14829 +       /*
14830 +        * consider a physical address to be on the same pci bus
14831 +        * as us if it's physical address is "close" to our sdram
14832 +        * physical address.
14833 +        * this is almost certainly incorrect for large memory (> 2Gb)
14834 +        * i386 machines - and is only correct for alpha for 32 bit
14835 +        * base address registers.
14836 +        *
14837 +        * Modified this to match the Tru64 driver value;
14838 +        * i.e. PciPhysMask = 0xfffffffffffc0000
14839 +        */
14840 +#  define PCI_ADDR_MASK (0x7FFFFFFFl)
14841 +
14842 +       dev->PciPhysMask = ~PCI_ADDR_MASK;
14843 +       dev->PciPhysBase = dev->SdramPhysBase & dev->PciPhysMask;
14844 +#endif
14845 +       /*
14846 +        * Now reset the elan chip.
14847 +        */
14848 +       if (MapDeviceRegister(dev, ELAN3_BAR_REGISTERS, &dev->RegPtr, 0, 0, &dev->RegHandle) != ESUCCESS)
14849 +           goto fail1;
14850 +
14851 +       if (MapDeviceRegister(dev, ELAN3_BAR_EBUS, &intPalAddr, ELAN3_EBUS_INTPAL_OFFSET, PAGESIZE,
14852 +                             &handle) != ESUCCESS)
14853 +           goto fail2;
14854 +
14855 +       ResetElan(dev, intPalAddr);     
14856 +
14857 +       UnmapDeviceRegister (dev, &handle);
14858 +
14859 +       /* 
14860 +        * Initialise the device mutex's which must be accessible from the 
14861 +        * interrupt handler.  
14862 +        */
14863 +       kcondvar_init (&dev->IntrWait);
14864 +       spin_lock_init (&dev->IntrLock);
14865 +       spin_lock_init (&dev->TlbLock);
14866 +       spin_lock_init (&dev->CProcLock);
14867 +       spin_lock_init (&dev->FreeHaltLock);
14868 +       for(level=0; level<4; level++)
14869 +           spin_lock_init (&dev->Level[level].PtblLock);
14870 +       spin_lock_init (&dev->PtblGroupLock);
14871 +
14872 +       /*
14873 +        * Add the interrupt handler,  
14874 +        */
14875 +       if (request_irq(dev->Osdep.pci->irq, InterruptHandlerWrapper, 
14876 +           SA_SHIRQ, "elan3", dev) != 0) {
14877 +               printk ("elan%d: request_irq failed\n", instance);
14878 +               goto fail3;
14879 +       }
14880 +
14881 +       if (MapDeviceRegister(dev, ELAN3_BAR_COMMAND_PORT, &cmdPortAddr, 0, PAGESIZE, &handle) != ESUCCESS)
14882 +           goto fail4;
14883 +       
14884 +       if (InitialiseElan(dev, cmdPortAddr) == EFAIL) {
14885 +               printk ("elan%d: InitialiseElan failed\n", instance);
14886 +               UnmapDeviceRegister (dev, &handle);
14887 +               goto fail4;
14888 +       }
14889 +       UnmapDeviceRegister (dev, &handle);
14890 +
14891 +       /* If our nodeid is defined, then set it now */
14892 +       if (NodeId != ELAN3_INVALID_NODE && ComputePosition (&dev->Position, NodeId, NumNodes, DownLinks) == 0)
14893 +       {
14894 +           if (RandomRoutingDisabled & ((1 << (dev->Position.pos_levels-1))-1))
14895 +               printk ("elan%d: NodeId=%d NodeLevel=%d NumNodes=%d (random routing disabled 0x%x)\n", 
14896 +                       dev->Instance, dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes, RandomRoutingDisabled);
14897 +           else
14898 +               printk ("elan%d: NodeId=%d NodeLevel=%d NumNodes=%d (random routing ok)\n",
14899 +                       dev->Instance, dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes);
14900 +       }
14901 +
14902 +       if (BackToBackMaster || BackToBackSlave)
14903 +       {
14904 +           dev->Position.pos_mode     = ELAN_POS_MODE_BACKTOBACK;
14905 +           dev->Position.pos_nodeid   = (BackToBackMaster == 0);
14906 +           dev->Position.pos_nodes    = 2;
14907 +           dev->Position.pos_levels   = 1;
14908 +           dev->Position.pos_arity[0] = 2;
14909 +
14910 +           printk ("elan%d: back-to-back %s - elan node %d\n", dev->Instance,
14911 +                   BackToBackMaster ? "master" : "slave", dev->Position.pos_nodeid);
14912 +       }
14913 +
14914 +       elan3_procfs_device_init (dev);
14915 +       
14916 +       /* Success */
14917 +       return (0);
14918 +
14919 +fail4:
14920 +       free_irq(dev->Osdep.pci->irq, dev);
14921 +
14922 +fail3:
14923 +       kcondvar_destroy (&dev->IntrWait);
14924 +       spin_lock_destroy (&dev->IntrLock);
14925 +       spin_lock_destroy (&dev->InfoLock);
14926 +       spin_lock_destroy (&dev->TlbLock);
14927 +       spin_lock_destroy (&dev->CProcLock);
14928 +       spin_lock_destroy (&dev->FreeHaltLock);
14929 +       spin_lock_destroy (&dev->Level1PtblLock);
14930 +       spin_lock_destroy (&dev->Level2PtblLock);
14931 +       spin_lock_destroy (&dev->Level3PtblLock);
14932 +       spin_lock_destroy (&dev->PtblGroupLock);
14933 +
14934 +fail2:
14935 +       UnmapDeviceRegister (dev, &dev->RegHandle);
14936 +
14937 +fail1:
14938 +       pci_disable_device (dev->Osdep.pci);
14939 +fail0:
14940 +       KMEM_FREE(dev, sizeof(ELAN3_DEV));
14941 +
14942 +       elan3_devices[instance] = NULL;
14943 +       
14944 +       /* Failure */
14945 +       return (-ENODEV);
14946 +}
14947 +
14948 +/*
14949 + * Called by elan3_exit() for each board found on PCI.
14950 + */
14951 +static int
14952 +elandetach(int instance)
14953 +{
14954 +       ELAN3_DEV *dev = elan3_devices[instance];
14955 +
14956 +       printk("elan%d: detach\n", instance);
14957 +
14958 +       elan3_procfs_device_fini (dev);
14959 +
14960 +       FinaliseElan (dev);
14961 +
14962 +       UnmapDeviceRegister (dev, &dev->RegHandle);
14963 +
14964 +       free_irq(dev->Osdep.pci->irq, dev);
14965 +
14966 +       pci_disable_device(dev->Osdep.pci);
14967 +
14968 +       kcondvar_destroy (&dev->IntrWait);
14969 +       spin_lock_destroy (&dev->IntrLock);
14970 +       spin_lock_destroy (&dev->InfoLock);
14971 +       spin_lock_destroy (&dev->TlbLock);
14972 +       spin_lock_destroy (&dev->CProcLock);
14973 +       spin_lock_destroy (&dev->FreeHaltLock);
14974 +       spin_lock_destroy (&dev->Level1PtblLock);
14975 +       spin_lock_destroy (&dev->Level2PtblLock);
14976 +       spin_lock_destroy (&dev->Level3PtblLock);
14977 +       spin_lock_destroy (&dev->PtblGroupLock);
14978 +
14979 +       KMEM_FREE(dev, sizeof(ELAN3_DEV));
14980 +       elan3_devices[instance] = NULL; 
14981 +
14982 +       return 0;
14983 +}
14984 +
14985 +/*
14986 + * generic ioctls - available on control and user devices.
14987 + */
14988 +
14989 +static int
14990 +device_stats_ioctl (ELAN3_DEV *dev, unsigned long arg)
14991 +{
14992 +    ELAN3IO_STATS_STRUCT *args;
14993 +
14994 +    KMEM_ALLOC(args, ELAN3IO_STATS_STRUCT *, sizeof(ELAN3IO_STATS_STRUCT), TRUE);
14995 +       
14996 +    if (args == NULL)
14997 +       return (-ENOMEM);
14998 +
14999 +    if (copy_from_user (args, (void *) arg, sizeof (ELAN3IO_STATS_STRUCT)))
15000 +    {
15001 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15002 +       return (-EFAULT);
15003 +    }
15004 +
15005 +    switch (args->which)
15006 +    {
15007 +    case ELAN3_SYS_STATS_DEVICE:
15008 +       if (copy_to_user (args->ptr, &dev->Stats, sizeof (ELAN3_STATS)))
15009 +       {
15010 +           KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15011 +           return (-EFAULT);
15012 +       }
15013 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15014 +       return (0);
15015 +
15016 +    case ELAN3_SYS_STATS_MMU:
15017 +       if (copy_to_user (args->ptr, &elan3mmu_global_stats, sizeof (ELAN3MMU_GLOBAL_STATS)))
15018 +       {
15019 +           KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15020 +           return (-EFAULT);
15021 +       }
15022 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15023 +       return (0);
15024 +           
15025 +    default:
15026 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15027 +       return (-EINVAL);
15028 +    }
15029 +}
15030 +
15031 +/*
15032 + * /dev/elan3/controlX - control device
15033 + *
15034 + */
15035 +
15036 +typedef struct control_private
15037 +{
15038 +    u_int              pr_boundary_scan;
15039 +} CONTROL_PRIVATE;
15040 +
15041 +static int
15042 +control_open (struct inode *inode, struct file *file)
15043 +{
15044 +    CONTROL_PRIVATE *pr;
15045 +
15046 +    KMEM_ALLOC(pr, CONTROL_PRIVATE *, sizeof (CONTROL_PRIVATE), TRUE);
15047 +
15048 +    if (pr == NULL)
15049 +       return (-ENOMEM);
15050 +
15051 +    pr->pr_boundary_scan = 0;
15052 +    
15053 +    file->private_data = (void *) pr;
15054 +
15055 +    MOD_INC_USE_COUNT;
15056 +
15057 +    return (0);
15058 +}
15059 +
15060 +static int
15061 +control_release (struct inode *inode, struct file *file)
15062 +{
15063 +    ELAN3_DEV        *dev = elan3_devices[ELAN3_DEVICE(inode)];
15064 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
15065 +
15066 +    if (pr->pr_boundary_scan)
15067 +       ClearLinkBoundaryScan(dev);
15068 +
15069 +    KMEM_FREE (pr, sizeof(CONTROL_PRIVATE));
15070 +
15071 +    MOD_DEC_USE_COUNT;
15072 +    return (0);
15073 +}
15074 +
15075 +static int
15076 +control_ioctl (struct inode *inode, struct file *file, 
15077 +              unsigned int cmd, unsigned long arg)
15078 +{
15079 +    ELAN3_DEV        *dev = elan3_devices[ELAN3_DEVICE(inode)];
15080 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
15081 +    int                     res;
15082 +
15083 +    switch (cmd) 
15084 +    {
15085 +    case ELAN3IO_SET_BOUNDARY_SCAN:
15086 +       if (SetLinkBoundaryScan (dev) == 0)
15087 +           pr->pr_boundary_scan = 1;
15088 +       return (0);
15089 +
15090 +    case ELAN3IO_CLEAR_BOUNDARY_SCAN:
15091 +       if (pr->pr_boundary_scan == 0)
15092 +           return (-EINVAL);
15093 +
15094 +       pr->pr_boundary_scan = 0;
15095 +
15096 +       ClearLinkBoundaryScan (dev);
15097 +       return (0);
15098 +
15099 +    case ELAN3IO_READ_LINKVAL:
15100 +    {
15101 +       E3_uint32 val;
15102 +
15103 +       if (pr->pr_boundary_scan == 0)
15104 +           return (-EINVAL);
15105 +
15106 +       if (copy_from_user(&val, (E3_uint32 *)arg, sizeof(E3_uint32)))
15107 +           return (-EFAULT);
15108 +
15109 +       val = ReadBoundaryScanValue (dev, val);
15110 +
15111 +       if (copy_to_user((E3_uint32 *)arg, &val, sizeof(E3_uint32)))
15112 +           return (-EFAULT);
15113 +       return (0);
15114 +    }
15115 +       
15116 +    case ELAN3IO_WRITE_LINKVAL:
15117 +    {
15118 +       E3_uint32 val;
15119 +
15120 +       if (pr->pr_boundary_scan == 0)
15121 +           return (-EINVAL);
15122 +
15123 +       if (copy_from_user(&val, (E3_uint32 *)arg, sizeof(E3_uint32)))
15124 +           return (-EFAULT);
15125 +
15126 +       val = WriteBoundaryScanValue (dev, val);
15127 +
15128 +       if (copy_to_user((E3_uint32 *)arg, &val, sizeof(E3_uint32)))
15129 +           return (-EFAULT);
15130 +       
15131 +       return (0);
15132 +    }
15133 +
15134 +    case ELAN3IO_SET_POSITION:
15135 +    {
15136 +       ELAN3IO_SET_POSITION_STRUCT args;
15137 +
15138 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SET_POSITION_STRUCT)))
15139 +           return (-EFAULT);
15140 +       
15141 +       if (ComputePosition (&dev->Position, args.nodeId, args.numNodes, dev->Devinfo.dev_num_down_links_value) != 0)
15142 +           return (-EINVAL);
15143 +
15144 +       return (0);
15145 +    }
15146 +
15147 +    case ELAN3IO_SET_DEBUG:
15148 +    {
15149 +       ELAN3IO_SET_DEBUG_STRUCT args;
15150 +
15151 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SET_DEBUG_STRUCT)))
15152 +           return (-EFAULT);
15153 +
15154 +       if (! strcmp (args.what, "elan3_debug"))
15155 +           elan3_debug = args.value;
15156 +       else if (! strcmp (args.what, "elan3_debug_console"))
15157 +           elan3_debug_console = args.value;
15158 +       else if (! strcmp (args.what, "elan3_debug_buffer"))
15159 +           elan3_debug_buffer = args.value;
15160 +       else if (! strcmp (args.what, "elan3_debug_ignore_dev"))
15161 +           elan3_debug_ignore_dev = args.value;
15162 +       else if (! strcmp (args.what, "elan3_debug_ignore_ctxt"))
15163 +           elan3_debug_ignore_ctxt = args.value;
15164 +       else if (! strcmp (args.what, "elan3mmu_debug"))
15165 +           elan3mmu_debug = args.value;
15166 +       
15167 +       return (0);
15168 +    }
15169 +
15170 +    case ELAN3IO_NETERR_SERVER:
15171 +    {
15172 +       ELAN3IO_NETERR_SERVER_STRUCT args;
15173 +
15174 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_NETERR_SERVER_STRUCT)))
15175 +           return (-EFAULT);
15176 +       
15177 +       res = AddNeterrServerSyscall (args.elanid, args.addr, args.name, NULL);
15178 +       return (set_errno (res));
15179 +    }
15180 +    
15181 +    case ELAN3IO_NETERR_FIXUP:
15182 +    {
15183 +       NETERR_MSG *msg;
15184 +
15185 +       KMEM_ALLOC(msg, NETERR_MSG *, sizeof (NETERR_MSG), TRUE);
15186 +
15187 +       if (msg == NULL)
15188 +           return (set_errno (ENOMEM));
15189 +       
15190 +       if (copy_from_user (msg, (void *) arg, sizeof (NETERR_MSG)))
15191 +           res = EFAULT;
15192 +       else
15193 +           res = ExecuteNetworkErrorFixup (msg);
15194 +
15195 +       KMEM_FREE (msg, sizeof (NETERR_MSG));
15196 +       return (set_errno (res));
15197 +    }
15198 +
15199 +    case ELAN3IO_STATS:
15200 +       return (device_stats_ioctl (dev, arg));
15201 +
15202 +    case ELAN3IO_GET_DEVINFO:
15203 +    {
15204 +       if (copy_to_user ((void *) arg, &dev->Devinfo, sizeof (ELAN_DEVINFO)))
15205 +           return (-EFAULT);
15206 +       return (0);
15207 +    }
15208 +
15209 +    case ELAN3IO_GET_POSITION:
15210 +    {
15211 +       if (copy_to_user ((void *) arg, &dev->Position, sizeof (ELAN_POSITION)))
15212 +           return (-EFAULT);
15213 +       return (0);
15214 +    }
15215 +    default:
15216 +       return (-EINVAL);
15217 +    }
15218 +}
15219 +
15220 +static int
15221 +control_mmap (struct file *file, struct vm_area_struct *vma)
15222 +{
15223 +    ELAN3_DEV         *dev   = elan3_devices[ELAN3_DEVICE(file->f_dentry->d_inode)];
15224 +    int                space = OFF_TO_SPACE(vma->vm_pgoff << PAGE_SHIFT);
15225 +    int                off   = OFF_TO_OFFSET(vma->vm_pgoff << PAGE_SHIFT);
15226 +    int                size;
15227 +    ioaddr_t           addr;
15228 +    DeviceMappingHandle handle;
15229 +    physaddr_t         phys;
15230 +    
15231 +    if (space < ELAN3_BAR_SDRAM || space > ELAN3_BAR_EBUS)
15232 +       return (-EINVAL);
15233 +
15234 +    if (off < 0 || DeviceRegisterSize (dev, space, &size) != ESUCCESS || off > size)
15235 +       return (-EINVAL);
15236 +
15237 +    if (MapDeviceRegister(dev, space, &addr, off, PAGESIZE, &handle) != ESUCCESS)
15238 +       return (-EINVAL);
15239 +
15240 +    phys = kmem_to_phys((caddr_t) addr);
15241 +    UnmapDeviceRegister(dev, &handle);
15242 +
15243 +#ifdef NO_RMAP
15244 +    if (remap_page_range(vma->vm_start, phys, vma->vm_end - vma->vm_start, vma->vm_page_prot))
15245 +#else
15246 +    if (remap_page_range(vma, vma->vm_start, phys, vma->vm_end - vma->vm_start, vma->vm_page_prot))
15247 +#endif
15248 +       return (-EAGAIN);
15249 +
15250 +    return (0);
15251 +}
15252 +
15253 +/*
15254 + * /dev/elan3/sdramX - sdram access device
15255 + */
15256 +typedef struct mem_page
15257 +{
15258 +    struct mem_page *pg_next;
15259 +    sdramaddr_t      pg_addr;
15260 +    u_long          pg_pgoff;
15261 +    u_int           pg_ref;
15262 +} MEM_PAGE;
15263 +
15264 +#define MEM_HASH_SIZE  32
15265 +#define MEM_HASH(pgoff)        ((pgoff) & (MEM_HASH_SIZE-1))
15266 +
15267 +typedef struct mem_private
15268 +{
15269 +    ELAN3_DEV   *pr_dev;
15270 +    MEM_PAGE   *pr_pages[MEM_HASH_SIZE];
15271 +    spinlock_t  pr_lock;
15272 +} MEM_PRIVATE;
15273 +
15274 +static void 
15275 +mem_freepage (MEM_PRIVATE *pr, MEM_PAGE *pg)
15276 +{
15277 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_freepage: pr=%p pgoff=%lx pg=%p ref=%d\n", pr, pg->pg_pgoff, pg, pg->pg_ref);
15278 +
15279 +    elan3_sdram_free (pr->pr_dev, pg->pg_addr, PAGE_SIZE);
15280 +    KMEM_FREE (pg, sizeof(MEM_PAGE));
15281 +}
15282 +
15283 +static MEM_PAGE *
15284 +mem_getpage (MEM_PRIVATE *pr, u_long pgoff, virtaddr_t addr)
15285 +{
15286 +    int       hashval = MEM_HASH (pgoff);
15287 +    MEM_PAGE *npg = NULL;
15288 +    MEM_PAGE *pg;
15289 +
15290 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_getpage: pr=%p pgoff=%lx addr=%lx\n", pr, pgoff, addr);
15291 +    
15292 + again:
15293 +    spin_lock (&pr->pr_lock);
15294 +    for (pg = pr->pr_pages[hashval]; pg; pg = pg->pg_next)
15295 +       if (pg->pg_pgoff == pgoff)
15296 +           break;
15297 +    
15298 +    if (pg != NULL)
15299 +    {
15300 +       PRINTF (DBG_DEVICE, DBG_SEG, "mem_getpage: pr=%p pgoff=%lx addr=%lx -> found %p addr=%lx\n", pr, pgoff, addr, pg, pg->pg_addr);
15301 +
15302 +       pg->pg_ref++;
15303 +       spin_unlock (&pr->pr_lock);
15304 +
15305 +       if (npg != NULL)                                        /* we'd raced and someone else had created */
15306 +           mem_freepage (pr, npg);                             /* this page - so free off our new one */
15307 +       return (pg);
15308 +    }
15309 +    
15310 +    if (npg != NULL)                                           /* didn't find the page, so insert the */
15311 +    {                                                          /* new one we've just created */
15312 +       npg->pg_next = pr->pr_pages[hashval];
15313 +       pr->pr_pages[hashval] = npg;
15314 +       
15315 +       spin_unlock (&pr->pr_lock);
15316 +       return (npg);
15317 +    }
15318 +    
15319 +    spin_unlock (&pr->pr_lock);                                        /* drop spinlock before creating a new page */
15320 +    
15321 +    KMEM_ALLOC(npg, MEM_PAGE *, sizeof (MEM_PAGE), TRUE);
15322 +
15323 +    if (npg == NULL)
15324 +       return (NULL);
15325 +
15326 +    if ((npg->pg_addr = elan3_sdram_alloc (pr->pr_dev, PAGE_SIZE)) == 0)
15327 +    {
15328 +       KMEM_FREE (npg, sizeof (MEM_PAGE));
15329 +       return (NULL);
15330 +    }
15331 +
15332 +    /* zero the page before returning it to the user */
15333 +    elan3_sdram_zeroq_sdram (pr->pr_dev, npg->pg_addr, PAGE_SIZE);
15334 +    
15335 +    npg->pg_pgoff = pgoff;
15336 +    npg->pg_ref   = 1;
15337 +    
15338 +    /* created a new page - so have to rescan before inserting it */
15339 +    goto again;
15340 +}
15341 +
15342 +static void
15343 +mem_droppage (MEM_PRIVATE *pr, u_long pgoff, int dontfree)
15344 +{
15345 +    MEM_PAGE **ppg;
15346 +    MEM_PAGE  *pg;
15347 +
15348 +    spin_lock (&pr->pr_lock);
15349 +    for (ppg = &pr->pr_pages[MEM_HASH(pgoff)]; *ppg; ppg = &(*ppg)->pg_next)
15350 +       if ((*ppg)->pg_pgoff == pgoff)
15351 +           break;
15352 +
15353 +    pg = *ppg;
15354 +
15355 +    ASSERT (*ppg != NULL);
15356 +    
15357 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_droppage: pr=%p pgoff=%lx pg=%p ref=%d dontfree=%d\n", pr, pgoff, (*ppg), (*ppg)->pg_ref, dontfree);
15358 +
15359 +    if (--pg->pg_ref == 0 && !dontfree)
15360 +    {
15361 +       *ppg = pg->pg_next;
15362 +
15363 +       mem_freepage (pr, pg);
15364 +    }
15365 +
15366 +    spin_unlock (&pr->pr_lock);
15367 +}
15368 +
15369 +static int
15370 +mem_open (struct inode *inode, struct file *file)
15371 +{
15372 +    ELAN3_DEV    *dev = elan3_devices[ELAN3_DEVICE(inode)];
15373 +    MEM_PRIVATE *pr;
15374 +    register int i;
15375 +
15376 +    KMEM_ALLOC(pr, MEM_PRIVATE *, sizeof (MEM_PRIVATE), TRUE);
15377 +
15378 +    if (pr == NULL)
15379 +       return (-ENOMEM);
15380 +
15381 +    spin_lock_init (&pr->pr_lock);
15382 +    pr->pr_dev = dev;
15383 +    for (i = 0; i < MEM_HASH_SIZE; i++)
15384 +       pr->pr_pages[i] = NULL;
15385 +
15386 +    file->private_data = (void *) pr;
15387 +    
15388 +    MOD_INC_USE_COUNT;
15389 +    return (0);
15390 +}
15391 +
15392 +static int
15393 +mem_release (struct inode *node, struct file *file)
15394 +{
15395 +    MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data;
15396 +    MEM_PAGE    *pg, *next;
15397 +    int          i;
15398 +
15399 +    /* free off any pages that we'd allocated */
15400 +    spin_lock (&pr->pr_lock);
15401 +    for (i = 0; i < MEM_HASH_SIZE; i++)
15402 +    {
15403 +       for (pg = pr->pr_pages[i]; pg; pg = next)
15404 +       {
15405 +           next = pg->pg_next;
15406 +           mem_freepage (pr, pg);
15407 +       }
15408 +    }
15409 +    spin_unlock (&pr->pr_lock);
15410 +
15411 +    KMEM_FREE (pr, sizeof (MEM_PRIVATE));
15412 +
15413 +    MOD_DEC_USE_COUNT;
15414 +    return (0);
15415 +}
15416 +
15417 +static int
15418 +mem_ioctl (struct inode *inode, struct file *file, 
15419 +                 unsigned int cmd, unsigned long arg)
15420 +{
15421 +    return (-EINVAL);
15422 +}
15423 +
15424 +static void mem_vma_open(struct vm_area_struct *vma)
15425 +{
15426 +    MEM_PRIVATE   *pr = (MEM_PRIVATE *) vma->vm_private_data;
15427 +    unsigned long addr;
15428 +    unsigned long pgoff;
15429 +
15430 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
15431 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
15432 +
15433 +    preemptable_start {
15434 +       for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++) {
15435 +           mem_getpage (pr, pgoff, addr);
15436 +           preemptable_check();
15437 +       }
15438 +    } preemptable_end;
15439 +}
15440 +
15441 +static void mem_vma_close(struct vm_area_struct *vma)
15442 +{
15443 +    MEM_PRIVATE  *pr  = (MEM_PRIVATE *) vma->vm_private_data;
15444 +    unsigned long addr;
15445 +    unsigned long pgoff;
15446 +
15447 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
15448 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
15449 +
15450 +    /* NOTE: the call to close may not have the same vm_start/vm_end values as 
15451 +     *       were passed into mmap()/open() - since if a partial unmap had occurred
15452 +     *       then the vma could have been shrunk or even split.
15453 +     *
15454 +     *       if the vma is split then a vma_open() will be called for the top
15455 +     *       portion - thus causing the reference counts to become incorrect.
15456 +     *
15457 +     * We drop the reference to any pages we're notified about - so they get freed
15458 +     * earlier than when the device is finally released.
15459 +     */
15460 +    for (pgoff = vma->vm_pgoff, addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
15461 +       mem_droppage (pr, pgoff, 0);
15462 +}
15463 +
15464 +static struct vm_operations_struct mem_vm_ops = {
15465 +    open:              mem_vma_open,
15466 +    close:             mem_vma_close,
15467 +};
15468 +
15469 +static int
15470 +mem_mmap (struct file *file, struct vm_area_struct *vma)
15471 +{
15472 +    MEM_PRIVATE  *pr = (MEM_PRIVATE *) file->private_data;
15473 +    MEM_PAGE     *pg;
15474 +    unsigned long addr;
15475 +    unsigned long pgoff;
15476 +
15477 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx prot=%lx file=%p\n",
15478 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_page_prot.pgprot , file);
15479 +
15480 +    preemptable_start {
15481 +       for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
15482 +       {
15483 +           if ((pg = mem_getpage (pr, pgoff, addr)) == NULL)
15484 +               goto failed;
15485 +           
15486 +#ifdef LINUX_SPARC
15487 +           pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
15488 +           pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
15489 +#elif defined(pgprot_noncached)
15490 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
15491 +#endif
15492 +           
15493 +#if defined(__ia64__)
15494 +           if (enable_sdram_writecombining)
15495 +               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
15496 +#endif
15497 +           PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: addr %lx -> pg=%p addr=%lx phys=%lx flags=%lx prot=%lx\n",
15498 +                   addr, pg, pg->pg_addr, elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), vma->vm_flags, vma->vm_page_prot.pgprot);
15499 +           
15500 +#ifdef NO_RMAP
15501 +           if (remap_page_range (addr, elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), PAGE_SIZE, vma->vm_page_prot))
15502 +#else
15503 +           if (remap_page_range (vma, addr, elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), PAGE_SIZE, vma->vm_page_prot))
15504 +#endif
15505 +           {
15506 +               mem_droppage (pr, pgoff, 0);                    /* drop our reference to this page */
15507 +               goto failed;
15508 +           }
15509 +
15510 +           preemptable_check();
15511 +       }
15512 +    } preemptable_end;
15513 +
15514 +    /* Don't try to swap out Elan SDRAM pages.. */
15515 +    vma->vm_flags |= VM_RESERVED;
15516 +    
15517 +    /*
15518 +     * Don't dump SDRAM pages to a core file 
15519 +     * (Pity I would really like to do this but it crashes in elf_core_dump() as
15520 +     * it can only handle pages that are in the mem_map area (addy 11/01/2002))
15521 +     */
15522 +    vma->vm_flags |= VM_IO;
15523 +
15524 +    vma->vm_ops          = &mem_vm_ops;
15525 +    vma->vm_file         = file;
15526 +    vma->vm_private_data = (void *) pr;
15527 +
15528 +    return (0);
15529 +
15530 + failed:
15531 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: failed\n");
15532 +
15533 +    /* free off any pages we've already allocated/referenced */
15534 +    while ((--pgoff) >= vma->vm_pgoff)
15535 +       mem_droppage (pr, pgoff, 0);
15536 +
15537 +    return (-ENOMEM);
15538 +}
15539 +
15540 +/*
15541 + * /dev/elan3/userX - control device
15542 + *
15543 + * "user_private" can be referenced from a number of places
15544 + *   1) the "file" structure.
15545 + *   2) the "mm" coproc ops
15546 + *   3) the "mmap" of the command port.
15547 + *
15548 + */
15549 +typedef struct user_private
15550 +{
15551 +    spinlock_t        pr_lock;
15552 +    atomic_t         pr_mappings;
15553 +    atomic_t          pr_ref;
15554 +    ELAN3_CTXT        *pr_ctxt;
15555 +    struct mm_struct *pr_mm;
15556 +    coproc_ops_t      pr_coproc;
15557 +} USER_PRIVATE;
15558 +
15559 +static void
15560 +user_free (USER_PRIVATE *pr)
15561 +{
15562 +    /* Have to unreserve the FlagPage or else we leak memory like a sieve! */
15563 +    ClearPageReserved(pte_page(*find_pte_kernel((unsigned long) pr->pr_ctxt->FlagPage)));
15564 +
15565 +    elan3_detach(pr->pr_ctxt);
15566 +    elan3_free (pr->pr_ctxt);
15567 +
15568 +    KMEM_FREE (pr, sizeof(USER_PRIVATE));
15569 +
15570 +    MOD_DEC_USE_COUNT;
15571 +}
15572 +
15573 +static void
15574 +user_coproc_release (void *arg, struct mm_struct *mm)
15575 +{
15576 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15577 +
15578 +    PRINTF3 (pr->pr_ctxt, DBG_SEG, "user_coproc_release: ctxt=%p pr=%p ref=%d\n",
15579 +            pr->pr_ctxt, pr, atomic_read (&pr->pr_ref));
15580 +
15581 +    elan3mmu_pte_ctxt_unload (pr->pr_ctxt->Elan3mmu);
15582 +
15583 +    pr->pr_mm = NULL;
15584 +
15585 +    if (atomic_dec_and_test (&pr->pr_ref))
15586 +       user_free (pr);
15587 +}
15588 +
15589 +static void
15590 +user_coproc_sync_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
15591 +{
15592 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15593 +
15594 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_coproc_sync_range: start=%lx end=%lx\n", start, end);
15595 +
15596 +    ASSERT(start <= end);
15597 +
15598 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
15599 +}
15600 +
15601 +static void
15602 +user_coproc_invalidate_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
15603 +{
15604 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15605 +
15606 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_coproc_invalidate_range: start=%lx end=%lx\n", start, end);
15607 +
15608 +    ASSERT(start <= end);
15609 +
15610 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
15611 +}
15612 +
15613 +static void
15614 +user_coproc_update_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
15615 +{
15616 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15617 +
15618 +    ASSERT(start <= end && ((start & PAGEOFFSET) == 0) && ((end & PAGEOFFSET) == 0));
15619 +
15620 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_coproc_update_range: start=%lx end=%lx\n", start, end);
15621 +
15622 +    elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu, mm,(caddr_t) start, end-start);
15623 +}
15624 +
15625 +static void
15626 +user_coproc_change_protection (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot)
15627 +{
15628 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15629 +
15630 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_coproc_change_protection: start=%lx end=%lx\n", start, end);
15631 +
15632 +    ASSERT(start <= end);
15633 +
15634 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
15635 +}
15636 +
15637 +static void
15638 +user_coproc_sync_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
15639 +{
15640 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15641 +
15642 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_coproc_sync_page: addr=%lx\n", addr);
15643 +
15644 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
15645 +}
15646 +
15647 +static void
15648 +user_coproc_invalidate_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
15649 +{
15650 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15651 +    
15652 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_coproc_invalidate_page: addr=%lx\n", addr);
15653 +
15654 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
15655 +}
15656 +
15657 +static void
15658 +user_coproc_update_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
15659 +{
15660 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15661 +
15662 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_coproc_update_page: addr=%lx\n", addr);
15663 +
15664 +    elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu,vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
15665 +}
15666 +
15667 +int
15668 +user_ptrack_handler (void *arg, int phase, struct task_struct *child)
15669 +{
15670 +    USER_PRIVATE *pr   = (USER_PRIVATE *) arg;
15671 +    ELAN3_CTXT    *ctxt = pr->pr_ctxt;
15672 +
15673 +    PRINTF5 (pr->pr_ctxt, DBG_FN, "user_ptrack_handler: ctxt=%p pr=%p ref=%d phase %d mm->ref %d\n", 
15674 +            pr->pr_ctxt, pr, atomic_read (&pr->pr_ref), phase, atomic_read (&current->mm->mm_count));
15675 +
15676 +    if (phase == PTRACK_PHASE_EXIT)
15677 +    {
15678 +       /* this will force the helper thread to exit */
15679 +       elan3_swapout (ctxt, CTXT_EXITING);
15680 +       
15681 +       if (atomic_dec_and_test (&pr->pr_ref))
15682 +           user_free (pr);
15683 +    }  
15684 +    return PTRACK_FINISHED;
15685 +}
15686 +
15687 +static int
15688 +user_open (struct inode *inode, struct file *file)
15689 +{
15690 +    ELAN3_DEV     *dev = elan3_devices[ELAN3_DEVICE(inode)];
15691 +    USER_PRIVATE *pr;
15692 +    ELAN3_CTXT    *ctxt;
15693 +
15694 +    if (dev == NULL)
15695 +       return (-ENXIO);
15696 +
15697 +    KMEM_ALLOC(pr, USER_PRIVATE *, sizeof (USER_PRIVATE), TRUE);
15698 +
15699 +    if (pr == NULL)
15700 +       return (-ENOMEM);
15701 +    
15702 +    if ((ctxt = elan3_alloc (dev, 0)) == NULL)
15703 +    {
15704 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
15705 +       return (-ENOMEM);
15706 +    }
15707 +
15708 +    if (sys_init (ctxt) == NULL)
15709 +    {
15710 +       elan3_detach(ctxt);
15711 +       elan3_free (ctxt);
15712 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
15713 +       return (-ENOMEM);
15714 +    }
15715 +
15716 +    /* initialise refcnt to 3 - one for "file", one for XA handler, one for the coproc ops */
15717 +    atomic_set (&pr->pr_ref, 3);
15718 +
15719 +    atomic_set (&pr->pr_mappings, 0);
15720 +    spin_lock_init (&pr->pr_lock);
15721 +
15722 +    pr->pr_ctxt = ctxt;
15723 +    pr->pr_mm   = current->mm;
15724 +
15725 +    /* register an ptrack handler to force the helper thread to exit when we do */
15726 +    if (ptrack_register (user_ptrack_handler, pr) < 0)
15727 +    {
15728 +       elan3_detach(ctxt);
15729 +       elan3_free (ctxt);
15730 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
15731 +       return (-ENOMEM);
15732 +    }
15733 +
15734 +    /* register a coproc callback to notify us of translation changes */
15735 +    
15736 +    pr->pr_coproc.arg               = (void *) pr;
15737 +    pr->pr_coproc.release           = user_coproc_release;
15738 +    pr->pr_coproc.sync_range        = user_coproc_sync_range;
15739 +    pr->pr_coproc.invalidate_range  = user_coproc_invalidate_range;
15740 +    pr->pr_coproc.update_range      = user_coproc_update_range;
15741 +    pr->pr_coproc.change_protection = user_coproc_change_protection;
15742 +    pr->pr_coproc.sync_page         = user_coproc_sync_page;
15743 +    pr->pr_coproc.invalidate_page   = user_coproc_invalidate_page;
15744 +    pr->pr_coproc.update_page       = user_coproc_update_page;
15745 +    
15746 +    spin_lock (&current->mm->page_table_lock);
15747 +    register_coproc_ops (current->mm, &pr->pr_coproc);
15748 +    spin_unlock (&current->mm->page_table_lock);
15749 +
15750 +    file->private_data = (void *) pr;
15751 +
15752 +    PRINTF2 (pr->pr_ctxt, DBG_FN, "user_open: done ctxt=%p pr=%p\n", ctxt, pr);
15753 +
15754 +    MOD_INC_USE_COUNT;
15755 +    return (0);
15756 +}
15757 +
15758 +static int
15759 +user_release (struct inode *inode, struct file *file)
15760 +{
15761 +    USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
15762 +    
15763 +    PRINTF3 (pr->pr_ctxt, DBG_FN, "user_release: ctxt=%p pr=%p ref=%d\n", pr->pr_ctxt, pr,
15764 +            atomic_read (&pr->pr_ref));
15765 +
15766 +    if (atomic_dec_and_test (&pr->pr_ref))
15767 +       user_free (pr);
15768 +
15769 +    return (0);
15770 +}
15771 +
15772 +static int
15773 +user_ioctl (struct inode *inode, struct file *file, 
15774 +           unsigned int cmd, unsigned long arg)
15775 +{
15776 +    USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
15777 +    ELAN3_CTXT    *ctxt = pr->pr_ctxt;
15778 +    SYS_CTXT     *sctx = (SYS_CTXT *) ctxt->Private;
15779 +    int           res  = 0;
15780 +
15781 +    if (current->mm != pr->pr_mm)
15782 +       return (-EINVAL);
15783 +    
15784 +    PRINTF4 (ctxt, DBG_FN, "user_ioctl: ctxt=%p cmd=%x(%d) arg=%lx\n", ctxt, cmd, _IOC_NR(cmd), arg);
15785 +
15786 +    switch (cmd)
15787 +    {
15788 +    case ELAN3IO_FREE:
15789 +       if (atomic_read (&pr->pr_mappings) > 0)
15790 +           return (-EINVAL);
15791 +       
15792 +       spin_lock (&current->mm->page_table_lock);
15793 +       if (pr->pr_mm != current->mm)
15794 +           spin_unlock (&current->mm->page_table_lock);
15795 +       else
15796 +       {
15797 +           unregister_coproc_ops (current->mm, &pr->pr_coproc);
15798 +           spin_unlock (&current->mm->page_table_lock);
15799 +
15800 +           user_coproc_release (pr, current->mm);
15801 +       }
15802 +
15803 +       if (ptrack_registered (user_ptrack_handler, pr))
15804 +       {
15805 +           ptrack_deregister (user_ptrack_handler, pr);
15806 +           user_ptrack_handler (pr, PTRACK_PHASE_EXIT, NULL);
15807 +       }
15808 +       break;
15809 +       
15810 +    case ELAN3IO_ATTACH:
15811 +    {
15812 +       ELAN_CAPABILITY *cap;
15813 +
15814 +       KMEM_ALLOC(cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), TRUE);
15815 +
15816 +       if (cap == NULL)
15817 +           return (set_errno (EFAULT));
15818 +
15819 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
15820 +           res = EFAULT;
15821 +       else
15822 +       {
15823 +           if ((res = elan3_attach (ctxt, cap)) == 0)
15824 +           {
15825 +               if (copy_to_user ((void *) arg, cap, sizeof (ELAN_CAPABILITY)))
15826 +               {
15827 +                   elan3_detach (ctxt);
15828 +                   res = EFAULT;
15829 +               }
15830 +           }
15831 +       }
15832 +       KMEM_FREE (cap, sizeof(ELAN_CAPABILITY));
15833 +       break;
15834 +    }
15835 +    
15836 +    case ELAN3IO_DETACH:
15837 +       spin_lock (&pr->pr_lock);
15838 +       if (atomic_read (&pr->pr_mappings) > 0)
15839 +           res = EINVAL;
15840 +       else
15841 +           elan3_detach (ctxt);
15842 +       spin_unlock (&pr->pr_lock);
15843 +       break;
15844 +
15845 +    case ELAN3IO_ADDVP:
15846 +    {
15847 +       ELAN3IO_ADDVP_STRUCT *args;
15848 +
15849 +       KMEM_ALLOC(args, ELAN3IO_ADDVP_STRUCT *, sizeof (ELAN3IO_ADDVP_STRUCT), TRUE);
15850 +
15851 +       if (args == NULL)
15852 +           return (set_errno (ENOMEM));
15853 +       
15854 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN3IO_ADDVP_STRUCT)))
15855 +           res = EFAULT;
15856 +       else
15857 +       {
15858 +           if ( (res=elan3_addvp (ctxt, args->process, &args->capability)) != 0)
15859 +               PRINTF0 (ctxt, DBG_FN, "ELAN3IO_ADDVP elan3_addvp failed \n");  
15860 +       }
15861 +
15862 +       KMEM_FREE (args, sizeof (ELAN3IO_ADDVP_STRUCT));
15863 +       break;
15864 +    }
15865 +
15866 +    case ELAN3IO_REMOVEVP:
15867 +       res = elan3_removevp (ctxt, arg);
15868 +       break;
15869 +       
15870 +    case ELAN3IO_BCASTVP:
15871 +    {
15872 +       ELAN3IO_BCASTVP_STRUCT args;
15873 +
15874 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_BCASTVP_STRUCT)))
15875 +           return (-EFAULT);
15876 +       
15877 +       res = elan3_addbcastvp (ctxt, args.process, args.lowvp, args.highvp);
15878 +       break;
15879 +    }
15880 +
15881 +    case ELAN3IO_LOAD_ROUTE:
15882 +    {
15883 +       ELAN3IO_LOAD_ROUTE_STRUCT args;
15884 +
15885 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_LOAD_ROUTE_STRUCT)))
15886 +           return (-EFAULT);
15887 +       
15888 +       res = elan3_load_route (ctxt, args.process, args.flits);
15889 +       break;
15890 +    }
15891 +
15892 +    case ELAN3IO_CHECK_ROUTE:
15893 +    {
15894 +       ELAN3IO_CHECK_ROUTE_STRUCT args;
15895 +
15896 +       args.routeError = 0;
15897 +
15898 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_LOAD_ROUTE_STRUCT)))
15899 +           return (-EFAULT);
15900 +       
15901 +       if ((res = elan3_check_route (ctxt, args.process, args.flits, & args.routeError)) ==  ESUCCESS)
15902 +       {
15903 +           if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_CHECK_ROUTE_STRUCT)))
15904 +               return (-EFAULT);
15905 +       }
15906 +       break;
15907 +    }
15908 +
15909 +    case ELAN3IO_PROCESS_2_LOCATION:
15910 +    {
15911 +       ELAN3IO_PROCESS_2_LOCATION_STRUCT args;
15912 +       ELAN_LOCATION                    loc;
15913 +
15914 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_PROCESS_2_LOCATION_STRUCT)))
15915 +           return (-EFAULT);
15916 +
15917 +       krwlock_write (&ctxt->VpLock);
15918 +       loc = ProcessToLocation (ctxt, NULL, args.process , NULL);
15919 +       krwlock_done (&ctxt->VpLock);
15920 +
15921 +       args.loc = loc;
15922 +
15923 +       if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_PROCESS_2_LOCATION_STRUCT)))
15924 +           return (-EFAULT);
15925 +
15926 +       break;
15927 +    }
15928 +
15929 +    case ELAN3IO_GET_ROUTE:
15930 +    {
15931 +       ELAN3IO_GET_ROUTE_STRUCT args;
15932 +
15933 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_ROUTE_STRUCT)))
15934 +           return (-EFAULT);
15935 +       
15936 +       if ((res = elan3_get_route (ctxt, args.process, args.flits)) ==  ESUCCESS)
15937 +       {
15938 +           if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_GET_ROUTE_STRUCT)))
15939 +               return (-EFAULT);
15940 +       }
15941 +       break;
15942 +    }
15943 +
15944 +    case ELAN3IO_RESET_ROUTE:
15945 +    {
15946 +       ELAN3IO_RESET_ROUTE_STRUCT args;
15947 +
15948 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_RESET_ROUTE_STRUCT)))
15949 +           return (-EFAULT);
15950 +       
15951 +       res = elan3_reset_route (ctxt, args.process);
15952 +       break;
15953 +    }
15954 +
15955 +    case ELAN3IO_VP2NODEID:
15956 +    {
15957 +       ELAN3IO_VP2NODEID_STRUCT *vp2nodeId;
15958 +       ELAN_LOCATION           location;
15959 +
15960 +       KMEM_ALLOC (vp2nodeId, ELAN3IO_VP2NODEID_STRUCT *, sizeof(ELAN3IO_VP2NODEID_STRUCT), TRUE);
15961 +       if (vp2nodeId == NULL) 
15962 +           return (set_errno (ENOMEM));
15963 +
15964 +       if (copy_from_user (vp2nodeId, (void *) arg, sizeof (ELAN3IO_VP2NODEID_STRUCT))) {
15965 +           KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
15966 +           return (-EFAULT);
15967 +       }
15968 +
15969 +       krwlock_write (&ctxt->VpLock);
15970 +       location = ProcessToLocation (ctxt, NULL, vp2nodeId->process , NULL);
15971 +       krwlock_done (&ctxt->VpLock);
15972 +
15973 +       vp2nodeId->nodeId = location.loc_node;
15974 +       if (copy_to_user ( (void *) arg, vp2nodeId, sizeof (ELAN3IO_VP2NODEID_STRUCT))) {
15975 +           KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
15976 +           return (-EFAULT);
15977 +       }
15978 +
15979 +       KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
15980 +
15981 +       break;
15982 +    }
15983 +
15984 +    case ELAN3IO_PROCESS:
15985 +       return (elan3_process (ctxt));
15986 +
15987 +    case ELAN3IO_SETPERM:
15988 +    {
15989 +       ELAN3IO_SETPERM_STRUCT args;
15990 +
15991 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SETPERM_STRUCT)))
15992 +           return (-EFAULT);
15993 +
15994 +       res = elan3mmu_setperm (ctxt->Elan3mmu, args.maddr, args.eaddr, args.len, args.perm);
15995 +       break;
15996 +    }
15997 +
15998 +    case ELAN3IO_CLEARPERM:
15999 +    {
16000 +       ELAN3IO_CLEARPERM_STRUCT args;
16001 +
16002 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_CLEARPERM_STRUCT)))
16003 +           return (-EFAULT);
16004 +
16005 +       elan3mmu_clrperm (ctxt->Elan3mmu, args.eaddr, args.len);
16006 +       break;
16007 +    }
16008 +
16009 +    case ELAN3IO_CHANGEPERM:
16010 +    {
16011 +       ELAN3IO_CHANGEPERM_STRUCT args;
16012 +
16013 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_CHANGEPERM_STRUCT)))
16014 +           return (-EFAULT);
16015 +
16016 +       res = EINVAL;
16017 +       break;
16018 +    }
16019 +
16020 +    case ELAN3IO_HELPER_THREAD:
16021 +       res = elan3_lwp (ctxt);
16022 +       break;
16023 +       
16024 +    case ELAN3IO_WAITCOMMAND:
16025 +       res = WaitForCommandPort (ctxt);
16026 +       break;
16027 +
16028 +    case ELAN3IO_BLOCK_INPUTTER:
16029 +       elan3_block_inputter (ctxt, arg);
16030 +       break;
16031 +
16032 +    case ELAN3IO_SET_FLAGS:
16033 +       sctx->Flags = arg;
16034 +       break;
16035 +
16036 +    case ELAN3IO_SET_SIGNAL:
16037 +       sctx->signal = arg;
16038 +       break;
16039 +
16040 +    case ELAN3IO_WAITEVENT:
16041 +       res = sys_waitevent (ctxt, (E3_Event *) arg);
16042 +       break;
16043 +
16044 +    case ELAN3IO_ALLOC_EVENTCOOKIE:
16045 +       res = cookie_alloc_cookie (sctx->Table, arg);
16046 +       break;
16047 +
16048 +    case ELAN3IO_FREE_EVENTCOOKIE:
16049 +       res = cookie_free_cookie (sctx->Table, arg);
16050 +       break;
16051 +
16052 +    case ELAN3IO_ARM_EVENTCOOKIE:
16053 +       res = cookie_arm_cookie (sctx->Table, arg);
16054 +       break;
16055 +
16056 +    case ELAN3IO_WAIT_EVENTCOOKIE:
16057 +       res = cookie_wait_cookie (sctx->Table, arg);
16058 +       break;
16059 +
16060 +    case ELAN3IO_SWAPSPACE:
16061 +       if (fuword (&((SYS_SWAP_SPACE *) arg)->Magic) != SYS_SWAP_MAGIC)
16062 +           return (set_errno (EINVAL));
16063 +       
16064 +       ((SYS_CTXT *) ctxt->Private)->Swap = (SYS_SWAP_SPACE *) arg;
16065 +       break;
16066 +
16067 +    case ELAN3IO_EXCEPTION_SPACE:
16068 +       if (fuword (&((SYS_EXCEPTION_SPACE *) arg)->Magic) != SYS_EXCEPTION_MAGIC)
16069 +           return (set_errno (EINVAL));
16070 +
16071 +       ((SYS_CTXT *) ctxt->Private)->Exceptions = (SYS_EXCEPTION_SPACE *) arg;
16072 +       break;
16073 +
16074 +    case ELAN3IO_GET_EXCEPTION:
16075 +    {
16076 +       SYS_EXCEPTION *exception;
16077 +
16078 +       if (((SYS_CTXT *) ctxt->Private)->Exceptions == NULL)
16079 +           return (set_errno (EINVAL));
16080 +       
16081 +       KMEM_ALLOC(exception, SYS_EXCEPTION *, sizeof (SYS_EXCEPTION), TRUE);
16082 +
16083 +       if (exception == NULL)
16084 +           return (set_errno (ENOMEM));
16085 +
16086 +       if ((res = sys_getException (((SYS_CTXT *) ctxt->Private), exception)) == 0 &&
16087 +           copy_to_user ((void *) arg, exception, sizeof (SYS_EXCEPTION)))
16088 +           res = EFAULT;
16089 +       
16090 +       KMEM_FREE (exception, sizeof (SYS_EXCEPTION));
16091 +       break;
16092 +    }
16093 +    
16094 +    case ELAN3IO_UNLOAD:
16095 +    {
16096 +       ELAN3MMU             *elan3mmu = ctxt->Elan3mmu;
16097 +       ELAN3IO_UNLOAD_STRUCT args;
16098 +       int                   span;
16099 +       unsigned long         flags;
16100 +       E3_Addr               eaddr;
16101 +       caddr_t               addr;
16102 +       size_t                len;
16103 +
16104 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_UNLOAD_STRUCT)))
16105 +           return (-EFAULT);
16106 +
16107 +       addr = (caddr_t) args.addr;
16108 +       len  = args.len;
16109 +
16110 +       if (((unsigned long) addr & PAGEMASK) || (len & PAGEMASK) || (len < 0))
16111 +           return -EINVAL;
16112 +
16113 +       spin_lock_irqsave (&elan3mmu->elan3mmu_lock, flags);
16114 +       for (; len; len -= span, addr += span)
16115 +       {
16116 +           ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
16117 +           
16118 +           if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < addr)
16119 +               span = len;
16120 +           else if (rgn->rgn_mbase > addr)
16121 +               span = MIN(len, rgn->rgn_mbase - addr);
16122 +           else
16123 +           {
16124 +               span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - addr);
16125 +               eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
16126 +               
16127 +               elan3mmu_unload (elan3mmu, eaddr, span, PTE_UNLOAD);
16128 +           }
16129 +       }
16130 +       spin_unlock_irqrestore (&elan3mmu->elan3mmu_lock, flags);
16131 +       
16132 +       return 0;
16133 +    }
16134 +
16135 +    case ELAN3IO_GET_DEVINFO:
16136 +    {
16137 +       ELAN3IO_GET_DEVINFO_STRUCT args;
16138 +
16139 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_DEVINFO_STRUCT)))
16140 +           return (-EFAULT);
16141 +       
16142 +       if (copy_to_user ((void *) args.devinfo, &ctxt->Device->Devinfo, sizeof (ELAN_DEVINFO))) 
16143 +           res = EFAULT;
16144 +       break;
16145 +    }
16146 +
16147 +    case ELAN3IO_GET_POSITION:
16148 +    {
16149 +       ELAN3IO_GET_POSITION_STRUCT args;
16150 +
16151 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_POSITION_STRUCT)))
16152 +           return (-EFAULT);   
16153 +
16154 +       if (copy_to_user ((void *) args.position, &ctxt->Device->Position, sizeof (ELAN_POSITION)))
16155 +           res = EFAULT;
16156 +       break;
16157 +    }
16158 +
16159 +    default:
16160 +       return (-EINVAL);
16161 +    }
16162 +
16163 +    return (res ? set_errno (res) : 0);
16164 +}
16165 +
16166 +static void user_vma_open(struct vm_area_struct *vma)
16167 +{
16168 +    USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data;
16169 +
16170 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
16171 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
16172 +
16173 +    if (vma->vm_pgoff == ELAN3IO_OFF_COMMAND_PAGE)
16174 +       atomic_inc (&pr->pr_mappings);
16175 +}
16176 +
16177 +static void user_vma_close(struct vm_area_struct *vma)
16178 +{
16179 +    USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data;
16180 +
16181 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
16182 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
16183 +
16184 +    if (vma->vm_pgoff == ELAN3IO_OFF_COMMAND_PAGE)
16185 +       if (atomic_dec_and_test (&pr->pr_mappings))
16186 +           pr->pr_ctxt->CommandPageMapping = NULL;
16187 +}
16188 +
16189 +static struct vm_operations_struct user_vm_ops = {
16190 +    open:              user_vma_open,
16191 +    close:             user_vma_close,
16192 +};
16193 +
16194 +static int
16195 +user_mmap (struct file *file, struct vm_area_struct *vma)
16196 +{
16197 +    USER_PRIVATE  *pr   = (USER_PRIVATE *) file->private_data;
16198 +    ELAN3_CTXT     *ctxt = pr->pr_ctxt; 
16199 +    ioaddr_t       ioaddr;
16200 +
16201 +    /* 
16202 +     * NOTE - since we need to maintain the reference count on
16203 +     *        the user_private we only permit single page 
16204 +     *        mmaps - this means that we will certainly see
16205 +     *        the correct number of closes to maintain the
16206 +     *        the reference count correctly.
16207 +     */
16208 +    
16209 +    if ((vma->vm_end - vma->vm_start) != PAGE_SIZE)
16210 +       return (-EINVAL);
16211 +
16212 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx flags=%lx prot=%lx file=%p\n",
16213 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags, vma->vm_page_prot.pgprot, vma->vm_file);
16214 +
16215 +    switch (vma->vm_pgoff)
16216 +    {
16217 +    default:
16218 +       return (-EINVAL);
16219 +       
16220 +    case ELAN3IO_OFF_COMMAND_PAGE:
16221 +       spin_lock (&pr->pr_lock);
16222 +       if (ctxt->CommandPage == (ioaddr_t) 0 || atomic_read (&pr->pr_mappings) != 0)
16223 +       {
16224 +           PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: command port - %s\n", ctxt->CommandPort ? "already mapped" : "not attached");
16225 +           spin_unlock (&pr->pr_lock);
16226 +           return (-EINVAL);
16227 +       }
16228 +#ifdef LINUX_SPARC
16229 +       pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
16230 +       pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
16231 +#elif defined(pgprot_noncached)
16232 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
16233 +#endif
16234 +
16235 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: commandport at %lx phys %llx prot %lx\n", 
16236 +               vma->vm_start, (unsigned long long) kmem_to_phys ((void *) ctxt->CommandPort), vma->vm_page_prot.pgprot);
16237 +
16238 +       /* Don't try to swap out physical pages.. */
16239 +       vma->vm_flags |= VM_RESERVED;
16240 +    
16241 +       /*
16242 +        * Don't dump addresses that are not real memory to a core file.
16243 +        */
16244 +       vma->vm_flags |= VM_IO;
16245 +
16246 +#ifdef NO_RMAP
16247 +       if (remap_page_range (vma->vm_start, kmem_to_phys ((void *) ctxt->CommandPage), vma->vm_end - vma->vm_start, vma->vm_page_prot))
16248 +#else 
16249 +       if (remap_page_range (vma, vma->vm_start, kmem_to_phys ((void *) ctxt->CommandPage), vma->vm_end - vma->vm_start, vma->vm_page_prot))
16250 +#endif
16251 +       {
16252 +           spin_unlock (&pr->pr_lock);
16253 +           return (-EAGAIN);
16254 +       }
16255 +       ctxt->CommandPageMapping = (void *) vma->vm_start;
16256 +       
16257 +       atomic_inc (&pr->pr_mappings);
16258 +       
16259 +       spin_unlock (&pr->pr_lock);
16260 +       break;
16261 +
16262 +    case ELAN3IO_OFF_UREG_PAGE:
16263 +#ifdef LINUX_SPARC
16264 +       pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
16265 +       pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
16266 +#elif defined(pgprot_noncached)
16267 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
16268 +#endif
16269 +       ioaddr = ctxt->Device->RegPtr + (offsetof (E3_Regs, URegs) & PAGEMASK);
16270 +
16271 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: user_regs at %lx phys %llx prot %lx\n", vma->vm_start, 
16272 +               (unsigned long long) kmem_to_phys ((void *) ioaddr), vma->vm_page_prot.pgprot);
16273 +
16274 +       /* Don't try to swap out physical pages.. */
16275 +       vma->vm_flags |= VM_RESERVED;
16276 +    
16277 +       /*
16278 +        * Don't dump addresses that are not real memory to a core file.
16279 +        */
16280 +       vma->vm_flags |= VM_IO;
16281 +
16282 +#ifdef NO_RMAP
16283 +       if (remap_page_range (vma->vm_start, kmem_to_phys ((void *) ioaddr),
16284 +#else
16285 +       if (remap_page_range (vma, vma->vm_start, kmem_to_phys ((void *) ioaddr),
16286 +#endif
16287 +                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
16288 +           return (-EAGAIN);
16289 +       break;
16290 +       
16291 +    case ELAN3IO_OFF_FLAG_PAGE:
16292 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: flag page at %lx phys %llx\n", vma->vm_start, 
16293 +               (unsigned long long) kmem_to_phys ((void *) ctxt->FlagPage));
16294 +
16295 +       /* we do not want to have this area swapped out, lock it */
16296 +       vma->vm_flags |= VM_LOCKED;
16297 +
16298 +       /* Mark the page as reserved or else the remap_page_range() doesn't remap it */
16299 +       SetPageReserved(pte_page(*find_pte_kernel((unsigned long) ctxt->FlagPage)));
16300 +       
16301 +#ifdef NO_RMAP
16302 +       if (remap_page_range (vma->vm_start, kmem_to_phys ((void *) ctxt->FlagPage),
16303 +#else
16304 +       if (remap_page_range (vma, vma->vm_start, kmem_to_phys ((void *) ctxt->FlagPage),
16305 +#endif
16306 +                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
16307 +           return (-EAGAIN);
16308 +       break;
16309 +    }
16310 +
16311 +    ASSERT (vma->vm_ops == NULL);
16312 +    
16313 +    vma->vm_ops          = &user_vm_ops;
16314 +    vma->vm_file         = file;
16315 +    vma->vm_private_data = (void *) pr;
16316 +    
16317 +    return (0);
16318 +}
16319 +
16320 +/* driver entry points */
16321 +static int
16322 +elan3_open (struct inode *inode, struct file *file)
16323 +{
16324 +    if (elan3_devices[ELAN3_DEVICE(inode)] == NULL)
16325 +       return (-ENXIO);
16326 +
16327 +    PRINTF (DBG_DEVICE, DBG_FN, "elan3_open: device %d minor %d file=%p\n", ELAN3_DEVICE(inode), ELAN3_MINOR(inode), file);
16328 +    
16329 +    switch (ELAN3_MINOR (inode))
16330 +    {
16331 +    case ELAN3_MINOR_CONTROL:
16332 +       return (control_open (inode, file));
16333 +    case ELAN3_MINOR_MEM:
16334 +       return (mem_open (inode, file));
16335 +    case ELAN3_MINOR_USER:
16336 +       return (user_open (inode, file));
16337 +    default:
16338 +       return (-ENXIO);
16339 +    }
16340 +}
16341 +
16342 +static int
16343 +elan3_release (struct inode *inode, struct file *file)
16344 +{
16345 +    PRINTF (DBG_DEVICE, DBG_FN, "elan3_release: device %d minor %d file=%p\n", ELAN3_DEVICE(inode), ELAN3_MINOR(inode), file);
16346 +    
16347 +    switch (ELAN3_MINOR (inode))
16348 +    {
16349 +    case ELAN3_MINOR_CONTROL:
16350 +       return (control_release (inode, file));
16351 +    case ELAN3_MINOR_MEM:
16352 +       return (mem_release (inode, file));
16353 +    case ELAN3_MINOR_USER:
16354 +       return (user_release (inode, file));
16355 +    default:
16356 +       return (-ENXIO);
16357 +    }
16358 +}
16359 +
16360 +static int
16361 +elan3_ioctl (struct inode *inode, struct file *file, 
16362 +            unsigned int cmd, unsigned long arg)
16363 +{
16364 +    switch (ELAN3_MINOR (inode))
16365 +    {
16366 +    case ELAN3_MINOR_CONTROL:
16367 +       return (control_ioctl (inode, file, cmd, arg));
16368 +    case ELAN3_MINOR_MEM:
16369 +       return (mem_ioctl (inode, file, cmd, arg));
16370 +    case ELAN3_MINOR_USER:
16371 +       return (user_ioctl (inode, file, cmd, arg));
16372 +    default:
16373 +       return (-ENXIO);
16374 +    }
16375 +}
16376 +
16377 +
16378 +static int
16379 +elan3_mmap (struct file *file, struct vm_area_struct *vma)
16380 +{
16381 +    PRINTF (DBG_DEVICE, DBG_SEG, "elan3_mmap: instance %d minor %d start=%lx end=%lx pgoff=%lx flags=%lx prot=%lx\n", 
16382 +           ELAN3_DEVICE (file->f_dentry->d_inode), ELAN3_MINOR (file->f_dentry->d_inode),
16383 +           vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags, vma->vm_page_prot.pgprot);
16384 +
16385 +    switch (ELAN3_MINOR (file->f_dentry->d_inode))
16386 +    {
16387 +    case ELAN3_MINOR_CONTROL:
16388 +       return (control_mmap (file, vma));
16389 +    case ELAN3_MINOR_MEM:
16390 +       return (mem_mmap (file, vma));
16391 +    case ELAN3_MINOR_USER:
16392 +       return (user_mmap (file, vma));
16393 +    default:
16394 +       return (-ENXIO);
16395 +    }
16396 +}
16397 +
16398 +static irqreturn_t
16399 +InterruptHandlerWrapper(int irq, void *dev_id, struct pt_regs *regs)
16400 +{
16401 +    if (InterruptHandler ((ELAN3_DEV *)dev_id) == 0)
16402 +       return IRQ_HANDLED;
16403 +    else
16404 +       return IRQ_NONE;
16405 +}
16406 +
16407 +
16408 +/* 
16409 + * Elan specific PCI configuration registers.
16410 + */
16411 +
16412 +#define PCI_CONF_PARITY_PHYS_LO         0x40
16413 +#define PCI_CONF_PARITY_PHYS_HI         0x44
16414 +#define PCI_CONF_PARITY_PHASE_ADDR      0x46
16415 +#define PCI_CONF_PARITY_MASTER_TYPE     0x47
16416 +#define PCI_CONF_ELAN3_CTRL              0x48
16417 +
16418 +#define ECTRL_EXTEND_LATENCY            (1 << 0)
16419 +#define ECTRL_ENABLE_PREFETCH           (1 << 1)
16420 +#define ECTRL_SOFTWARE_INTERNAL_RESET   (1 << 2)
16421 +#define ECTRL_REDUCED_RETRY_RATE        (1 << 3)
16422 +#define ECTRL_CLOCK_DIVIDE_RATE_SHIFT   4
16423 +#define ECTRL_COMMS_DIVIDE_RATE_SHIFT   10
16424 +#define ECTRL_FORCE_COMMSCLK_LOCAL      (1 << 14)
16425 +
16426 +/*
16427 + * Configure PCI.
16428 + */
16429 +static int
16430 +ConfigurePci(ELAN3_DEV *dev)
16431 +{
16432 +       struct pci_dev *pci = dev->Osdep.pci;
16433 +       u32 rom_address;
16434 +
16435 +       if (pci_enable_device(pci))
16436 +           return (ENXIO);
16437 +
16438 +       /* disable ROM */
16439 +       pci_read_config_dword(pci, PCI_ROM_ADDRESS, &rom_address);
16440 +       rom_address &= ~PCI_ROM_ADDRESS_ENABLE;
16441 +       pci_write_config_dword(pci, PCI_ROM_ADDRESS, rom_address);
16442 +       mb();
16443 +
16444 +       /* this is in 32-bit WORDS */
16445 +       pci_write_config_byte(pci, PCI_CACHE_LINE_SIZE, (64 >> 2));
16446 +       mb();
16447 +
16448 +       /* allow 40 ticks to respond, 16 data phases */
16449 +       pci_write_config_byte(pci, PCI_LATENCY_TIMER, 255);
16450 +       mb();
16451 +
16452 +       /* don't enable PCI_COMMAND_SERR--see note in elandev_dunix.c */
16453 +       pci_write_config_word(pci, PCI_COMMAND, PCI_COMMAND_MEMORY 
16454 +           | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY);
16455 +       mb();
16456 +
16457 +       return ESUCCESS;
16458 +}
16459 +
16460 +/* 
16461 + * Reset chip to a known state.
16462 + */
16463 +static int
16464 +ResetElan(ELAN3_DEV *dev, ioaddr_t intPalAddr)
16465 +{
16466 +       struct pci_dev *pci = dev->Osdep.pci;
16467 +       int instance = dev->Instance;
16468 +       u32 val;
16469 +       u8 revid;
16470 +       int CasLatency;
16471 +       int res;
16472 +
16473 +       /* determine rev of board */
16474 +       pci_read_config_byte(pci, PCI_REVISION_ID, &revid);
16475 +
16476 +       /* GNAT 2328 - don't set ECTRL_ENABLE_PREFETCH on Elan rev A */
16477 +       val = ECTRL_EXTEND_LATENCY | (39 << ECTRL_CLOCK_DIVIDE_RATE_SHIFT)
16478 +           | (6 << ECTRL_COMMS_DIVIDE_RATE_SHIFT);
16479 +       switch (revid) 
16480 +       {
16481 +               case PCI_REVISION_ID_ELAN3_REVA:
16482 +                       printk("elan%d: is an elan3 (revision a) - not supported\n", instance);
16483 +                       return (EFAIL);
16484 +
16485 +               case PCI_REVISION_ID_ELAN3_REVB:        
16486 +                       val |= ECTRL_ENABLE_PREFETCH;
16487 +                       if (BackToBackMaster)
16488 +                               val |= ECTRL_FORCE_COMMSCLK_LOCAL;
16489 +                       printk("elan%d: is an elan3 (revision b)\n", instance);
16490 +                       break;
16491 +               default:
16492 +                       printk("elan%d: unsupported elan3 revision %d\n", 
16493 +                           instance, revid);
16494 +                       return EFAIL;
16495 +       }
16496 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, val);
16497 +       mb();
16498 +
16499 +       /*
16500 +        * GNAT: 2474
16501 +        * Hit reset on the Elan, then we MUST initialise the schedule status
16502 +        * register to drive reset on the link before the link can come out
16503 +        * of reset (15 uS). We need to keep it like this until we've 
16504 +        * initialised SDRAM
16505 +        */
16506 +       pci_read_config_dword(pci, PCI_CONF_ELAN3_CTRL, &val);
16507 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, 
16508 +           val | ECTRL_SOFTWARE_INTERNAL_RESET);
16509 +       mb();
16510 +
16511 +       /* Read the Vital Product Data to determine the cas latency */
16512 +       if ((res = ReadVitalProductData (dev, &CasLatency)) != ESUCCESS)
16513 +           return (res);
16514 +
16515 +       /*
16516 +        * Now clear the Software internal reset bit, and start the sdram
16517 +        */
16518 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, val);
16519 +       mb();
16520 +
16521 +       /* 
16522 +        * Enable SDRAM before sizing and initalising it for ECC.
16523 +        * NOTE: don't enable all sets of the cache (yet), nor ECC 
16524 +        */
16525 +       dev->Cache_Control_Reg = (CasLatency | REFRESH_RATE_16US);
16526 +
16527 +       write_reg32 (dev, Cache_Control_Reg.ContReg, (dev->Cache_Control_Reg | SETUP_SDRAM));
16528 +       mb();
16529 +
16530 +       INIT_SCHED_STATUS(dev, Sched_Initial_Value);
16531 +
16532 +       /*
16533 +        * Set the interrupt mask to 0 and enable the interrupt PAL
16534 +        * by writing any value to it.
16535 +        */
16536 +       SET_INT_MASK (dev, 0);
16537 +       writeb (0, intPalAddr);
16538 +
16539 +       return ESUCCESS;
16540 +}
16541 +
16542 +/*
16543 + * Determine the size of elan PCI address spaces.  EFAIL is returned if 
16544 + * unused or invalid BAR is specified, or if board reports I/O mapped space.
16545 + */
16546 +int
16547 +DeviceRegisterSize(ELAN3_DEV *dev, int rnumber, int *sizep)
16548 +{
16549 +       struct pci_dev *pdev = dev->Osdep.pci;
16550 +
16551 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
16552 +       *sizep = pci_resource_size(pdev, rnumber);
16553 +#else
16554 +       *sizep = pci_resource_end(pdev, rnumber) - pci_resource_start(pdev, rnumber) + 1;
16555 +#endif
16556 +       return ESUCCESS;
16557 +}
16558 +
16559 +/*
16560 + * Map PCI memory into kernel virtual address space.  On the alpha, 
16561 + * we just return appropriate kseg address, and Unmap is a no-op.
16562 + */
16563 +int
16564 +MapDeviceRegister(ELAN3_DEV *dev, int rnumber, ioaddr_t *addrp,
16565 +                 int off, int len, DeviceMappingHandle *handlep)
16566 +{      
16567 +       struct pci_dev *pdev = dev->Osdep.pci;
16568 +
16569 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
16570 +       u64 base = pci_get_base_address(pdev, rnumber);
16571 +       *addrp = (ioaddr_t) pci_base_to_kseg(base + off, pdev->bus->number);
16572 +
16573 +#else
16574 +       if (len == 0)
16575 +           len = pci_resource_end(pdev, rnumber) - pci_resource_start(pdev, rnumber) + 1;
16576 +       
16577 +       if (len == 0)
16578 +           return (EINVAL);
16579 +
16580 +       *addrp = (ioaddr_t) ioremap_nocache (pci_resource_start(pdev, rnumber) + off, len);
16581 +#endif
16582 +
16583 +       *handlep = (void *) *addrp;
16584 +
16585 +       return (*addrp ? ESUCCESS : ENOMEM);
16586 +}
16587 +void
16588 +UnmapDeviceRegister(ELAN3_DEV *dev, DeviceMappingHandle *handlep)
16589 +{
16590 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
16591 +    iounmap (*handlep);
16592 +#endif
16593 +}
16594 +
16595 +void
16596 +ElanBusError (ELAN3_DEV *dev)
16597 +{
16598 +       struct pci_dev  *pci = dev->Osdep.pci;  
16599 +       u8  phaseaddr, type;
16600 +       u16 status, cmd, physhi;
16601 +       u32 physlo;
16602 +
16603 +       printk("elan%d: bus error occured\n", dev->Instance);
16604 +
16605 +       pci_read_config_word (pci, PCI_STATUS,                  &status);
16606 +       pci_read_config_word (pci, PCI_COMMAND,                 &cmd);
16607 +       pci_read_config_dword(pci, PCI_CONF_PARITY_PHYS_LO,     &physlo);
16608 +       pci_read_config_word (pci, PCI_CONF_PARITY_PHYS_HI,     &physhi);
16609 +       pci_read_config_byte (pci, PCI_CONF_PARITY_PHASE_ADDR,  &phaseaddr); 
16610 +       pci_read_config_byte (pci, PCI_CONF_PARITY_MASTER_TYPE, &type);
16611 +
16612 +#define PCI_CONF_STAT_FORMAT   "\20" \
16613 +       "\6SIXTY_SIX_MHZ\7UDF\10FAST_BACK\11PARITY" \
16614 +       "\14SIG_TARGET_ABORT\15REC_TARGET_ABORT\16REC_MASTER_ABORT" \
16615 +       "\17SIG_SYSTEM_ERROR\20DETECTED_PARITY"
16616 +
16617 +       printk ("elan%d: status %x cmd %4x physaddr %04x%08x phase %x type %x\n",
16618 +               dev->Instance, status, cmd, physhi, physlo, phaseaddr, type);
16619 +}
16620 +
16621 +/*
16622 + * Local variables:
16623 + * c-file-style: "stroustrup"
16624 + * End:
16625 + */
16626 Index: linux-2.4.21/drivers/net/qsnet/elan3/elansyscall.c
16627 ===================================================================
16628 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/elansyscall.c     2004-02-23 16:02:56.000000000 -0500
16629 +++ linux-2.4.21/drivers/net/qsnet/elan3/elansyscall.c  2005-06-01 23:12:54.584441384 -0400
16630 @@ -0,0 +1,1230 @@
16631 +/*
16632 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
16633 + * 
16634 + *    For licensing information please see the supplied COPYING file
16635 + *
16636 + */
16637 +
16638 +#ident "@(#)$Id: elansyscall.c,v 1.99.2.1 2004/10/28 17:08:56 david Exp $"
16639 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elansyscall.c,v $*/
16640 +
16641 +#include <qsnet/kernel.h>
16642 +#include <qsnet/autoconf.h>
16643 +
16644 +#include <elan/elanmod.h>
16645 +#include <elan3/elanregs.h>
16646 +#include <elan3/elandev.h>
16647 +#include <elan3/elanvp.h>
16648 +#include <elan3/elan3mmu.h>
16649 +#include <elan3/elanctxt.h>
16650 +#include <elan3/elandebug.h>
16651 +#include <elan3/elansyscall.h>
16652 +#include <elan/devinfo.h>
16653 +
16654 +static int       sys_exception (ELAN3_CTXT *ctxt, int type, int proc, void *trap, va_list ap);
16655 +static int       sys_getWordItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep);
16656 +static int       sys_getBlockItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep);
16657 +static void      sys_putWordItem (ELAN3_CTXT *ctxt, int list, E3_uint32 value);
16658 +static void      sys_putBlockItem (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr);
16659 +static void      sys_putbackItem (ELAN3_CTXT *ctxt, int list, void *item);
16660 +static void      sys_freeWordItem (ELAN3_CTXT *ctxt, void *item);
16661 +static void      sys_freeBlockItem (ELAN3_CTXT *ctxt, void *item);
16662 +static int       sys_countItems (ELAN3_CTXT *ctxt, int list);
16663 +static int       sys_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
16664 +static void      sys_swapin (ELAN3_CTXT *ctxt);
16665 +static void      sys_swapout (ELAN3_CTXT *ctxt);
16666 +static void      sys_freePrivate (ELAN3_CTXT *ctxt);
16667 +static int       sys_fixupNetworkError (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef);
16668 +static int       sys_startFaultCheck (ELAN3_CTXT *ctxt);
16669 +static void      sys_endFaultCheck (ELAN3_CTXT *ctxt);
16670 +static E3_uint8  sys_load8 (ELAN3_CTXT *ctxt, E3_Addr addr);
16671 +static void      sys_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
16672 +static E3_uint16 sys_load16 (ELAN3_CTXT *ctxt, E3_Addr addr);
16673 +static void      sys_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
16674 +static E3_uint32 sys_load32 (ELAN3_CTXT *ctxt, E3_Addr addr);
16675 +static void      sys_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
16676 +static E3_uint64 sys_load64 (ELAN3_CTXT *ctxt, E3_Addr addr);
16677 +static void      sys_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
16678 +
16679 +static ELAN3_OPS elan3_sys_ops = {         /* context-operations dispatch table installed by sys_init() */
16680 +    ELAN3_OPS_VERSION,         /* Version */
16681 +
16682 +    sys_exception,             /* Exception */
16683 +    sys_getWordItem,           /* GetWordItem */
16684 +    sys_getBlockItem,          /* GetBlockItem */
16685 +    sys_putWordItem,           /* PutWordItem */
16686 +    sys_putBlockItem,          /* PutBlockItem */
16687 +    sys_putbackItem,           /* PutbackItem */
16688 +    sys_freeWordItem,          /* FreeWordItem */
16689 +    sys_freeBlockItem,         /* FreeBlockItem */
16690 +    sys_countItems,            /* CountItems */
16691 +    sys_event,                 /* Event */
16692 +    sys_swapin,                        /* Swapin */
16693 +    sys_swapout,               /* Swapout */
16694 +    sys_freePrivate,           /* FreePrivate */
16695 +    sys_fixupNetworkError,     /* FixupNetworkError */
16696 +    NULL,                      /* DProcTrap */
16697 +    NULL,                      /* TProcTrap */
16698 +    NULL,                      /* IProcTrap */
16699 +    NULL,                      /* CProcTrap */
16700 +    NULL,                      /* CProcReissue */
16701 +    sys_startFaultCheck,       /* StartFaultCheck */
16702 +    sys_endFaultCheck,          /* EndFaultCheck */
16703 +    sys_load8,                 /* Load8 */
16704 +    sys_store8,                        /* Store8 */
16705 +    sys_load16,                        /* Load16 */
16706 +    sys_store16,               /* Store16 */
16707 +    sys_load32,                        /* Load32 */
16708 +    sys_store32,               /* Store32 */
16709 +    sys_load64,                        /* Load64 */
16710 +    sys_store64                        /* Store64 */
16711 +};
16712 +
16713 +va_list null_valist;               /* empty va_list passed to sys_exception() when a caller has no varargs */
16714 +
16715 +SYS_CTXT *
16716 +sys_init (ELAN3_CTXT *ctxt)        /* allocate per-context private data and install elan3_sys_ops; returns NULL on failure */
16717 +{
16718 +    SYS_CTXT *sctx;
16719 +
16720 +    /* Allocate and initialise the context private data */
16721 +    KMEM_ZALLOC (sctx, SYS_CTXT *, sizeof  (SYS_CTXT), TRUE);
16722 +
16723 +    if (sctx == NULL)
16724 +       return ((SYS_CTXT *) NULL);
16725 +
16726 +    sctx->Swap    = NULL;
16727 +    sctx->Armed   = 0;
16728 +    sctx->Backoff = 1;
16729 +    sctx->Table   = cookie_alloc_table ((unsigned long) ELAN3_MY_TASK_HANDLE(), 0);
16730 +    sctx->signal  = SIGSEGV;          /* default signal delivered from sys_exception() */
16731 +
16732 +    if (sctx->Table == NULL)          /* cookie table allocation failed - undo the KMEM_ZALLOC */
16733 +    {
16734 +       KMEM_FREE (sctx, sizeof (SYS_CTXT));
16735 +       return ((SYS_CTXT *) NULL);
16736 +    }
16737 +
16738 +    kmutex_init  (&sctx->Lock);
16739 +    spin_lock_init (&sctx->WaitLock);
16740 +    kcondvar_init (&sctx->NetworkErrorWait);
16741 +    
16742 +    /* Install my context operations and private data */
16743 +    ctxt->Operations = &elan3_sys_ops;
16744 +    ctxt->Private    = (void *) sctx;
16745 +    
16746 +    return (sctx);
16747 +}
16748 +
16749 +/* returns -ve on error or ELAN_CAP_OK or ELAN_CAP_RMS */
16750 +/* use = ELAN_USER_ATTACH, ELAN_USER_P2P, ELAN_USER_BROADCAST */
16751 +int 
16752 +elan3_validate_cap(ELAN3_DEV *dev, ELAN_CAPABILITY *cap ,int use)
16753 +{
16754 +     /* Don't allow a user process to attach to system context */
16755 +    if (ELAN3_SYSTEM_CONTEXT (cap->cap_lowcontext) || ELAN3_SYSTEM_CONTEXT (cap->cap_highcontext)
16756 +       || cap->cap_lowcontext <= ELAN_USER_BASE_CONTEXT_NUM  || cap->cap_highcontext <= ELAN_USER_BASE_CONTEXT_NUM) /* NOTE(review): was cap_highcontext twice - both ends of the range must be checked */
16757 +    {
16758 +       PRINTF2 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: lctx %x hctx %x \n",cap->cap_lowcontext,  cap->cap_highcontext);
16759 +       PRINTF3 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: bit %x  low %x high %x\n", ((cap->cap_lowcontext) & SYS_CONTEXT_BIT),
16760 +                E3_NUM_CONTEXT_0, ELAN3_KCOMM_BASE_CONTEXT_NUM);
16761 +
16762 +
16763 +       PRINTF0 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: user process cant attach to system cap\n");
16764 +       return (-EINVAL);
16765 +    }
16766
16767 +    if (cap->cap_type & ELAN_CAP_TYPE_HWTEST)
16768 +    {
16769 +       if (!(cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP)) /* cant have a bit map */
16770 +       {
16771 +           PRINTF0 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST must have ELAN_CAP_TYPE_NO_BITMAP\n");
16772 +           return (-EINVAL);
16773 +       }
16774 +       
16775 +       if (cap->cap_lowcontext != cap->cap_highcontext) 
16776 +       {
16777 +           PRINTF2 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST (cap->cap_lowcontext != cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext) ;
16778 +           return (-EINVAL);
16779 +       }
16780 +       
16781 +       if ( ! (ELAN3_HWTEST_CONTEXT(cap->cap_lowcontext) && ELAN3_HWTEST_CONTEXT(cap->cap_highcontext)))
16782 +       {
16783 +           PRINTF3 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST HWTEST_BASE_CONTEXT %d %d %d \n" , ELAN3_HWTEST_BASE_CONTEXT_NUM,cap->cap_lowcontext ,ELAN3_HWTEST_TOP_CONTEXT_NUM);
16784 +           return (-EINVAL);
16785 +       }
16786 +       
16787 +       if (cap->cap_lownode != ELAN_CAP_UNINITIALISED || cap->cap_highnode != ELAN_CAP_UNINITIALISED)
16788 +       {
16789 +           PRINTF0 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST nodes != ELAN_CAP_UNINITIALISED\n");
16790 +           return (-EINVAL);
16791 +       }
16792 +
16793 +       return ELAN_CAP_OK;
16794 +    }
16795 +
16796 +    return elanmod_classify_cap(&dev->Position, cap, use);
16797 +}
16798 +
16799 +int
16800 +sys_waitevent (ELAN3_CTXT *ctxt, E3_Event *event)  /* arm and wait on the cookie derived from a user event; ESUCCESS or EINVAL */
16801 +{
16802 +    SYS_CTXT    *sctx = (SYS_CTXT *) ctxt->Private;
16803 +    EVENT_COOKIE cookie;
16804 +
16805 +    if (ctxt->Device->Devinfo.dev_revision_id == PCI_REVISION_ID_ELAN3_REVA)
16806 +       return (EINVAL);                /* not supported on rev A silicon */
16807 +
16808 +    cookie = fuword ((int *) &event->ev_Type) & ~(EV_TYPE_MASK_EVIRQ | EV_TYPE_MASK_BCOPY);    /* cookie = user event type with flag bits stripped */
16809 +
16810 +    if (cookie_alloc_cookie (sctx->Table, cookie) != ESUCCESS)
16811 +       return (EINVAL);
16812 +
16813 +    cookie_arm_cookie (sctx->Table, cookie);
16814 +
16815 +    if (fuword ((int *) &event->ev_Count) > 0)  /* only wait while the event count shows it outstanding */
16816 +       cookie_wait_cookie (sctx->Table, cookie);
16817 +    
16818 +    cookie_free_cookie (sctx->Table, cookie);
16819 +    
16820 +    return (ESUCCESS);
16821 +}
16822 +
16823 +static void *
16824 +sys_getItem (SYS_SWAP_SPACE *sp, int list)  /* pop the head item from a user-space swap list; NULL if the list is empty */
16825 +{
16826 +    void *itemp = (void *) fuptr_noerr ((void **) &sp->ItemListsHead[list]);
16827 +    void *next;
16828 +    
16829 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_getItem: sp=%p list=%d head=%p itemp=%p\n",
16830 +            sp, list, &sp->ItemListsHead[list], itemp);
16831 +    
16832 +    if (itemp == NULL)
16833 +       return (NULL);
16834 +
16835 +    next = (void *) fuptr_noerr ((void *) itemp);       /* first word of an item is its Next pointer */
16836 +
16837 +    suptr_noerr ((void *) &sp->ItemListsHead[list], (void *) next);
16838 +    if (next == NULL)                                   /* list now empty - point tail back at the head slot */
16839 +       suptr_noerr ((void *) &sp->ItemListsTailp[list], (void *)&sp->ItemListsHead[list]);
16840 +    return (itemp);
16841 +}
16842 +
16843 +static void
16844 +sys_putItemBack (SYS_SWAP_SPACE *sp, int list, void *itemp)  /* append an item to the tail of a user-space swap list */
16845 +{
16846 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_putItemBack: sp=%p list=%d itemp=%p value=%08x\n",
16847 +            sp, list, itemp, fuword_noerr ((int *) &((SYS_WORD_ITEM *) itemp)->Value));
16848 +
16849 +    suptr_noerr ((void **) itemp, NULL);                                                       /* item->Next = NULL */
16850 +    suptr_noerr ((void **) fuptr_noerr ((void **) &sp->ItemListsTailp[list]), (void *)itemp);  /* *Tailp = item */
16851 +    suptr_noerr ((void **) &sp->ItemListsTailp[list], (void *) itemp);                         /* Tailp = &item->Next */
16852 +}
16853 +
16854 +static void
16855 +sys_putItemFront (SYS_SWAP_SPACE *sp, int list, void *itemp)  /* push an item onto the head of a user-space swap list */
16856 +{
16857 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_putItemFront: sp=%p list=%d itemp=%p value=%08x\n",
16858 +            sp, list, itemp, fuword_noerr ((int *) &((SYS_WORD_ITEM *) itemp)->Value));
16859 +
16860 +    suptr_noerr ((void **) itemp, fuptr_noerr ((void **) &sp->ItemListsHead[list]));           /* item->Next = Head */
16861 +    suptr_noerr ((void **) &sp->ItemListsHead[list], (void *) itemp);                          /* Head = item */
16862 +
16863 +    if (fuptr_noerr ((void **) &sp->ItemListsTailp[list]) == (void *) &sp->ItemListsHead[list])        /* if (Tailp == &Head) */
16864 +       suptr_noerr ((void **) &sp->ItemListsTailp[list], (void *) itemp);                      /*    Tailp = &Item->Next */
16865 +}
16866 +
16867 +
16868 +static int
16869 +sys_getWordItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep)  /* pop a word item: 1 with *itemp/*valuep set, 0 if empty or faulted */
16870 +{
16871 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
16872 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
16873 +    SYS_WORD_ITEM  *item;
16874 +    int                    res;
16875 +    label_t        ljb;
16876 +
16877 +    kmutex_lock (&sctx->Lock);
16878 +    
16879 +    if (on_fault (&ljb))               /* fault while touching user swap space - report and bail */
16880 +    {
16881 +       no_fault();
16882 +       kmutex_unlock (&sctx->Lock);
16883 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
16884 +       return (0);
16885 +    }
16886 +
16887 +    item = (SYS_WORD_ITEM *) sys_getItem (sp, list);
16888 +
16889 +    if (item == NULL)
16890 +       res = 0;
16891 +    else
16892 +    {
16893 +       if (list == LIST_DMA_PTR)
16894 +           sctx->Armed = TRUE;        /* arm the backoff machinery (consumed by sys_backoffWait) */
16895 +
16896 +       *itemp  = (void *) item;
16897 +       *valuep = (E3_Addr) fuword_noerr ((E3_int32 *) &item->Value);
16898 +
16899 +       PRINTF3 (ctxt, DBG_SYSCALL, "sys_getWordItem: list=%d -> item=%p value=%08x\n", list, *itemp, *valuep);
16900 +
16901 +       res = 1;
16902 +    }
16903 +    
16904 +    no_fault();
16905 +    kmutex_unlock (&sctx->Lock);
16906 +
16907 +    return (res);
16908 +}
16909 +
16910 +static int
16911 +sys_getBlockItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep)  /* pop a block item: 1 with item and its elan address, 0 if empty or faulted */
16912 +{
16913 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
16914 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
16915 +    SYS_BLOCK_ITEM *item;
16916 +    int                    res;
16917 +    label_t        ljb;
16918 +
16919 +    kmutex_lock (&sctx->Lock);
16920 +    
16921 +    if (on_fault (&ljb))               /* fault while touching user swap space - report and bail */
16922 +    {
16923 +       no_fault();
16924 +       kmutex_unlock (&sctx->Lock);
16925 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
16926 +       return (0);
16927 +    }
16928 +
16929 +    item = sys_getItem (sp, list);
16930 +
16931 +    if (item == NULL)
16932 +       res = 0;
16933 +    else
16934 +    {
16935 +       E3_uint32 *dest = fuptr_noerr ((void **) &item->Pointer);
16936 +
16937 +       if (list == LIST_DMA_DESC)
16938 +           sctx->Armed = TRUE;        /* arm the backoff machinery (consumed by sys_backoffWait) */
16939 +
16940 +       *itemp  = (void *) item;
16941 +       *valuep = elan3mmu_elanaddr (ctxt->Elan3mmu, (caddr_t) dest);   /* translate main address to elan address */
16942 +
16943 +       PRINTF3 (ctxt, DBG_SYSCALL, "sys_getBlockItem: list=%d -> item=%p addr=%08x\n", list, *itemp, *valuep);
16944 +       PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
16945 +                fuword_noerr ((int *) &dest[0]), fuword_noerr ((int *) &dest[1]), 
16946 +                fuword_noerr ((int *) &dest[2]), fuword_noerr ((int *) &dest[3]));
16947 +       PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
16948 +                fuword_noerr ((int *) &dest[4]), fuword_noerr ((int *) &dest[5]),
16949 +                fuword_noerr ((int *) &dest[6]), fuword_noerr ((int *) &dest[7]));
16950 +
16951 +       
16952 +       res = 1;
16953 +    }
16954 +    
16955 +    no_fault();
16956 +    kmutex_unlock (&sctx->Lock);
16957 +
16958 +    return (res);
16959 +}
16960 +
16961 +static void
16962 +sys_putWordItem (ELAN3_CTXT *ctxt, int list, E3_Addr value)  /* take a free word item, store value, append to list; raises swap exceptions on failure */
16963 +{
16964 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
16965 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
16966 +    SYS_WORD_ITEM  *item;
16967 +    label_t        ljp;
16968 +
16969 +    kmutex_lock (&sctx->Lock);
16970 +
16971 +    PRINTF2 (ctxt,DBG_SYSCALL, "sys_putWordItem: list=%x value=%x\n", list, value);
16972 +
16973 +    if (on_fault (&ljp))               /* fault while touching user swap space */
16974 +    {
16975 +       no_fault();
16976 +       kmutex_unlock (&sctx->Lock);
16977 +       
16978 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
16979 +       return;
16980 +    }
16981 +
16982 +    item = sys_getItem (sp, LIST_FREE_WORD);
16983 +
16984 +    PRINTF1 (ctxt, DBG_SYSCALL, "sys_putWordItem: item=%p\n", item);
16985 +
16986 +    if (item == NULL)                  /* freelist exhausted */
16987 +    {
16988 +       no_fault();
16989 +       kmutex_unlock (&sctx->Lock);
16990 +       
16991 +       sys_exception (ctxt, EXCEPTION_SWAP_FAILED, list, (void *) NULL, null_valist);
16992 +       return;
16993 +    }
16994 +    
16995 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putWordItem: storing value=%08x at %p\n", value, &item->Value);
16996 +
16997 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putWordItem: item=%p value=%08x\n", item, value);
16998 +
16999 +    suword_noerr ((E3_int32 *) &item->Value, value);                                           /* write "value" into item */
17000 +
17001 +    sys_putItemBack (sp, list, item);
17002 +
17003 +    no_fault();
17004 +    kmutex_unlock (&sctx->Lock);
17005 +}
17006 +
17007 +static void
17008 +sys_putBlockItem (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr)  /* take a free block item, copy the 8-word block into it, append to list */
17009 +{
17010 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17011 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17012 +    SYS_BLOCK_ITEM *item;
17013 +    label_t        ljp;
17014 +    E3_uint32      *source;
17015 +    E3_uint32      *dest;
17016 +
17017 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putBlockItem: list=%x ptr=%p\n", list, ptr);
17018 +
17019 +    kmutex_lock (&sctx->Lock);
17020 +    
17021 +    if (on_fault (&ljp))               /* fault while touching user swap space */
17022 +    {
17023 +       no_fault();
17024 +       kmutex_unlock (&sctx->Lock);
17025 +       
17026 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17027 +       return;
17028 +    }
17029 +
17030 +    item = sys_getItem (sp, LIST_FREE_BLOCK);                  /* get an item from the freelist. */
17031 +
17032 +    if (item == NULL)                  /* freelist exhausted */
17033 +    {
17034 +       no_fault();
17035 +       kmutex_unlock (&sctx->Lock);
17036 +       
17037 +       sys_exception (ctxt, EXCEPTION_SWAP_FAILED, list, (void *) NULL, null_valist);
17038 +       return;
17039 +    }
17040 +
17041 +    /*
17042 +     * The block will have been read using 64 bit reads,  since we have
17043 +     * to write it to user memory using 32 bit writes, we need to perform
17044 +     * an endian swap on the Ultrasparc.
17045 +     */
17046 +    dest   = (E3_uint32 *) fuptr_noerr ((void **) &item->Pointer);
17047 +    source = (E3_uint32 *) ptr;
17048 +
17049 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putBlockItem: item=%p dest=%p\n",item, dest);
17050 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17051 +           source[0^WordEndianFlip], source[1^WordEndianFlip], source[2^WordEndianFlip], source[3^WordEndianFlip]);
17052 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17053 +            source[4^WordEndianFlip], source[5^WordEndianFlip], source[6^WordEndianFlip], source[7^WordEndianFlip]);
17054 +
17055 +    suword_noerr ((E3_int32 *) &dest[7], (E3_int32) source[7^WordEndianFlip]);   /* stored high word first - NOTE(review): presumably deliberate ordering, confirm */
17056 +    suword_noerr ((E3_int32 *) &dest[6], (E3_int32) source[6^WordEndianFlip]);
17057 +    suword_noerr ((E3_int32 *) &dest[5], (E3_int32) source[5^WordEndianFlip]);
17058 +    suword_noerr ((E3_int32 *) &dest[4], (E3_int32) source[4^WordEndianFlip]);
17059 +    suword_noerr ((E3_int32 *) &dest[3], (E3_int32) source[3^WordEndianFlip]);
17060 +    suword_noerr ((E3_int32 *) &dest[2], (E3_int32) source[2^WordEndianFlip]);
17061 +    suword_noerr ((E3_int32 *) &dest[1], (E3_int32) source[1^WordEndianFlip]);
17062 +    suword_noerr ((E3_int32 *) &dest[0], (E3_int32) source[0^WordEndianFlip]);
17063 +
17064 +    sys_putItemBack (sp, list, item);                          /* chain onto list of items. */
17065 +
17066 +    no_fault();
17067 +    kmutex_unlock (&sctx->Lock);
17068 +}
17069 +
17070 +static void
17071 +sys_freeWordItem (ELAN3_CTXT *ctxt, void *itemp)   /* return a word item to the LIST_FREE_WORD freelist */
17072 +{
17073 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17074 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17075 +    label_t        ljp;
17076 +
17077 +    kmutex_lock (&sctx->Lock);
17078 +    
17079 +    if (on_fault (&ljp))               /* fault while touching user swap space */
17080 +    {
17081 +       no_fault();
17082 +       kmutex_unlock (&sctx->Lock);
17083 +       
17084 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, LIST_FREE_WORD, (void *) NULL, null_valist);
17085 +       return;
17086 +    }
17087 +
17088 +    sys_putItemBack (sp, LIST_FREE_WORD, itemp);
17089 +
17090 +    no_fault();
17091 +    kmutex_unlock (&sctx->Lock);
17092 +}
17093 +
17094 +static void
17095 +sys_freeBlockItem (ELAN3_CTXT *ctxt, void *itemp)  /* return a block item to the LIST_FREE_BLOCK freelist */
17096 +{
17097 +    SYS_CTXT       *sctx = (SYS_CTXT *) ctxt->Private;
17098 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17099 +    SYS_BLOCK_ITEM *item = (SYS_BLOCK_ITEM *)itemp;
17100 +    E3_uint32      *dest;
17101 +    label_t        ljp;
17102 +
17103 +    kmutex_lock (&sctx->Lock);
17104 +    
17105 +    if (on_fault (&ljp))               /* fault while touching user swap space */
17106 +    {
17107 +       no_fault();
17108 +       kmutex_unlock (&sctx->Lock);
17109 +       
17110 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, LIST_FREE_BLOCK, (void *) NULL, null_valist);
17111 +       return;
17112 +    }
17113 +#ifdef DEBUG_PRINTF
17114 +    dest = (E3_uint32 *) fuptr_noerr ((void **) &item->Pointer);       /* debug-only dump of the block being freed */
17115 +
17116 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_freeBlockItem: item=%p dest=%p\n", item, dest);
17117 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17118 +            fuword_noerr ((int *) &dest[0]), fuword_noerr ((int *) &dest[1]), 
17119 +            fuword_noerr ((int *) &dest[2]), fuword_noerr ((int *) &dest[3]));
17120 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17121 +            fuword_noerr ((int *) &dest[4]), fuword_noerr ((int *) &dest[5]),
17122 +            fuword_noerr ((int *) &dest[6]), fuword_noerr ((int *) &dest[7]));
17123 +#endif
17124 +
17125 +    sys_putItemBack (sp, LIST_FREE_BLOCK, itemp);
17126 +
17127 +    no_fault();
17128 +    kmutex_unlock (&sctx->Lock);
17129 +}
17130 +
17131 +static void
17132 +sys_putbackItem (ELAN3_CTXT *ctxt, int list, void *itemp)  /* return an item to the HEAD of its list (re-queue, not free) */
17133 +{
17134 +    SYS_CTXT       *sctx = (SYS_CTXT *) ctxt->Private;
17135 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17136 +    label_t        ljp;
17137 +
17138 +    kmutex_lock (&sctx->Lock);
17139 +    
17140 +    if (on_fault (&ljp))               /* fault while touching user swap space */
17141 +    {
17142 +       no_fault();
17143 +       kmutex_unlock (&sctx->Lock);
17144 +       
17145 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17146 +       return;
17147 +    }
17148 +
17149 +    sys_putItemFront (sp, list, itemp);
17150 +
17151 +    no_fault();
17152 +    kmutex_unlock (&sctx->Lock);
17153 +}
17154 +
17155 +static int
17156 +sys_countItems (ELAN3_CTXT *ctxt, int list)        /* walk a swap list and return its length (0 on swap fault) */
17157 +{
17158 +    SYS_CTXT      *sctx  = (SYS_CTXT *) ctxt->Private;
17159 +    SYS_SWAP_SPACE *sp    = sctx->Swap;
17160 +    int                    count = 0;
17161 +    void          *item;
17162 +    label_t        ljb;
17163 +
17164 +    kmutex_lock (&sctx->Lock);
17165 +    
17166 +    if (on_fault (&ljb))               /* fault while touching user swap space */
17167 +    {
17168 +       no_fault();
17169 +       kmutex_unlock (&sctx->Lock);
17170 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17171 +       return (0);
17172 +    }
17173 +
17174 +    for (item = (void *) fuptr_noerr ((void **) &sp->ItemListsHead[list]); 
17175 +        item != NULL;
17176 +        item = (void *) fuptr_noerr ((void **) item))  /* first word of an item is its Next pointer */
17177 +    {
17178 +       count++;
17179 +    }
17180 +
17181 +    no_fault();
17182 +    kmutex_unlock (&sctx->Lock);
17183 +
17184 +    return (count);
17185 +}
17186 +
17187 +
17188 +long sys_longTime;         /* backoff tunables: left 0 => lazily set from the SYS_* defaults below in sys_backoffWait() */
17189 +long sys_shortTime;
17190 +int  sys_waitTicks;
17191 +int  sys_maxBackoff;
17192 +
17193 +#define SYS_LONG_TIME          MAX((hz * 5) / 1000, 1)         /* 5 ms */
17194 +#define SYS_SHORT_TIME         MAX((hz * 2) / 1000, 1)         /* 2 ms */
17195 +#define SYS_WAIT_TICKS         MAX((hz * 1) / 1000, 1)         /* 1 ms  - backoff granularity */
17196 +#define SYS_MAX_BACKOFF                MAX((hz * 5) / 1000, 1)         /* 5 ms  - max backoff for "nacked" packets*/
17197 +#define SYS_TIMEOUT_BACKOFF    MAX((hz * 10) / 1000, 1)        /* 10 ms - backoff for output timeout (point to point) */
17198 +#define SYS_BCAST_BACKOFF      MAX((hz * 50) / 1000, 1)        /* 50 ms - backoff for output timeout (broadcast) */
17199 +#define SYS_NETERR_BACKOFF     MAX((hz * 10) / 1000, 1)        /* 10 ms - delay for network error in dma data */
17200 +
17201 +static void
17202 +sys_backoffWait (ELAN3_CTXT *ctxt, int ticks)      /* sleep for 'ticks', or for an adaptive backoff (ticks==0) scaled by trap frequency */
17203 +{
17204 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
17205 +    long      t;
17206 +
17207 +    spin_lock (&sctx->WaitLock);
17208 +
17209 +    t = lbolt - sctx->Time;            /* ticks since the previous call */
17210 +
17211 +    if (sys_longTime   == 0) sys_longTime   = SYS_LONG_TIME;   /* lazy one-time init of the tunables */
17212 +    if (sys_shortTime  == 0) sys_shortTime  = SYS_SHORT_TIME;
17213 +    if (sys_waitTicks  == 0) sys_waitTicks  = SYS_WAIT_TICKS;
17214 +    if (sys_maxBackoff == 0) sys_maxBackoff = SYS_MAX_BACKOFF;
17215 +
17216 +    if (t > sys_longTime)                                      /* It's a long time since the last trap */
17217 +       sctx->Backoff = 0;                                      /* so set the backoff back down to 0 */
17218 +
17219 +    if (ticks)                         /* explicit delay requested by the caller */
17220 +    {
17221 +       PRINTF2 (ctxt, DBG_DPROC, "sys_backoffWait : Waiting - %d ticks [%lx]\n", ticks, t);
17222 +       kcondvar_timedwait (&sctx->NetworkErrorWait, &sctx->WaitLock, NULL, lbolt + ticks);
17223 +    }
17224 +    else if (sctx->Armed)              /* adaptive backoff, armed by sys_getWordItem/sys_getBlockItem */
17225 +    {
17226 +       if (t < sys_shortTime)                                  /* It's been a short time since the last */
17227 +       {                                                       /* trap, so increase the backoff */
17228 +           sctx->Backoff++;
17229 +           
17230 +           if (sctx->Backoff > sys_maxBackoff)
17231 +               sctx->Backoff = sys_maxBackoff;
17232 +       }
17233 +
17234 +       PRINTF2 (ctxt, DBG_DPROC, "sys_backoffWait : Waiting - %d [%lx]\n", sctx->Backoff, t);
17235 +
17236 +       if (sctx->Backoff)
17237 +           kcondvar_timedwaitsig (&sctx->NetworkErrorWait, &sctx->WaitLock, NULL, lbolt + sctx->Backoff * sys_waitTicks);
17238 +
17239 +       sctx->Armed = 0;
17240 +    }
17241 +    else
17242 +    {
17243 +       PRINTF1 (ctxt, DBG_DPROC, "sys_backoffWait : Not Waiting - %d\n", sctx->Backoff);
17244 +
17245 +    }
17246 +    sctx->Time = lbolt;                /* remember when we last ran for the next interval calculation */
17247 +
17248 +    spin_unlock (&sctx->WaitLock);
17249 +}
17250 +
17251 +static int
17252 +trapSize (int proc)        /* size in bytes of the trap structure for a processor type; 0 if unrecognised */
17253 +{
17254 +    switch (proc)
17255 +    {
17256 +    case DMA_PROC:     return (sizeof (DMA_TRAP));
17257 +    case THREAD_PROC:  return (sizeof (THREAD_TRAP));
17258 +    case COMMAND_PROC: return (sizeof (COMMAND_TRAP));
17259 +    case INPUT_PROC:   return (sizeof (INPUT_TRAP));
17260 +    default:           return (0);
17261 +    }
17262 +}
17263 +
17264 +static int
17265 +sys_exception (ELAN3_CTXT *ctxt, int type, int proc, void *trapp, va_list ap)  /* central exception dispatcher: queues most traps via sys_addException, absorbs/backoffs others; returns OP_HANDLED or OP_IGNORE */
17266 +{
17267 +    SYS_CTXT *sctx  = (SYS_CTXT *) ctxt->Private;
17268 +    int              res;              /* NOTE(review): shadowed by inner 'res' declarations in several cases below */
17269 +
17270 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_exception: type %d proc %d\n", type, proc);
17271 +
17272 +    switch (type)
17273 +    {
17274 +    case EXCEPTION_INVALID_ADDR:       /* varargs: faultSave, res */
17275 +    {
17276 +       E3_FaultSave_BE *faultSave = va_arg (ap, E3_FaultSave_BE *);
17277 +       int              res       = va_arg (ap, int);
17278 +       
17279 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), faultSave, res, 0);
17280 +       break;
17281 +    }
17282 +    
17283 +    case EXCEPTION_UNIMP_INSTR:        /* varargs: instr */
17284 +    {
17285 +       E3_uint32 instr = va_arg (ap, E3_uint32);
17286 +       
17287 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, instr);
17288 +       break;
17289 +    }
17290 +    
17291 +    case EXCEPTION_INVALID_PROCESS:    /* varargs: vproc, res */
17292 +    {
17293 +       E3_uint32 vproc = va_arg (ap, E3_uint32);
17294 +       int       res  = va_arg (ap, int);
17295 +
17296 +       switch (proc)
17297 +       {
17298 +       case DMA_PROC:
17299 +           if (sctx->Flags & ELAN3_SYS_FLAG_DMA_BADVP)         /* user opted to absorb bad-vp dma traps: neuter the dma */
17300 +           {
17301 +               DMA_TRAP *trap = (DMA_TRAP *) trapp;
17302 +
17303 +               if (trap->Desc.s.dma_direction != DMA_WRITE)
17304 +                   trap->Desc.s.dma_srcEvent = trap->Desc.s.dma_destEvent;
17305 +
17306 +               trap->Desc.s.dma_direction       = DMA_WRITE;
17307 +               trap->Desc.s.dma_size            = 0;
17308 +               trap->Desc.s.dma_source          = (E3_Addr) 0;
17309 +               trap->Desc.s.dma_dest            = (E3_Addr) 0;
17310 +               trap->Desc.s.dma_destEvent       = (E3_Addr) 0;
17311 +               trap->Desc.s.dma_destCookieVProc = 0;
17312 +               trap->Desc.s.dma_srcCookieVProc  = 0;
17313 +               
17314 +               return (OP_IGNORE);
17315 +           }
17316 +           break;
17317 +
17318 +       case THREAD_PROC:
17319 +           if (sctx->Flags & ELAN3_SYS_FLAG_THREAD_BADVP)      /* user opted to absorb bad-vp thread traps */
17320 +           {
17321 +               THREAD_TRAP *trap = (THREAD_TRAP *) trapp;
17322 +
17323 +               trap->TrapBits.s.PacketAckValue = E3_PAckError;
17324 +               
17325 +               return (OP_IGNORE);
17326 +           }
17327 +           break;
17328 +       }
17329 +           
17330 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, res, vproc);
17331 +       break;
17332 +    }
17333 +    
17334 +    case EXCEPTION_FAULTED:            /* varargs: addr */
17335 +    {
17336 +       E3_Addr addr = va_arg (ap, E3_Addr);
17337 +
17338 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, addr);
17339 +       break;
17340 +    }
17341 +    
17342 +    case EXCEPTION_QUEUE_OVERFLOW:     /* varargs: faultSave, trapType */
17343 +    {
17344 +       E3_FaultSave_BE *faultSave = va_arg (ap, E3_FaultSave_BE *);
17345 +       int              trapType  = va_arg (ap, int);
17346 +       
17347 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), faultSave, 0, trapType);
17348 +       break;
17349 +    }
17350 +    
17351 +    case EXCEPTION_COMMAND_OVERFLOW:   /* varargs: count */
17352 +    {
17353 +       int count = va_arg (ap, int);
17354 +       
17355 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, count);
17356 +       break;
17357 +    }
17358 +    
17359 +    case EXCEPTION_CHAINED_EVENT:      /* varargs: addr */
17360 +    {
17361 +       E3_Addr addr = va_arg (ap, E3_Addr);
17362 +       
17363 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, addr);
17364 +       break;
17365 +    }
17366 +    
17367 +    case EXCEPTION_DMA_RETRY_FAIL:     /* handled by backing off and retrying, not by queuing */
17368 +    case EXCEPTION_PACKET_TIMEOUT:
17369 +       if (proc != DMA_PROC)
17370 +           sys_backoffWait (ctxt, SYS_TIMEOUT_BACKOFF);
17371 +       else
17372 +       {
17373 +           DMA_TRAP *trap = (DMA_TRAP *) trapp;
17374 +           
17375 +           if (sctx->Flags & ELAN3_SYS_FLAG_DMAFAIL)           /* user wants failed dmas reported via the event word */
17376 +           {
17377 +               E3_BlockCopyEvent *event;
17378 +
17379 +               if (trap->Desc.s.dma_direction != DMA_WRITE)
17380 +                   trap->Desc.s.dma_srcEvent = trap->Desc.s.dma_destEvent;
17381 +
17382 +               /* change the source word to be E3_EVENT_FAILED */
17383 +               if ((event = (E3_BlockCopyEvent *) elan3mmu_mainaddr (ctxt->Elan3mmu, trap->Desc.s.dma_srcEvent)) == NULL)
17384 +               {
17385 +                   sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, 0);
17386 +                   break;
17387 +               }
17388 +
17389 +               suword (&event->ev_Source, E3_EVENT_FAILED);
17390 +               wmb(); mmiob();         /* make the store visible before the dma is neutered */
17391 +               
17392 +               trap->Desc.s.dma_direction       = DMA_WRITE;
17393 +               trap->Desc.s.dma_size            = 0;
17394 +               trap->Desc.s.dma_source          = (E3_Addr) 0;
17395 +               trap->Desc.s.dma_dest            = (E3_Addr) 0;
17396 +               trap->Desc.s.dma_destEvent       = (E3_Addr) 0;
17397 +               trap->Desc.s.dma_destCookieVProc = 0;
17398 +               trap->Desc.s.dma_srcCookieVProc  = 0;
17399 +               
17400 +               return (OP_IGNORE);
17401 +           }
17402 +
17403 +           if (type == EXCEPTION_DMA_RETRY_FAIL)
17404 +               sys_backoffWait (ctxt, 0);                      /* 0 => adaptive backoff */
17405 +           else
17406 +           {
17407 +               ELAN_LOCATION location;
17408 +
17409 +               krwlock_read (&ctxt->VpLock);
17410 +               location = ProcessToLocation (ctxt, NULL, trap->Desc.s.dma_direction == DMA_WRITE ? 
17411 +                                             trap->Desc.s.dma_destVProc : trap->Desc.s.dma_srcVProc, NULL);
17412 +               krwlock_done (&ctxt->VpLock);
17413 +               
17414 +               sys_backoffWait (ctxt, location.loc_node == ELAN3_INVALID_NODE ? SYS_BCAST_BACKOFF : SYS_TIMEOUT_BACKOFF);
17415 +           }
17416 +       }
17417 +       return (OP_IGNORE);
17418 +       
17419 +    case EXCEPTION_NETWORK_ERROR:      /* varargs: rvpp (resolver slot) */
17420 +    {
17421 +       INPUT_TRAP       *trap  = (INPUT_TRAP *) trapp;
17422 +       NETERR_RESOLVER **rvpp  = va_arg (ap, NETERR_RESOLVER **);
17423 +
17424 +       ASSERT (trap->State == CTXT_STATE_NETWORK_ERROR);
17425 +
17426 +       if (! (sctx->Flags & ELAN3_SYS_FLAG_NETERR) && (trap->DmaIdentifyTransaction || trap->ThreadIdentifyTransaction))
17427 +       {
17428 +           if ((*rvpp) != (NETERR_RESOLVER *) NULL)            /* a resolver is already attached - use its status */
17429 +               res = (*rvpp)->Status;
17430 +           else if ((res = QueueNetworkErrorResolver (ctxt, trap, rvpp)) == ESUCCESS)
17431 +           {
17432 +               /* Successfully queued the network error resolver */
17433 +               return (OP_HANDLED);
17434 +           }
17435 +
17436 +           /* network error resolution has failed - either a bad cookie or */
17437 +           /* an rpc error has occured */
17438 +           sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, res, 0);
17439 +       }
17440 +       else
17441 +       {
17442 +           /* Must be an overlaped dma packet. Must wait long enough to
17443 +            * ensure that the sending dma'er has tried to send the next
17444 +            * packet and had it discarded. In the real world this should
17445 +            * be greater than an output timeout. (About 8mSec) */
17446 +           
17447 +           sys_backoffWait (ctxt, SYS_NETERR_BACKOFF);
17448 +           
17449 +           /* set this inputter state to be ok, since we've been called 
17450 +            * by the lwp it will lower the context filter for us, so 
17451 +            * re-enabling the inputter,  note we don't need to execute
17452 +            * any of the packet since the dma process will re-transmit
17453 +            * it after receiving a nack for the next packet */
17454 +           trap->State = CTXT_STATE_OK;
17455 +           
17456 +           return (OP_HANDLED);
17457 +       }
17458 +       break;
17459 +    }
17460 +    
17461 +    default:
17462 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, 0);
17463 +       break;
17464 +    }
17465 +    
17466 +    if (type != EXCEPTION_DEBUG)       /* notify the process of the queued exception via sctx->signal */
17467 +#ifdef LINUX
17468 +#ifdef NO_NPTL
17469 +       psignal (CURPROC()->p_opptr, sctx->signal);
17470 +#else
17471 +       psignal (CURPROC()->parent, sctx->signal);
17472 +#endif
17473 +#else
17474 +       psignal (CURPROC(), sctx->signal);
17475 +#endif
17476 +    return (OP_HANDLED);
17477 +}
17478 +
17479 +static int
17480 +sys_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag)   /* fire the cookie for an event interrupt; always OP_HANDLED */
17481 +{
17482 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
17483 +
17484 +    cookie_fire_cookie (sctx->Table, cookie);
17485 +
17486 +    return (OP_HANDLED); 
17487 +}
17488 +
17489 +static void
17490 +sys_swapin (ELAN3_CTXT *ctxt)      /* swap-in hook - nothing to do beyond tracing */
17491 +{
17492 +    PRINTF0 (ctxt, DBG_SYSCALL, "sys_swapin\n");
17493 +}
17494 +
17495 +static void
17496 +sys_swapout (ELAN3_CTXT *ctxt)     /* swap-out hook - nothing to do beyond tracing */
17497 +{
17498 +    PRINTF0 (ctxt, DBG_SYSCALL, "sys_swapout\n");
17499 +}
17500 +
17501 +static void
17502 +sys_freePrivate (ELAN3_CTXT *ctxt)  /* tear down everything sys_init() created and detach the private data */
17503 +{
17504 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
17505 +
17506 +    cookie_free_table (sctx->Table);
17507 +
17508 +    kmutex_destroy (&sctx->Lock);
17509 +    spin_lock_destroy (&sctx->WaitLock);
17510 +    kcondvar_destroy (&sctx->NetworkErrorWait);
17511 +
17512 +    KMEM_FREE (sctx, sizeof (SYS_CTXT));
17513 +    ctxt->Private = NULL;              /* prevent stale access after free */
17514 +}
17515 +
17516 +static int
17517 +sys_checkThisDma (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, E3_DMA *dma)
17518 +{
17519 +    E3_DmaType type;
17520 +    E3_uint32  cookie;
17521 +    E3_uint32  cvproc;
17522 +    int               ignore;
17523 +    int               match;
17524 +
17525 +    type.type = fuword_noerr ((int *) &dma->dma_type);
17526 +
17527 +    if (type.s.direction == DMA_WRITE)
17528 +    {
17529 +       cookie = fuword_noerr ((int *) &dma->dma_srcCookieVProc);
17530 +       cvproc = fuword_noerr ((int *) &dma->dma_destCookieVProc);
17531 +    }
17532 +    else
17533 +    {
17534 +       cookie = fuword_noerr ((int *) &dma->dma_destCookieVProc);
17535 +       cvproc = fuword_noerr ((int *) &dma->dma_srcCookieVProc);
17536 +    }
17537 +
17538 +    PRINTF5 (ctxt, DBG_NETERR, "sys_checkThisDma: dir = %d cookie = %08x cvproc = %08x CookieVProc %08x DstProcess %04x\n",
17539 +            type.s.direction, cookie, cvproc, nef->Message.CookieVProc, nef->Message.DstProcess);
17540 +
17541 +    /* A DMA matches a network error fixup if it's going to the right place (or is a broadcast)
17542 +     * and the appropriate cookie matches, except that we ignore DMA's which don't have a destEvent
17543 +     * since they don't have any atomic behaviour (though they still send the identify) */
17544 +
17545 +    ignore = (type.s.direction == DMA_WRITE && cookie == 0 &&
17546 +             fuword_noerr ((int *) &dma->dma_destEvent) == 0);
17547 +    match  = (nef->Message.CookieVProc == cookie &&
17548 +             (nef->Message.DstProcess == (cvproc & DMA_PROCESS_MASK) || nef->Message.WaitForEop));
17549 +
17550 +    PRINTF2 (ctxt, DBG_NETERR, "  -> %s %s\n", ignore ? "ignore" : match ? "matched" : "not-matched", nef->Message.WaitForEop ? "wait for eop" : "");
17551 +
17552 +    if (match && !ignore && !nef->Message.WaitForEop)
17553 +    {
17554 +       PRINTF0 (ctxt, DBG_NETERR, "sys_checkThisDma: nuking the dma\n");
17555 +
17556 +       /* NOTE - we access the dma descriptor backwards since it could exist in sdram */
17557 +       if (type.s.direction != DMA_WRITE)
17558 +           suword_noerr ((int *) &dma->dma_srcEvent, 0);
17559 +
17560 +       suword_noerr ((int *) &dma->dma_destEvent, 0);
17561 +       suword_noerr ((int *) &dma->dma_dest,      0);
17562 +       suword_noerr ((int *) &dma->dma_source,    0);
17563 +       suword_noerr ((int *) &dma->dma_size,      0);
17564 +
17565 +       if (type.s.direction != DMA_WRITE)
17566 +           suword_noerr ((int *) &dma->dma_type, fuword_noerr ((int *) &dma->dma_type) & E3_DMA_CONTEXT_MASK);
17567 +
17568 +       wmb(); mmiob();
17569 +    }
17570 +
17571 +    return (match && !ignore);
17572 +}
17573 +
17574 +static int
17575 +sys_fixupNetworkError (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef)
17576 +{
17577 +    SYS_CTXT       *sctx    = (SYS_CTXT *) ctxt->Private;
17578 +    SYS_SWAP_SPACE *sp      = sctx->Swap;
17579 +    int                    matched = 0;
17580 +    SYS_WORD_ITEM  *wordp;
17581 +    SYS_BLOCK_ITEM *blockp;
17582 +    label_t        ljb;
17583 +    int                    res;
17584 +
17585 +    PRINTF3 (ctxt, DBG_NETERR, "sys_fixupnetworkError %08x %08x %08x\n", 
17586 +            nef->Message.CookieAddr, nef->Message.CookieVProc, nef->Message.NextCookie);
17587 +
17588 +    if (nef->Message.CookieAddr == (E3_Addr) 0)                        /* It's a DMA which requires fixing up */
17589 +    {
17590 +       kmutex_lock (&sctx->Lock);
17591 +
17592 +       if (on_fault (&ljb))
17593 +           res = EFAULT;
17594 +       else
17595 +       {
17596 +           /* scan the dma ptr list */
17597 +           for (wordp = (SYS_WORD_ITEM *) fuptr_noerr ((void **) &sp->ItemListsHead[LIST_DMA_PTR]);
17598 +                wordp != NULL; 
17599 +                wordp = (SYS_WORD_ITEM *) fuptr_noerr ((void **) &wordp->Next))
17600 +           {
17601 +               E3_uint32 value = fuword_noerr ((int *) &wordp->Value);
17602 +               E3_DMA    *dma  = (E3_DMA *) elan3mmu_mainaddr (ctxt->Elan3mmu, value);
17603 +
17604 +               PRINTF3 (ctxt, DBG_NETERR, "sys_fixupnetworkError: check block item %p Value %08x dma %p\n", wordp, value, dma);
17605 +
17606 +               matched += sys_checkThisDma (ctxt, nef, dma);
17607 +           }
17608 +       
17609 +           /* scan the dma desc list */
17610 +           for (blockp = (SYS_BLOCK_ITEM *) fuptr_noerr ((void **) &sp->ItemListsHead[LIST_DMA_DESC]);
17611 +                blockp != NULL; 
17612 +                blockp = (SYS_BLOCK_ITEM *) fuptr_noerr ((void **) &blockp->Next))
17613 +           {
17614 +               E3_DMA *dma = (E3_DMA *) fuptr_noerr ((void *) &blockp->Pointer);
17615 +               
17616 +               PRINTF2 (ctxt, DBG_NETERR, "sys_fixupnetworkError: check block item %p Pointer %p\n", blockp, dma);
17617 +
17618 +               matched += sys_checkThisDma (ctxt, nef, dma);
17619 +           }
17620 +           
17621 +           /* If we've still not found it, then check the command port item */
17622 +           /* it MUST be present as a command waiting to be executed, as */
17623 +           /* otherwise it could have already happened and we will claim to */
17624 +           /* have found it, but not really */
17625 +           if (ctxt->CommandPortItem != NULL)
17626 +           {
17627 +               E3_DMA *dma = (E3_DMA *) fuptr_noerr ((void *) &((SYS_BLOCK_ITEM *) ctxt->CommandPortItem)->Pointer);
17628 +
17629 +               if (sys_checkThisDma (ctxt, nef, dma))
17630 +               {
17631 +                   printk ("!!! it's the command port item - need to ensure that the command exists\n");
17632 +                   matched++;
17633 +               }
17634 +           }
17635 +
17636 +           res = matched ? ESUCCESS : ESRCH;
17637 +       }
17638 +       no_fault();
17639 +       kmutex_unlock (&sctx->Lock);
17640 +
17641 +       if (matched > 1)
17642 +           ElanException (ctxt, EXCEPTION_COOKIE_ERROR, DMA_PROC, NULL, NULL, nef->Message.CookieVProc);
17643 +    }
17644 +    else                                                       /* It's a thread which requires fixing up */
17645 +    {
17646 +       E3_int32  *cookiePtr = (E3_int32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, nef->Message.CookieAddr);
17647 +       E3_uint32  curval    = fuword_noerr (cookiePtr);
17648 +
17649 +       if (curval == nef->Message.CookieVProc)         /* thread doesn't think it's been done */
17650 +       {
17651 +           if (! nef->Message.WaitForEop)
17652 +           {
17653 +               suword_noerr (cookiePtr, nef->Message.NextCookie);
17654 +               mb(); mmiob();
17655 +           }
17656 +           
17657 +           res = ESUCCESS;
17658 +       }
17659 +       else                                                    /* thread thinks that it's been executed */
17660 +       {
17661 +           res = ESRCH;
17662 +       }
17663 +    }
17664 +    
17665 +    CompleteNetworkErrorFixup (ctxt, nef, res);
17666 +
17667 +    return (OP_HANDLED);
17668 +}
17669 +
17670 +
17671 +static int
17672 +sys_startFaultCheck (ELAN3_CTXT *ctxt)
17673 +{
17674 +    return (0);
17675 +}
17676 +
17677 +static void
17678 +sys_endFaultCheck (ELAN3_CTXT *ctxt)
17679 +{
17680 +    wmb();
17681 +}
17682 +
17683 +static E3_uint8
17684 +sys_load8 (ELAN3_CTXT *ctxt, E3_Addr addr)
17685 +{
17686 +    E3_uint8 *maddr = (E3_uint8 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
17687 +
17688 +    return (fubyte_noerr (maddr));
17689 +}
17690 +
17691 +static void
17692 +sys_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val)
17693 +{
17694 +    E3_uint8 *maddr = (E3_uint8 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
17695 +
17696 +    subyte_noerr (maddr, val);
17697 +    wmb(); mmiob();
17698 +}
17699 +
17700 +static E3_uint16
17701 +sys_load16 (ELAN3_CTXT *ctxt, E3_Addr addr)
17702 +{
17703 +    E3_uint16 *maddr = (E3_uint16 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
17704 +
17705 +    return (fusword_noerr (maddr));
17706 +}
17707 +
17708 +static void
17709 +sys_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val)
17710 +{
17711 +    E3_uint16 *maddr = (E3_uint16 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
17712 +
17713 +    susword_noerr (maddr, val);
17714 +    wmb(); mmiob();
17715 +}
17716 +
17717 +static E3_uint32
17718 +sys_load32 (ELAN3_CTXT *ctxt, E3_Addr addr)
17719 +{
17720 +    E3_uint32 *maddr = (E3_uint32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
17721 +
17722 +    return (fuword_noerr (maddr));
17723 +}
17724 +
17725 +static void
17726 +sys_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val)
17727 +{
17728 +    E3_uint32 *maddr = (E3_uint32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
17729 +
17730 +    suword_noerr (maddr, val);
17731 +    wmb(); mmiob();
17732 +}
17733 +
17734 +static E3_uint64
17735 +sys_load64 (ELAN3_CTXT *ctxt, E3_Addr addr)
17736 +{
17737 +    E3_uint64 *maddr = (E3_uint64 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
17738 +
17739 +    return (fulonglong_noerr ((long long *) maddr));
17740 +}
17741 +
17742 +static void
17743 +sys_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val)
17744 +{
17745 +    E3_uint64 *maddr = (E3_uint64 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
17746 +
17747 +    sulonglong_noerr ((long long *) maddr, val);
17748 +    wmb(); mmiob();
17749 +}
17750 +
17751 +
17752 +void
17753 +sys_addException (SYS_CTXT *sctx, int type, int proc, caddr_t trapp, int size,
17754 +                 E3_FaultSave_BE *faultSave, u_long res, u_long value)
17755 +{
17756 +    SYS_EXCEPTION      *ex_ptr;
17757 +    int                        front;
17758 +    int                        back;
17759 +    int                        count;
17760 +    label_t            ljp;
17761 +
17762 +    PRINTF4 (DBG_DEVICE, DBG_FN, "sys_addException: type %d proc %d res %ld value %ld\n",
17763 +            type, proc, res, value);
17764 +
17765 +    KMEM_ZALLOC (ex_ptr, SYS_EXCEPTION *, sizeof  (SYS_EXCEPTION), TRUE);
17766 +
17767 +    if (ex_ptr != NULL)
17768 +    {
17769 +       bzero ((caddr_t) ex_ptr, sizeof (SYS_EXCEPTION));
17770 +
17771 +       ex_ptr->Type  = type;
17772 +       ex_ptr->Proc  = proc;
17773 +       ex_ptr->Res   = res;
17774 +       ex_ptr->Value = value;
17775 +       
17776 +       if (trapp && size)
17777 +           bcopy (trapp, (caddr_t) &ex_ptr->Union, size);
17778 +       if (faultSave)
17779 +           bcopy ((caddr_t) faultSave, (caddr_t) &ex_ptr->FaultArea, sizeof (E3_FaultSave_BE));
17780 +    }
17781 +
17782 +    kmutex_lock (&sctx->Lock);
17783 +    if (! on_fault (&ljp))
17784 +    {
17785 +       front = fuword_noerr (&sctx->Exceptions->Front);
17786 +       back  = fuword_noerr (&sctx->Exceptions->Back);
17787 +       count = fuword_noerr (&sctx->Exceptions->Count);
17788 +
17789 +       if (count <= 0 || front < 0 || back < 0 || front >= count || back >= count)
17790 +           suword_noerr (&sctx->Exceptions->Overflow, fuword_noerr (&sctx->Exceptions->Overflow) + 1);
17791 +       else if (((front+1) % count ) == back)
17792 +           suword_noerr (&sctx->Exceptions->Overflow, fuword_noerr (&sctx->Exceptions->Overflow) + 1);
17793 +       else
17794 +       {
17795 +           if (ex_ptr != NULL)
17796 +               copyout_noerr ((caddr_t) ex_ptr, (caddr_t) &sctx->Exceptions->Exceptions[front], sizeof (SYS_EXCEPTION));
17797 +           else
17798 +           {
17799 +               suword_noerr (&sctx->Exceptions->Exceptions[front].Type, EXCEPTION_ENOMEM);
17800 +               suword_noerr (&sctx->Exceptions->Exceptions[front].Proc, 0);
17801 +           }
17802 +           suword_noerr (&sctx->Exceptions->Front, (front + 1) % count);
17803 +       }
17804 +
17805 +       /* always reset the magic number in case it's been overwritten */
17806 +       /* so that 'edb' can find the exception page in the core file */
17807 +       suword_noerr (&sctx->Exceptions->Magic, SYS_EXCEPTION_MAGIC);
17808 +    }
17809 +    no_fault();
17810 +    kmutex_unlock (&sctx->Lock);
17811 +    
17812 +    if (ex_ptr != NULL)
17813 +       KMEM_FREE (ex_ptr, sizeof  (SYS_EXCEPTION));
17814 +}
17815 +
17816 +int
17817 +sys_getException (SYS_CTXT *sctx, SYS_EXCEPTION *ex)
17818 +{
17819 +    int     front;
17820 +    int     back;
17821 +    int     count;
17822 +    int     res;
17823 +    label_t ljp;
17824 +
17825 +    if (sctx->Exceptions == NULL)
17826 +       return (EINVAL);
17827 +
17828 +    kmutex_lock (&sctx->Lock);
17829 +    if (on_fault (&ljp))
17830 +    {
17831 +       no_fault();
17832 +       kmutex_unlock (&sctx->Lock);
17833 +       return (EFAULT);
17834 +    }
17835 +    
17836 +    front = fuword_noerr (&sctx->Exceptions->Front);
17837 +    back  = fuword_noerr (&sctx->Exceptions->Back);
17838 +    count = fuword_noerr (&sctx->Exceptions->Count);
17839 +
17840 +    if (count <= 0 || front < 0 || back < 0 || front >= count || back >= count || back == front)
17841 +       res = EINVAL;
17842 +    else
17843 +    {
17844 +       copyin_noerr ((caddr_t) &sctx->Exceptions->Exceptions[back], (caddr_t) ex, sizeof (SYS_EXCEPTION));
17845 +       suword_noerr (&sctx->Exceptions->Back, (back+1) % count);
17846 +
17847 +       res = ESUCCESS;
17848 +    }
17849 +    no_fault();
17850 +    kmutex_unlock (&sctx->Lock);
17851 +
17852 +    return (res);
17853 +}
17854 +
17855 +
17856 +/*
17857 + * Local variables:
17858 + * c-file-style: "stroustrup"
17859 + * End:
17860 + */
17861 Index: linux-2.4.21/drivers/net/qsnet/elan3/eventcookie.c
17862 ===================================================================
17863 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/eventcookie.c     2004-02-23 16:02:56.000000000 -0500
17864 +++ linux-2.4.21/drivers/net/qsnet/elan3/eventcookie.c  2005-06-01 23:12:54.585441232 -0400
17865 @@ -0,0 +1,324 @@
17866 +/*
17867 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
17868 + * 
17869 + *    For licensing information please see the supplied COPYING file
17870 + *
17871 + */
17872 +
17873 +#ident "@(#)$Id: eventcookie.c,v 1.7 2003/08/13 10:03:03 fabien Exp $"
17874 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/eventcookie.c,v $*/
17875 +
17876 +#include <qsnet/kernel.h>
17877 +#include <elan3/elanregs.h>
17878 +#include <elan3/elandev.h>
17879 +#include <elan3/elanvp.h>
17880 +#include <elan3/elan3mmu.h>
17881 +#include <elan3/elanctxt.h>
17882 +#include <elan3/elandebug.h>
17883 +#include <elan3/urom_addrs.h>
17884 +#include <elan3/thread.h>
17885 +#include <elan3/vmseg.h>
17886 +
17887 +static EVENT_COOKIE_TABLE *cookie_tables;
17888 +static spinlock_t         cookie_table_lock;
17889 +
17890 +/*
17891 + * cookie_drop_entry:
17892 + *   drop the reference to a cookie held 
17893 + *   by the cookie table
17894 + */
17895 +static void
17896 +cookie_drop_entry (EVENT_COOKIE_ENTRY *ent)
17897 +{
17898 +    unsigned long flags;
17899 +
17900 +    spin_lock_irqsave (&ent->ent_lock, flags);
17901 +    if (--ent->ent_ref != 0)
17902 +    {
17903 +       ent->ent_fired = ent->ent_cookie;
17904 +       kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
17905 +
17906 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
17907 +    }
17908 +    else
17909 +    {
17910 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
17911 +
17912 +       spin_lock_destroy (&ent->ent_lock);
17913 +       kcondvar_destroy (&ent->ent_wait);
17914 +
17915 +       KMEM_FREE (ent, sizeof (EVENT_COOKIE_ENTRY));
17916 +    }
17917 +}
17918 +
17919 +void
17920 +cookie_init()
17921 +{
17922 +    spin_lock_init (&cookie_table_lock);
17923 +}
17924 +
17925 +void
17926 +cookie_fini()
17927 +{
17928 +    spin_lock_destroy (&cookie_table_lock);
17929 +}
17930 +
17931 +EVENT_COOKIE_TABLE *
17932 +cookie_alloc_table (unsigned long task, unsigned long handle)
17933 +{
17934 +    EVENT_COOKIE_TABLE *tbl, *ntbl;
17935 +
17936 +    KMEM_ZALLOC (ntbl, EVENT_COOKIE_TABLE *, sizeof (EVENT_COOKIE_TABLE), TRUE);
17937 +
17938 +    if (ntbl == NULL)
17939 +       return (NULL);
17940 +
17941 +    spin_lock (&cookie_table_lock);
17942 +    
17943 +    for (tbl = cookie_tables; tbl; tbl = tbl->tbl_next)
17944 +       if (tbl->tbl_task == task && tbl->tbl_handle == handle)
17945 +           break;
17946 +    
17947 +    if (tbl != NULL)
17948 +       tbl->tbl_ref++;
17949 +    else
17950 +    {
17951 +       spin_lock_init (&ntbl->tbl_lock);
17952 +
17953 +       ntbl->tbl_task    = task;
17954 +       ntbl->tbl_handle  = handle;
17955 +       ntbl->tbl_ref     = 1;
17956 +       ntbl->tbl_entries = NULL;
17957 +
17958 +       if ((ntbl->tbl_next = cookie_tables) != NULL)
17959 +           cookie_tables->tbl_prev = ntbl;
17960 +       cookie_tables = ntbl;
17961 +       ntbl->tbl_prev = NULL;
17962 +    }
17963 +    spin_unlock (&cookie_table_lock);
17964 +
17965 +    if (tbl == NULL)
17966 +       return (ntbl);
17967 +    else
17968 +    {
17969 +       KMEM_FREE (ntbl, sizeof (EVENT_COOKIE_TABLE));
17970 +       return (tbl);
17971 +    }    
17972 +}
17973 +
17974 +void
17975 +cookie_free_table (EVENT_COOKIE_TABLE *tbl)
17976 +{
17977 +    EVENT_COOKIE_ENTRY *ent;
17978 +
17979 +    spin_lock (&cookie_table_lock);
17980 +    if (tbl->tbl_ref > 1)
17981 +    {
17982 +       tbl->tbl_ref--;
17983 +       spin_unlock (&cookie_table_lock);
17984 +       return;
17985 +    }
17986 +    
17987 +    if (tbl->tbl_prev)
17988 +       tbl->tbl_prev->tbl_next = tbl->tbl_next;
17989 +    else
17990 +       cookie_tables = tbl->tbl_next;
17991 +    if (tbl->tbl_next)
17992 +       tbl->tbl_next->tbl_prev = tbl->tbl_prev;
17993 +    
17994 +    spin_unlock (&cookie_table_lock);
17995 +    
17996 +    /* NOTE - table no longer visible to other threads
17997 +     *        no need to acquire tbl_lock */
17998 +    while ((ent = tbl->tbl_entries) != NULL)
17999 +    {
18000 +       if ((tbl->tbl_entries = ent->ent_next) != NULL)
18001 +           ent->ent_next->ent_prev = NULL;
18002 +       
18003 +       cookie_drop_entry (ent);
18004 +    }
18005 +    spin_lock_destroy (&tbl->tbl_lock);
18006 +
18007 +    KMEM_FREE (tbl, sizeof (EVENT_COOKIE_TABLE));
18008 +}
18009 +
18010 +int
18011 +cookie_alloc_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18012 +{
18013 +    EVENT_COOKIE_ENTRY *ent, *nent;
18014 +    unsigned long flags;
18015 +
18016 +    KMEM_ZALLOC (nent, EVENT_COOKIE_ENTRY *, sizeof (EVENT_COOKIE_ENTRY), TRUE);
18017 +    
18018 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18019 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18020 +       if (ent->ent_cookie == cookie)
18021 +           break;
18022 +
18023 +    if (ent == NULL)
18024 +    {
18025 +       kcondvar_init (&nent->ent_wait);
18026 +       spin_lock_init (&nent->ent_lock);
18027 +
18028 +       nent->ent_ref    = 1;
18029 +       nent->ent_cookie = cookie;
18030 +
18031 +       if ((nent->ent_next = tbl->tbl_entries) != NULL)
18032 +           tbl->tbl_entries->ent_prev = nent;
18033 +       tbl->tbl_entries = nent;
18034 +       nent->ent_prev = NULL;
18035 +    }
18036 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18037 +
18038 +    if (ent == NULL)
18039 +       return (ESUCCESS);
18040 +    else
18041 +    {
18042 +       KMEM_FREE (nent, sizeof (EVENT_COOKIE_ENTRY));
18043 +       return (EINVAL);
18044 +    }
18045 +}
18046 +
18047 +int
18048 +cookie_free_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18049 +{
18050 +    EVENT_COOKIE_ENTRY *ent;
18051 +    unsigned long flags;
18052 +
18053 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18054 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18055 +       if (ent->ent_cookie == cookie)
18056 +           break;
18057 +    
18058 +    if (ent == NULL)
18059 +    {
18060 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18061 +       return (EINVAL);
18062 +    }
18063 +
18064 +    if (ent->ent_prev == NULL)
18065 +       tbl->tbl_entries = ent->ent_next;
18066 +    else
18067 +       ent->ent_prev->ent_next = ent->ent_next;
18068 +
18069 +    if (ent->ent_next != NULL)
18070 +       ent->ent_next->ent_prev = ent->ent_prev;
18071 +    
18072 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18073 +
18074 +    cookie_drop_entry (ent);
18075 +
18076 +    return (ESUCCESS);
18077 +}
18078 +
18079 +/*
18080 + * cookie_fire_cookie:
18081 + *    fire the cookie - this is called from the event interrupt.
18082 + */
18083 +int
18084 +cookie_fire_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18085 +{
18086 +    EVENT_COOKIE_ENTRY *ent;
18087 +    unsigned long flags;
18088 +
18089 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18090 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18091 +       if (ent->ent_cookie == cookie)
18092 +           break;
18093 +    
18094 +    if (ent == NULL)
18095 +    {
18096 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18097 +       return (EINVAL);
18098 +    }
18099 +           
18100 +    spin_lock (&ent->ent_lock);
18101 +    ent->ent_fired = cookie;
18102 +    kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
18103 +    spin_unlock (&ent->ent_lock);
18104 +
18105 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18106 +
18107 +    return (ESUCCESS);
18108 +}    
18109 +
18110 +/*
18111 + * cookie_wait_cookie:
18112 + *    deschedule on a cookie if it has not already fired.
18113 + *    note - if the cookie is removed from the table, then
18114 + *           we free it off when we're woken up.
18115 + */
18116 +int
18117 +cookie_wait_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18118 +{
18119 +    EVENT_COOKIE_ENTRY *ent;
18120 +    unsigned long flags;
18121 +    
18122 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18123 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18124 +       if (ent->ent_cookie == cookie)
18125 +           break;
18126 +    
18127 +    if (ent == NULL)
18128 +    {
18129 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18130 +       return (EINVAL);
18131 +    }
18132 +
18133 +    spin_lock (&ent->ent_lock);
18134 +    spin_unlock (&tbl->tbl_lock);
18135 +
18136 +    if (ent->ent_fired != 0)
18137 +    {
18138 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18139 +       return (ESUCCESS);
18140 +    }
18141 +
18142 +    ent->ent_ref++;
18143 +    kcondvar_waitsig (&ent->ent_wait, &ent->ent_lock, &flags);
18144 +    
18145 +    if (--ent->ent_ref > 0)
18146 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18147 +    else
18148 +    {
18149 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18150 +       
18151 +       spin_lock_destroy (&ent->ent_lock);
18152 +       kcondvar_destroy (&ent->ent_wait);
18153 +
18154 +       KMEM_FREE (ent, sizeof (EVENT_COOKIE_ENTRY));
18155 +    }
18156 +    return (ESUCCESS);
18157 +}
18158 +
18159 +int
18160 +cookie_arm_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18161 +{
18162 +    EVENT_COOKIE_ENTRY *ent;
18163 +    unsigned long flags;
18164 +
18165 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18166 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18167 +       if (ent->ent_cookie == cookie)
18168 +           break;
18169 +    
18170 +    if (ent == NULL)
18171 +    {
18172 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18173 +       return (EINVAL);
18174 +    }
18175 +           
18176 +    spin_lock (&ent->ent_lock);
18177 +    ent->ent_fired = 0;
18178 +    spin_unlock (&ent->ent_lock);
18179 +
18180 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18181 +
18182 +    return (ESUCCESS);
18183 +}
18184 +
18185 +/*
18186 + * Local variables:
18187 + * c-file-style: "stroustrup"
18188 + * End:
18189 + */
18190 Index: linux-2.4.21/drivers/net/qsnet/elan3/iproc.c
18191 ===================================================================
18192 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/iproc.c   2004-02-23 16:02:56.000000000 -0500
18193 +++ linux-2.4.21/drivers/net/qsnet/elan3/iproc.c        2005-06-01 23:12:54.586441080 -0400
18194 @@ -0,0 +1,925 @@
18195 +/*
18196 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
18197 + * 
18198 + *    For licensing information please see the supplied COPYING file
18199 + *
18200 + */
18201 +
18202 +#ident "@(#)$Id: iproc.c,v 1.47 2003/09/24 13:57:25 david Exp $"
18203 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/iproc.c,v $ */
18204 +
18205 +#include <qsnet/kernel.h>
18206 +
18207 +#include <elan3/elanregs.h>
18208 +#include <elan3/elandev.h>
18209 +#include <elan3/elanvp.h>
18210 +#include <elan3/elan3mmu.h>
18211 +#include <elan3/elanctxt.h>
18212 +#include <elan3/elandebug.h>
18213 +#include <elan3/urom_addrs.h>
18214 +#include <elan3/trtype.h>
18215 +#include <elan3/vmseg.h>
18216 +
18217 +
18218 +static int TrSizeTable[] = {0, 8, 16, 32, 64};
18219 +
18220 +static void  ConvertTransactionToSetEvent (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_Addr Addr);
18221 +static void  SimulateBlockWrite  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
18222 +static void  SimulateWriteWord   (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
18223 +static void  SimulateWriteDWord  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
18224 +static void  SimulateTraceRoute  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
18225 +static void  BumpInputterStats (ELAN3_DEV *dev, E3_IprocTrapHeader_BE *hdrp);
18226 +
18227 +void
18228 +HandleIProcTrap (ELAN3_DEV           *dev, 
18229 +                int                 Channel,
18230 +                E3_uint32           Pend,
18231 +                sdramaddr_t         FaultSaveOff,
18232 +                sdramaddr_t         TransactionsOff,
18233 +                sdramaddr_t         DataOff)
18234 +{
18235 +    E3_IprocTrapHeader_BE Transaction0;
18236 +    ELAN3_CTXT          *ctxt;
18237 +    INPUT_TRAP           *trap;
18238 +    register int          i;
18239 +
18240 +    /*
18241 +     * Read the 1st set of transactions, so we can determine the 
18242 +     * context for the trap 
18243 +     */
18244 +    elan3_sdram_copyq_from_sdram (dev, TransactionsOff, (void *) &Transaction0, 16);
18245 +    
18246 +    BumpStat (dev, IProcTraps);
18247 +    BumpInputterStats (dev, &Transaction0);
18248 +
18249 +    if (Transaction0.s.TrTypeCntx.s.TypeCntxInvalid)
18250 +    {
18251 +       /*
18252 +        * The context is not valid. This will occur if the packet
18253 +        * trapped for an EopError with no IdentTrans or an error corrupted the context
18254 +        * giving a CRC error on the first transaction and the Ack had not been returned.
18255 +        */
18256 +       if (Transaction0.s.TrTypeCntx.s.LastTrappedTrans)
18257 +       {
18258 +           PRINTF0 (DBG_DEVICE, DBG_IPROC, "iproc: Error on EOP without a good context, ignoring trap\n");
18259 +       }
18260 +       else
18261 +       {
18262 +           /* Check that only crap has been received.  If not then die. */
18263 +           if (! Transaction0.s.IProcTrapStatus.s.BadLength &&
18264 +               (Transaction0.s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_GOOD)
18265 +           {
18266 +               printk ("iproc: Did not have a valid context for the trap area.\n");
18267 +               printk ("iproc: TrTypeCntx=%x TrAddr=%x TrData0=%x IProcTrapStatus=%x\n",
18268 +                        Transaction0.s.TrTypeCntx.TypeContext, Transaction0.s.TrAddr,
18269 +                        Transaction0.s.TrData0, Transaction0.s.IProcTrapStatus.Status);
18270 +               panic ("elan3: iproc did not have a valid context");
18271 +               /* NOTREACHED */
18272 +           }
18273 +           PRINTF0 (DBG_DEVICE, DBG_IPROC, "iproc: First transaction is bad, ignoring trap\n");
18274 +       }
18275 +    }
18276 +    else
18277 +    {
18278 +       ctxt = ELAN3_DEV_CTX_TABLE(dev, Transaction0.s.TrTypeCntx.s.Context);
18279 +       
18280 +       if (ctxt == NULL)
18281 +       {
18282 +           PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleIProcTrap: context %x invalid\n", 
18283 +                    Transaction0.s.TrTypeCntx.s.Context);
18284 +
18285 +           BumpStat (dev, InvalidContext);
18286 +       }
18287 +       else
18288 +       {
18289 +           trap = (Channel == 0) ? &ctxt->Input0Trap : &ctxt->Input1Trap;
18290 +
18291 +           ASSERT (trap->State == CTXT_STATE_OK);
18292 +           
18293 +           trap->Transactions[0] = Transaction0;
18294 +
18295 +           PRINTF1 (ctxt, DBG_INTR, "HandleIProcTrap: %s\n", IProcTrapString (&trap->Transactions[0], NULL));
18296 +           /*
18297 +            * Copy the rest of the transactions into the trap area.
18298 +            */
18299 +           for (i = 0; !(trap->Transactions[i].s.TrTypeCntx.s.LastTrappedTrans);)
18300 +           {
18301 +               if (++i >= MAX_TRAPPED_TRANS)
18302 +               {
18303 +                   trap->Overflow = 1;
18304 +                   break;
18305 +               }
18306 +
18307 +               elan3_sdram_copyq_from_sdram (dev, TransactionsOff + i*sizeof (E3_IprocTrapHeader), (void *) &trap->Transactions[i], 16);
18308 +
18309 +               PRINTF1 (ctxt, DBG_INTR, "                 %s\n", IProcTrapString (&trap->Transactions[i], NULL));
18310 +
18311 +               BumpInputterStats (dev, &trap->Transactions[i]);
18312 +           }
18313 +           
18314 +           /*
18315 +            * Remember the number of transactions we've copied.
18316 +            */
18317 +           trap->NumTransactions = i+1;
18318 +
18319 +           PRINTF1 (ctxt, DBG_INTR, "                 NumTransactions = %d\n", trap->NumTransactions);
18320 +           
18321 +           /*
18322 +            * Copy all the data blocks in one go to let the Elan prefetcher work 
18323 +            */
18324 +           elan3_sdram_copyq_from_sdram (dev, DataOff, trap->DataBuffers, trap->NumTransactions*sizeof (E3_IprocTrapData));
18325 +
18326 +           /*
18327 +            * Copy fault save area and clear out for next time round.
18328 +            */
18329 +           elan3_sdram_copyq_from_sdram (dev, FaultSaveOff, (void *) &trap->FaultSave, 16);
18330 +           elan3_sdram_zeroq_sdram (dev, FaultSaveOff, 16);
18331 +
18332 +           if (ELAN3_OP_IPROC_TRAP (ctxt, trap, Channel) == OP_DEFER)
18333 +           {
18334 +               /*
18335 +                * Mark the trap as valid and set the inputter state to 
18336 +                * raise the context filter.
18337 +                */
18338 +               trap->State = CTXT_STATE_TRAPPED;
18339 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
18340 +               
18341 +               SetInputterStateForContext (ctxt, Pend, NULL);
18342 +           }
18343 +       }
18344 +    }
18345 +}
18346 +
18347 +void
18348 +InspectIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap)
18349 +{
18350 +    int              i;
18351 +    int              StatusValid;
18352 +
18353 +    trap->AckSent                  = 0;
18354 +    trap->BadTransaction            = 0;
18355 +    
18356 +    trap->TrappedTransaction        = NULL;
18357 +    trap->TrappedDataBuffer        = NULL;
18358 +    trap->WaitForEopTransaction     = NULL;
18359 +    trap->WaitForEopDataBuffer      = NULL;
18360 +    trap->DmaIdentifyTransaction    = NULL;
18361 +    trap->ThreadIdentifyTransaction = NULL;
18362 +    trap->LockQueuePointer          = (E3_Addr) 0;
18363 +    trap->UnlockQueuePointer        = (E3_Addr) 0;
18364 +
18365 +    /*
18366 +     * Now scan all the transactions received 
18367 +     */
18368 +    for (i = 0; i < trap->NumTransactions ; i++)
18369 +    {
18370 +       E3_IprocTrapHeader_BE *hdrp = &trap->Transactions[i];
18371 +       E3_IprocTrapData_BE   *datap = &trap->DataBuffers[i];
18372 +
18373 +       StatusValid = hdrp->s.TrTypeCntx.s.StatusRegValid != 0;
18374 +       
18375 +       if (StatusValid && hdrp->s.IProcTrapStatus.s.AckSent)   /* Remember if we've sent the ack back */
18376 +           trap->AckSent = 1;
18377 +       
18378 +       if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)              /* Check for EOP */
18379 +       {
18380 +           ASSERT (i == trap->NumTransactions - 1);
18381 +
18382 +           switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
18383 +           {
18384 +           case EOP_GOOD:
18385 +               /* if we get an EOP_GOOD then the outputer should have received a PAckOk. */  
18386 +               /* unless it was a flood, in which case someone must have sent an ack */
18387 +               /* but not necessarily us */
18388 +               break;
18389 +
18390 +           case EOP_BADACK:
18391 +               BumpUserStat (ctxt, EopBadAcks);
18392 +
18393 +               /* if we get an EOP_BADACK then the outputer did not receive a PAckOk even if
18394 +                * we sent a PAckOk. We can clear tinfo.AckSent. */
18395 +               if (trap->AckSent == 1)
18396 +               {
18397 +                   PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: Network error destroyed PAckOk\n");
18398 +                   trap->AckSent = 0;
18399 +               }
18400 +               break;
18401 +
18402 +           case EOP_ERROR_RESET:
18403 +               BumpUserStat (ctxt, EopResets);
18404 +
18405 +               /* if we get an EOP_ERROR_RESET then the outputer may or may not have got a PAckOk. */
18406 +               trap->BadTransaction = 1;
18407 +               break;
18408 +
18409 +           default:
18410 +               panic ("InspectIProcTrap: invalid EOP type in status register\n");
18411 +               /* NOTREACHED */
18412 +           }
18413 +           continue;
18414 +       }
18415 +
18416 +       PRINTF2 (ctxt, DBG_IPROC, "InspectIProcTrap: %2d: %s\n", i, IProcTrapString (hdrp, datap));
18417 +       
18418 +       if (! StatusValid)                                      /* We're looking at transactions stored before the trap */
18419 +       {                                                       /* these should only be identifies and lock transactions */
18420 +
18421 +           if (hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT)
18422 +               panic ("InspectIProcTrap: writeblock transaction found in input trap header before trap occured\n");
18423 +
18424 +           switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
18425 +           {
18426 +           case TR_LOCKQUEUE & TR_OPCODE_TYPE_MASK:
18427 +               if (trap->LockQueuePointer)                             /* Already seen a LOCKQUEUE transaction in this packet, */
18428 +               {                                               /* the user program should not have done this !! */
18429 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18430 +                   return;
18431 +               }
18432 +
18433 +               trap->LockQueuePointer = (E3_Addr) hdrp->s.TrAddr;      /* Remember the queue pointer in case we need to unlock it */
18434 +               break;
18435 +
18436 +           case TR_DMAIDENTIFY & TR_OPCODE_TYPE_MASK:
18437 +               if (trap->DmaIdentifyTransaction ||             /* Already seen an identify transaction in this packet */
18438 +                   trap->ThreadIdentifyTransaction)            /* the user program should not have done this */
18439 +               {                                                       
18440 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18441 +                   return;
18442 +               }
18443 +               trap->DmaIdentifyTransaction = hdrp;
18444 +               break;
18445 +
18446 +           case TR_THREADIDENTIFY & TR_OPCODE_TYPE_MASK:
18447 +               if (trap->DmaIdentifyTransaction ||             /* Already seen an identify transaction in this packet */
18448 +                   trap->ThreadIdentifyTransaction)            /* the user program should not have done this */
18449 +               {                                                       
18450 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18451 +                   return;
18452 +               }
18453 +               trap->ThreadIdentifyTransaction = hdrp;
18454 +               break;
18455 +               
18456 +           default:
18457 +               panic ("InspectIProcTrap: invalid transaction found in input trap header before trap occured\n");
18458 +               /* NOTREACHED */
18459 +           }
18460 +           continue;
18461 +       }
18462 +
18463 +       if (StatusValid && trap->TrappedTransaction == NULL)    /* Remember the transaction which caused the */
18464 +       {                                                       /* trap */
18465 +           trap->TrappedTransaction = hdrp;
18466 +           trap->TrappedDataBuffer  = datap;
18467 +       }
18468 +
18469 +       if(hdrp->s.IProcTrapStatus.s.BadLength ||
18470 +          ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_ERROR) ||
18471 +          ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_BAD))
18472 +       {
18473 +           int j;
18474 +           PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: transaction has a bad crc\n");
18475 +           for (j=0; j<TRANS_DATA_WORDS; j+=4)
18476 +              PRINTF5 (ctxt, DBG_IPROC, "InspectIProcTrap: Data %0d %8x %8x %8x %8x\n",
18477 +                       j, datap->TrData[j], datap->TrData[j+1], datap->TrData[j+2], datap->TrData[j+3]);
18478 +           trap->BadTransaction = 1;
18479 +           continue;
18480 +       }
18481 +       
18482 +       /* No more to do if it's a writeblock transaction */
18483 +       if (hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT)
18484 +           continue;
18485 +
18486 +       
18487 +       if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) == MI_InputDoTrap &&
18488 +           (hdrp->s.TrTypeCntx.s.Type & TR_WAIT_FOR_EOP) != 0)
18489 +       {
18490 +           /*
18491 +            * This is a wait for eop transaction that has trapped because the inputer
18492 +            * then received a EopError. The next transaction saved should always be an
18493 +            * EopError.
18494 +            */
18495 +           PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: got a trapped WaitForEop transaction due to EopError\n");
18496 +           
18497 +           trap->WaitForEopTransaction = hdrp;
18498 +           trap->WaitForEopDataBuffer  = datap;
18499 +           continue;
18500 +       }
18501 +
18502 +       switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
18503 +       {
18504 +       case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:
18505 +           if (trap->UnlockQueuePointer)
18506 +           {
18507 +               ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18508 +               return;
18509 +           }
18510 +           trap->UnlockQueuePointer = (E3_Addr) hdrp->s.TrAddr;
18511 +           break;
18512 +       }
18513 +    }
18514 +}
18515 +
18516 +void
18517 +ResolveIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp)
18518 +{
18519 +    ELAN3_DEV     *dev = ctxt->Device;
18520 +    int           res;
18521 +    unsigned long flags;
18522 +
18523 +    ASSERT (! CTXT_IS_KERNEL (ctxt));
18524 +
18525 +    BumpUserStat (ctxt, IProcTraps);
18526 +
18527 +    InspectIProcTrap (ctxt, trap);
18528 +
18529 +    /*
18530 +     * fixup page fault if we've trapped because of one.
18531 +     */
18532 +    if (trap->FaultSave.s.FaultContext != 0)
18533 +    {
18534 +       /*
18535 +        * If it's a WRITEBLOCK transaction, then see if we remember faulting
18536 +        * before it, and try and prefault in a sensible amount past it.
18537 +        */
18538 +       int                fixedFault = FALSE;
18539 +       INPUT_FAULT_SAVE  *entry;
18540 +       INPUT_FAULT_SAVE **predp;
18541 +       int                npages;
18542 +
18543 +       if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0 && /* a DMA packet */
18544 +           trap->LockQueuePointer == (E3_Addr) 0 &&                                    /* but not a queueing DMA */
18545 +           trap->TrappedTransaction->s.TrAddr != 0)                                    /* and not a DMA to 0 */
18546 +       {
18547 +           spin_lock (&ctxt->InputFaultLock);
18548 +           
18549 +           for (predp = &ctxt->InputFaultList; (entry = *predp)->Next != NULL ; predp = &entry->Next)
18550 +           {
18551 +               if (entry->Addr == trap->TrappedTransaction->s.TrAddr)
18552 +                   break;
18553 +           }
18554 +           
18555 +           *predp = entry->Next;
18556 +           entry->Next = ctxt->InputFaultList;
18557 +           ctxt->InputFaultList = entry;
18558 +           
18559 +           if (entry->Addr == trap->TrappedTransaction->s.TrAddr)
18560 +           {
18561 +               if ((entry->Count <<= 1) > MAX_INPUT_FAULT_PAGES)
18562 +                   entry->Count = MAX_INPUT_FAULT_PAGES;
18563 +           }
18564 +           else
18565 +           {
18566 +               entry->Count = MIN_INPUT_FAULT_PAGES;
18567 +           }
18568 +           
18569 +           entry->Addr = trap->TrappedTransaction->s.TrAddr + (entry->Count * PAGESIZE);
18570 +           npages = entry->Count;
18571 +           
18572 +           spin_unlock (&ctxt->InputFaultLock);
18573 +           
18574 +           if (elan3_pagefault (ctxt, &trap->FaultSave, npages) != ESUCCESS)
18575 +           {
18576 +               PRINTF2 (ctxt, DBG_IPROC, "ResolveIProcTrap: pagefaulting %d pages at %08x - failed\n", 
18577 +                        npages, trap->TrappedTransaction->s.TrAddr);
18578 +           }
18579 +           else
18580 +           {
18581 +               PRINTF2 (ctxt, DBG_IPROC, "ResolveIProcTrap: pagefaulting %d pages at %08x - succeeded\n", 
18582 +                        npages, trap->TrappedTransaction->s.TrAddr);
18583 +               
18584 +               fixedFault = TRUE;
18585 +           }
18586 +       }
18587 +
18588 +       /* Workaround WRITEBLOCK transaction executed when LOCKQUEUE transaction missed */
18589 +       /* the packet will have been nacked */
18590 +       if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) &&      /* a DMA packet */
18591 +           trap->LockQueuePointer == 0 && trap->UnlockQueuePointer &&                  /* a queueing DMA */
18592 +           trap->TrappedTransaction->s.TrAddr == trap->FaultSave.s.FaultAddress)       /* and missed lockqueue */
18593 +       {
18594 +           fixedFault = TRUE;
18595 +       }
18596 +
18597 +       if (! fixedFault)
18598 +       {
18599 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
18600 +           {
18601 +               PRINTF1 (ctxt, DBG_IPROC, "ResolveIProcTrap: elan3_pagefault failed at %x\n", 
18602 +                        trap->FaultSave.s.FaultAddress);
18603 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, INPUT_PROC, trap, &trap->FaultSave, res);
18604 +               return;
18605 +           }
18606 +       }
18607 +    }
18608 +
18609 +    if (! trap->AckSent && trap->LockQueuePointer)                     /* Queued DMA */
18610 +    {                                                                  /* The ack was not sent, so the queue will be locked. */
18611 +       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, FALSE);      /* We must unlock it. */
18612 +    }
18613 +
18614 +    if (trap->AckSent && trap->BadTransaction)
18615 +    {
18616 +       if (trap->DmaIdentifyTransaction)
18617 +       {
18618 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: Dma identify needs network resultion\n");
18619 +
18620 +           BumpStat (dev, DmaIdentifyNetworkErrors);
18621 +           BumpUserStat (ctxt, DmaIdentifyNetworkErrors);
18622 +
18623 +           if (trap->WaitForEopTransaction)
18624 +               PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: have delayed wait for eop transaction\n");
18625 +       }
18626 +       else if (trap->ThreadIdentifyTransaction)
18627 +       {
18628 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: Thread identify needs network resolution\n");
18629 +
18630 +           BumpStat (dev, ThreadIdentifyNetworkErrors);
18631 +           BumpUserStat (ctxt, ThreadIdentifyNetworkErrors);
18632 +
18633 +           if (trap->WaitForEopTransaction)
18634 +               PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: have delayed wait for eop transaction\n");
18635 +       }
18636 +       else
18637 +       {
18638 +           BumpStat (dev, DmaNetworkErrors);
18639 +           BumpUserStat (ctxt, DmaNetworkErrors);
18640 +       }
18641 +    }
18642 +
18643 +    spin_lock_irqsave (&dev->IntrLock, flags);
18644 +    
18645 +    if (! trap->AckSent)
18646 +    {
18647 +       PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack not sent, lowering context filter\n");
18648 +
18649 +       trap->State = CTXT_STATE_OK;
18650 +    }
18651 +    else
18652 +    {
18653 +       if (trap->BadTransaction)
18654 +       {
18655 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack sent, waiting on bad transaction\n");
18656 +           trap->State = CTXT_STATE_NETWORK_ERROR;
18657 +       }
18658 +       else
18659 +       {
18660 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack sent, waiting on packet to be re-executed\n");
18661 +           trap->State = CTXT_STATE_NEEDS_RESTART;
18662 +       }
18663 +    }
18664 +
18665 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
18666 +
18667 +    if (trap->AckSent && trap->BadTransaction)
18668 +       ElanException (ctxt, EXCEPTION_NETWORK_ERROR, INPUT_PROC, trap, rvpp);
18669 +}
18670 +
18671 +int
18672 +RestartIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap)
18673 +{
18674 +    PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: %d transactions\n", trap->NumTransactions);
18675 +
18676 +    if (trap->TrappedTransaction == NULL)                      /* No transaction trapped - probably a network */
18677 +       return (ESUCCESS);                                      /* error */
18678 +
18679 +    while (! trap->TrappedTransaction->s.TrTypeCntx.s.LastTrappedTrans)
18680 +    {
18681 +       E3_IprocTrapHeader_BE *hdrp = trap->TrappedTransaction;
18682 +       E3_IprocTrapData_BE   *datap = trap->TrappedDataBuffer;
18683 +       
18684 +       ASSERT (hdrp->s.TrTypeCntx.s.StatusRegValid != 0);
18685 +
18686 +       PRINTF2 (ctxt, DBG_IPROC, "RestartIProc: TrType=0x%x Status=0x%x\n",
18687 +                hdrp->s.TrTypeCntx.TypeContext, hdrp->s.IProcTrapStatus.Status);
18688 +       
18689 +       if ((hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0)
18690 +       {
18691 +           PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: WRITEBLOCK : Addr %x\n", hdrp->s.TrAddr);
18692 +           SimulateBlockWrite (ctxt, hdrp, datap);
18693 +       }
18694 +       else
18695 +       {
18696 +           switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
18697 +           {
18698 +           case TR_SETEVENT & TR_OPCODE_TYPE_MASK:
18699 +               PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: SETEVENT : %x\n", hdrp->s.TrAddr);
18700 +
18701 +               if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) != MI_InputDoTrap)
18702 +                   FixupEventTrap (ctxt, INPUT_PROC, trap, GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus), &trap->FaultSave, FALSE);
18703 +               else if (hdrp->s.TrAddr)
18704 +               {
18705 +                   if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), hdrp->s.TrAddr, FALSE) != ISSUE_COMMAND_OK)
18706 +                       return (EAGAIN);
18707 +               }
18708 +               break;
18709 +
18710 +           case TR_WRITEWORD & TR_OPCODE_TYPE_MASK:
18711 +               SimulateWriteWord (ctxt, hdrp, datap);
18712 +               break;
18713 +
18714 +           case TR_WRITEDOUBLEWORD & TR_OPCODE_TYPE_MASK:
18715 +               SimulateWriteDWord (ctxt, hdrp, datap);
18716 +               break;
18717 +               
18718 +           case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:
18719 +               if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) == MI_InputDoTrap)
18720 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18721 +               else
18722 +               {
18723 +                   switch (GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus))
18724 +                   {
18725 +                   case MI_WaitForUnLockDescRead:
18726 +                       /*
18727 +                        * Fault occured on the read of the queue descriptor - since the ack
18728 +                        * has been sent we need to move the queue on one slot.
18729 +                        */
18730 +                       PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: TR_UNLOCKQUEUE : desc read fault\n");
18731 +
18732 +                       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, TRUE);
18733 +                       
18734 +                       if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent),
18735 +                                         hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET, FALSE) != ISSUE_COMMAND_OK)
18736 +                       {
18737 +                           /* Failed to issue setevent to complete queue unlock, since we've already unlocked */
18738 +                           /* the queue, we should "convert" this transaction into a setevent transaction that */
18739 +                           /* hasn't trapped */
18740 +                           PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: could not issue setevent for SimulateUnlockQueue\n");
18741 +
18742 +                           ConvertTransactionToSetEvent (ctxt, hdrp, hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET);
18743 +                           return (EAGAIN);
18744 +                       }
18745 +                       break;
18746 +                       
18747 +                   case MI_DoSetEvent:
18748 +                       /*
18749 +                        * Fault occured on either the write to unlock the queue or during 
18750 +                        * processing of the event.  Test the fault address against the
18751 +                        * queue address to find out which - in this case, since the ack
18752 +                        * has been sent we need to move the queue on one slot.
18753 +                        */
18754 +                       if (trap->FaultSave.s.FaultAddress == trap->LockQueuePointer)
18755 +                       {
18756 +                           PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: fixed unlock queue write to unlock fault\n");
18757 +
18758 +                           SimulateUnlockQueue (ctxt, trap->LockQueuePointer, TRUE);
18759 +                           
18760 +                           if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent),
18761 +                                             hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET, FALSE) != ISSUE_COMMAND_OK)
18762 +                           {
18763 +                               /* Failed to issue setevent to complete queue unlock, since we've already unlocked */
18764 +                               /* the queue, we should "convert" this transaction into a setevent transaction that */
18765 +                               /* hasn't trapped */
18766 +                               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: could not issue setevent for SimulateUnlockQueue\n");
18767 +                               
18768 +                               ConvertTransactionToSetEvent (ctxt, hdrp, hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET);
18769 +                               return (EFAIL);
18770 +                           }
18771 +                           break;
18772 +                       }
18773 +                       /*DROPTHROUGH*/
18774 +                       
18775 +                   default:
18776 +                       FixupEventTrap (ctxt, INPUT_PROC, trap, GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus),
18777 +                                       &trap->FaultSave, FALSE);
18778 +                       break;
18779 +                   }
18780 +                   trap->LockQueuePointer = trap->UnlockQueuePointer = 0;
18781 +               }
18782 +               break;
18783 +
18784 +           case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:
18785 +               /* Just ignore send-discard transactions */
18786 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: ignore SENDDISCARD\n");
18787 +               break;
18788 +
18789 +           case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:
18790 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: REMOTEDMA\n");         
18791 +
18792 +               /* modify the dma type since it will still be a "read" dma */
18793 +               ((E3_DMA_BE *) datap)->s.dma_type &= ~(DMA_TYPE_READ | E3_DMA_CONTEXT_MASK);
18794 +               ((E3_DMA_BE *) datap)->s.dma_type |= DMA_TYPE_ISREMOTE;
18795 +
18796 +               RestartDmaDesc (ctxt, (E3_DMA_BE *) datap);
18797 +               break;
18798 +
18799 +           case TR_TRACEROUTE & TR_OPCODE_TYPE_MASK:
18800 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: TRACEROUTE\n");
18801 +               SimulateTraceRoute (ctxt, hdrp, datap);
18802 +               break;
18803 +
18804 +           default:
18805 +               ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18806 +               break;
18807 +           }
18808 +       }
18809 +
18810 +       /*
18811 +        * We've successfully processed this transaction, so move onto the 
18812 +        * next one.
18813 +        */
18814 +       trap->TrappedTransaction++;
18815 +       trap->TrappedDataBuffer++;
18816 +    }
18817 +    
18818 +    return (ESUCCESS);
18819 +}
18820 +
18821 +static void
18822 +ConvertTransactionToSetEvent (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_Addr Addr)
18823 +{
18824 +    hdrp->s.TrTypeCntx.s.Type           = TR_SETEVENT;
18825 +    hdrp->s.TrTypeCntx.s.StatusRegValid = 0;
18826 +    hdrp->s.TrAddr                      = Addr;
18827 +}
18828 +
18829 +void
18830 +SimulateBlockWrite (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
18831 +{
18832 +    void     *saddr  = (void *) ((unsigned long) datap + (hdrp->s.TrAddr & 0x3f));
18833 +    unsigned  nbytes = (hdrp->s.TrTypeCntx.s.Type) & TR_PARTSIZE_MASK;
18834 +    int       i;
18835 +
18836 +    if (nbytes == 0)
18837 +       nbytes = sizeof (E3_IprocTrapData_BE);
18838 +
18839 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
18840 +    {
18841 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
18842 +
18843 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateBlockWrite: faulted at %x\n", hdrp->s.TrAddr);
18844 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
18845 +       return;
18846 +    }
18847 +
18848 +    /*
18849 +     * NOTE: since the block copy could be to sdram, we issue the writes backwards,
18850 +     *       except we MUST ensure that the last item in the block is written last.
18851 +     */
18852 +    switch (((hdrp->s.TrTypeCntx.s.Type) >> TR_TYPE_SHIFT) & TR_TYPE_MASK)
18853 +    {
18854 +    case TR_TYPE_BYTE:                                         /* 8 bit */
18855 +       for (i = nbytes - (2*sizeof (E3_uint8)); i >= 0; i -= sizeof (E3_uint8))
18856 +           ELAN3_OP_STORE8 (ctxt, hdrp->s.TrAddr + i, ((E3_uint8 *) saddr)[i]);
18857 +       i = nbytes - sizeof (E3_uint8);
18858 +       ELAN3_OP_STORE8 (ctxt, hdrp->s.TrAddr + i, ((E3_uint8 *) saddr)[i]);
18859 +       break;
18860 +       
18861 +    case TR_TYPE_SHORT:                                                /* 16 bit */
18862 +       for (i = nbytes - (2*sizeof (E3_uint16)); i >= 0; i -= sizeof (E3_uint16))
18863 +       ELAN3_OP_STORE16 (ctxt, hdrp->s.TrAddr + i, ((E3_uint16 *) saddr)[i]);
18864 +       i = nbytes - sizeof (E3_uint16);
18865 +       ELAN3_OP_STORE16 (ctxt, hdrp->s.TrAddr + i, ((E3_uint16 *) saddr)[i]);
18866 +       break;
18867 +       
18868 +    case TR_TYPE_WORD:                                         /* 32 bit */
18869 +       for (i = nbytes - (2*sizeof (E3_uint32)); i >= 0; i -= sizeof (E3_uint32))
18870 +           ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + i, ((E3_uint32 *) saddr)[i]);
18871 +       i = nbytes - sizeof (E3_uint32);
18872 +       ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + i, ((E3_uint32 *) saddr)[i]);
18873 +       break;
18874 +       
18875 +    case TR_TYPE_DWORD:                                                /* 64 bit  */
18876 +       for (i = nbytes - (2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
18877 +           ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr + i, ((E3_uint64 *) saddr)[i]);
18878 +       i = nbytes - sizeof (E3_uint64);
18879 +       ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr + i, ((E3_uint64 *) saddr)[i]);
18880 +       break;
18881 +    }
18882 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
18883 +}
18884 +
18885 +void
18886 +SimulateWriteWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
18887 +{
18888 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
18889 +    {
18890 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
18891 +
18892 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateWriteWord: faulted at %x\n", hdrp->s.TrAddr);
18893 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
18894 +       return;
18895 +    }
18896 +
18897 +    ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr, ((E3_uint32 *) datap)[WordEndianFlip]);
18898 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
18899 +}
18900 +
18901 +void
18902 +SimulateWriteDWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
18903 +{
18904 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
18905 +    {
18906 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
18907 +
18908 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateWriteDWord: faulted at %x\n", hdrp->s.TrAddr);
18909 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
18910 +       return;
18911 +    }
18912 +
18913 +    ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr, ((E3_uint64 *) datap)[0]);
18914 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
18915 +}
18916 +
18917 +void
18918 +SimulateTraceRoute (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
18919 +{
18920 +    E3_uint32 *saddr  = (E3_uint32 *) ((unsigned long) datap + (hdrp->s.TrAddr & 0x3f));
18921 +    unsigned   nwords = TrSizeTable[(hdrp->s.TrTypeCntx.s.Type >> TR_SIZE_SHIFT) & TR_SIZE_MASK] / sizeof (E3_uint32);
18922 +    int        i;
18923 +
18924 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
18925 +    {
18926 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
18927 +
18928 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateTraceRoute: faulted at %x\n", hdrp->s.TrAddr);
18929 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
18930 +       return;
18931 +    }
18932 +    
18933 +    for (i = nwords-2; i >= 0; i--)
18934 +       ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + (i * sizeof (E3_uint32)), saddr[i ^ WordEndianFlip]);
18935 +
18936 +    i = nwords-1;
18937 +    ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + (i * sizeof (E3_uint32)), saddr[i ^ WordEndianFlip]);
18938 +
18939 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
18940 +}
18941 +
18942 +void
18943 +SimulateUnlockQueue (ELAN3_CTXT *ctxt, E3_Addr QueuePointer, int SentAck)
18944 +{
18945 +    E3_uint32 QueueLock;
18946 +    E3_Addr   QueueBPTR;
18947 +    E3_Addr   QueueFPTR;
18948 +    E3_uint64 QueueStateAndBPTR;
18949 +
18950 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
18951 +    {
18952 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
18953 +
18954 +       PRINTF1 (ctxt, DBG_IPROC, "UnlockQueue: faulted with QueuePointer %x\n", QueuePointer);
18955 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, QueuePointer);
18956 +       return;
18957 +    }
18958 +    
18959 +    if (SentAck)
18960 +    {
18961 +       QueueBPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_bptr));
18962 +       QueueFPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_fptr));
18963 +
18964 +       if (QueueBPTR == ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_top)))     /* move on back pointer */
18965 +           QueueBPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_base));
18966 +       else
18967 +           QueueBPTR += ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_size));
18968 +       
18969 +       QueueLock = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state));
18970 +
18971 +       if (QueueBPTR == QueueFPTR)                             /* and set full bit if fptr == bptr */
18972 +           QueueLock |= E3_QUEUE_FULL;
18973 +       
18974 +       QueueLock &= ~E3_QUEUE_LOCKED;
18975 +       
18976 +       QueueStateAndBPTR = (E3_uint64)QueueLock << 32 | QueueBPTR;
18977 +
18978 +       ELAN3_OP_STORE64 (ctxt, QueuePointer + offsetof (E3_Queue, q_state), QueueStateAndBPTR);
18979 +    }
18980 +    else
18981 +    {
18982 +       QueueLock = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state));
18983 +
18984 +       QueueLock &= ~E3_QUEUE_LOCKED;
18985 +       
18986 +       ELAN3_OP_STORE32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state), QueueLock);
18987 +    }
18988 +
18989 +    no_fault();
18990 +}
18991 +
18992 +static void
18993 +BumpInputterStats (ELAN3_DEV *dev, E3_IprocTrapHeader_BE *hdrp)
18994 +{
18995 +    if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)                 /* EOP */
18996 +    {
18997 +       switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
18998 +       {
18999 +       case EOP_BADACK:
19000 +           BumpStat (dev, EopBadAcks);
19001 +           break;
19002 +       case EOP_ERROR_RESET:
19003 +           BumpStat (dev, EopResets);
19004 +           break;
19005 +       }
19006 +    }
19007 +    else if (hdrp->s.TrTypeCntx.s.StatusRegValid)
19008 +    {
19009 +       /*
19010 +        * Errors are tested in order of badness. i.e. badlength will prevent a BadCrc and so on...
19011 +        */
19012 +       if (hdrp->s.IProcTrapStatus.s.BadLength)
19013 +           BumpStat (dev, InputterBadLength);
19014 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_BAD)
19015 +           BumpStat (dev, InputterCRCBad);
19016 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_ERROR)
19017 +           BumpStat (dev, InputterCRCErrors);
19018 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_DISCARD)
19019 +           BumpStat (dev, InputterCRCDiscards);
19020 +    }
19021 +}
19022 +
19023 +char *
19024 +IProcTrapString (E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19025 +{
19026 +    static char buffer[256];
19027 +    static char typeString[256];
19028 +    static char statusString[256];
19029 +    char *ptr;
19030 +    E3_Addr     Addr        = hdrp->s.TrAddr;
19031 +    E3_uint32   Type        = hdrp->s.TrTypeCntx.s.Type;
19032 +    E3_uint32   Context     = hdrp->s.TrTypeCntx.s.Context;
19033 +    E3_uint32   StatusValid = hdrp->s.TrTypeCntx.s.StatusRegValid;
19034 +    
19035 +    if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)
19036 +    {
19037 +       switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
19038 +       {
19039 +       case EOP_GOOD:          sprintf (typeString, "EOP GOOD"); break;
19040 +       case EOP_BADACK:        sprintf (typeString, "EOP BADACK"); break;
19041 +       case EOP_ERROR_RESET:   sprintf (typeString, "EOP ERROR RESET"); break;
19042 +       default:                sprintf (typeString, "EOP - bad status"); break;
19043 +       }
19044 +       sprintf (buffer, "%15s Cntx=%08x", typeString, Context);
19045 +    }
19046 +    else
19047 +    {
19048 +       if (Type & TR_WRITEBLOCK_BIT)
19049 +       {
19050 +           switch ((Type >> TR_TYPE_SHIFT) & TR_TYPE_MASK)
19051 +           {
19052 +           case TR_TYPE_BYTE:  ptr = "Byte";    break;
19053 +           case TR_TYPE_SHORT: ptr = "Short";   break;
19054 +           case TR_TYPE_WORD:  ptr = "Word";    break;
19055 +           case TR_TYPE_DWORD: ptr = "Double";  break;
19056 +           default:            ptr = "Unknown"; break;
19057 +           }
19058 +           
19059 +           sprintf (typeString, "WriteBlock Type=%s Size=%2d", ptr, Type & TR_PARTSIZE_MASK);
19060 +       }
19061 +       else
19062 +       {
19063 +           switch (Type & TR_OPCODE_TYPE_MASK)
19064 +           {
19065 +           case TR_SETEVENT & TR_OPCODE_TYPE_MASK:             sprintf (typeString, "Setevent"); break;
19066 +           case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Remote DMA"); break;
19067 +           case TR_LOCKQUEUE & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Lock Queue"); break;
19068 +           case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "Unlock Queue"); break;
19069 +           case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "Send Discard"); break;
19070 +           case TR_DMAIDENTIFY & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "DMA Identify"); break;
19071 +           case TR_THREADIDENTIFY & TR_OPCODE_TYPE_MASK:       sprintf (typeString, "Thread Identify"); break;
19072 +           case TR_GTE & TR_OPCODE_TYPE_MASK:                  sprintf (typeString, "GTE"); break;
19073 +           case TR_LT & TR_OPCODE_TYPE_MASK:                   sprintf (typeString, "LT"); break;
19074 +           case TR_EQ & TR_OPCODE_TYPE_MASK:                   sprintf (typeString, "EQ"); break;
19075 +           case TR_NEQ & TR_OPCODE_TYPE_MASK:                  sprintf (typeString, "NEQ"); break;
19076 +           case TR_WRITEWORD & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Write Word"); break;
19077 +           case TR_WRITEDOUBLEWORD & TR_OPCODE_TYPE_MASK:      sprintf (typeString, "Write Double"); break;
19078 +           case TR_ATOMICADDWORD & TR_OPCODE_TYPE_MASK:        sprintf (typeString, "Atomic Add"); break;
19079 +           case TR_TESTANDWRITE & TR_OPCODE_TYPE_MASK:         sprintf (typeString, "Test and Write"); break;
19080 +           default:                                            sprintf (typeString, "Type=%d", Type & TR_OPCODE_TYPE_MASK); break;
19081 +           }
19082 +       }
19083 +       sprintf (buffer, "%15s Addr=%08x Cntx=%08x", typeString, Addr, Context);
19084 +       /*(Type & TR_SENDACK)      ? " Sendack" : "", */
19085 +       /*(Type & TR_LAST_TRANS)   ? " LastTrans" : "", */
19086 +       /*(Type & TR_WAIT_FOR_EOP) ? " WaitForEop" : ""); */
19087 +    }
19088 +    
19089 +    if (StatusValid)
19090 +    {
19091 +       sprintf (statusString, " Type=%s %x", MiToName (hdrp->s.IProcTrapStatus.s.TrapType), hdrp->s.IProcTrapStatus.Status);
19092 +       strcat (buffer, statusString);
19093 +
19094 +       if (hdrp->s.IProcTrapStatus.s.BadLength)
19095 +           strcat (buffer, " BadLength");
19096 +       switch (hdrp->s.IProcTrapStatus.Status & CRC_MASK)
19097 +       {
19098 +       case CRC_STATUS_DISCARD:
19099 +           strcat (buffer, " CRC Discard");
19100 +           break;
19101 +       case CRC_STATUS_ERROR:
19102 +           strcat (buffer, " CRC Error");
19103 +           break;
19104 +
19105 +       case CRC_STATUS_BAD:
19106 +           strcat (buffer, " CRC Bad");
19107 +           break;
19108 +       }
19109 +    }
19110 +
19111 +    return (buffer);
19112 +}
19113 +
19114 +
19115 +/*
19116 + * Local variables:
19117 + * c-file-style: "stroustrup"
19118 + * End:
19119 + */
19120 Index: linux-2.4.21/drivers/net/qsnet/elan3/Makefile
19121 ===================================================================
19122 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/Makefile  2004-02-23 16:02:56.000000000 -0500
19123 +++ linux-2.4.21/drivers/net/qsnet/elan3/Makefile       2005-06-01 23:12:54.587440928 -0400
19124 @@ -0,0 +1,31 @@
19125 +#
19126 +# Makefile for Quadrics QsNet
19127 +#
19128 +# Copyright (c) 2002-2004 Quadrics Ltd
19129 +#
19130 +# File: drivers/net/qsnet/elan3/Makefile
19131 +#
19132 +
19133 +
19134 +#
19135 +
19136 +#
19137 +# Makefile for Quadrics QsNet
19138 +#
19139 +# Copyright (c) 2004 Quadrics Ltd.
19140 +#
19141 +# File: driver/net/qsnet/elan3/Makefile
19142 +#
19143 +
19144 +list-multi             := elan3.o
19145 +elan3-objs     := context.o cproc.o dproc.o elandebug.o elandev_generic.o elansyscall.o eventcookie.o iproc.o sdram.o minames.o network_error.o route_table.o tproc.o tprocinsts.o routecheck.o virtual_process.o elan3ops.o context_linux.o elandev_linux.o procfs_linux.o tproc_linux.o elan3mmu_generic.o elan3mmu_linux.o
19146 +export-objs            := elandev_linux.o procfs_linux.o
19147 +obj-$(CONFIG_ELAN3)    := elan3.o
19148 +
19149 +elan3.o : $(elan3-objs)
19150 +       $(LD) -r -o $@ $(elan3-objs)
19151 +
19152 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
19153 +
19154 +include $(TOPDIR)/Rules.make
19155 +
19156 Index: linux-2.4.21/drivers/net/qsnet/elan3/Makefile.conf
19157 ===================================================================
19158 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/Makefile.conf     2004-02-23 16:02:56.000000000 -0500
19159 +++ linux-2.4.21/drivers/net/qsnet/elan3/Makefile.conf  2005-06-01 23:12:54.587440928 -0400
19160 @@ -0,0 +1,10 @@
19161 +# Flags for generating QsNet Linux Kernel Makefiles
19162 +MODNAME                =       elan3.o
19163 +MODULENAME     =       elan3
19164 +KOBJFILES      =       context.o cproc.o dproc.o elandebug.o elandev_generic.o elansyscall.o eventcookie.o iproc.o sdram.o minames.o network_error.o route_table.o tproc.o tprocinsts.o routecheck.o virtual_process.o elan3ops.o context_linux.o elandev_linux.o procfs_linux.o tproc_linux.o elan3mmu_generic.o elan3mmu_linux.o
19165 +EXPORT_KOBJS   =       elandev_linux.o procfs_linux.o
19166 +CONFIG_NAME    =       CONFIG_ELAN3
19167 +SGALFC         =       
19168 +# EXTRALINES START
19169 +
19170 +# EXTRALINES END
19171 Index: linux-2.4.21/drivers/net/qsnet/elan3/minames.c
19172 ===================================================================
19173 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/minames.c 2004-02-23 16:02:56.000000000 -0500
19174 +++ linux-2.4.21/drivers/net/qsnet/elan3/minames.c      2005-06-01 23:12:54.587440928 -0400
19175 @@ -0,0 +1,38 @@
19176 +/*
19177 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
19178 + *
19179 + *    For licensing information please see the supplied COPYING file
19180 + *
19181 + */
19182 +
19183 +#ident "@(#)$Id: minames.c,v 1.12 2003/06/07 15:57:49 david Exp $"
19184 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/minames.c,v $*/
19185 +
19186 +#include <qsnet/kernel.h>
19187 +#include <elan3/urom_addrs.h>
19188 +
19189 +caddr_t
19190 +MiToName (int mi)
19191 +{
19192 +    static char space[32];
19193 +    static struct {
19194 +       int   mi;
19195 +       char *name;
19196 +    } info[] = {
19197 +#include <elan3/minames.h>
19198 +    };
19199 +    register int i;
19200 +
19201 +
19202 +    for (i = 0; i < sizeof(info)/sizeof(info[0]); i++)
19203 +       if (info[i].mi == mi)
19204 +           return (info[i].name);
19205 +    sprintf (space, "MI %x", mi);
19206 +    return (space);
19207 +}
19208 +
19209 +/*
19210 + * Local variables:
19211 + * c-file-style: "stroustrup"
19212 + * End:
19213 + */
19214 Index: linux-2.4.21/drivers/net/qsnet/elan3/network_error.c
19215 ===================================================================
19216 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/network_error.c   2004-02-23 16:02:56.000000000 -0500
19217 +++ linux-2.4.21/drivers/net/qsnet/elan3/network_error.c        2005-06-01 23:12:54.589440624 -0400
19218 @@ -0,0 +1,777 @@
19219 +/*
19220 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
19221 + *
19222 + *    For licensing information please see the supplied COPYING file
19223 + *
19224 + */
19225 +
19226 +#ident "@(#)$Id: network_error.c,v 1.32.2.1 2004/10/28 11:54:57 david Exp $"
19227 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/network_error.c,v $*/
19228 +
19229 +#include <qsnet/kernel.h>
19230 +#include <qsnet/kthread.h>
19231 +
19232 +#include <elan3/elanregs.h>
19233 +#include <elan3/elandev.h>
19234 +#include <elan3/elanvp.h>
19235 +#include <elan3/elan3mmu.h>
19236 +#include <elan3/elanctxt.h>
19237 +#include <elan3/elan3mmu.h>
19238 +#include <elan3/elandebug.h>
19239 +
19240 +#ifdef DIGITAL_UNIX
19241 +#include <sys/cred.h>
19242 +#include <sys/mbuf.h>
19243 +#include <sys/utsname.h>
19244 +#include <net/if.h>
19245 +#include <netinet/in.h>
19246 +#include <netinet/in_var.h>
19247 +
19248 +#include <rpc/types.h>
19249 +#include <rpc/auth.h>
19250 +#include <rpc/xdr.h>
19251 +#include <rpc/clnt.h>
19252 +
19253 +typedef xdrproc_t kxdrproc_t;
19254 +#endif
19255 +
19256 +#ifdef LINUX
19257 +#include <linux/sunrpc/types.h>
19258 +#include <linux/sunrpc/auth.h>
19259 +#include <linux/sunrpc/xdr.h>
19260 +#include <linux/sunrpc/clnt.h>
19261 +
19262 +#include <linux/utsname.h>
19263 +#define SYS_NMLN       __NEW_UTS_LEN
19264 +#endif
19265 +
19266 +#include <elan3/neterr_rpc.h>
19267 +
19268 +spinlock_t       ResolveRequestLock;
19269 +kcondvar_t       ResolveRequestWait;
19270 +
19271 +NETERR_RESOLVER  *ResolveRequestHead;
19272 +NETERR_RESOLVER **ResolveRequestTailp = &ResolveRequestHead;
19273 +int              ResolveRequestCount;
19274 +int              ResolveRequestThreads;
19275 +int              ResolveRequestMaxThreads = 4;
19276 +int              ResolveRequestTimeout = 60;
19277 +
19278 +typedef struct neterr_server
19279 +{
19280 +    struct neterr_server *Next;
19281 +    struct neterr_server *Prev;
19282 +    unsigned             ElanId;
19283 +
19284 +    char                *Name;
19285 +    int                          RefCount;
19286 +    struct sockaddr_in    Addr;
19287 +} NETERR_SERVER;
19288 +
19289 +#define NETERR_HASH_ENTRIES    64
19290 +#define NETERR_HASH(elanid)    (((unsigned) elanid) % NETERR_HASH_ENTRIES)
19291 +NETERR_SERVER *NeterrServerHash[NETERR_HASH_ENTRIES];
19292 +kmutex_t       NeterrServerLock;
19293 +
19294 +static NETERR_SERVER *FindNeterrServer (int elanId);
19295 +static void           DereferenceNeterrServer (NETERR_SERVER *server);
19296 +static int            CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg);
19297 +
19298 +void
19299 +InitialiseNetworkErrorResolver ()
19300 +{
19301 +    spin_lock_init (&ResolveRequestLock);
19302 +    kcondvar_init (&ResolveRequestWait);
19303 +    
19304 +    ResolveRequestHead  = NULL;
19305 +    ResolveRequestTailp = &ResolveRequestHead;
19306 +
19307 +    kmutex_init (&NeterrServerLock);
19308 +}
19309 +
19310 +void
19311 +FinaliseNetworkErrorResolver ()
19312 +{
19313 +    spin_lock_destroy (&ResolveRequestLock);
19314 +    kcondvar_destroy (&ResolveRequestWait);
19315 +    
19316 +    kmutex_destroy (&NeterrServerLock);
19317 +}
19318 +
19319 +static NETERR_RESOLVER *
19320 +AllocateNetworkErrorResolver (void)
19321 +{
19322 +    NETERR_RESOLVER *rvp;
19323 +
19324 +    KMEM_ZALLOC (rvp, NETERR_RESOLVER *, sizeof (NETERR_RESOLVER), TRUE);
19325 +    spin_lock_init (&rvp->Lock);
19326 +
19327 +    return (rvp);
19328 +}
19329 +
19330 +void
19331 +FreeNetworkErrorResolver (NETERR_RESOLVER *rvp)
19332 +{
19333 +    spin_lock_destroy (&rvp->Lock);
19334 +    KMEM_FREE (rvp, sizeof (NETERR_RESOLVER));
19335 +}
19336 +
19337 +static void
19338 +elan3_neterr_resolver (void)
19339 +{
19340 +    NETERR_RESOLVER *rvp;
19341 +    NETERR_SERVER   *server;
19342 +    int                     status;
19343 +    unsigned long    flags;
19344 +
19345 +    kernel_thread_init("elan3_neterr_resolver");
19346 +    spin_lock (&ResolveRequestLock);
19347 +
19348 +    while ((rvp = ResolveRequestHead) != NULL)
19349 +    {
19350 +       if ((ResolveRequestHead = rvp->Next) == NULL)
19351 +           ResolveRequestTailp = &ResolveRequestHead;
19352 +       
19353 +       spin_unlock (&ResolveRequestLock);
19354 +
19355 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: rvp = %p\n", rvp);
19356 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      Rail          %d\n", rvp->Message.Rail);
19357 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      SrcCapability %s\n", CapabilityString (&rvp->Message.SrcCapability));
19358 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      DstCapability %s\n", CapabilityString (&rvp->Message.DstCapability));
19359 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieAddr    %08x\n", rvp->Message.CookieAddr);
19360 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieVProc   %08x\n", rvp->Message.CookieVProc);
19361 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      NextCookie    %08x\n", rvp->Message.NextCookie);
19362 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      WaitForEop    %08x\n", rvp->Message.WaitForEop);
19363 +       
19364 +       if ((server = FindNeterrServer (rvp->Location.loc_node)) == NULL)
19365 +           status = ECONNREFUSED;
19366 +       else if (ResolveRequestTimeout && ((int)(lbolt - rvp->Timestamp)) > (ResolveRequestTimeout*HZ))
19367 +       {
19368 +           printk ("elan_neterr: rpc to '%s' timedout - context %d killed\n", server->Name, rvp->Message.SrcCapability.cap_mycontext);
19369 +           status = ECONNABORTED;
19370 +       }
19371 +       else
19372 +       {
19373 +           status = CallNeterrServer (server, &rvp->Message);
19374 +
19375 +           DereferenceNeterrServer (server);
19376 +       }
19377 +       
19378 +       if ((status == EINTR || status == ETIMEDOUT) && rvp->Ctxt != NULL)
19379 +       {
19380 +           PRINTF1 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: retry rvp=%p\n", rvp);
19381 +           spin_lock (&ResolveRequestLock);
19382 +           rvp->Next = NULL;
19383 +           *ResolveRequestTailp = rvp;
19384 +           ResolveRequestTailp = &rvp->Next;
19385 +       }
19386 +       else
19387 +       {
19388 +           rvp->Status = status;
19389 +           
19390 +           spin_lock (&rvp->Lock);
19391 +           
19392 +           if (rvp->Ctxt != NULL)
19393 +           {
19394 +               PRINTF2 (rvp->Ctxt, DBG_NETERR, "elan3_neterr_resolver: completing rvp %p for ctxt %p\n", rvp, rvp->Ctxt);
19395 +               spin_lock_irqsave (&rvp->Ctxt->Device->IntrLock, flags);
19396 +               
19397 +               rvp->Completed = TRUE;
19398 +               
19399 +               kcondvar_wakeupall (&rvp->Ctxt->Wait, &rvp->Ctxt->Device->IntrLock);
19400 +               
19401 +               /*
19402 +                * drop the locks out of order since the rvp can get freed
19403 +                * as soon as we drop the IntrLock - so cannot reference the
19404 +                * rvp after this.
19405 +                */
19406 +               
19407 +               spin_unlock (&rvp->Lock);
19408 +               spin_unlock_irqrestore (&rvp->Ctxt->Device->IntrLock, flags);
19409 +           }
19410 +           else
19411 +           {
19412 +               PRINTF2 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: completing rvp %p for deceased ctxt %p\n", rvp, rvp->Ctxt);
19413 +               spin_unlock (&rvp->Lock);
19414 +               FreeNetworkErrorResolver (rvp);
19415 +           }
19416 +           
19417 +           spin_lock (&ResolveRequestLock);
19418 +           ResolveRequestCount--;
19419 +       }
19420 +    }
19421 +
19422 +    ResolveRequestThreads--;
19423 +
19424 +    spin_unlock (&ResolveRequestLock);
19425 +    kernel_thread_exit();
19426 +}
19427 +
19428 +int
19429 +QueueNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp)
19430 +{
19431 +    int                           isdma   = trap->DmaIdentifyTransaction != NULL;
19432 +    E3_IprocTrapHeader_BE *hdrp    = isdma ? trap->DmaIdentifyTransaction : trap->ThreadIdentifyTransaction;
19433 +    E3_uint32              process = isdma ? (hdrp->s.TrAddr & 0xFFFF) : (hdrp->s.TrData0 & 0xFFFF);
19434 +    NETERR_RESOLVER       *rvp;
19435 +
19436 +    PRINTF2 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: process = %d %s\n", process, isdma ? "(dma)" : "(thread)");
19437 +
19438 +    if ((rvp = AllocateNetworkErrorResolver()) == NULL)
19439 +    {
19440 +       PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: cannot allocate resolver\n");
19441 +       return (ENOMEM);
19442 +    }
19443 +
19444 +    rvp->Message.Rail = ctxt->Device->Devinfo.dev_rail;
19445 +
19446 +    krwlock_read (&ctxt->VpLock);
19447 +    rvp->Location = ProcessToLocation (ctxt, NULL, process, &rvp->Message.SrcCapability);
19448 +    krwlock_done (&ctxt->VpLock);
19449 +
19450 +    if (rvp->Location.loc_node == ELAN3_INVALID_NODE)
19451 +    {
19452 +       PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: invalid elan id\n");
19453 +
19454 +       FreeNetworkErrorResolver (rvp);
19455 +       return (EINVAL);
19456 +    }
19457 +
19458 +    rvp->Message.DstCapability = ctxt->Capability;
19459 +    rvp->Message.DstProcess    = elan3_process (ctxt);
19460 +    rvp->Message.WaitForEop    = (trap->WaitForEopTransaction != NULL);
19461 +
19462 +    if (isdma)
19463 +    {
19464 +       rvp->Message.CookieAddr  = 0;
19465 +       rvp->Message.CookieVProc = hdrp->s.TrAddr;
19466 +       rvp->Message.NextCookie  = 0;
19467 +    }
19468 +    else
19469 +    {
19470 +       rvp->Message.CookieAddr  = hdrp->s.TrAddr;
19471 +       rvp->Message.CookieVProc = hdrp->s.TrData0;
19472 +       rvp->Message.NextCookie  = hdrp->s.TrData1;
19473 +    }
19474 +
19475 +    rvp->Completed = FALSE;
19476 +    rvp->Ctxt      = ctxt;
19477 +    rvp->Timestamp = lbolt;
19478 +
19479 +    spin_lock (&ResolveRequestLock);
19480 +
19481 +    rvp->Next = NULL;
19482 +    *ResolveRequestTailp = rvp;
19483 +    ResolveRequestTailp = &rvp->Next;
19484 +    ResolveRequestCount++;
19485 +
19486 +    kcondvar_wakeupone (&ResolveRequestWait, &ResolveRequestLock);
19487 +
19488 +    if (ResolveRequestCount < ResolveRequestThreads || ResolveRequestThreads >= ResolveRequestMaxThreads)
19489 +       spin_unlock (&ResolveRequestLock);
19490 +    else
19491 +    {
19492 +       ResolveRequestThreads++;
19493 +
19494 +       spin_unlock (&ResolveRequestLock);
19495 +       if (kernel_thread_create (elan3_neterr_resolver, NULL) == NULL)
19496 +       {
19497 +           spin_lock (&ResolveRequestLock);
19498 +           ResolveRequestThreads--;
19499 +           spin_unlock (&ResolveRequestLock);
19500 +           
19501 +           if (ResolveRequestThreads == 0)
19502 +           {
19503 +               PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: cannot thread pool\n");
19504 +
19505 +               FreeNetworkErrorResolver (rvp);
19506 +               return (ENOMEM);
19507 +           }
19508 +       }
19509 +    }
19510 +
19511 +    *rvpp = rvp;
19512 +    return (ESUCCESS);
19513 +}
19514 +
19515 +void
19516 +CancelNetworkErrorResolver (NETERR_RESOLVER *rvp)
19517 +{
19518 +    spin_lock (&rvp->Lock);
19519 +
19520 +    PRINTF2 (rvp->Ctxt, DBG_NETERR, "CancelNetworkErrorResolver: rvp=%p %s\n", rvp, rvp->Completed ? "Completed" : "Pending");
19521 +
19522 +    if (rvp->Completed)
19523 +    {
19524 +       spin_unlock (&rvp->Lock);
19525 +       FreeNetworkErrorResolver (rvp);
19526 +    }
19527 +    else
19528 +    {
19529 +       rvp->Ctxt = NULL;
19530 +       spin_unlock (&rvp->Lock);
19531 +    }
19532 +}
19533 +
19534 +static NETERR_FIXUP *
19535 +AllocateNetworkErrorFixup (void)
19536 +{
19537 +    NETERR_FIXUP *nef;
19538 +
19539 +    KMEM_ZALLOC (nef, NETERR_FIXUP *, sizeof (NETERR_FIXUP), TRUE);
19540 +
19541 +    if (nef == (NETERR_FIXUP *) NULL)
19542 +       return (NULL);
19543 +
19544 +    kcondvar_init (&nef->Wait);
19545 +
19546 +    return (nef);
19547 +}
19548 +
19549 +static void
19550 +FreeNetworkErrorFixup (NETERR_FIXUP *nef)
19551 +{
19552 +    kcondvar_destroy (&nef->Wait);
19553 +    KMEM_FREE (nef, sizeof (NETERR_FIXUP));
19554 +}
19555 +
19556 +int
19557 +ExecuteNetworkErrorFixup (NETERR_MSG *msg)
19558 +{
19559 +    ELAN3_DEV      *dev;
19560 +    ELAN3_CTXT   *ctxt;
19561 +    NETERR_FIXUP  *nef;
19562 +    NETERR_FIXUP **predp;
19563 +    int                   rc;
19564 +    unsigned long  flags;
19565 +
19566 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "ExecuteNetworkErrorFixup: msg = %p\n", msg);
19567 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      Rail          %d\n", msg->Rail);
19568 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      SrcCapability %s\n", CapabilityString (&msg->SrcCapability));
19569 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      DstCapability %s\n", CapabilityString (&msg->DstCapability));
19570 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieAddr    %08x\n", msg->CookieAddr);
19571 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieVProc   %08x\n", msg->CookieVProc);
19572 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      NextCookie    %08x\n", msg->NextCookie);
19573 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      WaitForEop    %08x\n", msg->WaitForEop);
19574 +       
19575 +    if ((dev = elan3_device (msg->Rail)) == NULL)
19576 +       return (ESRCH);
19577 +
19578 +    if ((nef = AllocateNetworkErrorFixup()) == NULL)
19579 +       return (ENOMEM);
19580 +
19581 +    if (nef == (NETERR_FIXUP *) NULL)
19582 +       return (ENOMEM);
19583 +    
19584 +    bcopy (msg, &nef->Message, sizeof (NETERR_MSG));
19585 +
19586 +    spin_lock_irqsave (&dev->IntrLock, flags);
19587 +    
19588 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, msg->SrcCapability.cap_mycontext);
19589 +
19590 +    if (ctxt == NULL)
19591 +       rc = ESRCH;
19592 +    else if (!ELAN_CAP_MATCH (&msg->SrcCapability, &ctxt->Capability))
19593 +       rc = EPERM;
19594 +    else
19595 +    {  
19596 +       if (ctxt->Status & CTXT_NO_LWPS)
19597 +           rc = EAGAIN;
19598 +       else
19599 +       {
19600 +           for (predp = &ctxt->NetworkErrorFixups; *predp != NULL; predp = &(*predp)->Next)
19601 +               ;
19602 +           nef->Next = NULL;
19603 +           *predp = nef;
19604 +           
19605 +           kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
19606 +
19607 +           while (! nef->Completed)
19608 +               kcondvar_wait (&nef->Wait, &dev->IntrLock, &flags);
19609 +
19610 +           rc = nef->Status;
19611 +       }
19612 +    }
19613 +    
19614 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
19615 +
19616 +    FreeNetworkErrorFixup (nef);
19617 +
19618 +    return (rc);
19619 +}
19620 +
19621 +void
19622 +CompleteNetworkErrorFixup (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, int status)
19623 +{
19624 +    ELAN3_DEV *dev = ctxt->Device;
19625 +    unsigned long flags;
19626 +
19627 +    PRINTF2 (ctxt, DBG_NETERR, "CompleteNetworkErrorFixup: %p %d\n", nef, status);
19628 +
19629 +    spin_lock_irqsave (&dev->IntrLock, flags);
19630 +
19631 +    nef->Status = status;
19632 +    nef->Completed = TRUE;
19633 +    kcondvar_wakeupone (&nef->Wait, &dev->IntrLock);
19634 +
19635 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
19636 +}
19637 +
19638 +
19639 +static NETERR_SERVER *
19640 +NewNeterrServer (int elanId, struct sockaddr_in *addr, char *name)
19641 +{
19642 +    NETERR_SERVER *server;
19643 +
19644 +    KMEM_ZALLOC (server, NETERR_SERVER *, sizeof (NETERR_SERVER), TRUE);
19645 +    KMEM_ALLOC  (server->Name, char *, strlen (name)+1, TRUE);
19646 +
19647 +    bcopy (addr, &server->Addr, sizeof (struct sockaddr_in));
19648 +    bcopy (name, server->Name, strlen (name)+1);
19649 +
19650 +    server->ElanId   = elanId;
19651 +    server->RefCount = 1;
19652 +    
19653 +    return (server);
19654 +}
19655 +
19656 +static void
19657 +DeleteNeterrServer (NETERR_SERVER *server)
19658 +{
19659 +    KMEM_FREE (server->Name, strlen(server->Name)+1);
19660 +    KMEM_FREE (server, sizeof (NETERR_SERVER));
19661 +}
19662 +
19663 +static NETERR_SERVER *
19664 +FindNeterrServer (int elanId)
19665 +{
19666 +    NETERR_SERVER *server;
19667 +    
19668 +    kmutex_lock (&NeterrServerLock);
19669 +    
19670 +    for (server = NeterrServerHash[NETERR_HASH(elanId)]; server != NULL; server = server->Next)
19671 +       if (server->ElanId == elanId)
19672 +           break;
19673 +
19674 +    if (server != NULL)
19675 +       server->RefCount++;
19676 +    kmutex_unlock (&NeterrServerLock);
19677 +
19678 +    return (server);
19679 +}
19680 +
19681 +static void
19682 +DereferenceNeterrServer (NETERR_SERVER *server)
19683 +{
19684 +    kmutex_lock (&NeterrServerLock);
19685 +    if ((--server->RefCount) == 0)
19686 +       DeleteNeterrServer (server);
19687 +    kmutex_unlock  (&NeterrServerLock);
19688 +}
19689 +
19690 +int
19691 +AddNeterrServer (int elanId, struct sockaddr_in *addr, char *name)
19692 +{
19693 +    NETERR_SERVER *server;
19694 +    NETERR_SERVER *old;
19695 +    int            hashval = NETERR_HASH(elanId);
19696 +
19697 +    server = NewNeterrServer (elanId, addr, name);
19698 +    
19699 +    if (server == NULL)
19700 +       return (ENOMEM);
19701 +    
19702 +    kmutex_lock (&NeterrServerLock);
19703 +    for (old = NeterrServerHash[hashval]; old != NULL; old = old->Next)
19704 +       if (old->ElanId == elanId)
19705 +           break;
19706 +    
19707 +    /* remove "old" server from hash table */
19708 +    if (old != NULL)
19709 +    {
19710 +       if (old->Prev)
19711 +           old->Prev->Next = old->Next;
19712 +       else
19713 +           NeterrServerHash[hashval] = old->Next;
19714 +       if (old->Next)
19715 +           old->Next->Prev = old->Prev;
19716 +    }
19717 +
19718 +    /* insert "new" server into hash table */
19719 +    if ((server->Next = NeterrServerHash[hashval]) != NULL)
19720 +       server->Next->Prev = server;
19721 +    server->Prev = NULL;
19722 +    NeterrServerHash[hashval] = server;
19723 +
19724 +    kmutex_unlock (&NeterrServerLock);
19725 +
19726 +    if (old != NULL)
19727 +       DereferenceNeterrServer (old);
19728 +    
19729 +    return (ESUCCESS);
19730 +}
19731 +
19732 +int
19733 +AddNeterrServerSyscall (int elanId, void *addrp, void *namep, char *unused)
19734 +{
19735 +    struct sockaddr_in addr;
19736 +    char              *name;
19737 +    int                error;
19738 +    int                nob;
19739 +
19740 +    /* Sanity check the supplied elanId argument */
19741 +    if (elanId < 0)
19742 +       return ( set_errno(EINVAL) );
19743 +
19744 +    KMEM_ALLOC (name, caddr_t, SYS_NMLN, TRUE);
19745 +    
19746 +    if (copyin ((caddr_t) addrp, (caddr_t) &addr, sizeof (addr)) ||
19747 +       copyinstr ((caddr_t) namep, name, SYS_NMLN, &nob))
19748 +    {
19749 +       error = EFAULT;
19750 +    }
19751 +    else
19752 +    {
19753 +       PRINTF2 (DBG_DEVICE, DBG_NETERR, "AddNeterrServer: '%s' at elanid %d\n", name, elanId);
19754 +
19755 +       error = AddNeterrServer (elanId, &addr, name);
19756 +    }
19757 +    KMEM_FREE (name, SYS_NMLN);
19758 +
19759 +    return (error ? set_errno(error) : ESUCCESS);
19760 +}
19761 +
19762 +
19763 +#if defined(DIGITAL_UNIX)
19764 +static int
19765 +CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg)
19766 +{
19767 +    cred_t        *cr = crget();
19768 +    struct rpc_err  rpcerr;
19769 +    extern cred_t  *kcred;
19770 +    struct timeval  wait;
19771 +    enum clnt_stat  rc;
19772 +    int                    status;
19773 +    CLIENT         *clnt;
19774 +    int             error;
19775 +
19776 +    PRINTF4 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) - family=%d port=%d addr=%08x\n", server->Name,
19777 +            server->Addr.sin_family, server->Addr.sin_port, server->Addr.sin_addr.s_addr);
19778 +
19779 +    if ((clnt = clntkudp_create (&server->Addr, (struct sockaddr_in *)0, NETERR_PROGRAM, NETERR_VERSION, 1, cr)) == NULL)
19780 +    {
19781 +       PRINTF1 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): clntkudp_create error\n", server->Name);
19782 +
19783 +       return (ENOMEM);
19784 +    }
19785 +    
19786 +    wait.tv_sec  = NETERR_RPC_TIMEOUT;
19787 +    wait.tv_usec = 0;
19788 +    
19789 +    PRINTF2 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): CLNT_CALL timeout = %d\n", server->Name, NETERR_RPC_TIMEOUT);
19790 +   
19791 +    rc = CLNT_CALL(clnt, NETERR_FIXUP_RPC, xdr_neterr_msg, (void *)msg, xdr_int, (void *) &status, wait);
19792 +
19793 +    PRINTF3 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): CLNT_CALL -> %d (%s)\n", server->Name, rc, clnt_sperrno(rc));;
19794 +
19795 +    switch (rc)
19796 +    {
19797 +    case RPC_SUCCESS:
19798 +       break;
19799 +
19800 +    case RPC_INTR:
19801 +       status = EINTR;
19802 +       break;
19803 +
19804 +    case RPC_TIMEDOUT:
19805 +       status = ETIMEDOUT;
19806 +       break;
19807 +
19808 +    default:
19809 +       printf ("CallNeterrServer(%s): %s\n", server->Name, clnt_sperrno(status));
19810 +       status = ENOENT;
19811 +       break;
19812 +    }
19813 +
19814 +    CLNT_DESTROY(clnt);
19815 +
19816 +    crfree(cr);
19817 +    
19818 +    ASSERT(rc == RPC_SUCCESS || status != 0);
19819 +
19820 +    PRINTF2 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): status=%d\n", server->Name, status);
19821 +
19822 +    return (status);
19823 +}
19824 +#endif
19825 +
19826 +#if defined(LINUX)
19827 +
19828 +#define xdrsize(type) ((sizeof(type) + 3) >> 2)
19829 +
19830 +static int
19831 +xdr_error(struct rpc_rqst *req, u32 *p, void *dummy)
19832 +{
19833 +    return -EIO;
19834 +}
19835 +
19836 +static int
19837 +xdr_decode_int(struct rpc_rqst *req, u32 *p, int *res)
19838 +{ 
19839 +    *res = ntohl(*p++);
19840 +    return 0;
19841 +}
19842 +
19843 +#define XDR_capability_sz ((12 + BT_BITOUL(ELAN3_MAX_VPS)) * sizeof (u32))
19844 +
19845 +static int
19846 +xdr_encode_capability(u32 *p, ELAN_CAPABILITY *cap)
19847 +{
19848 +    u32 *pp = p;
19849 +
19850 +    /* basic xdr unit is u32 - for opaque types we must round up to that */
19851 +    memcpy(p, &cap->cap_userkey, sizeof(cap->cap_userkey));
19852 +    p += xdrsize(cap->cap_userkey);
19853 +
19854 +    *p++ = htonl(cap->cap_version);
19855 +    ((u16 *) (p++))[1] = htons(cap->cap_type);
19856 +    *p++ = htonl(cap->cap_lowcontext);
19857 +    *p++ = htonl(cap->cap_highcontext);
19858 +    *p++ = htonl(cap->cap_mycontext);
19859 +    *p++ = htonl(cap->cap_lownode);
19860 +    *p++ = htonl(cap->cap_highnode);
19861 +    *p++ = htonl(cap->cap_railmask);
19862 +
19863 +    memcpy(p, &cap->cap_bitmap[0], sizeof(cap->cap_bitmap));
19864 +    p += xdrsize(cap->cap_bitmap);
19865 +
19866 +    ASSERT (((unsigned long) p - (unsigned long) pp) == XDR_capability_sz);
19867 +
19868 +    return (p - pp);
19869 +}
19870 +
19871 +
19872 +#define XDR_neterr_sz  (((1 + 5) * sizeof (u32)) + (2*XDR_capability_sz))
19873 +
19874 +static int
19875 +xdr_encode_neterr_msg(struct rpc_rqst *req, u32 *p, NETERR_MSG *msg)
19876 +{
19877 +    u32 *pp = p;
19878 +
19879 +    *p++ = htonl(msg->Rail);
19880 +
19881 +    p += xdr_encode_capability(p, &msg->SrcCapability);
19882 +    p += xdr_encode_capability(p, &msg->DstCapability);
19883 +
19884 +    *p++ = htonl(msg->DstProcess);
19885 +    *p++ = htonl(msg->CookieAddr);
19886 +    *p++ = htonl(msg->CookieVProc);
19887 +    *p++ = htonl(msg->NextCookie);
19888 +    *p++ = htonl(msg->WaitForEop);
19889 +
19890 +    ASSERT (((unsigned long) p - (unsigned long) pp) == XDR_neterr_sz);
19891 +
19892 +    req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
19893 +
19894 +    return 0;
19895 +}
19896 +
19897 +static struct rpc_procinfo neterr_procedures[2] = 
19898 +{
19899 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
19900 +#      define RPC_ID_NULL      "neterr_null"
19901 +#      define RPC_ID_FIXUP_RPC "neterr_fixup_rpc"
19902 +#else
19903 +#      define RPC_ID_NULL      NETERR_NULL_RPC
19904 +#      define RPC_ID_FIXUP_RPC NETERR_FIXUP_RPC
19905 +#endif
19906 +    {  
19907 +       RPC_ID_NULL,                    /* procedure name or number*/
19908 +       (kxdrproc_t) xdr_error,         /* xdr encode fun */
19909 +        (kxdrproc_t) xdr_error,        /* xdr decode fun */
19910 +       0,                              /* req buffer size */
19911 +       0,                              /* call count */
19912 +    },
19913 +    {  
19914 +       RPC_ID_FIXUP_RPC,
19915 +        (kxdrproc_t) xdr_encode_neterr_msg,
19916 +        (kxdrproc_t) xdr_decode_int,
19917 +       XDR_neterr_sz,
19918 +       0,                      
19919 +    },
19920 +};
19921 +
19922 +static struct rpc_version neterr_version1 = 
19923 +{
19924 +    1,                         /* version */
19925 +    2,                         /* number of procedures */
19926 +    neterr_procedures  /* procedures */
19927 +};
19928 +
19929 +static struct rpc_version *neterr_version[] = 
19930 +{
19931 +    NULL,
19932 +    &neterr_version1,
19933 +};
19934 +
19935 +static struct rpc_stat neterr_stats;
19936 +
19937 +static struct rpc_program neterr_program = 
19938 +{
19939 +    NETERR_SERVICE,
19940 +    NETERR_PROGRAM,
19941 +    sizeof(neterr_version)/sizeof(neterr_version[0]),
19942 +    neterr_version,
19943 +    &neterr_stats,
19944 +};
19945 +
19946 +static int
19947 +CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg)
19948 +{
19949 +    struct rpc_xprt   *xprt;
19950 +    struct rpc_clnt   *clnt;
19951 +    struct rpc_timeout to;
19952 +    int                rc, status;
19953 +    
19954 +    PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s)\n", server->Name);
19955 +
19956 +    xprt_set_timeout(&to, 1, NETERR_RPC_TIMEOUT * HZ);
19957 +
19958 +    if ((xprt = xprt_create_proto(IPPROTO_UDP, &server->Addr, &to)) == NULL)
19959 +    {
19960 +       PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) xprt_create_proto failed\n", server->Name);
19961 +       return EFAIL;
19962 +    }
19963 +
19964 +    if ((clnt = rpc_create_client(xprt, server->Name, &neterr_program, NETERR_VERSION, RPC_AUTH_NULL)) == NULL)
19965 +    {
19966 +       PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) rpc_create_client failed\n", server->Name);
19967 +       xprt_destroy (xprt);
19968 +       
19969 +       return EFAIL;
19970 +    }
19971 +
19972 +    clnt->cl_softrtry = 1;
19973 +    clnt->cl_chatty   = 0;
19974 +    clnt->cl_oneshot  = 1;
19975 +    clnt->cl_intr     = 0;
19976 +
19977 +    if ((rc = rpc_call(clnt, NETERR_FIXUP_RPC, msg, &status, 0)) < 0)
19978 +    {
19979 +       /* RPC error has occured - determine whether we should retry */
19980 +
19981 +       status = ETIMEDOUT;
19982 +    }
19983 +
19984 +    PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): -> %d\n", server->Name, status);
19985 +
19986 +    return (status);
19987 +}
19988 +
19989 +#endif /* defined(LINUX) */
19990 +
19991 +/*
19992 + * Local variables:
19993 + * c-file-style: "stroustrup"
19994 + * End:
19995 + */
19996 Index: linux-2.4.21/drivers/net/qsnet/elan3/procfs_linux.c
19997 ===================================================================
19998 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/procfs_linux.c    2004-02-23 16:02:56.000000000 -0500
19999 +++ linux-2.4.21/drivers/net/qsnet/elan3/procfs_linux.c 2005-06-01 23:12:54.589440624 -0400
20000 @@ -0,0 +1,195 @@
20001 +/*
20002 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20003 + *
20004 + *    For licensing information please see the supplied COPYING file
20005 + *
20006 + */
20007 +
20008 +#ident "@(#)$Id: procfs_linux.c,v 1.21 2003/09/24 13:57:25 david Exp $"
20009 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/procfs_linux.c,v $*/
20010 +
20011 +#include <qsnet/kernel.h>
20012 +
20013 +#include <elan3/elanregs.h>
20014 +#include <elan3/elandev.h>
20015 +#include <elan3/elandebug.h>
20016 +#include <elan3/elan3mmu.h>
20017 +#include <elan3/elanvp.h>
20018 +
20019 +#include <linux/module.h>
20020 +#include <linux/ctype.h>
20021 +
20022 +#include <qsnet/procfs_linux.h>
20023 +
20024 +struct proc_dir_entry *elan3_procfs_root;
20025 +struct proc_dir_entry *elan3_config_root;
20026 +
20027 +static int
20028 +proc_read_position (char *page, char **start, off_t off,
20029 +                   int count, int *eof, void *data)
20030 +{
20031 +    ELAN3_DEV *dev = (ELAN3_DEV *) data;
20032 +    int       len;
20033 +
20034 +    if (dev->Position.pos_mode == ELAN_POS_UNKNOWN)
20035 +       len = sprintf (page, "<unknown>\n");
20036 +    else
20037 +       len = sprintf (page, 
20038 +                      "NodeId                 %d\n"
20039 +                      "NumLevels              %d\n"
20040 +                      "NumNodes               %d\n",
20041 +                      dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes);
20042 +
20043 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
20044 +}
20045 +
20046 +static int
20047 +proc_write_position (struct file *file, const char *buf, unsigned long count, void *data)
20048 +{
20049 +    ELAN3_DEV *dev      = (ELAN3_DEV *) data;
20050 +    unsigned  nodeid   = ELAN3_INVALID_NODE;
20051 +    unsigned  numnodes = 0;
20052 +    char     *page, *p;
20053 +    int       res;
20054 +
20055 +    if (count == 0)
20056 +       return (0);
20057 +
20058 +    if (count >= PAGE_SIZE)
20059 +       return (-EINVAL);
20060 +
20061 +    if ((page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
20062 +       return (-ENOMEM);
20063 +
20064 +    MOD_INC_USE_COUNT;
20065 +
20066 +    if (copy_from_user (page, buf, count))
20067 +       res = -EFAULT;
20068 +    else
20069 +    {
20070 +       page[count] = '\0';
20071 +       
20072 +       if (page[count-1] == '\n')
20073 +           page[count-1] = '\0';
20074 +
20075 +       if (! strcmp (page, "<unknown>"))
20076 +       {
20077 +           dev->Position.pos_mode      = ELAN_POS_UNKNOWN;
20078 +           dev->Position.pos_nodeid    = ELAN3_INVALID_NODE;
20079 +           dev->Position.pos_nodes     = 0;
20080 +           dev->Position.pos_levels    = 0;
20081 +       }
20082 +       else
20083 +       {
20084 +           for (p = page; *p; )
20085 +           {
20086 +               while (isspace (*p))
20087 +                   p++;
20088 +               
20089 +               if (! strncmp (p, "NodeId=", strlen("NodeId=")))
20090 +                   nodeid   = simple_strtoul (p + strlen ("NodeId="), NULL, 0);
20091 +               if (! strncmp (p, "NumNodes=", strlen ("NumNodes=")))
20092 +                   numnodes = simple_strtoul (p + strlen ("NumNodes="), NULL, 0);
20093 +               
20094 +               while (*p && !isspace(*p))
20095 +                   p++;
20096 +           }
20097 +
20098 +           if (ComputePosition (&dev->Position, nodeid, numnodes, dev->Devinfo.dev_num_down_links_value) != 0)
20099 +               printk ("elan%d: invalid values for NodeId=%d NumNodes=%d\n", dev->Instance, nodeid, numnodes);
20100 +           else
20101 +               printk ("elan%d: setting NodeId=%d NumNodes=%d NumLevels=%d\n", dev->Instance, dev->Position.pos_nodeid,
20102 +                       dev->Position.pos_nodes, dev->Position.pos_levels);
20103 +       }
20104 +    }
20105 +
20106 +    MOD_DEC_USE_COUNT;
20107 +    free_page ((unsigned long) page);
20108 +
20109 +    return (count);
20110 +}
20111 +
20112 +
20113 +void
20114 +elan3_procfs_device_init (ELAN3_DEV *dev)
20115 +{
20116 +    struct proc_dir_entry *dir, *p;
20117 +    char name[NAME_MAX];
20118 +
20119 +    sprintf (name, "device%d", dev->Instance);
20120 +    dir = dev->Osdep.procdir = proc_mkdir (name, elan3_procfs_root);
20121 +
20122 +    if ((p = create_proc_entry ("position", 0, dir)) != NULL)
20123 +    {
20124 +       p->read_proc  = proc_read_position;
20125 +       p->write_proc = proc_write_position;
20126 +       p->data       = dev;
20127 +       p->owner      = THIS_MODULE;
20128 +    }
20129 +
20130 +}
20131 +
20132 +void
20133 +elan3_procfs_device_fini (ELAN3_DEV *dev)
20134 +{
20135 +    struct proc_dir_entry *dir = dev->Osdep.procdir;
20136 +    char name[NAME_MAX];
20137 +
20138 +    remove_proc_entry ("position", dir);
20139 +
20140 +    sprintf (name, "device%d", dev->Instance);
20141 +    remove_proc_entry (name, elan3_procfs_root);
20142 +}
20143 +
20144 +void
20145 +elan3_procfs_init()
20146 +{
20147 +    extern int eventint_punt_loops;
20148 +    extern int ResolveRequestTimeout;
20149 +
20150 +    elan3_procfs_root = proc_mkdir("elan3",  qsnet_procfs_root);
20151 +
20152 +    elan3_config_root = proc_mkdir("config", elan3_procfs_root);
20153 +
20154 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug",           &elan3_debug,           0);
20155 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug_console",   &elan3_debug_console,   0);
20156 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug_buffer",    &elan3_debug_buffer,    0);
20157 +    qsnet_proc_register_hex (elan3_config_root, "elan3mmu_debug",      &elan3mmu_debug,      0);
20158 +    qsnet_proc_register_int (elan3_config_root, "eventint_punt_loops", &eventint_punt_loops, 0);
20159 +    qsnet_proc_register_int (elan3_config_root, "neterr_timeout",      &ResolveRequestTimeout, 0);
20160 +
20161 +#if defined(__ia64__)
20162 +    {
20163 +       extern int enable_sdram_writecombining;
20164 +       qsnet_proc_register_int (elan3_config_root, "enable_sdram_writecombining", &enable_sdram_writecombining, 0);
20165 +    }
20166 +#endif
20167 +}
20168 +
20169 +void
20170 +elan3_procfs_fini()
20171 +{
20172 +#if defined(__ia64__)
20173 +    remove_proc_entry ("enable_sdram_writecombining", elan3_config_root);
20174 +#endif
20175 +    remove_proc_entry ("neterr_timeout",      elan3_config_root);
20176 +    remove_proc_entry ("eventint_punt_loops", elan3_config_root);
20177 +    remove_proc_entry ("elan3mmu_debug",      elan3_config_root);
20178 +    remove_proc_entry ("elan3_debug_buffer",    elan3_config_root);
20179 +    remove_proc_entry ("elan3_debug_console",   elan3_config_root);
20180 +    remove_proc_entry ("elan3_debug",           elan3_config_root);
20181 +
20182 +    remove_proc_entry ("config",  elan3_procfs_root);
20183 +    remove_proc_entry ("version", elan3_procfs_root);
20184
20185 +    remove_proc_entry ("elan3",  qsnet_procfs_root);
20186 +}
20187 +
20188 +EXPORT_SYMBOL(elan3_procfs_root);
20189 +EXPORT_SYMBOL(elan3_config_root);
20190 +
20191 +/*
20192 + * Local variables:
20193 + * c-file-style: "stroustrup"
20194 + * End:
20195 + */
20196 Index: linux-2.4.21/drivers/net/qsnet/elan3/quadrics_version.h
20197 ===================================================================
20198 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/quadrics_version.h        2004-02-23 16:02:56.000000000 -0500
20199 +++ linux-2.4.21/drivers/net/qsnet/elan3/quadrics_version.h     2005-06-01 23:12:54.589440624 -0400
20200 @@ -0,0 +1 @@
20201 +#define QUADRICS_VERSION "4.30qsnet"
20202 Index: linux-2.4.21/drivers/net/qsnet/elan3/routecheck.c
20203 ===================================================================
20204 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/routecheck.c      2004-02-23 16:02:56.000000000 -0500
20205 +++ linux-2.4.21/drivers/net/qsnet/elan3/routecheck.c   2005-06-01 23:12:54.590440472 -0400
20206 @@ -0,0 +1,313 @@
20207 +/*
20208 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20209 + *
20210 + *    For licensing information please see the supplied COPYING file
20211 + *
20212 + */
20213 +
20214 +/* ------------------------------------------------------------- */
20215 +
20216 +#include <qsnet/kernel.h>
20217 +
20218 +#include <elan3/elanregs.h>
20219 +#include <elan3/elandev.h>
20220 +#include <elan3/elanvp.h>
20221 +#include <elan3/elan3mmu.h>
20222 +#include <elan3/elanctxt.h>
20223 +#include <elan3/elandebug.h>
20224 +#include <elan3/urom_addrs.h>
20225 +#include <elan3/thread.h>
20226 +#include <elan3/vmseg.h>
20227 +
20228 +/* ---------------------------------------------------------------------- */
20229 +typedef struct elan3_net_location {
20230 +    int netid;
20231 +    int plane;
20232 +    int level;
20233 +} ELAN3_NET_LOCATION;
20234 +/* ---------------------------------------------------------------------- */
20235 +#define FLIT_LINK_ARRAY_MAX (ELAN3_MAX_LEVELS*2)
20236 +/* ---------------------------------------------------------------------- */
20237 +int 
20238 +elan3_route_follow_link( ELAN3_CTXT *ctxt, ELAN3_NET_LOCATION *loc, int link)
20239 +{
20240 +    ELAN_POSITION *pos = &ctxt->Position;
20241 +
20242 +    if ((link<0) || (link>7)) 
20243 +    {
20244 +       PRINTF1 (ctxt, DBG_VP, "elan3_route_follow_link: link (%d) out of range \n",link);
20245 +       return (ELAN3_ROUTE_INVALID);
20246 +    }   
20247 +
20248 +    /* going up or down ? */
20249 +    if ( link >= pos->pos_arity[loc->level] ) 
20250 +    {
20251 +       /* Up */
20252 +       if (loc->level >= pos->pos_levels)
20253 +           loc->plane = 0;
20254 +       else
20255 +       {
20256 +           if ((loc->level == 1) && (pos->pos_arity[0]  == 8)) /* oddness in some machines ie 512 */
20257 +               loc->plane = (16 * ( loc->plane / 8 )) + (4 * ( loc->plane % 4)) 
20258 +                   +(link - pos->pos_arity[loc->level]);
20259 +           else
20260 +               loc->plane = (loc->plane * (8 - pos->pos_arity[loc->level]))
20261 +                   +(link - pos->pos_arity[loc->level]);
20262 +       }
20263 +       loc->level--; 
20264 +       if ( loc->level < 0 )
20265 +       {
20266 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_follow_link: link goes off the top\n");
20267 +           return (ELAN3_ROUTE_INVALID_LEVEL);
20268 +       }
20269 +       loc->netid = loc->netid / pos->pos_arity[loc->level];
20270 +    }
20271 +    else
20272 +    {
20273 +       /* going down */
20274 +       if ((loc->level == 0) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */
20275 +           loc->netid = link % 2;
20276 +       else
20277 +           loc->netid =(loc->netid * pos->pos_arity[loc->level])+link;
20278 +
20279 +       loc->level++;
20280 +       if (loc->level > pos->pos_levels)
20281 +       {
20282 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_follow_link: link goes off the bottom\n");
20283 +           return (ELAN3_ROUTE_INVALID_LEVEL); 
20284 +       }
20285 +
20286 +       if ( loc->level >= (pos->pos_levels-1))
20287 +           loc->plane = 0;
20288 +       else
20289 +           if ((loc->level == 1) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */
20290 +               loc->plane = (((loc->plane)>>2)*2) - ( ((loc->plane)>>2) & 3  ) + ((link<2)?0:4); /* ((p/4) % 4) */
20291 +           else 
20292 +               loc->plane = loc->plane/(8-pos->pos_arity[loc->level]);
20293 +    }
20294 +    return (ELAN3_ROUTE_SUCCESS);
20295 +}
20296 +/* ---------------------------------------------------------------------- */
20297 +int /* assumes they are connected, really only used for finding the MyLink */
20298 +elan3_route_get_mylink (ELAN_POSITION *pos, ELAN3_NET_LOCATION *locA, ELAN3_NET_LOCATION *locB)
20299 +{
20300 +    /* whats the My Link for locA to LocB */
20301 +    if ( locA->level > locB->level ) 
20302 +       return locB->plane - (locA->plane * (8 - pos->pos_arity[locA->level])) + pos->pos_arity[locA->level];
20303 +    
20304 +    return  locB->netid - (locA->netid * pos->pos_arity[locA->level]);
20305 +}
20306 +/* ---------------------------------------------------------------------- */
20307 +#define FIRST_GET_HIGH_PRI(FLIT)    (FLIT & FIRST_HIGH_PRI)
20308 +#define FIRST_GET_AGE(FLIT)         ((FLIT & FIRST_AGE(15))>>11)
20309 +#define FIRST_GET_TIMEOUT(FLIT)     ((FLIT & FIRST_TIMEOUT(3))>>9)
20310 +#define FIRST_GET_NEXT(FLIT)        ((FLIT & FIRST_PACKED(3))>>7)
20311 +#define FIRST_GET_ROUTE(FLIT)       (FLIT & 0x7f)
20312 +#define FIRST_GET_BCAST(FLIT)       (FLIT & 0x40)
20313 +#define FIRST_GET_IS_INVALID(FLIT)  ((FLIT & 0x78) == 0x08)
20314 +#define FIRST_GET_TYPE(FLIT)        ((FLIT & 0x30)>>4)
20315 +#define PRF_GET_ROUTE(FLIT,N)       ((FLIT >> (N*4)) & 0x0F)
20316 +#define PRF_GET_IS_MYLINK(ROUTE)    (ROUTE == PACKED_MYLINK)
20317 +#define PRF_GET_IS_NORMAL(ROUTE)    (ROUTE & 0x8)
20318 +#define PRF_GET_NORMAL_LINK(ROUTE)  (ROUTE & 0x7)
20319 +#define PRF_MOVE_ON(INDEX,NEXT)     do { if (NEXT==3) {NEXT=0;INDEX++;} else {NEXT++; }} while (0);
20320 +/* ---------------------------------------------------------------------- */
20321 +int /* turn level needed or -1 if not possible */
20322 +elan3_route_get_min_turn_level( ELAN_POSITION *pos, int nodeId)
20323 +{
20324 +    int l,range = 1;
20325 +
20326 +    for(l=pos->pos_levels-1;l>=0;l--)
20327 +    {
20328 +       range = range * pos->pos_arity[l];
20329 +       
20330 +       if ( ((pos->pos_nodeid - (pos->pos_nodeid % range)) <= nodeId ) 
20331 +            && (nodeId <= (pos->pos_nodeid - (pos->pos_nodeid % range)+range -1))) 
20332 +           return l;
20333 +    }
20334 +    return -1;
20335 +}
20336 +/* ---------------------------------------------------------------------- */
20337 +int  
20338 +elan3_route_check(ELAN3_CTXT *ctxt, E3_uint16 *flits, int destNodeId)
20339 +{
20340 +    ELAN3_NET_LOCATION lastLoc,currLoc;
20341 +    int               err;
20342 +    int               turnLevel;
20343 +    int               goingDown;
20344 +    int               lnk,index,next,val;
20345 +    ELAN_POSITION    *pos = &ctxt->Position;
20346 +   
20347 +    /* is the dest possible */
20348 +    if ( (destNodeId <0 ) || (destNodeId >= pos->pos_nodes))
20349 +       return  (ELAN3_ROUTE_PROC_RANGE);
20350 +
20351 +    /* 
20352 +     * walk the route, 
20353 +     * - to see if we get there 
20354 +     * - checking we dont turn around 
20355 +     */
20356 +    currLoc.netid = pos->pos_nodeid;         /* the elan */
20357 +    currLoc.plane = 0;
20358 +    currLoc.level = pos->pos_levels;
20359 +
20360 +    turnLevel = currLoc.level; /* track the how far the route goes in */
20361 +    goingDown = 0;             /* once set we cant go up again ie only one change of direction */
20362 +
20363 +    /* move onto the network from the elan */
20364 +    if ((err=elan3_route_follow_link(ctxt,&currLoc,4)) != ELAN3_ROUTE_SUCCESS) 
20365 +    {
20366 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: initial elan3_route_follow_link failed\n");
20367 +       return err;
20368 +    }
20369 +    /* do the first part of flit */
20370 +    switch ( FIRST_GET_TYPE(flits[0]) ) 
20371 +    {
20372 +    case 0  /* sent */   : { lnk = (flits[0] & 0x7);                                 break; }    
20373 +    case PACKED_MYLINK  : { lnk = pos->pos_nodeid % pos->pos_arity[pos->pos_levels-1];    break; }
20374 +    case PACKED_ADAPTIVE : { lnk = 7; /* all routes are the same just check one */    break; }
20375 +    default : 
20376 +       PRINTF1 (ctxt, DBG_VP, "elan3_route_check: unexpected first flit (%d)\n",flits[0]);
20377 +       return (ELAN3_ROUTE_INVALID); 
20378 +    }
20379 +    
20380 +    /* move along this link and check new location */
20381 +    memcpy(&lastLoc,&currLoc,sizeof(ELAN3_NET_LOCATION)); /* keep track of last loc */
20382 +    if ((err=elan3_route_follow_link(ctxt,&currLoc,lnk)) != ELAN3_ROUTE_SUCCESS ) 
20383 +    {
20384 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: elan3_route_follow_link failed\n");
20385 +       return err;
20386 +    }
20387 +    if ((currLoc.level > pos->pos_levels) || (currLoc.level < 0 )) 
20388 +    { 
20389 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route leaves machine\n");
20390 +       return (ELAN3_ROUTE_INVALID_LEVEL);
20391 +    }
20392 +    if ( lastLoc.level < currLoc.level ) 
20393 +    {
20394 +       turnLevel = lastLoc.level;
20395 +       goingDown = 1;
20396 +    }
20397 +    else 
20398 +    {
20399 +       if (turnLevel > currLoc.level)
20400 +           turnLevel =  currLoc.level;
20401 +       if  (goingDown) 
20402 +       {
20403 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route ocilated\n");
20404 +           return (ELAN3_ROUTE_OCILATES);
20405 +       }
20406 +    }   
20407 +
20408 +    /* loop on doing the remaining flits */
20409 +    index = 1;
20410 +    next  = FIRST_GET_NEXT(flits[0]);
20411 +    val   = PRF_GET_ROUTE(flits[index],next);
20412 +    while(val)
20413 +    {
20414 +       if (PRF_GET_IS_NORMAL(val) ) 
20415 +           lnk = PRF_GET_NORMAL_LINK(val);
20416 +       else
20417 +       {
20418 +         switch ( val ) 
20419 +         {
20420 +         case  PACKED_MYLINK : 
20421 +         {
20422 +             lnk = elan3_route_get_mylink(pos, &currLoc,&lastLoc);
20423 +             break;
20424 +         }
20425 +         default : 
20426 +             PRINTF1 (ctxt, DBG_VP, "elan3_route_check: unexpected packed flit (%d)\n",val);
20427 +             return (ELAN3_ROUTE_INVALID);
20428 +         }
20429 +       }
20430 +
20431 +       /* move along this link and check new location */
20432 +       memcpy(&lastLoc,&currLoc,sizeof(ELAN3_NET_LOCATION)); /* keep track of last loc */
20433 +       if ((err=elan3_route_follow_link(ctxt,&currLoc,lnk)) != ELAN3_ROUTE_SUCCESS) 
20434 +           return err;
20435 +       
20436 +       if ((currLoc.level > pos->pos_levels ) || ( currLoc.level < 0 ))
20437 +       { 
20438 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route leaves machine\n");
20439 +           return (ELAN3_ROUTE_INVALID_LEVEL);
20440 +       }
20441 +
20442 +       if ( lastLoc.level < currLoc.level ) 
20443 +           goingDown = 1;
20444 +       else 
20445 +       {
20446 +           if (turnLevel > currLoc.level)
20447 +               turnLevel =  currLoc.level;
20448 +           if  (goingDown) 
20449 +           {
20450 +               PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route ocilated\n");
20451 +               return (ELAN3_ROUTE_OCILATES);
20452 +           }
20453 +       }   
20454 +
20455 +       /* move to next part of flit */
20456 +       PRF_MOVE_ON(index,next);
20457 +       if ( index >= MAX_FLITS)
20458 +       {
20459 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route too long\n");
20460 +           return (ELAN3_ROUTE_TOO_LONG);
20461 +       }
20462 +       /* extract the new value */
20463 +       val = PRF_GET_ROUTE(flits[index],next);
20464 +    }
20465 +
20466 +    /* have we got to where we want ? */
20467 +    if ((currLoc.level != pos->pos_levels) || (currLoc.netid != destNodeId))
20468 +    {
20469 +       PRINTF2 (ctxt, DBG_VP, "elan3_route_check: goes to %d instead of %d\n",currLoc.netid , destNodeId );
20470 +       return (ELAN3_ROUTE_WRONG_DEST);
20471 +    }
20472 +
20473 +    /*
20474 +     * there is the case of src == dest 
20475 +     * getTurnLevel returns pos->pos_levels, and turnLevel is (pos->pos_levels -1) 
20476 +     * then we assume they really want to  go onto the network.
20477 +     * otherwise we check that the turn at the appriate level
20478 +     */
20479 +    if ( (pos->pos_nodeid != destNodeId) || ( turnLevel != (pos->pos_levels -1)) )
20480 +    {
20481 +       int lev;
20482 +       if ((lev = elan3_route_get_min_turn_level(pos,destNodeId)) == -1) 
20483 +       {
20484 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: cant calculate turn level\n");
20485 +           return (ELAN3_ROUTE_INVALID); /* not sure this can happen here as checks above should protect me */
20486 +       }
20487 +       if (turnLevel != lev) 
20488 +       {
20489 +           PRINTF2 (ctxt, DBG_VP, "elan3_route_check: turn level should be %d but is %d \n", lev, turnLevel);
20490 +           return (ELAN3_ROUTE_TURN_LEVEL);
20491 +       }
20492 +    }
20493 +    return (ELAN3_ROUTE_SUCCESS);
20494 +}
20495 +/* ---------------------------------------------------------------------- */
20496 +int
20497 +elan3_route_broadcast_check(ELAN3_CTXT *ctxt , E3_uint16 *flits, int lowNode, int highNode ) 
20498 +{
20499 +    E3_uint16 flitsTmp[MAX_FLITS];
20500 +    int       nflits,i;
20501 +    
20502 +    nflits = GenerateRoute (&ctxt->Position, flitsTmp, lowNode, highNode, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
20503 +    
20504 +    for(i=0;i<nflits;i++)
20505 +       if ( flitsTmp[i] != flits[i] ) 
20506 +       {
20507 +           PRINTF3 (ctxt, DBG_VP, "elan3_route_broadcast_check:  flit[%d] %d (should be %d)\n",i,flits[i],flitsTmp[i]);
20508 +           return (ELAN3_ROUTE_INVALID);   
20509 +       }
20510 +    
20511 +    return (ELAN3_ROUTE_SUCCESS);
20512 +}
20513 +/* ---------------------------------------------------------------------- */
20514 +
20515 +/*
20516 + * Local variables:
20517 + * c-file-style: "stroustrup"
20518 + * End:
20519 + */
20520 Index: linux-2.4.21/drivers/net/qsnet/elan3/route_table.c
20521 ===================================================================
20522 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/route_table.c     2004-02-23 16:02:56.000000000 -0500
20523 +++ linux-2.4.21/drivers/net/qsnet/elan3/route_table.c  2005-06-01 23:12:54.591440320 -0400
20524 @@ -0,0 +1,560 @@
20525 +/*
20526 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20527 + *
20528 + *    For licensing information please see the supplied COPYING file
20529 + *
20530 + */
20531 +
20532 +#ident "$Id: route_table.c,v 1.23 2003/09/24 13:57:25 david Exp $"
20533 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/route_table.c,v $ */
20534 +
20535 +#include <qsnet/kernel.h>
20536 +
20537 +#include <elan3/elanregs.h>
20538 +#include <elan3/elandev.h>
20539 +#include <elan3/elanvp.h>
20540 +#include <elan3/elan3mmu.h>
20541 +#include <elan3/elanctxt.h>
20542 +#include <elan3/elandebug.h>
20543 +
20544 +static sdramaddr_t
20545 +AllocateLargeRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int ctxnum, E3_uint64 *smallRoute)
20546 +{
20547 +    int                  bit = -1;
20548 +    ELAN3_ROUTES  *rent;
20549 +    unsigned long flags;
20550 +    
20551 +    spin_lock_irqsave (&tbl->Lock, flags);
20552 +    
20553 +    for (rent = tbl->LargeRoutes; rent; rent = rent->Next)
20554 +    {
20555 +       if ((bit = bt_freebit (rent->Bitmap, NROUTES_PER_BLOCK)) != -1)
20556 +           break;
20557 +    }
20558 +    
20559 +    if (bit == -1)                                             /* No spare entries in large routes */
20560 +    {                                                          /* so allocate a new page */
20561 +       PRINTF0 (DBG_DEVICE, DBG_VP, "AllocateLargeRoute: allocate route entries\n");
20562 +       
20563 +       spin_unlock_irqrestore (&tbl->Lock, flags);
20564 +
20565 +       KMEM_ZALLOC(rent, ELAN3_ROUTES *, sizeof (ELAN3_ROUTES), TRUE);
20566 +       
20567 +       if (rent == (ELAN3_ROUTES *) NULL)
20568 +           return ((sdramaddr_t) 0);
20569 +       
20570 +       rent->Routes = elan3_sdram_alloc (dev, PAGESIZE);
20571 +       if (rent->Routes == (sdramaddr_t) 0)
20572 +       {
20573 +           KMEM_FREE (rent, sizeof (ELAN3_ROUTES));
20574 +           return ((sdramaddr_t) 0);
20575 +       }
20576 +
20577 +       spin_lock_irqsave (&tbl->Lock, flags);
20578 +
20579 +       /* Add to list of large routes */
20580 +       rent->Next       = tbl->LargeRoutes;
20581 +       tbl->LargeRoutes = rent;
20582 +
20583 +       /* and use entry 0 */
20584 +       bit = 0;
20585 +    }
20586 +    
20587 +    /* Set the bit in the bitmap to mark this route as allocated */
20588 +    BT_SET (rent->Bitmap, bit);
20589 +    
20590 +    /* And generate the small route pointer and the pointer to the large routes */
20591 +    (*smallRoute) = BIG_ROUTE_PTR(rent->Routes + (bit*NBYTES_PER_LARGE_ROUTE), ctxnum);
20592 +
20593 +    PRINTF4 (DBG_DEVICE, DBG_VP, "AllocateLargeRoute: rent %p using entry %d at %lx with route pointer %llx\n",
20594 +            rent, bit, rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE), (long long) (*smallRoute));
20595 +
20596 +    /* Invalidate the large route */
20597 +    elan3_sdram_zeroq_sdram (dev, rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE), NBYTES_PER_LARGE_ROUTE);
20598 +
20599 +    spin_unlock_irqrestore (&tbl->Lock, flags);
20600 +
20601 +    return (rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE));
20602 +}
20603 +
20604 +static void
20605 +FreeLargeRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, E3_uint64 smallRoute)
20606 +{
20607 +    E3_Addr      addr = (E3_Addr) (smallRoute & ((1ULL << ROUTE_CTXT_SHIFT)-1));
20608 +    ELAN3_ROUTES *rent;
20609 +
20610 +    PRINTF1 (DBG_DEVICE, DBG_VP, "FreeLargeRoute: free route %llx\n", (long long) smallRoute);
20611 +
20612 +    ASSERT (SPINLOCK_HELD (&tbl->Lock));
20613 +
20614 +    for (rent = tbl->LargeRoutes; rent; rent = rent->Next)
20615 +    {
20616 +       if (rent->Routes <= addr && (rent->Routes + ROUTE_BLOCK_SIZE) > addr)
20617 +       {
20618 +           int indx = (addr - rent->Routes)/NBYTES_PER_LARGE_ROUTE;
20619 +           
20620 +           PRINTF2 (DBG_DEVICE, DBG_VP, "FreeLargeRoute: rent=%p indx=%d\n", rent, indx);
20621 +           
20622 +           BT_CLEAR(rent->Bitmap, indx);
20623 +           return;
20624 +       }
20625 +    }
20626 +
20627 +    panic ("elan: FreeLargeRoute - route not found in large route tables");
20628 +}
20629 +
20630 +static void
20631 +FreeLargeRoutes (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl)
20632 +{
20633 +    ELAN3_ROUTES *rent;
20634 +
20635 +    while ((rent = tbl->LargeRoutes) != NULL)
20636 +    {
20637 +       PRINTF1 (DBG_DEVICE, DBG_VP, "FreeLargeRoutes: free rent %p\n", rent);
20638 +
20639 +       tbl->LargeRoutes = rent->Next;
20640 +
20641 +       elan3_sdram_free (dev, rent->Routes, PAGESIZE);
20642 +       
20643 +       KMEM_FREE (rent, sizeof(ELAN3_ROUTES));
20644 +    }
20645 +}
20646 +
20647 +int
20648 +GetRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, E3_uint16 *flits)
20649 +{
20650 +    E3_uint64  routeValue;
20651 +    sdramaddr_t largeRouteOff;
20652 +  
20653 +    if (process < 0 || process >= tbl->Size)
20654 +       return (EINVAL);
20655 +
20656 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
20657 +
20658 +    if (routeValue & ROUTE_PTR)
20659 +    {
20660 +       largeRouteOff = (routeValue & ROUTE_PTR_MASK);
20661 +       
20662 +       routeValue = elan3_sdram_readq (dev, largeRouteOff + 0);
20663 +       flits[0] = routeValue & 0xffff;
20664 +       flits[1] = (routeValue >> 16)  & 0xffff;
20665 +       flits[2] = (routeValue >> 32)  & 0xffff;
20666 +       flits[3] = (routeValue >> 48)  & 0xffff;
20667 +       
20668 +       routeValue = elan3_sdram_readq (dev, largeRouteOff + 8);
20669 +       flits[4] = routeValue & 0xffff;
20670 +       flits[5] = (routeValue >> 16)  & 0xffff;
20671 +       flits[6] = (routeValue >> 32)  & 0xffff;
20672 +       flits[7] = (routeValue >> 48)  & 0xffff; /* was flits[6]: copy-paste bug dropped 8th flit */
20673 +    }
20674 +    else
20675 +    {
20676 +       flits[0] = routeValue & 0xffff;
20677 +       flits[1] = (routeValue >> 16)  & 0xffff;
20678 +       flits[2] = (routeValue >> 32)  & 0xffff;
20679 +    }
20680 +
20681 +    return (ESUCCESS);
20682 +}
20683 +
20684 +ELAN3_ROUTE_TABLE *
20685 +AllocateRouteTable (ELAN3_DEV *dev, int size)
20686 +{
20687 +    ELAN3_ROUTE_TABLE *tbl;
20688 +
20689 +    KMEM_ZALLOC (tbl, ELAN3_ROUTE_TABLE *, sizeof (ELAN3_ROUTE_TABLE), TRUE);
20690 +
20691 +    if (tbl == (ELAN3_ROUTE_TABLE *) NULL)
20692 +       return (NULL);
20693 +    
20694 +    tbl->Size  = size;
20695 +    tbl->Table = elan3_sdram_alloc (dev, size*NBYTES_PER_SMALL_ROUTE);
20696 +
20697 +    if (tbl->Table == 0)
20698 +    {
20699 +       KMEM_FREE (tbl, sizeof (ELAN3_ROUTE_TABLE));
20700 +       return (NULL);
20701 +    }
20702 +    spin_lock_init (&tbl->Lock);
20703 +
20704 +    /* zero the route table */
20705 +    elan3_sdram_zeroq_sdram (dev, tbl->Table, size*NBYTES_PER_SMALL_ROUTE);
20706 +
20707 +    return (tbl);
20708 +}
20709 +
20710 +void
20711 +FreeRouteTable (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl)
20712 +{
20713 +    elan3_sdram_free (dev, tbl->Table, tbl->Size*NBYTES_PER_SMALL_ROUTE);
20714 +
20715 +    FreeLargeRoutes (dev, tbl);
20716 +
20717 +    spin_lock_destroy (&tbl->Lock);
20718 +
20719 +    KMEM_FREE (tbl, sizeof (ELAN3_ROUTE_TABLE));
20720 +}
20721 +
20722 +int
20723 +LoadRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, int ctxnum, int nflits, E3_uint16 *flits)
20724 +{
20725 +    E3_uint64    routeValue;
20726 +    E3_uint64    largeRouteValue;
20727 +    sdramaddr_t   largeRouteOff;
20728 +    unsigned long flags;
20729 +
20730 +    if (process < 0 || process >= tbl->Size)
20731 +       return (EINVAL);
20732 +
20733 +    PRINTF3 (DBG_DEVICE, DBG_VP, "LoadRoute: table %lx process %d ctxnum %x\n", tbl->Table ,process, ctxnum);
20734 +
20735 +    if (nflits < 4)
20736 +    {
20737 +       spin_lock_irqsave (&tbl->Lock, flags);
20738 +
20739 +       /* See if we're replacing a "large" route */
20740 +       routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
20741 +       if (routeValue & ROUTE_PTR)
20742 +           FreeLargeRoute (dev, tbl, routeValue);
20743 +
20744 +       routeValue = SMALL_ROUTE(flits, ctxnum);
20745 +
20746 +       if ( routeValue &  ROUTE_PTR)
20747 +           PRINTF0 (DBG_DEVICE, DBG_VP, "SHOULD BE  A SMALL ROUTE !!!!!!!\n");
20748 +
20749 +       PRINTF2 (DBG_DEVICE, DBG_VP, "LoadRoute: loading small route %d  %llx\n", process, (long long) routeValue);
20750 +       elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, routeValue);
20751 +    }
20752 +    else
20753 +    {
20754 +       E3_uint64 value0 = BIG_ROUTE0(flits);
20755 +       E3_uint64 value1 = BIG_ROUTE1(flits);
20756 +
20757 +       if ((largeRouteOff = AllocateLargeRoute (dev, tbl, ctxnum, &largeRouteValue)) == (sdramaddr_t) 0)
20758 +           return (ENOMEM);
20759 +
20760 +       spin_lock_irqsave (&tbl->Lock, flags);
20761 +           
20762 +       routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
20763 +
20764 +       if ((routeValue & ROUTE_PTR) == 0)
20765 +           elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, largeRouteValue);
20766 +       else
20767 +       {
20768 +           FreeLargeRoute (dev, tbl, largeRouteValue);
20769 +
20770 +           largeRouteOff   = (routeValue & ROUTE_PTR_MASK);
20771 +       }
20772 +
20773 +       PRINTF3 (DBG_DEVICE, DBG_VP, "LoadRoute: loading large route %d - %llx %llx\n", process, 
20774 +                (long long) value0, (long long) value1);
20775 +
20776 +       elan3_sdram_writeq (dev, largeRouteOff + 0, value0);
20777 +       elan3_sdram_writeq (dev, largeRouteOff + 8, value1);
20778 +    }
20779 +
20780 +    spin_unlock_irqrestore (&tbl->Lock, flags);
20781 +    return (ESUCCESS);
20782 +}
20783 +void
20784 +InvalidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
20785 +{
20786 +    E3_uint64 routeValue;
20787 +    unsigned long flags;
20788 +
20789 +    if (process < 0 || process >= tbl->Size)
20790 +       return;
20791 +
20792 +    spin_lock_irqsave (&tbl->Lock, flags);
20793 +
20794 +    /* unset ROUTE_VALID
20795 +     * does not matter if its short or long, will check when we re-use it
20796 +     */
20797 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
20798 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, (routeValue & (~ROUTE_VALID)));
20799 +
20800 +    spin_unlock_irqrestore (&tbl->Lock, flags);
20801 +}
20802 +void
20803 +ValidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
20804 +{
20805 +    E3_uint64 routeValue;
20806 +    unsigned long flags;
20807 +
20808 +    if (process < 0 || process >= tbl->Size)
20809 +       return;
20810 +
20811 +    PRINTF2 (DBG_DEVICE, DBG_VP, "ValidateRoute: table %lx process %d  \n", tbl->Table ,process);
20812 +
20813 +    spin_lock_irqsave (&tbl->Lock, flags);
20814 +
20815 +    /* set ROUTE_VALID
20816 +     */
20817 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
20818 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, (routeValue | ROUTE_VALID));
20819 +
20820 +    spin_unlock_irqrestore (&tbl->Lock, flags);
20821 +}
20822 +void
20823 +ClearRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
20824 +{
20825 +    E3_uint64 routeValue;
20826 +    unsigned long flags;
20827 +
20828 +    if (process < 0 || process >= tbl->Size)
20829 +       return;
20830 +
20831 +    spin_lock_irqsave (&tbl->Lock, flags);
20832 +
20833 +    PRINTF2 (DBG_DEVICE, DBG_VP, "ClearRoute: table %lx process %d  \n", tbl->Table ,process);
20834 +
20835 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
20836 +
20837 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, 0);
20838 +
20839 +    if (routeValue & ROUTE_PTR)
20840 +       FreeLargeRoute (dev, tbl, routeValue);
20841 +
20842 +    spin_unlock_irqrestore (&tbl->Lock, flags);
20843 +}
20844 +
20845 +static int
20846 +ElanIdEqual (ELAN_POSITION *pos, int level, int ida, int idb)
20847 +{
20848 +    int l;
20849 +
20850 +    for (l = pos->pos_levels-1; l >= level; l--)
20851 +    {
20852 +       ida /= pos->pos_arity[l];
20853 +       idb /= pos->pos_arity[l];
20854 +    }
20855 +       
20856 +    return (ida == idb);
20857 +}
20858 +
20859 +static int
20860 +RouteDown (ELAN_POSITION *pos, int level, int elanid)
20861 +{
20862 +    int l;
20863 +
20864 +    for (l = (pos->pos_levels - 1); level < pos->pos_levels - 1; level++, l--)
20865 +    {  
20866 +       if (  pos->pos_arity[l] )
20867 +           elanid /= pos->pos_arity[l];    
20868 +    }
20869 +    elanid %= pos->pos_arity[l];
20870 +
20871 +    return elanid;
20872 +}
20873 +
20874 +static int
20875 +InitPackedAndFlits (u_char *packed, E3_uint16 *flits)
20876 +{
20877 +    int rb = 0;
20878 +
20879 +    bzero ((caddr_t) packed, MAX_PACKED+4);
20880 +    bzero ((caddr_t) flits, MAX_FLITS * sizeof (E3_uint16));
20881 +
20882 +    /* Initialise 4 bytes of packed, so that the "padding" */
20883 +    /* NEVER terminates with 00, as this is recognised as */
20884 +    /* as CRC flit */
20885 +    packed[rb++] = 0xF;
20886 +    packed[rb++] = 0xF;
20887 +    packed[rb++] = 0xF;
20888 +    packed[rb++] = 0xF;
20889 +
20890 +    return (rb);
20891 +}
20892 +
20893 +static int
20894 +PackThemRoutesUp (E3_uint16 *flits, u_char *packed, int rb, int timeout, int highPri)
20895 +{
20896 +    int i, nflits;
20897 +
20898 +    flits[0] |= FIRST_TIMEOUT(timeout);
20899 +    if (highPri)
20900 +       flits[0] |= FIRST_HIGH_PRI;
20901 +
20902 +    /* round up the number of route bytes to flits */
20903 +    /* and subtract the 4 extra we've padded out with */
20904 +    nflits = (rb-1)/4;
20905 +    
20906 +    for (i = nflits; i > 0; i--)
20907 +    {
20908 +       flits[i] = (packed[rb-1] << 12 |
20909 +                   packed[rb-2] << 8  |
20910 +                   packed[rb-3] << 4  |
20911 +                   packed[rb-4] << 0);
20912 +       rb -= 4;
20913 +    }
20914 +    
20915 +    /* Now set the position of the first packed route  */
20916 +    /* byte in the 2nd 16 bit flit, taking account of the */
20917 +    /* 4 byte padding */
20918 +    flits[0] |= FIRST_PACKED (4-rb);
20919 +    
20920 +    return (nflits+1);
20921 +}
20922 +
20923 +int
20924 +GenerateRoute (ELAN_POSITION *pos, E3_uint16 *flits, int lowid, int highid, int timeout, int highPri)
20925 +{
20926 +    int     broadcast  = (lowid != highid);
20927 +    int     rb         = 0;
20928 +    int            first      = 1;
20929 +    int     noRandom   = 0;
20930 +    int     level;
20931 +    u_char  packed[MAX_PACKED+4];
20932 +    int     numDownLinks;
20933 +
20934 +    rb = InitPackedAndFlits (packed, flits);
20935 +
20936 +    for (level = pos->pos_levels-1;                            /* Move up out of the elan */
20937 +        level > 0 && ! (ElanIdEqual (pos, level, pos->pos_nodeid, lowid) &&
20938 +                        ElanIdEqual (pos, level, pos->pos_nodeid, highid)); level--)
20939 +    {
20940 +       noRandom |= pos->pos_random_disabled & (1 << (pos->pos_levels-1-level));
20941 +    }
20942 +    
20943 +    for (level = pos->pos_levels-1;                            /* Move up out of the elan */
20944 +        level > 0 && ! (ElanIdEqual (pos, level, pos->pos_nodeid, lowid) &&
20945 +                        ElanIdEqual (pos, level, pos->pos_nodeid, highid)); level--)
20946 +    {
20947 +       numDownLinks = pos->pos_arity [level];
20948 +       if (first)
20949 +       {
20950 +           if (broadcast || noRandom)
20951 +               flits[0] = FIRST_BCAST_TREE;
20952 +           else
20953 +           {
20954 +               if (numDownLinks == 4) 
20955 +                   flits[0] = FIRST_ADAPTIVE;
20956 +               else
20957 +                   flits[0] = FIRST_ROUTE( numDownLinks + ( lowid % (8-numDownLinks) ));
20958 +           }
20959 +           first = 0;
20960 +       }
20961 +       else
20962 +       {
20963 +           if (broadcast || noRandom)
20964 +               packed[rb++] = PACKED_BCAST_TREE;
20965 +           else 
20966 +           {
20967 +               if (numDownLinks == 4) 
20968 +                   packed[rb++] = PACKED_ADAPTIVE;
20969 +               else
20970 +                   packed[rb++] = PACKED_ROUTE( numDownLinks + ( lowid % (8-numDownLinks) ));
20971 +           }               
20972 +       }
20973 +    }
20974 +    
20975 +    while (level < pos->pos_levels)
20976 +    {
20977 +       int lowRoute  = RouteDown (pos, level, lowid);
20978 +       int highRoute = RouteDown (pos, level, highid);
20979 +
20980 +       if (first)
20981 +       {
20982 +           if (broadcast)
20983 +               flits[0] = FIRST_BCAST(highRoute, lowRoute);
20984 +           else
20985 +               flits[0] = FIRST_ROUTE(lowRoute);
20986 +
20987 +           first = 0;
20988 +       }
20989 +       else
20990 +       {
20991 +           if (broadcast)
20992 +           {
20993 +               packed[rb++] = PACKED_BCAST0(highRoute, lowRoute);
20994 +               packed[rb++] = PACKED_BCAST1(highRoute, lowRoute);
20995 +           }
20996 +           else
20997 +               packed[rb++] = PACKED_ROUTE(lowRoute);
20998 +       }
20999 +       
21000 +       level++;
21001 +    }
21002 +
21003 +#ifdef ELITE_REVA_SUPPORTED
21004 +    if (broadcast && (pos->pos_levels == 3))
21005 +    {
21006 +      packed[rb++] = PACKED_BCAST0(0, 0);
21007 +      packed[rb++] = PACKED_BCAST1(0, 0);
21008 +    }
21009 +#endif
21010 +
21011 +    return (PackThemRoutesUp (flits, packed, rb, timeout, highPri));
21012 +}
21013 +
21014 +int
21015 +GenerateCheckRoute (ELAN_POSITION *pos, E3_uint16 *flits, int level, int adaptive)
21016 +{
21017 +    int     notfirst = 0;
21018 +    int     l, rb;
21019 +    u_char  packed[MAX_PACKED+4];
21020 +
21021 +    rb = InitPackedAndFlits (packed, flits);
21022 +
21023 +    for (l = pos->pos_levels-1; l > level; l--)
21024 +       if (! notfirst++)
21025 +           flits[0] = adaptive ? FIRST_ADAPTIVE : FIRST_BCAST_TREE;
21026 +       else
21027 +           packed[rb++] = adaptive ? PACKED_ADAPTIVE : PACKED_BCAST_TREE;
21028 +
21029 +    if (! notfirst++ ) 
21030 +       flits[0] = FIRST_MYLINK;
21031 +    else
21032 +       packed[rb++] = PACKED_MYLINK;
21033 +
21034 +    for (l++ /* consume mylink */; l < pos->pos_levels; l++)
21035 +       if (! notfirst++)
21036 +           flits[0] = FIRST_ROUTE (RouteDown (pos, l, pos->pos_nodeid));
21037 +       else
21038 +           packed[rb++] = PACKED_ROUTE (RouteDown (pos, l, pos->pos_nodeid));
21039 +
21040 +
21041 +    return (PackThemRoutesUp (flits, packed, rb, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY));
21042 +}
21043 +
21044 +
21045 +/*
21046 + * In this case "level" is the number of levels counted from the bottom.
21047 + */
21048 +int
21049 +GenerateProbeRoute (E3_uint16 *flits, int nodeid, int level, int *linkup, int *linkdown, int adaptive )
21050 +{
21051 +    int            first = 1;
21052 +    int     i, rb;
21053 +    u_char  packed[MAX_PACKED+4];
21054 +
21055 +    rb = InitPackedAndFlits (packed, flits);
21056 +
21057 +    /* Generate "up" routes */
21058 +    for (i = 0; i < level; i++)
21059 +    {
21060 +       if (first)
21061 +           flits[0] = linkup ? FIRST_ROUTE(linkup[i]) : adaptive ? FIRST_ADAPTIVE : FIRST_BCAST_TREE;
21062 +       else
21063 +           packed[rb++] = linkup ? PACKED_ROUTE(linkup[i]) : adaptive ? PACKED_ADAPTIVE : PACKED_BCAST_TREE;
21064 +       first = 0;
21065 +    }
21066 +
21067 +    /* Generate a "to-me" route down */
21068 +    if (first)
21069 +       flits[0] = FIRST_MYLINK;
21070 +    else
21071 +       packed[rb++] = PACKED_MYLINK;
21072 +
21073 +    for (i = level-1; i >= 0; i--)
21074 +       packed[rb++] =  PACKED_ROUTE(linkdown[i]);
21075 +
21076 +    return (PackThemRoutesUp (flits, packed, rb, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY));
21077 +}
21078 +
21079 +
21080 +/*
21081 + * Local variables:
21082 + * c-file-style: "stroustrup"
21083 + * End:
21084 + */
21085 Index: linux-2.4.21/drivers/net/qsnet/elan3/sdram.c
21086 ===================================================================
21087 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/sdram.c   2004-02-23 16:02:56.000000000 -0500
21088 +++ linux-2.4.21/drivers/net/qsnet/elan3/sdram.c        2005-06-01 23:12:54.593440016 -0400
21089 @@ -0,0 +1,807 @@
21090 +/*
21091 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
21092 + * 
21093 + *    For licensing information please see the supplied COPYING file
21094 + *
21095 + */
21096 +
21097 +#ident "@(#)$Id: sdram.c,v 1.17 2003/09/24 13:57:25 david Exp $"
21098 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/sdram.c,v $*/
21099 +
21100 +
21101 +#include <qsnet/kernel.h>
21102 +
21103 +#include <elan3/elanregs.h>
21104 +#include <elan3/elandev.h>
21105 +#include <elan3/elandebug.h>
21106 +
21107 +/* sdram access functions */
21108 +#define sdram_off_to_bank(dev,off)     (&dev->SdramBanks[(off) >> ELAN3_SDRAM_BANK_SHIFT])
21109 +#define sdram_off_to_offset(dev,off)   ((off) & (ELAN3_SDRAM_BANK_SIZE-1))
21110 +#define sdram_off_to_bit(dev,indx,off) (sdram_off_to_offset(dev,off) >> (SDRAM_MIN_BLOCK_SHIFT+(indx)))
21111 +
21112 +#define sdram_off_to_mapping(dev,off)  (sdram_off_to_bank(dev,off)->Mapping + sdram_off_to_offset(dev,off))
21113 +    
21114 +unsigned char
21115 +elan3_sdram_readb (ELAN3_DEV *dev, sdramaddr_t off)
21116 +{
21117 +    return (readb ((unsigned char *) sdram_off_to_mapping(dev, off)));
21118 +}
21119 +
21120 +unsigned short
21121 +elan3_sdram_readw (ELAN3_DEV *dev, sdramaddr_t off)
21122 +{
21123 +    return (readw ((unsigned short *) sdram_off_to_mapping(dev, off)));
21124 +}
21125 +
21126 +unsigned int
21127 +elan3_sdram_readl (ELAN3_DEV *dev, sdramaddr_t off)
21128 +{
21129 +    return (readl ((unsigned int *) sdram_off_to_mapping(dev, off)));
21130 +}
21131 +
21132 +unsigned long long
21133 +elan3_sdram_readq (ELAN3_DEV *dev, sdramaddr_t off)
21134 +{
21135 +    return (readq ((unsigned long long *) sdram_off_to_mapping(dev, off)));
21136 +}
21137 +
21138 +void
21139 +elan3_sdram_writeb (ELAN3_DEV *dev, sdramaddr_t off, unsigned char val)
21140 +{
21141 +    writeb (val, (unsigned char *) sdram_off_to_mapping(dev, off));
21142 +    wmb();
21143 +}
21144 +
21145 +void
21146 +elan3_sdram_writew (ELAN3_DEV *dev, sdramaddr_t off, unsigned short val)
21147 +{
21148 +    writew (val, (unsigned short *) sdram_off_to_mapping(dev, off));
21149 +    wmb();
21150 +}
21151 +
21152 +void
21153 +elan3_sdram_writel (ELAN3_DEV *dev, sdramaddr_t off, unsigned int val)
21154 +{
21155 +    writel (val, (unsigned int *) sdram_off_to_mapping(dev, off));
21156 +    wmb();
21157 +}
21158 +
21159 +void
21160 +elan3_sdram_writeq (ELAN3_DEV *dev, sdramaddr_t off, unsigned long long val)
21161 +{
21162 +    writeq (val, (unsigned long long *) sdram_off_to_mapping(dev, off));
21163 +    wmb();
21164 +}
21165 +
21166 +void
21167 +elan3_sdram_copyb_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
21168 +{
21169 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
21170 +}
21171 +
21172 +void
21173 +elan3_sdram_copyw_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
21174 +{
21175 +#ifdef __LITTLE_ENDIAN__
21176 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
21177 +#else
21178 +#error incorrect for big endian
21179 +#endif
21180 +}
21181 +
21182 +void
21183 +elan3_sdram_copyl_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
21184 +{
21185 +#ifdef __LITTLE_ENDIAN__
21186 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
21187 +#else
21188 +#error incorrect for big endian
21189 +#endif
21190 +}
21191 +
21192 +void
21193 +elan3_sdram_copyq_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
21194 +{
21195 +#ifdef __LITTLE_ENDIAN__
21196 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
21197 +#else
21198 +#error incorrect for big endian
21199 +#endif
21200 +}
21201 +
21202 +#define E3_WRITEBUFFER_SIZE            16
21203 +#define E3_WRITEBUFFER_OFFSET(x)       (((unsigned long) x) & (E3_WRITEBUFFER_SIZE-1))
21204 +#define E3_WRITEBUFFER_BASE(x)         (((unsigned long) x) & ~((unsigned long) (E3_WRITEBUFFER_SIZE-1)))
21205 +
21206 +void
21207 +elan3_sdram_copyb_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
21208 +{
21209 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21210 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21211 +    virtaddr_t slim  = (virtaddr_t) from + nbytes;
21212 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21213 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint8_t)) + sizeof (uint8_t);
21214 +    int        i;
21215 +
21216 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21217 +    {
21218 +       for (i = 0; i < nbytes/sizeof(uint8_t); i++)
21219 +           writeb (((uint8_t *) from)[i], &((uint8_t *) dbase)[i]);
21220 +       wmb();
21221 +    }
21222 +    else
21223 +    {
21224 +       if (ntop < E3_WRITEBUFFER_SIZE)
21225 +       {
21226 +           slim -= ntop;
21227 +           dlim -= ntop;
21228 +           
21229 +           for (i = 0; i < ntop/sizeof(uint8_t); i++)
21230 +               writeb (((uint8_t *) slim)[i], &((uint8_t *) dlim)[i]);
21231 +           wmb();
21232 +       }
21233 +       
21234 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21235 +       {
21236 +           dlim -= E3_WRITEBUFFER_SIZE;
21237 +           slim -= E3_WRITEBUFFER_SIZE;
21238 +
21239 +           for (i = 0; i < E3_WRITEBUFFER_SIZE/sizeof (uint8_t); i++)
21240 +               writeb (((uint8_t *) slim)[i], &((uint8_t *) dlim)[i]);
21241 +           wmb();
21242 +       }
21243 +       
21244 +       if (nbase < E3_WRITEBUFFER_SIZE)
21245 +       {
21246 +           for (i = 0; i < nbase/sizeof(uint8_t); i++)
21247 +               writeb (((uint8_t *) from)[i], &((uint8_t *) dbase)[i]);
21248 +           wmb();
21249 +       }
21250 +    }
21251 +}
21252 +
21253 +void
21254 +elan3_sdram_zerob_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
21255 +{
21256 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21257 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21258 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21259 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint8_t)) + sizeof (uint8_t);
21260 +    int        i;
21261 +
21262 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21263 +    {
21264 +       for (i = 0; i < nbytes/sizeof(uint8_t); i++)
21265 +           writeb (0, &((uint8_t *) dbase)[i]);
21266 +       wmb();
21267 +    }
21268 +    else
21269 +    {
21270 +       if (ntop < E3_WRITEBUFFER_SIZE)
21271 +       {
21272 +           dlim -= ntop;
21273 +           
21274 +           for (i = 0; i < ntop/sizeof(uint8_t); i++)
21275 +               writeb (0, &((uint8_t *) dlim)[i]);
21276 +           wmb();
21277 +       }
21278 +       
21279 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21280 +       {
21281 +           dlim -= E3_WRITEBUFFER_SIZE;
21282 +
21283 +           writeq (0, &((uint64_t *) dlim)[0]);
21284 +           writeq (0, &((uint64_t *) dlim)[1]);
21285 +
21286 +           wmb();
21287 +       }
21288 +       
21289 +       if (nbase < E3_WRITEBUFFER_SIZE)
21290 +       {
21291 +           for (i = 0; i < nbase/sizeof(uint8_t); i++)
21292 +               writeb (0, &((uint8_t *) dbase)[i]);
21293 +           wmb();
21294 +       }
21295 +    }
21296 +}
21297 +
21298 +void
21299 +elan3_sdram_copyw_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
21300 +{
21301 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21302 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21303 +    virtaddr_t slim  = (virtaddr_t) from + nbytes;
21304 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21305 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint16_t)) + sizeof (uint16_t);
21306 +    int        i;
21307 +
21308 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21309 +    {
21310 +       for (i = 0; i < nbytes/sizeof(uint16_t); i++)
21311 +           writew (((uint16_t *) from)[i], &((uint16_t *) dbase)[i]);
21312 +       wmb();
21313 +    }
21314 +    else
21315 +    {
21316 +       if (ntop < E3_WRITEBUFFER_SIZE)
21317 +       {
21318 +           slim -= ntop;
21319 +           dlim -= ntop;
21320 +
21321 +           for (i = 0; i < ntop/sizeof(uint16_t); i++)
21322 +               writew (((uint16_t *) slim)[i], &((uint16_t *) dlim)[i]);
21323 +           wmb();
21324 +       }
21325 +       
21326 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21327 +       {
21328 +           dlim -= E3_WRITEBUFFER_SIZE;
21329 +           slim -= E3_WRITEBUFFER_SIZE;
21330 +
21331 +           writew (((uint16_t *) slim)[0], &((uint16_t *) dlim)[0]);
21332 +           writew (((uint16_t *) slim)[1], &((uint16_t *) dlim)[1]);
21333 +           writew (((uint16_t *) slim)[2], &((uint16_t *) dlim)[2]);
21334 +           writew (((uint16_t *) slim)[3], &((uint16_t *) dlim)[3]);
21335 +           writew (((uint16_t *) slim)[4], &((uint16_t *) dlim)[4]);
21336 +           writew (((uint16_t *) slim)[5], &((uint16_t *) dlim)[5]);
21337 +           writew (((uint16_t *) slim)[6], &((uint16_t *) dlim)[6]);
21338 +           writew (((uint16_t *) slim)[7], &((uint16_t *) dlim)[7]);
21339 +           wmb();
21340 +       }
21341 +       
21342 +       if (nbase < E3_WRITEBUFFER_SIZE)
21343 +       {
21344 +           for (i = 0; i < nbase/sizeof(uint16_t); i++)
21345 +               writew (((uint16_t *) from)[i], &((uint16_t *) dbase)[i]);
21346 +           wmb();
21347 +       }
21348 +    }
21349 +}
21350 +
21351 +void
21352 +elan3_sdram_zerow_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
21353 +{
21354 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21355 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21356 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21357 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint16_t)) + sizeof (uint16_t);
21358 +    int        i;
21359 +
21360 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21361 +    {
21362 +       for (i = 0; i < nbytes/sizeof(uint16_t); i++)
21363 +           writew (0, &((uint16_t *) dbase)[i]);
21364 +       wmb();
21365 +    }
21366 +    else
21367 +    {
21368 +       if (ntop < E3_WRITEBUFFER_SIZE)
21369 +       {
21370 +           dlim -= ntop;
21371 +           
21372 +           for (i = 0; i < ntop/sizeof(uint16_t); i++)
21373 +               writew (0, &((uint16_t *) dlim)[i]);
21374 +           wmb();
21375 +       }
21376 +       
21377 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21378 +       {
21379 +           dlim -= E3_WRITEBUFFER_SIZE;
21380 +
21381 +           writeq (0, &((uint64_t *) dlim)[0]);
21382 +           writeq (0, &((uint64_t *) dlim)[1]);
21383 +           wmb();
21384 +       }
21385 +       
21386 +       if (nbase < E3_WRITEBUFFER_SIZE)
21387 +       {
21388 +           for (i = 0; i < nbase/sizeof(uint16_t); i++)
21389 +               writew (0, &((uint16_t *) dbase)[i]);
21390 +           wmb();
21391 +       }
21392 +    }
21393 +}
21394 +
21395 +void
21396 +elan3_sdram_copyl_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
21397 +{
21398 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21399 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21400 +    virtaddr_t slim  = (virtaddr_t) from + nbytes;
21401 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21402 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint32_t)) + sizeof (uint32_t);
21403 +    int        i;
21404 +
21405 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21406 +    {
21407 +       for (i = 0; i < nbytes/sizeof(uint32_t); i++)
21408 +           writel (((uint32_t *) from)[i], &((uint32_t *) dbase)[i]);
21409 +       wmb();
21410 +    }
21411 +    else
21412 +    {
21413 +       if (ntop < E3_WRITEBUFFER_SIZE)
21414 +       {
21415 +           slim -= ntop;
21416 +           dlim -= ntop;
21417 +
21418 +           for (i = 0; i < ntop/sizeof(uint32_t); i++)
21419 +               writel (((uint32_t *) slim)[i], &((uint32_t *) dlim)[i]);
21420 +           wmb();
21421 +       }
21422 +       
21423 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21424 +       {
21425 +           dlim -= E3_WRITEBUFFER_SIZE;
21426 +           slim -= E3_WRITEBUFFER_SIZE;
21427 +
21428 +           writel (((uint32_t *) slim)[0], &((uint32_t *) dlim)[0]);
21429 +           writel (((uint32_t *) slim)[1], &((uint32_t *) dlim)[1]);
21430 +           writel (((uint32_t *) slim)[2], &((uint32_t *) dlim)[2]);
21431 +           writel (((uint32_t *) slim)[3], &((uint32_t *) dlim)[3]);
21432 +           wmb();
21433 +       }
21434 +       
21435 +       if (nbase < E3_WRITEBUFFER_SIZE)
21436 +       {
21437 +           for (i = 0; i < nbase/sizeof(uint32_t); i++)
21438 +               writel (((uint32_t *) from)[i], &((uint32_t *) dbase)[i]);
21439 +           wmb();
21440 +       }
21441 +    }
21442 +}
21443 +
21444 +void
21445 +elan3_sdram_zerol_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
21446 +{
21447 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21448 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21449 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21450 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint32_t)) + sizeof (uint32_t);
21451 +    int        i;
21452 +
21453 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21454 +    {
21455 +       for (i = 0; i < nbytes/sizeof(uint32_t); i++)
21456 +           writel (0, &((uint32_t *) dbase)[i]);
21457 +       wmb();
21458 +    }
21459 +    else
21460 +    {
21461 +       if (ntop < E3_WRITEBUFFER_SIZE)
21462 +       {
21463 +           dlim -= ntop;
21464 +
21465 +           for (i = 0; i < ntop/sizeof(uint32_t); i++)
21466 +               writel (0, &((uint32_t *) dlim)[i]);
21467 +           wmb();
21468 +       }
21469 +       
21470 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21471 +       {
21472 +           dlim -= E3_WRITEBUFFER_SIZE;
21473 +
21474 +           writeq (0, &((uint64_t *) dlim)[0]);
21475 +           writeq (0, &((uint64_t *) dlim)[1]);
21476 +           wmb();
21477 +       }
21478 +       
21479 +       if (nbase < E3_WRITEBUFFER_SIZE)
21480 +       {
21481 +           for (i = 0; i < nbase/sizeof(uint32_t); i++)
21482 +               writel (0, &((uint32_t *) dbase)[i]);
21483 +           wmb();
21484 +       }
21485 +    }
21486 +}
21487 +
21488 +void
21489 +elan3_sdram_copyq_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
21490 +{
21491 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21492 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21493 +    virtaddr_t slim  = (virtaddr_t) from + nbytes;
21494 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21495 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint64_t)) + sizeof (uint64_t);
21496 +
21497 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21498 +    {
21499 +       writeq (((uint64_t *) from)[0], &((uint64_t *) dbase)[0]);
21500 +       wmb();
21501 +    }
21502 +    else
21503 +    {
21504 +       if (ntop < E3_WRITEBUFFER_SIZE)
21505 +       {
21506 +           slim -= ntop;
21507 +           dlim -= ntop;
21508 +
21509 +           writeq (((uint64_t *) slim)[0], &((uint64_t *) dlim)[0]);
21510 +           wmb();
21511 +       }
21512 +       
21513 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21514 +       {
21515 +           dlim -= E3_WRITEBUFFER_SIZE;
21516 +           slim -= E3_WRITEBUFFER_SIZE;
21517 +
21518 +           writeq (((uint64_t *) slim)[0], &((uint64_t *) dlim)[0]);
21519 +           writeq (((uint64_t *) slim)[1], &((uint64_t *) dlim)[1]);
21520 +           wmb();
21521 +       }
21522 +       
21523 +       if (nbase < E3_WRITEBUFFER_SIZE)
21524 +       {
21525 +           writeq (((uint64_t *) from)[0], &((uint64_t *) dbase)[0]);
21526 +           wmb();
21527 +       }
21528 +    }
21529 +}
21530 +
21531 +void
21532 +elan3_sdram_zeroq_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
21533 +{
21534 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21535 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21536 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21537 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint64_t)) + sizeof (uint64_t);
21538 +
21539 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21540 +    {
21541 +       writeq (0, &((uint64_t *) dbase)[0]);
21542 +       wmb();
21543 +    }
21544 +    else
21545 +    {
21546 +       if (ntop < E3_WRITEBUFFER_SIZE)
21547 +       {
21548 +           dlim -= ntop;
21549 +
21550 +           writeq (0, &((uint64_t *) dlim)[0]);
21551 +           wmb();
21552 +       }
21553 +       
21554 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21555 +       {
21556 +           dlim -= E3_WRITEBUFFER_SIZE;
21557 +           
21558 +           writeq (0, &((uint64_t *) dlim)[0]);
21559 +           writeq (0, &((uint64_t *) dlim)[1]);
21560 +           wmb();
21561 +       }
21562 +       
21563 +       if (nbase < E3_WRITEBUFFER_SIZE)
21564 +       {
21565 +           writeq (0, &((uint64_t *) dbase)[0]);
21566 +           wmb();
21567 +       }
21568 +    }
21569 +}
21570 +
21571 +physaddr_t
21572 +elan3_sdram_to_phys (ELAN3_DEV *dev, sdramaddr_t off)
21573 +{
21574 +#if defined(DIGITAL_UNIX)
21575 +    return (KSEG_TO_PHYS (sdram_off_to_mapping (dev, off)));
21576 +#elif defined(LINUX)
21577 +    return (kmem_to_phys ((void *) sdram_off_to_mapping (dev, off)));
21578 +#endif    
21579 +}
21580 +
21581 +/* sdram buddy allocator */
21582 +#define read_next(dev, block)          elan3_sdram_readl(dev, block + 0)
21583 +#define read_prev(dev, block)          elan3_sdram_readl(dev, block + 4)
21584 +#define write_next(dev, block, val)    (elan3_sdram_writel(dev, block + 0, val), val)
21585 +#define write_prev(dev, block, val)    (elan3_sdram_writel(dev, block + 4, val), val)
21586 +
21587 +#define freelist_insert(dev,idx,block)\
21588 +do {\
21589 +    sdramaddr_t next = dev->SdramFreeLists[(idx)];\
21590 +\
21591 +    /*\
21592 +     * block->prev = NULL;\
21593 +     * block->next = next;\
21594 +     * if (next != NULL)\
21595 +     *    next->prev = block;\
21596 +     * freelist = block;\
21597 +     */\
21598 +    write_prev (dev, block, (sdramaddr_t) 0);\
21599 +    write_next (dev, block, next);\
21600 +    if (next != (sdramaddr_t) 0)\
21601 +       write_prev (dev, next, block);\
21602 +    dev->SdramFreeLists[idx] = block;\
21603 +\
21604 +    dev->SdramFreeCounts[idx]++;\
21605 +    dev->Stats.SdramBytesFree += (SDRAM_MIN_BLOCK_SIZE << idx);\
21606 +} while (0)
21607 +
21608 +#define freelist_remove(dev,idx,block)\
21609 +do {\
21610 +    /*\
21611 +     * if (block->prev)\
21612 +     *     block->prev->next = block->next;\
21613 +     * else\
21614 +     *     dev->SdramFreeLists[idx] = block->next;\
21615 +     * if (block->next)\
21616 +     *     block->next->prev = block->prev;\
21617 +     */\
21618 +    sdramaddr_t blocknext = read_next (dev, block);\
21619 +    sdramaddr_t blockprev = read_prev (dev, block);\
21620 +\
21621 +    if (blockprev)\
21622 +       write_next (dev, blockprev, blocknext);\
21623 +    else\
21624 +       dev->SdramFreeLists[idx] = blocknext;\
21625 +    if (blocknext)\
21626 +       write_prev (dev, blocknext, blockprev);\
21627 +\
21628 +    dev->SdramFreeCounts[idx]--;\
21629 +    dev->Stats.SdramBytesFree -= (SDRAM_MIN_BLOCK_SIZE << idx);\
21630 +} while (0)
21631 +
21632 +#define freelist_removehead(dev,idx,block)\
21633 +do {\
21634 +    sdramaddr_t blocknext = read_next (dev, block);\
21635 +\
21636 +    if ((dev->SdramFreeLists[idx] = blocknext) != 0)\
21637 +       write_prev (dev, blocknext, 0);\
21638 +\
21639 +    dev->SdramFreeCounts[idx]--;\
21640 +    dev->Stats.SdramBytesFree -= (SDRAM_MIN_BLOCK_SIZE << idx);\
21641 +} while (0)
21642 +
21643 +#if defined(DEBUG)
21644 +static int
21645 +display_blocks (ELAN3_DEV *dev, int indx, char *string)
21646 +{
21647 +    sdramaddr_t block;
21648 +    int nbytes = 0;
21649 +
21650 +    printk ("%s - indx %d\n", string, indx);
21651 +    for (block = dev->SdramFreeLists[indx]; block != (sdramaddr_t) 0; block = read_next (dev, block))
21652 +    {
21653 +       printk ("  %lx", block);
21654 +       nbytes += (SDRAM_MIN_BLOCK_SIZE << indx);
21655 +    }
21656 +    printk ("\n");
21657 +
21658 +    return (nbytes);
21659 +}
21660 +
21661 +
21662 +void
21663 +elan3_sdram_display (ELAN3_DEV *dev, char *string)
21664 +{
21665 +    int indx;
21666 +    int nbytes = 0;
21667 +
21668 +    printk ("elan3_sdram_display: dev=%p\n", dev);
21669 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
21670 +       if (dev->SdramFreeLists[indx] != (sdramaddr_t) 0)
21671 +           nbytes += display_blocks (dev, indx, string);
21672 +    printk ("\n%d bytes free\n", nbytes);
21673 +}
21674 +
21675 +void
21676 +elan3_sdram_verify (ELAN3_DEV *dev)
21677 +{
21678 +    int indx, size, nbits, i, b;
21679 +    sdramaddr_t block;
21680 +
21681 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
21682 +    {
21683 +       unsigned count = 0;
21684 +
21685 +       for (block = dev->SdramFreeLists[indx]; block; block = read_next (dev, block), count++)
21686 +       {
21687 +           ELAN3_SDRAM_BANK *bank = sdram_off_to_bank (dev, block);
21688 +           unsigned         off  = sdram_off_to_offset (dev, block);
21689 +           int              bit  = sdram_off_to_bit (dev, indx, block);
21690 +
21691 +           if ((block & (size-1)) != 0)
21692 +               printk ("elan3_sdram_verify: block=%lx indx=%x - not aligned\n", block, indx);
21693 +           
21694 +           if (bank == NULL || off > bank->Size)
21695 +               printk ("elan3_sdram_verify: block=%lx indx=%x - outside bank\n", block, indx);
21696 +           else if (BT_TEST (bank->Bitmaps[indx], bit) == 0)
21697 +               printk ("elan3_sdram_verify: block=%lx indx=%x - bit not set\n", block, indx);
21698 +           else
21699 +           {
21700 +               for (i = indx-1, nbits = 2; i >= 0; i--, nbits <<= 1)
21701 +               {
21702 +                   bit = sdram_off_to_bit (dev, i, block);
21703 +
21704 +                   for (b = 0; b < nbits; b++)
21705 +                       if (BT_TEST(bank->Bitmaps[i], bit + b))
21706 +                           printk ("elan3_sdram_verify: block=%lx indx=%x - also free i=%d bit=%x\n", block, indx, i, bit+b);
21707 +               }
21708 +           }
21709 +       }
21710 +
21711 +       if (dev->SdramFreeCounts[indx] != count)
21712 +           printk ("elan3_sdram_verify: indx=%x expected %d got %d\n", indx, dev->SdramFreeCounts[indx], count);
21713 +    }
21714 +}
21715 +
21716 +#endif /* defined(DEBUG) */
21717 +
21718 +static void
21719 +free_block (ELAN3_DEV *dev, sdramaddr_t block, int indx)
21720 +{
21721 +    ELAN3_SDRAM_BANK *bank = sdram_off_to_bank (dev, block);
21722 +    unsigned        bit  = sdram_off_to_bit(dev, indx, block);
21723 +    unsigned         size = SDRAM_MIN_BLOCK_SIZE << indx;
21724 +
21725 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: block=%lx indx=%d bit=%x\n", block, indx, bit);
21726 +
21727 +    ASSERT ((block & (size-1)) == 0);
21728 +    ASSERT (BT_TEST (bank->Bitmaps[indx], bit) == 0);
21729 +    
21730 +    while (BT_TEST (bank->Bitmaps[indx], bit ^ 1))
21731 +    {
21732 +       sdramaddr_t buddy = block ^ size;
21733 +       
21734 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: merge block=%lx buddy=%lx indx=%d\n", block, buddy, indx);
21735 +
21736 +       BT_CLEAR (bank->Bitmaps[indx], bit ^ 1);
21737 +
21738 +       freelist_remove (dev, indx, buddy);
21739 +       
21740 +       block = (block < buddy) ? block : buddy;
21741 +       indx++;
21742 +       size <<= 1;
21743 +       bit >>= 1;
21744 +    }
21745 +
21746 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: free block=%lx indx=%d bit=%x\n", block, indx, bit);
21747 +
21748 +    freelist_insert (dev, indx, block);
21749 +
21750 +    BT_SET (bank->Bitmaps[indx], bit);
21751 +}
21752 +
21753 +void
21754 +elan3_sdram_init (ELAN3_DEV *dev)
21755 +{
21756 +    int indx;
21757 +
21758 +    spin_lock_init (&dev->SdramLock);
21759 +
21760 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
21761 +    {
21762 +       dev->SdramFreeLists[indx]  = (sdramaddr_t) 0;
21763 +       dev->SdramFreeCounts[indx] = 0;
21764 +    }
21765 +}
21766 +
21767 +void
21768 +elan3_sdram_fini (ELAN3_DEV *dev)
21769 +{
21770 +    spin_lock_destroy (&dev->SdramLock);
21771 +}
21772 +
21773 +void
21774 +elan3_sdram_add (ELAN3_DEV *dev, sdramaddr_t base, sdramaddr_t top)
21775 +{
21776 +    register int indx;
21777 +    register unsigned long size;
21778 +
21779 +    /* align to the minimum block size */
21780 +    base = (base + SDRAM_MIN_BLOCK_SIZE - 1) & ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
21781 +    top &= ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
21782 +
21783 +    /* don't allow 0 as a valid "base" */
21784 +    if (base == 0)
21785 +       base = E3_CACHE_SIZE;
21786 +
21787 +    /* carve the bottom to the biggest boundary */
21788 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
21789 +    {
21790 +       if ((base & size) == 0)
21791 +           continue;
21792 +
21793 +       if ((base + size) > top)
21794 +           break;
21795 +
21796 +       free_block (dev, base, indx);
21797 +       
21798 +       base += size;
21799 +    }
21800 +
21801 +    /* carve the top down to the biggest boundary */
21802 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
21803 +    {
21804 +       if ((top & size) == 0)
21805 +           continue;
21806 +
21807 +       if ((top - size) < base)
21808 +           break;
21809 +
21810 +       free_block (dev, (top - size), indx);
21811 +       
21812 +       top -= size;
21813 +    }
21814 +
21815 +    /* now free of the space in between */
21816 +    while (base < top)
21817 +    {
21818 +       free_block (dev, base, (SDRAM_NUM_FREE_LISTS-1));
21819 +
21820 +       base += SDRAM_MAX_BLOCK_SIZE;
21821 +    }
21822 +}
21823 +
21824 +sdramaddr_t
21825 +elan3_sdram_alloc (ELAN3_DEV *dev, int nbytes)
21826 +{
21827 +    sdramaddr_t block;
21828 +    register int i, indx;
21829 +    unsigned long size;
21830 +    unsigned long flags;
21831 +
21832 +    spin_lock_irqsave (&dev->SdramLock, flags);
21833 +
21834 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
21835 +       ;
21836 +
21837 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: nbytes=%d indx=%d\n", nbytes, indx);
21838 +
21839 +    /* find the smallest block which is big enough for this allocation */
21840 +    for (i = indx; i < SDRAM_NUM_FREE_LISTS; i++, size <<= 1)
21841 +       if (dev->SdramFreeLists[i])
21842 +           break;
21843 +    
21844 +    if (i == SDRAM_NUM_FREE_LISTS)
21845 +    {
21846 +       spin_unlock_irqrestore (&dev->SdramLock, flags);
21847 +       return ((sdramaddr_t) 0);
21848 +    }
21849 +    
21850 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: use block=%lx indx=%d\n", dev->SdramFreeLists[i], i);
21851 +
21852 +    /* remove the block from the free list */
21853 +    freelist_removehead (dev, i, (block = dev->SdramFreeLists[i]));
21854 +
21855 +    /* clear the approriate bit in the bitmap */
21856 +    BT_CLEAR (sdram_off_to_bank (dev, block)->Bitmaps[i], sdram_off_to_bit (dev,i, block));
21857 +
21858 +    /* and split it up as required */
21859 +    while (i-- > indx)
21860 +       free_block (dev, block + (size >>= 1), i);
21861 +
21862 +    PRINTF1 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: return block=%lx\n", block);
21863 +
21864 +    spin_unlock_irqrestore (&dev->SdramLock, flags);
21865 +
21866 +    ASSERT ((block & ((SDRAM_MIN_BLOCK_SIZE << (indx))-1)) == 0);
21867 +
21868 +    return ((sdramaddr_t) block);
21869 +}
21870 +
21871 +void
21872 +elan3_sdram_free (ELAN3_DEV *dev, sdramaddr_t block, int nbytes)
21873 +{
21874 +    register int indx;
21875 +    unsigned long size;
21876 +    unsigned long flags;
21877 +
21878 +    spin_lock_irqsave (&dev->SdramLock, flags);
21879 +
21880 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
21881 +       ;
21882 +
21883 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_free: indx=%d block=%lx\n", indx, block);
21884 +    
21885 +    free_block (dev, block, indx);
21886 +
21887 +    spin_unlock_irqrestore (&dev->SdramLock, flags);
21888 +}
21889 +
21890 +
21891 +
21892 +/*
21893 + * Local variables:
21894 + * c-file-style: "stroustrup"
21895 + * End:
21896 + */
21897 Index: linux-2.4.21/drivers/net/qsnet/elan3/tproc.c
21898 ===================================================================
21899 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/tproc.c   2004-02-23 16:02:56.000000000 -0500
21900 +++ linux-2.4.21/drivers/net/qsnet/elan3/tproc.c        2005-06-01 23:12:54.594439864 -0400
21901 @@ -0,0 +1,778 @@
21902 +/*
21903 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
21904 + *
21905 + *    For licensing information please see the supplied COPYING file
21906 + *
21907 + */
21908 +
21909 +#ident "@(#)$Id: tproc.c,v 1.51.2.1 2004/11/15 11:12:36 mike Exp $"
21910 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tproc.c,v $ */
21911 +
21912 +#include <qsnet/kernel.h>
21913 +
21914 +#include <elan3/elanregs.h>
21915 +#include <elan3/elandev.h>
21916 +#include <elan3/elanvp.h>
21917 +#include <elan3/elan3mmu.h>
21918 +#include <elan3/elanctxt.h>
21919 +#include <elan3/elandebug.h>
21920 +#include <elan3/urom_addrs.h>
21921 +#include <elan3/thread.h>
21922 +#include <elan3/elansyscall.h>
21923 +#include <elan3/threadsyscall.h>
21924 +#include <elan3/intrinsics.h>
21925 +#include <elan3/vmseg.h>
21926 +
21927 +int
21928 +HandleTProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits)
21929 +{
21930 +    THREAD_TRAP  *trap  = dev->ThreadTrap;
21931 +    int           delay = 1;
21932 +
21933 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
21934 +
21935 +    trap->Status.Status  = read_reg32 (dev, Exts.TProcStatus);
21936 +    trap->sp             = read_reg32 (dev, Thread_Desc_SP);
21937 +    trap->pc             = read_reg32 (dev, ExecutePC);
21938 +    trap->npc            = read_reg32 (dev, ExecuteNPC);
21939 +    trap->StartPC        = read_reg32 (dev, StartPC);
21940 +    trap->mi             = GET_STATUS_TRAPTYPE(trap->Status);
21941 +    trap->TrapBits.Bits  = read_reg32 (dev, TrapBits.Bits);
21942 +    trap->DirtyBits.Bits = read_reg32 (dev, DirtyBits.Bits);
21943 +
21944 +    if ( ! (trap->Status.s.WakeupFunction == SleepOneTick) ) {
21945 +       int p,i;
21946 +       E3_uint32 reg = read_reg32 (dev, Exts.InterruptReg);    
21947 +
21948 +       ELAN_REG_REC(reg);
21949 +       p = elan_reg_rec_index;
21950 +       for(i=0;i<ELAN_REG_REC_MAX;i++) {
21951 +           if (elan_reg_rec_file[i] != NULL ) 
21952 +               printk("Elan Reg Record[%2d](%ld): cpu %d  reg %x [%d:%s]\n", p, elan_reg_rec_lbolt[p], elan_reg_rec_cpu[p], elan_reg_rec_reg[p],
21953 +                      elan_reg_rec_line[p], elan_reg_rec_file[p]);
21954 +           p = ( (p+1) % ELAN_REG_REC_MAX);
21955 +       }
21956 +    }
21957 +    
21958 +    ASSERT(trap->Status.s.WakeupFunction == SleepOneTick);
21959 +
21960 +    /* copy the four access fault areas */
21961 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc),          (void *) &trap->FaultSave, 16);
21962 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), (void *) &trap->DataFaultSave, 16);
21963 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), (void *) &trap->InstFaultSave, 16);
21964 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), (void *) &trap->OpenFaultSave, 16);
21965 +    
21966 +    /* copy the registers,  note the endian swap flips the odd registers into the even registers
21967 +       and visa versa. */
21968 +    copy_thread_regs (dev, trap->Registers);
21969 +
21970 +    /*
21971 +     * If the output was open then the ack may not have returned yet. Must wait for the
21972 +     * ack to become valid and update trap_dirty with the new value. Will simulate the
21973 +     * instructions later.
21974 +     */
21975 +    if (trap->TrapBits.s.OutputWasOpen)
21976 +    {
21977 +       trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits);
21978 +       while (! trap->TrapBits.s.AckBufferValid)
21979 +       {
21980 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "tproc: waiting for ack to become valid\n");
21981 +           trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits);
21982 +           DELAY (delay);
21983 +
21984 +           if ((delay <<= 1) == 0) delay = 1;
21985 +       }
21986 +    }
21987 +    
21988 +    /* update device statistics */
21989 +    BumpStat (dev, TProcTraps);
21990 +    switch (trap->mi)
21991 +    {
21992 +    case MI_UnimplementedError:
21993 +       if (trap->TrapBits.s.ForcedTProcTrap)
21994 +           BumpStat (dev, ForcedTProcTraps);
21995 +       if (trap->TrapBits.s.ThreadTimeout)
21996 +       {
21997 +           if (trap->TrapBits.s.PacketTimeout)
21998 +               BumpStat (dev, ThreadOutputTimeouts);
21999 +           else if (trap->TrapBits.s.PacketAckValue == E3_PAckError)
22000 +               BumpStat (dev, ThreadPacketAckErrors);
22001 +       }
22002 +       if (trap->TrapBits.s.TrapForTooManyInsts)
22003 +           BumpStat (dev, TrapForTooManyInsts);
22004 +       break;
22005 +    }
22006 +
22007 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), 16);
22008 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), 16);
22009 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), 16);
22010 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), 16);
22011 +
22012 +    *RestartBits |= RestartTProc;
22013 +
22014 +    return (TRUE);
22015 +}
22016 +
22017 +void
22018 +DeliverTProcTrap (ELAN3_DEV *dev, THREAD_TRAP *threadTrap, E3_uint32 Pend)
22019 +{
22020 +    ELAN3_CTXT      *ctxt;
22021 +    THREAD_TRAP      *trap;
22022 +
22023 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
22024 +
22025 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, threadTrap->Status.s.Context);
22026 +
22027 +    if (ctxt == NULL)
22028 +    {
22029 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "DeliverTProcTrap: context %x invalid\n", threadTrap->Status.s.Context);
22030 +       BumpStat (dev, InvalidContext);
22031 +    }
22032 +    else
22033 +    {
22034 +       if (ELAN3_OP_TPROC_TRAP (ctxt, threadTrap) == OP_DEFER)
22035 +       {
22036 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->ThreadTrapQ))
22037 +           {
22038 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
22039 +               StartSwapoutContext (ctxt, Pend, NULL);
22040 +           }
22041 +           else
22042 +           {
22043 +               trap = ELAN3_QUEUE_BACK (ctxt->ThreadTrapQ, ctxt->ThreadTraps);
22044 +               
22045 +               bcopy (threadTrap, trap, sizeof (THREAD_TRAP));
22046 +               
22047 +               PRINTF4 (ctxt, DBG_INTR, "DeliverTProcTrap: SP=%08x PC=%08x NPC=%08x StartPC %08x\n",
22048 +                        trap->sp, trap->pc, trap->npc, trap->StartPC);
22049 +               PRINTF3 (ctxt, DBG_INTR, "       mi=%s trap=%08x dirty=%08x\n",
22050 +                        MiToName (trap->mi), trap->TrapBits.Bits, trap->DirtyBits.Bits);
22051 +               PRINTF3 (ctxt, DBG_INTR, "       FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
22052 +                        trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
22053 +               PRINTF3 (ctxt, DBG_INTR, "       DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22054 +                        trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
22055 +               PRINTF3 (ctxt, DBG_INTR, "       InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22056 +                        trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
22057 +               PRINTF3 (ctxt, DBG_INTR, "       OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22058 +                        trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
22059 +               
22060 +               PRINTF4 (ctxt, DBG_INTR, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
22061 +                        trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
22062 +                        trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
22063 +               PRINTF4 (ctxt, DBG_INTR, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
22064 +                        trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
22065 +                        trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
22066 +               PRINTF4 (ctxt, DBG_INTR, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
22067 +                        trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
22068 +                        trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
22069 +               PRINTF4 (ctxt, DBG_INTR, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
22070 +                        trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
22071 +                        trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
22072 +               PRINTF4 (ctxt, DBG_INTR, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
22073 +                        trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)],
22074 +                        trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
22075 +               PRINTF4 (ctxt, DBG_INTR, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
22076 +                        trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)],
22077 +                        trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
22078 +               PRINTF4 (ctxt, DBG_INTR, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
22079 +                        trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)],
22080 +                        trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
22081 +               PRINTF4 (ctxt, DBG_INTR, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
22082 +                        trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)],
22083 +                        trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
22084 +               
22085 +               ELAN3_QUEUE_ADD (ctxt->ThreadTrapQ);
22086 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
22087 +               
22088 +               if (ELAN3_QUEUE_FULL (ctxt->ThreadTrapQ))
22089 +               {
22090 +                   PRINTF0 (ctxt, DBG_INTR, "DeliverTProcTrap: thread queue full,  must swap out\n");
22091 +                   ctxt->Status |= CTXT_THREAD_QUEUE_FULL;
22092 +                   
22093 +                   StartSwapoutContext (ctxt, Pend, NULL);
22094 +               }
22095 +           }
22096 +       }
22097 +    }
22098 +}
22099 +
22100 +int
22101 +NextTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
22102 +{
22103 +    ELAN3_DEV *dev = ctxt->Device;
22104 +
22105 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
22106 +    
22107 +    if (ELAN3_QUEUE_EMPTY (ctxt->ThreadTrapQ))
22108 +       return (0);
22109 +
22110 +    *trap = *ELAN3_QUEUE_FRONT (ctxt->ThreadTrapQ, ctxt->ThreadTraps);
22111 +    ELAN3_QUEUE_REMOVE (ctxt->ThreadTrapQ);
22112 +    
22113 +    return (1);
22114 +}
22115 +
22116 +void
22117 +ResolveTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
22118 +{
22119 +    int       i;
22120 +    int       res;
22121 +    E3_Addr   StackPointer;
22122 +
22123 +    PRINTF4 (ctxt, DBG_TPROC, "ResolveTProcTrap: SP=%08x PC=%08x NPC=%08x StartPC %08x\n",
22124 +            trap->sp, trap->pc, trap->npc, trap->StartPC);
22125 +    PRINTF3 (ctxt, DBG_TPROC, "       mi=%s trap=%08x dirty=%08x\n",
22126 +            MiToName (trap->mi), trap->TrapBits.Bits, trap->DirtyBits.Bits);
22127 +    PRINTF3 (ctxt, DBG_TPROC, "       FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
22128 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
22129 +    PRINTF3 (ctxt, DBG_TPROC, "       DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22130 +            trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
22131 +    PRINTF3 (ctxt, DBG_TPROC, "       InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22132 +            trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
22133 +    PRINTF3 (ctxt, DBG_TPROC, "       OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22134 +            trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
22135 +    
22136 +    PRINTF4 (ctxt, DBG_TPROC, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
22137 +            trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
22138 +            trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
22139 +    PRINTF4 (ctxt, DBG_TPROC, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
22140 +            trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
22141 +            trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
22142 +    PRINTF4 (ctxt, DBG_TPROC, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
22143 +            trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
22144 +            trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
22145 +    PRINTF4 (ctxt, DBG_TPROC, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
22146 +            trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
22147 +            trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
22148 +    PRINTF4 (ctxt, DBG_TPROC, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
22149 +            trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)],
22150 +            trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
22151 +    PRINTF4 (ctxt, DBG_TPROC, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
22152 +            trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)],
22153 +            trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
22154 +    PRINTF4 (ctxt, DBG_TPROC, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
22155 +            trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)],
22156 +            trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
22157 +    PRINTF4 (ctxt, DBG_TPROC, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
22158 +            trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)],
22159 +            trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
22160 +           
22161 +
22162 +    BumpUserStat (ctxt, TProcTraps);
22163 +
22164 +    switch (trap->mi)
22165 +    {
22166 +    case MI_UnimplementedError:
22167 +    {
22168 +       /*
22169 +        * This occurs if the threads processor trapped. All other cases will be for the ucode
22170 +        * thread trapping.
22171 +        */
22172 +       int restart = 1;
22173 +       int skip    = 0;
22174 +       
22175 +       PRINTF1 (ctxt, DBG_TPROC, "TProc: Mi=Unimp. Using trap->TrapBits=%x\n", trap->TrapBits.Bits);
22176 +       
22177 +       /*
22178 +        * Data Access Exception.
22179 +        */
22180 +       if (trap->TrapBits.s.DataAccessException)
22181 +       {
22182 +           ASSERT (CTXT_IS_KERNEL(ctxt) || trap->DataFaultSave.s.FSR.Status == 0 ||
22183 +                   ctxt->Capability.cap_mycontext == trap->DataFaultSave.s.FaultContext);
22184 +
22185 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: DataAccessException %08x\n", trap->DataFaultSave.s.FaultAddress);
22186 +
22187 +           if ((res = elan3_pagefault (ctxt, &trap->DataFaultSave, 1)) != ESUCCESS)
22188 +           {
22189 +               PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed for data %08x\n",
22190 +                        trap->DataFaultSave.s.FaultAddress);
22191 +
22192 +               if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->DataFaultSave, res) != OP_IGNORE)
22193 +                   restart = 0;
22194 +           }
22195 +       }
22196 +       
22197 +       /* 
22198 +        * Instruction Access Exception.
22199 +        */
22200 +       if (trap->TrapBits.s.InstAccessException)
22201 +       {
22202 +           ASSERT (CTXT_IS_KERNEL (ctxt) || trap->InstFaultSave.s.FSR.Status == 0 ||
22203 +                   ctxt->Capability.cap_mycontext == trap->InstFaultSave.s.FaultContext);
22204 +           
22205 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: InstAccessException %08x\n", trap->InstFaultSave.s.FaultAddress);
22206 +
22207 +           if ((res = elan3_pagefault (ctxt, &trap->InstFaultSave, 1)) != ESUCCESS)
22208 +           {
22209 +               PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed for inst %08x\n",
22210 +                        trap->InstFaultSave.s.FaultAddress);
22211 +
22212 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->InstFaultSave, res);
22213 +               restart = 0;
22214 +           }
22215 +       }
22216 +       
22217 +       /*
22218 +        * Forced TProc trap/Unimplemented instruction
22219 +        *
22220 +        *  If there is a force tproc trap then don't look at 
22221 +        *  the unimplemented instruction bit - since it can
22222 +        *  be set in obscure circumstances.
22223 +        */
22224 +       if (trap->TrapBits.s.ForcedTProcTrap)
22225 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: forced tproc trap, restarting\n");
22226 +       else if (trap->TrapBits.s.Unimplemented)
22227 +       {
22228 +           E3_uint32 instr = ELAN3_OP_LOAD32 (ctxt, trap->pc & PC_MASK);
22229 +
22230 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: unimplemented instruction %08x\n", instr);
22231 +
22232 +           if ((instr & OPCODE_MASK) == OPCODE_Ticc &&
22233 +               (instr & OPCODE_IMM)  == OPCODE_IMM &&
22234 +               (Ticc_COND(instr)     == Ticc_TA))
22235 +           {
22236 +               switch (INSTR_IMM(instr))
22237 +               {
22238 +               case ELAN3_ELANCALL_TRAPNUM:
22239 +                   /*
22240 +                    * Since the thread cannot easily access the global variable which holds
22241 +                    * the elan system call number, we provide a different trap for the elan
22242 +                    * system call, and copy the system call number into %g1 before calling
22243 +                    * ThreadSyscall().
22244 +                    */
22245 +                   BumpUserStat (ctxt, ThreadElanCalls);
22246 +
22247 +                   if (ThreadElancall (ctxt, trap, &skip) != ESUCCESS)
22248 +                   {
22249 +                       ElanException (ctxt, EXCEPTION_BAD_SYSCALL, THREAD_PROC, trap);
22250 +                       restart = 0;
22251 +                   }
22252 +                   break;
22253 +
22254 +               case ELAN3_SYSCALL_TRAPNUM:
22255 +                   BumpUserStat (ctxt, ThreadSystemCalls);
22256 +
22257 +                   if (ThreadSyscall (ctxt, trap, &skip) != ESUCCESS)
22258 +                   {
22259 +                       ElanException (ctxt, EXCEPTION_BAD_SYSCALL, THREAD_PROC, trap);
22260 +                       restart = 0;
22261 +                   }
22262 +                   break;
22263 +
22264 +               case ELAN3_DEBUG_TRAPNUM:
22265 +                   ElanException (ctxt, EXCEPTION_DEBUG, THREAD_PROC, trap);
22266 +                   skip = 1;
22267 +                   break;
22268 +                   
22269 +               case ELAN3_ABORT_TRAPNUM:
22270 +               default:
22271 +                   ElanException (ctxt, EXCEPTION_UNIMP_INSTR, THREAD_PROC, trap, instr);
22272 +                   restart = 0;
22273 +                   break;
22274 +               }
22275 +                   
22276 +           }
22277 +           else
22278 +           {
22279 +               ElanException (ctxt, EXCEPTION_UNIMP_INSTR, THREAD_PROC, trap, instr);
22280 +               restart = 0;
22281 +           }
22282 +       }
22283 +       
22284 +       /*
22285 +        * Faulted fetching routes.
22286 +        */
22287 +       if (trap->TrapBits.s.OpenRouteFetch)
22288 +       {
22289 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: OpenRouteFetch %08x\n", trap->OpenFaultSave.s.FaultAddress);
22290 +           
22291 +           if ((res = ResolveVirtualProcess (ctxt, trap->OpenFaultSave.s.FaultAddress)) != ESUCCESS &&
22292 +               ElanException (ctxt, EXCEPTION_INVALID_PROCESS, THREAD_PROC, trap, trap->DataFaultSave.s.FaultAddress, res) != OP_IGNORE)
22293 +           {
22294 +               restart = 0;
22295 +           }
22296 +           else if (RollThreadToClose (ctxt, trap, E3_PAckDiscard) != ESUCCESS)        /* Force a discard */
22297 +           {
22298 +               restart = 0;
22299 +           }
22300 +       }
22301 +       
22302 +       /*
22303 +        * Thread Timeout
22304 +        */
22305 +       if (trap->TrapBits.s.ThreadTimeout)
22306 +       {
22307 +           if (ElanException (ctxt, EXCEPTION_PACKET_TIMEOUT, THREAD_PROC, trap) != OP_IGNORE)
22308 +               restart = 0;
22309 +           else
22310 +           {
22311 +               PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: timeout or PAckError!\n");
22312 +               
22313 +               /* Might deschedule the thread for a while or mark the link error here. */
22314 +               if (! trap->TrapBits.s.OutputWasOpen && RollThreadToClose (ctxt, trap, trap->TrapBits.s.PacketAckValue) != ESUCCESS)
22315 +               {
22316 +                   restart = 0;
22317 +               }
22318 +           }
22319 +       }
22320 +       
22321 +       /*
22322 +        * Open exception
22323 +        */
22324 +       if (trap->TrapBits.s.OpenException)
22325 +       {
22326 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: open exception\n");
22327 +           if (ElanException (ctxt, EXCEPTION_THREAD_KILLED, THREAD_PROC, trap) != OP_IGNORE)
22328 +               restart = 0;
22329 +       }
22330 +       
22331 +       /*
22332 +        * Too many instructions.
22333 +        */
22334 +       if (trap->TrapBits.s.TrapForTooManyInsts)
22335 +       {
22336 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: too many instructions\n");
22337 +           if (ElanException (ctxt, EXCEPTION_THREAD_KILLED, THREAD_PROC, trap) != OP_IGNORE)
22338 +               restart = 0;
22339 +       }
22340 +       
22341 +       if (restart)
22342 +       {
22343 +           /*
22344 +            * If the output was open when the trap was taken then the trap code must move
22345 +            * the PC on past the close instruction and simulate the effect of all the instructions
22346 +            * that do not output onto the link. The value of the ack received is then used to
22347 +            * simulate the close instruction.
22348 +            */
22349 +           if (trap->TrapBits.s.OutputWasOpen && RollThreadToClose(ctxt, trap, trap->TrapBits.s.PacketAckValue) != ESUCCESS)
22350 +           {
22351 +               /*
22352 +                * Don't restart if we couldn't roll it forweards 
22353 +                * to a close instruction.
22354 +                */
22355 +               break;
22356 +           }
22357 +
22358 +           /*
22359 +            * We must check back 3 instructions from the PC,  and if we see the
22360 +            * c_close_cookie() sequence then we must execute the instructions to
22361 +            * the end of it.
22362 +            */
22363 +           /* XXXX: code to be written */
22364 +           
22365 +           StackPointer = SaveThreadToStack (ctxt, trap, skip);
22366 +           
22367 +           ReissueStackPointer (ctxt, StackPointer);
22368 +       }
22369 +       
22370 +       break;
22371 +    }
22372 +    
22373 +    /*
22374 +     * This case is different from the others as %o6 has been overwritten with
22375 +     * the SP. The real PC can be read from StartPC and written back
22376 +     * into %o6 on the stack.
22377 +     */
22378 +    case MI_TProcNext:                 /* Reading the outs block */
22379 +    {
22380 +       E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
22381 +
22382 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
22383 +       {
22384 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
22385 +
22386 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing StartPc to o6\n");
22387 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
22388 +           break;
22389 +       }
22390 +       ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[6]), trap->StartPC & PC_MASK);
22391 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
22392 +       /* DROPTHROUGH */
22393 +    }
22394 +    /*
22395 +     * all of these will be generated when starting up a thread.
22396 +     * Just re-issue the command after fixing the trap. The ucode keeps the startup
22397 +     * from trap information in Thread_Desc_SP while it is still loading the regs.
22398 +     */
22399 +    case MI_WaitForGlobalsRead:                /* Reading the globals block (trap restart) */
22400 +    case MI_WaitForNPCRead:            /* Reading the nPC, V and C (trap restart) */
22401 +    case MI_WaitForPCload:             /* Reading the PC, N and Z (trap restart) */
22402 +    case MI_WaitForInsRead:            /* Reading the ins block (trap restart) */
22403 +    case MI_WaitForLocals:             /* Reading the ins block (trap restart) */
22404 +    case MI_WaitForPCload2:            /* Reading the PC (normal thread start) */
22405 +    case MI_WaitForSpStore:            /* Writing the SP to the outs block */
22406 +       PRINTF2 (ctxt, DBG_TPROC, "ResolveTProcTrap: %s %08x\n", MiToName (trap->mi), trap->InstFaultSave.s.FaultAddress);
22407 +
22408 +       if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
22409 +       {
22410 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed at %08x\n",
22411 +                    trap->FaultSave.s.FaultAddress);
22412 +           if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, &trap->FaultSave, trap, res) != OP_IGNORE)
22413 +               break;
22414 +       }
22415 +
22416 +       ReissueStackPointer (ctxt, trap->sp);
22417 +       break;
22418 +       
22419 +       /*
22420 +        * These traps could occur after the threads proc has stopped (either for a wait,
22421 +        * break, or suspend, but not a trap). Must simulate the uCode's job.
22422 +        */
22423 +    case MI_WaitForOutsWrite:          /* Writing the outs block */
22424 +    case MI_WaitForNPCWrite:           /* Writing the nPC block */
22425 +    {
22426 +       E3_uint32 DeschedBits = (trap->TrapBits.Bits & E3_TProcDescheduleMask);
22427 +       E3_Addr   stack       = (trap->sp & SP_MASK) - sizeof (E3_Stack);
22428 +       
22429 +       PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: trapped on %s while stopping a thread\n", MiToName(trap->mi));
22430 +       
22431 +       /*
22432 +        * Copy npc into o6.
22433 +        */
22434 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)] = trap->npc;
22435 +       
22436 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
22437 +       {
22438 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
22439 +
22440 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing outs to stack\n");
22441 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
22442 +           break;
22443 +       }
22444 +       
22445 +       /*
22446 +        * Now write the outs back to the stack. NOTE then endian flip is undone.
22447 +        */
22448 +       for (i = 0; i < 8; i++)
22449 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]), trap->Registers[REG_OUTS+(i^WordEndianFlip)]);
22450 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
22451 +       
22452 +       /*
22453 +        * thread has been saved. Now find out why the thread proc stopped.
22454 +        */
22455 +       if (DeschedBits == E3_TProcDescheduleSuspend)
22456 +       {
22457 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: suspend instruction executed\n");
22458 +           break;
22459 +       }
22460 +       
22461 +       /*
22462 +        * Break. Just reissue the command.
22463 +        */
22464 +       if (DeschedBits == E3_TProcDescheduleBreak)
22465 +       {
22466 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: break instruction, reissue sp %08x\n", trap->sp);
22467 +           ReissueStackPointer (ctxt, trap->sp);
22468 +           break;
22469 +       }
22470 +       
22471 +       ASSERT (DeschedBits == E3_TProcDescheduleWait);
22472 +        
22473 +       /* DROPTHROUGH to fix up a wait event */
22474 +    }
22475 +    
22476 +    /*
22477 +     * Trapped here trying to execute a wait instruction. All the thread state has already
22478 +     * been saved and the trap has been fixed so simplest thing to do is to start the
22479 +     * thread up at the wait instruction again.
22480 +     */
22481 +    case MI_WaitForEventWaitAddr:      /* Reading back the %o0,%o1 pair for a
22482 +                                          wait event instr. */
22483 +    case MI_WaitForWaitEventAccess:    /* Locked dword read of the event location.
22484 +                                          Note that this read is done with write
22485 +                                          permissions so we never get a trap on the write */
22486 +    {
22487 +       E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
22488 +       
22489 +       if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
22490 +       {
22491 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed at %08x\n", 
22492 +                    trap->FaultSave.s.FaultAddress);
22493 +           if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->DataFaultSave, res) != OP_IGNORE)
22494 +               break;
22495 +       }
22496 +
22497 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
22498 +       {
22499 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
22500 +
22501 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing pc to stack\n");
22502 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
22503 +           break;
22504 +       }
22505 +
22506 +       ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[6]), trap->pc);
22507 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
22508 +       
22509 +       ReissueStackPointer (ctxt, trap->sp);
22510 +       break;
22511 +    }
22512 +    
22513 +    /*
22514 +     * Assume the fault will be fixed by FixupEventTrap.
22515 +     */
22516 +    default:
22517 +       FixupEventTrap (ctxt, THREAD_PROC, trap, trap->mi, &trap->FaultSave, 0);
22518 +       break;
22519 +    }
22520 +}
22521 +
22522 +int
22523 +TProcNeedsRestart (ELAN3_CTXT *ctxt)
22524 +{
22525 +    return (ctxt->ItemCount[LIST_THREAD] != 0);
22526 +}
22527 +
22528 +void
22529 +RestartTProcItems (ELAN3_CTXT *ctxt)
22530 +{
22531 +    void     *item;
22532 +    E3_uint32 StackPointer;
22533 +
22534 +    kmutex_lock (&ctxt->SwapListsLock);
22535 +    
22536 +    while (ctxt->ItemCount[LIST_THREAD])
22537 +    {
22538 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_THREAD, &item, &StackPointer))
22539 +           ctxt->ItemCount[LIST_THREAD] = 0;
22540 +       else
22541 +       {
22542 +           if (IssueCommand (ctxt, offsetof (E3_CommandPort, RunThread), StackPointer, 0) == ISSUE_COMMAND_RETRY)
22543 +           {
22544 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_THREAD, item);
22545 +               kmutex_unlock (&ctxt->SwapListsLock);
22546 +               return;
22547 +           }
22548 +           
22549 +           ctxt->ItemCount[LIST_THREAD]--;
22550 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
22551 +       }
22552 +    }
22553 +    kmutex_unlock (&ctxt->SwapListsLock);
22554 +}
22555 +
22556 +E3_Addr
22557 +SaveThreadToStack (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int SkipInstruction)
22558 +{
22559 +    E3_Addr      stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
22560 +    E3_Addr      orflag;
22561 +    register int i;
22562 +
22563 +    /*
22564 +     * When the thread deschedules normally, the N & Z flags are written 
22565 +     * to the stack in o6, and the V & C flags are lost.
22566 +     * Since the Elan will store the NPC into o6 (to skip the instruction), 
22567 +     * the CC flags are visible to the trap handler in the trapped PC and NPC.   
22568 +     * If the instruction needs to be re-executed then the CC flags need to be
22569 +     * kept in the right place to be read in when the thread re-starts.
22570 +     *
22571 +     * PC  has N & Z from trapped NPC.
22572 +     * NPC has V & C from trapped PC.
22573 +     */
22574 +    if (SkipInstruction)
22575 +    {
22576 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)]    = trap->npc;
22577 +       trap->Registers[REG_GLOBALS+(0^WordEndianFlip)] = ((trap->npc & PC_MASK) + 4) | (trap->pc & CC_MASK);
22578 +    }
22579 +    else
22580 +    {
22581 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)]    = (trap->pc & PC_MASK) | (trap->npc & CC_MASK);
22582 +       trap->Registers[REG_GLOBALS+(0^WordEndianFlip)] = (trap->npc & PC_MASK) | (trap->pc & CC_MASK);
22583 +    }
22584 +    
22585 +    if (ELAN3_OP_START_FAULT_CHECK(ctxt))
22586 +    {
22587 +       PRINTF0 (ctxt, DBG_TPROC, "RestartThread: faulted writing out thread\n");
22588 +       ELAN3_OP_END_FAULT_CHECK(ctxt);
22589 +
22590 +       ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
22591 +       return ((E3_Addr) 0);
22592 +    }
22593 +
22594 +
22595 +#ifdef DEBUG_PRINTF
22596 +    PRINTF4 (ctxt, DBG_TPROC, "SaveThreadToStack: SP=%08x PC=%08x NPC=%08x DIRTY=%08x\n",
22597 +            trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits);
22598 +    if (trap->DirtyBits.s.GlobalsDirty)
22599 +    {
22600 +       PRINTF4 (ctxt, DBG_TPROC, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
22601 +                trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
22602 +                trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
22603 +       PRINTF4 (ctxt, DBG_TPROC, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
22604 +                trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
22605 +                trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
22606 +    }
22607 +    if (trap->DirtyBits.s.OutsDirty)
22608 +    {
22609 +       PRINTF4 (ctxt, DBG_TPROC, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
22610 +                trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
22611 +                trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
22612 +       PRINTF4 (ctxt, DBG_TPROC, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
22613 +                trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
22614 +                trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
22615 +    }
22616 +    if (trap->DirtyBits.s.LocalsDirty)
22617 +    {
22618 +       PRINTF4 (ctxt, DBG_TPROC, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
22619 +                trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
22620 +                trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
22621 +       PRINTF4 (ctxt, DBG_TPROC, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
22622 +                trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
22623 +                trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
22624 +    }
22625 +    if (trap->DirtyBits.s.InsDirty)
22626 +    {
22627 +       PRINTF4 (ctxt, DBG_TPROC, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
22628 +                trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
22629 +                trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
22630 +       PRINTF4 (ctxt, DBG_TPROC, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
22631 +                trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
22632 +                trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
22633 +    }
22634 +#endif 
22635 +    
22636 +    PRINTF1 (ctxt, DBG_TPROC, "flushing registers to stack %08x\n", stack);
22637 +
22638 +    /* 
22639 +     * NOTE - store the register to the stack in reverse order, since the stack 
22640 +     * will be allocated in sdram, and we cannot use the sdram accessing functions 
22641 +     * here, as it is "mapped" in user-space.
22642 +     */
22643 +    for (i = 0; i < 8; i++)
22644 +    {
22645 +       if (trap->DirtyBits.s.GlobalsDirty & (1 << i))
22646 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Globals[i]), trap->Registers[REG_GLOBALS+(i^WordEndianFlip)]);
22647 +       if (trap->DirtyBits.s.OutsDirty & (1 << i))
22648 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]),    trap->Registers[REG_OUTS+(i^WordEndianFlip)]);
22649 +       if (trap->DirtyBits.s.LocalsDirty & (1 << i))
22650 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Locals[i]),  trap->Registers[REG_LOCALS+(i^WordEndianFlip)]);
22651 +       if (trap->DirtyBits.s.InsDirty & (1 << i))
22652 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Ins[i]),     trap->Registers[REG_INS+(i^WordEndianFlip)]);
22653 +    }
22654 +
22655 +    /* always restore all registers */
22656 +    orflag = ThreadRestartFromTrapBit | ThreadReloadAllRegs;
22657 +    
22658 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
22659 +
22660 +    return (trap->sp | orflag);
22661 +}
22662 +
22663 +void
22664 +ReissueStackPointer (ELAN3_CTXT *ctxt, E3_Addr StackPointer)
22665 +{
22666 +    PRINTF1 (ctxt, DBG_TPROC, "ReissueStackPointer : Queue SP %08x\n", StackPointer);
22667 +    
22668 +    kmutex_lock (&ctxt->SwapListsLock);
22669 +    ctxt->ItemCount[LIST_THREAD]++;
22670 +    ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_THREAD, StackPointer);
22671 +    kmutex_unlock (&ctxt->SwapListsLock);
22672 +}
22673 +
22674 +
22675 +/*
22676 + * Local variables:
22677 + * c-file-style: "stroustrup"
22678 + * End:
22679 + */
22680 Index: linux-2.4.21/drivers/net/qsnet/elan3/tprocinsts.c
22681 ===================================================================
22682 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/tprocinsts.c      2004-02-23 16:02:56.000000000 -0500
22683 +++ linux-2.4.21/drivers/net/qsnet/elan3/tprocinsts.c   2005-06-01 23:12:54.595439712 -0400
22684 @@ -0,0 +1,401 @@
22685 +/*
22686 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
22687 + *
22688 + *    For licensing information please see the supplied COPYING file
22689 + *
22690 + */
22691 +
22692 +#ident "@(#)$Id: tprocinsts.c,v 1.20 2003/09/24 13:57:25 david Exp $"
22693 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tprocinsts.c,v $*/
22694 +
22695 +#include <qsnet/kernel.h>
22696 +
22697 +#include <elan3/elanregs.h>
22698 +#include <elan3/elandev.h>
22699 +#include <elan3/elanvp.h>
22700 +#include <elan3/elan3mmu.h>
22701 +#include <elan3/elanctxt.h>
22702 +#include <elan3/elandebug.h>
22703 +#include <elan3/urom_addrs.h>
22704 +#include <elan3/thread.h>
22705 +#include <elan3/vmseg.h>
22706 +#include <elan3/elan3mmu.h>
22707 +
22708 +#define MAXINSTR       256             /* # Instructions to look at while looking for close */
22709 +
22710 +static E3_uint32 ALU (ELAN3_CTXT *ctxt,
22711 +                     E3_uint32 fcode, E3_uint32 X, E3_uint32 Y,
22712 +                     E3_uint32 *Z, E3_uint32 *N, E3_uint32 *C, E3_uint32 *V);
22713 +
22714 +char *OpcodeNames[] =
22715 +{
22716 +   "ADD   ",
22717 +   "AND   ",
22718 +   "OR    ",
22719 +   "XOR   ",
22720 +   "SUB   ",
22721 +   "ANDN  ",
22722 +   "ORN   ",
22723 +   "XNOR  ",
22724 +   "ADDX  ",
22725 +   "UNIP  ",
22726 +   "UMUL  ",
22727 +   "SMUL  ",
22728 +   "SUBX  ",
22729 +   "UNIP  ",
22730 +   "UDIV  ",
22731 +   "SDIV  ",
22732 +   "ADDcc ",
22733 +   "ANDcc ",
22734 +   "ORcc  ",
22735 +   "XORcc ",
22736 +   "SUBcc ",
22737 +   "ANDNcc",
22738 +   "ORNcc ",
22739 +   "XNORcc",
22740 +   "ADDXcc",
22741 +   "UNIPcc",
22742 +   "UMULcc",
22743 +   "SMULcc",
22744 +   "SUBXcc",
22745 +   "UNIPcc",
22746 +   "UDIVcc",
22747 +   "SDIVcc"
22748 +};
22749 +
22750 +#define REGISTER_VALUE(trap, rN)               (((rN) == 0) ? 0 : (trap)->Registers[(rN)^WordEndianFlip])
22751 +#define ASSIGN_REGISTER(trap, rN, value)       ((rN) != 0 ? trap->Registers[(rN)^WordEndianFlip] = (value) : 0)
22752 +
22753 +int
22754 +RollThreadToClose (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, E3_uint32 PAckVal)
22755 +{
22756 +    E3_Addr   pc      = (trap->pc & PC_MASK);
22757 +    E3_Addr   npc     = (trap->npc & PC_MASK);
22758 +    E3_uint32 Z       = (trap->npc & PSR_Z_BIT) ? 1 : 0;
22759 +    E3_uint32 N       = (trap->npc & PSR_N_BIT) ? 1 : 0;
22760 +    E3_uint32 C       = (trap->pc  & PSR_C_BIT) ? 1 : 0;
22761 +    E3_uint32 V       = (trap->pc  & PSR_V_BIT) ? 1 : 0;
22762 +    E3_uint32 instr;
22763 +    E3_Addr   addr;
22764 +
22765 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
22766 +    {
22767 +    failed:
22768 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
22769 +
22770 +       ElanException (ctxt, EXCEPTION_SIMULATION_FAILED, THREAD_PROC, trap);
22771 +       return (EFAULT);
22772 +    }
22773 +
22774 +    /*
22775 +     * Thread trapped with output open, or while closing,
22776 +     * so roll the PC forwards to the instruction after the
22777 +     * next c_close, and execute that with the register
22778 +     * specified in c_close set to the trap which occured.
22779 +     * (This is not 1 which means an ACK)
22780 +     */
22781 +    PRINTF1 (ctxt, DBG_TPROC, "RollThreadToClose: roll pc %x to c_close\n", pc);
22782 +    
22783 +    for (;;)
22784 +    {
22785 +       instr = ELAN3_OP_LOAD32 (ctxt, pc);
22786 +
22787 +       PRINTF2 (ctxt, DBG_TPROC, "RollThreadToClose: PC=%x INSTR=%x\n", pc, instr);
22788 +
22789 +       switch (OPCODE_CLASS(instr))
22790 +       {
22791 +       case OPCODE_CLASS_0:
22792 +           switch ((instr) & OPCODE_CLASS0_MASK)
22793 +           {
22794 +           case OPCODE_SETHI:
22795 +               PRINTF3 (ctxt, DBG_TPROC, "PC %x : sethi r%d = %x\n", pc, INSTR_RD(instr), instr << 10);
22796 +
22797 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), instr << 10);
22798 +               break;
22799 +
22800 +           case OPCODE_SENDREG:
22801 +               PRINTF1 (ctxt, DBG_TPROC, "PC %x : sendreg\n", pc);
22802 +               break;
22803 +               
22804 +           case OPCODE_SENDMEM:
22805 +               PRINTF1 (ctxt, DBG_TPROC, "PC %x : sendmem\n", pc);
22806 +               break;
22807 +               
22808 +           case OPCODE_BICC:
22809 +           {
22810 +               int     DoBranch   = (instr >> 28) & 1;
22811 +               int     CondBranch = 1;
22812 +               E3_Addr OldnPC     = npc;
22813 +
22814 +               PRINTF5 (ctxt, DBG_TPROC, "PC %x : Bicc Z=%x N=%x C=%x V=%x ", pc, Z, N, C, V);
22815 +               switch (instr & OPCODE_BICC_MASK)
22816 +               {
22817 +               case OPCODE_BICC_BN:    CondBranch = 0;                 break;
22818 +               case OPCODE_BICC_BE:    DoBranch ^= Z;                  break;
22819 +               case OPCODE_BICC_BLE:   DoBranch ^= Z | (N ^ V);        break;
22820 +               case OPCODE_BICC_BL:    DoBranch ^= N ^ V;              break;
22821 +               case OPCODE_BICC_BLEU:  DoBranch ^= C | Z;              break;
22822 +               case OPCODE_BICC_BCS:   DoBranch ^= C;                  break;
22823 +               case OPCODE_BICC_BNEG:  DoBranch ^= N;                  break;
22824 +               case OPCODE_BICC_BVS:   DoBranch ^= V;                  break;
22825 +               }
22826 +
22827 +               /* Do the branch */
22828 +               if (DoBranch != 0)
22829 +               {
22830 +                   npc = pc + (((instr & 0x3fffff) << 2) |
22831 +                               (((instr & 0x200000) != 0) ? 0xff000000 : 0));
22832 +                   
22833 +                   PRINTF2 (ctxt, DBG_TPROC, "PC %x : branch taken to %x\n", pc, npc);
22834 +               }
22835 +               else
22836 +               {
22837 +                   npc = npc + 4;
22838 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : branch not taken\n", pc);
22839 +               }
22840 +               pc = OldnPC;
22841 +
22842 +               /* Test if the next is annuled */
22843 +               if (((instr & OPCODE_BICC_ANNUL) != 0) &
22844 +                   ((DoBranch == 0) | (CondBranch == 0)))
22845 +               {
22846 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : branch annulled\n", pc);
22847 +
22848 +                   pc = npc;
22849 +                   npc += 4;
22850 +               }
22851 +
22852 +               /*
22853 +                * we've already consumed the instruction - so continue rather
22854 +                * than break;
22855 +                */
22856 +               continue;
22857 +           }
22858 +           
22859 +           default:
22860 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 0 instr %x\n", pc, instr);
22861 +               goto failed;
22862 +           }
22863 +           break;
22864 +
22865 +       case OPCODE_CLASS_1:
22866 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 1 instr %x\n", pc, instr);
22867 +               goto failed;
22868 +               
22869 +       case OPCODE_CLASS_2:
22870 +       {
22871 +           E3_uint32 X = REGISTER_VALUE (trap, INSTR_RS1(instr));
22872 +           E3_uint32 Y = (instr & OPCODE_IMM) ? INSTR_IMM(instr) : REGISTER_VALUE (trap, INSTR_RS2(instr));
22873 +           
22874 +           if ((instr & OPCODE_NOT_ALUOP) == 0)
22875 +           {
22876 +               E3_uint32 fcode  = (instr >> OPCODE_FCODE_SHIFT) & OPCODE_FCODE_MASK;
22877 +               E3_uint32 result = ALU (ctxt, fcode, X, Y, &Z, &N, &C, &V);
22878 +
22879 +               PRINTF5 (ctxt, DBG_TPROC, "PC %x : %s %x %x -> %x", pc, OpcodeNames[fcode], X, Y, result);
22880 +               PRINTF4 (ctxt, DBG_TPROC, "        Z=%x N=%x C=%x V=%x\n", Z, N, C, V);
22881 +               
22882 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), result);
22883 +           }
22884 +           else
22885 +           {
22886 +               switch (instr & OPCODE_MASK)
22887 +               {
22888 +               case OPCODE_OPEN:
22889 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_open\n", pc);
22890 +                   break;
22891 +
22892 +               case OPCODE_CLOSE:
22893 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_close\n", pc);
22894 +                   goto found_close;
22895 +
22896 +               case OPCODE_SLL:
22897 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SLL\n", pc);
22898 +
22899 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X << Y);
22900 +                   break;
22901 +                   
22902 +               case OPCODE_SRL:
22903 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SRL\n", pc);
22904 +                   
22905 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X >> Y);
22906 +                   break;
22907 +                   
22908 +               case OPCODE_SRA:
22909 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SRA\n", pc);
22910 +                   
22911 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X >> Y);
22912 +                   break;
22913 +                   
22914 +               case OPCODE_BREAKTEST:
22915 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : BREAKTEST  not allowed while open\n", pc);
22916 +                   goto failed;
22917 +                   
22918 +               case OPCODE_BREAK:
22919 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : BREAK not allowed while open\n", pc);
22920 +                   goto failed;
22921 +
22922 +               case OPCODE_SUSPEND:
22923 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SUSPEND not allowed while open\n", pc);
22924 +                   goto failed;
22925 +                   
22926 +               case OPCODE_WAIT:
22927 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : WAIT not allowed while open\n", pc);
22928 +                   goto failed;
22929 +
22930 +               default:
22931 +                   PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 2 instr %x\n", pc, instr);
22932 +                   goto failed;
22933 +               }
22934 +           }
22935 +           break;
22936 +       }
22937 +       
22938 +       case OPCODE_CLASS_3:
22939 +       {
22940 +           if ((instr & OPCODE_IMM) != 0)
22941 +               addr = REGISTER_VALUE (trap, INSTR_RS1(instr)) + INSTR_IMM(instr);
22942 +           else
22943 +               addr = (REGISTER_VALUE (trap, INSTR_RS1(instr)) + 
22944 +                       REGISTER_VALUE (trap, INSTR_RS2(instr)));
22945 +
22946 +           switch (instr & OPCODE_MASK)
22947 +           {
22948 +           case OPCODE_LD:
22949 +               PRINTF3 (ctxt, DBG_TPROC, "PC %x : LD [%x], r%d\n", pc, addr, INSTR_RD(instr));
22950 +               
22951 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), ELAN3_OP_LOAD32 (ctxt, addr));
22952 +               break;
22953 +               
22954 +           case OPCODE_LDD:
22955 +           case OPCODE_LDBLOCK16:
22956 +           case OPCODE_LDBLOCK32:
22957 +           case OPCODE_LDBLOCK64:
22958 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : LDBLOCKx @ %x is not possible while output open\n", pc, addr);
22959 +               goto failed;
22960 +           
22961 +           case OPCODE_ST:
22962 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : ST @ %x\n", pc, addr);
22963 +               
22964 +               ELAN3_OP_STORE32 (ctxt, addr, REGISTER_VALUE (trap, INSTR_RD(instr)));
22965 +               break;
22966 +                             
22967 +           case OPCODE_STD:
22968 +           case OPCODE_STBLOCK16:
22969 +           case OPCODE_STBLOCK32:
22970 +           case OPCODE_STBLOCK64:
22971 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : STD @ %x is not posisble while output open\n", pc, addr);
22972 +               goto failed;
22973 +
22974 +           case OPCODE_SWAP:
22975 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : SWAP @ %x is not posible while output open\n", pc, addr);
22976 +               goto failed;
22977 +               
22978 +           default:
22979 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 3 instr %x\n", pc, instr);
22980 +               goto failed;
22981 +           }
22982 +           break;
22983 +       }}
22984 +
22985 +       pc = npc;
22986 +       npc += 4;
22987 +    }
22988 +    
22989 +found_close:
22990 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
22991 +
22992 +    PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_close\n", pc);
22993 +    
22994 +    /*
22995 +     * Found the new pc, and have the close instruction in *instr
22996 +     */
22997 +    ASSIGN_REGISTER (trap, INSTR_RD(instr), PAckVal);
22998 +    
22999 +    /*
23000 +     * Move to instruction after close.
23001 +    */
23002 +    trap->pc = npc;
23003 +    
23004 +    /* Insert the value of Z and N from the close inst */
23005 +    trap->npc = (npc + 4) | ((PAckVal == E3_PAckOk) ? 1 :
23006 +                            (PAckVal == E3_PAckTestFail) ? 2 : 0);
23007 +
23008 +    return (ESUCCESS);
23009 +}
23010 +
23011 +E3_uint32
23012 +ALU (ELAN3_CTXT *ctxt,
23013 +     E3_uint32 fcode, E3_uint32 X, E3_uint32 Y,
23014 +     E3_uint32 *Z, E3_uint32 *N, E3_uint32 *C, E3_uint32 *V)
23015 +{
23016 +    E3_uint32 XMSB, YMSB, ZMSB, Cprime;
23017 +    E3_uint32 Yprime;
23018 +    E3_uint32 Result=0;
23019 +    
23020 +    Yprime = ((fcode >> 2) & 1) ? ~Y : Y;
23021 +    Cprime = ((fcode >> 2) & 1) ^ (*C & ((fcode >> 3) & 1));
23022 +    XMSB             = (X >> 31) & 1;
23023 +    YMSB             = (Yprime >> 31) & 1;
23024 +    /* mul or div */
23025 +    if ((fcode & 0xa) == 0xa)
23026 +    {
23027 +       PRINTF0 (ctxt, DBG_TPROC, "ALU: tried a multiply or a divide\n");
23028 +       return (0);
23029 +    }
23030 +
23031 +    switch (fcode & 3)
23032 +    {
23033 +       /*ADD */
23034 +    case 0:
23035 +       Result = X + Yprime + Cprime ;
23036 +       if ((fcode & 0x10) == 0)
23037 +           return (Result);
23038 +       
23039 +       ZMSB   = Result >> 31;
23040 +       *V = ((XMSB & YMSB & ~ZMSB) | (~XMSB &~YMSB &  ZMSB));
23041 +       *C = ((fcode >> 2) & 1) ^ ( (XMSB & YMSB) | (~ZMSB & (XMSB | YMSB)));
23042 +       break;
23043 +       
23044 +       /*AND */
23045 +    case 1:
23046 +       Result = X & Yprime ;
23047 +       if ((fcode & 0x10) == 0)
23048 +           return (Result);
23049 +       
23050 +       *V = 0;
23051 +       *C = 0;
23052 +       break;
23053 +       
23054 +       /*OR  */
23055 +    case 2:
23056 +       Result = X | Yprime ;
23057 +       if ((fcode & 0x10) == 0)
23058 +           return (Result);
23059 +       
23060 +       *V = 0;
23061 +       *C = 0;
23062 +       break;
23063 +       
23064 +       /*XOR */
23065 +    case 3:
23066 +       Result = X ^ Yprime ;
23067 +       if ((fcode & 0x10) == 0)
23068 +           return (Result);
23069 +       
23070 +       *V = 0;
23071 +       *C = 0;
23072 +       break;
23073 +    }
23074 +    
23075 +    *Z = (Result == 0) ? 1 : 0;
23076 +    *N = (Result >> 31) & 1;
23077 +
23078 +    return (Result);
23079 +}
23080 +
23081 +/*
23082 + * Local variables:
23083 + * c-file-style: "stroustrup"
23084 + * End:
23085 + */
23086 Index: linux-2.4.21/drivers/net/qsnet/elan3/tproc_linux.c
23087 ===================================================================
23088 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/tproc_linux.c     2004-02-23 16:02:56.000000000 -0500
23089 +++ linux-2.4.21/drivers/net/qsnet/elan3/tproc_linux.c  2005-06-01 23:12:54.596439560 -0400
23090 @@ -0,0 +1,215 @@
23091 +/*
23092 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
23093 + *
23094 + *    For licensing information please see the supplied COPYING file
23095 + *
23096 + */
23097 +
23098 +#ident "$Id: tproc_linux.c,v 1.19.2.1 2004/10/28 17:08:56 david Exp $"
23099 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tproc_linux.c,v $*/
23100 +
23101 +#include <qsnet/kernel.h>
23102 +#include <qsnet/autoconf.h>
23103 +
23104 +#include <asm/mman.h>
23105 +#include <linux/file.h>
23106 +
23107 +#ifdef NO_ABI
23108 +#include <asm/poll.h>
23109 +extern asmlinkage long sys_open(const char *, int, int);
23110 +extern asmlinkage ssize_t sys_write(unsigned int, const char *, size_t);
23111 +extern asmlinkage ssize_t sys_read(unsigned int, char *, size_t);
23112 +extern asmlinkage off_t sys_lseek(unsigned int, off_t, unsigned int);
23113 +extern asmlinkage long sys_poll(struct pollfd *, unsigned int, long);
23114 +extern asmlinkage long sys_kill(int, int); 
23115 +#else
23116 +#      include <linux/syscall.h>
23117 +#endif
23118 +
23119 +#include <elan3/elanregs.h>
23120 +#include <elan3/elandev.h>
23121 +#include <elan3/elanvp.h>
23122 +#include <elan3/elan3mmu.h>
23123 +#include <elan3/elanctxt.h>
23124 +#include <elan3/elandebug.h>
23125 +#include <elan3/urom_addrs.h>
23126 +#include <elan3/thread.h>
23127 +#include <elan3/elansyscall.h>
23128 +#include <elan3/threadsyscall.h>
23129 +
23130 +/*
23131 + * NOTE: system calls from kernel on Linux are different on alpha and i386 
23132 + *       on alpha they return -errno on failure 
23133 + *       on i386  they return -1 on failure and set errno 
23134 + */
23135 +
23136 +static void
23137 +ReturnSyscall (THREAD_TRAP *trap, unsigned long rc, int *skip)
23138 +{
23139 +    if (rc >= (unsigned long) (-130))
23140 +    {
23141 +       trap->pc |= PSR_C_BIT;  /* set carry to indicate failure */
23142 +
23143 +       trap->Registers[REG_OUTS+(0^WordEndianFlip)] = -rc;
23144 +    } 
23145 +    else 
23146 +    {
23147 +       trap->pc &= ~PSR_C_BIT; /* clear carry to indicate success */
23148 +       trap->Registers[REG_OUTS+(0^WordEndianFlip)] = rc;
23149 +    }
23150 +    trap->Registers[REG_OUTS+(1^WordEndianFlip)] = 0;
23151 +    *skip = 1;
23152 +}
23153 +
23154 +static void 
23155 +dump_regs(ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
23156 +{
23157 +    PRINTF (ctxt, DBG_TPROC, "               OUTS %08x %08x %08x %08x\n",
23158 +      trap->Registers[REG_OUTS+(0^WordEndianFlip)], 
23159 +      trap->Registers[REG_OUTS+(1^WordEndianFlip)],
23160 +      trap->Registers[REG_OUTS+(2^WordEndianFlip)], 
23161 +      trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
23162 +    PRINTF (ctxt, DBG_TPROC, "                    %08x %08x %08x %08x\n",
23163 +      trap->Registers[REG_OUTS+(4^WordEndianFlip)], 
23164 +      trap->Registers[REG_OUTS+(5^WordEndianFlip)],
23165 +      trap->Registers[REG_OUTS+(6^WordEndianFlip)], 
23166 +      trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
23167 +}
23168 +
23169 +int
23170 +ThreadSyscall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip)
23171 +{
23172 +    int                  code;
23173 +    caddr_t       maddr;
23174 +    struct file  *file;
23175 +    unsigned long rc;
23176 +    int           i;
23177 +    uintptr_t     av[6];
23178 +    uintptr_t     ptr;
23179 +   
23180 +    PRINTF (ctxt, DBG_TPROC, "ThreadSyscall: PC %08x G1 %08x\n", 
23181 +      trap->pc, trap->Registers[REG_GLOBALS+(1^WordEndianFlip)]);
23182 +    dump_regs(ctxt, trap);
23183 +
23184 +    code = trap->Registers[REG_GLOBALS+(1^WordEndianFlip)];
23185 +
23186 +    /* Copy the system call arguments from %o0-%o5 */
23187 +    for (i = 0; i < 6; i++)
23188 +       av[i] = trap->Registers[REG_OUTS+(i^WordEndianFlip)];
23189 +    
23190 +    rc = (unsigned long) -EINVAL;
23191 +
23192 +    switch (code) {
23193 +       case ELAN3_SYS_open:
23194 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
23195 +           if (maddr != NULL)
23196 +               rc = sys_open((const char *)maddr, av[1], av[2]);
23197 +           break;
23198 +
23199 +       case ELAN3_SYS_close:
23200 +           rc = sys_close(av[0]);
23201 +           break;
23202 +
23203 +       case ELAN3_SYS_write:
23204 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[1]);
23205 +           if (maddr != NULL)
23206 +               rc = sys_write(av[0], (const char *)maddr, av[2]);
23207 +           break;
23208 +
23209 +       case ELAN3_SYS_read:
23210 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[1]);
23211 +           if (maddr != NULL)
23212 +               rc = sys_read(av[0], (char *)maddr, av[2]);
23213 +           break;
23214 +
23215 +       case ELAN3_SYS_poll:
23216 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
23217 +           if (maddr != NULL)
23218 +               rc = sys_poll((struct pollfd *)maddr, av[1], av[2]);
23219 +           break;
23220 +       
23221 +       case ELAN3_SYS_lseek:
23222 +           rc = sys_lseek(av[0], av[1], av[2]);
23223 +           break;
23224 +           
23225 +       case ELAN3_SYS_mmap:
23226 +           if ((E3_Addr) av[0] == (E3_Addr) 0)
23227 +               maddr = NULL;
23228 +           else if ((maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0])) == NULL)
23229 +               break;
23230 +       
23231 +           file = NULL;
23232 +           /* GNAT 5515: If *not* anonymous memory need to do fget */
23233 +           if ((av[3] & MAP_ANONYMOUS) == 0 && (file = fget (av[4])) == NULL)
23234 +           {
23235 +               rc = -EBADF;
23236 +               break;
23237 +           }
23238 +
23239 +           down_write (&current->mm->mmap_sem);
23240 +           ptr = do_mmap_pgoff (file, (unsigned long) maddr, av[1], av[2], av[3], av[5] >>PAGE_SHIFT);
23241 +           up_write (&current->mm->mmap_sem);
23242 +
23243 +           if (file)
23244 +               fput (file);
23245 +           
23246 +           if (IS_ERR((void *) ptr))
23247 +               rc = PTR_ERR((void *) ptr);
23248 +           else
23249 +               rc = elan3mmu_elanaddr (ctxt->Elan3mmu, (caddr_t)ptr);
23250 +
23251 +           break;
23252 +       
23253 +       case ELAN3_SYS_munmap:
23254 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
23255 +
23256 +#ifdef AC
23257 +           if (maddr != NULL)
23258 +               rc = do_munmap(current->mm, (unsigned long) maddr, av[1], 1);
23259 +#else
23260 +           if (maddr != NULL)
23261 +               rc = do_munmap(current->mm, (unsigned long) maddr, av[1]);
23262 +#endif
23263 +           break;
23264 +
23265 +       case ELAN3_SYS_kill:
23266 +           rc = sys_kill(av[0], av[1]);
23267 +           break;
23268 +
23269 +       case ELAN3_SYS_getpid:
23270 +           rc = current->pid;
23271 +           break;
23272 +
23273 +       default:
23274 +           return EINVAL;
23275 +    }
23276 +    ReturnSyscall(trap, rc, skip);
23277 +    return ESUCCESS;
23278 +}
23279 +
23280 +
23281 +int
23282 +ThreadElancall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip)
23283 +{
23284 +       int ret = ESUCCESS;
23285 +
23286 +       PRINTF (ctxt, DBG_TPROC, "ThreadElancall: PC %08x\n", trap->pc);
23287 +       dump_regs(ctxt, trap);
23288 +
23289 +       /*
23290 +        * Elan system call 'type' is passed in o0
23291 +        */
23292 +       switch (trap->Registers[REG_OUTS+(0^WordEndianFlip)]) 
23293 +       {
23294 +       default:
23295 +               ret = EINVAL;
23296 +               break;
23297 +       }
23298 +       return ret;
23299 +}
23300 +
23301 +/*
23302 + * Local variables:
23303 + * c-file-style: "stroustrup"
23304 + * End:
23305 + */
23306 Index: linux-2.4.21/drivers/net/qsnet/elan3/virtual_process.c
23307 ===================================================================
23308 --- linux-2.4.21.orig/drivers/net/qsnet/elan3/virtual_process.c 2004-02-23 16:02:56.000000000 -0500
23309 +++ linux-2.4.21/drivers/net/qsnet/elan3/virtual_process.c      2005-06-01 23:12:54.597439408 -0400
23310 @@ -0,0 +1,884 @@
23311 +/*
23312 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
23313 + *
23314 + *    For licensing information please see the supplied COPYING file
23315 + *
23316 + */
23317 +
23318 +#ident "@(#)$Id: virtual_process.c,v 1.68 2004/06/07 13:50:10 mike Exp $"
23319 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/virtual_process.c,v $*/
23320 +
23321 +#include <qsnet/kernel.h>
23322 +
23323 +#include <elan/elanmod.h>
23324 +#include <elan3/elanregs.h>
23325 +#include <elan3/elandev.h>
23326 +#include <elan3/elanvp.h>
23327 +#include <elan3/elan3mmu.h>
23328 +#include <elan3/elanctxt.h>
23329 +#include <elan3/elandebug.h>
23330 +#include <elan3/urom_addrs.h>
23331 +#include <elan3/thread.h>
23332 +#include <elan3/vmseg.h>
23333 +#include <elan3/elansyscall.h>
23334 +
23335 +static ELAN3_VPSEG *
23336 +InstallSegment (ELAN3_CTXT *ctxt, int process, int entries)
23337 +{
23338 +    ELAN3_VPSEG **prevSeg, *seg;
23339 +    int lastTop = -1;
23340 +    int        top     = process + entries-1;
23341 +
23342 +    ASSERT (krwlock_is_write_locked (&ctxt->VpLock));
23343 +
23344 +    for (prevSeg = &ctxt->VpSegs; (seg = (*prevSeg)) != NULL; prevSeg = &seg->Next)
23345 +    {
23346 +       int thisTop = seg->Process + seg->Entries - 1;
23347 +
23348 +        if (process < seg->Process && (process <= lastTop || top >= seg->Process))
23349 +       {
23350 +           /*
23351 +            * Overlaps with last segment, or this one 
23352 +            */
23353 +           return (NULL);
23354 +       }
23355 +       if (seg->Process > process)
23356 +           break;
23357 +       
23358 +       lastTop = thisTop;
23359 +    }
23360 +    
23361 +    KMEM_ZALLOC (seg, ELAN3_VPSEG *, sizeof (ELAN3_VPSEG), TRUE);
23362 +    
23363 +    if (seg == (ELAN3_VPSEG *) NULL)
23364 +       return (NULL);
23365 +    
23366 +    seg->Process = process;
23367 +    seg->Entries = entries;
23368 +
23369 +
23370 +    PRINTF2 (ctxt, DBG_VP, "InstallSegment: add seg %p before %p\n", seg, *prevSeg);
23371 +
23372 +    seg->Next = *prevSeg;
23373 +    *prevSeg = seg;
23374 +
23375 +    return (seg);
23376 +}
23377 +
23378 +static int 
23379 +RemoveSegment (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg)
23380 +{
23381 +    ELAN3_VPSEG **prevSeg, *thisSeg;
23382 +
23383 +    ASSERT (krwlock_is_write_locked (&ctxt->VpLock));
23384 +
23385 +    for (prevSeg = &ctxt->VpSegs; (thisSeg = (*prevSeg)) != NULL; prevSeg = &thisSeg->Next)
23386 +    {
23387 +       if (thisSeg == seg)
23388 +           break;
23389 +    }
23390 +
23391 +    if (thisSeg == (ELAN3_VPSEG *) NULL)
23392 +       return (EINVAL);
23393 +
23394 +
23395 +    PRINTF2 (ctxt, DBG_VP, "RemoveSegment: remove seg %p next %p\n", thisSeg, thisSeg->Next);
23396 +
23397 +    *prevSeg = thisSeg->Next;
23398 +    
23399 +    KMEM_FREE ((caddr_t) seg, sizeof (ELAN3_VPSEG));
23400 +
23401 +    return (ESUCCESS);
23402 +}
23403 +
23404 +static ELAN3_VPSEG *
23405 +FindSegment (ELAN3_CTXT *ctxt, int low, int high)
23406 +{
23407 +    ELAN3_VPSEG *seg;
23408 +
23409 +    ASSERT(krwlock_is_locked (&ctxt->VpLock));
23410 +    
23411 +    for (seg = ctxt->VpSegs; seg; seg = seg->Next)
23412 +    {
23413 +       if (seg->Process <= low && (seg->Process + seg->Entries) > high)
23414 +           return (seg);
23415 +    }
23416 +
23417 +    return ((ELAN3_VPSEG *) NULL);
23418 +}
23419 +
23420 +ELAN_LOCATION
23421 +ProcessToLocation (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, int process, ELAN_CAPABILITY *cap)
23422 +{
23423 +    ELAN_LOCATION location;
23424 +    int           nnodes,nctxs;
23425 +    int           node,ctx,i;
23426 +
23427 +    ASSERT(krwlock_is_locked (&ctxt->VpLock));
23428 +
23429 +    location.loc_node    = ELAN3_INVALID_NODE;
23430 +    location.loc_context = -1;
23431 +
23432 +    PRINTF3 (ctxt, DBG_VP, "ProcessToLocation: process %d seg %p cap %p\n", process, seg, cap);
23433 +
23434 +    if (seg == NULL)
23435 +       seg = FindSegment (ctxt, process, process);
23436 +
23437 +    if (!seg || (seg->Type != ELAN3_VPSEG_P2P))
23438 +       return (location);
23439 +
23440 +    cap    = &seg->SegCapability;
23441 +    nnodes = ELAN_CAP_NUM_NODES (cap);
23442 +    nctxs  = ELAN_CAP_NUM_CONTEXTS (cap);
23443 +
23444 +    switch (seg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
23445 +    {
23446 +    case ELAN_CAP_TYPE_BLOCK:
23447 +    {
23448 +       int entries = ELAN_CAP_ENTRIES(cap);
23449 +
23450 +       for (node = 0, i = 0; node < nnodes && i < entries; node++)
23451 +       {
23452 +           for (ctx = 0; ctx < nctxs && i < entries; ctx++)
23453 +           {
23454 +               if (( seg->SegCapability.cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->SegCapability.cap_bitmap, ctx + (node * nctxs)))
23455 +               {
23456 +                   if (i++ == (process - seg->Process))
23457 +                   { 
23458 +                       location.loc_node    = seg->SegCapability.cap_lownode    + node;
23459 +                       location.loc_context = seg->SegCapability.cap_lowcontext + ctx;
23460 +                       goto found;
23461 +                   }
23462 +               }
23463 +           }
23464 +       }
23465 +       break;
23466 +    }
23467 +    case ELAN_CAP_TYPE_CYCLIC:
23468 +    {
23469 +       int entries = ELAN_CAP_ENTRIES(cap);
23470 +
23471 +       for (ctx = 0, i = 0; ctx < nctxs && i < entries; ctx++)
23472 +       {
23473 +           for (node = 0; node < nnodes && i < entries; node++)
23474 +           {
23475 +               if ((seg->SegCapability.cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->SegCapability.cap_bitmap, node + (ctx * nnodes)))
23476 +               {                                   
23477 +                   if (i++ ==  (process - seg->Process))
23478 +                   { 
23479 +                       location.loc_node    = seg->SegCapability.cap_lownode    + node;
23480 +                       location.loc_context = seg->SegCapability.cap_lowcontext + ctx;
23481 +                       goto found;
23482 +                   }
23483 +               }
23484 +           }
23485 +       }
23486 +       break;  
23487 +    }
23488 +    default:
23489 +       break;
23490 +    }
23491 +       
23492 + found:
23493 +    
23494 +    PRINTF3 (ctxt, DBG_VP, "ProcessToLocation: process %d -> Node %d Context %d\n", process, location.loc_node,  location.loc_context);
23495 +
23496 +    if (cap != NULL)
23497 +    {
23498 +       bcopy ((caddr_t) &seg->SegCapability, (caddr_t) cap, sizeof (ELAN_CAPABILITY));
23499 +       cap->cap_mycontext = location.loc_context;
23500 +    }
23501 +
23502 +    return (location);
23503 +}
23504 +
23505 +int
23506 +LocationToProcess (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, ELAN_LOCATION loc, ELAN_CAPABILITY *cap)
23507 +{
23508 +    int nnodes,nctxs;
23509 +    int node,ctx,i;
23510 +
23511 +    if (seg == NULL)
23512 +       return ELAN3_INVALID_PROCESS;
23513 +
23514 +    if (!seg || (seg->Type != ELAN3_VPSEG_P2P))
23515 +       return ELAN3_INVALID_PROCESS;
23516 +
23517 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
23518 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
23519 +
23520 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
23521 +    {
23522 +    case ELAN_CAP_TYPE_BLOCK:
23523 +    {
23524 +       int entries = ELAN_CAP_ENTRIES(cap);
23525 +
23526 +       for (node = 0, i = 0; node < nnodes && i < entries; node++)
23527 +       {
23528 +           for (ctx = 0; ctx < nctxs && i < entries; ctx++)
23529 +           {
23530 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctx + (node * nctxs)))
23531 +               {
23532 +                   if ((loc.loc_node    == (cap->cap_lownode + node) ) 
23533 +                       && (loc.loc_context == (cap->cap_lowcontext + ctx) ))
23534 +                   {
23535 +                       return (i + seg->Process);
23536 +                   }
23537 +                   i++;
23538 +               }
23539 +           }
23540 +       }
23541 +       break;
23542 +    }  
23543 +    case ELAN_CAP_TYPE_CYCLIC:
23544 +    {
23545 +       int entries = ELAN_CAP_ENTRIES(cap);
23546 +
23547 +       for (ctx = 0, i = 0; ctx < nctxs && i < entries; ctx++)
23548 +       {
23549 +           for (node = 0; node < nnodes && i < entries; node++)
23550 +           {
23551 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (ctx * nnodes)))
23552 +               {
23553 +                   if ((loc.loc_node   == (cap->cap_lownode + node) ) 
23554 +                       && (loc.loc_context == (cap->cap_lowcontext + ctx) ))
23555 +                   {
23556 +                       return (i + seg->Process);
23557 +                   }
23558 +                   i++;
23559 +                   
23560 +               }
23561 +           }
23562 +       }
23563 +       break;
23564 +    }  
23565 +    default:
23566 +       break;
23567 +    }
23568 +       
23569 +    return ELAN3_INVALID_PROCESS;
23570 +}
23571 +
23572 +int
23573 +elan3_addvp (ELAN3_CTXT *ctxt, int process, ELAN_CAPABILITY *cap)
23574 +{
23575 +    ELAN3_DEV       *dev = ctxt->Device;
23576 +    ELAN_POSITION    *pos = &ctxt->Position;
23577 +    ELAN3_VPSEG       *seg;
23578 +    int                      i;
23579 +    int                      nodeOff;
23580 +    int                      ctxOff;
23581 +    int                      nnodes;
23582 +    int                      nctxs;
23583 +    E3_uint16         flits[MAX_FLITS];
23584 +    int               nflits;
23585 +    int               entries;
23586 +
23587 +    PRINTF2 (ctxt, DBG_VP, "elan3_addvp: %d -> %s\n", process, CapabilityString (cap));
23588 +
23589 +    entries = ELAN_CAP_ENTRIES(cap);
23590 +    if (entries <= 0 || (process + entries) > ELAN3_MAX_VPS)
23591 +       return (EINVAL);
23592 +
23593 +    /*
23594 +     * Scan the virtual process segment list, to add this entry, and ensure that
23595 +     * the ranges don't overlap.
23596 +     */
23597 +    krwlock_write (&ctxt->VpLock);
23598 +
23599 +    /* check cap. */
23600 +    switch (elan3_validate_cap (ctxt->Device, cap, ELAN_USER_P2P))
23601 +    {
23602 +    case ELAN_CAP_OK:
23603 +       /* nothing */
23604 +       break;
23605 +
23606 +    case ELAN_CAP_RMS:
23607 +       if ( elan_validate_map(cap, cap) != ESUCCESS)
23608 +       {
23609 +           krwlock_done (&ctxt->VpLock);
23610 +           return (EINVAL);
23611 +       }
23612 +       break;
23613 +
23614 +    default:
23615 +       krwlock_done (&ctxt->VpLock);
23616 +       return (EINVAL);
23617 +    }
23618 +
23619 +    if ((seg = InstallSegment (ctxt, process, entries)) == NULL)
23620 +    {
23621 +       PRINTF0 (ctxt, DBG_VP, "elan3_addvp: failed to find a seg\n");
23622 +       krwlock_done (&ctxt->VpLock);
23623 +       return (EINVAL);
23624 +    }
23625 +    
23626 +    seg->Type                        = ELAN3_VPSEG_P2P;
23627 +    seg->SegCapability               = *cap;
23628 +    seg->SegCapability.cap_mycontext = ELAN_CAP_UNINITIALISED;
23629 +
23630 +    PRINTF3 (ctxt, DBG_VP, "elan3_addvp: segment type %x  %d %d\n",
23631 +           seg->SegCapability.cap_type, seg->Process, entries);
23632 +
23633 +
23634 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
23635 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
23636 +
23637 +    /* position not determined, so cannot load any routes, the hwtest
23638 +     * process must explicitly set its own routes */
23639 +    
23640 +    if (!(cap->cap_type & ELAN_CAP_TYPE_HWTEST) && (pos->pos_mode != ELAN_POS_UNKNOWN))
23641 +    {
23642 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
23643 +       {
23644 +       case ELAN_CAP_TYPE_BLOCK:
23645 +           for (nodeOff = 0, i = 0; nodeOff < nnodes && i < entries; nodeOff++)
23646 +           {
23647 +               for (ctxOff = 0; ctxOff < nctxs && i < entries; ctxOff++)
23648 +               {
23649 +                   if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
23650 +                   {
23651 +                       /* Don't load a route if there's no switch and trying to talk to myself */
23652 +                       if (pos->pos_mode == ELAN_POS_MODE_SWITCHED ||
23653 +                           (pos->pos_mode == ELAN_POS_MODE_LOOPBACK && cap->cap_lownode + nodeOff == pos->pos_nodeid) ||
23654 +                           (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && cap->cap_lownode + nodeOff != pos->pos_nodeid))
23655 +                       {
23656 +                           PRINTF3 (ctxt, DBG_VP, "elan3_addvp: virtual process %d -> node %d context %d\n",
23657 +                                    seg->Process + i, cap->cap_lownode +nodeOff, cap->cap_lowcontext +ctxOff);
23658 +                           
23659 +                           nflits = GenerateRoute (pos, flits, cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff,
23660 +                                                   DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
23661 +                           
23662 +
23663 +
23664 +                           LoadRoute (dev, ctxt->RouteTable, seg->Process+i, cap->cap_lowcontext + ctxOff, nflits, flits);  
23665 +                       }
23666 +                       
23667 +                       i++;
23668 +                   }
23669 +               }
23670 +           }
23671 +           break;
23672 +           
23673 +       case ELAN_CAP_TYPE_CYCLIC:
23674 +           for (ctxOff = 0, i = 0; ctxOff < nctxs && i < entries; ctxOff++)
23675 +           {
23676 +               for (nodeOff = 0; nodeOff < nnodes && i < entries; nodeOff++)
23677 +               {
23678 +                   if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
23679 +                   {
23680 +                       /* Don't load a route if there's no switch and trying to talk to myself */
23681 +                       if (pos->pos_mode == ELAN_POS_MODE_SWITCHED ||
23682 +                           (pos->pos_mode == ELAN_POS_MODE_LOOPBACK && cap->cap_lownode + nodeOff == pos->pos_nodeid) ||
23683 +                           (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && cap->cap_lownode + nodeOff != pos->pos_nodeid))
23684 +                       {
23685 +                           PRINTF3 (ctxt, DBG_VP, "elan3_addvp: virtual process %d -> node %d context %d\n",
23686 +                                    seg->Process + i, cap->cap_lownode + nodeOff, cap->cap_lowcontext +ctxOff);
23687 +                       
23688 +                           nflits = GenerateRoute (pos, flits, cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff,
23689 +                                                   DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
23690 +                           
23691 +
23692 +                           LoadRoute (dev, ctxt->RouteTable, seg->Process+i, cap->cap_lowcontext +ctxOff, nflits, flits);  
23693 +                       } 
23694 +                       i++;                
23695 +                   }
23696 +               }
23697 +           }
23698 +           break;      
23699 +       default:
23700 +           break;
23701 +       }
23702 +    }
23703 +  
23704 +    krwlock_done (&ctxt->VpLock);
23705 +
23706 +    return (ESUCCESS);
23707 +}
23708 +
23709 +int
23710 +elan3_removevp (ELAN3_CTXT *ctxt, int process)
23711 +{
23712 +    ELAN3_VPSEG *seg;
23713 +    ELAN3_VPSEG *next;
23714 +    int                i;
23715 +
23716 +    krwlock_write (&ctxt->VpLock);
23717 +
23718 +    PRINTF1 (ctxt, DBG_VP, "elan3_removevp: remove process %d\n", process);
23719 +
23720 +    if (process == ELAN3_INVALID_PROCESS)
23721 +       seg = ctxt->VpSegs;
23722 +    else
23723 +       seg = FindSegment (ctxt, process, process);
23724 +
23725 +    if (seg == (ELAN3_VPSEG *) NULL)
23726 +    {
23727 +       krwlock_done (&ctxt->VpLock);
23728 +       return (EINVAL);
23729 +    }
23730 +    
23731 +    do {
23732 +       PRINTF3 (ctxt, DBG_VP, "elan3_removevp: segment is %p [%x,%x]\n",
23733 +                seg, seg->Process, seg->Process+seg->Entries);
23734 +
23735 +       for (i = 0; i < seg->Entries; i++)
23736 +           ClearRoute (ctxt->Device, ctxt->RouteTable, seg->Process+i);
23737 +
23738 +        /* get Next pointer value before structure is free'd */
23739 +        next = seg->Next;      
23740 +       RemoveSegment (ctxt, seg);
23741 +
23742 +    } while (process == ELAN3_INVALID_PROCESS && (seg = next) != NULL);
23743 +    
23744 +    krwlock_done (&ctxt->VpLock);
23745 +
23746 +    return (ESUCCESS);
23747 +}
23748 +
23749 +int
23750 +elan3_addbcastvp (ELAN3_CTXT *ctxt, int process, int lowProc, int highProc)
23751 +{
23752 +    ELAN_POSITION *pos = &ctxt->Position;
23753 +    ELAN3_VPSEG    *seg;
23754 +    ELAN3_VPSEG    *aseg;
23755 +    int            virtualProcess;
23756 +    E3_uint64     routeValue;
23757 +
23758 +    PRINTF3 (ctxt, DBG_VP, "elan3_addbcastvp: process %d [%d,%d]\n", process, lowProc, highProc);
23759 +
23760 +    if (lowProc > highProc || pos->pos_mode != ELAN_POS_MODE_SWITCHED)
23761 +       return (EINVAL);
23762 +    
23763 +    krwlock_write (&ctxt->VpLock);
23764 +
23765 +    if ((aseg = FindSegment (ctxt, lowProc, highProc)) == NULL || (aseg->Type != ELAN3_VPSEG_P2P))
23766 +    {
23767 +       PRINTF2 (ctxt, DBG_VP, "elan3_addbcastvp: process [%d,%d] does not map to p2p segment\n", lowProc, highProc);
23768 +       krwlock_done (&ctxt->VpLock);
23769 +       return (EINVAL);
23770 +    }
23771 +
23772 +    /* check aseg->SegCapability */    
23773 +    switch (elan3_validate_cap (ctxt->Device, &aseg->SegCapability, ELAN_USER_BROADCAST))
23774 +    {
23775 +    case ELAN_CAP_OK:
23776 +       /* nothing */
23777 +       break;
23778 +       
23779 +    case ELAN_CAP_RMS:
23780 +       if ( elan_validate_map(&ctxt->Capability, &aseg->SegCapability) != ESUCCESS )
23781 +       {
23782 +           krwlock_done (&ctxt->VpLock);
23783 +           return (EINVAL);
23784 +       }
23785 +       break;
23786 +
23787 +    default:
23788 +       krwlock_done (&ctxt->VpLock);
23789 +       return (EINVAL);
23790 +    }
23791 +
23792 +    if ( ProcessToLocation (ctxt, aseg, lowProc,  NULL).loc_context != 
23793 +        ProcessToLocation (ctxt, aseg, highProc, NULL).loc_context)
23794 +    {
23795 +       PRINTF2 (ctxt, DBG_VP, "elan3_addbcastvp: process [%d,%d] does not map to single context\n", lowProc, highProc);
23796 +       krwlock_done (&ctxt->VpLock);
23797 +       return (EINVAL);
23798 +    }
23799 +    
23800 +    if ((seg = InstallSegment (ctxt, process, 1)) == NULL)
23801 +    {
23802 +       krwlock_done (&ctxt->VpLock);
23803 +       return (EINVAL);
23804 +    }
23805 +
23806 +    seg->Type        = ELAN3_VPSEG_BROADCAST;
23807 +    seg->SegLowProc  = lowProc;
23808 +    seg->SegHighProc = highProc;
23809 +
23810 +    PRINTF4 (ctxt, DBG_VP, "elan3_addbcastvp: installed seg %p Type %d LowProc %d HighProc %d\n",
23811 +           seg, seg->Type, seg->SegLowProc, seg->SegHighProc);
23812 +
23813 +    for (virtualProcess = lowProc; virtualProcess <= highProc; virtualProcess++)
23814 +    {
23815 +       if (virtualProcess < 0 || virtualProcess >= ctxt->RouteTable->Size)
23816 +           routeValue = 0;
23817 +       else
23818 +           routeValue = elan3_sdram_readq ( ctxt->Device, ctxt->RouteTable->Table + virtualProcess * NBYTES_PER_SMALL_ROUTE);
23819 +       
23820 +       if (! (routeValue & ROUTE_VALID))
23821 +       {
23822 +           PRINTF2 (ctxt, DBG_VP, "loadvp[%x]: broadcast %x not valid\n", 
23823 +                    ctxt->Capability.cap_mycontext, virtualProcess);
23824 +           break;
23825 +       }
23826 +    }
23827 +           
23828 +    if (virtualProcess > highProc)                     /* All vps now present */
23829 +    {                                          /* so load up broadcast route */
23830 +       E3_uint16     flits[MAX_FLITS];
23831 +       ELAN_LOCATION low    = ProcessToLocation (ctxt, aseg, lowProc,   NULL);
23832 +       ELAN_LOCATION high   = ProcessToLocation (ctxt, aseg, highProc,  NULL);
23833 +       int           nflits = GenerateRoute (pos, flits, low.loc_node, high.loc_node, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
23834 +       
23835 +       PRINTF6 (ctxt, DBG_VP, "loadvp[%x]: broadcast %d -> %x.%x [%x.%x]\n", ctxt->Capability.cap_mycontext,
23836 +                seg->Process, low.loc_node, high.loc_node, 
23837 +                low.loc_context, high.loc_context);
23838 +       
23839 +       LoadRoute ( ctxt->Device, ctxt->RouteTable, seg->Process, low.loc_context, nflits, flits);
23840 +    }
23841 +
23842 +    krwlock_done (&ctxt->VpLock);
23843 +
23844 +    return (ESUCCESS);
23845 +}
23846 +
23847 +int
23848 +elan3_process (ELAN3_CTXT *ctxt)
23849 +{
23850 +    int           res = ELAN3_INVALID_PROCESS;
23851 +    ELAN3_VPSEG   *seg;
23852 +    ELAN_LOCATION loc;
23853 +
23854 +    krwlock_write (&ctxt->VpLock);
23855 +
23856 +    loc.loc_node    = ctxt->Position.pos_nodeid;
23857 +    loc.loc_context = ctxt->Capability.cap_mycontext;
23858 +
23859 +    for (seg = ctxt->VpSegs ; seg; seg = seg->Next)
23860 +    {
23861 +       if (seg->Type == ELAN3_VPSEG_P2P &&
23862 +           seg->SegCapability.cap_lowcontext  <= ctxt->Capability.cap_mycontext &&
23863 +           seg->SegCapability.cap_highcontext >= ctxt->Capability.cap_mycontext &&
23864 +           seg->SegCapability.cap_lownode     <= ctxt->Position.pos_nodeid &&
23865 +           seg->SegCapability.cap_highnode    >= ctxt->Position.pos_nodeid)
23866 +       {
23867 +           if ((res=LocationToProcess (ctxt,seg,loc,&ctxt->Capability)) != ELAN3_INVALID_PROCESS)
23868 +           {
23869 +                krwlock_done (&ctxt->VpLock);
23870 +                return res;
23871 +           }
23872 +       }
23873 +    }
23874 +
23875 +    krwlock_done (&ctxt->VpLock);
23876 +
23877 +    return (res);
23878 +}
23879 +
23880 +int
23881 +elan3_check_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits, E3_uint32 *routeError)
23882 +{
23883 +    PRINTF5 (ctxt, DBG_VP, "elan3_check_route: vp=%d flits=%04x %04x %04x %04x\n",
23884 +            process, flits[0], flits[1], flits[2], flits[3]);
23885 +    PRINTF4 (ctxt, DBG_VP, "                            %04x %04x %04x %04x\n",
23886 +            flits[4], flits[5], flits[6], flits[7]);
23887 +
23888 +    krwlock_read (&ctxt->VpLock);
23889 +    *routeError=elan3_route_check(ctxt,flits,ProcessToLocation (ctxt, NULL, process, NULL).loc_node);
23890 +    krwlock_done (&ctxt->VpLock);
23891 +
23892 +    return (ESUCCESS); /* the call is a success though the errorcode may be set */
23893 +}
23894 +
23895 +int
23896 +elan3_load_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits)
23897 +{
23898 +    ELAN3_VPSEG *seg;
23899 +    int                res = 0;
23900 +    int                nflits;
23901 +    int         err;
23902 +
23903 +    PRINTF5 (ctxt, DBG_VP, "elan3_load_route: vp=%d flits=%04x %04x %04x %04x\n",
23904 +            process, flits[0], flits[1], flits[2], flits[3]);
23905 +    PRINTF4 (ctxt, DBG_VP, "                            %04x %04x %04x %04x\n",
23906 +            flits[4], flits[5], flits[6], flits[7]);
23907 +
23908 +    krwlock_write (&ctxt->VpLock);
23909 +
23910 +    /* check the route is valid */
23911 +    if (!(ctxt->Capability.cap_type & ELAN_CAP_TYPE_HWTEST))
23912 +    {
23913 +       /* must have already attached to define my context number */
23914 +       if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
23915 +       {
23916 +           krwlock_done (&ctxt->VpLock);
23917 +           return (EINVAL);
23918 +       }
23919 +
23920 +       if ((err=elan3_route_check(ctxt,flits,ProcessToLocation (ctxt, NULL, process, NULL).loc_node)) != ELAN3_ROUTE_SUCCESS)
23921 +       {
23922 +           krwlock_done (&ctxt->VpLock);
23923 +           return (EINVAL);
23924 +       }
23925 +    }
23926 +
23927 +    if ((seg = FindSegment (ctxt, process, process)) == NULL || seg->Type != ELAN3_VPSEG_P2P)
23928 +    {
23929 +       krwlock_done (&ctxt->VpLock);
23930 +       return (EINVAL);
23931 +    }
23932 +
23933 +    /* Calculate number of flits in this route */
23934 +    for (nflits = 0; nflits < MAX_FLITS && flits[nflits]; nflits++)
23935 +       ;
23936 +    
23937 +    res = LoadRoute (ctxt->Device, ctxt->RouteTable, process, ProcessToLocation (ctxt, seg, process, NULL).loc_context, nflits, flits);
23938 +
23939 +    krwlock_done (&ctxt->VpLock);
23940 +
23941 +    return (res);
23942 +}
23943 +
23944 +int
23945 +elan3_get_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits)
23946 +{
23947 +    ELAN3_VPSEG *seg;
23948 +    int                res = 0;
23949 +
23950 +    PRINTF1 (ctxt, DBG_VP, "elan3_get_route: vp=%d \n",  process);
23951 +
23952 +    krwlock_write (&ctxt->VpLock);
23953 +
23954 +    if (ctxt->RouteTable == NULL)  /* is there a route table */
23955 +    {
23956 +       krwlock_done (&ctxt->VpLock);
23957 +       return (EINVAL);
23958 +    }
23959 +
23960 +    if ((seg = FindSegment (ctxt, process, process)) != NULL && seg->Type != ELAN3_VPSEG_P2P)
23961 +    {
23962 +       krwlock_done (&ctxt->VpLock);
23963 +       return (EINVAL);
23964 +    }
23965 +    
23966 +    if (seg == NULL)
23967 +    {
23968 +       krwlock_done (&ctxt->VpLock);
23969 +       return (EINVAL);
23970 +    }
23971 +    
23972 +    res = GetRoute (ctxt->Device, ctxt->RouteTable, process, flits);
23973 +    
23974 +    krwlock_done (&ctxt->VpLock);
23975 +
23976 +    return (res);
23977 +}
23978 +
23979 +int
23980 +elan3_reset_route (ELAN3_CTXT *ctxt, int process)
23981 +{
23982 +    E3_uint16     flits[MAX_FLITS];
23983 +
23984 +    PRINTF1 (ctxt, DBG_VP, "elan3_reset_route: vp=%d \n",  process);
23985 +
23986 +    GenerateRoute (&ctxt->Position, flits, process, process, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
23987 +    
23988 +    return elan3_load_route(ctxt,process,flits);
23989 +}
23990 +
23991 +int
23992 +ResolveVirtualProcess (ELAN3_CTXT *ctxt, int process)
23993 +{
23994 +    E3_uint16    flits[MAX_FLITS];
23995 +    ELAN3_DEV     *dev = ctxt->Device;
23996 +    int                  res = ESUCCESS;
23997 +    ELAN3_VPSEG   *seg;
23998 +    ELAN3_VPSEG   *aseg;
23999 +    E3_uint64    routeValue;
24000 +
24001 +    krwlock_read (&ctxt->VpLock);
24002 +
24003 +    PRINTF1 (ctxt, DBG_VP, "ResolveVirtualProcess: vp=%d \n",  process);
24004 +
24005 +    if (ctxt->RouteTable == NULL || process < 0 || process >= ctxt->RouteTable->Size)
24006 +    {
24007 +       krwlock_done (&ctxt->VpLock);
24008 +       return (EINVAL);
24009 +    }
24010 +
24011 +    if (! (seg = FindSegment (ctxt, process, process)))
24012 +    {
24013 +       PRINTF1 (ctxt, DBG_VP, "ResolveVirtualProcess: cannot find segment for virtual process %d\n", process);
24014 +       krwlock_done (&ctxt->VpLock);
24015 +       return (EINVAL);
24016 +    }
24017 +    
24018 +    /* check cap. */
24019 +    switch (elan3_validate_cap (ctxt->Device, &seg->SegCapability, ((seg->Type == ELAN3_VPSEG_P2P) ? ELAN_USER_P2P : ELAN_USER_BROADCAST)))
24020 +    {
24021 +    case ELAN_CAP_OK:
24022 +       /* nothing */
24023 +       break;
24024 +
24025 +    case ELAN_CAP_RMS:
24026 +       if ( elan_validate_map(&ctxt->Capability, &seg->SegCapability) != ESUCCESS)
24027 +       {
24028 +           krwlock_done (&ctxt->VpLock);
24029 +           return (EINVAL);
24030 +       }
24031 +       break;
24032 +
24033 +    default:
24034 +       krwlock_done (&ctxt->VpLock);
24035 +       return (EINVAL);
24036 +    }
24037 +
24038 +    BumpUserStat (ctxt, LoadVirtualProcess);
24039 +
24040 +    routeValue = elan3_sdram_readq (dev, ctxt->RouteTable->Table + process * NBYTES_PER_SMALL_ROUTE);
24041 +    if (routeValue & ROUTE_VALID)                              /* Virtual process already */
24042 +    {                                                          /* loaded */
24043 +       krwlock_done (&ctxt->VpLock);
24044 +       return (ESUCCESS);                      
24045 +    }
24046 +    
24047 +    switch (seg->Type)
24048 +    {
24049 +    case ELAN3_VPSEG_P2P:
24050 +       switch (seg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
24051 +       {
24052 +       case ELAN_CAP_TYPE_BLOCK:
24053 +       case ELAN_CAP_TYPE_CYCLIC:
24054 +           if ((res = elan_validate_map (&ctxt->Capability,&seg->SegCapability)) == ESUCCESS &&
24055 +               (res = GetRoute(dev, ctxt->RouteTable ,process,  flits)) == ESUCCESS)
24056 +           {
24057 +               if (elan3_route_check(ctxt, flits, ProcessToLocation (ctxt, seg, process, NULL).loc_node))
24058 +                   res = EINVAL;
24059 +               else
24060 +                   ValidateRoute(dev, ctxt->RouteTable, process);
24061 +           }
24062 +           break;
24063 +       default:
24064 +           res = EINVAL;
24065 +           break;
24066 +       }
24067 +       break;
24068 +
24069 +    case ELAN3_VPSEG_BROADCAST:
24070 +       /* Find the segment that this broadcast range spans. */
24071 +       aseg = FindSegment (ctxt, seg->SegLowProc, seg->SegHighProc);
24072 +       
24073 +       if (aseg == NULL || (aseg->Type != ELAN3_VPSEG_P2P) || !(aseg->SegCapability.cap_type & ELAN_CAP_TYPE_BROADCASTABLE))
24074 +       {
24075 +           PRINTF2 (ctxt, DBG_VP, "resolveVirtualProcess: %d -> EINVAL (%s)\n", process, 
24076 +                    (aseg == NULL ? "no segment" : ((seg->Type != ELAN3_VPSEG_P2P) ? "not point to point" :
24077 +                                                    "not broadcastable")));
24078 +           res = EINVAL;
24079 +           break;
24080 +       }
24081 +       
24082 +       switch (aseg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
24083 +       {
24084 +       case ELAN_CAP_TYPE_BLOCK:
24085 +       case ELAN_CAP_TYPE_CYCLIC:
24086 +       {
24087 +           ELAN_LOCATION lowNode  = ProcessToLocation (ctxt,aseg,seg->SegLowProc  , NULL);
24088 +           ELAN_LOCATION highNode = ProcessToLocation (ctxt,aseg,seg->SegHighProc , NULL);
24089 +
24090 +
24091 +           if ((res = elan_validate_map (&ctxt->Capability,&aseg->SegCapability)) == ESUCCESS &&
24092 +               (res=GetRoute(dev, ctxt->RouteTable ,process,  flits)) == ESUCCESS)
24093 +           {
24094 +               if (elan3_route_broadcast_check(ctxt,flits, lowNode.loc_node , highNode.loc_node ) != ELAN3_ROUTE_SUCCESS )
24095 +                   res = EINVAL;
24096 +               else
24097 +                   ValidateRoute(dev, ctxt->RouteTable, process);
24098 +           }
24099 +           break;
24100 +       }
24101 +
24102 +       default:
24103 +           res = EINVAL;
24104 +           break;
24105 +       }
24106 +    default:
24107 +       res  = EINVAL;
24108 +       break;
24109 +    }
24110 +
24111 +    krwlock_done (&ctxt->VpLock);
24112 +    return (res);
24113 +}        
24114 +
24115 +void
24116 +UnloadVirtualProcess (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
24117 +{
24118 +    ELAN3_DEV        *dev  = ctxt->Device;
24119 +    ELAN3_VPSEG      *seg;
24120 +    ELAN_CAPABILITY *scap;
24121 +    int              i;
24122 +
24123 +    for (seg = ctxt->VpSegs; seg; seg = seg->Next)
24124 +    {
24125 +       switch (seg->Type)
24126 +       {
24127 +       case ELAN3_VPSEG_P2P:
24128 +           scap = &seg->SegCapability;
24129 +           
24130 +           if (cap == NULL || ELAN_CAP_MATCH (scap, cap))
24131 +           {
24132 +               PRINTF2 (ctxt, DBG_VP, "unloadvp: segment [%x.%x]\n", 
24133 +                        seg->Process, seg->Process + seg->Entries-1);
24134 +               
24135 +               for (i = 0; i < seg->Entries; i++)
24136 +                   InvalidateRoute (dev, ctxt->RouteTable, seg->Process+i);
24137 +           }
24138 +           break;
24139 +
24140 +       case ELAN3_VPSEG_BROADCAST:
24141 +           for (i = 0; i < seg->Entries; i++)
24142 +           {
24143 +               ELAN3_VPSEG *aseg = FindSegment (ctxt, seg->SegLowProc, seg->SegHighProc);
24144 +               
24145 +               if (aseg != NULL && ELAN_CAP_MATCH(&aseg->SegCapability, cap))
24146 +               {
24147 +                   PRINTF1 (ctxt, DBG_VP, "unloadvp: broadcast vp %d\n", seg->Process);
24148 +               
24149 +                   InvalidateRoute (dev, ctxt->RouteTable, seg->Process+i);
24150 +               }
24151 +           }
24152 +       }
24153 +    }
24154 +}
24155 +
24156 +caddr_t
24157 +CapabilityString (ELAN_CAPABILITY *cap)
24158 +{
24159 +#define CAPSTR_LEN     200
24160 +#define NCAPSTRS       4
24161 +    static char       space[CAPSTR_LEN*NCAPSTRS];
24162 +    static int        bufnum;
24163 +    static spinlock_t lock;
24164 +    static int       lockinitialised;
24165 +    int                      num;
24166 +    unsigned long     flags;
24167 +
24168 +    if (! lockinitialised)
24169 +    {
24170 +       spin_lock_init (&lock);
24171 +       lockinitialised = 1;
24172 +    }
24173 +
24174 +    spin_lock_irqsave (&lock, flags);
24175 +    
24176 +    if ((num = ++bufnum) == NCAPSTRS)
24177 +       num = bufnum = 0;
24178 +    spin_unlock_irqrestore (&lock, flags);
24179 +
24180 +    sprintf (space + (num * CAPSTR_LEN), "%4x %4x %4x %4x %4x %4x %4x [%x.%x.%x.%x]", cap->cap_type,
24181 +            cap->cap_lownode, cap->cap_highnode, 
24182 +            cap->cap_lowcontext, cap->cap_mycontext, cap->cap_highcontext,  ELAN_CAP_ENTRIES(cap),
24183 +            cap->cap_userkey.key_values[0],  cap->cap_userkey.key_values[1],
24184 +            cap->cap_userkey.key_values[2],  cap->cap_userkey.key_values[3]);
24185 +
24186 +    return (space + (num * CAPSTR_LEN));
24187 +}
24188 +
24189 +
24190 +/*
24191 + * Local variables:
24192 + * c-file-style: "stroustrup"
24193 + * End:
24194 + */
24195 Index: linux-2.4.21/drivers/net/qsnet/elan4/debug.c
24196 ===================================================================
24197 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/debug.c   2004-02-23 16:02:56.000000000 -0500
24198 +++ linux-2.4.21/drivers/net/qsnet/elan4/debug.c        2005-06-01 23:12:54.597439408 -0400
24199 @@ -0,0 +1,94 @@
24200 +/*
24201 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
24202 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
24203 + * 
24204 + *    For licensing information please see the supplied COPYING file
24205 + *
24206 + */
24207 +
24208 +#ident "@(#)$Id: debug.c,v 1.16 2004/07/07 11:22:33 addy Exp $"
24209 +/*      $Source: /cvs/master/quadrics/elan4mod/debug.c,v $*/
24210 +
24211 +#include <qsnet/kernel.h>
24212 +
24213 +#include <elan4/debug.h>
24214 +#include <elan4/device.h>
24215 +
24216 +unsigned       elan4_debug           = 0;
24217 +unsigned       elan4_debug_toconsole = 0;
24218 +unsigned       elan4_debug_tobuffer  = DBG_ALL;
24219 +
24220 +unsigned       elan4_debug_display_ctxt;
24221 +unsigned       elan4_debug_ignore_ctxt;
24222 +unsigned       elan4_debug_ignore_type;
24223 +
24224 +void
24225 +elan4_debug_init()
24226 +{
24227 +    if ((elan4_debug & elan4_debug_tobuffer) != 0)
24228 +       qsnet_debug_alloc();
24229 +}
24230 +
24231 +void
24232 +elan4_debug_fini()
24233 +{
24234 +}
24235 +
24236 +void
24237 +elan4_debugf (void *type, int mode, char *fmt,...)
24238 +{
24239 +    char    prefix[128];
24240 +    int     where = 0;
24241 +    va_list ap;
24242 +
24243 +    if ((mode & elan4_debug_tobuffer) != 0 || type == DBG_BUFFER)
24244 +       where |= QSNET_DEBUG_BUFFER;
24245 +    if ((mode & elan4_debug_toconsole) != 0 || type == DBG_CONSOLE)
24246 +       where |= QSNET_DEBUG_CONSOLE;
24247 +
24248 +    if (where == 0)
24249 +       return;
24250 +    
24251 +    if ((unsigned long) type > DBG_NTYPES)
24252 +    {
24253 +       ELAN4_CTXT *ctxt = (ELAN4_CTXT *) type;
24254 +
24255 +        if (elan4_debug_display_ctxt && ctxt->ctxt_num != elan4_debug_display_ctxt)
24256 +            return;
24257 +        if (elan4_debug_ignore_ctxt  && ctxt->ctxt_num == elan4_debug_ignore_ctxt)
24258 +            return;
24259 +
24260 +       sprintf (prefix, "[%08ld.%04d] elan4 (%03x) ", lbolt,  current->pid, ctxt->ctxt_num);
24261 +    }
24262 +    else if ((unsigned long) type == (int) DBG_CONSOLE)
24263 +       prefix[0] = '\0';
24264 +    else
24265 +    {
24266 +       char *what;
24267 +
24268 +       if (elan4_debug_ignore_type & (1 << ((unsigned long) type)))
24269 +           return;
24270 +
24271 +       switch ((unsigned long) type)
24272 +       {
24273 +       case (int) DBG_DEVICE: what = "dev"; break;
24274 +       case (int) DBG_USER:   what = "usr"; break;
24275 +       default:               what = NULL; break;
24276 +       }
24277 +           
24278 +       if (what)
24279 +           sprintf (prefix, "[%08ld.%04d] elan4 [%s] ", lbolt, current->pid, what);
24280 +       else
24281 +           sprintf (prefix, "[%08ld.%04d] elan4 [%3d] ", lbolt, current->pid, (int)(long)type);
24282 +    }
24283 +
24284 +    va_start(ap,fmt);
24285 +    qsnet_vdebugf (where, prefix, fmt, ap);
24286 +    va_end (ap);
24287 +}
24288 +
24289 +/*
24290 + * Local variables:
24291 + * c-file-style: "stroustrup"
24292 + * End:
24293 + */
24294 Index: linux-2.4.21/drivers/net/qsnet/elan4/device.c
24295 ===================================================================
24296 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/device.c  2004-02-23 16:02:56.000000000 -0500
24297 +++ linux-2.4.21/drivers/net/qsnet/elan4/device.c       2005-06-01 23:12:54.602438648 -0400
24298 @@ -0,0 +1,2805 @@
24299 +/*
24300 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
24301 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
24302 + * 
24303 + *    For licensing information please see the supplied COPYING file
24304 + *
24305 + */
24306 +
24307 +#ident "@(#)$Id: device.c,v 1.87.6.3 2005/01/18 14:25:35 david Exp $"
24308 +/*      $Source: /cvs/master/quadrics/elan4mod/device.c,v $*/
24309 +
24310 +#include <qsnet/kernel.h>
24311 +#include <qsnet/kthread.h>
24312 +
24313 +#include <elan4/sdram.h>
24314 +#include <elan4/debug.h>
24315 +#include <elan4/device.h>
24316 +#include <elan4/commands.h>
24317 +#include <elan4/trtype.h>
24318 +#include <elan4/neterr.h>
24319 +
24320 +#include <elan4/i2c.h>
24321 +#include <elan3/vpd.h>
24322 +
24323 +/* allow this code to compile against an Eagle elanmod */
24324 +#ifdef __ELANMOD_DEVICE_H
24325 +#define ELAN_DEV_OPS           ELANMOD_DEV_OPS
24326 +#define ELAN_DEV_OPS_VERSION   ELANMOD_DEV_OPS_VERSION
24327 +#define elan_dev_register      elanmod_dev_register
24328 +#define elan_dev_deregister    elanmod_dev_deregister
24329 +#endif
24330 +
24331 +/* XXXX configurational defines */
24332 +
24333 +#if defined (CONFIG_MPSAS)
24334 +#define HASH_0_SIZE_VAL                        (12 + 6)
24335 +#define HASH_1_SIZE_VAL                        (2 + 6)
24336 +#define CTXT_TABLE_SHIFT               8
24337 +#define LN2_MAX_CQS                    8               /* 256 */
24338 +#else
24339 +#define HASH_0_SIZE_VAL                        (13 + 6)
24340 +#define HASH_1_SIZE_VAL                        (2 + 6)
24341 +#define CTXT_TABLE_SHIFT               12
24342 +#define LN2_MAX_CQS                    10              /* 1024 */
24343 +#endif
24344 +
24345 +unsigned int elan4_hash_0_size_val       = HASH_0_SIZE_VAL;
24346 +unsigned int elan4_hash_1_size_val       = HASH_1_SIZE_VAL;
24347 +unsigned int elan4_ctxt_table_shift      = CTXT_TABLE_SHIFT;
24348 +unsigned int elan4_ln2_max_cqs           = LN2_MAX_CQS;
24349 +unsigned int elan4_dmaq_highpri_size     = 2;                  /* 8192 entries */
24350 +unsigned int elan4_threadq_highpri_size  = 1;                  /* 1024 entries */
24351 +unsigned int elan4_dmaq_lowpri_size      = 2;                  /* 8192 entries */
24352 +unsigned int elan4_threadq_lowpri_size   = 1;                  /* 1024 entries */
24353 +unsigned int elan4_interruptq_size       = 0;                  /* 1024 entries */
24354 +unsigned int elan4_mainint_punt_loops    = 1;
24355 +unsigned int elan4_mainint_resched_ticks = 0;
24356 +
24357 +static int 
24358 +elan4_op_get_position (void *arg, ELAN_POSITION *ptr)
24359 +{
24360 +    ELAN4_DEV     *dev = (ELAN4_DEV *)arg;
24361 +    ELAN_POSITION  pos;
24362 +
24363 +    elan4_get_position (dev, &pos);
24364 +
24365 +    return copyout (&pos, ptr, sizeof (ELAN_POSITION));
24366 +}
24367 +
24368 +static int 
24369 +elan4_op_set_position (void *arg, unsigned short nodeid, unsigned short numnodes)
24370 +{
24371 +    /* XXXXX 
24372 +
24373 +       ELAN4_DEV *dev = (ELAN4_DEV *) arg;
24374 +
24375 +       compute_position (&pos, nodeid, numnode, num_down_links_value);
24376 +
24377 +       return elan4_set_position (dev, pos);
24378 +    */
24379 +    return EINVAL;
24380 +}
24381 +
24382 +ELAN_DEV_OPS elan4_dev_ops = 
24383 +{
24384 +    elan4_op_get_position,
24385 +    elan4_op_set_position,
24386 +
24387 +    ELAN_DEV_OPS_VERSION
24388 +};
24389 +
24390 +static E4_uint32
24391 +elan4_read_filter (ELAN4_DEV *dev, unsigned networkctx)
24392 +{
24393 +    return (elan4_sdram_readl (dev, dev->dev_ctxtable + (networkctx * sizeof (E4_ContextControlBlock)) + 
24394 +                              offsetof (E4_ContextControlBlock, Filter)));
24395 +}
24396 +
24397 +static void
24398 +elan4_write_filter (ELAN4_DEV *dev, unsigned networkctx, E4_uint32 value)
24399 +{
24400 +    elan4_sdram_writel (dev, (dev->dev_ctxtable + (networkctx * sizeof (E4_ContextControlBlock)) +
24401 +                       offsetof (E4_ContextControlBlock, Filter)), value);
24402 +    pioflush_sdram(dev);
24403 +}
24404 +
24405 +void
24406 +elan4_set_schedstatus (ELAN4_DEV *dev, E4_uint32 intreg)
24407 +{
24408 +    E4_uint32 setbits  = 0;
24409 +    E4_uint32 intmask  = 0;
24410 +    E4_uint32 haltmask;
24411 +    E4_uint32 next_sched;
24412 +    E4_uint32 next_intmask;
24413 +    unsigned long flags;
24414 +
24415 +    spin_lock_irqsave (&dev->dev_intmask_lock, flags);
24416 +
24417 +    haltmask = (dev->dev_haltop_mask | dev->dev_haltop_active);
24418 +
24419 +    if ((haltmask & INT_DProcHalted) || dev->dev_halt_all_count || dev->dev_halt_dproc_count)
24420 +       setbits |= SCH_DProcHalt;
24421 +    
24422 +    if ((haltmask & INT_TProcHalted) || dev->dev_halt_all_count || dev->dev_halt_tproc_count)
24423 +       setbits |= SCH_TProcHalt;
24424 +
24425 +    if ((haltmask & INT_CProcHalted) || dev->dev_halt_all_count || dev->dev_halt_cproc_count)
24426 +       setbits |= SCH_CProcHalt;
24427 +
24428 +    if ((haltmask & INT_DiscardingLowPri) || dev->dev_discard_all_count || dev->dev_discard_lowpri_count)
24429 +       setbits |= SCH_DiscardLowPriInput;
24430 +    
24431 +    if ((haltmask & INT_DiscardingHighPri) || dev->dev_discard_all_count || dev->dev_discard_highpri_count)
24432 +       setbits |= SCH_DiscardHighPriInput;
24433 +    
24434 +    if (dev->dev_halt_lowpri_count)
24435 +       setbits |= SCH_StopLowPriQueues;
24436 +    
24437 +    if (haltmask & INT_DProcHalted) intmask |= INT_DProcHalted;
24438 +    if (haltmask & INT_TProcHalted) intmask |= INT_TProcHalted;
24439 +    if (haltmask & INT_CProcHalted) intmask |= INT_CProcHalted;
24440 +    if (haltmask & INT_DiscardingLowPri) intmask |= INT_DiscardingLowPri;
24441 +    if (haltmask & INT_DiscardingHighPri) intmask |= INT_DiscardingHighPri;
24442 +
24443 +    next_intmask = (dev->dev_intmask     & ~(INT_Halted | INT_Discarding)) | (intmask & ~intreg);
24444 +    next_sched   = (dev->dev_schedstatus & ~(SCH_Halt | SCH_Discard))      | setbits;
24445 +
24446 +    PRINTF5 (DBG_DEVICE, DBG_REGISTER, "elan4_set_schedstatus: haltmask=%x setbits=%x intmask=%x next_sched=%x next_intmask=%x\n",
24447 +            haltmask, setbits, intmask, next_sched, next_intmask);
24448 +
24449 +    CHANGE_INT_MASK (dev, next_intmask);
24450 +    CHANGE_SCHED_STATUS (dev, next_sched);
24451 +
24452 +    spin_unlock_irqrestore (&dev->dev_intmask_lock, flags);
24453 +}
24454 +
24455 +void
24456 +elan4_queue_haltop (ELAN4_DEV *dev, ELAN4_HALTOP *op)
24457 +{
24458 +    unsigned long flags;
24459 +
24460 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24461 +
24462 +    /* add to the end of the halt operations list */
24463 +    list_add_tail (&op->op_link, &dev->dev_haltop_list);
24464 +
24465 +    if ((dev->dev_haltop_mask & op->op_mask) != op->op_mask)
24466 +    {
24467 +       dev->dev_haltop_mask |= op->op_mask;
24468 +       
24469 +       elan4_set_schedstatus (dev, 0);
24470 +    }
24471 +    
24472 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
24473 +}
24474 +
24475 +void
24476 +elan4_queue_intop (ELAN4_DEV *dev, ELAN4_CQ *cq, ELAN4_INTOP *op)
24477 +{
24478 +    unsigned long flags;
24479 +
24480 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
24481 +
24482 +    op->op_cookie = INTOP_ONESHOT | ((dev->dev_intop_cookie++) & INTOP_VALUE_MASK);
24483 +
24484 +    list_add_tail (&op->op_link, &dev->dev_intop_list);
24485 +
24486 +    writeq ((op->op_cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD, cq->cq_mapping);
24487 +
24488 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
24489 +}
24490 +
24491 +void
24492 +elan4_register_intop (ELAN4_DEV *dev, ELAN4_INTOP *op)
24493 +{
24494 +    unsigned long flags;
24495 +
24496 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
24497 +
24498 +    op->op_cookie = INTOP_PERSISTENT | ((dev->dev_intop_cookie++) & INTOP_VALUE_MASK);
24499 +
24500 +    list_add_tail (&op->op_link, &dev->dev_intop_list);
24501 +
24502 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
24503 +}
24504 +
24505 +void
24506 +elan4_deregister_intop (ELAN4_DEV *dev, ELAN4_INTOP *op)
24507 +{
24508 +    unsigned long flags;
24509 +
24510 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
24511 +    list_del (&op->op_link);
24512 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
24513 +}
24514 +
24515 +static __inline__ void
24516 +__issue_dma_flushop_cmd (ELAN4_CQ *cq)
24517 +{
24518 +    writeq (DMA_ShMemWrite | RUN_DMA_CMD, cq->cq_mapping);
24519 +    writeq (0 /* cookie */,               cq->cq_mapping);
24520 +    writeq (0 /* vproc */,                cq->cq_mapping);
24521 +    writeq (0 /* srcAddr */,              cq->cq_mapping);
24522 +    writeq (0 /* dstAddr */,              cq->cq_mapping);
24523 +    writeq (0 /* srcEvent */,             cq->cq_mapping);
24524 +    writeq (0 /* dstEvent */,             cq->cq_mapping);
24525 +    writeq (SET_EVENT_CMD,                cq->cq_mapping);
24526 +}
24527 +
24528 +static void
24529 +handle_dma_flushops_intop (ELAN4_DEV *dev, void *arg)
24530 +{
24531 +    unsigned int  hipri        = ((unsigned long) arg & 1);
24532 +    E4_uint64     status       = dev->dev_dma_flushop[hipri].status;
24533 +    ELAN4_CQ     *cq           = dev->dev_dma_flushop[hipri].cq;
24534 +    sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
24535 +    E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
24536 +    E4_uint32     completedPtr = CQ_CompletedPtr(queuePtrs);
24537 +    E4_uint32     size         = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
24538 +    unsigned long flags;
24539 +
24540 +    /*
24541 +     * Since we're called from a main interrupt which was issued through the appropriate
24542 +     * flushcq the command queue descriptor for dma flushing can no longer be in the 
24543 +     * insert cache, nor can it be in the extractor (as it's trapped), hence it is
24544 +     * safe to modify the completed pointer
24545 +     */
24546 +
24547 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24548 +
24549 +    ASSERT (status != 0);
24550 +
24551 +    /* skip over either the DMA/SETEVENT or just the SETEVENT depending on the trap type */
24552 +    if (CPROC_TrapType (status) == CommandProcDmaQueueOverflow)
24553 +       completedPtr = (completedPtr & ~(size-1)) | ((completedPtr + 64) & (size - 1));
24554 +    else
24555 +       completedPtr = (completedPtr & ~(size-1)) | ((completedPtr + 8) & (size - 1));
24556 +    
24557 +    elan4_sdram_writel (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs) + 4,
24558 +                       ((queuePtrs >> 32) & ~CQ_PtrOffsetMask) | (completedPtr & CQ_PtrOffsetMask));
24559 +    
24560 +    elan4_restartcq (dev, dev->dev_dma_flushop[hipri].cq);
24561 +
24562 +    if (! list_empty (&dev->dev_dma_flushop[hipri].list))
24563 +       __issue_dma_flushop_cmd (dev->dev_dma_flushop[hipri].cq);
24564 +
24565 +    dev->dev_dma_flushop[hipri].status = 0;
24566 +    
24567 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
24568 +
24569 +}
24570 +
24571 +static void
24572 +handle_dma_flushops (ELAN4_DEV *dev, E4_uint64 status, int cqnum)
24573 +{
24574 +    unsigned int       hipri  = (cqnum == elan4_cq2num(dev->dev_dma_flushop[1].cq) ? 1 : 0);
24575 +    ELAN4_CQ          *cq     = dev->dev_dma_flushop[hipri].cq;
24576 +    ELAN4_CQ          *flushq = dev->dev_flush_cq[elan4_cq2num(cq) & (COMMAND_INSERTER_CACHE_ENTRIES-1)];
24577 +    struct list_head  *ops;
24578 +    unsigned long      flags;
24579 +    int                       qfull,count;
24580 +    E4_uint64         queuePtrs;
24581 +    LIST_HEAD(list);
24582 +    
24583 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24584 +    
24585 +    ASSERT (cqnum == elan4_cq2num (dev->dev_dma_flushop[hipri].cq));
24586 +    ASSERT (! list_empty (&dev->dev_dma_flushop[hipri].list));
24587 +    ASSERT (dev->dev_dma_flushop[hipri].status == 0);
24588 +    
24589 +    /* remove the whole list */
24590 +    ops = dev->dev_dma_flushop[hipri].list.next;
24591 +
24592 +    list_del_init (&dev->dev_dma_flushop[hipri].list);
24593 +    
24594 +    /* and add it to our local list */
24595 +    list_add_tail (&list, ops);
24596 +    
24597 +    /* now determine whether the queue was full - since it cannot be empty 
24598 +     * then if the front and back pointers are the same then it is full */
24599 +    queuePtrs = hipri ? read_reg64 (dev, DProcHighPriPtrs) : read_reg64 (dev, DProcLowPriPtrs);
24600 +    qfull     = (E4_QueueFrontPointer (queuePtrs) == E4_QueueBackPointer (queuePtrs));
24601 +    
24602 +    if (CPROC_TrapType(status) == CommandProcDmaQueueOverflow && !qfull)
24603 +       printk (" ******* queue overflow trap - but queue not full\n");
24604 +
24605 +    if (qfull && CPROC_TrapType(status) != CommandProcDmaQueueOverflow)
24606 +       printk (" ****** queue full - but not overflow trap : %llx %llx %x\n", 
24607 +               read_reg64 (dev, DProcLowPriPtrs), read_reg64 (dev, DProcHighPriPtrs), CPROC_TrapType(status));
24608 +
24609 +    /* Store the status register, this also indicates that the intop is pending */
24610 +    dev->dev_dma_flushop[hipri].status = status;
24611 +
24612 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
24613 +
24614 +    /* Issue a main interrupt command to the appropriate flush command queue,
24615 +     * which will then safely update the completed pointer to skip over the
24616 +     * command which has trapped, also prevent any new commands to be issued
24617 +     * to the command queue.
24618 +     */
24619 +    dev->dev_dma_flushop[hipri].intop.op_function = handle_dma_flushops_intop;
24620 +    dev->dev_dma_flushop[hipri].intop.op_arg      = (void *) (unsigned long) hipri;
24621 +
24622 +    elan4_queue_intop (dev, flushq, &dev->dev_dma_flushop[hipri].intop);
24623 +    
24624 +    /* now execute all operations */
24625 +    for (count = 0; ! list_empty (&list); count++)
24626 +    {
24627 +       ELAN4_DMA_FLUSHOP *op = list_entry (list.next, ELAN4_DMA_FLUSHOP, op_link);
24628 +       
24629 +       list_del (&op->op_link);
24630 +       
24631 +       (*op->op_function) (dev, op->op_arg, qfull);
24632 +    }
24633 +
24634 +    /* finally release the "reasons" for halting */
24635 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24636 +    if ((dev->dev_halt_dproc_count -= count) == 0)
24637 +       elan4_set_schedstatus (dev, 0);
24638 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
24639 +       
24640 +    return;
24641 +}
24642 +
24643 +void
24644 +elan4_queue_dma_flushop (ELAN4_DEV *dev, ELAN4_DMA_FLUSHOP *op, int hipri)
24645 +{
24646 +    unsigned long flags;
24647 +
24648 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24649 +
24650 +    if (dev->dev_halt_dproc_count++ == 0)                      /* ensure that the DMA processor cannot */
24651 +       elan4_set_schedstatus (dev, 0);                         /* execute the DMA we issue. */
24652 +
24653 +    if (list_empty (&dev->dev_dma_flushop[hipri].list) && dev->dev_dma_flushop[hipri].status == 0)
24654 +       __issue_dma_flushop_cmd (dev->dev_dma_flushop[hipri].cq);
24655 +       
24656 +    list_add_tail (&op->op_link, &dev->dev_dma_flushop[hipri].list);
24657 +
24658 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
24659 +}
24660 +
24661 +static void
24662 +enable_elan_errors (void *arg)
24663 +{
24664 +    ELAN4_DEV *dev = (ELAN4_DEV *) arg;
24665 +
24666 +    ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
24667 +}
24668 +
24669 +#define ERROR_DISABLE_PERIOD   (hz/2)
24670 +#define ERROR_SAMPLE_PERIOD    (hz/10)
24671 +#define ERROR_LIMIT            (100)
24672 +
24673 +static __inline__ void
24674 +check_error_rate (ELAN4_DEV *dev)
24675 +{
24676 +    if (dev->dev_error_time == (lbolt/ERROR_SAMPLE_PERIOD))
24677 +    {
24678 +        if (++dev->dev_errors_per_period >= ERROR_LIMIT && (dev->dev_intmask & INT_ErrorInterrupts))
24679 +       {
24680 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
24681 +           
24682 +           schedule_timer_fn (&dev->dev_error_timeoutid, enable_elan_errors, (void *) dev, ERROR_DISABLE_PERIOD);
24683 +       }
24684 +    }
24685 +    else
24686 +    {
24687 +       dev->dev_error_time        = (lbolt/ERROR_SAMPLE_PERIOD);
24688 +       dev->dev_errors_per_period = 0;
24689 +    }
24690 +}
24691 +
24692 +static __inline__ int
24693 +handle_mainints (ELAN4_DEV *dev, int nticks, int nintr)
24694 +{
24695 +    E4_uint32 nfptr = dev->dev_interruptq_nfptr;
24696 +    E4_uint32 bptr  = read_reg32 (dev, MainIntQueuePtrs.s.Back);
24697 +    E4_uint32 qsize = E4_QueueSize(elan4_interruptq_size);
24698 +    E4_uint32 qmask = qsize - 1;
24699 +    long      tlim  = lbolt + nticks;
24700 +    int       done = 0;
24701 +    unsigned long flags;
24702 +
24703 +    do {
24704 +       int todo  = ((bptr - nfptr) & qmask) / E4_MainIntEntrySize;
24705 +
24706 +       ASSERT (todo > 0);
24707 +
24708 +       PRINTF4 (DBG_DEVICE, DBG_MAININT, "handle_mainints: fptr %x nfptr %x bptr %x : %d todo\n", 
24709 +                read_reg32 (dev, MainIntQueuePtrs.s.Front), nfptr, bptr, todo);
24710 +
24711 +       if (nintr >= 0 && (done + todo) > nintr)                /* punt because too many to do in interrupt */
24712 +       {
24713 +           PRINTF4 (DBG_DEVICE, DBG_MAININT, "handle_mainints: punting (done %d todo %d) (bptr %x fptr %x)\n",
24714 +                    done, todo, bptr, read_reg32 (dev, MainIntQueuePtrs.s.Front));
24715 +
24716 +           return 1;
24717 +       }
24718 +
24719 +       BucketDevStat (dev, s_mainints, todo, MainIntBuckets);
24720 +
24721 +       /* consume all the entries in the queue which we think are there */
24722 +       do {
24723 +           E4_uint64   value = elan4_sdram_readq (dev, nfptr);
24724 +           ELAN4_CTXT *ctxt  = elan4_localctxt (dev, E4_MAIN_INT_CTX (value));
24725 +           E4_uint32   fptr  = nfptr;
24726 +
24727 +           PRINTF2 (DBG_DEVICE, DBG_MAININT, "handle_mainints: process cookie %llx - write fptr=%x\n", value, nfptr);
24728 +
24729 +           if (ctxt == NULL)
24730 +               PRINTF1 (DBG_DEVICE, DBG_INTR, "handle_mainints: context %d invalid\n", E4_MAIN_INT_CTX (value));
24731 +           else
24732 +               ctxt->ctxt_ops->op_interrupt (ctxt, E4_MAIN_INT_COOKIE(value));
24733 +
24734 +           /* compute the next queue front pointer, before updating the front pointer
24735 +            * since we need to ensure that elan4_queue_mainintop doesn't see the queue
24736 +            * as being empty if an extra interrupt is queued in between */
24737 +           dev->dev_interruptq_nfptr = nfptr = (nfptr & ~qmask) | ((nfptr + sizeof (E4_uint64)) & qmask);
24738 +    
24739 +           /* update the queue front pointer, doing this will clear the
24740 +            * interrupt for *all* interrupt cookies which have previously 
24741 +            * been added to the queue */
24742 +           write_reg32 (dev, MainIntQueuePtrs.s.Front, E4_QueueFrontValue (fptr, elan4_interruptq_size));
24743 +           pioflush_reg (dev);
24744 +       } while (bptr != nfptr);
24745 +       
24746 +       /* re-sample the back pointer and if it's different from the previous
24747 +        * queue front pointer, then the queue has something on it again */
24748 +       done += todo;
24749 +       
24750 +       if ((nticks > 0 && ((int) (lbolt - tlim)) > 0))         /* been executing for too long in thread */
24751 +           return 1;
24752 +
24753 +       bptr = read_reg32 (dev, MainIntQueuePtrs.s.Back);
24754 +
24755 +       PRINTF3 (DBG_DEVICE, DBG_MAININT, "handle_mainints: resample : fptr %x nfptr %x bptr %x\n", 
24756 +                read_reg32 (dev, MainIntQueuePtrs.s.Front), nfptr, bptr);
24757 +
24758 +       /* at this point we've made some space in the interrupt queue,
24759 +        * so check to see if we've got anything to restart */
24760 +       spin_lock_irqsave (&dev->dev_mainint_lock, flags);
24761 +       while (! list_empty (&dev->dev_interruptq_list))
24762 +       {
24763 +           ELAN4_INTOP *op = list_entry (dev->dev_interruptq_list.next, ELAN4_INTOP, op_link);
24764 +           
24765 +           list_del (&op->op_link);
24766 +
24767 +           op->op_function (dev, op->op_arg);
24768 +       }
24769 +       spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
24770 +
24771 +    } while (bptr != nfptr);
24772 +
24773 +    return 0;
24774 +}
24775 +
24776 +static void
24777 +elan4_mainint_thread (ELAN4_DEV *dev)
24778 +{
24779 +    unsigned long flags;
24780 +
24781 +    kernel_thread_init ("elan4_mainint");
24782 +    
24783 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
24784 +    for (;;)
24785 +    {
24786 +       if (dev->dev_stop_threads)
24787 +           break;
24788 +       
24789 +       if (! (dev->dev_intmask & INT_MainInterrupt))
24790 +       {
24791 +           spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
24792 +           
24793 +           if (handle_mainints (dev, elan4_mainint_resched_ticks, -1))
24794 +               BumpDevStat (dev, s_mainint_rescheds);
24795 +
24796 +           spin_lock_irqsave (&dev->dev_mainint_lock, flags);
24797 +           ENABLE_INT_MASK (dev, INT_MainInterrupt);
24798 +       }
24799 +       
24800 +       kcondvar_wait (&dev->dev_mainint_wait, &dev->dev_mainint_lock, &flags);
24801 +    }
24802 +
24803 +    dev->dev_mainint_stopped = 1;
24804 +    kcondvar_wakeupall (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
24805 +
24806 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
24807 +
24808 +    kernel_thread_exit();
24809 +}
24810 +
24811 +void
24812 +elan4_queue_mainintop (ELAN4_DEV *dev, ELAN4_INTOP *op)
24813 +{
24814 +    unsigned long flags;
24815 +
24816 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
24817 +    if (dev->dev_interruptq_nfptr == read_reg32 (dev, MainIntQueuePtrs.s.Back))
24818 +       op->op_function (dev, op->op_arg);
24819 +    else
24820 +       list_add_tail (&op->op_link, &dev->dev_interruptq_list);
24821 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
24822 +}
24823 +
24824 +static __inline__ E4_uint32
24825 +handle_cproc_trap (ELAN4_DEV *dev)
24826 +{
24827 +    E4_uint32   cqptr   = read_reg32 (dev, CommandControl.CommandQueueDescsBase) & E4_QueueDescPtrMask;
24828 +    unsigned    cqnum   = ((cqptr - dev->dev_cqaddr) / sizeof (E4_CommandQueueDesc));
24829 +    sdramaddr_t cqdesc  = dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc));
24830 +    E4_uint64   control = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
24831 +    E4_uint64   status  = read_reg64 (dev, CProcStatus);
24832 +    ELAN4_CTXT *ctxt    = elan4_localctxt (dev, CQ_Context (control));
24833 +
24834 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "handle_cproc_trap: cqnum=%d status=%016llx control=%016llx TrapType\n", 
24835 +            cqnum, status, control, CPROC_TrapType (status));
24836 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "                   %016llx %016llx %016llx %016llx\n",
24837 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)),
24838 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue)),
24839 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers)),
24840 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control)));
24841 +
24842 +    BumpDevStat (dev, s_cproc_traps);
24843 +
24844 +    ctxt->ctxt_ops->op_cproc_trap (ctxt, status, cqnum);
24845 +
24846 +    return (CPROC_TrapType (status) == CommandProcWaitTrap ? SCH_RestartCProc | SCH_RestartEProc : SCH_RestartCProc);
24847 +}
24848 +
24849 +static __inline__ E4_uint32
24850 +handle_dproc_trap (ELAN4_DEV *dev, int unit)
24851 +{
24852 +    E4_uint64   status  = (unit == 0) ? read_reg64 (dev, DProc0Status) : read_reg64 (dev, DProc1Status);
24853 +    E4_uint32   restart = (unit == 0) ? SCH_RestartDma0Proc : SCH_RestartDma1Proc;
24854 +    ELAN4_CTXT *ctxt    = elan4_localctxt (dev, DPROC_Context (status));
24855 +    
24856 +    PRINTF3 (DBG_DEVICE, DBG_INTR, "handle_dproc_trap: unit %d context %d%s\n", unit, DPROC_Context(status),
24857 +            DPROC_PrefetcherFault(status) ? " (prefetcher)" : "");
24858 +
24859 +    if (DPROC_PrefetcherFault (status))
24860 +       restart |= SCH_RestartDmaPrefetchProc;
24861 +                     
24862 +    BumpDevStat (dev, s_dproc_traps);
24863 +
24864 +    ctxt->ctxt_ops->op_dproc_trap (ctxt, status, unit);
24865 +
24866 +    return (restart);
24867 +}
24868 +
24869 +static __inline__ E4_uint32
24870 +handle_eproc_trap (ELAN4_DEV *dev)
24871 +{
24872 +    E4_uint64   status = read_reg64 (dev, EProcStatus);
24873 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, EPROC_Context (status));
24874 +
24875 +    BumpDevStat (dev, s_eproc_traps);
24876 +
24877 +    ctxt->ctxt_ops->op_eproc_trap (ctxt, status);
24878 +
24879 +    return (SCH_RestartEProc);
24880 +}
24881 +
24882 +static __inline__ E4_uint32
24883 +handle_tproc_trap (ELAN4_DEV *dev)
24884 +{
24885 +    E4_uint64   status = read_reg64 (dev, TProcStatus);
24886 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, TPROC_Context (status));
24887 +
24888 +    BumpDevStat (dev, s_tproc_traps);
24889 +
24890 +    ctxt->ctxt_ops->op_tproc_trap (ctxt, status);
24891 +    
24892 +    return (SCH_RestartTProc);
24893 +}
24894 +
24895 +static __inline__ void
24896 +handle_haltints (ELAN4_DEV *dev, E4_uint32 intreg)
24897 +{
24898 +    struct list_head  list   = LIST_HEAD_INIT(list);
24899 +    E4_uint32         mask   = 0;
24900 +    E4_uint32         active = 0;
24901 +    struct list_head *entry;
24902 +    struct list_head *next;
24903 +    unsigned long     flags;
24904 +
24905 +    BumpDevStat (dev, s_haltints);
24906 +
24907 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24908 +
24909 +    list_for_each_safe (entry, next, &dev->dev_haltop_list) {
24910 +       ELAN4_HALTOP *op = list_entry (entry, ELAN4_HALTOP, op_link);
24911 +
24912 +       PRINTF (DBG_DEVICE, DBG_INTR, "handle_haltints: op=%p op_mask=%x intreg=%x\n", op, op->op_mask, intreg);
24913 +
24914 +       if ((op->op_mask & intreg) != op->op_mask)
24915 +           mask |= op->op_mask;
24916 +       else
24917 +       {
24918 +           list_del (&op->op_link);                            /* remove from list */
24919 +           list_add_tail (&op->op_link, &list);                /* add to local list */
24920 +
24921 +           active |= op->op_mask;
24922 +       }
24923 +    }
24924 +
24925 +    ASSERT (dev->dev_haltop_mask == (mask | active));
24926 +
24927 +    dev->dev_haltop_mask = mask;
24928 +
24929 +    if (list_empty (&list))
24930 +       elan4_set_schedstatus (dev, intreg);
24931 +    else
24932 +    {
24933 +       dev->dev_haltop_active = active;
24934 +       spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
24935 +
24936 +       while (! list_empty (&list)) 
24937 +       {
24938 +           ELAN4_HALTOP *op = list_entry (list.next, ELAN4_HALTOP, op_link);
24939 +           
24940 +           list_del (&op->op_link);
24941 +
24942 +           (*op->op_function) (dev, op->op_arg);
24943 +       }
24944 +
24945 +       spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24946 +       dev->dev_haltop_active = 0;
24947 +
24948 +       elan4_set_schedstatus (dev, 0);
24949 +    }
24950 +
24951 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
24952 +}
24953 +
24954 +static __inline__ E4_uint32
24955 +handle_iproc_trap (ELAN4_DEV *dev, unsigned unit)
24956 +{
24957 +    sdramaddr_t hdroff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrHeader[0][unit]);
24958 +    E4_uint64   status = elan4_sdram_readq (dev, hdroff + offsetof (E4_IprocTrapHeader, IProcStatusCntxAndTrType));
24959 +    E4_uint32   filter = elan4_read_filter (dev, IPROC_NetworkContext (status));
24960 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, filter & E4_FILTER_CONTEXT_MASK);
24961 +
24962 +    /*
24963 +     * The context is not valid in the following case :
24964 +     *     ack not been sent AND bad CRC/bad length.
24965 +     *
24966 +     *  NOTE TransCRCStatus and BadLength only valid if NOT an EopTrap.
24967 +     */
24968 +    ASSERT ((IPROC_GoodAckSent (status) & (1 << IPROC_InputterChan (status))) || IPROC_EOPTrap (status) ||
24969 +           (IPROC_TransCRCStatus (status) == CRC_STATUS_GOOD && !IPROC_BadLength (status)));
24970 +    
24971 +    BumpDevStat (dev, s_iproc_traps);
24972 +
24973 +    ctxt->ctxt_ops->op_iproc_trap (ctxt, status, unit);
24974 +
24975 +    return (SCH_RestartCh0LowPriInput << unit);
24976 +}
24977 +
24978 +void
24979 +handle_pcimemerr (ELAN4_DEV *dev)
24980 +{
24981 +    elan4_pcierror (dev);
24982 +
24983 +    check_error_rate (dev);
24984 +}
24985 +
24986 +void
24987 +handle_sdramint (ELAN4_DEV *dev)
24988 +{
24989 +    E4_uint64 status = read_reg64 (dev, SDRamECCStatus);
24990 +    char      errstr[200];
24991 +    int              i;
24992 +    int              Found = 0;
24993 +
24994 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "handle_sdramint\n");
24995 +
24996 +    /* search for this error already being logged */
24997 +    for (i = sizeof (dev->dev_sdramerrs)/sizeof (dev->dev_sdramerrs[0]) - 1; i >= 0; i--)
24998 +        if ((dev->dev_sdramerrs[i] & 0x000fffffffffffffULL) == status)
24999 +       {
25000 +            Found = 1;
25001 +           dev->dev_sdramerrs[i] += 10000000000000ULL; // Keep a count.
25002 +           break;
25003 +       }
25004 +
25005 +    /* stash the status for /proc */
25006 +    if (!Found)
25007 +    {
25008 +       for (i = sizeof (dev->dev_sdramerrs)/sizeof (dev->dev_sdramerrs[0]) - 1; i > 0; i--)
25009 +           dev->dev_sdramerrs[i] = dev->dev_sdramerrs[i-1];
25010 +       dev->dev_sdramerrs[0] = status;
25011 +    }
25012 +
25013 +    printk ("elan%d: ECC Error %s\n", dev->dev_instance, elan4_sdramerr2str (dev, status, errstr));
25014 +
25015 +    if (!ECC_UncorrectableErr(status) && !ECC_MultUncorrectErrs(status))
25016 +       printk ("elan%d: ECC error data=%016llx\n", dev->dev_instance, elan4_sdram_readq (dev, ECC_Addr(status)));
25017 +
25018 +    if (ECC_CorrectableErr (status))
25019 +       BumpDevStat (dev, s_correctable_errors);
25020 +    if (ECC_MultCorrectErrs (status))
25021 +       BumpDevStat (dev, s_multiple_errors);
25022 +
25023 +    if (ECC_UncorrectableErr(status))
25024 +       panic ("elan%d: uncorrectable ECC error\n", dev->dev_instance);
25025 +    if (ECC_MultUncorrectErrs(status))
25026 +       panic ("elan%d: multiple uncorrectable ECC error\n", dev->dev_instance);
25027 +    
25028 +    PULSE_SYSCONTROL (dev, CONT_CLEAR_SDRAM_ERROR);
25029 +
25030 +    check_error_rate (dev);
25031 +}
25032 +
25033 +static void
25034 +clear_linkerr_led (void *arg)
25035 +{
25036 +    ELAN4_DEV *dev = (ELAN4_DEV *) arg;
25037 +
25038 +    write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_ClearLinkError);
25039 +}
25040 +
25041 +void
25042 +handle_linkerror (ELAN4_DEV *dev)
25043 +{
25044 +    E4_uint32 LinkState;
25045 +    E4_uint32 CurrState = read_reg32 (dev, LinkControlReg);
25046 +
25047 +    /* Set for reading errors. */
25048 +    write_reg32 (dev, LinkControlReg,
25049 +                 (CurrState = CurrState & ~((LCONT_TEST_CONTROL_MASK << LCONT_TEST_CONTROL_SHIFT) |
25050 +                                           (LCONT_TEST_VALUE_MASK << LCONT_TEST_VALUE_SHIFT))));
25051 +    LinkState = LCONT_LINK_STATE(CurrState = read_reg32 (dev, LinkControlReg));
25052 +
25053 +#ifdef DEBUG
25054 +    {
25055 +       E4_uint8 ErrorMsg[256], DataErrorVal[64];
25056 +
25057 +       strcpy (ErrorMsg, "handle_linkerror:");
25058 +       if (LinkState & LS_LockError)    strcat (ErrorMsg, " LockError");
25059 +       if (LinkState & LS_DeskewError)  strcat (ErrorMsg, " DeskewError");
25060 +       if (LinkState & LS_PhaseError)   strcat (ErrorMsg, " PhaseError");
25061 +       if (LinkState & LS_DataError)
25062 +       {
25063 +           E4_uint32 error[4];
25064 +           E4_uint32 i;
25065 +           strcat (ErrorMsg, " DataError");
25066 +           /* Errors */
25067 +           for(i = LRS_ErrorVal8to0; i <= LRS_ErrorVal35to27; i++)
25068 +           {
25069 +               write_reg32 (dev, LinkControlReg,
25070 +                            CurrState | LCONT_TEST_VALUE(i) | (LCONT_READ_STATE << LCONT_TEST_CONTROL_SHIFT));
25071 +               error[i - LRS_ErrorVal8to0] = LCONT_LINK_STATE(read_reg32 (dev, LinkControlReg));
25072 +           }
25073 +           sprintf (DataErrorVal, " Link State Error Val: %09llx %03x %03x %03x %03x", 
25074 +                    (unsigned long long) ((error[0] & 0x1ffUL) | ((error[1] & 0x1ffUL) << 9)  |
25075 +                                 ((error[2] & 0x1ffUL) << 18) | ((error[3] & 0x1ffUL) << 27)),
25076 +                    error[3], error[2], error[1], error[0]);
25077 +           strcat (ErrorMsg, DataErrorVal);
25078 +       }
25079 +       if (LinkState & LS_FifoOvFlow0)  strcat (ErrorMsg, " FifoOvFlow0");
25080 +       if (LinkState & LS_FifoOvFlow1)  strcat (ErrorMsg, " FifoOvFlow1");
25081 +       if (LinkState & LS_Mod45Changed)         strcat (ErrorMsg, " Mod45Changed");
25082 +       if (LinkState & LS_PAckNotSeenError) strcat (ErrorMsg, " PAckNotSeenError");
25083 +       strcat (ErrorMsg, "\n");
25084 +       PRINTF0 (DBG_DEVICE, DBG_INTR, ErrorMsg);
25085 +    }
25086 +#endif
25087 +
25088 +    BumpDevStat (dev, s_link_errors);
25089 +    
25090 +    if (LinkState & LS_LockError)       BumpDevStat (dev, s_lock_errors);
25091 +    if (LinkState & LS_DeskewError)     BumpDevStat (dev, s_deskew_errors);
25092 +    if (LinkState & LS_PhaseError)      BumpDevStat (dev, s_phase_errors);
25093 +    if (LinkState & LS_DataError)       BumpDevStat (dev, s_data_errors);
25094 +    if (LinkState & LS_FifoOvFlow0)     BumpDevStat (dev, s_fifo_overflow0);
25095 +    if (LinkState & LS_FifoOvFlow1)     BumpDevStat (dev, s_fifo_overflow1);
25096 +    if (LinkState & LS_Mod45Changed)    BumpDevStat (dev, s_mod45changed);
25097 +    if (LinkState & LS_PAckNotSeenError) BumpDevStat (dev, s_pack_not_seen);
25098 +
25099 +    PULSE_SCHED_RESTART (dev, SCH_ClearLinkErrorInt);
25100 +    
25101 +    /* schedule a timer to clear the link error LED, so that it stays on 
25102 +     * for a second for every link error that occurs */
25103 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && !timer_fn_queued (&dev->dev_linkerr_timeoutid))
25104 +       schedule_timer_fn (&dev->dev_linkerr_timeoutid, clear_linkerr_led, (void *) dev, HZ);
25105 +      
25106 +    check_error_rate (dev);
25107 +}
25108 +
25109 +void
25110 +handle_linkportkeyfail (ELAN4_DEV *dev)
25111 +{
25112 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "handle_linkportkeyfail\n");
25113 +
25114 +    BumpDevStat (dev, s_linkport_keyfail);
25115 +
25116 +    PULSE_SYSCONTROL (dev, CONT_CLEAR_LINKPORT_INT);
25117 +    
25118 +    check_error_rate (dev);
25119 +}
25120 +
25121 +
25122 +static __inline__ void
25123 +__elan4_4msi0 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
25124 +{
25125 +    unsigned long flags;
25126 +
25127 +    if (intreg & intmask & INT_MainInterrupt)
25128 +    {
25129 +       DISABLE_INT_MASK (dev, INT_MainInterrupt);
25130 +
25131 +       if (handle_mainints (dev, -1, elan4_mainint_punt_loops) == 0)
25132 +           ENABLE_INT_MASK (dev, INT_MainInterrupt);
25133 +       else
25134 +       {
25135 +           BumpDevStat (dev, s_mainint_punts);
25136 +           
25137 +           spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25138 +           kcondvar_wakeupone (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
25139 +           spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25140 +       }
25141 +    }
25142 +}
25143 +
25144 +static __inline__ void
25145 +__elan4_4msi1 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
25146 +{
25147 +    E4_uint32 restart = 0;
25148 +
25149 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi1: %x\n", intreg);
25150 +    
25151 +    spin_lock (&dev->dev_trap_lock);
25152 +    
25153 +    if (intreg & intmask & INT_CProc)
25154 +       restart |= handle_cproc_trap (dev);
25155 +    if (intreg & intmask & INT_EProc) 
25156 +       restart |= handle_eproc_trap (dev);
25157 +    if (intreg & intmask & INT_Dma0Proc) 
25158 +       restart |= handle_dproc_trap (dev, 0);
25159 +    if (intreg & intmask & INT_Dma1Proc) 
25160 +       restart |= handle_dproc_trap (dev, 1);
25161 +    if (intreg & intmask & INT_TProc)
25162 +       restart |= handle_tproc_trap (dev);
25163 +    
25164 +    PULSE_SCHED_RESTART (dev, restart);
25165 +    
25166 +    spin_unlock (&dev->dev_trap_lock);
25167 +    
25168 +    if (intreg & (INT_Halted|INT_Discarding))
25169 +       handle_haltints (dev, intreg);
25170 +}
25171 +
25172 +static __inline__ void
25173 +__elan4_4msi2 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
25174 +{
25175 +    E4_uint32 restart = 0;
25176 +
25177 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi2: %x\n", intreg);
25178 +    
25179 +    spin_lock (&dev->dev_trap_lock);
25180 +    if (intreg & intmask & INT_IProcCh0LowPri)
25181 +       restart |= handle_iproc_trap (dev, 0);
25182 +    
25183 +    if (intreg & intmask & INT_IProcCh1LowPri)
25184 +       restart |= handle_iproc_trap (dev, 1);
25185 +    
25186 +    if (intreg & intmask & INT_IProcCh0HighPri)
25187 +       restart |= handle_iproc_trap (dev, 2);
25188 +    
25189 +    if (intreg & intmask & INT_IProcCh1HighPri)
25190 +       restart |= handle_iproc_trap (dev, 3);
25191 +    
25192 +    PULSE_SCHED_RESTART (dev, restart);
25193 +    
25194 +    spin_unlock (&dev->dev_trap_lock);
25195 +}
25196 +
25197 +static __inline__ void
25198 +__elan4_4msi3 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
25199 +{
25200 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi3: %x\n", intreg);
25201 +    
25202 +    if (intreg & intmask & INT_PciMemErr)
25203 +       handle_pcimemerr (dev);
25204 +    
25205 +    if (intreg & intmask & INT_SDRamInt)
25206 +       handle_sdramint (dev);
25207 +    
25208 +    if (intreg & intmask & INT_LinkError)
25209 +       handle_linkerror (dev);
25210 +    
25211 +    if (intreg & intmask & INT_LinkPortKeyFail)
25212 +       handle_linkportkeyfail (dev);
25213 +}
25214 +
25215 +int
25216 +elan4_1msi0 (ELAN4_DEV *dev)
25217 +{
25218 +    E4_uint32 intmask = dev->dev_intmask;
25219 +    E4_uint32 intreg;
25220 +
25221 +    if (intmask == 0 || ((intreg = read_reg32 (dev, InterruptReg)) & intmask) == 0)
25222 +       return (0);
25223 +
25224 +    BumpDevStat (dev, s_interrupts);
25225 +    
25226 +    do {
25227 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "elan4_1msi0: %x\n", intreg);
25228 +
25229 +       if (intreg & intmask & INT_MSI0)
25230 +           __elan4_4msi0(dev, intreg, intmask);
25231 +       if (intreg & intmask & INT_MSI1)
25232 +           __elan4_4msi1(dev, intreg, intmask);
25233 +       if (intreg & intmask & INT_MSI2)
25234 +           __elan4_4msi2(dev, intreg, intmask); 
25235 +       if (intreg & intmask & INT_MSI3)
25236 +           __elan4_4msi3(dev, intreg, intmask);
25237 +
25238 +       /* must ensure that the read of the interrupt mask
25239 +        * completes before the read of the interrupt register
25240 +        * since the main interrupt thread clears its interrupt
25241 +        * and then re-enables it in the interrupt mask. */
25242 +       intmask = dev->dev_intmask;
25243 +       mb();
25244 +       intreg = read_reg32 (dev, InterruptReg);
25245 +
25246 +    } while ((intreg & intmask) != 0);
25247 +
25248 +    return (1);
25249 +}
25250 +
25251 +/* local context management */
25252 +int
25253 +elan4_insertctxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt, ELAN4_TRAP_OPS *ops)
25254 +{
25255 +    unsigned long flags;
25256 +    int tbl;
25257 +
25258 +    ctxt->ctxt_dev = dev;
25259 +    ctxt->ctxt_ops = ops;
25260 +
25261 +    INIT_LIST_HEAD (&ctxt->ctxt_cqalist);
25262 +    spin_lock_init (&ctxt->ctxt_mmulock);
25263 +
25264 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
25265 +    {
25266 +       KMEM_ZALLOC (ctxt->ctxt_mmuhash[tbl], ELAN4_HASH_ENTRY **,  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *), 1);
25267 +       
25268 +       if (ctxt->ctxt_mmuhash[tbl] == NULL)
25269 +       {
25270 +           if (tbl != 0)
25271 +               KMEM_FREE (ctxt->ctxt_mmuhash[0], dev->dev_hashsize[0] * sizeof (ELAN4_HASH_ENTRY *));
25272 +           spin_lock_destroy (&ctxt->ctxt_mmulock);
25273 +           return (-ENOMEM);
25274 +       }
25275 +    }
25276 +
25277 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
25278 +
25279 +    if ((ctxt->ctxt_num = bt_freebit (dev->dev_ctxmap, (1 << dev->dev_ctxtableshift))) >= 0)
25280 +    {
25281 +       /* chain onto the lists of all contexts */
25282 +       list_add (&ctxt->ctxt_link, &dev->dev_ctxt_list);
25283 +
25284 +       BT_SET (dev->dev_ctxmap, ctxt->ctxt_num);
25285 +    }
25286 +    
25287 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25288 +
25289 +    return (ctxt->ctxt_num < 0 ? -ENOMEM : 0);
25290 +}
25291 +
25292 +void
25293 +elan4_removectxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt)
25294 +{
25295 +    unsigned long flags;
25296 +    int tbl;
25297 +
25298 +    /* remove from list of contexts */
25299 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
25300 +
25301 +    list_del (&ctxt->ctxt_link);
25302 +
25303 +    BT_CLEAR (dev->dev_ctxmap, ctxt->ctxt_num);
25304 +
25305 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25306 +
25307 +    spin_lock_destroy (&ctxt->ctxt_info_lock);
25308 +
25309 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
25310 +       KMEM_FREE (ctxt->ctxt_mmuhash[tbl],  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *));
25311 +
25312 +    spin_lock_destroy (&ctxt->ctxt_mmulock);
25313 +}
25314 +
25315 +ELAN4_CTXT *
25316 +elan4_localctxt (ELAN4_DEV *dev, unsigned num)
25317 +{
25318 +    struct list_head *entry;
25319 +    unsigned long flags;
25320 +
25321 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
25322 +
25323 +    list_for_each (entry, &dev->dev_ctxt_list) {
25324 +       ELAN4_CTXT *ctxt = list_entry (entry, ELAN4_CTXT, ctxt_link);
25325 +       
25326 +       if (ctxt->ctxt_num == num)
25327 +       {
25328 +           spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25329 +           return (ctxt);
25330 +       }
25331 +    }
25332 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25333 +
25334 +    return ((ELAN4_CTXT *) NULL);
25335 +}
25336 +
25337 +ELAN4_CTXT *
25338 +elan4_networkctxt (ELAN4_DEV *dev, unsigned num)
25339 +{
25340 +    E4_uint32 filter = elan4_read_filter (dev, num);
25341 +    
25342 +    if ((filter & E4_FILTER_CONTEXT_MASK) == INVALID_CONTEXT)
25343 +       return NULL;
25344 +    else
25345 +       return elan4_localctxt (dev, filter & E4_FILTER_CONTEXT_MASK);
25346 +}
25347 +
25348 +/* network context management */
25349 +int
25350 +elan4_attach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum)
25351 +{
25352 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
25353 +    int               res = 0;
25354 +    E4_uint32         filter;
25355 +    unsigned long     flags;
25356 +    
25357 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
25358 +    
25359 +    filter = elan4_read_filter (dev, ctxnum);
25360 +    if ((filter & E4_FILTER_CONTEXT_MASK) != INVALID_CONTEXT)
25361 +    {
25362 +       PRINTF2 (ctxt, DBG_NETWORK_CTX, "elan4_attach_filter: ctx=%d filter=%x -> EBUSY\n", ctxnum, filter);
25363 +       res = -EBUSY;
25364 +    }
25365 +    else
25366 +    {
25367 +       PRINTF1 (ctxt, DBG_NETWORK_CTX, "elan4_attach_filter: ctx=%d - SUCCESS\n", ctxnum);
25368 +
25369 +       elan4_write_filter (dev, ctxnum, ctxt->ctxt_num | E4_FILTER_DISCARD_ALL);
25370 +       PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
25371 +    }
25372 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25373 +    
25374 +    return (res);
25375 +}
25376 +
25377 +void
25378 +elan4_detach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum)
25379 +{
25380 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25381 +
25382 +    PRINTF1 (ctxt, DBG_NETWORK_CTX, "elan4_detach_filter: detach from network context %d\n", ctxnum);
25383 +           
25384 +    elan4_write_filter (dev, ctxnum, INVALID_CONTEXT | E4_FILTER_DISCARD_ALL);
25385 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
25386 +}
25387 +
25388 +void
25389 +elan4_set_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum, E4_uint32 state)
25390 +{
25391 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25392 +
25393 +    PRINTF6 (ctxt, DBG_NETWORK_CTX, "elan4_set_filter: set filter state %x for network context %d <%s%s%s%s>\n", state, ctxnum,
25394 +            (state & E4_FILTER_DISCARD_ALL) ? "discard,"  : "",
25395 +            (state & E4_FILTER_ACKOK_ALL)   ? "ack-ok,"   : "",
25396 +            (state & E4_FILTER_HIGH_PRI)    ? "high-pri," : "",
25397 +            (state & E4_FILTER_STATS)       ? "stats,"    : "");
25398 +           
25399 +    elan4_write_filter (dev, ctxnum, ctxt->ctxt_num | state);
25400 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
25401 +}
25402 +
25403 +void
25404 +elan4_set_routetable (ELAN4_CTXT *ctxt, ELAN4_ROUTE_TABLE *tbl)
25405 +{
25406 +    ELAN4_DEV *dev   = ctxt->ctxt_dev;
25407 +    E4_uint32  value = tbl ? (E4_VPT_VALID | E4_VPT_VALUE(tbl->tbl_entries, tbl->tbl_size)) : 0;
25408 +    
25409 +    /* and insert into the vp table */
25410 +    elan4_sdram_writel (dev, (dev->dev_ctxtable + (ctxt->ctxt_num * sizeof (E4_ContextControlBlock)) +
25411 +                       offsetof (E4_ContextControlBlock, VirtualProcessTable)), value);
25412 +    pioflush_sdram(dev);
25413 +
25414 +    PULSE_SYSCONTROL (dev, CONT_ROUTE_FLUSH);
25415 +}
25416 +
25417 +/* command queue management */
25418 +ELAN4_CQA *
25419 +elan4_getcqa (ELAN4_CTXT *ctxt, unsigned int idx)
25420 +{
25421 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25422 +    struct list_head *el;
25423 +
25424 +    spin_lock (&dev->dev_cqlock);
25425 +    list_for_each (el, &ctxt->ctxt_cqalist) {
25426 +       ELAN4_CQA *cqa = list_entry (el, ELAN4_CQA, cqa_link);
25427 +       
25428 +       if (cqa->cqa_idx == idx)
25429 +       {
25430 +           cqa->cqa_ref++;
25431 +
25432 +           spin_unlock (&dev->dev_cqlock);
25433 +           return cqa;
25434 +       }
25435 +    }
25436 +    spin_unlock (&dev->dev_cqlock);
25437 +    return NULL;
25438 +}
25439 +
25440 +void
25441 +elan4_putcqa (ELAN4_CTXT *ctxt, unsigned int idx)
25442 +{
25443 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25444 +    struct list_head *el, *nel;
25445 +
25446 +    spin_lock (&dev->dev_cqlock);
25447 +    list_for_each_safe (el, nel, &ctxt->ctxt_cqalist) {
25448 +       ELAN4_CQA *cqa = list_entry (el, ELAN4_CQA, cqa_link);
25449 +       
25450 +       if (cqa->cqa_idx == idx)
25451 +       {
25452 +           if (--cqa->cqa_ref || bt_lowbit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA) != -1)
25453 +               spin_unlock (&dev->dev_cqlock);
25454 +           else
25455 +           {
25456 +               list_del (&cqa->cqa_link);
25457 +               
25458 +               BT_CLEAR (ctxt->ctxt_cqamap, cqa->cqa_idx);
25459 +               BT_CLEAR (dev->dev_cqamap, cqa->cqa_cqnum/ELAN4_CQ_PER_CQA);
25460 +               spin_unlock (&dev->dev_cqlock);
25461 +               
25462 +               KMEM_FREE (cqa, sizeof (ELAN4_CQA));
25463 +           }
25464 +           return;
25465 +       }
25466 +    }
25467 +    spin_unlock (&dev->dev_cqlock);
25468 +
25469 +    printk ("elan4_putcqa: idx %d not found\n", idx);
25470 +    BUG();
25471 +}
25472 +
25473 +static ELAN4_CQ *
25474 +elan4_getcq (ELAN4_CTXT *ctxt, unsigned int type)
25475 +{
25476 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
25477 +    ELAN4_CQA        *cqa;
25478 +    struct list_head *el;
25479 +    int                      cidx, didx;
25480 +
25481 +    spin_lock (&dev->dev_cqlock);
25482 +    list_for_each (el, &ctxt->ctxt_cqalist) {
25483 +       cqa = list_entry (el, ELAN4_CQA, cqa_link);
25484 +
25485 +       if (cqa->cqa_type == type && (cidx = bt_freebit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA)) >=0)
25486 +       {
25487 +           BT_SET (cqa->cqa_bitmap, cidx);
25488 +           
25489 +           spin_unlock (&dev->dev_cqlock);
25490 +           return &cqa->cqa_cq[cidx];
25491 +       }
25492 +    }
25493 +    spin_unlock (&dev->dev_cqlock);
25494 +
25495 +    /* allocate a new cqa and its chunk of command queue descriptors */
25496 +    KMEM_ZALLOC (cqa, ELAN4_CQA *, sizeof (ELAN4_CQA), 1);
25497 +    if (cqa == NULL)
25498 +       return NULL;
25499 +
25500 +    spin_lock (&dev->dev_cqlock);
25501 +    cidx = bt_freebit (ctxt->ctxt_cqamap, ELAN4_MAX_CQA);
25502 +
25503 +    /* On architectures which have MTRR registers for write-combining
25504 +     * the top command queues from dev->dev_cqreorder upwards are
25505 +     * used for reordered queues.  Without MTRR registers any page
25506 +     * sized group can use write combining through the ptes. */
25507 +#ifdef CONFIG_MTRR
25508 +    if ((type & CQ_Reorder) != 0)
25509 +       didx = bt_nextbit (dev->dev_cqamap, dev->dev_cqcount, dev->dev_cqreorder - 1, 0);
25510 +    else
25511 +       didx = bt_freebit (dev->dev_cqamap, dev->dev_cqreorder);
25512 +#else
25513 +    didx = bt_freebit (dev->dev_cqamap, dev->dev_cqcount);
25514 +#endif
25515 +
25516 +    if (cidx < 0 || didx < 0)
25517 +    {
25518 +       spin_unlock (&dev->dev_cqlock);
25519 +       KMEM_FREE (cqa, sizeof (ELAN4_CQA));
25520 +       return NULL;
25521 +    }
25522 +
25523 +    BT_SET (ctxt->ctxt_cqamap, cidx);
25524 +    BT_SET (dev->dev_cqamap, didx);
25525 +
25526 +    cqa->cqa_idx   = cidx;
25527 +    cqa->cqa_type  = type;
25528 +    cqa->cqa_cqnum = (didx * ELAN4_CQ_PER_CQA);
25529 +    
25530 +    list_add_tail (&cqa->cqa_link, &ctxt->ctxt_cqalist);
25531 +    
25532 +    /* initialise the cqa struct */
25533 +    for (cidx = 0; cidx < ELAN4_CQ_PER_CQA; cidx++)
25534 +    {
25535 +       cqa->cqa_cq[cidx].cq_idx = cidx;
25536 +       cqa->cqa_cq[cidx].cq_cqa = cqa;
25537 +    }
25538 +
25539 +    /* no mappings yet */
25540 +    cqa->cqa_ref = 0;
25541 +
25542 +    /* we're going to return entry zero */
25543 +    BT_SET (cqa->cqa_bitmap, 0);
25544 +    spin_unlock (&dev->dev_cqlock);
25545 +    
25546 +    return &cqa->cqa_cq[0];
25547 +}
25548 +
25549 +static void
25550 +elan4_putcq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq)
25551 +{
25552 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
25553 +    ELAN4_CQA        *cqa = cq->cq_cqa;
25554 +
25555 +    spin_lock (&dev->dev_cqlock);
25556 +
25557 +    BT_CLEAR (cqa->cqa_bitmap, cq->cq_idx);
25558 +
25559 +    if (bt_lowbit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA) != -1 || cqa->cqa_ref)
25560 +       spin_unlock (&dev->dev_cqlock);
25561 +    else
25562 +    {
25563 +       list_del (&cqa->cqa_link);
25564 +       
25565 +       BT_CLEAR (ctxt->ctxt_cqamap, cqa->cqa_idx);
25566 +       BT_CLEAR (dev->dev_cqamap, cqa->cqa_cqnum/ELAN4_CQ_PER_CQA);
25567 +       spin_unlock (&dev->dev_cqlock);
25568 +       
25569 +       KMEM_FREE (cqa, sizeof (ELAN4_CQA));
25570 +    }
25571 +}
25572 +
25573 +ELAN4_CQ *
25574 +elan4_alloccq (ELAN4_CTXT *ctxt, unsigned cqsize, unsigned perm, unsigned cqtype)
25575 +{
25576 +    ELAN4_DEV   *dev = ctxt->ctxt_dev;
25577 +    ELAN4_CQ    *cq;
25578 +    int         cqnum;
25579 +    sdramaddr_t cqdesc;
25580 +    unsigned    offset;
25581 +    E4_uint64   value;
25582 +
25583 +    if ((cq = elan4_getcq (ctxt, cqtype)) == NULL)
25584 +       return NULL;
25585 +
25586 +    cqnum = elan4_cq2num(cq);
25587 +    
25588 +    cq->cq_space = elan4_sdram_alloc (dev, CQ_Size(cqsize));
25589 +    if (cq->cq_space == (virtaddr_t) 0)
25590 +    {
25591 +       elan4_putcq (ctxt, cq);
25592 +       return (NULL);
25593 +    }
25594 +
25595 +    cq->cq_size   = cqsize;
25596 +    cq->cq_perm   = perm;
25597 +    
25598 +    /* and finally initialise the command queue descriptor */
25599 +    cqdesc = dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc));
25600 +
25601 +    value  = CQ_QueuePtrsValue (cqsize, cq->cq_space, cq->cq_space);
25602 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
25603 +       value |= ((cqtype & CQ_Priority) ? CQ_RevA_Priority : 0);
25604 +    else
25605 +       value |= (((cqtype & CQ_Priority) ? CQ_RevB_Priority : 0) | 
25606 +                 ((cqtype & CQ_Reorder)  ? CQ_RevB_ReorderingQueue : CQ_RevB_32bitWriteQueue));
25607 +
25608 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs), value);
25609 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue), 0);
25610 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers), 0);
25611 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control), CQ_ControlValue (ctxt->ctxt_num, 2, perm));
25612 +    pioflush_sdram (dev);
25613 +
25614 +    offset = (cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize;
25615 +
25616 +    cq->cq_mapping = elan4_map_device (dev, ELAN4_BAR_REGISTERS, (offset & ~(PAGE_SIZE-1)), 
25617 +                                      PAGE_SIZE, &cq->cq_handle) + (offset & (PAGE_SIZE-1));
25618 +#ifdef CONFIG_MPSAS
25619 +    if (ctxt == &dev->dev_ctxt)
25620 +       return (cq);
25621 +#endif
25622 +
25623 +    elan4_sdram_flushcache (dev, cq->cq_space, CQ_Size(cqsize));
25624 +
25625 +    return (cq);
25626 +}
25627 +    
25628 +void
25629 +elan4_freecq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq)
25630 +{
25631 +    ELAN4_DEV *dev    = ctxt->ctxt_dev;
25632 +    unsigned   offset = (elan4_cq2num(cq) + dev->dev_cqoffset) * CQ_CommandMappingSize;
25633 +
25634 +    elan4_flushcq (dev, cq);
25635 +
25636 +    elan4_unmap_device (dev, cq->cq_mapping - (offset & (PAGE_SIZE-1)), PAGE_SIZE, &cq->cq_handle);
25637 +    elan4_sdram_free (dev, cq->cq_space, CQ_Size (cq->cq_size));
25638 +
25639 +    elan4_putcq (ctxt, cq);
25640 +}
25641 +
25642 +void
25643 +elan4_restartcq (ELAN4_DEV *dev, ELAN4_CQ *cq)
25644 +{
25645 +    sdramaddr_t   cqdesc = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
25646 +    int           hipri;
25647 +    unsigned long flags;
25648 +    
25649 +    PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restarting cq %p\n", cq);
25650 +    
25651 +    spin_lock_irqsave (&dev->dev_requeue_lock, flags);
25652 +
25653 +    while (read_reg32 (dev, CommandControl.CommandRequeuePtr) & E4_CommandRequeueBusy)
25654 +       ;
25655 +    
25656 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
25657 +       hipri = (elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)) & CQ_RevA_Priority) != 0;
25658 +    else
25659 +       hipri = (elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)) & CQ_RevB_Priority) != 0;
25660 +    
25661 +    if (hipri)
25662 +    {
25663 +       PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restart cq %d as high pri\n", elan4_cq2num(cq));
25664 +       write_reg32 (dev, CommandControl.CommandRequeuePtr, cqdesc | E4_CommandRequeueHighPri);
25665 +    }
25666 +    else
25667 +    {
25668 +       PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restart cq %d as low pri\n", elan4_cq2num(cq));
25669 +       write_reg32 (dev, CommandControl.CommandRequeuePtr, cqdesc);
25670 +    }
25671 +    pioflush_reg (dev);
25672 +    
25673 +    spin_unlock_irqrestore (&dev->dev_requeue_lock, flags);
25674 +}
25675 +
25676 +static void
25677 +flushcq_intop (ELAN4_DEV *dev, void *arg)
25678 +{
25679 +    unsigned long flags;
25680 +
25681 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
25682 +    dev->dev_flush_finished |= (1 << (unsigned long) arg);
25683 +    kcondvar_wakeupall (&dev->dev_flush_wait, &dev->dev_flush_lock);
25684 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
25685 +}
25686 +void
25687 +elan4_flushcq (ELAN4_DEV *dev, ELAN4_CQ *cq)
25688 +{
25689 +    int                  flushqnum = elan4_cq2num(cq) & (COMMAND_INSERTER_CACHE_ENTRIES-1);
25690 +    ELAN4_CQ     *flushq    = dev->dev_flush_cq[flushqnum];
25691 +    unsigned long flags;
25692 +
25693 +    PRINTF (DBG_DEVICE, DBG_FLUSH, "elan4_flushcq: cqnum=%d\n", elan4_cq2num(cq));
25694 +
25695 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
25696 +
25697 +    while (! (dev->dev_flush_finished & (1 << flushqnum)))
25698 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
25699 +    
25700 +    dev->dev_flush_finished &= ~(1 << flushqnum);
25701 +
25702 +    dev->dev_flush_op[flushqnum].op_function = flushcq_intop;
25703 +    dev->dev_flush_op[flushqnum].op_arg      = (void *) (unsigned long) flushqnum;
25704 +    
25705 +    elan4_queue_intop (dev, flushq, &dev->dev_flush_op[flushqnum]);
25706 +
25707 +    while (! (dev->dev_flush_finished & (1 << flushqnum)))
25708 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
25709 +    
25710 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
25711 +}
25712 +
25713 +void
25714 +elan4_updatecq (ELAN4_DEV *dev, ELAN4_CQ *cq, unsigned perm, unsigned restart)
25715 +{
25716 +    sdramaddr_t cqdesc  = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
25717 +    E4_uint32   control = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
25718 +
25719 +    /* Write the command queues control word, but ensure that the ChannelNotCompleted fields
25720 +     * are not modified.   We use this to just alter the RestartCount/Permissions fields */
25721 +
25722 +    elan4_sdram_writel (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control), 
25723 +                       CQ_ControlValue (CQ_Context (control), restart ? restart : CQ_RestartCount (control), perm));
25724 +}
25725 +
25726 +/* instruction cache flush */
25727 +static __inline__ void
25728 +elan4_flush_icache_locked (ELAN4_DEV *dev)
25729 +{
25730 +    int i, j;
25731 +
25732 +    PRINTF0 (DBG_DEVICE, DBG_FLUSH, "elan4_flush_icache_locked: flushing icache\n");
25733 +
25734 +    for (i = 0; i < (E4_ICacheLines/E4_ICachePortSize); i++)
25735 +    {
25736 +        write_reg64 (dev, ICachePort_Cntl_Addr, i << E4_ICacheTagAddrShift);
25737 +        for (j = 0; j < E4_ICachePortSize; j++)
25738 +           write_reg64 (dev, ICachePort[j], E4_InvalidTagValue);
25739 +    }
25740 +
25741 +    /*
25742 +     * Initialise the top of the ICache Set0 with a instruction which will
25743 +     * cause a known trap fingerprint so that the application can identify it
25744 +     * and ignore the trap.
25745 +     */
25746 +    write_reg64 (dev, ICachePort_Cntl_Addr, E4_ICacheFixupOffset | E4_AccessICacheRams);
25747 +
25748 +    /* Errata 24: must ensure that the DCache is flushed after loading 
25749 +     *            code for the thread processor. */
25750 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
25751 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
25752 +
25753 +    pioflush_reg (dev);
25754 +}
25755 +
25756 +static void
25757 +device_iflush_haltop (ELAN4_DEV *dev, void *arg)
25758 +{
25759 +    unsigned long flags;
25760 +
25761 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
25762 +    
25763 +    elan4_flush_icache_locked (dev);
25764 +
25765 +    dev->dev_iflush_queued = 0;
25766 +
25767 +    kcondvar_wakeupall (&dev->dev_flush_wait, &dev->dev_flush_lock);
25768 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
25769 +}
25770 +
25771 +void
25772 +elan4_flush_icache_halted (ELAN4_CTXT *ctxt)
25773 +{
25774 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25775 +    unsigned long flags;
25776 +
25777 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
25778 +    
25779 +    elan4_flush_icache_locked (dev);
25780 +
25781 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
25782 +}
25783 +
25784 +void
25785 +elan4_flush_icache (ELAN4_CTXT *ctxt)
25786 +{
25787 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25788 +    unsigned long flags;
25789 +    
25790 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
25791 +
25792 +    PRINTF1 (DBG_DEVICE, DBG_FLUSH, "elan4_flush_icache: queued=%d\n", dev->dev_iflush_queued);
25793 +
25794 +    if (! dev->dev_iflush_queued)
25795 +    {
25796 +       dev->dev_iflush_queued = 1;
25797 +       
25798 +       elan4_queue_haltop (dev, &dev->dev_iflush_haltop);
25799 +    }
25800 +
25801 +    while (dev->dev_iflush_queued)
25802 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
25803 +
25804 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
25805 +}
25806 +
25807 +/* device context operations */
25808 +static void
25809 +device_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
25810 +{
25811 +    ELAN4_DEV        *dev  = ctxt->ctxt_dev;
25812 +    ELAN4_CPROC_TRAP *trap = &dev->dev_cproc_trap;
25813 +
25814 +    elan4_extract_cproc_trap (dev, status, trap, cqnum);
25815 +
25816 +    DBGCMD (DBG_DEVICE, DBG_FLUSH, elan4_display_cproc_trap (DBG_DEVICE, DBG_FLUSH, "device_cproc_trap", trap));
25817 +
25818 +    switch (CPROC_TrapType (trap->tr_status))
25819 +    {
25820 +    case CommandProcInterruptQueueOverflow:
25821 +       PRINTF (ctxt, DBG_FLUSH, "device_cproc_trap: cqnum=%d\n", cqnum);
25822 +
25823 +       /* XXXX: we could either just hit restart (and hope) - or we could extract
25824 +        *       the event interrupt cookie out and "complete" the command before
25825 +        *       restarting it */
25826 +       elan4_restartcq (dev, dev->dev_flush_cq[cqnum]);
25827 +       return;
25828 +
25829 +    case CommandProcDmaQueueOverflow:
25830 +    case CommandProcPermissionTrap:
25831 +       handle_dma_flushops (dev, status, cqnum);
25832 +       return;
25833 +       
25834 +    default:
25835 +       printk ("device_cproc_trap: status=%llx control=%llx TrapType=%x cqnum=%d\n", (long long) trap->tr_status,
25836 +               elan4_sdram_readq (dev, dev->dev_cqaddr + cqnum * sizeof (E4_CommandQueueDesc) +
25837 +                                  offsetof (E4_CommandQueueDesc, CQ_Control)),
25838 +               (int) CPROC_TrapType(trap->tr_status), cqnum);
25839 +       panic ("device_cproc_trap");
25840 +    }
25841 +}
25842 +
25843 +static void
25844 +device_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
25845 +{
25846 +    ELAN4_TPROC_TRAP trap;
25847 +
25848 +    elan4_extract_tproc_trap (ctxt->ctxt_dev, status, &trap);
25849 +
25850 +    elan4_display_tproc_trap (DBG_CONSOLE, DBG_TRAP, "device_tproc_trap", &trap);
25851 +    panic ("device_tproc_trap");
25852 +}
25853 +
25854 +static void
25855 +device_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
25856 +{
25857 +    ELAN4_DPROC_TRAP trap;
25858 +
25859 +    elan4_extract_dproc_trap (ctxt->ctxt_dev, status, &trap, unit);
25860 +
25861 +    elan4_display_dproc_trap (DBG_CONSOLE, DBG_TRAP, "device_dproc_trap", &trap);
25862 +    panic ("device_dproc_trap");
25863 +}
25864 +
25865 +static void
25866 +device_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
25867 +{
25868 +    ELAN4_DEV *dev = (ELAN4_DEV *) ctxt;
25869 +    struct list_head *el,*nel;
25870 +    unsigned long flags;
25871 +
25872 +    PRINTF (ctxt, DBG_FLUSH, "device_interrupt: cookie=%llx\n", cookie);
25873 +
25874 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
25875 +    list_for_each_safe (el, nel, &dev->dev_intop_list) {
25876 +       ELAN4_INTOP *op = list_entry (el, ELAN4_INTOP, op_link);
25877 +
25878 +       if (op->op_cookie == cookie)
25879 +       {
25880 +           if ((op->op_cookie & INTOP_TYPE_MASK) == INTOP_ONESHOT)
25881 +               list_del (&op->op_link);
25882 +
25883 +           spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
25884 +           
25885 +           (*op->op_function)(dev, op->op_arg);
25886 +           return;
25887 +       }
25888 +    }
25889 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
25890 +
25891 +    panic ("device_interrupt: interrupt cookie %llx not found\n", (long long) cookie);
25892 +}
25893 +
25894 +static void
25895 +device_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
25896 +{
25897 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25898 +    ELAN4_IPROC_TRAP *trap = &dev->dev_iproc_trap;
25899 +
25900 +    elan4_extract_iproc_trap (dev, status, trap, unit);
25901 +    elan4_inspect_iproc_trap (trap);
25902 +
25903 +    DBGCMD (ctxt, DBG_IPROC, elan4_display_iproc_trap (ctxt, DBG_IPROC, "device_iproc_trap", trap));
25904 +
25905 +    if (elan4_neterr_iproc_trap (dev, trap))
25906 +       return;
25907 +
25908 +    elan4_display_iproc_trap (DBG_CONSOLE, DBG_TRAP, "device_iproc_trap", trap);
25909 +    panic ("device_iproc_trap: unexpected trap\n");
25910 +}
25911 +
25912 +ELAN4_TRAP_OPS device_trap_ops = 
25913 +{
25914 +    NULL,
25915 +    device_cproc_trap,
25916 +    device_dproc_trap,
25917 +    device_tproc_trap,
25918 +    device_iproc_trap,
25919 +    device_interrupt,
25920 +};
25921 +
25922 +/*
25923 + * elan4_initialise_device
25924 + *    initialise the ELAN4_DEV struct - spinlocks,cvs etc.
25925 + *    map the registers, sdram etc
25926 + */
25927 +int
25928 +elan4_initialise_device (ELAN4_DEV *dev)
25929 +{
25930 +    int i, bit;
25931 +
25932 +    if (elan4_mainint_resched_ticks == 0)
25933 +       elan4_mainint_resched_ticks = (hz/4);
25934 +
25935 +    /* map the registers */
25936 +    switch (dev->dev_devinfo.dev_revision_id)
25937 +    {
25938 +    case PCI_REVISION_ID_ELAN4_REVA:
25939 +       dev->dev_regs = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVA_REG_OFFSET, ELAN4_REG_SIZE, &dev->dev_regs_handle);
25940 +       
25941 +       dev->dev_rom  = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVA_EBUS_OFFSET + ELAN4_REVA_EBUS_ROM_OFFSET, 
25942 +                                         ELAN4_REVA_EBUS_ROM_SIZE, &dev->dev_rom_handle);
25943 +       break;
25944 +       
25945 +    case PCI_REVISION_ID_ELAN4_REVB:
25946 +       dev->dev_regs = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVB_REG_OFFSET, ELAN4_REG_SIZE, &dev->dev_regs_handle);
25947 +       dev->dev_rom  = (ioaddr_t) 0;
25948 +       dev->dev_i2c  = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVB_I2C_OFFSET, ELAN4_REVB_I2C_SIZE, &dev->dev_i2c_handle);
25949 +       break;
25950 +
25951 +    default:
25952 +       return -EINVAL;
25953 +    }
25954 +
25955 +    /* XXXX: parse the ebus rom to determine the sdram configuration */
25956 +    {
25957 +       extern long long       sdram_cfg;
25958 +
25959 +       if (sdram_cfg == 0)
25960 +           dev->dev_sdram_cfg = SDRAM_STARTUP_VALUE;
25961 +       else
25962 +           dev->dev_sdram_cfg = sdram_cfg;
25963 +    }
25964 +
25965 +    for (bit = 0; ((1 << bit) & elan4_resource_len (dev, ELAN4_BAR_SDRAM)) == 0; bit++)
25966 +       ;
25967 +
25968 +    switch ((dev->dev_sdram_cfg >> SDRAM_RamSize_SH) & 3)
25969 +    {
25970 +    case 0:                    /* 64Mbit, 128Mbit, 256Mbit, 512Mbit or 1Gbit (16-bit output) */
25971 +       dev->dev_sdram_numbanks = 4; bit -= 2;
25972 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
25973 +       {
25974 +           dev->dev_sdram_banks[i].b_base = (i << bit);
25975 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
25976 +       }
25977 +       break;
25978 +
25979 +    case 1:                    /*  64Mbit, 128Mbit, 256Mbit or 512Mbit (8-bit output) */
25980 +       dev->dev_sdram_numbanks = 4; bit -= 2;
25981 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
25982 +       {
25983 +           dev->dev_sdram_banks[i].b_base = ((i & 2) << (bit)) | ((i & 1) << (bit-1));
25984 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
25985 +       }
25986 +       break;
25987 +       
25988 +    case 2:                    /* 2Gbit (16-bit output) or 1Gbit (8-bit output) */
25989 +       dev->dev_sdram_numbanks = 2; bit--;
25990 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
25991 +       {
25992 +           dev->dev_sdram_banks[i].b_base = (i << bit);
25993 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
25994 +       }
25995 +       break;
25996 +
25997 +    case 3:                    /* 4Gbit (16-bit output) or 2Gbit (8-bit output) */
25998 +       dev->dev_sdram_numbanks = 1;
25999 +       dev->dev_sdram_banks[0].b_base = 0;
26000 +       dev->dev_sdram_banks[0].b_size = (1 << bit);
26001 +       break;
26002 +    }
26003 +
26004 +    elan4_sdram_init (dev);
26005 +
26006 +    /* initialise locks for classes of interrupts */
26007 +    spin_lock_init (&dev->dev_trap_lock);
26008 +    spin_lock_init (&dev->dev_intop_lock);
26009 +    spin_lock_init (&dev->dev_haltop_lock);
26010 +    spin_lock_init (&dev->dev_mainint_lock);
26011 +
26012 +    /* initialise other locks */
26013 +    spin_lock_init (&dev->dev_i2c_lock);
26014 +
26015 +    spin_lock_init (&dev->dev_mmulock);
26016 +    spin_lock_init (&dev->dev_cqlock);
26017 +    spin_lock_init (&dev->dev_ctxlock);
26018 +
26019 +    spin_lock_init (&dev->dev_intmask_lock);
26020 +    spin_lock_init (&dev->dev_syscontrol_lock);
26021 +
26022 +    spin_lock_init (&dev->dev_ctxt_lock);
26023 +    spin_lock_init (&dev->dev_flush_lock);
26024 +    spin_lock_init (&dev->dev_requeue_lock);
26025 +
26026 +    kmutex_init (&dev->dev_lock);
26027 +
26028 +    kcondvar_init (&dev->dev_mainint_wait);
26029 +    kcondvar_init (&dev->dev_flush_wait);
26030 +
26031 +    /* initialise lists */
26032 +    INIT_LIST_HEAD (&dev->dev_ctxt_list);
26033 +    INIT_LIST_HEAD (&dev->dev_intop_list);
26034 +    INIT_LIST_HEAD (&dev->dev_interruptq_list);
26035 +    INIT_LIST_HEAD (&dev->dev_hc_list);
26036 +    INIT_LIST_HEAD (&dev->dev_haltop_list);
26037 +    INIT_LIST_HEAD (&dev->dev_dma_flushop[0].list);
26038 +    INIT_LIST_HEAD (&dev->dev_dma_flushop[1].list);
26039 +
26040 +    dev->dev_state = ELAN4_STATE_STOPPED;
26041 +
26042 +    return (0);
26043 +}
26044 +
26045 +void
26046 +elan4_finalise_device (ELAN4_DEV *dev)
26047 +{
26048 +    kcondvar_destroy (&dev->dev_flush_wait);
26049 +    kcondvar_destroy (&dev->dev_mainint_wait);
26050 +
26051 +    kmutex_destroy (&dev->dev_lock);
26052 +
26053 +    spin_lock_destroy (&dev->dev_requeue_lock);
26054 +    spin_lock_destroy (&dev->dev_flush_lock);
26055 +    spin_lock_destroy (&dev->dev_ctxt_lock);
26056 +
26057 +    spin_lock_destroy (&dev->dev_syscontrol_lock);
26058 +    spin_lock_destroy (&dev->dev_intmask_lock);
26059 +
26060 +    spin_lock_destroy (&dev->dev_ctxlock);
26061 +    spin_lock_destroy (&dev->dev_cqlock);
26062 +    spin_lock_destroy (&dev->dev_mmulock);
26063 +
26064 +    spin_lock_destroy (&dev->dev_i2c_lock);
26065 +
26066 +    spin_lock_destroy (&dev->dev_mainint_lock);
26067 +    spin_lock_destroy (&dev->dev_haltop_lock);
26068 +    spin_lock_destroy (&dev->dev_intop_lock);
26069 +    spin_lock_destroy (&dev->dev_trap_lock);
26070 +
26071 +    while (! list_empty (&dev->dev_hc_list))
26072 +    {
26073 +       ELAN4_HASH_CHUNK *hc = list_entry (dev->dev_hc_list.next, ELAN4_HASH_CHUNK, hc_link);
26074 +       
26075 +       list_del (&hc->hc_link);
26076 +
26077 +       KMEM_FREE(hc, sizeof (ELAN4_HASH_CHUNK));
26078 +    }
26079 +    
26080 +    elan4_sdram_fini (dev);
26081 +    
26082 +    switch (dev->dev_devinfo.dev_revision_id)
26083 +    {
26084 +    case PCI_REVISION_ID_ELAN4_REVA:
26085 +       elan4_unmap_device (dev, dev->dev_rom,  ELAN4_REVA_EBUS_ROM_SIZE, &dev->dev_rom_handle);
26086 +       elan4_unmap_device (dev, dev->dev_regs, ELAN4_REG_SIZE, &dev->dev_regs_handle);
26087 +       break;
26088 +    case PCI_REVISION_ID_ELAN4_REVB:
26089 +       elan4_unmap_device (dev, dev->dev_i2c,  ELAN4_REVB_I2C_SIZE, &dev->dev_i2c_handle);
26090 +       elan4_unmap_device (dev, dev->dev_regs, ELAN4_REG_SIZE, &dev->dev_regs_handle);
26091 +       break;
26092 +    }
26093 +}
26094 +
26095 +static void
26096 +initialise_cache (ELAN4_DEV *dev)
26097 +{
26098 +    register int set, line;
26099 +
26100 +    /* Initialise the cache to "map" the bottom of sdram - we will use
26101 +     * this space for cache flushing, so require the cache to be set
26102 +     * up so that cachelines for this are in the correct set.
26103 +     *
26104 +     * XXXX: for MPSAS we set bit 28, to ensure that any access to 
26105 +     *       sdram causes the line to be filled first to expunge any
26106 +     *       Xs. */
26107 +    for (set = 0; set < E4_NumCacheSets; set++)
26108 +       for (line = 0; line < E4_NumCacheLines; line++)
26109 +           write_tag (dev, Tags[set][line], (((E4_uint64) set) << 29) | (1 << 28) | (line << 16));
26110 +}
26111 +
26112 +#ifndef CONFIG_MPSAS
26113 +static void
26114 +initialise_cache_tags (ELAN4_DEV *dev, unsigned addr)
26115 +{
26116 +    register int set, line;
26117 +
26118 +    /* Initialise the whole cache to hold sdram at "addr" as direct mapped */
26119 +
26120 +    for (set = 0; set < E4_NumCacheSets; set++)
26121 +       for (line = 0; line < E4_NumCacheLines; line++)
26122 +           write_tag (dev, Tags[set][line], addr | (set << 13) | (1 << 11));
26123 +}
26124 +
26125 +static void
26126 +initialise_ecc (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
26127 +{
26128 +    register int i, addr;
26129 +
26130 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26131 +    {
26132 +        initialise_cache_tags (dev, E4_CacheSize);
26133 +        for (addr = 0; addr < bank->b_size; addr += E4_CacheSize)
26134 +        {
26135 +           for (i = 0; i < E4_CacheSize; i += sizeof (E4_uint64))
26136 +               writeq (0xbeec000000000000ull | addr | i, bank->b_ioaddr + addr + i);
26137 +           initialise_cache_tags (dev, addr);
26138 +        }
26139 +    }
26140 +    else
26141 +    {
26142 +       /* Write the whole of this bank of sdram. */
26143 +        for (addr = 0; addr < bank->b_size; addr += sizeof (E4_uint64))
26144 +           writeq (0xbeec000000000000ull | addr, bank->b_ioaddr + addr);
26145 +
26146 +       /* Now flush out the top out of the cache */
26147 +        for (addr = 0; addr < E4_CacheSize; addr += sizeof (E4_uint64))
26148 +           writeq (0xbeec000000000000ull | addr, bank->b_ioaddr + addr);
26149 +
26150 +       /* Now read the top value of sdram to guarantee the write has occurred before the ecc is enabled */
26151 +       readq (bank->b_ioaddr + bank->b_size - sizeof (E4_uint64));
26152 +    }
26153 +}
26154 +#endif
26155 +
26156 +#ifdef CONFIG_MPSAS
26157 +static void
26158 +do_initdma (ELAN4_DEV *dev)
26159 +{
26160 +#define VIRTUAL_ADDRESS        0x10000000ull
26161 +    ELAN4_CQ  *cq  = dev->dev_flush_cq[0];
26162 +    E4_uint64 value;
26163 +    E4_uint32 intreg;
26164 +    E4_uint64 status;
26165 +
26166 +    PRINTF (DBG_DEVICE, DBG_CONFIG, "elan: performing initialising dma\n");
26167 +    
26168 +    DISABLE_INT_MASK (dev, INT_Dma0Proc | INT_Dma1Proc);
26169 +    
26170 +    /* initialise the context filter */
26171 +    elan4_attach_filter (&dev->dev_ctxt, 0);
26172 +
26173 +    /* now issue a DMA - we expect this to trap */
26174 +    writeq (E4_DMA_TYPE_SIZE (128*4, DMA_DataTypeByte, 0, 0) | RUN_DMA_CMD, cq->cq_mapping + (0 << 3));
26175 +    writeq (0,                                                              cq->cq_mapping + (1 << 3));
26176 +    writeq (0,                                                              cq->cq_mapping + (2 << 3));
26177 +    writeq (dev->dev_tproc_space,                                           cq->cq_mapping + (3 << 3));
26178 +    writeq (dev->dev_tproc_space,                                           cq->cq_mapping + (4 << 3));
26179 +    writeq (0,                                                              cq->cq_mapping + (5 << 3));
26180 +    writeq (0,                                                              cq->cq_mapping + (6 << 3));
26181 +    
26182 +    /* spin waiting for it to trap - then restart the dma processor */
26183 +    do {
26184 +       value   = read_reg64 (dev, IntAndMaskReg);
26185 +       intreg  = (value >> E4_INTERRUPT_REG_SHIFT);
26186 +    } while ((intreg & (INT_Dma0Proc | INT_Dma1Proc)) == 0);
26187 +    
26188 +    /* check it trapped for the right reason */
26189 +    status = (intreg & INT_Dma0Proc) ? read_reg64 (dev, DProc0Status) : read_reg64 (dev, DProc1Status);
26190 +    
26191 +    if (DPROC_PrefetcherFault (status) || (DPROC_TrapType(status) != DmaProcFailCountError && DPROC_TrapType(status) != DmaProcPacketAckError))
26192 +    {
26193 +       printk ("elan: bad dma trap, status = %lx\n", (long)status);
26194 +       panic ("elan: bad dma trap\n");
26195 +    }
26196 +    
26197 +    PULSE_SCHED_RESTART (dev, SCH_RestartDma0Proc | SCH_RestartDma1Proc | SCH_RestartDmaPrefetchProc);
26198 +
26199 +    elan4_detach_filter (&dev->dev_ctxt, 0);
26200 +
26201 +    ENABLE_INT_MASK (dev, INT_Dma0Proc | INT_Dma1Proc);
26202 +#undef VIRTUAL_ADDRESS
26203 +}
26204 +#endif
26205 +
26206 +static int
26207 +ebus_read_vpd (ELAN4_DEV *dev, unsigned char *data, unsigned int nob)
26208 +{
26209 +    unsigned int pci_data_ptr;
26210 +    unsigned int vpd_ptr;
26211 +    register int i;
26212 +
26213 +    if (read_ebus_rom (dev, 0) != 0x55 || read_ebus_rom (dev, 1) != 0xaa)
26214 +    {
26215 +       printk ("elan%d: invalid rom signature in ebus rom\n", dev->dev_instance);
26216 +       return -EINVAL;
26217 +    }
26218 +
26219 +    pci_data_ptr = (read_ebus_rom (dev, 0x19) << 8) | read_ebus_rom (dev, 0x18);
26220 +
26221 +    /* check the pci data structure */
26222 +    if (read_ebus_rom (dev, pci_data_ptr + 0) != 'P' ||
26223 +       read_ebus_rom (dev, pci_data_ptr + 1) != 'C' ||
26224 +       read_ebus_rom (dev, pci_data_ptr + 2) != 'I' ||
26225 +       read_ebus_rom (dev, pci_data_ptr + 3) != 'R')
26226 +    {
26227 +       printk ("elan%d: invalid pci data structure in ebus rom\n", dev->dev_instance);
26228 +       return -EINVAL;
26229 +    }
26230 +    
26231 +    /* extract the VPD pointer */
26232 +    vpd_ptr = (read_ebus_rom (dev, pci_data_ptr + 9) << 8) | read_ebus_rom (dev, pci_data_ptr + 8);
26233 +
26234 +    if (vpd_ptr == 0)
26235 +    {
26236 +       printk ("elan%d: no vital product data in ebus rom\n", dev->dev_instance);
26237 +       return -EINVAL;
26238 +    }
26239 +    
26240 +    /* read the vpd data */
26241 +    for (i = 0; i < nob; i++)
26242 +       data[i] = read_ebus_rom (dev, vpd_ptr + i);
26243 +
26244 +    return 0;
26245 +}
26246 +
26247 +int
26248 +elan4_read_vpd (ELAN4_DEV *dev, unsigned char *tag, unsigned char *result) 
26249 +{
26250 +    unsigned char vpd[I2C_ELAN_EEPROM_VPD_SIZE];
26251 +    unsigned char *ptr = vpd;
26252 +    unsigned int   finished = 0;
26253 +    unsigned char *lim;
26254 +    unsigned char  name[3];
26255 +    unsigned char  value[256];
26256 +    unsigned char  type;
26257 +    unsigned int   len, len2;
26258 +    register int   i;
26259 +
26260 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26261 +    {
26262 +       if (ebus_read_vpd (dev, vpd, I2C_ELAN_EEPROM_VPD_SIZE) < 0)
26263 +       {
26264 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unable to read serial number from EBUS rom\n", dev->dev_instance);
26265 +           return -EINVAL ;
26266 +       }       
26267 +    }
26268 +    else
26269 +    {
26270 +       if (i2c_read_rom (dev, I2C_ELAN_EEPROM_VPD_BASEADDR, I2C_ELAN_EEPROM_VPD_SIZE, vpd) < 0)
26271 +       {
26272 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unable to read serial number from I2C rom\n", dev->dev_instance);
26273 +           return  -EINVAL;
26274 +       }
26275 +    }
26276 +
26277 +    result[0] = 0;
26278 +    while (! finished)
26279 +    {
26280 +       type = *ptr++;
26281 +       
26282 +       if (type & LARGE_RESOURCE_BIT)
26283 +       {
26284 +           len = *(ptr++);
26285 +           len += *(ptr++) << 8;
26286 +           
26287 +           switch (type & ~LARGE_RESOURCE_BIT)
26288 +           {
26289 +           case LARGE_RESOURCE_STRING:
26290 +           case LARGE_RESOURCE_VENDOR_DEFINED:
26291 +               ptr += len;
26292 +               break;
26293 +               
26294 +           case LARGE_RESOURCE_VITAL_PRODUCT_DATA:
26295 +               for (lim = ptr + len; ptr < lim; )
26296 +               {
26297 +                   name[0] = *ptr++;
26298 +                   name[1] = *ptr++;
26299 +                   name[2] = '\0';
26300 +                   len2    = *ptr++;
26301 +                   
26302 +                   for (i = 0; i < len2 && ptr < lim; i++)
26303 +                       value[i] = *ptr++;
26304 +                   value[i] = '\0';
26305 +                                   
26306 +                   PRINTF3 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, %s: %s\n", dev->dev_instance, name, value);
26307 +
26308 +                   if (tag != NULL) 
26309 +                   { /* looking for just one tag */
26310 +                       if (!strcmp (name, tag))
26311 +                           strcpy(result, value);
26312 +                   } 
26313 +                   else 
26314 +                   { /* get all tags */
26315 +                       strcat(result,name);
26316 +                       strcat(result,": ");
26317 +                       strcat(result,value);
26318 +                       strcat(result,"\n");
26319 +                   }
26320 +               }
26321 +               break;
26322 +               
26323 +           default:
26324 +               PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unknown large resource %x\n", dev->dev_instance, type);
26325 +               finished = 1;
26326 +               break;
26327 +           }
26328 +       }
26329 +       else
26330 +       {
26331 +           len = type & 0x7;
26332 +           
26333 +           switch (type >> 3)
26334 +           {
26335 +           case SMALL_RESOURCE_COMPATIBLE_DEVICE_ID:
26336 +               ptr += len;
26337 +               break;
26338 +               
26339 +           case SMALL_RESOURCE_VENDOR_DEFINED:
26340 +               ptr += len;
26341 +               break;
26342 +               
26343 +           case SMALL_RESOURCE_END_TAG:
26344 +               finished = 1;
26345 +               break;
26346 +               
26347 +           default:
26348 +               PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unknown small resource %x\n", dev->dev_instance, type >> 3);
26349 +               finished = 1;
26350 +               break;
26351 +           }
26352 +       }
26353 +    }
26354 +
26355 +    if ( result[0] == 0 ) {
26356 +       if ( tag != 0 ) 
26357 +           PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, failed to find tag %s\n", dev->dev_instance, tag);
26358 +       else
26359 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, failed to find any tags\n", dev->dev_instance);
26360 +       return -EINVAL;
26361 +    }
26362 +
26363 +    return (0);
26364 +}
26365 +
26366 +int
26367 +elan4_start_device (ELAN4_DEV *dev)
26368 +{
26369 +    E4_VirtualProcessEntry entry;
26370 +    unsigned              pagesizeval[2];
26371 +    unsigned              hashsizeval[2];
26372 +    register int           i, j, tbl, res;
26373 +    unsigned               attempts = 0;
26374 +    E4_PAGE_SIZE_TABLE;
26375 +    unsigned char          serial[256];
26376 +
26377 +    PRINTF (DBG_DEVICE, DBG_ALL, "elan4_start_device: entered\n");
26378 +
26379 +    dev->dev_state = ELAN4_STATE_STARTING;
26380 +
26381 + tryagain:
26382 +    /* Initialise the pci config space */
26383 +    if ((res = elan4_pciinit (dev)) < 0)
26384 +       return (res);
26385 +
26386 +    /* Display the serial number */
26387 +    if (elan4_read_vpd (dev, "SN", serial))
26388 +       printk("elan%d: SN: failed to read\n", dev->dev_instance);
26389 +    else
26390 +       printk("elan%d: SN: %s\n", dev->dev_instance, serial);
26391 +
26392 +    /* initialise the interrupt mask to zero */
26393 +    SET_INT_MASK (dev, 0);
26394 +
26395 +    /* Initialise the device registers */
26396 +    write_reg64 (dev, TlbLineValue, 0);
26397 +    write_reg64 (dev, SysControlReg, 0);
26398 +
26399 +    /* Initialise the SDRAM using the configuration value from the ROM */
26400 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg | SDRAM_SETUP);
26401 +
26402 +    /* Setup the linkport registers */
26403 +    write_reg64 (dev, LinkPortLock, 0);
26404 +    write_reg64 (dev, LinkPortKey,  LINK_PORT_LOCK_VALUE);
26405 +
26406 +    /* Setup the tick rates, start the clock, and init the stats registers */
26407 +    write_ureg32 (dev, ClockTickRate.s.TickRates, ELAN4_CLOCK_TICK_RATE);
26408 +    write_ureg64 (dev, Clock, 0);
26409 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
26410 +    for (i = 0; i < 8; i++)
26411 +       write_ureg32 (dev, StatCounts[i].s.StatsCount, 0);
26412 +
26413 +    /* Initialise the Link Control register - disable the TLB prefetcher on RevB
26414 +     * as it can cause very occasional data corruption. */
26415 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
26416 +       write_reg32 (dev, LinkControlReg, LCONT_REVB_DISABLE_TLB_PREFETCH);
26417 +    else
26418 +       write_reg32 (dev, LinkControlReg, 0);
26419 +
26420 +    /* Initialise the Link Control Settings to set the PLL Reference Value */
26421 +    write_reg32 (dev, LinkContSettings, 
26422 +                (elan4_mod45disable ? LCONT_MOD45_DISABLE : 0) |
26423 +                (3 << LCONT_CONFIG_PHASE_SHIFT) |
26424 +                ((elan4_pll_div & LCONT_PLL_REF_VAL_BITS_MASK) << LCONT_PLL_REF_VAL_BITS_SHIFT) |
26425 +                (LCONT_VOD_360 << LCONT_LVDS_VOLTAGE_BITS_SHIFT) |
26426 +                (LCONT_TERM_AUTO_OHM << LCONT_LVDS_TERMINATION_SHIFT));
26427 +
26428 +    /* Clear the link error LED on RevB and above */
26429 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA)
26430 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_ClearLinkError);
26431 +
26432 +    initialise_cache (dev);
26433 +
26434 +    /* Initialise the MMU hash table parameters */
26435 +    /* Select the largest elan pagesize which is spanned by the
26436 +     * system pagesize for mmu table 0*/
26437 +    for (i = 0; i < E4_PAGE_SIZE_TABLE_SIZE; i++)
26438 +       if (PageSizeTable[i] > PAGE_SHIFT)
26439 +           break;
26440 +
26441 +    pagesizeval[0] = i - 1;
26442 +    hashsizeval[0] = elan4_hash_0_size_val;
26443 +       
26444 +    /* Select a suitable elan pagesize to match any "large" page
26445 +     * support that the OS provides. */
26446 +    pagesizeval[1] = PAGE_SIZE_4M;
26447 +    hashsizeval[1] = elan4_hash_1_size_val;
26448 +
26449 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
26450 +    {
26451 +       dev->dev_pagesizeval[tbl]   = pagesizeval[tbl];
26452 +       dev->dev_pageshift[tbl]     = PageSizeTable[pagesizeval[tbl]];
26453 +       dev->dev_hashsize[tbl]      = (1 << hashsizeval[tbl])/sizeof (E4_HashTableEntry);
26454 +       dev->dev_rsvd_hashmask[tbl] = ((1 << (27 - dev->dev_pageshift[tbl]))-1) & ~((1 << hashsizeval[tbl])-1);
26455 +       dev->dev_rsvd_hashval[tbl]  = 0xFFFFFFFF;
26456 +    }
26457 +
26458 +    PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: pageshifts %d,%d\n", dev->dev_pageshift[0], 
26459 +            NUM_HASH_TABLES == 2 ? dev->dev_pageshift[1] : 0);
26460 +
26461 +    /* Initialise the control register to the desired value */
26462 +    dev->dev_syscontrol = (CONT_EN_ALL_SETS | CONT_MMU_ENABLE | CONT_CACHE_ALL | CONT_2K_NOT_1K_DMA_PACKETS |
26463 +                          (pagesizeval[0] << CONT_TABLE0_PAGE_SIZE_SHIFT) | (hashsizeval[0] << CONT_TABLE0_MASK_SIZE_SHIFT));
26464 +
26465 +    if (NUM_HASH_TABLES == 2)
26466 +       dev->dev_syscontrol |= CONT_TWO_HASH_TABLES | (pagesizeval[1] << CONT_TABLE1_PAGE_SIZE_SHIFT) | (hashsizeval[1] << CONT_TABLE1_MASK_SIZE_SHIFT);
26467 +
26468 +    write_reg64 (dev, SysControlReg, dev->dev_syscontrol);
26469 +
26470 +    /* use direct mapped pci writes during sdram initialisation, since for 
26471 +     * cache flushing to work, we need to ensure that the cacheflush page
26472 +     * never gets lines into the incorrect cache set. */
26473 +    SET_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
26474 +
26475 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
26476 +       elan4_sdram_setup_delay_lines(dev);
26477 +
26478 +    for (i = res = 0; i < dev->dev_sdram_numbanks; i++)
26479 +       if (dev->dev_sdram_banks[i].b_size)
26480 +           res |= elan4_sdram_init_bank (dev, &dev->dev_sdram_banks[i]);
26481 +
26482 +    if (! res)
26483 +    {
26484 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB && ++attempts < 5)
26485 +       {
26486 +           printk ("elan%d: sdram not working, resetting\n", dev->dev_instance);
26487 +           goto tryagain;
26488 +       }
26489 +
26490 +       printk ("elan%d: could not find any sdram banks\n", dev->dev_instance);
26491 +       goto failed;
26492 +    }
26493 +
26494 +#ifndef CONFIG_MPSAS
26495 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: initialising for ECC\n");
26496 +
26497 +    for (i = 0 ; i < dev->dev_sdram_numbanks; i++)
26498 +       if (dev->dev_sdram_banks[i].b_ioaddr)
26499 +           initialise_ecc (dev, &dev->dev_sdram_banks[i]);
26500 +#endif
26501 +
26502 +    dev->dev_sdram_initial_ecc_val = read_reg64 (dev, SDRamECCStatus);
26503 +
26504 +    /* Now enable ECC after we've scrubbed the memory */
26505 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg | SDRAM_ENABLE_ECC);
26506 +
26507 +    /* clear any errors, and flush the tlb/route cache */
26508 +    PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH | CONT_ROUTE_FLUSH | CONT_CLEAR_LINKPORT_INT | CONT_CLEAR_SDRAM_ERROR);
26509 +
26510 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
26511 +
26512 +    /* Initialise the thread processor's register file */
26513 +    for (i = 0; i < 64; i++)
26514 +       write_reg64 (dev, TProcRegs[i], 0);
26515 +
26516 +    /* Initialise the thread processor's ICache tags */
26517 +    for (i = 0; i < (E4_ICacheLines/E4_ICachePortSize); i++)
26518 +    {
26519 +        write_reg64 (dev, ICachePort_Cntl_Addr, i << E4_ICacheTagAddrShift);
26520 +        for (j = 0; j < E4_ICachePortSize; j++)
26521 +           write_reg64 (dev, ICachePort[j], E4_InvalidTagValue);
26522 +    }
26523 +
26524 +    /*
26525 +     * Initialise the ICache with a sethi %hi(addr << 7), %r0
26526 +     * writing 8 64 bit values per loop of sethi %g0 values ending in 77 for something different??
26527 +     */
26528 +    for (i = 0; i < E4_ICacheSizeInBytes; i += (E4_ICachePortSize << 3))
26529 +    {
26530 +       write_reg64 (dev, ICachePort_Cntl_Addr, E4_AccessICacheRams | (i >> 3));
26531 +
26532 +       for (j = 0; j < E4_ICachePortSize; j++)
26533 +           write_reg64 (dev, ICachePort[j], 
26534 +                        (E4_uint64) (((E4_uint64)i << (4+7))    + ((E4_uint64)j << (1+7))    + (0x077)) |
26535 +                        (E4_uint64) (((E4_uint64)i << (4+7+32)) + ((E4_uint64)j << (1+7+32)) + (0x0e7)) << 32);
26536 +    }
26537 +
26538 +    /*
26539 +     * Initialise the top of the ICache Set0 with an instruction which will
26540 +     * cause a known trap fingerprint so that the application can identify it
26541 +     * and ignore the trap.
26542 +     */
26543 +    write_reg64 (dev, ICachePort_Cntl_Addr, E4_ICacheFixupOffset | E4_AccessICacheRams);
26544 +    for (i = 0; i < E4_ICachePortSize; i++)
26545 +       write_reg64 (dev, ICachePort[i], E4_ICacheFixupInsn | (E4_ICacheFixupInsn << 32));
26546 +
26547 +    /* create the buddy allocator for SDRAM */
26548 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
26549 +       if (dev->dev_sdram_banks[i].b_ioaddr)
26550 +           elan4_sdram_add_bank (dev, &dev->dev_sdram_banks[i]);
26551 +
26552 +    dev->dev_ctxtableshift        = elan4_ctxt_table_shift;
26553 +    dev->dev_cqcount              = (1 << elan4_ln2_max_cqs);
26554 +#ifdef CONFIG_MTRR
26555 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
26556 +       dev->dev_cqreorder = dev->dev_cqcount >> 1;
26557 +    else
26558 +       dev->dev_cqreorder = dev->dev_cqcount;
26559 +#endif
26560 +
26561 +    /* allocate the sdram for cache flushing whilst still in direct mapped mode */
26562 +    dev->dev_cacheflush_space = elan4_sdram_alloc (dev, E4_CacheSize);
26563 +
26564 +    /* and no longer need direct mapped pci writes */
26565 +    CLEAR_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
26566 +
26567 +    /* allocate the hash tables, command queues, context tables etc */
26568 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: allocating hash tables, command queueus, context tables\n");
26569 +
26570 +    dev->dev_comqlowpri       = elan4_sdram_alloc (dev, (1 << COMMAND_RUN_QUEUE_BITS));
26571 +    dev->dev_comqhighpri      = elan4_sdram_alloc (dev, (1 << COMMAND_RUN_QUEUE_BITS));
26572 +    dev->dev_cqaddr           = elan4_sdram_alloc (dev, sizeof (E4_CommandQueueDesc) * dev->dev_cqcount);
26573 +    dev->dev_dmaqhighpri      = elan4_sdram_alloc (dev, E4_QueueSize(elan4_dmaq_highpri_size));
26574 +    dev->dev_dmaqlowpri       = elan4_sdram_alloc (dev, E4_QueueSize(elan4_dmaq_lowpri_size));
26575 +    dev->dev_threadqhighpri   = elan4_sdram_alloc (dev, E4_QueueSize(elan4_threadq_highpri_size));
26576 +    dev->dev_threadqlowpri    = elan4_sdram_alloc (dev, E4_QueueSize(elan4_threadq_lowpri_size));
26577 +    dev->dev_interruptq       = elan4_sdram_alloc (dev, E4_QueueSize(elan4_interruptq_size));
26578 +
26579 +    dev->dev_ctxtable         = elan4_sdram_alloc (dev, (1 << dev->dev_ctxtableshift) * sizeof (E4_ContextControlBlock));
26580 +    dev->dev_faultarea        = elan4_sdram_alloc (dev, CUN_Entries * sizeof (E4_FaultSave));
26581 +    dev->dev_inputtraparea    = elan4_sdram_alloc (dev, sizeof (E4_IprocTrapState));
26582 +
26583 +    dev->dev_sdrampages[0]    = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
26584 +    dev->dev_sdrampages[1]    = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
26585 +
26586 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
26587 +    {
26588 +       dev->dev_hashtable[tbl] = elan4_sdram_alloc (dev, dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
26589 +#ifndef CONFIG_MPSAS
26590 +       /* Initialise hash tables to invalid (zero) */
26591 +       elan4_sdram_zeroq_sdram (dev, dev->dev_hashtable[tbl], dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
26592 +#endif
26593 +    }
26594 +
26595 +    /* Initialise all context filters to discard */
26596 +#ifdef CONFIG_MPSAS
26597 +    if (sas_memset_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, dev->dev_ctxtable, 
26598 +                       E4_FILTER_DISCARD_ALL, (1 << (dev->dev_ctxtableshift-1))) < 0)
26599 +    {
26600 +       for (i = 0; i < (1 << dev->dev_ctxtableshift); i++)
26601 +           elan4_write_filter (dev, i, E4_FILTER_DISCARD_ALL);
26602 +    }
26603 +#else
26604 +    for (i = 0; i < (1 << dev->dev_ctxtableshift); i++)
26605 +       elan4_write_filter (dev, i, E4_FILTER_DISCARD_ALL);
26606 +#endif
26607 +
26608 +    PRINTF4 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: hashtables %x,%x, %x,%x\n", dev->dev_hashtable[0], 
26609 +           dev->dev_hashsize[0], dev->dev_hashtable[1], dev->dev_hashsize[1]);
26610 +
26611 +    /* install the hash table pointers */
26612 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: initialise registers with table addresses\n");
26613 +    write_reg64 (dev, MmuTableBasePtrs, (((E4_uint64) dev->dev_hashtable[0]) | ((E4_uint64) dev->dev_hashtable[1]) << 32));
26614 +    write_reg64 (dev, MmuFaultAndRootCntxPtr, (((E4_uint64) dev->dev_ctxtableshift) | 
26615 +                                              ((E4_uint64) dev->dev_ctxtable) |
26616 +                                              ((E4_uint64) dev->dev_faultarea) << 32));
26617 +    write_reg64 (dev, InputTrapAndFilter, (((E4_uint64) dev->dev_ctxtableshift) | 
26618 +                                          ((E4_uint64) dev->dev_ctxtable) |
26619 +                                          ((E4_uint64) dev->dev_inputtraparea) << 32));
26620 +    /*
26621 +     * The run ptrs have this format: (Front << 32) | Back
26622 +     * The base for both the front and back uses the high bits of the back pointer.
26623 +     * So writing just the base value is good enough.
26624 +     */
26625 +    write_reg64 (dev, CommandLowPriRunPtrs,  dev->dev_comqlowpri);
26626 +    write_reg64 (dev, CommandHighPriRunPtrs, dev->dev_comqhighpri);
26627 +
26628 +    /* Initialise the run queues */
26629 +    write_reg64 (dev, DProcHighPriPtrs,       E4_QueueValue (dev->dev_dmaqhighpri,    elan4_dmaq_highpri_size));
26630 +    write_reg64 (dev, DProcLowPriPtrs,        E4_QueueValue (dev->dev_dmaqlowpri,     elan4_dmaq_lowpri_size));
26631 +    write_reg64 (dev, TProcHighPriPtrs,       E4_QueueValue (dev->dev_threadqhighpri, elan4_threadq_highpri_size));
26632 +    write_reg64 (dev, TProcLowPriPtrs,        E4_QueueValue (dev->dev_threadqlowpri,  elan4_threadq_lowpri_size));
26633 +
26634 +    /* Initialise the interrupt queue as "empty" - this is actually with one entry on it */
26635 +    write_reg64 (dev, MainIntQueuePtrs.Value, (((E4_uint64) E4_QueueFrontValue (dev->dev_interruptq, elan4_interruptq_size) << 32) |
26636 +                                              ((E4_uint64) E4_QueueBackPointer(dev->dev_interruptq + E4_MainIntEntrySize))));
26637 +    
26638 +    dev->dev_interruptq_nfptr = dev->dev_interruptq + E4_MainIntEntrySize;
26639 +
26640 +    /*
26641 +     * Flush the context filter before dropping the Discard all bits in the schedule status register.
26642 +     * Also hit the SCH_RestartTProc to clear out X's from the trap state and
26643 +     * hit the SCH_RestartDmaPrefetchProc to clear out X's from the prev register.
26644 +     */
26645 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush | SCH_RestartTProc | SCH_RestartDmaPrefetchProc);
26646 +
26647 +    /* setup the schedule status register. */
26648 +    SET_SCHED_STATUS (dev, SCH_CProcTimeout6p2us | SCH_DProcTimeslice512us);
26649 +
26650 +    /*
26651 +     * Now initialise the inserter cache.
26652 +     * Bit 31 of the first word of the descriptor is a valid bit. This must be cleared.
26653 +     * Bit 31 becomes a used bit in the descriptors in memory.
26654 +     */
26655 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
26656 +    {
26657 +       write_reg32 (dev, CommandControl.CommandQueueDescsBase, i);     /* select a cache line */
26658 +       write_reg64 (dev, CommandCacheTestPort, 0);                     /* Mark it invalid */
26659 +    }
26660 +    
26661 +    /* Setup the pointer to the command descriptors */
26662 +    /*   the table must be aligned on a CQ_CommandDescsAlignement boundary */
26663 +    /*   since we've allocated a small table - we work out the offset of the */
26664 +    /*   first entry in our table for mapping in the command ports later */
26665 +    dev->dev_cqoffset = (dev->dev_cqaddr & (CQ_CommandDescsAlignment-1)) / sizeof (E4_CommandQueueDesc);
26666 +
26667 +    write_reg32 (dev, CommandControl.CommandQueueDescsBase, (dev->dev_cqaddr & ~(CQ_CommandDescsAlignment-1)) | COM_ENABLE_DEQUEUE);
26668 +
26669 +    /* allocate the bitmaps for cq,ctxt allocation */
26670 +    KMEM_ZALLOC (dev->dev_cqamap, bitmap_t *, BT_BITOUL(dev->dev_cqcount/ELAN4_CQ_PER_CQA) * sizeof (bitmap_t), 1);
26671 +    KMEM_ZALLOC (dev->dev_ctxmap, bitmap_t *, BT_BITOUL(1 << dev->dev_ctxtableshift) * sizeof (bitmap_t), 1);
26672 +
26673 +    if (dev->dev_cqamap == NULL || dev->dev_ctxmap == NULL)
26674 +       goto failed;
26675 +
26676 +    /* Make every fourth context be invalid for ICache fixup.
26677 +     * context 0 is also invalid - since it is used to indicate 
26678 +     * an invalid tag. */
26679 +    for (i = 0; i < (1 << dev->dev_ctxtableshift); i += 4)
26680 +       BT_SET (dev->dev_ctxmap, i);
26681 +    
26682 +    /* initialise the halt operations */
26683 +    dev->dev_haltop_mask   = 0;
26684 +    dev->dev_haltop_active = 0;
26685 +
26686 +    /* allocate the hash table shadow structures - and place all blocks on the free lists */
26687 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
26688 +    {
26689 +       KMEM_ZALLOC (dev->dev_mmuhash[tbl], ELAN4_HASH_ENTRY *,  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY), 1);
26690 +       KMEM_ZALLOC (dev->dev_mmufree[tbl], ELAN4_HASH_ENTRY **, dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *), 1);
26691 +
26692 +       if (dev->dev_mmuhash[tbl] == NULL || dev->dev_mmufree[tbl] == NULL)
26693 +           goto failed;
26694 +
26695 +       for (i = 0; i < dev->dev_hashsize[tbl]; i++)
26696 +       {
26697 +           dev->dev_mmuhash[tbl][i].he_entry = dev->dev_hashtable[tbl] + (i * sizeof (E4_HashTableEntry));
26698 +           dev->dev_mmufree[tbl][i]          = &dev->dev_mmuhash[tbl][i];
26699 +       }
26700 +    }
26701 +
26702 +    /* setup the interrupt mask register */
26703 +    SET_INT_MASK (dev, (INT_MSI0 | INT_MSI1 | INT_MSI2 | INT_MSI3) & ~(INT_Discarding | INT_Halted));
26704 +
26705 +    /* start a thread to handle excessive main interrupts */
26706 +    if (kernel_thread_create (elan4_mainint_thread, (caddr_t) dev) == NULL)
26707 +       goto failed;
26708 +    dev->dev_mainint_started = 1;
26709 +    
26710 +    /* install the device context - and allocate the first 16 command queues */
26711 +    if (elan4_insertctxt (dev, &dev->dev_ctxt, &device_trap_ops) != 0)
26712 +       goto failed;
26713 +
26714 +    /* Allocate command queues, one for each entry in the inserter cache, 
26715 +     * we'll use these queues to flush the insert cache */
26716 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
26717 +    {
26718 +       if ((dev->dev_flush_cq[i] = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit | CQ_InterruptEnableBit, 
26719 +                                                  CQ_Priority)) == NULL)
26720 +           goto failed;
26721 +       
26722 +       ASSERT (elan4_cq2num(dev->dev_flush_cq[i]) == i);
26723 +
26724 +       dev->dev_flush_finished |= (1 << i);
26725 +    }
26726 +
26727 +    /* Allocate command queues for dma halt operations */
26728 +    if ((dev->dev_dma_flushop[0].cq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit, 0)) == NULL ||
26729 +       (dev->dev_dma_flushop[1].cq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit, CQ_Priority)) == NULL)
26730 +       goto failed;
26731 +
26732 +#ifdef CONFIG_MPSAS
26733 +    elan4_sdram_flushcache (dev, 0, E4_CacheSize);
26734 +#endif
26735 +
26736 +    /* initialise halt operation for flushing the icache */
26737 +    dev->dev_iflush_haltop.op_function = device_iflush_haltop;
26738 +    dev->dev_iflush_haltop.op_arg      = dev;
26739 +    dev->dev_iflush_haltop.op_mask     = INT_TProcHalted;
26740 +
26741 +    /* Allocate a route table, and create a valid route for vp==0, this is used
26742 +     * when a DMA is removed from the dma run queue */
26743 +    if ((dev->dev_routetable = elan4_alloc_routetable (dev, 0)) == NULL)
26744 +       goto failed;
26745 +
26746 +    elan4_set_routetable (&dev->dev_ctxt, dev->dev_routetable);
26747 +
26748 +    entry.Values[0] = FIRST_MYLINK;
26749 +    entry.Values[1] = 0;
26750 +
26751 +    elan4_write_route (dev, dev->dev_routetable, 0, &entry);
26752 +
26753 +    /* map the sdram pages into the elan */
26754 +    dev->dev_tproc_suspend = DEVICE_TPROC_SUSPEND_ADDR;
26755 +    dev->dev_tproc_space   = DEVICE_TPROC_SPACE_ADDR;
26756 +
26757 +    elan4mmu_pteload (&dev->dev_ctxt, 0, dev->dev_tproc_suspend, (dev->dev_sdrampages[0] >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocExecute));
26758 +    elan4mmu_pteload (&dev->dev_ctxt, 0, dev->dev_tproc_space,   (dev->dev_sdrampages[1] >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocDataWrite));
26759 +
26760 +    /* and store the thread suspend sequence in it for use when a thread is removed from the run queue */
26761 +    elan4_sdram_writel (dev, dev->dev_sdrampages[0], DEVICE_TPROC_SUSPEND_INSTR);
26762 +
26763 +#ifdef CONFIG_MPSAS
26764 +    do_initdma (dev);
26765 +#endif
26766 +    
26767 +    if (!elan4_neterr_init (dev))
26768 +       goto failed;
26769 +
26770 +    elan4_configure_mtrr (dev);
26771 +
26772 +    /* finally register the device with elanmod for rms */
26773 +    dev->dev_idx = elan_dev_register (&dev->dev_devinfo, &elan4_dev_ops, (void *) dev);
26774 +
26775 +    dev->dev_state = ELAN4_STATE_STARTED;
26776 +
26777 +    return (0);
26778 +
26779 + failed:
26780 +    printk ("elan%d: failed to start elan4 device - stopping\n", dev->dev_instance);
26781 +
26782 +    elan4_stop_device (dev);
26783 +    return (-ENOMEM);
26784 +}
26785 +
26786 +void
26787 +elan4_stop_device (ELAN4_DEV *dev)
26788 +{
26789 +    unsigned long flags;
26790 +    int i, tbl;
26791 +
26792 +    dev->dev_state = ELAN4_STATE_STOPPING;
26793 +
26794 +    elan_dev_deregister (&dev->dev_devinfo);
26795 +
26796 +    elan4_unconfigure_mtrr (dev);
26797 +
26798 +    elan4_neterr_destroy (dev);
26799 +
26800 +    if (dev->dev_tproc_suspend)
26801 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, dev->dev_tproc_suspend, 1 << dev->dev_pageshift[0]);
26802 +
26803 +    if (dev->dev_tproc_space)
26804 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, dev->dev_tproc_space,   1 << dev->dev_pageshift[0]);
26805 +
26806 +    if (dev->dev_routetable)
26807 +    {
26808 +       elan4_set_routetable (&dev->dev_ctxt, NULL);
26809 +       elan4_free_routetable (dev, dev->dev_routetable);
26810 +    }
26811 +
26812 +    for (i = 0; i < 2; i++)
26813 +       if (dev->dev_dma_flushop[i].cq)
26814 +           elan4_freecq (&dev->dev_ctxt, dev->dev_dma_flushop[i].cq);
26815 +
26816 +    /* free of the device context - and insert cache flushing command queues */
26817 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
26818 +       if (dev->dev_flush_cq[i])
26819 +           elan4_freecq (&dev->dev_ctxt, dev->dev_flush_cq[i]);
26820 +
26821 +    if (dev->dev_ctxt.ctxt_dev)
26822 +       elan4_removectxt (dev, &dev->dev_ctxt);
26823 +
26824 +    /* stop the mainint thread */
26825 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
26826 +    dev->dev_stop_threads = 1;
26827 +
26828 +    while (dev->dev_mainint_started && !dev->dev_mainint_stopped)
26829 +    {
26830 +       kcondvar_wakeupall (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
26831 +       kcondvar_wait (&dev->dev_mainint_wait, &dev->dev_mainint_lock, &flags);
26832 +    }
26833 +    dev->dev_mainint_started = dev->dev_mainint_stopped = 0;
26834 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
26835 +
26836 +    /* cancel any error interrupt timeouts */
26837 +    if (timer_fn_queued (&dev->dev_error_timeoutid))
26838 +       cancel_timer_fn (&dev->dev_error_timeoutid);
26839 +
26840 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && timer_fn_queued (&dev->dev_linkerr_timeoutid))
26841 +       cancel_timer_fn (&dev->dev_linkerr_timeoutid);
26842 +    
26843 +    /* reset the interrupt mask register to zero */
26844 +    if (dev->dev_regs)
26845 +       SET_INT_MASK (dev, 0);
26846 +
26847 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
26848 +    {
26849 +       if (dev->dev_mmuhash[tbl])
26850 +           KMEM_FREE (dev->dev_mmuhash[tbl], dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY));
26851 +       if (dev->dev_mmufree[tbl])
26852 +           KMEM_FREE (dev->dev_mmufree[tbl], dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *));
26853 +       if (dev->dev_hashtable[tbl])
26854 +           elan4_sdram_free (dev, dev->dev_hashtable[tbl], dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
26855 +    }
26856 +
26857 +    if (dev->dev_cqamap)
26858 +       KMEM_FREE (dev->dev_cqamap, BT_BITOUL (dev->dev_cqcount/ELAN4_CQ_PER_CQA) * sizeof (bitmap_t));
26859 +    if (dev->dev_ctxmap)
26860 +       KMEM_FREE (dev->dev_ctxmap, BT_BITOUL(1 << dev->dev_ctxtableshift) * sizeof (bitmap_t));
26861 +
26862 +    if (dev->dev_comqlowpri)
26863 +       elan4_sdram_free (dev, dev->dev_comqlowpri,     (1 << COMMAND_RUN_QUEUE_BITS));
26864 +    if (dev->dev_comqhighpri)
26865 +       elan4_sdram_free (dev, dev->dev_comqhighpri,    (1 << COMMAND_RUN_QUEUE_BITS));
26866 +    if (dev->dev_cqaddr)
26867 +       elan4_sdram_free (dev, dev->dev_cqaddr,         sizeof (E4_CommandQueueDesc) * dev->dev_cqcount);
26868 +    if (dev->dev_dmaqhighpri)
26869 +       elan4_sdram_free (dev, dev->dev_dmaqhighpri,    E4_QueueSize(elan4_dmaq_highpri_size));
26870 +    if (dev->dev_dmaqlowpri)
26871 +       elan4_sdram_free (dev, dev->dev_dmaqlowpri,     E4_QueueSize(elan4_dmaq_lowpri_size));
26872 +    if (dev->dev_threadqhighpri)
26873 +       elan4_sdram_free (dev, dev->dev_threadqhighpri, E4_QueueSize(elan4_threadq_highpri_size));
26874 +    if (dev->dev_threadqlowpri)
26875 +       elan4_sdram_free (dev, dev->dev_threadqlowpri,  E4_QueueSize(elan4_threadq_lowpri_size));
26876 +    if (dev->dev_interruptq)
26877 +       elan4_sdram_free (dev, dev->dev_interruptq,     E4_QueueSize(elan4_interruptq_size));
26878 +    
26879 +    if (dev->dev_ctxtable)
26880 +       elan4_sdram_free (dev, dev->dev_ctxtable,       (1 << dev->dev_ctxtableshift) * sizeof (E4_ContextControlBlock));
26881 +    if (dev->dev_faultarea)
26882 +       elan4_sdram_free (dev, dev->dev_faultarea,      CUN_Entries * sizeof (E4_FaultSave));
26883 +    if (dev->dev_inputtraparea)
26884 +       elan4_sdram_free (dev, dev->dev_inputtraparea,  sizeof (E4_IprocTrapState));
26885 +
26886 +    if (dev->dev_sdrampages[0])
26887 +       elan4_sdram_free (dev, dev->dev_sdrampages[0],  SDRAM_PAGE_SIZE);
26888 +    if (dev->dev_sdrampages[1])
26889 +       elan4_sdram_free (dev, dev->dev_sdrampages[1],  SDRAM_PAGE_SIZE);
26890 +
26891 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
26892 +       if (dev->dev_sdram_banks[i].b_ioaddr)
26893 +               elan4_sdram_fini_bank (dev, &dev->dev_sdram_banks[i]);
26894 +
26895 +    elan4_pcifini (dev);
26896 +
26897 +    dev->dev_state = ELAN4_STATE_STOPPED;
26898 +
26899 +    if (dev->dev_ack_errors)
26900 +        kfree(dev->dev_ack_errors);
26901 +    if (dev->dev_dproc_timeout)
26902 +        kfree(dev->dev_dproc_timeout);
26903 +    if (dev->dev_cproc_timeout)
26904 +        kfree(dev->dev_cproc_timeout);
26905 +}
26906 +
26907 +static __inline__ int
26908 +compute_arity (int lvl, unsigned n, char *arity)
26909 +{
26910 +    if (arity[lvl] == 0)
26911 +    {
26912 +       if (n <= 8)
26913 +           arity[lvl] = n;
26914 +       else
26915 +           arity[lvl] = 4;
26916 +    }
26917 +
26918 +    return (arity[lvl]);
26919 +}
26920 +
26921 +int
26922 +elan4_compute_position (ELAN_POSITION *pos, unsigned nodeid, unsigned numnodes, unsigned arityval)
26923 +{
26924 +    int i, lvl, n;
26925 +    char arity[ELAN_MAX_LEVELS];
26926 +
26927 +    if (nodeid >= numnodes)
26928 +       return -EINVAL;
26929 +
26930 +    for (i = 0; i < ELAN_MAX_LEVELS; i++, arityval >>= 4)
26931 +       arity[i] = arityval & 7;
26932 +    
26933 +    for (lvl = 0, n = numnodes; n > compute_arity(lvl, n, arity) && lvl < ELAN_MAX_LEVELS; lvl++)
26934 +    {
26935 +       if ((n % arity[lvl]) != 0)
26936 +           return -EINVAL;
26937 +       
26938 +       n /= arity[lvl];
26939 +    }
26940 +
26941 +    if (arity[lvl] != n)
26942 +       return -EINVAL;
26943 +
26944 +    for (i = 0; i <= lvl; i++)
26945 +       pos->pos_arity[i] = arity[lvl - i];
26946 +
26947 +    pos->pos_nodes  = numnodes;
26948 +    pos->pos_levels = lvl + 1;
26949 +    pos->pos_nodeid = nodeid;
26950 +    pos->pos_mode   = ELAN_POS_MODE_SWITCHED;
26951 +
26952 +    return 0;
26953 +}
26954 +
26955 +int
26956 +elan4_get_position (ELAN4_DEV *dev, ELAN_POSITION *pos)
26957 +{
26958 +    kmutex_lock (&dev->dev_lock);
26959 +    *pos = dev->dev_position;
26960 +    kmutex_unlock (&dev->dev_lock);
26961 +
26962 +    return (pos->pos_mode);
26963 +}
26964 +
26965 +int
26966 +elan4_set_position (ELAN4_DEV *dev, ELAN_POSITION *pos)
26967 +{
26968 +    int forceLocal = 0;
26969 +    int nnodes, i;
26970 +    unsigned int *ack_errors;
26971 +    unsigned int *dproc_timeout;
26972 +    unsigned int *cproc_timeout;
26973 +
26974 +    switch (pos->pos_mode)
26975 +    {
26976 +    case ELAN_POS_UNKNOWN:
26977 +       break;
26978 +       
26979 +    case ELAN_POS_MODE_SWITCHED:
26980 +       if (pos->pos_levels > ELAN_MAX_LEVELS)
26981 +           return (-EINVAL);
26982 +       
26983 +       for (i = 0, nnodes = 1; i < pos->pos_levels; i++)
26984 +       {
26985 +
26986 +           if (pos->pos_arity[i] <= 0 || (i == 0 ? pos->pos_arity[i] > 8 : pos->pos_arity[i] >= 8))  /* allow an 8 way top-switch */
26987 +               return (-EINVAL);
26988 +           
26989 +           nnodes *= pos->pos_arity[i];
26990 +       }
26991 +
26992 +       if (pos->pos_nodes > nnodes || pos->pos_nodeid >= pos->pos_nodes)
26993 +           return (-EINVAL);
26994 +       break;
26995 +       
26996 +    case ELAN_POS_MODE_LOOPBACK:
26997 +       if (pos->pos_levels != 1 || pos->pos_nodes != 1 || pos->pos_nodeid != 0 || pos->pos_arity[0] != 1)
26998 +           return (-EINVAL);
26999 +
27000 +       forceLocal = 1;
27001 +       break;
27002 +
27003 +    case ELAN_POS_MODE_BACKTOBACK:
27004 +       if (pos->pos_levels != 1 || pos->pos_nodes != 2 || pos->pos_nodeid >= 2 || pos->pos_arity[0] != 2)
27005 +           return (-EINVAL);
27006 +
27007 +       forceLocal = (pos->pos_nodeid == 0);
27008 +       break;
27009 +
27010 +    default:
27011 +       return (-EINVAL);
27012 +    }
27013 +
27014 +    ack_errors = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
27015 +    if (!ack_errors)
27016 +       return (-EINVAL);
27017 +    memset(ack_errors, 0, pos->pos_nodes * sizeof(unsigned int));
27018 +    dproc_timeout = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
27019 +    if (!dproc_timeout) 
27020 +    {
27021 +        kfree(ack_errors);
27022 +        return (-EINVAL);
27023 +    }
27024 +    memset(dproc_timeout, 0, pos->pos_nodes * sizeof(unsigned int));
27025 +    cproc_timeout = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
27026 +    if (!cproc_timeout)
27027 +    {
27028 +        kfree(ack_errors);
27029 +        kfree(dproc_timeout);
27030 +        return (-EINVAL);
27031 +    }
27032 +    memset(cproc_timeout, 0, pos->pos_nodes * sizeof(unsigned int));
27033 +       
27034 +    kmutex_lock (&dev->dev_lock);
27035 +    dev->dev_position = *pos;
27036 +    dev->dev_ack_errors = ack_errors;
27037 +    dev->dev_dproc_timeout = dproc_timeout;
27038 +    dev->dev_cproc_timeout = cproc_timeout;
27039 +    spin_lock_init(&dev->dev_error_routes_lock);
27040 +
27041 +    if (forceLocal)
27042 +       write_reg32 (dev, LinkContSettings, read_reg32 (dev, LinkContSettings) | LCONT_FORCE_COMMSCLK_LOCAL);
27043 +    else
27044 +       write_reg32 (dev, LinkContSettings, read_reg32 (dev, LinkContSettings) & ~LCONT_FORCE_COMMSCLK_LOCAL);
27045 +
27046 +    pioflush_reg (dev);
27047 +    kmutex_unlock (&dev->dev_lock);
27048 +
27049 +    return (0);
27050 +}
27051 +
27052 +void
27053 +elan4_get_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short *mask)
27054 +{
27055 +    kmutex_lock (&dev->dev_lock);
27056 +
27057 +    *mask = dev->dev_devinfo.dev_params_mask;
27058 +    memcpy (params, &dev->dev_devinfo.dev_params, sizeof (ELAN_PARAMS));
27059 +    
27060 +    kmutex_unlock (&dev->dev_lock);
27061 +}
27062 +
27063 +void
27064 +elan4_set_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short mask)
27065 +{      
27066 +    int i;
27067 +
27068 +    kmutex_lock (&dev->dev_lock);
27069 +    for (i = 0; i < ELAN4_PARAM_COUNT; i++)
27070 +       if (mask & (1 << i))
27071 +           dev->dev_devinfo.dev_params.values[i] = params->values[i];
27072 +    
27073 +    dev->dev_devinfo.dev_params_mask |= mask;
27074 +    kmutex_unlock (&dev->dev_lock);
27075 +}
27076 +
27077 +
27078 +EXPORT_SYMBOL(elan4_get_position);
27079 +EXPORT_SYMBOL(elan4_set_position);
27080 +
27081 +EXPORT_SYMBOL(elan4_queue_haltop);
27082 +EXPORT_SYMBOL(elan4_queue_dma_flushop);
27083 +EXPORT_SYMBOL(elan4_queue_mainintop);
27084 +
27085 +EXPORT_SYMBOL(elan4_insertctxt);
27086 +EXPORT_SYMBOL(elan4_removectxt);
27087 +
27088 +EXPORT_SYMBOL(elan4_attach_filter);
27089 +EXPORT_SYMBOL(elan4_detach_filter);
27090 +EXPORT_SYMBOL(elan4_set_filter);
27091 +EXPORT_SYMBOL(elan4_set_routetable);
27092 +
27093 +EXPORT_SYMBOL(elan4_alloccq);
27094 +EXPORT_SYMBOL(elan4_freecq);
27095 +EXPORT_SYMBOL(elan4_restartcq);
27096 +
27097 +EXPORT_SYMBOL(elan4_flush_icache);
27098 +
27099 +/*
27100 + * Local variables:
27101 + * c-file-style: "stroustrup"
27102 + * End:
27103 + */
27104 Index: linux-2.4.21/drivers/net/qsnet/elan4/device_Linux.c
27105 ===================================================================
27106 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/device_Linux.c    2004-02-23 16:02:56.000000000 -0500
27107 +++ linux-2.4.21/drivers/net/qsnet/elan4/device_Linux.c 2005-06-01 23:12:54.606438040 -0400
27108 @@ -0,0 +1,2625 @@
27109 +/*
27110 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
27111 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
27112 + * 
27113 + *    For licensing information please see the supplied COPYING file
27114 + *
27115 + */
27116 +
27117 +#ident "@(#)$Id: device_Linux.c,v 1.74.6.9 2005/01/18 14:44:11 david Exp $"
27118 +/*      $Source: /cvs/master/quadrics/elan4mod/device_Linux.c,v $*/
27119 +
27120 +#include <qsnet/kernel.h>
27121 +#include <qsnet/kthread.h>
27122 +#include <qsnet/kpte.h>
27123 +
27124 +#include <asm/io.h>
27125 +#include <asm/irq.h>
27126 +#ifdef CONFIG_MTRR
27127 +#include <asm/mtrr.h>
27128 +#endif
27129 +
27130 +#include <linux/init.h>
27131 +#include <linux/pci.h>
27132 +#include <linux/module.h>
27133 +#include <linux/reboot.h>
27134 +#include <linux/notifier.h>
27135 +
27136 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
27137 +#include <linux/wrapper.h>
27138 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23)
27139 +typedef void irqreturn_t;
27140 +#endif
27141 +#       define IRQ_NONE
27142 +#       define IRQ_HANDLED
27143 +#endif
27144 +
27145 +#include <elan4/debug.h>
27146 +#include <elan4/device.h>
27147 +#include <elan4/user.h>
27148 +#include <elan4/ioctl.h>
27149 +#include <elan4/intcookie.h>
27150 +
27151 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
27152 +#error please use a 2.4.0 series kernel or newer
27153 +#endif
27154 +
27155 +
27156 +#if defined(LINUX_SPARC) || defined(LINUX_PPC64)
27157 +#define __io_remap_page_range(from,offset,size,prot)   remap_page_range(from,offset,size,prot)
27158 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(from,offset,size,prot)
27159 +#elif defined(NO_RMAP)
27160 +#define __io_remap_page_range(from,offset,size,prot)   io_remap_page_range(from,offset,size,prot)
27161 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(from,offset,size,prot)
27162 +#else
27163 +#define __io_remap_page_range(from,offset,size,prot)   io_remap_page_range(vma,from,offset,size,prot)
27164 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(vma,from,offset,size,prot)
27165 +#endif
27166 +
27167 +#ifndef pgprot_noncached
27168 +static inline pgprot_t pgprot_noncached(pgprot_t _prot)
27169 +{
27170 +       unsigned long prot = pgprot_val(_prot);
27171 +#if defined(__powerpc__)
27172 +       prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
27173 +#elif defined(__sparc__)
27174 +       prot &= ~(_PAGE_CACHE);
27175 +       prot |= _PAGE_IE;
27176 +#endif
27177 +
27178 +       return __pgprot(prot);
27179 +}
27180 +#endif
27181 +
27182 +#ifndef pgprot_writecombine
27183 +static inline pgprot_t pgprot_writecombine (pgprot_t _prot)
27184 +{
27185 +    return _prot;
27186 +}
27187 +#endif
27188 +
27189 +#define ELAN4_DRIVER_VERSION           0x103           /* 16 bit value */
27190 +
27191 +/*
27192 + * Function prototypes.
27193 + */
27194 +static int        elan4_attach_device (int instance, struct pci_dev *pdev);
27195 +static void       elan4_detach_device (ELAN4_DEV *dev);
27196 +
27197 +static int        elan4_open (struct inode *inode, struct file *file);
27198 +static int        elan4_release(struct inode *inode, struct file *file);
27199 +static int        elan4_ioctl (struct inode *inode, struct file *file, 
27200 +                               unsigned int cmd, unsigned long arg);
27201 +static int        elan4_mmap (struct file *file, struct vm_area_struct *vm_area);
27202 +
27203 +static irqreturn_t elan4_irq (int irq, void *arg, struct pt_regs *regs);
27204 +
27205 +static void        elan4_shutdown_devices(int panicing);
27206 +
27207 +static int      disabled;                                      /* bitmask of which devices not to start */
27208 +unsigned int   elan4_pll_cfg      = 0;
27209 +int            elan4_pll_div      = 31;                        /* RevC PCB */
27210 +int            elan4_mod45disable = 0;
27211 +static int      optimise_pci_bus   = 1;                                /* 0 => don't, 1 => if ok, 2 => always */
27212 +static int      default_features   = 0;                                /* default values for dev_features */
27213 +
27214 +long long       sdram_cfg = SDRAM_STARTUP_VALUE;
27215 +static int      sdram_cfg_lo;
27216 +static int     sdram_cfg_hi;
27217 +int            sdram_bank_limit;
27218 +
27219 +MODULE_AUTHOR("Quadrics Ltd.");
27220 +MODULE_DESCRIPTION("Elan 4 Device Driver");
27221 +MODULE_LICENSE("GPL");
27222 +
27223 +MODULE_PARM(elan4_debug, "i");
27224 +MODULE_PARM(elan4_debug_toconsole, "i");
27225 +MODULE_PARM(elan4_debug_tobuffer, "i");
27226 +MODULE_PARM(elan4_debug_mmu, "i");
27227 +MODULE_PARM(elan4_pll_cfg, "i");
27228 +MODULE_PARM(elan4_pll_div, "i");
27229 +MODULE_PARM(elan4_mod45disable, "i");
27230 +MODULE_PARM(optimise_pci_bus, "i");
27231 +MODULE_PARM(default_features, "i");
27232 +
27233 +MODULE_PARM(disabled, "i");
27234 +MODULE_PARM(sdram_cfg_lo, "i");
27235 +MODULE_PARM(sdram_cfg_hi, "i");
27236 +MODULE_PARM(sdram_bank_limit, "i");
27237 +
27238 +MODULE_PARM(elan4_hash_0_size_val, "i");
27239 +MODULE_PARM(elan4_hash_1_size_val, "i");
27240 +MODULE_PARM(elan4_ctxt_table_shift, "i");
27241 +MODULE_PARM(elan4_ln2_max_cqs, "i");
27242 +MODULE_PARM(elan4_dmaq_highpri_size, "i");
27243 +MODULE_PARM(elan4_threadq_highpri_size, "i");
27244 +MODULE_PARM(elan4_dmaq_lowpri_size, "i");
27245 +MODULE_PARM(elan4_threadq_lowpri_size, "i");
27246 +MODULE_PARM(elan4_interruptq_size, "i");
27247 +
27248 +MODULE_PARM(elan4_mainint_punt_loops, "i");
27249 +MODULE_PARM(elan4_mainint_resched_ticks, "i");
27250 +
27251 +MODULE_PARM(user_p2p_route_options, "i");
27252 +MODULE_PARM(user_bcast_route_options, "i");
27253 +MODULE_PARM(user_dproc_retry_count, "i");
27254 +MODULE_PARM(user_cproc_retry_count, "i");
27255 +
27256 +/*
27257 + * Standard device entry points.
27258 + */
27259 +static struct file_operations elan4_fops = {
27260 +    ioctl:   elan4_ioctl,
27261 +    mmap:    elan4_mmap,
27262 +    open:    elan4_open,
27263 +    release: elan4_release,
27264 +};
27265 +
27266 +ELAN4_DEV *elan4_devices[ELAN4_MAX_CONTROLLER];
27267 +
27268 +#if defined(CONFIG_DEVFS_FS)
27269 +static devfs_handle_t devfs_handle;
27270 +#endif
27271 +
27272 +
27273 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
27274 +static int
27275 +elan4_ioctl32_cmds[] =
27276 +{      /* /dev/elan/control */
27277 +    ELAN4IO_DEVINFO,
27278 +    ELAN4IO_GET_POSITION,
27279 +    ELAN4IO_SET_POSITION,
27280 +    ELAN4IO_GET_PARAMS,
27281 +    ELAN4IO_SET_PARAMS,
27282 +
27283 +    /* /dev/elan4/user */
27284 +    ELAN4IO_POSITION,
27285 +    ELAN4IO_FREE,
27286 +    ELAN4IO_ATTACH,
27287 +    ELAN4IO_DETACH,
27288 +    ELAN4IO_BLOCK_INPUTTER,
27289 +
27290 +    ELAN4IO_ADD_P2PVP,
27291 +    ELAN4IO_ADD_BCASTVP,
27292 +    ELAN4IO_REMOVEVP,
27293 +    ELAN4IO_SET_ROUTE,
27294 +    ELAN4IO_RESET_ROUTE,
27295 +    ELAN4IO_GET_ROUTE,
27296 +    ELAN4IO_CHECK_ROUTE,
27297 +
27298 +    ELAN4IO_ALLOCCQ,
27299 +    ELAN4IO_FREECQ,
27300 +    ELAN4IO_SETPERM32,
27301 +    ELAN4IO_CLRPERM32,
27302 +    ELAN4IO_TRAPSIG,
27303 +    ELAN4IO_TRAPHANDLER32,
27304 +    ELAN4IO_REQUIRED_MAPPINGS,
27305 +       
27306 +    ELAN4IO_RESUME_EPROC_TRAP,
27307 +    ELAN4IO_RESUME_CPROC_TRAP,
27308 +    ELAN4IO_RESUME_DPROC_TRAP,
27309 +    ELAN4IO_RESUME_TPROC_TRAP,
27310 +    ELAN4IO_RESUME_IPROC_TRAP,
27311 +
27312 +    ELAN4IO_FLUSH_ICACHE,
27313 +
27314 +    ELAN4IO_STOP_CTXT,
27315 +
27316 +    ELAN4IO_ALLOC_INTCOOKIE,
27317 +    ELAN4IO_FREE_INTCOOKIE,
27318 +    ELAN4IO_ARM_INTCOOKIE,
27319 +    ELAN4IO_WAIT_INTCOOKIE,
27320 +
27321 +    ELAN4IO_ALLOC_TRAP_QUEUES,
27322 +    ELAN4IO_NETERR_MSG,
27323 +    ELAN4IO_NETERR_TIMER,
27324 +    ELAN4IO_NETERR_FIXUP,
27325 +
27326 +    ELAN4IO_DUMPCQ32,
27327 +};
27328 +
27329 +static int      elan4_ioctl32 (unsigned int fd, unsigned int cmd, 
27330 +                              unsigned long arg, struct file *file);
27331 +#endif
27332 +
27333 +/*
27334 + * Standard device entry points.
27335 + */
27336 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
27337 +
27338 +#include <linux/dump.h>
27339 +
27340 +static int
27341 +elan4_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
27342 +{
27343 +    if (event == DUMP_BEGIN)
27344 +       elan4_shutdown_devices (FALSE);
27345 +
27346 +    return (NOTIFY_DONE);
27347 +}
27348 +static struct notifier_block elan4_dump_notifier = 
27349 +{
27350 +    notifier_call:     elan4_dump_event,
27351 +    priority:          0,
27352 +};
27353 +
27354 +#endif
27355 +
27356 +static int
27357 +elan4_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
27358 +{
27359 +    if ((event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
27360 +       elan4_shutdown_devices (0);
27361 +
27362 +    return (NOTIFY_DONE);
27363 +}
27364 +
27365 +static struct notifier_block elan4_reboot_notifier = 
27366 +{
27367 +    notifier_call:     elan4_reboot_event,
27368 +    priority:          0,
27369 +};
27370 +
27371 +static int
27372 +elan4_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
27373 +{
27374 +    elan4_shutdown_devices (1);
27375 +
27376 +    return (NOTIFY_DONE);
27377 +}
27378 +
27379 +static struct notifier_block elan4_panic_notifier = 
27380 +{
27381 +    notifier_call:     elan4_panic_event,
27382 +    priority:          0,
27383 +};
27384 +
27385 +static int __init
27386 +elan4_init (void)
27387 +{
27388 +    int             err;
27389 +    struct pci_dev *pdev;
27390 +    int                    count;
27391 +#if defined(__ia64)
27392 +    int             seenRevA = 0;
27393 +#endif
27394 +    
27395 +    if ((err = register_chrdev (ELAN4_MAJOR, ELAN4_NAME, &elan4_fops)) < 0)
27396 +       return (err);
27397 +
27398 +#if defined(CONFIG_DEVFS_FS)
27399 +    devfs_handle = devfs_mk_dir (NULL, "elan4", NULL);
27400 +#endif
27401 +
27402 +    intcookie_init();
27403 +    elan4_debug_init();
27404 +    elan4_procfs_init();
27405 +    
27406 +#ifdef CONFIG_MPSAS
27407 +    sas_init();
27408 +#endif
27409 +
27410 +    if (sdram_cfg_lo != 0 && sdram_cfg_hi != 0)
27411 +       sdram_cfg = (((unsigned long long) sdram_cfg_hi) << 32) | ((unsigned long long) sdram_cfg_lo);
27412 +
27413 +    for (count = 0, pdev = NULL; (pdev = pci_find_device(PCI_VENDOR_ID_QUADRICS, PCI_DEVICE_ID_ELAN4, pdev)) != NULL ; count++)
27414 +    {
27415 +#if defined(__ia64)
27416 +       unsigned char revid;
27417 +       
27418 +       pci_read_config_byte (pdev, PCI_REVISION_ID, &revid);
27419 +
27420 +       if (revid == PCI_REVISION_ID_ELAN4_REVA && seenRevA++ != 0 && pci_find_device (PCI_VENDOR_ID_HP, 0x122e, NULL))
27421 +       {
27422 +           printk ("elan: only a single elan4a supported on rx2600\n");
27423 +           continue;
27424 +       }
27425 +#endif
27426 +
27427 +       if (count < ELAN4_MAX_CONTROLLER)
27428 +           elan4_attach_device (count, pdev);
27429 +    }
27430 +
27431 +    if (count >= ELAN4_MAX_CONTROLLER)
27432 +       printk ("elan: found %d elan4 devices - only support %d\n", count, ELAN4_MAX_CONTROLLER);
27433 +
27434 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
27435 +    lock_kernel();
27436 +    {
27437 +       extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *));
27438 +       register int i;
27439 +       for (i = 0; i < sizeof (elan4_ioctl32_cmds)/sizeof(elan4_ioctl32_cmds[0]); i++)
27440 +           register_ioctl32_conversion (elan4_ioctl32_cmds[i], elan4_ioctl32);
27441 +    }
27442 +    unlock_kernel();
27443 +#endif
27444 +
27445 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
27446 +    register_dump_notifier (&elan4_dump_notifier);
27447 +#endif
27448 +    register_reboot_notifier (&elan4_reboot_notifier);
27449 +
27450 +#if !defined(NO_PANIC_NOTIFIER)
27451 +    notifier_chain_register (&panic_notifier_list, &elan4_panic_notifier);
27452 +#endif
27453 +
27454 +    return (0);
27455 +}
27456 +
27457 +#ifdef MODULE
27458 +static void __exit
27459 +elan4_exit (void)
27460 +{
27461 +    int i;
27462 +
27463 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
27464 +    lock_kernel();
27465 +    {
27466 +       extern void unregister_ioctl32_conversion(unsigned int cmd);
27467 +
27468 +       for (i = 0; i < sizeof (elan4_ioctl32_cmds)/sizeof(elan4_ioctl32_cmds[0]); i++)
27469 +           unregister_ioctl32_conversion (elan4_ioctl32_cmds[i]);
27470 +    }
27471 +    unlock_kernel();
27472 +#endif
27473 +
27474 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
27475 +    unregister_dump_notifier (&elan4_dump_notifier);
27476 +#endif
27477 +    unregister_reboot_notifier (&elan4_reboot_notifier);
27478 +
27479 +#if !defined(NO_PANIC_NOTIFIER)
27480 +    notifier_chain_unregister (&panic_notifier_list, &elan4_panic_notifier);
27481 +#endif
27482 +
27483 +    for (i = 0; i < ELAN4_MAX_CONTROLLER; i++)
27484 +       if (elan4_devices[i] != NULL)
27485 +           elan4_detach_device (elan4_devices[i]);
27486 +    
27487 +    elan4_procfs_fini();
27488 +    elan4_debug_fini();
27489 +    intcookie_fini();
27490 +
27491 +#if defined(CONFIG_DEVFS_FS)
27492 +    devfs_unregister (devfs_handle);
27493 +#endif
27494 +
27495 +    unregister_chrdev(ELAN4_MAJOR, ELAN4_NAME);
27496 +}
27497 +
27498 +module_init (elan4_init);
27499 +module_exit (elan4_exit);
27500 +
27501 +#else
27502 +__initcall (elan4_init);
27503 +#endif
27504 +
27505 +/*
27506 + * Minor numbers encoded as :
27507 + *   [5:0]     device number
27508 + *   [15:6]    function number
27509 + */
27510 +#define ELAN4_DEVICE_MASK      0x3F
27511 +#define ELAN4_DEVICE(inode)    (MINOR((inode)->i_rdev) & ELAN4_DEVICE_MASK)
27512 +
27513 +#define ELAN4_MINOR_CONTROL    0
27514 +#define ELAN4_MINOR_MEM                1
27515 +#define ELAN4_MINOR_USER       2
27516 +
27517 +#define ELAN4_MINOR_SHIFT      6
27518 +#define ELAN4_MINOR(inode)     (MINOR((inode)->i_rdev) >> ELAN4_MINOR_SHIFT)
27519 +
27520 +/*
27521 + * Called by init_module() for each card discovered on PCI.
27522 + */
27523 +static int
27524 +elan4_attach_device (int instance, struct pci_dev *pdev)
27525 +{
27526 +    ELAN4_DEV *dev;
27527 +    int res;
27528 +
27529 +    if ((dev = (ELAN4_DEV *) kmalloc (sizeof (ELAN4_DEV), GFP_KERNEL)) == NULL)
27530 +       return (-ENOMEM);
27531 +    memset (dev, 0, sizeof (ELAN4_DEV));
27532 +
27533 +    /* setup os dependent section of ELAN4_DEV */
27534 +    dev->dev_instance   = instance;
27535 +    dev->dev_osdep.pdev = pdev;
27536 +    dev->dev_features   = default_features;
27537 +
27538 +    /* initialise the devinfo */
27539 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_VENDOR_ID,   &dev->dev_devinfo.dev_vendor_id);
27540 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_DEVICE_ID,   &dev->dev_devinfo.dev_device_id);
27541 +    pci_read_config_byte (dev->dev_osdep.pdev, PCI_REVISION_ID, &dev->dev_devinfo.dev_revision_id);
27542 +
27543 +    dev->dev_devinfo.dev_rail                 = instance;
27544 +    dev->dev_devinfo.dev_driver_version       = ELAN4_DRIVER_VERSION;
27545 +    dev->dev_devinfo.dev_num_down_links_value = 0;
27546 +
27547 +    dev->dev_position.pos_mode = ELAN_POS_UNKNOWN;
27548 +
27549 +    /* initialise the data structures and map the device */
27550 +    if ((res = elan4_initialise_device (dev)) != 0)
27551 +    {
27552 +       kfree (dev);
27553 +       return res;
27554 +    }
27555 +
27556 +    /* add the interrupt handler */
27557 +    if (request_irq (pdev->irq, elan4_irq, SA_SHIRQ, "elan4", dev) != 0)
27558 +    {
27559 +       elan4_finalise_device (dev);
27560 +       kfree (dev);
27561 +       return -ENXIO;
27562 +    }
27563 +
27564 +    if (pci_request_regions(dev->dev_osdep.pdev, "elan4"))
27565 +    {
27566 +       free_irq (dev->dev_osdep.pdev->irq, dev);
27567 +       kfree (dev);
27568 +       return -ENODEV;
27569 +    }
27570 +
27571 +#if defined(CONFIG_DEVFS_FS)
27572 +    {
27573 +       char name[16];
27574 +       
27575 +       sprintf (name, "control%d", dev->dev_instance);
27576 +       dev->dev_osdep.devfs_control = devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
27577 +                                                     dev->dev_instance | (ELAN4_MINOR_CONTROL << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR | S_IWUSR, 
27578 +                                                     &elan4_fops, NULL);
27579 +       sprintf (name, "sdram%d", dev->dev_instance);
27580 +       dev->dev_osdep.devfs_sdram =  devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
27581 +                                                    dev->dev_instance | (ELAN4_MINOR_MEM << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH,
27582 +                                                    &elan4_fops, NULL);
27583 +       sprintf (name, "user%d", dev->dev_instance);
27584 +       dev->dev_osdep.devfs_user =  devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
27585 +                                                   dev->dev_instance | (ELAN4_MINOR_USER << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH,
27586 +                                                   &elan4_fops, NULL);
27587 +    }
27588 +#endif
27589 +
27590 +    /* add the procfs entry */
27591 +    elan4_procfs_device_init (dev);
27592 +
27593 +    /* allow the device to be referenced now */
27594 +    elan4_devices[instance] = dev;
27595 +
27596 +    if ((disabled & (1 << instance)) == 0)
27597 +    {
27598 +       if (elan4_start_device (dev) != 0)
27599 +       {
27600 +           printk ("elan%d: auto-start of device failed\n", dev->dev_instance);
27601 +
27602 +           elan4_detach_device (dev);
27603 +           return (-ENXIO);
27604 +       }
27605 +       
27606 +       dev->dev_state = ELAN4_STATE_STARTED;
27607 +    }
27608 +
27609 +#if defined (__sparc)
27610 +    printk ("elan%d: at pci %s (irq = %s)\n", instance, pdev->slot_name, __irq_itoa(pdev->irq));
27611 +#else
27612 +    printk ("elan%d: at pci %s (irq = %d)\n", instance, pdev->slot_name, pdev->irq);
27613 +#endif
27614 +
27615 +    return (0);
27616 +}
27617 +
27618 +/*
27619 + * Called by cleanup_module() for each board found on PCI.
27620 + */
27621 +static void
27622 +elan4_detach_device (ELAN4_DEV *dev)
27623 +{
27624 +    /* stop the chip and free of resources */
27625 +    if (dev->dev_state == ELAN4_STATE_STARTED)
27626 +       elan4_stop_device (dev);
27627 +    
27628 +    elan4_devices[dev->dev_instance] = NULL;
27629 +
27630 +#if defined(CONFIG_DEVFS_FS)
27631 +    devfs_unregister (dev->dev_osdep.devfs_control);
27632 +    devfs_unregister (dev->dev_osdep.devfs_sdram);
27633 +    devfs_unregister (dev->dev_osdep.devfs_user);
27634 +#endif
27635 +
27636 +    /* release the address space */
27637 +    pci_release_regions (dev->dev_osdep.pdev);
27638 +
27639 +    /* release the interrupt */
27640 +    free_irq (dev->dev_osdep.pdev->irq, dev);
27641 +
27642 +    /* remove the procfs entry */
27643 +    elan4_procfs_device_fini (dev);
27644 +
27645 +    /* unmap the device and finalise the data structures */
27646 +    elan4_finalise_device (dev);
27647 +    
27648 +    kfree (dev);
27649 +}
27650 +
27651 +/*
27652 + * Maintain reference counts on the device
27653 + */
27654 +ELAN4_DEV *
27655 +elan4_reference_device (int instance, int state)
27656 +{
27657 +    ELAN4_DEV *dev = elan4_devices[instance];
27658 +
27659 +    if (dev == NULL)
27660 +       return (NULL);
27661 +
27662 +    kmutex_lock (&dev->dev_lock);
27663 +
27664 +    if ((dev->dev_state & state) == 0)
27665 +    {
27666 +       kmutex_unlock (&dev->dev_lock);
27667 +       return (NULL);
27668 +    }
27669 +
27670 +    dev->dev_references++;
27671 +    kmutex_unlock (&dev->dev_lock);
27672 +
27673 +#ifdef MODULE
27674 +    MOD_INC_USE_COUNT;
27675 +#endif
27676 +
27677 +#ifdef CONFIG_MPSAS
27678 +    sas_set_position(dev);
27679 +#endif
27680 +
27681 +    return (dev);
27682 +}
27683 +
27684 +void
27685 +elan4_dereference_device (ELAN4_DEV *dev)
27686 +{
27687 +    kmutex_lock (&dev->dev_lock);
27688 +    dev->dev_references--;
27689 +    kmutex_unlock (&dev->dev_lock);
27690 +
27691 +#ifdef MODULE
27692 +    MOD_DEC_USE_COUNT;
27693 +#endif
27694 +}
27695 +
27696 +static void
27697 +elan4_shutdown_devices(int panicing)
27698 +{
27699 +    ELAN4_DEV *dev;
27700 +    unsigned long flags;
27701 +    register int i;
27702 +
27703 +    local_irq_save (flags);
27704 +    for (i = 0; i < ELAN4_MAX_CONTROLLER; i++)
27705 +    {
27706 +       if ((dev = elan4_devices[i]) != NULL)
27707 +       {
27708 +           printk(KERN_INFO "elan%d: forcing link into reset\n", dev->dev_instance);
27709 +
27710 +           /* set the inputters to discard everything */
27711 +           if (! panicing) spin_lock (&dev->dev_haltop_lock);
27712 +
27713 +           if (dev->dev_discard_lowpri_count++ == 0)
27714 +               elan4_set_schedstatus (dev, 0);
27715 +           if (dev->dev_discard_highpri_count++ == 0)
27716 +               elan4_set_schedstatus (dev, 0);
27717 +
27718 +           if (! panicing) spin_unlock (&dev->dev_haltop_lock);
27719 +
27720 +           /* ideally we'd like to halt all the outputters too,
27721 +            * however this will prevent the kernel comms flushing
27722 +            * to work correctly .....
27723 +            */
27724 +       }
27725 +    }
27726 +    local_irq_restore (flags);
27727 +}
27728 +
27729 +/*
27730 + * /dev/elan4/controlX - control device
27731 + *
27732 + */
27733 +static int
27734 +control_open (struct inode *inode, struct file *file)
27735 +{
27736 +    ELAN4_DEV       *dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STOPPED | ELAN4_STATE_STARTED);
27737 +    CONTROL_PRIVATE *pr;
27738 +    
27739 +    if (dev == NULL)
27740 +       return (-ENXIO);
27741 +    
27742 +    if ((pr = (CONTROL_PRIVATE *) kmalloc (sizeof (CONTROL_PRIVATE), GFP_KERNEL)) == NULL)
27743 +    {
27744 +       elan4_dereference_device (dev);
27745 +       
27746 +       return (-ENOMEM);
27747 +    }
27748 +
27749 +    PRINTF (DBG_USER, DBG_FILE, "control_open: dev=%p pr=%p\n", dev, pr);
27750 +
27751 +    pr->pr_dev           = dev;
27752 +    pr->pr_boundary_scan = 0;
27753 +
27754 +    file->private_data = (void *) pr;
27755 +
27756 +    return (0);
27757 +}
27758 +
27759 +static int
27760 +control_release (struct inode *inode, struct file *file)
27761 +{
27762 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
27763 +    ELAN4_DEV       *dev = pr->pr_dev;
27764 +
27765 +    PRINTF (DBG_DEVICE, DBG_FILE, "control_release: pr=%p\n", pr);
27766 +
27767 +    //if (pr->pr_boundary_scan)
27768 +    //    elan4_clear_boundary_scan (dev, pr);
27769 +
27770 +    elan4_dereference_device (dev);
27771 +
27772 +    kfree (pr);
27773 +
27774 +    return (0);
27775 +}
27776 +
27777 +static int
27778 +control_ioctl (struct inode *inode, struct file *file, 
27779 +                    unsigned int cmd, unsigned long arg)
27780 +{
27781 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
27782 +
27783 +    PRINTF (DBG_DEVICE, DBG_FILE, "control_ioctl: cmd=%x arg=%lx\n", cmd, arg);
27784 +
27785 +    switch (cmd)
27786 +    {
27787 +    case ELAN4IO_DEVINFO:
27788 +       if (copy_to_user ((void *) arg, &pr->pr_dev->dev_devinfo, sizeof (ELAN_DEVINFO)))
27789 +           return (-EFAULT);
27790 +       return (0);
27791 +
27792 +    case ELAN4IO_GET_POSITION:
27793 +    {
27794 +       ELAN_POSITION pos;
27795 +
27796 +       elan4_get_position (pr->pr_dev, &pos);
27797 +
27798 +       if (copy_to_user ((void *) arg, &pos, sizeof (ELAN_POSITION)))
27799 +           return (-EFAULT);
27800 +
27801 +       return (0);
27802 +    }
27803 +
27804 +    case ELAN4IO_SET_POSITION:
27805 +    {
27806 +       ELAN_POSITION pos;
27807 +
27808 +       if (copy_from_user (&pos, (void *) arg, sizeof (ELAN_POSITION)))
27809 +           return (-EFAULT);
27810 +       
27811 +       return (elan4_set_position (pr->pr_dev, &pos));
27812 +    }
27813 +
27814 +    case ELAN4IO_OLD_GET_PARAMS:
27815 +    {
27816 +       ELAN_PARAMS params;
27817 +       unsigned short mask;
27818 +
27819 +       elan4_get_params (pr->pr_dev, &params, &mask);
27820 +
27821 +       if (copy_to_user ((void *) arg, &params, sizeof (ELAN_PARAMS)))
27822 +           return (-EFAULT);
27823 +
27824 +       return (0);
27825 +    }
27826 +
27827 +    case ELAN4IO_OLD_SET_PARAMS:
27828 +    {
27829 +       ELAN_PARAMS params;
27830 +
27831 +       if (copy_from_user (&params, (void *) arg, sizeof (ELAN_PARAMS)))
27832 +           return (-EFAULT);
27833 +       
27834 +       elan4_set_params (pr->pr_dev, &params, 3);
27835 +       
27836 +       return (0);
27837 +    }
27838 +
27839 +    case ELAN4IO_SET_PARAMS:
27840 +    {
27841 +       ELAN4IO_PARAMS_STRUCT args;
27842 +
27843 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PARAMS_STRUCT)))
27844 +           return (-EFAULT);
27845 +       
27846 +       elan4_set_params (pr->pr_dev, &args.p_params, args.p_mask);
27847 +       
27848 +       return (0);
27849 +    }
27850 +
27851 +    case ELAN4IO_GET_PARAMS:
27852 +    {
27853 +       ELAN4IO_PARAMS_STRUCT args;
27854 +
27855 +       elan4_get_params (pr->pr_dev, &args.p_params, &args.p_mask);
27856 +
27857 +       if (copy_to_user ((void *) arg, &args, sizeof (ELAN_PARAMS)))
27858 +           return (-EFAULT);
27859 +
27860 +       return (0);
27861 +    }
27862 +    }
27863 +
27864 +    return (-EINVAL);
27865 +}
27866 +
27867 +static int
27868 +control_mmap (struct file *file, struct vm_area_struct *vma)
27869 +{
27870 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
27871 +    unsigned        bar = OFF_TO_BAR (vma->vm_pgoff << PAGE_SHIFT);
27872 +    unsigned long    off = OFF_TO_OFFSET (vma->vm_pgoff << PAGE_SHIFT);
27873 +    long            len = vma->vm_end - vma->vm_start;
27874 +
27875 +    PRINTF (DBG_USER, DBG_FILE, "control_mmap: pr=%p bar=%x off=%x\n", pr, bar, off);
27876 +
27877 +    /* check bar number and translate the standard pseudo bars */
27878 +    switch (bar)
27879 +    {
27880 +    case ELAN4_BAR_SDRAM:
27881 +    case ELAN4_BAR_REGISTERS:
27882 +       break;
27883 +
27884 +    default:
27885 +       return (-EINVAL);
27886 +    }
27887 +
27888 +    if (off < 0 || (off + len) > pci_resource_len (pr->pr_dev->dev_osdep.pdev, bar))
27889 +       return (-EINVAL);
27890 +
27891 +    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
27892 +
27893 +    if (__io_remap_page_range (vma->vm_start, pci_resource_start (pr->pr_dev->dev_osdep.pdev, bar) + off, len, vma->vm_page_prot))
27894 +       return (-EAGAIN);
27895 +
27896 +    return (0);
27897 +}
27898 +
27899 +/*
27900 + * /dev/elan4/sdramX - sdram access device
27901 + */
27902 +static void 
27903 +mem_freepage (MEM_PRIVATE *pr, MEM_PAGE *pg)
27904 +{
27905 +    PRINTF (DBG_USER, DBG_MEM, "mem_freepage: pr=%p pgoff=%lx pg=%p ref=%d\n", pr, pg->pg_pgoff, pg, pg->pg_ref);
27906 +
27907 +    elan4_sdram_free (pr->pr_dev, pg->pg_addr, SDRAM_PAGE_SIZE);
27908 +    kfree (pg);
27909 +}
27910 +
27911 +static MEM_PAGE *
27912 +mem_getpage (MEM_PRIVATE *pr, unsigned long pgoff)
27913 +{
27914 +    int       hashval = MEM_HASH (pgoff);
27915 +    MEM_PAGE *npg = NULL;
27916 +    MEM_PAGE *pg;
27917 +
27918 +    ASSERT ((pgoff & SDRAM_PGOFF_OFFSET) == 0);
27919 +
27920 +    PRINTF (DBG_USER, DBG_MEM, "mem_getpage: pr=%p pgoff=%lx\n", pr, pgoff);
27921 +    
27922 + again:
27923 +    spin_lock (&pr->pr_lock);
27924 +    for (pg = pr->pr_pages[hashval]; pg; pg = pg->pg_next)
27925 +       if (pg->pg_pgoff == pgoff)
27926 +           break;
27927 +    
27928 +    if (pg != NULL)
27929 +    {
27930 +       PRINTF (DBG_USER, DBG_MEM, "mem_getpage: pr=%p pgoff=%lx -> found %p addr=%x\n", pr, pgoff, pg, pg->pg_addr);
27931 +
27932 +       pg->pg_ref++;
27933 +       spin_unlock (&pr->pr_lock);
27934 +
27935 +       if (npg != NULL)                                        /* we'd raced and someone else had created */
27936 +           mem_freepage (pr, npg);                             /* this page - so free off our new one */
27937 +       return (pg);
27938 +    }
27939 +    
27940 +    if (npg != NULL)                                           /* didn't find the page, so insert the */
27941 +    {                                                          /* new one we've just created */
27942 +       npg->pg_next = pr->pr_pages[hashval];
27943 +       pr->pr_pages[hashval] = npg;
27944 +       
27945 +       spin_unlock (&pr->pr_lock);
27946 +       return (npg);
27947 +    }
27948 +    
27949 +    spin_unlock (&pr->pr_lock);                                        /* drop spinlock before creating a new page */
27950 +    
27951 +    if ((npg = (MEM_PAGE *) kmalloc (sizeof (MEM_PAGE), GFP_KERNEL)) == NULL)
27952 +       return (NULL);
27953 +    
27954 +    if ((npg->pg_addr = elan4_sdram_alloc (pr->pr_dev, SDRAM_PAGE_SIZE)) == 0)
27955 +    {
27956 +       kfree (npg);
27957 +       return (NULL);
27958 +    }
27959 +
27960 +#ifndef CONFIG_MPSAS
27961 +    /* zero the page before returning it to the user */
27962 +    elan4_sdram_zeroq_sdram (pr->pr_dev, npg->pg_addr, SDRAM_PAGE_SIZE);
27963 +#endif
27964 +    
27965 +    npg->pg_pgoff = pgoff;
27966 +    npg->pg_ref   = 1;
27967 +    
27968 +    /* created a new page - so have to rescan before inserting it */
27969 +    goto again;
27970 +}
27971 +
27972 +static void
27973 +mem_droppage (MEM_PRIVATE *pr, unsigned long pgoff, int dontfree)
27974 +{
27975 +    MEM_PAGE **ppg;
27976 +    MEM_PAGE  *pg;
27977 +
27978 +    spin_lock (&pr->pr_lock);
27979 +    for (ppg = &pr->pr_pages[MEM_HASH(pgoff)]; *ppg; ppg = &(*ppg)->pg_next)
27980 +       if ((*ppg)->pg_pgoff == pgoff)
27981 +           break;
27982 +
27983 +    pg = *ppg;
27984 +
27985 +    ASSERT (*ppg != NULL);
27986 +
27987 +    PRINTF (DBG_USER, DBG_MEM, "mem_droppage: pr=%p pgoff=%lx pg=%p ref=%d dontfree=%d\n", pr, pgoff, (*ppg), (*ppg)->pg_ref, dontfree);
27988 +
27989 +    if (--pg->pg_ref == 0 && !dontfree)
27990 +    {
27991 +       *ppg = pg->pg_next;
27992 +
27993 +       mem_freepage (pr, pg);
27994 +    }
27995 +
27996 +    spin_unlock (&pr->pr_lock);
27997 +}
27998 +
27999 +static int
28000 +mem_open (struct inode *inode, struct file *file)
28001 +{
28002 +    ELAN4_DEV   *dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STARTED);
28003 +    MEM_PRIVATE *pr;
28004 +    register int i;
28005 +
28006 +    if (dev == NULL)
28007 +       return (-ENXIO);
28008 +
28009 +    if ((pr = (MEM_PRIVATE *) kmalloc (sizeof (MEM_PRIVATE), GFP_KERNEL)) == NULL)
28010 +    {
28011 +       elan4_dereference_device (dev);
28012 +       return (-ENOMEM);
28013 +    }
28014 +
28015 +    spin_lock_init (&pr->pr_lock);
28016 +    pr->pr_dev = dev;
28017 +    for (i = 0; i < MEM_HASH_SIZE; i++)
28018 +       pr->pr_pages[i] = NULL;
28019 +
28020 +    file->private_data = (void *) pr;
28021 +    
28022 +    return (0);
28023 +}
28024 +
28025 +static int
28026 +mem_release (struct inode *node, struct file *file)
28027 +{
28028 +    MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data;
28029 +    MEM_PAGE    *pg, *next;
28030 +    int          i;
28031 +
28032 +    /* free off any pages that we'd allocated */
28033 +    spin_lock (&pr->pr_lock);
28034 +    for (i = 0; i < MEM_HASH_SIZE; i++)
28035 +    {
28036 +       for (pg = pr->pr_pages[i]; pg; pg = next)
28037 +       {
28038 +           next = pg->pg_next;
28039 +           mem_freepage (pr, pg);
28040 +       }
28041 +    }
28042 +    spin_unlock (&pr->pr_lock);
28043 +
28044 +    elan4_dereference_device (pr->pr_dev);
28045 +    kfree (pr);
28046 +
28047 +    return (0);
28048 +}
28049 +
28050 +static int
28051 +mem_ioctl (struct inode *inode, struct file *file, 
28052 +                 unsigned int cmd, unsigned long arg)
28053 +{
28054 +    return (-EINVAL);
28055 +}
28056 +
28057 +static void 
28058 +mem_vma_open (struct vm_area_struct *vma)
28059 +{
28060 +    MEM_PRIVATE   *pr = (MEM_PRIVATE *) vma->vm_private_data;
28061 +    unsigned long addr;
28062 +    unsigned long pgoff;
28063 +
28064 +    PRINTF (DBG_USER, DBG_MEM, "mem_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
28065 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
28066 +    
28067 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28068 +       mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK);
28069 +}
28070 +
28071 +static void
28072 +mem_vma_close (struct vm_area_struct *vma)
28073 +{
28074 +    MEM_PRIVATE  *pr  = (MEM_PRIVATE *) vma->vm_private_data;
28075 +    unsigned long addr;
28076 +    unsigned long pgoff;
28077 +
28078 +    PRINTF (DBG_USER, DBG_MEM, "mem_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
28079 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
28080 +
28081 +    /* NOTE: the call to close may not have the same vm_start/vm_end values as 
28082 +     *       were passed into mmap()/open() - since if a partial unmap had occurred
28083 +     *       then the vma could have been shrunk or even split.
28084 +     *
28085 +     *       if the vma is split then a vma_open() will be called for the top
28086 +     *       portion - thus causing the reference counts to become incorrect.
28087 +     *
28088 +     * We drop the reference to any pages we're notified about - so they get freed
28089 +     * earlier than when the device is finally released.
28090 +     */
28091 +    for (pgoff = vma->vm_pgoff, addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28092 +       mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0);
28093 +}
28094 +
28095 +struct vm_operations_struct mem_vm_ops = {
28096 +    open:              mem_vma_open,
28097 +    close:             mem_vma_close,
28098 +};
28099 +
28100 +static int
28101 +mem_mmap (struct file *file, struct vm_area_struct *vma)
28102 +{
28103 +    MEM_PRIVATE  *pr = (MEM_PRIVATE *) file->private_data;
28104 +    MEM_PAGE     *pg;
28105 +    unsigned long addr;
28106 +    unsigned long pgoff;
28107 +
28108 +    PRINTF (DBG_USER, DBG_MEM, "mem_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
28109 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, file);
28110 +
28111 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28112 +    {
28113 +       if ((pg = mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK)) == NULL)
28114 +           goto failed;
28115 +
28116 +       PRINTF (DBG_USER, DBG_MEM, "mem_mmap: addr %lx -> pg=%p sdram=%x+%x bar=%lx\n",
28117 +               addr, pg, pg->pg_addr, (pgoff & SDRAM_PGOFF_OFFSET) * PAGE_SIZE,
28118 +               pci_resource_start (pr->pr_dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
28119 +
28120 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
28121 +
28122 +       if (! (pr->pr_dev->dev_features & ELAN4_FEATURE_NO_WRITE_COMBINE))
28123 +           vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
28124 +
28125 +       if (__io_remap_page_range (addr, pci_resource_start (pr->pr_dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) +
28126 +                                pg->pg_addr + (pgoff & SDRAM_PGOFF_OFFSET) * PAGE_SIZE, PAGE_SIZE, vma->vm_page_prot))
28127 +       {
28128 +           mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0); /* drop our reference to this page */
28129 +           goto failed;
28130 +       }
28131 +
28132 +#if defined(conditional_schedule)
28133 +       conditional_schedule();
28134 +#endif
28135 +    }
28136 +
28137 +    /* Don't allow these pages to be swapped out or dumped */
28138 +    vma->vm_flags |= (VM_RESERVED | VM_IO);
28139 +
28140 +    vma->vm_ops          = &mem_vm_ops;
28141 +    vma->vm_file         = file;
28142 +    vma->vm_private_data = (void *) pr;
28143 +
28144 +    return (0);
28145 +
28146 + failed:
28147 +    /* free off any pages we've already allocated/referenced */
28148 +    while (pgoff-- > vma->vm_pgoff)
28149 +       mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0);
28150 +
28151 +    return (-ENOMEM);
28152 +}
28153 +
28154 +/*
28155 + * /dev/elan4/userX - control device
28156 + *
28157 + */
28158 +static inline void
28159 +user_private_free (USER_PRIVATE *pr)
28160 +{
28161 +    ELAN4_DEV *dev = pr->pr_uctx->uctx_ctxt.ctxt_dev;
28162 +
28163 +    ASSERT (atomic_read (&pr->pr_ref) == 0);
28164 +
28165 +    user_free (pr->pr_uctx);
28166 +    kfree (pr);
28167 +
28168 +    elan4_dereference_device (dev);
28169 +}
28170 +
28171 +static void
28172 +user_coproc_release (void *arg, struct mm_struct *mm)
28173 +{
28174 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28175 +
28176 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_coproc_release: ref=%d\n", atomic_read (&pr->pr_ref));
28177 +
28178 +    elan4mmu_invalidate_ctxt (&pr->pr_uctx->uctx_ctxt);
28179 +
28180 +    pr->pr_mm = NULL;
28181 +
28182 +    if (atomic_dec_and_test (&pr->pr_ref))
28183 +       user_private_free (pr);
28184 +}
28185 +
28186 +static void
28187 +user_coproc_sync_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
28188 +{
28189 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28190 +
28191 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_coproc_sync_range: start=%lx end=%lx\n", start, end);
28192 +
28193 +    /* XXXX: this is intended to sync the modified bit from our page tables,
28194 +     *       into the main cpu's modified bits - however since we do not
28195 +     *       synchronize our modified bit on a coproc_invalidate_page() call,
28196 +     *       then it could get lost if we modify the page after the last
28197 +     *       modification and writepage has occurred. Hence we invalidate
28198 +     *       all translations and allow it to refault.
28199 +     */
28200 +
28201 +    user_unload_main (pr->pr_uctx, start, end - start);
28202 +}
28203 +
28204 +static void
28205 +user_coproc_invalidate_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
28206 +{
28207 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28208 +
28209 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_coproc_invalidate_range: start=%lx end=%lx\n", start, end);
28210 +
28211 +    user_unload_main (pr->pr_uctx, start, end - start);
28212 +}
28213 +
28214 +static void
28215 +user_coproc_update_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
28216 +{
28217 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28218 +
28219 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_coproc_update_range: start=%lx end=%lx\n", start, end);
28220 +
28221 +#if defined(CONFIG_MPSAS)
28222 +    if (((end - start) >> PAGE_SHIFT) > 16)
28223 +       return;
28224 +#endif
28225 +
28226 +    user_update_main (pr->pr_uctx, mm, start, end - start);
28227 +}
28228 +
28229 +static void
28230 +user_coproc_change_protection (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot)
28231 +{
28232 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28233 +
28234 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_coproc_change_protection: start=%lx end=%lx\n", start, end);
28235 +
28236 +    user_unload_main (pr->pr_uctx, start, end - start);
28237 +}
28238 +
28239 +static void
28240 +user_coproc_sync_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
28241 +{
28242 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28243 +
28244 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_coproc_sync_page: addr=%lx\n", addr);
28245 +
28246 +    user_unload_main (pr->pr_uctx, addr & PAGE_MASK, PAGE_SIZE);
28247 +}
28248 +
28249 +static void
28250 +user_coproc_invalidate_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
28251 +{
28252 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28253 +
28254 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_coproc_invalidate_page: addr=%lx\n", addr);
28255 +
28256 +    user_unload_main (pr->pr_uctx, addr & PAGE_MASK, PAGE_SIZE);
28257 +}
28258 +
28259 +static void
28260 +user_coproc_update_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
28261 +{
28262 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28263 +
28264 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_coproc_update_page: addr=%lx\n", addr);
28265 +
28266 +    user_update_main (pr->pr_uctx, vma->vm_mm, addr & PAGE_MASK, PAGE_SIZE);
28267 +}
28268 +
28269 +static int
28270 +user_open (struct inode *inode, struct file *file)
28271 +{
28272 +    ELAN4_DEV    *dev;
28273 +    USER_PRIVATE *pr;
28274 +    USER_CTXT    *uctx;
28275 +    
28276 +    PRINTF (DBG_USER, DBG_FILE, "user_open: mm=%p users=%d count=%d\n", current->mm,
28277 +           atomic_read (&current->mm->mm_users), atomic_read (&current->mm->mm_count));
28278 +
28279 +    if ((dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STARTED)) == NULL)
28280 +       return (-ENXIO);
28281 +    
28282 +    if ((pr = (USER_PRIVATE *) kmalloc (sizeof (USER_PRIVATE), GFP_KERNEL)) == NULL)
28283 +    {
28284 +       elan4_dereference_device (dev);
28285 +       return (-ENOMEM);
28286 +    }
28287 +
28288 +    uctx = user_alloc (dev);
28289 +
28290 +    if (IS_ERR(uctx))
28291 +    {
28292 +       elan4_dereference_device (dev);
28293 +       kfree (pr);
28294 +
28295 +       return PTR_ERR(uctx);
28296 +    }
28297 +
28298 +    /* initialise refcnt to 2 - one for "file", one for the coproc ops */
28299 +    atomic_set (&pr->pr_ref, 2);
28300 +
28301 +    pr->pr_uctx = uctx;
28302 +    pr->pr_mm   = current->mm;
28303 +
28304 +    /* register a coproc callback to notify us of translation changes */
28305 +    pr->pr_coproc.arg               = (void *) pr;
28306 +    pr->pr_coproc.release           = user_coproc_release;
28307 +    pr->pr_coproc.sync_range        = user_coproc_sync_range;
28308 +    pr->pr_coproc.invalidate_range  = user_coproc_invalidate_range;
28309 +    pr->pr_coproc.update_range      = user_coproc_update_range;
28310 +    pr->pr_coproc.change_protection = user_coproc_change_protection;
28311 +    pr->pr_coproc.sync_page         = user_coproc_sync_page;
28312 +    pr->pr_coproc.invalidate_page   = user_coproc_invalidate_page;
28313 +    pr->pr_coproc.update_page       = user_coproc_update_page;
28314 +    
28315 +    spin_lock (&current->mm->page_table_lock);
28316 +    register_coproc_ops (current->mm, &pr->pr_coproc);
28317 +    spin_unlock (&current->mm->page_table_lock);
28318 +
28319 +    file->private_data = (void *) pr;
28320 +
28321 +    return (0);
28322 +}
28323 +
28324 +static int
28325 +user_release (struct inode *inode, struct file *file)
28326 +{
28327 +    USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
28328 +
28329 +    PRINTF (pr->pr_uctx, DBG_FILE, "user_release: ref=%d\n", atomic_read (&pr->pr_ref));
28330 +
28331 +    if (atomic_dec_and_test (&pr->pr_ref))
28332 +       user_private_free (pr);
28333 +
28334 +    return (0);
28335 +}
28336 +
28337 +static int
28338 +user_ioctl (struct inode *inode, struct file *file, 
28339 +           unsigned int cmd, unsigned long arg)
28340 +{
28341 +    USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
28342 +    USER_CTXT    *uctx = pr->pr_uctx;
28343 +    int           res  = 0;
28344 +
28345 +    PRINTF (uctx, DBG_FILE, "user_ioctl: cmd=%x arg=%lx\n", cmd, arg);
28346 +
28347 +    if (current->mm != pr->pr_mm)
28348 +       return (-EINVAL);
28349 +    
28350 +    switch (cmd)
28351 +    {
28352 +    case ELAN4IO_DEVINFO:
28353 +       if (copy_to_user ((void *) arg, &uctx->uctx_ctxt.ctxt_dev->dev_devinfo, sizeof (ELAN_DEVINFO)))
28354 +           return (-EFAULT);
28355 +       return (0);
28356 +
28357 +    case ELAN4IO_POSITION:
28358 +    {
28359 +       ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
28360 +
28361 +       if (copy_to_user ((void *) arg, &dev->dev_position, sizeof (ELAN_POSITION)))
28362 +           return (-EFAULT);
28363 +       return (0);
28364 +    }
28365 +
28366 +    case ELAN4IO_FREE:
28367 +       spin_lock (&current->mm->page_table_lock);
28368 +       if (pr->pr_mm != current->mm)
28369 +           spin_unlock (&current->mm->page_table_lock);
28370 +       else
28371 +       {
28372 +           unregister_coproc_ops (current->mm, &pr->pr_coproc);
28373 +           spin_unlock (&current->mm->page_table_lock);
28374 +
28375 +           user_coproc_release (pr, current->mm);
28376 +       }
28377 +       return (0);
28378 +
28379 +    case ELAN4IO_ATTACH:
28380 +    {
28381 +       ELAN_CAPABILITY *cap;
28382 +
28383 +       if ((cap = kmalloc (sizeof (ELAN_CAPABILITY), GFP_KERNEL)) == NULL)
28384 +           return (-ENOMEM);
28385 +
28386 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
28387 +           res = -EFAULT;
28388 +       else if ((res = user_attach (uctx, cap)) == 0 && 
28389 +                copy_to_user ((void *) arg, cap, sizeof (ELAN_CAPABILITY)))
28390 +       {
28391 +           user_detach (uctx, cap);
28392 +           res = -EFAULT;
28393 +       }
28394 +
28395 +       kfree (cap);
28396 +       return (res);
28397 +    }
28398 +
28399 +    case ELAN4IO_DETACH:
28400 +    {
28401 +       ELAN_CAPABILITY *cap;
28402 +
28403 +       if ((cap = kmalloc (sizeof (ELAN_CAPABILITY), GFP_KERNEL)) == NULL)
28404 +           return (-ENOMEM);
28405 +
28406 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
28407 +           res = -EFAULT;
28408 +       else
28409 +           user_detach (uctx, cap);
28410 +
28411 +       kfree (cap);
28412 +       return (res);
28413 +    }
28414 +
28415 +    case ELAN4IO_BLOCK_INPUTTER:
28416 +       user_block_inputter (uctx, arg);
28417 +       return (0);
28418 +
28419 +    case ELAN4IO_ADD_P2PVP:
28420 +    {
28421 +       ELAN4IO_ADD_P2PVP_STRUCT *args;
28422 +       
28423 +       if ((args = kmalloc (sizeof (ELAN4IO_ADD_P2PVP_STRUCT), GFP_KERNEL)) == NULL)
28424 +           return (-ENOMEM);
28425 +
28426 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN4IO_ADD_P2PVP_STRUCT)))
28427 +           res = -EFAULT;
28428 +       else 
28429 +           res = user_add_p2pvp (uctx, args->vp_process, &args->vp_capability);
28430 +       
28431 +       kfree (args);
28432 +       return (res);
28433 +    }
28434 +
28435 +    case ELAN4IO_ADD_BCASTVP:
28436 +    {
28437 +       ELAN4IO_ADD_BCASTVP_STRUCT args;
28438 +
28439 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ADD_BCASTVP_STRUCT)))
28440 +           return (-EFAULT);
28441 +
28442 +       return (user_add_bcastvp (uctx, args.vp_process, args.vp_lowvp, args.vp_highvp));
28443 +    }
28444 +
28445 +    case ELAN4IO_REMOVEVP:
28446 +       return (user_removevp (uctx, arg));
28447 +
28448 +    case ELAN4IO_SET_ROUTE:
28449 +    {
28450 +       ELAN4IO_ROUTE_STRUCT args;
28451 +       
28452 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
28453 +           return (-EFAULT);
28454 +
28455 +       return (user_set_route (uctx, args.rt_process, &args.rt_route));
28456 +    }
28457 +
28458 +    case ELAN4IO_RESET_ROUTE:
28459 +    {
28460 +       ELAN4IO_ROUTE_STRUCT args;
28461 +       
28462 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
28463 +           return (-EFAULT);
28464 +
28465 +       return (user_reset_route (uctx, args.rt_process));
28466 +    }
28467 +
28468 +    case ELAN4IO_GET_ROUTE:
28469 +    {
28470 +       ELAN4IO_ROUTE_STRUCT args;
28471 +       
28472 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
28473 +           return (-EFAULT);
28474 +
28475 +       if ((res = user_get_route (uctx, args.rt_process, &args.rt_route)) == 0 &&
28476 +           copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ROUTE_STRUCT)))
28477 +           res = -EFAULT;
28478 +
28479 +       return (res);
28480 +    }
28481 +
28482 +    case ELAN4IO_CHECK_ROUTE:
28483 +    {
28484 +       ELAN4IO_ROUTE_STRUCT args;
28485 +       
28486 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
28487 +           return (-EFAULT);
28488 +
28489 +       if ((res = user_check_route (uctx, args.rt_process, &args.rt_route, &args.rt_error)) == 0 &&
28490 +           copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ROUTE_STRUCT)))
28491 +           res = -EFAULT;
28492 +
28493 +       return (res);
28494 +    }
28495 +       
28496 +    case ELAN4IO_ALLOCCQ:
28497 +    {
28498 +       ELAN4IO_ALLOCCQ_STRUCT args;
28499 +       USER_CQ              *ucq;
28500 +
28501 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ALLOCCQ_STRUCT)))
28502 +           return (-EFAULT);
28503 +       
28504 +       ucq = user_alloccq (uctx, args.cq_size & CQ_SizeMask, args.cq_perm & CQ_PermissionMask,
28505 +                           (args.cq_type & ELAN4IO_CQ_TYPE_REORDER) ? UCQ_REORDER : 0);
28506 +       if (IS_ERR (ucq))
28507 +           return PTR_ERR (ucq);
28508 +       
28509 +       args.cq_indx = elan4_cq2idx (ucq->ucq_cq);
28510 +       
28511 +       if (copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ALLOCCQ_STRUCT)))
28512 +       {
28513 +           user_dropcq (uctx, ucq);
28514 +           return (-EFAULT);
28515 +       }
28516 +       
28517 +       /* don't drop the reference on the cq until the context is freed,
28518 +        * or the caller explicitly frees the cq */
28519 +       return (0);
28520 +    }
28521 +       
28522 +    case ELAN4IO_FREECQ:
28523 +    {
28524 +       USER_CQ *ucq;
28525 +       unsigned indx;
28526 +
28527 +       if (copy_from_user (&indx, (void *) arg, sizeof (unsigned)))
28528 +           return (-EFAULT);
28529 +
28530 +       if ((ucq = user_findcq (uctx, indx)) == NULL)           /* can't free unallocated cq */
28531 +           return (-EINVAL);
28532 +       
28533 +       user_dropcq (uctx, ucq);                                /* drop the reference we've just taken */
28534 +
28535 +       if ((ucq->ucq_flags & UCQ_SYSTEM))                      /* can't free device driver cq */
28536 +           return (-EINVAL);
28537 +
28538 +       user_dropcq (uctx, ucq);                                /* and the one held from the alloccq call */
28539 +
28540 +       return (0);
28541 +    }
28542 +
28543 +    case ELAN4IO_DUMPCQ:
28544 +    {
28545 +       ELAN4IO_DUMPCQ_STRUCT args;
28546 +       ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
28547 +       USER_CQ *ucq;
28548 +       void *buf;
28549 +       int i;
28550 +       
28551 +       if (copy_from_user (&args, (void *) arg, sizeof(ELAN4IO_DUMPCQ_STRUCT)))
28552 +           return (-EFAULT);
28553 +
28554 +       if ((ucq = user_findcq (uctx, args.cq_indx)) == NULL)
28555 +           return (-EINVAL);
28556 +
28557 +       if (args.bufsize)
28558 +       {
28559 +           E4_uint32 usedBufSize = min(args.cq_size, args.bufsize);
28560 +
28561 +           KMEM_ALLOC (buf, void *, usedBufSize, 0);
28562 +
28563 +           if (buf == NULL)
28564 +               return (-ENOMEM);
28565 +
28566 +           for (i=0; i<usedBufSize; i+=sizeof(int))
28567 +               ((int *)buf)[i/sizeof(int)] = elan4_sdram_readl(dev, ucq->ucq_cq->cq_space + i);
28568 +
28569 +           if (copy_to_user((void *)args.buffer, buf, usedBufSize))
28570 +           {
28571 +               KMEM_FREE(buf, args.bufsize);
28572 +               return (-EFAULT);
28573 +           }
28574 +           KMEM_FREE(buf, usedBufSize);
28575 +           args.bufsize = usedBufSize;
28576 +       }
28577 +
28578 +       args.cq_size = CQ_Size(ucq->ucq_cq->cq_size);
28579 +       args.cq_space = ucq->ucq_cq->cq_space;
28580 +
28581 +
28582 +       if (copy_to_user((void *)arg, &args, sizeof(ELAN4IO_DUMPCQ_STRUCT)))
28583 +       {
28584 +           return (-EFAULT);
28585 +       }
28586 +       
28587 +       user_dropcq (uctx, ucq); /* drop the reference we've just taken */
28588 +
28589 +       return (0);
28590 +    }
28591 +
28592 +    case ELAN4IO_SETPERM:
28593 +    {
28594 +       ELAN4IO_PERM_STRUCT args;
28595 +       
28596 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT)))
28597 +           return (-EFAULT);
28598 +
28599 +       return (user_setperm (uctx, args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm));
28600 +    }
28601 +
28602 +    case ELAN4IO_CLRPERM:
28603 +    {
28604 +       ELAN4IO_PERM_STRUCT args;
28605 +
28606 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT)))
28607 +           return (-EFAULT);
28608 +
28609 +       user_clrperm (uctx, args.ps_eaddr, args.ps_len);
28610 +       return (0);
28611 +    }
28612 +    
28613 +    case ELAN4IO_TRAPSIG:
28614 +    {
28615 +       ELAN4IO_TRAPSIG_STRUCT args;
28616 +
28617 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPSIG_STRUCT)))
28618 +           return (-EFAULT);
28619 +
28620 +       pr->pr_uctx->uctx_trap_pid   = current->pid;
28621 +       pr->pr_uctx->uctx_trap_signo = args.ts_signo;
28622 +       
28623 +       return (0);
28624 +    }
28625 +    
28626 +    case ELAN4IO_TRAPHANDLER:
28627 +    {
28628 +       ELAN4IO_TRAPHANDLER_STRUCT args;
28629 +
28630 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPHANDLER_STRUCT)))
28631 +           return (-EFAULT);
28632 +
28633 +       return (user_trap_handler (pr->pr_uctx, (ELAN4_USER_TRAP *)args.th_trapp, args.th_nticks));
28634 +    }
28635 +
28636 +    case ELAN4IO_REQUIRED_MAPPINGS:
28637 +    {
28638 +       ELAN4IO_REQUIRED_MAPPINGS_STRUCT args;
28639 +       
28640 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_REQUIRED_MAPPINGS_STRUCT)))
28641 +           return (-EFAULT);
28642 +
28643 +       pr->pr_uctx->uctx_upage_addr    = args.rm_upage_addr;
28644 +       pr->pr_uctx->uctx_trestart_addr = args.rm_trestart_addr;
28645 +
28646 +       return (0);
28647 +    }
28648 +
28649 +    case ELAN4IO_ALLOC_TRAP_QUEUES:
28650 +    {
28651 +       ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT args;
28652 +
28653 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT)))
28654 +           return (-EFAULT);
28655 +
28656 +       return (user_alloc_trap_queues (uctx, args.tq_ndproc_traps, args.tq_neproc_traps, 
28657 +                                       args.tq_ntproc_traps, args.tq_nthreads, args.tq_ndmas));
28658 +    }
28659 +
28660 +    case ELAN4IO_RESUME_EPROC_TRAP:
28661 +    {
28662 +       ELAN4IO_RESUME_EPROC_TRAP_STRUCT args;
28663 +       
28664 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_EPROC_TRAP_STRUCT)))
28665 +           return (-EFAULT);
28666 +
28667 +       return (user_resume_eproc_trap (pr->pr_uctx, args.rs_addr));
28668 +    }
28669 +
28670 +    case ELAN4IO_RESUME_CPROC_TRAP:
28671 +    {
28672 +       ELAN4IO_RESUME_CPROC_TRAP_STRUCT args;
28673 +       
28674 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_CPROC_TRAP_STRUCT)))
28675 +           return (-EFAULT);
28676 +
28677 +       return (user_resume_cproc_trap (pr->pr_uctx, args.rs_indx));
28678 +    }
28679 +
28680 +    case ELAN4IO_RESUME_DPROC_TRAP:
28681 +    {
28682 +       ELAN4IO_RESUME_DPROC_TRAP_STRUCT args;
28683 +       
28684 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_DPROC_TRAP_STRUCT)))
28685 +           return (-EFAULT);
28686 +
28687 +       return (user_resume_dproc_trap (pr->pr_uctx, &args.rs_desc));
28688 +    }
28689 +
28690 +    case ELAN4IO_RESUME_TPROC_TRAP:
28691 +    {
28692 +       ELAN4IO_RESUME_TPROC_TRAP_STRUCT args;
28693 +       
28694 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_TPROC_TRAP_STRUCT)))
28695 +           return (-EFAULT);
28696 +
28697 +       return (user_resume_tproc_trap (pr->pr_uctx, &args.rs_regs));
28698 +    }
28699 +
28700 +    case ELAN4IO_RESUME_IPROC_TRAP:
28701 +    {
28702 +       ELAN4IO_RESUME_IPROC_TRAP_STRUCT args;
28703 +       
28704 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_IPROC_TRAP_STRUCT)))
28705 +           return (-EFAULT);
28706 +
28707 +       return (user_resume_iproc_trap (pr->pr_uctx, args.rs_channel, args.rs_trans, 
28708 +                                       &args.rs_header, &args.rs_data));
28709 +    }
28710 +
28711 +    case ELAN4IO_FLUSH_ICACHE:
28712 +       elan4_flush_icache (&uctx->uctx_ctxt);
28713 +       return (0);
28714 +
28715 +    case ELAN4IO_STOP_CTXT:
28716 +       if (arg)
28717 +           user_swapout (uctx, UCTX_USER_STOPPED);
28718 +       else
28719 +           user_swapin (uctx, UCTX_USER_STOPPED);
28720 +       return (0);
28721 +
28722 +    case ELAN4IO_ALLOC_INTCOOKIE_TABLE:
28723 +    {
28724 +       ELAN_CAPABILITY *cap;
28725 +       INTCOOKIE_TABLE *tbl;
28726 +
28727 +       if ((cap = kmalloc (sizeof (ELAN_CAPABILITY), GFP_KERNEL)) == NULL)
28728 +           return (-ENOMEM);
28729 +
28730 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
28731 +           res = -EFAULT;
28732 +       else
28733 +       {
28734 +           tbl = intcookie_alloc_table(cap);
28735 +
28736 +           if (tbl == NULL)
28737 +               res = -ENOMEM;
28738 +           else
28739 +           {
28740 +               /* Install the intcookie table we've just created */
28741 +               spin_lock (&uctx->uctx_spinlock);
28742 +               if (uctx->uctx_intcookie_table != NULL)
28743 +                   res = -EBUSY;
28744 +               else
28745 +                   uctx->uctx_intcookie_table = tbl;
28746 +               spin_unlock (&uctx->uctx_spinlock);
28747 +               
28748 +               /* drop the table we created if there already was one */
28749 +               if (res != 0)
28750 +                   intcookie_free_table (tbl);
28751 +           }
28752 +       }
28753 +
28754 +       kfree (cap);
28755 +
28756 +       return (res);
28757 +    }
28758 +
28759 +    case ELAN4IO_FREE_INTCOOKIE_TABLE:
28760 +    {
28761 +       INTCOOKIE_TABLE *tbl;
28762 +
28763 +       spin_lock (&uctx->uctx_spinlock);
28764 +       tbl = uctx->uctx_intcookie_table;
28765 +       uctx->uctx_intcookie_table = NULL;
28766 +       spin_unlock (&uctx->uctx_spinlock);
28767 +
28768 +       if (tbl != NULL)
28769 +           intcookie_free_table (tbl);
28770 +
28771 +       return (tbl == NULL ? -EINVAL : 0);
28772 +    }
28773 +
28774 +    case ELAN4IO_ALLOC_INTCOOKIE:
28775 +    {
28776 +       /* For backwards compatibility with the old libs (pre 1.8.0)
28777 +        * we allocate an intcookie table on the first cookie
28778 +        * alloc if one hasn't been created already
28779 +        */
28780 +       if (uctx->uctx_intcookie_table == NULL)
28781 +       {
28782 +           ELAN_CAPABILITY *cap;
28783 +           INTCOOKIE_TABLE *tbl;
28784 +           
28785 +           if ((cap = kmalloc (sizeof (ELAN_CAPABILITY), GFP_KERNEL)) == NULL)
28786 +               return (-ENOMEM);
28787 +
28788 +           /* Create a dummy capability */
28789 +           elan_nullcap(cap);
28790 +
28791 +           /* Must be unique for each process on a node */
28792 +           cap->cap_mycontext = (int) ELAN4_TASK_HANDLE();
28793 +
28794 +           /* Create a new intcookie table */
28795 +           tbl = intcookie_alloc_table(cap);
28796 +
28797 +           /* Hang intcookie table off uctx */
28798 +           spin_lock (&uctx->uctx_spinlock);
28799 +           if (uctx->uctx_intcookie_table == NULL)
28800 +           {
28801 +               uctx->uctx_intcookie_table = tbl;
28802 +               spin_unlock (&uctx->uctx_spinlock);
28803 +           }
28804 +           else
28805 +           {
28806 +               spin_unlock (&uctx->uctx_spinlock);
28807 +               intcookie_free_table(tbl);
28808 +           }
28809 +
28810 +           kfree(cap);
28811 +       }
28812 +       
28813 +       return (intcookie_alloc (uctx->uctx_intcookie_table, arg));
28814 +    }
28815 +
28816 +    case ELAN4IO_FREE_INTCOOKIE:
28817 +       if (uctx->uctx_intcookie_table == NULL)
28818 +           return -EINVAL;
28819 +       else
28820 +           return (intcookie_free (uctx->uctx_intcookie_table, arg));
28821 +
28822 +    case ELAN4IO_ARM_INTCOOKIE:
28823 +       if (uctx->uctx_intcookie_table == NULL)
28824 +           return -EINVAL;
28825 +       else
28826 +           return (intcookie_arm (uctx->uctx_intcookie_table, arg));
28827 +
28828 +    case ELAN4IO_WAIT_INTCOOKIE:
28829 +       if (uctx->uctx_intcookie_table == NULL)
28830 +           return -EINVAL;
28831 +       else
28832 +           return (intcookie_wait (uctx->uctx_intcookie_table, arg));
28833 +
28834 +    case ELAN4IO_FIRE_INTCOOKIE:
28835 +    {
28836 +       ELAN4IO_FIRECAP_STRUCT *args;
28837 +
28838 +       if ((args = kmalloc (sizeof (ELAN4IO_FIRECAP_STRUCT), GFP_KERNEL)) == NULL)
28839 +           return (-ENOMEM);
28840 +
28841 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN4IO_FIRECAP_STRUCT)))
28842 +           res = -EFAULT;
28843 +       else
28844 +           res = intcookie_fire_cap (&args->fc_capability, args->fc_cookie);
28845 +       
28846 +       kfree (args);
28847 +
28848 +       return (res);
28849 +    }
28850 +
28851 +    case ELAN4IO_NETERR_MSG:
28852 +    {
28853 +       ELAN4IO_NETERR_MSG_STRUCT args;
28854 +       
28855 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_NETERR_MSG_STRUCT)))
28856 +           return (-EFAULT);
28857 +
28858 +       return (user_send_neterr_msg (uctx, args.nm_vp, args.nm_nctx, args.nm_retries, &args.nm_msg));
28859 +    }
28860 +
28861 +    case ELAN4IO_NETERR_TIMER:
28862 +    {
28863 +       unsigned long ticks = ((unsigned long) arg * HZ) / 1000;
28864 +
28865 +       PRINTF (uctx, DBG_NETERR, "elan4_neterr_timer: arg %ld inc %ld\n", arg, ticks);
28866 +
28867 +       mod_timer (&uctx->uctx_neterr_timer, (jiffies + (ticks > 0 ? ticks : 1)));
28868 +       return 0;
28869 +    }
28870 +               
28871 +    case ELAN4IO_NETERR_FIXUP:
28872 +    {
28873 +       ELAN4IO_NETERR_FIXUP_STRUCT args;
28874 +
28875 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_NETERR_FIXUP_STRUCT)))
28876 +           return (-EFAULT);
28877 +
28878 +       if (args.nf_sten)
28879 +           return (user_neterr_sten (uctx, args.nf_vp, args.nf_cookie, args.nf_waitforeop));
28880 +       else
28881 +           return (user_neterr_dma  (uctx, args.nf_vp, args.nf_cookie, args.nf_waitforeop));
28882 +    }
28883 +    default:
28884 +       PRINTF (uctx, DBG_FILE, "user_ioctl: invalid ioctl %x\n", cmd);
28885 +       return (-EINVAL);
28886 +    }
28887 +}
28888 +
28889 +static void
28890 +user_vma_open (struct vm_area_struct *vma)
28891 +{
28892 +    USER_PRIVATE *pr   = (USER_PRIVATE *) vma->vm_private_data;
28893 +    USER_CTXT    *uctx = pr->pr_uctx;
28894 +    unsigned long addr;
28895 +    unsigned long pgoff;
28896 +
28897 +    PRINTF (uctx, DBG_FILE, "user_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
28898 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
28899 +
28900 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28901 +       elan4_getcqa (&uctx->uctx_ctxt, pgoff);
28902 +}
28903 +
28904 +static void 
28905 +user_vma_close (struct vm_area_struct *vma)
28906 +{
28907 +    USER_PRIVATE *pr   = (USER_PRIVATE *) vma->vm_private_data;
28908 +    USER_CTXT    *uctx = pr->pr_uctx;
28909 +    unsigned long addr;
28910 +    unsigned long pgoff;
28911 +
28912 +    PRINTF (uctx, DBG_FILE, "user_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
28913 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
28914 +
28915 +    /* NOTE: the same comments apply as mem_vma_close */
28916 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28917 +       if (elan4_getcqa (&uctx->uctx_ctxt, pgoff) != NULL)
28918 +       {
28919 +           elan4_putcqa (&uctx->uctx_ctxt, pgoff);                     /* drop the reference we've just taken */
28920 +           elan4_putcqa (&uctx->uctx_ctxt, pgoff);                     /* and the one held by the mmap */
28921 +       }
28922 +}
28923 +
28924 +struct vm_operations_struct user_vm_ops = {
28925 +    open:              user_vma_open,
28926 +    close:             user_vma_close,
28927 +};
28928 +
28929 +static int
28930 +user_mmap (struct file *file, struct vm_area_struct *vma)
28931 +{
28932 +    USER_PRIVATE *pr    = (USER_PRIVATE *) file->private_data;
28933 +    USER_CTXT    *uctx  = pr->pr_uctx;
28934 +    ELAN4_DEV     *dev   = uctx->uctx_ctxt.ctxt_dev;
28935 +    ELAN4_CQA     *cqa;
28936 +    unsigned long addr;
28937 +    unsigned long pgoff;
28938 +    int           res;
28939 +    ioaddr_t      ioaddr;
28940 +    
28941 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28942 +    {
28943 +       switch (pgoff)
28944 +       {
28945 +       default:
28946 +           PRINTF (uctx, DBG_FILE, "user_mmap: command queue %ld mapping at %lx\n",  pgoff, addr);
28947 +           
28948 +           if ((cqa = elan4_getcqa (&uctx->uctx_ctxt, pgoff)) == NULL)
28949 +           {
28950 +               res = -EINVAL;
28951 +               goto failed;
28952 +           }
28953 +
28954 +           PRINTF (uctx, DBG_FILE, "user_mmap: cqa=%p idx=%d num=%d ref=%d\n", cqa, cqa->cqa_idx, cqa->cqa_cqnum, cqa->cqa_ref);
28955 +    
28956 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
28957 +
28958 +           if (! (dev->dev_features & ELAN4_FEATURE_NO_WRITE_COMBINE) && (cqa->cqa_type & CQ_Reorder) != 0)
28959 +               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
28960 +
28961 +           PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (%lx, %lx, %lx, %lx)\n",
28962 +                   addr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
28963 +                   (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize, PAGE_SIZE,
28964 +                   vma->vm_page_prot);
28965 +
28966 +           /* Don't allow these pages to be swapped out or dumped */
28967 +           vma->vm_flags |= (VM_RESERVED | VM_IO);
28968 +
28969 +           if (__io_remap_page_range (addr, 
28970 +                                      pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
28971 +                                      (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize,
28972 +                                      PAGE_SIZE, vma->vm_page_prot))
28973 +           {
28974 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range failed\n");
28975 +
28976 +               elan4_putcqa (&uctx->uctx_ctxt, pgoff);
28977 +               res = -ENOMEM;
28978 +               goto failed;
28979 +           }
28980 +           break;
28981 +           
28982 +       case ELAN4_OFF_USER_REGS:
28983 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
28984 +
28985 +           /* Don't allow these pages to be swapped out or dumped */
28986 +           vma->vm_flags |= (VM_RESERVED | VM_IO);
28987 +
28988 +           switch (dev->dev_devinfo.dev_revision_id)
28989 +           {
28990 +           case PCI_REVISION_ID_ELAN4_REVA:
28991 +               ioaddr = pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + ELAN4_REVA_REG_OFFSET + offsetof(E4_Registers, uRegs);
28992 +               break;
28993 +               
28994 +           case PCI_REVISION_ID_ELAN4_REVB:
28995 +               ioaddr = pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + ELAN4_REVB_REG_OFFSET + offsetof(E4_Registers, uRegs);
28996 +               break;
28997 +
28998 +           default:
28999 +               res = -EINVAL;
29000 +               goto failed;
29001 +           }
29002 +
29003 +           PRINTF (uctx, DBG_FILE, "user_mmap: user_regs at %lx ioaddr %lx prot %lx\n",
29004 +                   addr, ioaddr, vma->vm_page_prot.pgprot);
29005 +
29006 +           if (__io_remap_page_range (addr,  (ioaddr & PAGEMASK), PAGE_SIZE, vma->vm_page_prot))
29007 +           {                     
29008 +               res = -EAGAIN;
29009 +               goto failed;
29010 +           }
29011 +
29012 +           break;
29013 +           
29014 +       case ELAN4_OFF_USER_PAGE:
29015 +           PRINTF (uctx, DBG_FILE, "user_mmap: shared user page - kaddr=%lx uaddr=%lx phys=%lx\n", 
29016 +                   uctx->uctx_upage, addr, kmem_to_phys (uctx->uctx_upage));
29017 +
29018 +           /* we do not want to have this area swapped out, lock it */
29019 +           vma->vm_flags |= VM_LOCKED;
29020 +           
29021 +           /* Mark the page as reserved or else the remap_page_range() doesn't remap it */
29022 +           SetPageReserved(pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)));
29023 +       
29024 +           if (__remap_page_range (addr, kmem_to_phys (uctx->uctx_upage), PAGE_SIZE, vma->vm_page_prot))
29025 +           {
29026 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (user_page) failed\n");
29027 +               res = -ENOMEM;
29028 +               goto failed;
29029 +           }
29030 +           break;
29031 +           
29032 +       case ELAN4_OFF_TPROC_TRAMPOLINE:
29033 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
29034 +
29035 +           PRINTF (uctx, DBG_FILE, "user_mmap: tproc trampoline - kaddr=%lx uaddr=%lx phys=%lx\n", uctx->uctx_trampoline, addr, 
29036 +                   pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + uctx->uctx_trampoline + (addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)));
29037 +
29038 +           /* Don't allow these pages to be swapped out or dumped */
29039 +           vma->vm_flags |= (VM_RESERVED | VM_IO);
29040 +
29041 +           if (__io_remap_page_range (addr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + 
29042 +                                      uctx->uctx_trampoline + (addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)),
29043 +                                      PAGE_SIZE, vma->vm_page_prot))
29044 +           {
29045 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (tproc_trampoline) failed\n");
29046 +               res = -ENOMEM;
29047 +               goto failed;
29048 +           }
29049 +           break;
29050 +           
29051 +       case ELAN4_OFF_DEVICE_STATS:
29052 +           printk ("user_mmap: device_stats\n");
29053 +           break;
29054 +       }
29055 +       
29056 +    }
29057 +
29058 +    ASSERT (vma->vm_ops == NULL);
29059 +    
29060 +    /* Don't try to swap out physical pages.. */
29061 +    vma->vm_flags |= VM_RESERVED;
29062 +    
29063 +    /*
29064 +     * Don't dump addresses that are not real memory to a core file.
29065 +     */
29066 +    vma->vm_flags |= VM_IO;
29067 +
29068 +    vma->vm_ops          = &user_vm_ops;
29069 +    vma->vm_file         = file;
29070 +    vma->vm_private_data = (void *) pr;
29071 +    
29072 +    return (0);
29073 +
29074 + failed:
29075 +    for (addr -= PAGE_SIZE, pgoff--; addr >= vma->vm_start; addr -= PAGE_SIZE, pgoff--)
29076 +       elan4_putcqa (&uctx->uctx_ctxt, pgoff);         /* drop the reference we've just taken */
29077 +    return (res);
29078 +}
29079 +
29080 +/* driver entry points */
29081 +static int
29082 +elan4_open (struct inode *inode, struct file *file)
29083 +{
29084 +    PRINTF (DBG_USER, DBG_FILE, "elan4_open: device %d minor %d file=%p\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), file);
29085 +    
29086 +    switch (ELAN4_MINOR (inode))
29087 +    {
29088 +    case ELAN4_MINOR_CONTROL:
29089 +       return (control_open (inode, file));
29090 +    case ELAN4_MINOR_MEM:
29091 +       return (mem_open (inode, file));
29092 +    case ELAN4_MINOR_USER:
29093 +       return (user_open (inode, file));
29094 +    default:
29095 +       return (-ENXIO);
29096 +    }
29097 +}
29098 +
29099 +static int
29100 +elan4_release (struct inode *inode, struct file *file)
29101 +{
29102 +    PRINTF (DBG_USER, DBG_FILE, "elan4_release: device %d minor %d file=%p\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), file);
29103 +    
29104 +    switch (ELAN4_MINOR (inode))
29105 +    {
29106 +    case ELAN4_MINOR_CONTROL:
29107 +       return (control_release (inode, file));
29108 +    case ELAN4_MINOR_MEM:
29109 +       return (mem_release (inode, file));
29110 +    case ELAN4_MINOR_USER:
29111 +       return (user_release (inode, file));
29112 +    default:
29113 +       return (-ENXIO);
29114 +    }
29115 +}
29116 +
29117 +static int
29118 +elan4_ioctl (struct inode *inode, struct file *file, 
29119 +            unsigned int cmd, unsigned long arg)
29120 +{
29121 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl: device %d minor %d cmd %x\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), cmd);
29122 +    
29123 +    switch (ELAN4_MINOR (inode))
29124 +    {
29125 +    case ELAN4_MINOR_CONTROL:
29126 +       return (control_ioctl (inode, file, cmd, arg));
29127 +    case ELAN4_MINOR_MEM:
29128 +       return (mem_ioctl (inode, file, cmd, arg));
29129 +    case ELAN4_MINOR_USER:
29130 +       return (user_ioctl (inode, file, cmd, arg));
29131 +    default:
29132 +       return (-ENXIO);
29133 +    }
29134 +}
29135 +
29136 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
29137 +static int
29138 +elan4_ioctl32 (unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
29139 +{
29140 +    struct inode *inode = file->f_dentry->d_inode;
29141 +    extern int sys_ioctl (unsigned int fd, unsigned int cmd, unsigned long arg);
29142 +
29143 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl32: device %d minor %d cmd %x\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), cmd);
29144 +    
29145 +    if (ELAN4_MINOR (inode) == ELAN4_MINOR_USER)
29146 +    {
29147 +       USER_PRIVATE *pr    = (USER_PRIVATE *) file->private_data;
29148 +       USER_CTXT    *uctx  = pr->pr_uctx;
29149 +
29150 +       if (current->mm != pr->pr_mm)
29151 +           return -EINVAL;
29152 +       
29153 +       switch (cmd)
29154 +       {
29155 +       case ELAN4IO_SETPERM32:
29156 +       {
29157 +           ELAN4IO_PERM_STRUCT32 args;
29158 +           
29159 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT32)))
29160 +               return (-EFAULT);
29161 +           
29162 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: setperm maddr=%x eaddr=%llx len=%llx perm=%d\n",
29163 +                   args.ps_maddr, args.ps_eaddr,args.ps_len, args.ps_perm);
29164 +
29165 +           return (user_setperm (uctx, args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm));
29166 +       }
29167 +       
29168 +       case ELAN4IO_CLRPERM32:
29169 +       {
29170 +           ELAN4IO_PERM_STRUCT32 args;
29171 +           
29172 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT32)))
29173 +               return (-EFAULT);
29174 +           
29175 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: clrperm eaddr=%llx len=%llx\n",
29176 +                   args.ps_eaddr, args.ps_len);
29177 +
29178 +           user_clrperm (uctx, args.ps_eaddr, args.ps_len);
29179 +           return (0);
29180 +       }
29181 +    
29182 +       case ELAN4IO_TRAPHANDLER32:
29183 +       {
29184 +           ELAN4IO_TRAPHANDLER_STRUCT32 args;
29185 +           
29186 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPHANDLER_STRUCT32)))
29187 +               return (-EFAULT);
29188 +           
29189 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: traphandler trapp=%x nticks=%d\n",
29190 +                   args.th_trapp, args.th_nticks);
29191 +
29192 +           return (user_trap_handler (pr->pr_uctx, (ELAN4_USER_TRAP *)(unsigned long)args.th_trapp, args.th_nticks));
29193 +       }
29194 +       }
29195 +    }
29196 +
29197 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl32: fd=%d cmd=%x arg=%lx file=%p\n", fd, cmd, arg, file);
29198 +    return (sys_ioctl (fd, cmd, arg));
29199 +}
29200 +#endif
29201 +
29202 +
29203 +
29204 +static int
29205 +elan4_mmap (struct file *file, struct vm_area_struct *vma)
29206 +{
29207 +    PRINTF (DBG_USER, DBG_FILE, "elan4_mmap: instance %d minor %d start=%lx end=%lx pgoff=%lx\n", 
29208 +           ELAN4_DEVICE (file->f_dentry->d_inode), ELAN4_MINOR (file->f_dentry->d_inode),
29209 +           vma->vm_start, vma->vm_end, vma->vm_pgoff);
29210 +
29211 +    switch (ELAN4_MINOR (file->f_dentry->d_inode))
29212 +    {
29213 +    case ELAN4_MINOR_CONTROL:
29214 +       return (control_mmap (file, vma));
29215 +    case ELAN4_MINOR_MEM:
29216 +       return (mem_mmap (file, vma));
29217 +    case ELAN4_MINOR_USER:
29218 +       return (user_mmap (file, vma));
29219 +    default:
29220 +       return (-ENXIO);
29221 +    }
29222 +}
29223 +
29224 +void
29225 +elan4_update_intel_p64h2 (ELAN4_DEV *dev, struct pci_dev *bridge)
29226 +{
29227 +    u16 cnf;
29228 +    
29229 +    pci_read_config_word (bridge, 0x40 /* CNF */, &cnf);
29230 +    
29231 +    /* We expect the CNF register to be configured as follows
29232 +     *
29233 +     * [8]   == 1      PMODE PCI Mode
29234 +     * [7:6] == 2/3    PFREQ PCI Frequency (100/133)
29235 +     * [5]   == 0      RSDIS Restreaming Disable
29236 +     * [4:3] == 0x     PP    Prefetch Policy
29237 +     * [2]   == 0       DTD   Delayed Transaction Depth
29238 +     * [1:0] == 10      MDT   Maximum Delayed Transactions
29239 +     */
29240 +    
29241 +    if ((cnf & (1 << 8)) == 0)
29242 +       printk ("elan%d: strangeness - elan reports PCI-X but P64H2 reports PCI mode !\n", dev->dev_instance);
29243 +    else if ((cnf & 0xb7) != 0x82 && (cnf & 0xb7) != 0x84 && optimise_pci_bus < 2)
29244 +       printk ("elan%d: P64H2 CNF is not configured as expected : RSDIS=%d PP=%d DTD=%d MDT=%d\n",
29245 +               dev->dev_instance, (cnf >> 5) & 1, (cnf >> 3) & 3, (cnf >> 2) & 1, cnf & 3);
29246 +    else
29247 +    {
29248 +       switch ((cnf >> 6) & 3)
29249 +       {
29250 +       case 2:                                         /* PCI-X 100 */
29251 +           pci_write_config_word (bridge, 0xfc /* PC100 */, 0x7777);
29252 +           
29253 +           printk ("elan%d: optimise P64H2 : setting MDT=0, DTD=1, PFC=777 for PCI-X 100\n", dev->dev_instance);
29254 +           
29255 +           break;
29256 +           
29257 +       case 3:                                         /* PCI-X 133 */
29258 +           pci_write_config_word (bridge, 0xfe /* PC133 */, 0x7777);
29259 +           
29260 +           printk ("elan%d: optimise P64H2 : setting MDT=0, DTD=1, PFC=777 for PCI-X 133\n", dev->dev_instance);
29261 +           break;
29262 +       }
29263 +       
29264 +       pci_write_config_word (bridge, 0x40 /* CNF */, (cnf & 0xfff8) | 0x4);   /* DTD=1 MDT=0 */
29265 +    }
29266 +}
29267 +
29268 +int
29269 +elan4_optimise_intel_p64h2 (ELAN4_DEV *dev, struct pci_dev *pdev)
29270 +{
29271 +    struct pci_bus   *bus      = pdev->bus;
29272 +    struct pci_dev   *bridge   = bus->self;
29273 +    unsigned int      devcount = 0;
29274 +    u8                revision;
29275 +    u32               ectrl;
29276 +    struct list_head *el;
29277 +    
29278 +    pci_read_config_dword (pdev, PCI_ELAN_CONTROL, &ectrl);
29279 +
29280 +    /* We can only run in PCI-X mode with a B1 stepping P64H2 because of P64H2 Errata 3 */
29281 +    pci_read_config_byte (bridge, PCI_REVISION_ID, &revision);
29282 +    if (revision < 0x04)
29283 +    {
29284 +       if ((ectrl & ECTRL_INITIALISATION_MODE) != Pci2_2)
29285 +       {
29286 +           static const char *p64h2_stepping[4] = {"UNKNOWN", "UNKNOWN", "UNKNOWN", "B0"};
29287 +
29288 +           printk ("elan%d: unable to use device because of P64H2 Errata 3 on\n"
29289 +                   "       %s stepping part and running in a PCI-X slot\n", 
29290 +                   dev->dev_instance, p64h2_stepping[revision]);
29291 +           return -EINVAL;
29292 +       }
29293 +    }
29294 +    
29295 +    /* We can only alter the bus configuration registers if the Elan is the only device
29296 +     * on the bus ... */
29297 +    list_for_each (el, &bus->devices) {
29298 +       struct pci_dev *pcip = list_entry (el, struct pci_dev, bus_list);
29299 +
29300 +       if (pcip == pdev || (pcip->vendor == PCI_VENDOR_ID_INTEL && pcip->device == 0x1462 /* P64H2 HOTPLUG */))
29301 +           continue;
29302 +           
29303 +       devcount++;
29304 +    }
29305 +
29306 +    if (devcount > 0 || !list_empty (&bus->children))
29307 +    {
29308 +       printk ("elan%d: unable to optimise P64H2 settings as %s%s\n", dev->dev_instance,
29309 +               (devcount > 0) ? "more than one device on bus" :  "",
29310 +               ! list_empty (&bus->children) ? "has child buses" : "");
29311 +       return 0;
29312 +    }
29313 +
29314 +#ifdef __ia64
29315 +    if ((ectrl & ECTRL_INITIALISATION_MODE) == PciX100to133MHz)
29316 +    {
29317 +       struct pci_dev *pcip;
29318 +       unsigned int sioh_good      = 0;
29319 +       unsigned int sioh_downgrade = 0;
29320 +       unsigned int snc_good       = 0;
29321 +       unsigned int snc_downgrade  = 0;
29322 +       
29323 +       /* Search for the associated SIOH and SNC on ia64,
29324 +        * if we have a C2 SIOH and a C0/C1 SNC, then we can
29325 +        * reconfigure the P64H2 as follows:
29326 +        *    CNF:MDT   = 0
29327 +        *    CNF:DTD   = 1
29328 +        *    CNF:PC133 = 7777
29329 +        *
29330 +        * if not, then issue a warning that down rev parts
29331 +        * affect bandwidth.
29332 +        */
29333 +       for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_INTEL, 0x500, pcip)); )
29334 +       {
29335 +           pci_read_config_byte (pcip, PCI_REVISION_ID, &revision);
29336 +           
29337 +           if (revision >= 0x21)
29338 +               snc_good++;
29339 +           else
29340 +           {
29341 +               printk ("elan%d: SNC revision %x (%s)\n", dev->dev_instance, revision,
29342 +                       revision == 0x00 ? "A0" : revision == 0x01 ? "A1" : 
29343 +                       revision == 0x02 ? "A2" : revision == 0x03 ? "A3" :
29344 +                       revision == 0x10 ? "B0" : revision == 0x20 ? "C0" : 
29345 +                       revision == 0x21 ? "C1" : "UNKNOWN");
29346 +           
29347 +               snc_downgrade++;
29348 +           }
29349 +       }
29350 +
29351 +       for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_INTEL, 0x510, pcip)) != NULL; )
29352 +       {
29353 +           pci_read_config_byte (pcip, PCI_REVISION_ID, &revision);
29354 +           
29355 +           
29356 +           if (revision >= 0x22)
29357 +               sioh_good++;
29358 +           else
29359 +           {
29360 +               printk ("elan%d: SIOH revision %x (%s)\n", dev->dev_instance, revision,
29361 +                       revision == 0x10 ? "C0" : revision == 0x20 ? "C0" : 
29362 +                       revision == 0x21 ? "C1" : revision == 0x22 ? "C2" : "UNKNOWN");
29363 +
29364 +               sioh_downgrade++;
29365 +           }
29366 +       }
29367 +
29368 +       if (optimise_pci_bus < 2 && (sioh_downgrade || snc_downgrade))
29369 +           printk ("elan%d: unable to optimise as SNC/SIOH below required C1/C2 steppings\n", dev->dev_instance);
29370 +       else if (optimise_pci_bus < 2 && (sioh_good == 0 || snc_good == 0))
29371 +           printk ("elan%d: unable to optimise as cannot determine SNC/SIOH revision\n", dev->dev_instance);
29372 +       else
29373 +           elan4_update_intel_p64h2 (dev, bridge);
29374 +    }
29375 +#endif
29376 +    
29377 +#ifdef __i386
29378 +    if ((ectrl & ECTRL_INITIALISATION_MODE) == PciX100to133MHz)
29379 +       elan4_update_intel_p64h2 (dev, bridge);
29380 +#endif     
29381 +    return 0;
29382 +}
29383 +
29384 +int
29385 +elan4_optimise_intel_pxh (ELAN4_DEV *dev, struct pci_dev *pdev)
29386 +{
29387 +#ifdef __i386
29388 +    printk ("elan%d: unable to use device on this platform in 32 bit mode\n", dev->dev_instance);
29389 +
29390 +    return -EINVAL;
29391 +#endif
29392 +
29393 +    dev->dev_features |= ELAN4_FEATURE_NO_DWORD_READ;
29394 +
29395 +    return 0;
29396 +}
29397 +
29398 +void
29399 +elan4_optimise_serverworks_ciobx2 (ELAN4_DEV *dev)
29400 +{
29401 +    struct pci_dev *pdev = dev->dev_osdep.pdev;
29402 +    struct pci_dev *pcip;
29403 +    unsigned char   bus;
29404 +    unsigned int    dor;
29405 +    
29406 +    /* Find the CIOBX2 for our bus number */
29407 +    for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_SERVERWORKS, 0x0101, pcip)) != NULL;)
29408 +    {
29409 +       pci_read_config_byte (pcip, 0x44 /* BUSNUM */, &bus);
29410 +       
29411 +       if (pdev->bus->number == bus)
29412 +       {
29413 +           printk ("elan%d: optimise CIOBX2 : setting DOR to disable read pipe lining\n", dev->dev_instance);
29414 +
29415 +           pci_read_config_dword (pcip, 0x78 /* DOR */, &dor);
29416 +           pci_write_config_dword (pcip, 0x78 /* DOR */, dor | (1 << 16));
29417 +       }
29418 +    }
29419 +}
29420 +
29421 +int
29422 +elan4_optimise_bus (ELAN4_DEV *dev)
29423 +{
29424 +    struct pci_dev *pdev = dev->dev_osdep.pdev;
29425 +
29426 +    if (pdev->bus && pdev->bus->self) 
29427 +    {
29428 +       struct pci_dev *bridge = pdev->bus->self;
29429 +       
29430 +       if (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x1460 /* Intel P64H2 */)
29431 +           return elan4_optimise_intel_p64h2 (dev, pdev);
29432 +
29433 +       if ((bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0329) /* Intel 6700PXH Fn 0 */ ||
29434 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x032a) /* Intel 6700PXH Fn 2 */ ||
29435 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x032c) /* Intel 6702PXH */ ||
29436 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0320) /* Intel PXH-D */)
29437 +           return elan4_optimise_intel_pxh (dev, pdev);
29438 +    }
29439 +
29440 +    if (pci_find_device (PCI_VENDOR_ID_HP, 0x122e, NULL) != NULL)              /* on HP ZX1 set the relaxed ordering  */
29441 +       dev->dev_pteval = PTE_RelaxedOrder;                                     /* bit to get better DMA bandwidth. */
29442 +
29443 +    if (pci_find_device (PCI_VENDOR_ID_SERVERWORKS, 0x0101, NULL) != NULL)     /* ServerWorks CIOBX2 */
29444 +       elan4_optimise_serverworks_ciobx2 (dev);
29445 +
29446 +    return 0;
29447 +}
29448 +
29449 +int
29450 +elan4_pciinit (ELAN4_DEV *dev)
29451 +{
29452 +    int res;
29453 +    u32 value;
29454 +    u16 command;
29455 +    u8 cacheline;
29456 +    unsigned long flags;
29457 +
29458 +    if (optimise_pci_bus && (res = elan4_optimise_bus (dev)) <0)
29459 +       return (res);
29460 +
29461 +    if ((res = pci_enable_device (dev->dev_osdep.pdev)) < 0)
29462 +       return (res);
29463 +
29464 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
29465 +    if ((value & ECTRL_INITIALISATION_MODE) == Pci2_2)
29466 +       printk ("elan%d: is an elan4%c (PCI-2.2)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
29467 +    else
29468 +    {
29469 +       switch (value & ECTRL_INITIALISATION_MODE)
29470 +       {
29471 +       case PciX50To66MHz:
29472 +           printk ("elan%d: is an elan4%c (PCI-X 50-66)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
29473 +           break;
29474 +           
29475 +       case PciX66to100MHz:
29476 +           printk ("elan%d: is an elan4%c (PCI-X 66-100)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
29477 +           break;
29478 +           
29479 +       case PciX100to133MHz:
29480 +           printk ("elan%d: is an elan4%c (PCI-X 100-133)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
29481 +           break;
29482 +           
29483 +       default:
29484 +           printk ("elan%d: Invalid PCI-X mode\n", dev->dev_instance);
29485 +           return (-EINVAL);
29486 +       }
29487 +    }
29488 +
29489 +    /* initialise the elan pll control register */
29490 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, &value);
29491 +
29492 +    if (elan4_pll_cfg)
29493 +    {
29494 +       printk ("elan%d: setting pll control to %08x\n", dev->dev_instance, elan4_pll_cfg);
29495 +
29496 +       pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, elan4_pll_cfg);
29497 +    }
29498 +    else
29499 +    {
29500 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
29501 +           pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, 
29502 +                                   (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | ECTRL_SYS_CLOCK_RATIO_4_3);
29503 +       else
29504 +           pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, 
29505 +                                   (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | ECTRL_SYS_CLOCK_RATIO_6_5 | SysPll_FeedForwardISel0 | SysPll_FeedForwardISel1);
29506 +    }  
29507 +
29508 +    /* initialise the elan control register */
29509 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
29510 +
29511 +    value = ((15 << ECTRL_IPROC_HIGH_PRI_TIME_SHIFT) |
29512 +            (15 << ECTRL_OTHER_HIGH_PRI_TIME_SHIFT) |
29513 +            (value & ECTRL_28_NOT_30_BIT_LOCAL_BAR) |
29514 +            (dev->dev_topaddrmode ? ECTRL_ExtraMasterAddrBits : 0) |
29515 +            ECTRL_ENABLE_LATENCY_RESET | 
29516 +            ECTRL_ENABLE_WRITEBURSTS | 
29517 +            ECTRL_ENABLE_2_2READBURSTS);
29518 +
29519 +#ifdef LINUX_SPARC
29520 +    value &= ~(ECTRL_ENABLE_LATENCY_RESET | ECTRL_ENABLE_WRITEBURSTS);
29521 +#endif
29522 +
29523 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value | ECTRL_SOFTWARE_INTERNAL_RESET);
29524 +
29525 +    switch (dev->dev_devinfo.dev_revision_id)
29526 +    {
29527 +    case PCI_REVISION_ID_ELAN4_REVA:
29528 +       /* Delay 10ms here if we've changed the sysclock ratio */
29529 +       /* to allow the PLL to stabilise before proceeding */
29530 +       udelay (10000);
29531 +       break;
29532 +       
29533 +    case PCI_REVISION_ID_ELAN4_REVB:
29534 +    {
29535 +       unsigned char val = read_i2c (dev, I2cLedsValue);
29536 +
29537 +       /* On RevB we have to explicitly reset the PLLs */
29538 +       pci_read_config_word (dev->dev_osdep.pdev, PCI_COMMAND, &command);
29539 +
29540 +       write_i2c (dev, I2cLedsValue, val | 0x80);
29541 +       udelay (1000);
29542 +
29543 +       /* Issue the PLL counter reset and immediately inhibit all pci interaction 
29544 +        * while the PLL is recovering. The write to the PCI_COMMAND register has 
29545 +        * to occur within 50uS of the write to the i2c registers */
29546 +       local_irq_save (flags);
29547 +       write_i2c (dev, I2cLedsValue, val & ~0x80);
29548 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, (1 << 10) /* PCI_COMMAND_DISABLE_INT */);
29549 +       local_irq_restore (flags);
29550 +
29551 +       /* Wait for the write to occur and for the PLL to regain lock */
29552 +       udelay (20000); udelay (20000);
29553 +
29554 +       /* Re-enable pci interaction and clear any spurious errors detected */
29555 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_STATUS, PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR);
29556 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, command);
29557 +       break;
29558 +    }
29559 +    }
29560 +
29561 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value);
29562 +
29563 +    /* Enable master accesses */
29564 +    pci_set_master (dev->dev_osdep.pdev);
29565 +
29566 +    /* Verify that the memWrInvalidate bit is set */
29567 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_COMMAND, &command);
29568 +    pci_read_config_byte (dev->dev_osdep.pdev, PCI_CACHE_LINE_SIZE, &cacheline);
29569 +
29570 +    if ((command & PCI_COMMAND_INVALIDATE) == 0)
29571 +    {
29572 +       printk ("elan%d: enable MemWrInvalidate (cacheline %d)\n",
29573 +               dev->dev_instance, cacheline * 4);
29574 +
29575 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, command | PCI_COMMAND_INVALIDATE);
29576 +    }
29577 +
29578 +    return (0);
29579 +}
29580 +
29581 +void
29582 +elan4_pcifini (ELAN4_DEV *dev)
29583 +{
29584 +    u32 value;
29585 +
29586 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
29587 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value | ECTRL_SOFTWARE_INTERNAL_RESET);
29588 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value);
29589 +
29590 +    pci_disable_device (dev->dev_osdep.pdev);
29591 +}
29592 +
29593 +void
29594 +elan4_pcierror (ELAN4_DEV *dev)
29595 +{
29596 +    struct pci_dev *pci = dev->dev_osdep.pdev;
29597 +    u8  type;
29598 +    u16 status, cmd;
29599 +    u32 physlo, physhi, control;
29600 +    
29601 +    printk("elan%d: pci error has occurred\n", dev->dev_instance);
29602 +
29603 +    pci_read_config_word  (pci, PCI_STATUS,             &status);
29604 +    pci_read_config_word  (pci, PCI_COMMAND,             &cmd);
29605 +    pci_read_config_dword (pci, PCI_ELAN_CONTROL,       &control);
29606 +
29607 +    if (control & ECTRL_REC_SPLIT_COMP_MESSAGE)
29608 +    {
29609 +       u32 message, attr;
29610 +       
29611 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control & ~ECTRL_SELECT_SPLIT_MESS_ATTR);
29612 +       pci_read_config_dword (pci, PCI_ELAN_SPLIT_MESSAGE_VALUE, &message);
29613 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control | ECTRL_SELECT_SPLIT_MESS_ATTR);
29614 +       pci_read_config_dword (pci, PCI_ELAN_SPLIT_MESSAGE_VALUE, &attr);
29615 +
29616 +       printk ("elan%d: pcierror - received split completion message - attr=%08x, message=%08x\n", 
29617 +               dev->dev_instance, attr, message);
29618 +
29619 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control | ECTRL_REC_SPLIT_COMP_MESSAGE); /* clear the error */
29620 +    }
29621 +    else
29622 +    {
29623 +       pci_read_config_dword (pci, PCI_ELAN_PARITY_ADDR_LO, &physlo);
29624 +       pci_read_config_dword (pci, PCI_ELAN_PARITY_ADDR_HI, &physhi);
29625 +       pci_read_config_byte  (pci, PCI_ELAN_PARITY_TYPE,    &type);
29626 +       
29627 +       printk ("elan%d: pcierror - status %x cmd %4x physaddr %08x%08x type %x\n", 
29628 +               dev->dev_instance, status, cmd, physhi, physlo, type);
29629 +       
29630 +       if (status & PCI_STATUS_PARITY)
29631 +           printk ("elan%d: parity error signalled (PERR)\n", dev->dev_instance);
29632 +       if (status & PCI_STATUS_DETECTED_PARITY)
29633 +           printk ("elan%d: detected parity error\n", dev->dev_instance);
29634 +       if (status & PCI_STATUS_REC_MASTER_ABORT)
29635 +           printk ("elan%d: received master abort\n", dev->dev_instance);
29636 +       if (status & PCI_STATUS_REC_TARGET_ABORT)
29637 +           printk ("elan%d: received target abort\n", dev->dev_instance);
29638 +       if (status & PCI_STATUS_SIG_SYSTEM_ERROR)
29639 +           printk ("elan%d: signalled SERR\n", dev->dev_instance);
29640 +       if (status & PCI_STATUS_SIG_TARGET_ABORT)
29641 +           printk ("elan%d: signalled target abort\n", dev->dev_instance);
29642 +
29643 +       pci_write_config_word (pci, PCI_STATUS, status);        /* clear the errors */
29644 +    }
29645 +
29646 +    DISABLE_INT_MASK (dev, INT_PciMemErr);
29647 +
29648 +#ifdef notdef
29649 +    panic ("elan%d: pcierror\n", dev->dev_instance);           /* better panic ! */
29650 +#endif
29651 +}
29652 +
29653 +static irqreturn_t
29654 +elan4_irq (int irq, void *arg, struct pt_regs *regs)
29655 +{
29656 +    if (elan4_1msi0 ((ELAN4_DEV *) arg))
29657 +           return IRQ_HANDLED;
29658 +    else
29659 +           return IRQ_NONE;
29660 +}
29661 +
29662 +ioaddr_t
29663 +elan4_map_device (ELAN4_DEV *dev, unsigned bar, unsigned off, unsigned size, ELAN4_MAP_HANDLE *handle)
29664 +{
29665 +    return (ioaddr_t) ioremap_nocache (pci_resource_start (dev->dev_osdep.pdev, bar) + off, size);
29666 +}
29667 +
29668 +void
29669 +elan4_unmap_device (ELAN4_DEV *dev, ioaddr_t ptr, unsigned size, ELAN4_MAP_HANDLE *handle)
29670 +{
29671 +    iounmap ((void *) ptr);
29672 +}
29673 +
29674 +unsigned long
29675 +elan4_resource_len (ELAN4_DEV *dev, unsigned bar)
29676 +{
29677 +    return (pci_resource_len (dev->dev_osdep.pdev, bar));
29678 +}
29679 +
29680 +void
29681 +elan4_configure_mtrr (ELAN4_DEV *dev)
29682 +{
29683 +#ifdef CONFIG_MTRR
29684 +    if (! (dev->dev_features & ELAN4_FEATURE_NO_WRITE_COMBINE))
29685 +    {
29686 +       /* try and initialise the MTRR registers to enable write-combining */
29687 +       dev->dev_osdep.sdram_mtrr = mtrr_add (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), 
29688 +                                             pci_resource_len   (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM),
29689 +                                             MTRR_TYPE_WRCOMB, 1);
29690 +       if (dev->dev_osdep.sdram_mtrr < 0)
29691 +           printk ("elan%d: cannot configure MTRR for sdram\n", dev->dev_instance);
29692 +       
29693 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
29694 +       {
29695 +           dev->dev_osdep.regs_mtrr = mtrr_add (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
29696 +                                                (dev->dev_cqoffset + dev->dev_cqreorder) * CQ_CommandMappingSize,
29697 +                                                CQ_CommandMappingSize * (dev->dev_cqcount >> 1), 
29698 +                                                MTRR_TYPE_WRCOMB, 1);
29699 +           
29700 +           if (dev->dev_osdep.regs_mtrr < 0)
29701 +               printk ("elan%d: cannot configure MTRR for command ports\n", dev->dev_instance);
29702 +       }
29703 +    }
29704 +#endif
29705 +}
29706 +
29707 +void
29708 +elan4_unconfigure_mtrr (ELAN4_DEV *dev)
29709 +{
29710 +#ifdef CONFIG_MTRR
29711 +    if (! (dev->dev_features & ELAN4_FEATURE_NO_WRITE_COMBINE))
29712 +    {
29713 +       if (dev->dev_osdep.sdram_mtrr >=0 )
29714 +           mtrr_del (dev->dev_osdep.sdram_mtrr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), 
29715 +                     pci_resource_len   (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
29716 +       
29717 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB && dev->dev_osdep.regs_mtrr >= 0)
29718 +           mtrr_del (dev->dev_osdep.regs_mtrr, 
29719 +                     pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
29720 +                     (dev->dev_cqoffset + dev->dev_cqreorder) * CQ_CommandMappingSize,
29721 +                     CQ_CommandMappingSize * (dev->dev_cqcount >> 1));
29722 +    }
29723 +#endif
29724 +}
29725 +
29726 +EXPORT_SYMBOL(elan4_reference_device);
29727 +EXPORT_SYMBOL(elan4_dereference_device);
29728 +
29729 +/*
29730 + * Local variables:
29731 + * c-file-style: "stroustrup"
29732 + * End:
29733 + */
29734 Index: linux-2.4.21/drivers/net/qsnet/elan4/i2c.c
29735 ===================================================================
29736 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/i2c.c     2004-02-23 16:02:56.000000000 -0500
29737 +++ linux-2.4.21/drivers/net/qsnet/elan4/i2c.c  2005-06-01 23:12:54.607437888 -0400
29738 @@ -0,0 +1,248 @@
29739 +/*
29740 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
29741 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
29742 + * 
29743 + *    For licensing information please see the supplied COPYING file
29744 + *
29745 + */
29746 +
29747 +#ident "@(#)$Id: i2c.c,v 1.4 2004/01/07 13:37:45 jon Exp $"
29748 +/*      $Source: /cvs/master/quadrics/elan4mod/i2c.c,v $*/
29749 +#include <qsnet/kernel.h>
29750 +
29751 +#include <elan4/sdram.h>
29752 +#include <elan4/debug.h>
29753 +#include <elan4/device.h>
29754 +#include <elan4/commands.h>
29755 +
29756 +#include <elan4/i2c.h>
29757 +#include <elan4/pci.h>
29758 +#include <elan4/ioctl.h>
29759 +#include <elan4/registers.h>
29760 +
29761 +#define I2C_POLL_LIMIT         8
29762 +
29763 +static int
29764 +i2c_poll_busy (ELAN4_DEV *dev)
29765 +{
29766 +    int t    = 100;
29767 +    int loop = 0;
29768 +    volatile unsigned char val;
29769 +
29770 +    /* wait for any led I2C operation to finish */
29771 +    while (((val = read_i2c (dev, I2cPortControl)) & I2cCntl_I2cPortBusy) && loop++ < I2C_POLL_LIMIT)
29772 +    {
29773 +       DELAY (t);
29774 +       
29775 +       if (t < 500000)
29776 +           t <<= 1;
29777 +    }
29778 +    if (loop >= I2C_POLL_LIMIT)
29779 +    {
29780 +       printk ("elan%d: I2c has timed out waiting for I2cPortBusy to clear!\n", dev->dev_instance);
29781 +       printk ("elan%d: I2cPortControl=%x I2cLedBase=%x I2cStatus=%x\n",
29782 +               dev->dev_instance, val, read_i2c (dev, I2cLedBase), read_i2c (dev, I2cStatus));
29783 +    }
29784 +
29785 +    return val;
29786 +}
29787 +
29788 +static int
29789 +i2c_poll_stopped (ELAN4_DEV *dev)
29790 +{
29791 +    int t    = 100;
29792 +    int loop = 0;
29793 +    unsigned char val=0, newval;
29794 +
29795 +    /* wait for any led I2C operation to finish. Must see it stopped at least twice */
29796 +    while (!(((newval = read_i2c (dev, I2cPortControl)) & I2cCntl_I2cStopped) &&
29797 +             (val & I2cCntl_I2cStopped)) &&
29798 +             (loop++ < I2C_POLL_LIMIT))
29799 +    {
29800 +       DELAY (t);
29801 +       
29802 +       if (t < 500000)
29803 +           t <<= 1;
29804 +       val = newval;
29805 +    }
29806 +
29807 +    return val;
29808 +}
29809 +
29810 +int
29811 +i2c_disable_auto_led_update (ELAN4_DEV *dev)
29812 +{
29813 +    spin_lock (&dev->dev_i2c_lock);
29814 +
29815 +    if (dev->dev_i2c_led_disabled++ == 0)
29816 +    {
29817 +       write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) & ~I2cCntl_I2cUpdatingLedReg);
29818 +
29819 +       if (! (i2c_poll_stopped (dev) & I2cCntl_I2cStopped))
29820 +       {
29821 +           write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) | I2cCntl_I2cUpdatingLedReg);
29822 +           
29823 +           spin_unlock (&dev->dev_i2c_lock);
29824 +           
29825 +           return -EAGAIN;
29826 +       }
29827 +       
29828 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) & ~I2cCntl_SampleNewLedValues);
29829 +    }
29830 +
29831 +    spin_unlock (&dev->dev_i2c_lock);
29832 +
29833 +    return 0;
29834 +}
29835 +
29836 +void
29837 +i2c_enable_auto_led_update (ELAN4_DEV *dev)
29838 +{
29839 +    spin_lock (&dev->dev_i2c_lock);
29840 +    if (--dev->dev_i2c_led_disabled == 0)
29841 +    {
29842 +       write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) | I2cCntl_I2cUpdatingLedReg);
29843 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_SampleNewLedValues);
29844 +    }
29845 +
29846 +    spin_unlock (&dev->dev_i2c_lock);
29847 +}
29848 +
29849 +int
29850 +i2c_write (ELAN4_DEV *dev, unsigned int address, unsigned int count, unsigned char *data)
29851 +{
29852 +    int i;
29853 +
29854 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
29855 +       return -EAGAIN;
29856 +    
29857 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
29858 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
29859 +    
29860 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
29861 +       return -ENXIO;
29862 +
29863 +    for (i = 0; i < count; i++)
29864 +    {
29865 +       write_i2c (dev, I2cWrData, data[i]);
29866 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | (i == (count-1) ? I2cCntl_I2cPortGenStopBit : 0));
29867 +    }
29868 +
29869 +    return 0;
29870 +}
29871 +
29872 +int
29873 +i2c_read (ELAN4_DEV *dev, unsigned int address, unsigned int count, unsigned char *data)
29874 +{
29875 +    int i;
29876 +
29877 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
29878 +       return -EAGAIN; /* not idle */ 
29879 +
29880 +    write_i2c (dev, I2cWrData,      I2C_READ_ADDR(address));
29881 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
29882 +
29883 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
29884 +       return -ENXIO;
29885 +    
29886 +    for (i = 0; i < count; i++)
29887 +    {
29888 +       write_i2c (dev, I2cWrData, 0xff);
29889 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortRead | ((i == count-1) ? I2cCntl_I2cPortGenStopBit : 0));
29890 +
29891 +       i2c_poll_busy (dev);
29892 +
29893 +       data[i] = read_i2c (dev, I2cRdData);
29894 +    }
29895 +
29896 +    return 0;
29897 +}
29898 +
29899 +int
29900 +i2c_writereg (ELAN4_DEV *dev, unsigned int address, unsigned int reg, unsigned int count, unsigned char *data)
29901 +{
29902 +    int i;
29903 +
29904 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
29905 +       return -EAGAIN; /* not idle */ 
29906 +
29907 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
29908 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
29909 +
29910 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
29911 +       return -ENXIO;
29912 +    
29913 +    write_i2c (dev, I2cWrData,      reg);
29914 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
29915 +
29916 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
29917 +       return -ENXIO;
29918 +    
29919 +    for (i = 0; i < count; i++)
29920 +    {
29921 +       write_i2c (dev, I2cWrData, data[i]);
29922 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | ((i == count-1) ? I2cCntl_I2cPortGenStopBit : 0));
29923 +
29924 +       if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
29925 +           printk (" i2c_writereg: off %d failed\n", i);
29926 +    }
29927 +
29928 +    return 0;
29929 +}
29930 +
29931 +int
29932 +i2c_readreg (ELAN4_DEV *dev, unsigned int address, unsigned int reg, unsigned int count, unsigned char *data)
29933 +{
29934 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
29935 +       return -EAGAIN; /* not idle */ 
29936 +
29937 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
29938 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
29939 +
29940 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
29941 +       return -ENXIO;
29942 +    
29943 +    write_i2c (dev, I2cWrData,      reg);
29944 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | I2cCntl_I2cPortGenStopBit);
29945 +
29946 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
29947 +       return -ENXIO;
29948 +
29949 +    return i2c_read (dev, address, count, data);
29950 +}
29951 +
29952 +int
29953 +i2c_read_rom (ELAN4_DEV *dev, unsigned int addr, unsigned int len, unsigned char *data)
29954 +{
29955 +    unsigned int top = addr + len;
29956 +    int res;
29957 +
29958 +    if ((res = i2c_disable_auto_led_update (dev)) == 0)
29959 +    {
29960 +       /* read the rom in chunks that don't span the block boundary */
29961 +       while (addr < top)
29962 +       {
29963 +           unsigned int thisnob  = top - addr;
29964 +           unsigned int blocknob = I2C_24LC16B_BLOCKSIZE - I2C_24LC16B_BLOCKOFFSET(addr);
29965 +           
29966 +           if (thisnob > blocknob)
29967 +               thisnob = blocknob;
29968 +
29969 +           if ((res = i2c_readreg (dev, I2C_EEPROM_ADDR + I2C_24LC16B_BLOCKADDR(addr),
29970 +                                   I2C_24LC16B_BLOCKOFFSET(addr), thisnob, data)) < 0)
29971 +               break;
29972 +           
29973 +           addr += thisnob;
29974 +           data += thisnob;
29975 +       }
29976 +
29977 +       i2c_enable_auto_led_update (dev);
29978 +    }
29979 +    return res;
29980 +}
29981 +
29982 +/*
29983 + * Local variables:
29984 + * c-file-style: "stroustrup"
29985 + * End:
29986 + */
29987 Index: linux-2.4.21/drivers/net/qsnet/elan4/intcookie.c
29988 ===================================================================
29989 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/intcookie.c       2004-02-23 16:02:56.000000000 -0500
29990 +++ linux-2.4.21/drivers/net/qsnet/elan4/intcookie.c    2005-06-01 23:12:54.608437736 -0400
29991 @@ -0,0 +1,371 @@
29992 +/*
29993 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
29994 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
29995 + * 
29996 + *    For licensing information please see the supplied COPYING file
29997 + *
29998 + */
29999 +
30000 +#ident "@(#)$Id: intcookie.c,v 1.14 2004/08/09 14:02:37 daniel Exp $"
30001 +/*      $Source: /cvs/master/quadrics/elan4mod/intcookie.c,v $*/
30002 +
30003 +#include <qsnet/kernel.h>
30004 +
30005 +#include <elan4/debug.h>
30006 +#include <elan4/types.h>
30007 +#include <elan/capability.h>
30008 +#include <elan4/intcookie.h>
30009 +
30010 +static INTCOOKIE_TABLE *intcookie_tables;
30011 +static spinlock_t      intcookie_table_lock;
30012 +
30013 +/*
30014 + * intcookie_drop_entry:
30015 + *   drop the reference to a cookie held 
30016 + *   by the cookie table
30017 + */
30018 +static void
30019 +intcookie_drop_entry (INTCOOKIE_ENTRY *ent)
30020 +{
30021 +    unsigned long flags;
30022 +
30023 +    spin_lock_irqsave (&ent->ent_lock, flags);
30024 +    if (--ent->ent_ref != 0)
30025 +    {
30026 +       ent->ent_fired = ent->ent_cookie;
30027 +       kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
30028 +
30029 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
30030 +    }
30031 +    else
30032 +    {
30033 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
30034 +
30035 +       spin_lock_destroy (&ent->ent_lock);
30036 +       kcondvar_destroy (&ent->ent_wait);
30037 +
30038 +       KMEM_FREE (ent, sizeof (INTCOOKIE_ENTRY));
30039 +    }
30040 +}
30041 +
30042 +void
30043 +intcookie_init()
30044 +{
30045 +    spin_lock_init (&intcookie_table_lock);
30046 +}
30047 +
30048 +void
30049 +intcookie_fini()
30050 +{
30051 +    spin_lock_destroy (&intcookie_table_lock);
30052 +}
30053 +
30054 +INTCOOKIE_TABLE *
30055 +intcookie_alloc_table (ELAN_CAPABILITY *cap)
30056 +{
30057 +    INTCOOKIE_TABLE *tbl, *ntbl;
30058 +    ELAN_CAPABILITY *ncap;
30059 +    
30060 +    KMEM_ZALLOC (ntbl, INTCOOKIE_TABLE *, sizeof (INTCOOKIE_TABLE), 1);
30061 +
30062 +    if (ntbl == NULL)
30063 +       return (NULL);
30064 +
30065 +    KMEM_ALLOC (ncap, ELAN_CAPABILITY *, ELAN_CAP_SIZE(cap), 1);
30066 +
30067 +    if (ncap == NULL)
30068 +    {
30069 +       KMEM_FREE (ntbl, sizeof (INTCOOKIE_TABLE));
30070 +       return (NULL);
30071 +    }
30072 +
30073 +    spin_lock (&intcookie_table_lock);
30074 +    
30075 +    for (tbl = intcookie_tables; tbl; tbl = tbl->tbl_next)
30076 +       if (ELAN_CAP_MATCH (tbl->tbl_cap, cap) && tbl->tbl_cap->cap_mycontext == cap->cap_mycontext)
30077 +           break;
30078 +    
30079 +    if (tbl != NULL)
30080 +       tbl->tbl_ref++;
30081 +    else
30082 +    {
30083 +       spin_lock_init (&ntbl->tbl_lock);
30084 +
30085 +       ntbl->tbl_cap     = ncap;
30086 +       ntbl->tbl_ref     = 1;
30087 +       ntbl->tbl_entries = NULL;
30088 +       
30089 +       /* Save supplied cap */
30090 +       bcopy (cap, ncap, ELAN_CAP_SIZE(cap));
30091 +
30092 +       if ((ntbl->tbl_next = intcookie_tables) != NULL)
30093 +           intcookie_tables->tbl_prev = ntbl;
30094 +       intcookie_tables = ntbl;
30095 +       ntbl->tbl_prev = NULL;
30096 +    }
30097 +    spin_unlock (&intcookie_table_lock);
30098 +
30099 +    if (tbl == NULL)
30100 +       return (ntbl);
30101 +    else
30102 +    {
30103 +       KMEM_FREE (ntbl, sizeof (INTCOOKIE_TABLE));
30104 +       KMEM_FREE (ncap, ELAN_CAP_SIZE(cap));
30105 +       return (tbl);
30106 +    }    
30107 +}
30108 +
30109 +void
30110 +intcookie_free_table (INTCOOKIE_TABLE *tbl)
30111 +{
30112 +    INTCOOKIE_ENTRY *ent;
30113 +
30114 +    spin_lock (&intcookie_table_lock);
30115 +    if (tbl->tbl_ref > 1)
30116 +    {
30117 +       tbl->tbl_ref--;
30118 +       spin_unlock (&intcookie_table_lock);
30119 +       return;
30120 +    }
30121 +    
30122 +    if (tbl->tbl_prev)
30123 +       tbl->tbl_prev->tbl_next = tbl->tbl_next;
30124 +    else
30125 +       intcookie_tables = tbl->tbl_next;
30126 +    if (tbl->tbl_next)
30127 +       tbl->tbl_next->tbl_prev = tbl->tbl_prev;
30128 +    
30129 +    spin_unlock (&intcookie_table_lock);
30130 +    
30131 +    /* NOTE - table no longer visible to other threads
30132 +     *        no need to acquire tbl_lock */
30133 +    while ((ent = tbl->tbl_entries) != NULL)
30134 +    {
30135 +       if ((tbl->tbl_entries = ent->ent_next) != NULL)
30136 +           ent->ent_next->ent_prev = NULL;
30137 +       
30138 +       intcookie_drop_entry (ent);
30139 +    }
30140 +    spin_lock_destroy (&tbl->tbl_lock);
30141 +
30142 +    KMEM_FREE (tbl->tbl_cap, ELAN_CAP_SIZE(tbl->tbl_cap));
30143 +    KMEM_FREE (tbl, sizeof (INTCOOKIE_TABLE));
30144 +}
30145 +
30146 +int
30147 +intcookie_alloc (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30148 +{
30149 +    INTCOOKIE_ENTRY *ent, *nent;
30150 +    unsigned long flags;
30151 +
30152 +    KMEM_ZALLOC (nent, INTCOOKIE_ENTRY *, sizeof (INTCOOKIE_ENTRY), 1);
30153 +
30154 +    if (nent == NULL)
30155 +       return (-ENOMEM);
30156 +    
30157 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30158 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30159 +       if (ent->ent_cookie == cookie)
30160 +           break;
30161 +
30162 +    if (ent == NULL)
30163 +    {
30164 +       kcondvar_init (&nent->ent_wait);
30165 +       spin_lock_init (&nent->ent_lock);
30166 +
30167 +       nent->ent_ref    = 1;
30168 +       nent->ent_cookie = cookie;
30169 +
30170 +       if ((nent->ent_next = tbl->tbl_entries) != NULL)
30171 +           tbl->tbl_entries->ent_prev = nent;
30172 +       tbl->tbl_entries = nent;
30173 +       nent->ent_prev = NULL;
30174 +    }
30175 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30176 +
30177 +    if (ent == NULL)
30178 +       return (0);
30179 +    else
30180 +    {
30181 +       KMEM_FREE (nent, sizeof (INTCOOKIE_ENTRY));
30182 +       return (-EINVAL);
30183 +    }
30184 +}
30185 +
30186 +int
30187 +intcookie_free (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30188 +{
30189 +    INTCOOKIE_ENTRY *ent;
30190 +    unsigned long flags;
30191 +
30192 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30193 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30194 +       if (ent->ent_cookie == cookie)
30195 +           break;
30196 +    
30197 +    if (ent == NULL)
30198 +    {
30199 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30200 +       return (-EINVAL);
30201 +    }
30202 +
30203 +    if (ent->ent_prev == NULL)
30204 +       tbl->tbl_entries = ent->ent_next;
30205 +    else
30206 +       ent->ent_prev->ent_next = ent->ent_next;
30207 +
30208 +    if (ent->ent_next != NULL)
30209 +       ent->ent_next->ent_prev = ent->ent_prev;
30210 +    
30211 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30212 +
30213 +    intcookie_drop_entry (ent);
30214 +
30215 +    return (0);
30216 +}
30217 +
30218 +/*
30219 + * intcookie_fire_cookie:
30220 + *    fire the cookie - this is called from the event interrupt.
30221 + */
30222 +int
30223 +intcookie_fire (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30224 +{
30225 +    INTCOOKIE_ENTRY *ent;
30226 +    unsigned long flags;
30227 +
30228 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30229 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30230 +       if (ent->ent_cookie == cookie)
30231 +           break;
30232 +    
30233 +    if (ent == NULL)
30234 +    {
30235 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30236 +       return (-EINVAL);
30237 +    }
30238 +           
30239 +    spin_lock (&ent->ent_lock);
30240 +    ent->ent_fired = cookie;
30241 +    kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
30242 +    spin_unlock (&ent->ent_lock);
30243 +
30244 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30245 +
30246 +    return (0);
30247 +}    
30248 +
30249 +int
30250 +intcookie_fire_cap (ELAN_CAPABILITY *cap, ELAN4_INTCOOKIE cookie)
30251 +{
30252 +    int res;
30253 +    INTCOOKIE_TABLE *tbl;
30254 +
30255 +    spin_lock (&intcookie_table_lock);
30256 +    
30257 +    for (tbl = intcookie_tables; tbl; tbl = tbl->tbl_next)
30258 +       if (ELAN_CAP_MATCH (tbl->tbl_cap, cap) && tbl->tbl_cap->cap_mycontext == cap->cap_mycontext)
30259 +           break;
30260 +    
30261 +    if (tbl != NULL)
30262 +       tbl->tbl_ref++;
30263 +
30264 +    spin_unlock (&intcookie_table_lock);
30265 +
30266 +    /* No matching table found */
30267 +    if (tbl == NULL)
30268 +       return (-EINVAL);
30269 +
30270 +    /* Fire the correct cookie */
30271 +    res = intcookie_fire (tbl, cookie);
30272 +
30273 +    /* Decrement reference count (and free if necessary) */
30274 +    intcookie_free_table (tbl);
30275 +
30276 +    return (res);
30277 +}
30278 +
30279 +/*
30280 + * intcookie_wait_cookie:
30281 + *    deschedule on a cookie if it has not already fired.
30282 + *    note - if the cookie is removed from the table, then
30283 + *           we free it off when we're woken up.
30284 + */
30285 +int
30286 +intcookie_wait (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30287 +{
30288 +    INTCOOKIE_ENTRY *ent;
30289 +    unsigned long flags;
30290 +    int res;
30291 +    
30292 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30293 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30294 +       if (ent->ent_cookie == cookie)
30295 +           break;
30296 +    
30297 +    if (ent == NULL)
30298 +    {
30299 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30300 +       return (-EINVAL);
30301 +    }
30302 +
30303 +    spin_lock (&ent->ent_lock);
30304 +    spin_unlock (&tbl->tbl_lock);
30305 +
30306 +    if (ent->ent_fired != 0)
30307 +    {
30308 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
30309 +       return (0);
30310 +    }
30311 +
30312 +    ent->ent_ref++;
30313 +    kcondvar_waitsig (&ent->ent_wait, &ent->ent_lock, &flags);
30314 +    
30315 +    res = ent->ent_fired ? 0 : -EINTR;
30316 +
30317 +    if (--ent->ent_ref > 0)
30318 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
30319 +    else
30320 +    {
30321 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
30322 +       
30323 +       spin_lock_destroy (&ent->ent_lock);
30324 +       kcondvar_destroy (&ent->ent_wait);
30325 +
30326 +       KMEM_FREE (ent, sizeof (INTCOOKIE_ENTRY));
30327 +    }
30328 +
30329 +    return (res);
30330 +}
30331 +
30332 +int
30333 +intcookie_arm (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30334 +{
30335 +    INTCOOKIE_ENTRY *ent;
30336 +    unsigned long flags;
30337 +
30338 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30339 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30340 +       if (ent->ent_cookie == cookie)
30341 +           break;
30342 +    
30343 +    if (ent == NULL)
30344 +    {
30345 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30346 +       return (-EINVAL);
30347 +    }
30348 +           
30349 +    spin_lock (&ent->ent_lock);
30350 +    ent->ent_fired = 0;
30351 +    spin_unlock (&ent->ent_lock);
30352 +
30353 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30354 +
30355 +    return (0);
30356 +}
30357 +
30358 +/*
30359 + * Local variables:
30360 + * c-file-style: "stroustrup"
30361 + * End:
30362 + */
30363 Index: linux-2.4.21/drivers/net/qsnet/elan4/Makefile
30364 ===================================================================
30365 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/Makefile  2004-02-23 16:02:56.000000000 -0500
30366 +++ linux-2.4.21/drivers/net/qsnet/elan4/Makefile       2005-06-01 23:12:54.608437736 -0400
30367 @@ -0,0 +1,31 @@
30368 +#
30369 +# Makefile for Quadrics QsNet
30370 +#
30371 +# Copyright (c) 2002-2004 Quadrics Ltd
30372 +#
30373 +# File: drivers/net/qsnet/elan4/Makefile
30374 +#
30375 +
30376 +
30377 +#
30378 +
30379 +#
30380 +# Makefile for Quadrics QsNet
30381 +#
30382 +# Copyright (c) 2004 Quadrics Ltd.
30383 +#
30384 +# File: driver/net/qsnet/elan4/Makefile
30385 +#
30386 +
30387 +list-multi             := elan4.o
30388 +elan4-objs     := device.o i2c.o mmu.o sdram.o debug.o routetable.o trap.o user.o user_ddcq.o regions.o intcookie.o neterr.o device_Linux.o user_Linux.o procfs_Linux.o mmu_Linux.o
30389 +export-objs            := device.o device_Linux.o mmu.o mmu_Linux.o procfs_Linux.o routetable.o sdram.o trap.o
30390 +obj-$(CONFIG_ELAN4)    := elan4.o
30391 +
30392 +elan4.o : $(elan4-objs)
30393 +       $(LD) -r -o $@ $(elan4-objs)
30394 +
30395 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
30396 +
30397 +include $(TOPDIR)/Rules.make
30398 +
30399 Index: linux-2.4.21/drivers/net/qsnet/elan4/Makefile.conf
30400 ===================================================================
30401 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/Makefile.conf     2004-02-23 16:02:56.000000000 -0500
30402 +++ linux-2.4.21/drivers/net/qsnet/elan4/Makefile.conf  2005-06-01 23:12:54.608437736 -0400
30403 @@ -0,0 +1,10 @@
30404 +# Flags for generating QsNet Linux Kernel Makefiles
30405 +MODNAME                =       elan4.o
30406 +MODULENAME     =       elan4
30407 +KOBJFILES      =       device.o i2c.o mmu.o sdram.o debug.o routetable.o trap.o user.o user_ddcq.o regions.o intcookie.o neterr.o device_Linux.o user_Linux.o procfs_Linux.o mmu_Linux.o
30408 +EXPORT_KOBJS   =       device.o device_Linux.o mmu.o mmu_Linux.o procfs_Linux.o routetable.o sdram.o trap.o
30409 +CONFIG_NAME    =       CONFIG_ELAN4
30410 +SGALFC         =       
30411 +# EXTRALINES START
30412 +
30413 +# EXTRALINES END
30414 Index: linux-2.4.21/drivers/net/qsnet/elan4/mmu.c
30415 ===================================================================
30416 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/mmu.c     2004-02-23 16:02:56.000000000 -0500
30417 +++ linux-2.4.21/drivers/net/qsnet/elan4/mmu.c  2005-06-01 23:12:54.610437432 -0400
30418 @@ -0,0 +1,854 @@
30419 +/*
30420 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
30421 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
30422 + * 
30423 + *    For licensing information please see the supplied COPYING file
30424 + *
30425 + */
30426 +
30427 +#ident "@(#)$Id: mmu.c,v 1.29.6.2 2005/01/18 16:58:12 david Exp $"
30428 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu.c,v $*/
30429 +
30430 +#include <qsnet/kernel.h>
30431 +#include <qsnet/kpte.h>
30432 +
30433 +#include <elan4/debug.h>
30434 +#include <elan4/device.h>
30435 +
30436 +int elan4_debug_mmu;
30437 +
30438 +/* Permission table - see ELAN4 MMU documentation */
30439 +u_char elan4_permtable[] =
30440 +{
30441 +   0x00, /* 0x000000 - Disable */
30442 +   0x00, /* 0x000000 - Unused  */
30443 +   0x01, /* 0x000001 - Local Data Read */
30444 +   0x03, /* 0x000011 - Local Data Write */
30445 +   0x11, /* 0x010001 - Local Read */
30446 +   0x10, /* 0x010000 - Local Execute */
30447 +   0x05, /* 0x000101 - Read Only */
30448 +   0x13, /* 0x010011 - Local Write */
30449 +   0x20, /* 0x100000 - Local Event Access */
30450 +   0x23, /* 0x100011 - Local Event Write Ac */
30451 +   0xa3, /* 1x100011 - Remote Ev Loc Write */
30452 +   0xaf, /* 1x101111 - Remote All */
30453 +   0x07, /* 0x000111 - Remote Read Only */
30454 +   0x0d, /* 0x001101 - Remote Write Only */
30455 +   0x0f, /* 0x001111 - Remote Read/Write */
30456 +   0xbf, /* 1x111111 - No Fault */
30457 +};
30458 +
30459 +u_char elan4_permreadonly[] = 
30460 +{
30461 +    PERM_Disabled,             /* PERM_Disabled */
30462 +    PERM_Disabled,             /* PERM_Unused */
30463 +    PERM_LocDataRead,          /* PERM_LocDataRead */
30464 +    PERM_LocDataRead,          /* PERM_LocDataWrite */
30465 +    PERM_LocRead,              /* PERM_LocRead */
30466 +    PERM_LocExecute,           /* PERM_LocExecute */
30467 +    PERM_ReadOnly,             /* PERM_ReadOnly */
30468 +    PERM_LocRead,              /* PERM_LocWrite */
30469 +    PERM_LocEventOnly,         /* PERM_LocEventOnly */
30470 +    PERM_LocDataRead,          /* PERM_LocEventWrite */
30471 +    PERM_LocDataRead,          /* PERM_RemoteEvent */
30472 +    PERM_ReadOnly,             /* PERM_RemoteAll */
30473 +    PERM_RemoteReadOnly,       /* PERM_RemoteReadOnly */
30474 +    PERM_ReadOnly,             /* PERM_RemoteWriteLocRead */
30475 +    PERM_ReadOnly,             /* PERM_DataReadWrite */
30476 +    PERM_ReadOnly,             /* PERM_NoFault */
30477 +};
30478 +
30479 +static void
30480 +elan4mmu_synctag (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx)
30481 +{
30482 +    E4_uint64 value = (he->he_tag[tagidx] & HE_TAG_VALID) ? he->he_tag[tagidx] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK) : INVALID_CONTEXT;
30483 +    
30484 +    if (he->he_next)
30485 +       value |= ((tagidx == 0) ? 
30486 +                 ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) :
30487 +                 ((he->he_next->he_entry << TAG_CHAINPTR_LOW_SHIFT) & TAG_CHAINPTR_18TO6_MASK));
30488 +    else if (tagidx == 0)
30489 +       value |= TAG_CHAINPTR_30TO19_MASK;
30490 +    
30491 +    MPRINTF (DBG_DEVICE, 4, "elan4mmu_synctag: he=%p tagidx=%d he->he_tag=%llx -> value=%llx\n", he, tagidx, he->he_tag[tagidx], value);
30492 +
30493 +    elan4_sdram_writeq (dev, he->he_entry + E4MMU_TAG_OFFSET(tagidx), value);
30494 +}
30495 +
30496 +static void
30497 +elan4mmu_chain_hents (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *phe, ELAN4_HASH_ENTRY *he)
30498 +{
30499 +    ASSERT ((elan4_sdram_readq (dev, phe->he_entry + E4MMU_TAG_OFFSET(0)) & TAG_CHAINPTR_30TO19_MASK) == TAG_CHAINPTR_30TO19_MASK);
30500 +
30501 +    elan4_sdram_writeq (dev, phe->he_entry + E4MMU_TAG_OFFSET(1),
30502 +                       ((phe->he_tag[1] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK)) | ((he->he_entry << TAG_CHAINPTR_LOW_SHIFT) & TAG_CHAINPTR_18TO6_MASK)));
30503 +    elan4_sdram_writeq (dev, phe->he_entry + E4MMU_TAG_OFFSET(0),
30504 +                       ((phe->he_tag[0] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK)) | ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK)));
30505 +}
30506 +
30507 +static void
30508 +elan4mmu_writepte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx, E4_uint64 value)
30509 +{
30510 +    /*
30511 +     * NOTE - we can only change a valid PTE if we're upgrading its permissions,
30512 +     * any other changes should have invalidated it first. */
30513 +
30514 +    MPRINTF (DBG_DEVICE, 4, "elan4mmu_writepte: he=%p tagidx=%d pteidx=%x value=%llx\n", he, tagidx, pteidx, (unsigned long long) value);
30515 +
30516 +    if (pteidx == 3)
30517 +    {
30518 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD1_OFFSET(tagidx), (value >> 16) & 0xFFFF);
30519 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD2_OFFSET(tagidx), (value >> 32) & 0xFFFF);
30520 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx), (value >> 0)  & 0xFFFF);
30521 +    }
30522 +    else
30523 +    {
30524 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE_HIGH_OFFSET(tagidx, pteidx), (value >> 32) & 0xFFFF);
30525 +       elan4_sdram_writel (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx), value & 0xFFFFFFFF);
30526 +    }
30527 +}
30528 +
30529 +static void
30530 +elan4mmu_invalidatepte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx)
30531 +{
30532 +    if (pteidx == 3)
30533 +       elan4_sdram_writeb (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx), PTE_SetPerm (PERM_Disabled));
30534 +    else
30535 +       elan4_sdram_writeb (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx), PTE_SetPerm (PERM_Disabled));
30536 +}
30537 +
30538 +static E4_uint64
30539 +elan4mmu_readpte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx)
30540 +{
30541 +    if (pteidx == 3)
30542 +       return (((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx)) << 0)  |
30543 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD1_OFFSET(tagidx)) << 16) |
30544 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD2_OFFSET(tagidx)) << 32));
30545 +    else
30546 +       return ((E4_uint64) elan4_sdram_readl (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx)) |
30547 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE_HIGH_OFFSET(tagidx, pteidx)) << 32));
30548 +}
30549 +
30550 +
30551 +void
30552 +elan4mmu_flush_tlb (ELAN4_DEV *dev)
30553 +{
30554 +    PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH);
30555 +
30556 +    while (read_reg64 (dev, SysControlReg) & CONT_TLB_FLUSH)
30557 +       DELAY (1);
30558 +}
30559 +
30560 +/*
30561 + * elan4mmu_flush_tlb_hash - this flushes the hash copy entries and the elan
30562 + * tlb.  However after the write to the hash copy entry if the elan was
30563 + * in the process of walking, then it could write the hash copy with a valid
30564 + * entry which we had just invalidated. However once we've seen the tlb flushed
30565 + * then if the walk engine had done a write - then we need to invalidate the
30566 + * hash copy entries again and reflush the tlb.
30567 + *
30568 + * If we're invalidating a lot of hash blocks, then the chances are that the
30569 + * walk engine will perform a write - so we flush the tlb first, then invalidate
30570 + * the hash copy entries, then flush the tlb again.
30571 + */
30572 +static void
30573 +elan4mmu_flush_tlb_hash (ELAN4_DEV *dev, int tbl, unsigned baseidx, unsigned topidx)
30574 +{
30575 +    int       notmany = (abs(topidx - baseidx) < 5) ? 1 : 0;
30576 +    int       hashidx;
30577 +    E4_uint32 reg;
30578 +
30579 +    if (notmany)
30580 +       PULSE_SYSCONTROL (dev, CONT_CLEAR_WALK_WROTE_TABLES);
30581 +    else
30582 +       elan4mmu_flush_tlb(dev);
30583 +
30584 +    do {
30585 +       for (hashidx = baseidx; hashidx <= topidx; hashidx++)
30586 +           if (dev->dev_mmuhash[tbl][hashidx].he_tag[0] & HE_TAG_COPY)
30587 +           {
30588 +               ASSERT ((dev->dev_mmuhash[tbl][hashidx].he_tag[0] & HE_TAG_VALID) == 0);
30589 +               ASSERT ((dev->dev_mmuhash[tbl][hashidx].he_tag[1] & HE_TAG_VALID) == 0);
30590 +
30591 +               elan4mmu_synctag (dev, &dev->dev_mmuhash[tbl][hashidx], 0);
30592 +               elan4mmu_synctag (dev, &dev->dev_mmuhash[tbl][hashidx], 1);
30593 +           }
30594 +       
30595 +       PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH);
30596 +       
30597 +       while ((reg = read_reg64 (dev, SysControlReg)) & CONT_TLB_FLUSH)
30598 +           DELAY (1);
30599 +       
30600 +    } while (notmany-- && (reg & CONT_CLEAR_WALK_WROTE_TABLES) != 0);
30601 +}
30602 +
30603 +void
30604 +elan4mmu_display_hent (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int hashidx)
30605 +{
30606 +    int tagidx;
30607 +
30608 +    elan4_debugf (DBG_DEVICE, DBG_MMU, "elan4mmu_display_hent: hashidx=%d he=%p entry at %lx\n", hashidx, he, he->he_entry);
30609 +    elan4_debugf (DBG_DEVICE, DBG_MMU, "                       next=%p prev=%p chain=%p,%p\n", he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1]);
30610 +    for (tagidx = 0; tagidx < 2; tagidx++)
30611 +    {
30612 +       E4_uint64 tag  = elan4_sdram_readq (dev, he->he_entry + E4MMU_TAG_OFFSET(tagidx));
30613 +       E4_uint64 pte0 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 0));
30614 +       E4_uint64 pte1 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 1));
30615 +       E4_uint64 pte2 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 2));
30616 +       E4_uint64 pte3 = ((pte0 >> 48) | (pte1 >> 32) | (pte2 >> 16));
30617 +       
30618 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Tag %d (%llx,%08x) context=%04x vaddr=%llx\n", tagidx, he->he_tag[tagidx], he->he_pte[tagidx], (int) (tag & TAG_CONTEXT_MASK), (tag & TAG_ADDRESS_MASK));
30619 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 0 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte0 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
30620 +                     (int) (pte0 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte0 & PTE_TYPE_MASK), (pte0 & PTE_MOD_MASK) ? " mod" : "", (pte0 & PTE_REF_MASK) ? " ref" : "");
30621 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 1 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte1 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
30622 +                     (int) (pte1 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte1 & PTE_TYPE_MASK), (pte1 & PTE_MOD_MASK) ? " mod" : "", (pte1 & PTE_REF_MASK) ? " ref" : "");
30623 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 2 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte2 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
30624 +                     (int) (pte2 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte2 & PTE_TYPE_MASK), (pte2 & PTE_MOD_MASK) ? " mod" : "", (pte2 & PTE_REF_MASK) ? " ref" : "");
30625 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 3 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte3 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
30626 +                     (int) (pte3 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte3 & PTE_TYPE_MASK), (pte3 & PTE_MOD_MASK) ? " mod" : "", (pte3 & PTE_REF_MASK) ? " ref" : "");
30627 +    }
30628 +}
30629 +
30630 +static __inline__ ELAN4_HASH_ENTRY *
30631 +he_ctxt_next (ELAN4_HASH_ENTRY *he, int ctxnum)
30632 +{
30633 +    return ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxnum) ? he->he_chain[0] : he->he_chain[1];
30634 +}
30635 +
30636 +static __inline__ ELAN4_HASH_ENTRY *
30637 +he_ctxt_unlink (ELAN4_CTXT *ctxt, int tbl, int hashidx, ELAN4_HASH_ENTRY *prevhe, ELAN4_HASH_ENTRY *he, ELAN4_HASH_ENTRY *next)
30638 +{
30639 +    /* Check whether either tag is in use by this context */
30640 +    if ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num || (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
30641 +       return he;
30642 +
30643 +    if (prevhe == NULL)
30644 +       ctxt->ctxt_mmuhash[tbl][hashidx] = next;
30645 +    else
30646 +    {
30647 +       /* previous he, ensure that both chain pointers are changed if this ctxt is using both tags */
30648 +       ASSERT ((prevhe->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num || (prevhe->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num);
30649 +
30650 +       if ((prevhe->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
30651 +           prevhe->he_chain[0] = next;
30652 +       if ((prevhe->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
30653 +           prevhe->he_chain[1] = next;
30654 +    }
30655 +
30656 +    return prevhe;
30657 +}
30658 +
30659 +void
30660 +elan4mmu_display (ELAN4_CTXT *ctxt, int tbl, const char *tag)
30661 +{
30662 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
30663 +    ELAN4_HASH_ENTRY *he;
30664 +    int hashidx;
30665 +
30666 +    for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
30667 +       for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxt->ctxt_num))
30668 +       {
30669 +           elan4_debugf (DBG_DEVICE, DBG_MMU, "%s: hashidx=%d he=%p tags <%llx,%llx>\n", tag, hashidx, he,
30670 +                         (he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ? E4MMU_TAG2VADDR (he->he_tag[0], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1) : 0,
30671 +                         (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ? E4MMU_TAG2VADDR (he->he_tag[1], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1) : 0);
30672 +           elan4mmu_display_hent (dev, he, hashidx);
30673 +       }
30674 +}
30675 +
30676 +static ELAN4_HASH_ENTRY *
30677 +elan4mmu_alloc_hent (ELAN4_DEV *dev, int tbl, int hashidx, E4_uint64 newtag, int *tagidx)
30678 +{
30679 +    ELAN4_HASH_ENTRY *he, *phe;
30680 +    unsigned long flags;
30681 +    int i;
30682 +
30683 +    spin_lock_irqsave (&dev->dev_mmulock, flags);
30684 +
30685 +    /* 2nd see if there are any partial free blocks */
30686 +    if ((he = dev->dev_mmufree[tbl][hashidx]) != NULL)
30687 +    {
30688 +       *tagidx = ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) ? 0 : 1;
30689 +       
30690 +       MPRINTF (DBG_DEVICE, 3, "elan4mmu_alloc_hent: allocate he=%p idx=%d%s\n", he, *tagidx, (he == &dev->dev_mmuhash[tbl][hashidx]) ? " hash-block" : "");
30691 +       
30692 +       he->he_tag[*tagidx] = newtag | HE_TAG_VALID;
30693 +
30694 +       elan4mmu_synctag (dev, he, *tagidx);
30695 +       
30696 +       if ((he->he_tag[(*tagidx) ^ 1] & TAG_CONTEXT_MASK) != INVALID_CONTEXT)
30697 +       {
30698 +           MPRINTF (DBG_DEVICE, 3, "elan4mmu_alloc_hent: block full - remove from freelist\n");
30699 +           dev->dev_mmufree[tbl][hashidx] = he->he_chain[*tagidx];
30700 +       }
30701 +       
30702 +       spin_unlock_irqrestore (&dev->dev_mmulock, flags);
30703 +       return (he);
30704 +    }
30705 +    
30706 +    if ((he = dev->dev_mmufreelist) != NULL)
30707 +       dev->dev_mmufreelist = he->he_next;
30708 +    else
30709 +    {
30710 +       ELAN4_HASH_CHUNK *hc;
30711 +       sdramaddr_t       entry;
30712 +
30713 +       KMEM_ALLOC (hc, ELAN4_HASH_CHUNK *, sizeof (ELAN4_HASH_CHUNK), 0);
30714 +       
30715 +       if (hc == NULL)
30716 +       {
30717 +           spin_unlock_irqrestore (&dev->dev_mmulock, flags);
30718 +           return ((ELAN4_HASH_ENTRY *) NULL);
30719 +       }
30720 +       
30721 +       if ((entry = elan4_sdram_alloc (dev, sizeof (E4_HashTableEntry) * ELAN4_HENT_CHUNKS)) == (sdramaddr_t) 0)
30722 +       {
30723 +           spin_unlock_irqrestore (&dev->dev_mmulock, flags);
30724 +
30725 +           KMEM_FREE (hc, sizeof (ELAN4_HASH_CHUNK));
30726 +           return ((ELAN4_HASH_ENTRY *) NULL);
30727 +       }
30728 +
30729 +       list_add_tail (&hc->hc_link, &dev->dev_hc_list);
30730 +
30731 +       elan4_sdram_zeroq_sdram (dev, entry, sizeof (E4_HashTableEntry) * ELAN4_HENT_CHUNKS);
30732 +
30733 +       /* now initialise all chunks and chain all but the first onto the freelist */
30734 +       for (i = 0; i < ELAN4_HENT_CHUNKS; i++, entry += sizeof (E4_HashTableEntry))
30735 +       {
30736 +           hc->hc_hents[i].he_entry = entry;
30737 +
30738 +           if (i == 0)
30739 +               he = &hc->hc_hents[0];
30740 +           else
30741 +           {
30742 +               hc->hc_hents[i].he_next = dev->dev_mmufreelist;
30743 +               dev->dev_mmufreelist = &hc->hc_hents[i];
30744 +           }
30745 +       }
30746 +    }
30747 +
30748 +    /* Initialise hash entry, using slot 0 */
30749 +    *tagidx = 0;
30750 +
30751 +    he->he_next     = NULL;
30752 +    he->he_prev     = NULL;
30753 +    he->he_chain[0] = NULL;
30754 +    he->he_chain[1] = NULL;
30755 +    he->he_tag[0]   = newtag | HE_TAG_VALID;
30756 +    he->he_tag[1]   = E4MMU_TAG(0, INVALID_CONTEXT);
30757 +    he->he_pte[0]   = 0;
30758 +    he->he_pte[1]   = 0;
30759 +    
30760 +    elan4mmu_synctag (dev, he, 0);
30761 +    
30762 +    /* add slot 1 to freelist */
30763 +    he->he_chain[1] = dev->dev_mmufree[tbl][hashidx];
30764 +    dev->dev_mmufree[tbl][hashidx] = he;
30765 +    
30766 +    /* add to mmuhash lists */
30767 +    for (phe = &dev->dev_mmuhash[tbl][hashidx]; phe->he_next; phe = phe->he_next)
30768 +       ;
30769 +    phe->he_next = he;
30770 +    he->he_prev  = phe;
30771 +    he->he_next  = NULL;
30772 +    
30773 +    /* finally chain the hash block into the hash tables */
30774 +    elan4mmu_chain_hents (dev, phe, he);
30775 +    
30776 +    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
30777 +    return (he);
30778 +}
30779 +
30780 +static void
30781 +elan4mmu_free_hent (ELAN4_DEV *dev, int tbl, int hashidx, ELAN4_HASH_ENTRY *he, int tagidx)
30782 +{
30783 +    unsigned long flags;
30784 +    int pteidx;
30785 +
30786 +    /* Invalidate the tag, and zero all ptes */
30787 +    for (pteidx = 0; pteidx < 4; pteidx++)
30788 +       if (HE_GET_PTE(he, tagidx, pteidx))
30789 +           elan4mmu_writepte (dev, he, tagidx, pteidx, 0);
30790 +
30791 +    spin_lock_irqsave (&dev->dev_mmulock, flags);
30792 +
30793 +    he->he_tag[tagidx] = E4MMU_TAG(0, INVALID_CONTEXT);
30794 +    he->he_pte[tagidx] = 0;
30795 +
30796 +    elan4mmu_synctag (dev, he, tagidx);
30797 +
30798 +    if ((he->he_tag[tagidx^1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) /* Both tags are now free */
30799 +    {
30800 +       if (he == &dev->dev_mmuhash[tbl][hashidx])              /* it's the hash block entry */
30801 +       {                                                       /* so as it's already on the freelist */
30802 +           he->he_chain[tagidx] = he->he_chain[tagidx^1];      /* just copy its chain pointers */
30803 +
30804 +           MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => all free but hashblk\n", tbl, hashidx, tagidx, he);
30805 +       }
30806 +       else
30807 +       {
30808 +           MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => all free\n", tbl, hashidx, tagidx, he);
30809 +           
30810 +           /* XXXX - should remove it from the hash table, and 
30811 +           *         place back on the anonymous freelist */
30812 +           he->he_chain[tagidx] = he->he_chain[tagidx^1];
30813 +       }
30814 +    }
30815 +    else
30816 +    {
30817 +       /* Other tag still in use */
30818 +       he->he_chain[tagidx] = dev->dev_mmufree[tbl][hashidx];
30819 +       dev->dev_mmufree[tbl][hashidx] = he;
30820 +
30821 +       MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => other tag in use\n", tbl, hashidx, tagidx, he);
30822 +    }
30823 +    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
30824 +}
30825 +
30826 +ELAN4_HASH_ENTRY *
30827 +elan4mmu_ptealloc (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, unsigned int *tagidxp)
30828 +{
30829 +    ELAN4_DEV        *dev     = ctxt->ctxt_dev;
30830 +    unsigned         ctxnum  = ctxt->ctxt_num;
30831 +    unsigned          hashidx = E4MMU_HASH_INDEX (ctxnum, vaddr, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
30832 +    E4_uint64         newtag  = E4MMU_TAG(vaddr, ctxnum);
30833 +    ELAN4_HASH_ENTRY *he      = &dev->dev_mmuhash[tbl][hashidx];
30834 +    unsigned         tagidx;
30835 +
30836 +    MPRINTF (ctxt, 2, "elan4mmu_ptealloc: tbl=%d ctxnum=%d vaddr=%llx -> hashidx %d\n", tbl, ctxnum, vaddr, hashidx);
30837 +
30838 +    /* 1st) check whether we're reloading an existing entry */
30839 +    for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
30840 +    {
30841 +       ASSERT ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxnum || (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxnum);
30842 +
30843 +       for (tagidx = 0; tagidx < 2; tagidx++)
30844 +       {
30845 +           if ((he->he_tag[tagidx] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK | HE_TAG_VALID)) == (newtag | HE_TAG_VALID))
30846 +           {
30847 +               MPRINTF (ctxt, 2, "elan4mmu_ptealloc: return old he %p tagidx %d\n", he, tagidx);
30848 +
30849 +               *tagidxp = tagidx;
30850 +               return he;
30851 +           }
30852 +       }
30853 +    }
30854 +
30855 +    if ((he = elan4mmu_alloc_hent (dev, tbl, hashidx, newtag, &tagidx)) == NULL)
30856 +       return NULL;
30857 +
30858 +    /* chain onto context hash */
30859 +    if ((he->he_tag[tagidx ^ 1] & TAG_CONTEXT_MASK) == ctxnum) /* already chained using other link */
30860 +    {                                                          /* so ensure both slots are chained the same */
30861 +       he->he_chain[tagidx] = he->he_chain[tagidx^1];
30862 +    }
30863 +    else
30864 +    {
30865 +       he->he_chain[tagidx] = ctxt->ctxt_mmuhash[tbl][hashidx];
30866 +       ctxt->ctxt_mmuhash[tbl][hashidx] = he;
30867 +    }
30868 +
30869 +    MPRINTF (ctxt, 2, "elan4mmu_ptealloc: return new he %p tagidx %d\n", he, tagidx);
30870 +
30871 +    *tagidxp = tagidx;
30872 +
30873 +    return he;
30874 +}
30875 +
30876 +int
30877 +elan4mmu_pteload (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, E4_uint64 newpte)
30878 +{
30879 +    ELAN4_DEV        *dev     = ctxt->ctxt_dev;
30880 +    unsigned          pteidx  = E4MMU_SHIFT_ADDR(vaddr, dev->dev_pageshift[tbl]) & 3;
30881 +    unsigned         tagidx;
30882 +    ELAN4_HASH_ENTRY *he;
30883 +
30884 +    MPRINTF (ctxt, 0, "elan4mmu_pteload: ctx=%d tbl=%d pteidx=%d vaddr=%llx pte=%llx\n", 
30885 +           ctxt->ctxt_num, tbl, pteidx, (unsigned long long)vaddr, newpte);
30886 +
30887 +    spin_lock (&ctxt->ctxt_mmulock);
30888 +
30889 +    if ((he = elan4mmu_ptealloc (ctxt, tbl, vaddr, &tagidx)) == NULL)
30890 +    {
30891 +       spin_unlock (&ctxt->ctxt_mmulock);
30892 +       return -ENOMEM;
30893 +    }
30894 +
30895 +    MPRINTF (ctxt, 1, "elan4mmu_pteload: %s he=%p tagidx=%d pteidx=%d\n", HE_GET_PTE(he,0,pteidx) ? "reloading" : "loading", he, tagidx, pteidx);
30896 +    
30897 +    ASSERT (HE_GET_PTE(he,tagidx,pteidx) == 0 ||                                                       /* invalid -> valid */
30898 +           (elan4mmu_readpte (dev, he, tagidx, pteidx) & PTE_PPN_MASK) == (newpte & PTE_PPN_MASK));    /* or same phys address */
30899 +    
30900 +    elan4mmu_writepte (dev, he, tagidx, pteidx, newpte);
30901 +    
30902 +    HE_SET_PTE(he, tagidx, pteidx, (newpte & PTE_PERM_TYPE_MASK));
30903 +
30904 +    spin_unlock (&ctxt->ctxt_mmulock);
30905 +    return 0;
30906 +}
30907 +
30908 +void
30909 +elan4mmu_unload_range (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned long len)
30910 +{
30911 +    ELAN4_DEV        *dev       = ctxt->ctxt_dev;
30912 +    unsigned          ctxnum    = ctxt->ctxt_num;
30913 +    unsigned long     tagspan   = (1 << (dev->dev_pageshift[tbl] + 2));
30914 +    E4_Addr           end       = start + len - 1;
30915 +    int                      needflush = 0;
30916 +    unsigned          baseidx, topidx;
30917 +    unsigned          hashidx, tagidx, pteidx;
30918 +    ELAN4_HASH_ENTRY *he, *prevhe, *next;
30919 +    
30920 +    MPRINTF (ctxt, 0, "elan4mmu_unload_range: tbl=%d start=%llx end=%llx len=%lx\n", tbl, start, end, len);
30921 +
30922 +    /* determine how much of the hash table we've got to scan */
30923 +    
30924 +    /* GNAT 6760: When we have a Main page size which maps onto multiple Elan pages
30925 +     * we need to do something a bit more clever here or else it takes ms per page invalidate
30926 +     * This change helps in the meantime
30927 +     */
30928 +    /* if (len <= (1 << dev->dev_pageshift[tbl])) */
30929 +    if (len <= PAGE_SIZE)
30930 +    {
30931 +       baseidx = E4MMU_HASH_INDEX (ctxnum, start, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
30932 +       topidx  = E4MMU_HASH_INDEX (ctxnum, end,   dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
30933 +
30934 +       if (baseidx != topidx)
30935 +       {
30936 +           /* GNAT 6760: Need to search whole of the hash table (slow!) */
30937 +           baseidx = 0;
30938 +           topidx  = dev->dev_hashsize[tbl] - 1;
30939 +       }
30940 +    }
30941 +    else
30942 +    {
30943 +       baseidx = 0;
30944 +       topidx  = dev->dev_hashsize[tbl] - 1;
30945 +    }
30946 +
30947 +    MPRINTF (ctxt, 1, "elan4mmu_unload_range: baseidx=%d topidx=%d\n", baseidx, topidx);
30948 +
30949 +    spin_lock (&ctxt->ctxt_mmulock);
30950 +
30951 +    /* 1st - invalidate the tag for all hash blocks which are completely invalidated,
30952 +     *       and remember the first/last hash blocks */
30953 +    for (hashidx = baseidx; hashidx <= topidx; hashidx++)
30954 +       for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
30955 +           for (tagidx = 0; tagidx < 2; tagidx++)
30956 +               if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
30957 +               {
30958 +                   E4_Addr base = E4MMU_TAG2VADDR (he->he_tag[tagidx], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
30959 +                   E4_Addr top  = base + (tagspan -1);
30960 +                   
30961 +                   if (start < top && end > base)
30962 +                   {
30963 +                       unsigned bidx = (start <= base) ? 0 : (start & (tagspan-1)) >> dev->dev_pageshift[tbl];
30964 +                       unsigned tidx = (end   >= top)  ? 3 : (end   & (tagspan-1)) >> dev->dev_pageshift[tbl];
30965 +                       
30966 +                       MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx hashidx=%d bidx=%d tidx=%d\n", he, base, top, hashidx, bidx, tidx);
30967 +                       
30968 +                       for (pteidx = bidx; pteidx <= tidx; pteidx++)
30969 +                           if (HE_GET_PTE(he, tagidx, pteidx))
30970 +                           {
30971 +                               elan4mmu_invalidatepte (dev, he, tagidx, pteidx);
30972 +                               needflush = 1;
30973 +                           }
30974 +                   }
30975 +                   else if (base >= start && top <= end)               /* hash entry completely spanned */
30976 +                   {                                                   /* so invalidate the tag */
30977 +                       MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx spanned\n", he, base, top);
30978 +
30979 +                       he->he_tag[tagidx] &= ~HE_TAG_VALID;
30980 +                       
30981 +                       elan4mmu_synctag (dev, he, tagidx);
30982 +                       needflush = 1;
30983 +                   }
30984 +               }
30985 +
30986 +    if (needflush)
30987 +    {
30988 +       /* 2nd invalidate the first/last hash blocks if they are partially invalidated
30989 +        * and flush the tlb/hash copy blocks */
30990 +       elan4mmu_flush_tlb_hash (dev, tbl, baseidx, topidx);
30991 +       
30992 +       /* 3rd free off the hash entries which are completely invalidated */
30993 +       for (hashidx = baseidx; hashidx <= topidx; hashidx++)
30994 +           for (prevhe = NULL, he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = next)
30995 +           {
30996 +               next = he_ctxt_next (he, ctxnum);
30997 +               
30998 +               for (tagidx = 0; tagidx < 2; tagidx++)
30999 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
31000 +                   {
31001 +                       E4_Addr base = E4MMU_TAG2VADDR (he->he_tag[tagidx], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
31002 +                       E4_Addr top  = base + (tagspan -1);
31003 +                       
31004 +                       if (start < top && end > base)
31005 +                       {
31006 +                           unsigned bidx = (start <= base) ? 0 : (start & (tagspan-1)) >> dev->dev_pageshift[tbl];
31007 +                           unsigned tidx = (end   >= top)  ? 3 : (end   & (tagspan-1)) >> dev->dev_pageshift[tbl];
31008 +                           
31009 +                           MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx bidx=%d tidx=%d\n", he, base, top, bidx, tidx);
31010 +                           
31011 +                           for (pteidx = bidx; pteidx <= tidx; pteidx++)
31012 +                               if (HE_GET_PTE(he, tagidx, pteidx))
31013 +                               {
31014 +                                   HE_SET_PTE(he, tagidx, pteidx, 0);
31015 +                                   
31016 +                                   elan4mmu_writepte (dev, he, tagidx, pteidx, 0);
31017 +                               }
31018 +                       }
31019 +                       
31020 +                       if ((base >= start && top <= end) || he->he_pte[tagidx] == 0)   /* hash entry completely spanned or all pte's cleared */
31021 +                       {                                                                       /* so invalidate the pte's and free it */
31022 +                           
31023 +                           MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx spanned or empty\n", he, base, top);
31024 +                           
31025 +                           elan4mmu_free_hent (dev, tbl, hashidx, he, tagidx);
31026 +                       }
31027 +                   }
31028 +               
31029 +               prevhe = he_ctxt_unlink (ctxt, tbl, hashidx, prevhe, he, next);
31030 +           }
31031 +    }
31032 +    spin_unlock (&ctxt->ctxt_mmulock);
31033 +}
31034 +
31035 +void
31036 +elan4mmu_invalidate_ctxt (ELAN4_CTXT *ctxt)
31037 +{
31038 +    ELAN4_DEV *dev    = ctxt->ctxt_dev;
31039 +    int        ctxnum = ctxt->ctxt_num;
31040 +    ELAN4_HASH_ENTRY *he;
31041 +    int tbl, hashidx, tagidx;
31042 +
31043 +    MPRINTF (ctxt, 0, "elan4mmu_invalidate_ctxt: invalidating ctxnum=%d\n", ctxnum);
31044 +
31045 +    spin_lock (&ctxt->ctxt_mmulock);
31046 +
31047 +    /* 1st invalidate all tags belonging to me */
31048 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
31049 +       for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
31050 +           for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
31051 +               for (tagidx = 0; tagidx < 2; tagidx++)
31052 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum) /* own tag block */
31053 +                   {
31054 +                       MPRINTF (ctxt, 1, "elan4mmu_invalidate_ctxt: he=%p addr=%llx hashidx=%d tagidx=%d\n", 
31055 +                                he, he->he_tag[tagidx] & TAG_ADDRESS_MASK, hashidx, tagidx);
31056 +
31057 +                       he->he_tag[tagidx] &= ~HE_TAG_VALID;
31058 +                       
31059 +                       elan4mmu_synctag (dev, he, tagidx);
31060 +                   }
31061 +
31062 +    /* 2nd flush the tlb & cached hash block */
31063 +    elan4mmu_flush_tlb (dev);
31064 +    
31065 +    /* 3rd invalidate all pte's and free off the hash entries */
31066 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
31067 +       for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
31068 +           while ((he = ctxt->ctxt_mmuhash[tbl][hashidx]) != NULL)
31069 +           {
31070 +               ctxt->ctxt_mmuhash[tbl][hashidx] = he_ctxt_next (he, ctxnum);
31071 +
31072 +               for (tagidx = 0; tagidx < 2; tagidx++)
31073 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
31074 +                       elan4mmu_free_hent (dev, tbl, hashidx, he, tagidx);
31075 +           }
31076 +    spin_unlock (&ctxt->ctxt_mmulock);
31077 +}
31078 +
31079 +ELAN4_HASH_CACHE *
31080 +elan4mmu_reserve (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned int npages, int cansleep)
31081 +{
31082 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31083 +    E4_Addr           end      = start + (npages << dev->dev_pageshift[tbl]) - 1;
31084 +    unsigned long     tagshift = dev->dev_pageshift[tbl] + 2;
31085 +    E4_Addr           tagspan  = 1 << tagshift;
31086 +    E4_Addr           base     = (start & ~(tagspan-1));
31087 +    E4_Addr           top      = (end   & ~(tagspan-1)) + (tagspan-1);
31088 +    unsigned int      nhes     = (top - base + 1) >> tagshift;
31089 +    ELAN4_HASH_CACHE *hc;
31090 +    unsigned int      tagidx,  pteidx;
31091 +    E4_Addr           addr;
31092 +    int                      i;
31093 +    
31094 +    MPRINTF (ctxt, 0, "elan4mmu_reserve: start=%llx npages=%d\n", start, npages);
31095 +    MPRINTF (ctxt, 0, "         pageshift=%d tagspan=%lx base=%llx top=%llx end=%llx nhes=%d\n",
31096 +            dev->dev_pageshift[tbl], tagspan, base, top, end, nhes);
31097 +
31098 +    KMEM_ALLOC (hc, ELAN4_HASH_CACHE *, offsetof (ELAN4_HASH_CACHE, hc_hes[nhes]), cansleep);
31099 +
31100 +    if (hc == NULL)
31101 +       return NULL;
31102 +
31103 +    hc->hc_start = start;
31104 +    hc->hc_end   = end;
31105 +    hc->hc_tbl   = tbl;
31106 +
31107 +    spin_lock (&ctxt->ctxt_mmulock);
31108 +    for (addr = base, i = 0; i < nhes; addr += tagspan, i++)
31109 +    {
31110 +       unsigned bidx = (i == 0)        ? (start & (tagspan-1)) >> dev->dev_pageshift[tbl] : 0;
31111 +       unsigned tidx = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[tbl] : 3;
31112 +
31113 +       
31114 +       if ((hc->hc_hes[i] = elan4mmu_ptealloc (ctxt, tbl, addr & ~(tagspan-1), &tagidx)) == NULL)
31115 +           goto failed;
31116 +
31117 +
31118 +       MPRINTF (ctxt, 2, "elan4mmu_reserve: tbl=%d addr=%llx -> hashidx=%d tagidx=%d\n", tbl, addr & ~(tagspan-1), 
31119 +                E4MMU_HASH_INDEX (ctxt->ctxt_num, (addr & ~(tagspan-1)), dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1), tagidx);
31120 +                        
31121 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
31122 +       {
31123 +           ASSERT (HE_GET_PTE (hc->hc_hes[i], tagidx, pteidx) == 0);
31124 +
31125 +           MPRINTF (ctxt, 2, "elan4mmu_reserve: i=%d addr=%llx he=%p (tagidx=%d pteidx=%d)\n",
31126 +                    i, addr, hc->hc_hes[i], tagidx, pteidx);
31127 +
31128 +           HE_SET_PTE (hc->hc_hes[i], tagidx, pteidx, PTE_PERM_TYPE_MASK);
31129 +       }
31130 +    }
31131 +    spin_unlock (&ctxt->ctxt_mmulock);
31132 +
31133 +    return hc;
31134 +
31135 + failed:
31136 +    for (i--, addr -= tagspan; i >= 0; i--, addr -= tagspan)
31137 +    {
31138 +       unsigned bidx    = (i == 0) ? (start & (tagspan-1)) >> dev->dev_pageshift[tbl] : 0;
31139 +       unsigned tidx    = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[tbl] : 3;
31140 +       unsigned hashidx = E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
31141 +       unsigned tagidx  = (addr == E4MMU_TAG2VADDR (hc->hc_hes[i]->he_tag[0], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1)) ? 0 : 1;
31142 +           
31143 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
31144 +           HE_SET_PTE(hc->hc_hes[i], tagidx, pteidx, 0);
31145 +
31146 +       if (hc->hc_hes[i]->he_pte[tagidx] == 0)
31147 +           elan4mmu_free_hent (dev, tbl, hashidx, hc->hc_hes[i], tagidx);
31148 +    }
31149 +    spin_unlock (&ctxt->ctxt_mmulock);
31150 +
31151 +    KMEM_FREE (hc, offsetof (ELAN4_HASH_CACHE, hc_hes[nhes]));
31152 +    
31153 +    return NULL;
31154 +}
31155 +
31156 +void
31157 +elan4mmu_release (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc)
31158 +{
31159 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31160 +    E4_Addr          start    = hc->hc_start;
31161 +    E4_Addr           end      = hc->hc_end;
31162 +    unsigned long     tagshift = dev->dev_pageshift[hc->hc_tbl] + 2;
31163 +    E4_Addr           tagspan  = 1 << tagshift;
31164 +    E4_Addr           base     = (start & ~(tagspan-1));
31165 +    E4_Addr           top      = (end   & ~(tagspan-1)) + (tagspan-1);
31166 +    unsigned int      nhes     = (top - base + 1) >> tagshift;
31167 +    ELAN4_HASH_ENTRY *prevhe, *he, *next;
31168 +    E4_Addr           addr;
31169 +    unsigned int      pteidx;
31170 +    int                      i;
31171 +
31172 +    spin_lock (&ctxt->ctxt_mmulock);
31173 +
31174 +    MPRINTF (ctxt, 0, "elan4mmu_release: base=%llx top=%llx\n", base, top);
31175 +
31176 +    for (addr = base, i = 0; i < nhes; addr += tagspan, i++)
31177 +    {
31178 +       unsigned bidx    = (i == 0)        ? (start & (tagspan-1)) >> dev->dev_pageshift[hc->hc_tbl] : 0;
31179 +       unsigned tidx    = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[hc->hc_tbl] : 3;
31180 +       unsigned hashidx = E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[hc->hc_tbl], dev->dev_hashsize[hc->hc_tbl]-1);
31181 +       unsigned tagidx  = (addr == E4MMU_TAG2VADDR (hc->hc_hes[i]->he_tag[0], hashidx, dev->dev_pageshift[hc->hc_tbl], dev->dev_hashsize[hc->hc_tbl]-1)) ? 0 : 1;
31182 +           
31183 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
31184 +       {
31185 +           elan4mmu_invalidatepte (dev, hc->hc_hes[i], tagidx, pteidx);
31186 +
31187 +           HE_SET_PTE(hc->hc_hes[i], tagidx, pteidx, 0);
31188 +       }
31189 +
31190 +       MPRINTF (ctxt, 2, "elan4mmu_release: i=%d addr=%llx he=%p (hashidx=%d tagidx=%d pteidx=%d) pte=%x\n",
31191 +                i, addr, hc->hc_hes[i], hashidx, tagidx, pteidx, hc->hc_hes[i]->he_pte[tagidx]);
31192 +
31193 +       /* remove from context hash */
31194 +       for (prevhe = NULL, he = ctxt->ctxt_mmuhash[hc->hc_tbl][hashidx], next = he_ctxt_next (he, ctxt->ctxt_num); he != hc->hc_hes[i]; he = next)
31195 +           next = he_ctxt_next (he, ctxt->ctxt_num);
31196 +       
31197 +       if (hc->hc_hes[i]->he_pte[tagidx] == 0)
31198 +           elan4mmu_free_hent (dev, hc->hc_tbl, hashidx, hc->hc_hes[i], tagidx);
31199 +
31200 +       prevhe = he_ctxt_unlink (ctxt, hc->hc_tbl, hashidx, prevhe, he, next);
31201 +    }
31202 +    spin_unlock (&ctxt->ctxt_mmulock);
31203 +}
31204 +
31205 +void
31206 +elan4mmu_set_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx, E4_uint64 newpte)
31207 +{
31208 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31209 +    unsigned int      tbl      = hc->hc_tbl;
31210 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
31211 +    E4_Addr           tagspan  = 1 << tagshift;
31212 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
31213 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
31214 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
31215 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
31216 +
31217 +    MPRINTF (ctxt, 2, "elan4mmu_set_pte: idx=%d addr=%llx he=%p (tagidx=%d pteidx=%d) newpte=%llx\n", idx, addr, he, tagidx, pteidx, newpte);
31218 +
31219 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
31220 +
31221 +    elan4mmu_writepte (dev, he, tagidx, pteidx, newpte);
31222 +}
31223 +
31224 +E4_uint64
31225 +elan4mmu_get_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx)
31226 +{
31227 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31228 +    unsigned int      tbl      = hc->hc_tbl;
31229 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
31230 +    E4_Addr           tagspan  = 1 << tagshift;
31231 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
31232 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
31233 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
31234 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
31235 +
31236 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
31237 +
31238 +    return elan4mmu_readpte (dev, he, tagidx, pteidx);
31239 +}
31240 +
31241 +void
31242 +elan4mmu_clear_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx)
31243 +{
31244 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31245 +    unsigned int      tbl      = hc->hc_tbl;
31246 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
31247 +    E4_Addr           tagspan  = 1 << tagshift;
31248 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
31249 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
31250 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
31251 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
31252 +
31253 +    MPRINTF (ctxt, 2, "elan4mmu_clear_pte: idx=%d addr=%llx he=%p (tagidx=%d pteidx=%d)\n", idx, addr, he, tagidx, pteidx);
31254 +
31255 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
31256 +
31257 +    elan4mmu_invalidatepte (dev, he, tagidx, pteidx);
31258 +}
31259 +
31260 +EXPORT_SYMBOL(elan4mmu_flush_tlb);
31261 +EXPORT_SYMBOL(elan4mmu_pteload);
31262 +EXPORT_SYMBOL(elan4mmu_unload_range);
31263 +EXPORT_SYMBOL(elan4mmu_reserve);
31264 +EXPORT_SYMBOL(elan4mmu_release);
31265 +EXPORT_SYMBOL(elan4mmu_set_pte);
31266 +EXPORT_SYMBOL(elan4mmu_get_pte);
31267 +EXPORT_SYMBOL(elan4mmu_clear_pte);
31268 +/*
31269 + * Local variables:
31270 + * c-file-style: "stroustrup"
31271 + * End:
31272 + */
31273 Index: linux-2.4.21/drivers/net/qsnet/elan4/mmu_Linux.c
31274 ===================================================================
31275 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/mmu_Linux.c       2004-02-23 16:02:56.000000000 -0500
31276 +++ linux-2.4.21/drivers/net/qsnet/elan4/mmu_Linux.c    2005-06-01 23:12:54.611437280 -0400
31277 @@ -0,0 +1,265 @@
31278 +/*
31279 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
31280 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
31281 + *
31282 + *    For licensing information please see the supplied COPYING file
31283 + *
31284 + */
31285 +
31286 +#ident "@(#)$Id: mmu_Linux.c,v 1.8 2004/05/10 14:10:46 daniel Exp $"
31287 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu_Linux.c,v $*/
31288 +
31289 +#include <qsnet/kernel.h>
31290 +
31291 +#include <elan4/debug.h>
31292 +#include <elan4/device.h>
31293 +
31294 +#include <linux/pci.h>
31295 +#include <linux/version.h>
31296 +
31297 +/*
31298 + * Convert a physical address into an pte.  This should generate a "local" pte for 
31299 + * physical addresses which are elan4 sdram or elan4 command queues.  For elan4
31300 + * registers and other addresses on the same bus, this should be the local pci 
31301 + * bus address.  All other addresses should access the physical address via the
31302 + * PCI bridge.
31303 + */
31304 +
31305 +#ifdef __alpha
31306 +#define ioaddr2paddr(ioaddr)   virt_to_phys((void *) __ioremap(ioaddr, PAGE_SIZE))
31307 +#elif defined(__ia64)
31308 +#define ioaddr2paddr(ioaddr)   ((ioaddr) & ~__IA64_UNCACHED_OFFSET)
31309 +#else
31310 +#define ioaddr2paddr(ioaddr)   (ioaddr)
31311 +#endif
31312 +
31313 +int
31314 +elan4mmu_categorise_paddr (ELAN4_DEV *dev, physaddr_t *physp)
31315 +{
31316 +    physaddr_t sdram_base = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31317 +    physaddr_t sdram_top  = ioaddr2paddr (pci_resource_end (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31318 +    physaddr_t regs_base  = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
31319 +    physaddr_t regs_top   = ioaddr2paddr (pci_resource_end (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
31320 +    physaddr_t phys       = *physp;
31321 +    int        iscommand;
31322 +
31323 +    if (phys >= sdram_base && phys <= sdram_top)
31324 +    {
31325 +       (*physp) = (phys ^ sdram_base);
31326 +       return ELAN4MMU_PADDR_SDRAM;
31327 +    }
31328 +    
31329 +    if (phys >= regs_base && phys < regs_top)
31330 +    {
31331 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
31332 +           iscommand = (phys < (regs_base + ELAN4_REVA_REG_OFFSET));
31333 +       else
31334 +           iscommand = (phys < (regs_base + ELAN4_REVB_I2C_OFFSET));
31335 +       
31336 +       if (iscommand)
31337 +       {
31338 +           (*physp) = phys ^ regs_base;
31339 +
31340 +           return ELAN4MMU_PADDR_COMMAND;
31341 +       }
31342 +       else
31343 +       {
31344 +           // XXXX (*physp) = phys2bus (phys);
31345 +
31346 +           return ELAN4MMU_PADDR_LOCALPCI;
31347 +       }
31348 +    }
31349 +
31350 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
31351 +    if (VALID_PAGE (virt_to_page (phys_to_virt (phys))))
31352 +#else
31353 +    if (virt_addr_valid (phys_to_virt (phys)))
31354 +#endif
31355 +       return ELAN4MMU_PADDR_PAGE;
31356 +    
31357 +    return ELAN4MMU_PADDR_OTHER;
31358 +}
31359 +
31360 +int
31361 +elan4mmu_sdram_aliascheck (ELAN4_CTXT *ctxt, E4_Addr addr, physaddr_t phys)
31362 +{
31363 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
31364 +
31365 +    /*
31366 +     * On MPSAS we don't allocate a large enough context table, so 
31367 +     * if we see an address/context pair which would "alias" because
31368 +     * they differ in unchecked hash bits to a previous pteload, 
31369 +     * then we kill the application.
31370 +     */
31371 +    unsigned hashval = (E4MMU_SHIFT_ADDR(addr, (dev->dev_pageshift[0]) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(ctxt->ctxt_num));
31372 +    
31373 +    if (dev->dev_rsvd_hashval[0] == 0xFFFFFFFF)
31374 +       dev->dev_rsvd_hashval[0] = hashval & dev->dev_rsvd_hashmask[0];
31375 +    
31376 +    if ((hashval & dev->dev_rsvd_hashmask[0]) != dev->dev_rsvd_hashval[0])
31377 +    {
31378 +       printk ("elan4mmu_sdram_aliascheck: vaddr=%016llx ctxnum=%x -> [%x] overlaps %x - %x [hashidx=%x]\n", (unsigned long long) addr, 
31379 +               ctxt->ctxt_num, hashval, hashval & dev->dev_rsvd_hashmask[0], dev->dev_rsvd_hashval[0],
31380 +               E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[0], dev->dev_hashsize[0]-1));
31381 +       
31382 +       return 0;
31383 +    }
31384 +
31385 +    if (((addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (phys & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT))))
31386 +    {
31387 +       printk ("elan4mmu_sdram_aliascheck: vaddr=%016llx incorrectly alias sdram at %lx\n", (unsigned long long) addr, 
31388 +               phys ^ pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31389 +       return 0;
31390 +    }
31391 +
31392 +    return 1;
31393 +}
31394 +
31395 +int
31396 +elan4mmu_alloc_topaddr (ELAN4_DEV *dev, physaddr_t paddr, unsigned type)
31397 +{
31398 +#if defined(__i386) && !defined(CONFIG_X86_PAE)
31399 +    if (dev->dev_topaddrvalid == 0)
31400 +    {
31401 +       dev->dev_topaddrvalid = 1;
31402 +
31403 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(0), 0);
31404 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(1), 0);
31405 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(2), 0);
31406 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(3), 0);
31407 +    }
31408 +    return (0);
31409 +#else
31410 +    register int i;
31411 +    E4_uint16 match;
31412 +
31413 +    if (dev->dev_topaddrmode)                                  /* ExtraMasterAddrBits=1 => match {paddr[63:50],type[3:2]} */
31414 +       match = ((paddr >> 48) & ~3) | ((type >> 2) & 3);
31415 +    else                                                       /* ExtraMasterAddrBits=0 => match {paddr[63:48]} */
31416 +       match = (paddr >> 48);
31417 +    
31418 +    MPRINTF (DBG_DEVICE, 2, "elan4mmu_alloc_topaddr: mode=%d paddr=%lx type=%x match=%x [%x %x.%x.%x.%x]\n",
31419 +            dev->dev_topaddrmode, paddr, type, match, dev->dev_topaddrvalid,
31420 +            dev->dev_topaddr[0], dev->dev_topaddr[1], dev->dev_topaddr[2], dev->dev_topaddr[3]);
31421 +    
31422 +    for (i = 0; i < 4; i++)
31423 +       if ((dev->dev_topaddrvalid & (1 << i)) && dev->dev_topaddr[i] == match)
31424 +           return (i);
31425 +    
31426 +    for (i = 0; i < 4; i++)
31427 +    {
31428 +       if ((dev->dev_topaddrvalid & (1 << i)) == 0)
31429 +       {
31430 +           MPRINTF (DBG_DEVICE, 2, "elan4mmu_alloc_topaddr: allocate slot %d for %x\n", i, match);
31431 +
31432 +           dev->dev_topaddrvalid |= (1 << i);
31433 +           dev->dev_topaddr[i] = match;
31434 +
31435 +           pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(i), match);
31436 +           return (i);
31437 +       }
31438 +    }
31439 +
31440 +    panic ("elan4mmu_alloc_topaddr: all topaddrs in use\n");
31441 +    return (0);
31442 +#endif
31443 +}
31444 +
31445 +E4_uint64
31446 +elan4mmu_phys2pte (ELAN4_DEV *dev, physaddr_t phys, unsigned perm)
31447 +{
31448 +    physaddr_t sdram_base = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31449 +    physaddr_t sdram_top  = ioaddr2paddr (pci_resource_end (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31450 +    physaddr_t regs_base  = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
31451 +    physaddr_t regs_top   = ioaddr2paddr (pci_resource_end (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
31452 +    int        iscommand;
31453 +    E4_uint64  pte;
31454 +    unsigned   type;
31455 +    
31456 +    if (phys >= sdram_base && phys <= sdram_top)
31457 +    {
31458 +       phys ^= sdram_base;
31459 +       type  = PTE_SetPerm (perm);
31460 +    }
31461 +    else if (phys >= regs_base && phys < regs_top)
31462 +    {
31463 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
31464 +           iscommand = (phys < (regs_base + ELAN4_REVA_REG_OFFSET));
31465 +       else
31466 +           iscommand = (phys < (regs_base + ELAN4_REVB_I2C_OFFSET));
31467 +       
31468 +       if (iscommand)
31469 +       {
31470 +           phys ^= regs_base;
31471 +           type  = PTE_SetPerm (perm) | PTE_CommandQueue;
31472 +       }
31473 +       else
31474 +       {
31475 +           type = PTE_SetPerm (perm) | PTE_PciNotLocal;
31476 +           // phys = phys2bus (phys);
31477 +       }
31478 +    }
31479 +    else
31480 +    {
31481 +       type = PTE_SetPerm (perm) | PTE_PciNotLocal | dev->dev_pteval;
31482 +
31483 +#ifdef LINUX_SPARC
31484 +       /* XXXX if not local pci bus, then or in the bypass bit */
31485 +       phys |= 0xfffe000000000000;
31486 +       type |= PTE_BigEndian;
31487 +#endif
31488 +
31489 +
31490 +#if defined(__alpha)
31491 +       phys |= alpha_mv.pci_dac_offset;
31492 +#endif
31493 +    }
31494 +
31495 +    if ((type & PTE_PciNotLocal) == 0)
31496 +       pte = (phys >> PTE_PADDR_SHIFT) | type;
31497 +    else
31498 +    {
31499 +       unsigned topaddr = elan4mmu_alloc_topaddr (dev, phys, type);
31500 +       
31501 +       if (dev->dev_topaddrmode)
31502 +           pte = (phys >> PTE_PADDR_SHIFT) | (type & ~0xc) | (topaddr << 2);
31503 +       else
31504 +           pte = ((phys >> PTE_PADDR_SHIFT) & ~PTE_TOPADDR_MASK) | (((E4_uint64) topaddr) << 45) | type;
31505 +    }
31506 +
31507 +    return pte;
31508 +}
31509 +
31510 +physaddr_t
31511 +elan4mmu_pte2phys (ELAN4_DEV *dev, E4_uint64 pte)
31512 +{
31513 +    physaddr_t sdram_base = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31514 +    physaddr_t regs_base  = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
31515 +    physaddr_t phys;
31516 +    
31517 +    if (pte & PTE_PciNotLocal)
31518 +    {
31519 +       if (dev->dev_topaddrmode)
31520 +           phys = ((physaddr_t)(dev->dev_topaddr[(pte >> 2) & 3] & 0xfffc) << 48) | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT);
31521 +       else
31522 +           phys = ((physaddr_t)(dev->dev_topaddr[(pte >> 45) & 3] & 0xffff) << 48)| ((pte & PTE_PPN_MASK & ~PTE_TOPADDR_MASK) << PTE_PADDR_SHIFT);
31523 +
31524 +#ifdef LINUX_SPARC     /* XXXX if not local pci bus, then or in the bypass bit */
31525 +       phys ^= 0xfffe000000000000;
31526 +#endif
31527 +
31528 +#if defined(__alpha)
31529 +       phys ^= alpha_mv.pci_dac_offset;
31530 +#endif
31531 +       return phys;
31532 +    }
31533 +    
31534 +    if (pte & PTE_CommandQueue)
31535 +       return (regs_base | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT));
31536 +    
31537 +    /* sdram */
31538 +    return (sdram_base | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT));
31539 +}
31540 +
31541 +EXPORT_SYMBOL(elan4mmu_phys2pte);
31542 +EXPORT_SYMBOL(elan4mmu_pte2phys);
31543 Index: linux-2.4.21/drivers/net/qsnet/elan4/neterr.c
31544 ===================================================================
31545 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/neterr.c  2004-02-23 16:02:56.000000000 -0500
31546 +++ linux-2.4.21/drivers/net/qsnet/elan4/neterr.c       2005-06-01 23:12:54.612437128 -0400
31547 @@ -0,0 +1,270 @@
31548 +/*
31549 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
31550 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
31551 + * 
31552 + *    For licensing information please see the supplied COPYING file
31553 + *
31554 + */
31555 +
31556 +#ident "@(#)$Id: neterr.c,v 1.4.6.3 2004/11/05 13:11:17 david Exp $"
31557 +/*      $Source: /cvs/master/quadrics/elan4mod/neterr.c,v $*/
31558 +
31559 +#include <qsnet/kernel.h>
31560 +
31561 +#include <elan4/sdram.h>
31562 +#include <elan4/debug.h>
31563 +#include <elan4/device.h>
31564 +#include <elan4/commands.h>
31565 +#include <elan4/trtype.h>
31566 +#include <elan4/neterr.h>
31567 +
31568 +typedef struct neterr_inputq
31569 +{
31570 +    E4_InputQueue      inputq;                                 /* input queue */
31571 +    E4_Event32         qevent;                                 /* input queue event */
31572 +    E4_uint64          sent;                                   /* # messages sent (cq flow control)*/
31573 +} NETERR_INPUTQ;
31574 +
31575 +#define NETERR_NSLOTS  64                                      /* single page of queue space (4Kb) */
31576 +
31577 +#define NETERR_RETRIES 16
31578 +#define NETERR_CQ_SIZE CQ_Size8K
31579 +#define NETERR_CQ_MSGS (CQ_Size(NETERR_CQ_SIZE) / (21*8))
31580 +#define NETERR_VP_COUNT        64                                      /* this *must* be > NETERR_CQ_MSGS */
31581 +#define NETERR_VP_BASE 1                                       /* use vp 1 upwards */
31582 +
31583 +void
31584 +elan4_neterr_interrupt (ELAN4_DEV *dev, void *arg)
31585 +{
31586 +    E4_Addr          qfptr  = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr));
31587 +    E4_Addr          qbptr  = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_bptr));
31588 +    E4_Addr          qfirst = DEVICE_NETERR_SLOTS_ADDR;
31589 +    E4_Addr          qlast  = qfirst + (NETERR_NSLOTS-1) * ELAN4_NETERR_MSG_SIZE;
31590 +    ELAN4_CQ        *cq     = dev->dev_neterr_intcq;
31591 +    int              count  = 0;
31592 +    ELAN4_CTXT      *ctxt;
31593 +    ELAN4_NETERR_MSG msg;
31594 +
31595 +    while (qfptr != qbptr)
31596 +    {
31597 +       elan4_sdram_copyq_from_sdram (dev, dev->dev_neterr_slots + (qfptr - qfirst), &msg, ELAN4_NETERR_MSG_SIZE);
31598 +
31599 +       ctxt = elan4_networkctxt (dev, msg.msg_context);
31600 +
31601 +       if (ctxt != NULL && ctxt->ctxt_ops->op_neterrmsg)
31602 +           ctxt->ctxt_ops->op_neterrmsg (ctxt, &msg);
31603 +       else
31604 +           PRINTF (DBG_DEVICE, DBG_NETERR, "elan4_neterr_interrupt: no process - sender %d.%d\n", msg.msg_sender.loc_node, msg.msg_sender.loc_context);
31605 +
31606 +       count++;
31607 +
31608 +       /* move on the from pointer */
31609 +       qfptr = (qfptr == qlast) ? qfirst : qfptr + ELAN4_NETERR_MSG_SIZE;
31610 +
31611 +       elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr), qfptr);
31612 +    }
31613 +
31614 +    if (count == 0)
31615 +    {
31616 +       printk ("elan4_neterr_interrupt: spurious\n");
31617 +       return;
31618 +    }
31619 +
31620 +    /* Issue the waitevent to the interrupt queue */
31621 +    writeq (WAIT_EVENT_CMD | (DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)),   cq->cq_mapping);
31622 +    writeq (  E4_EVENT_INIT_VALUE (-32 * count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),       cq->cq_mapping);
31623 +    writeq (  DEVICE_NETERR_INTCQ_ADDR,                                                                cq->cq_mapping);
31624 +    writeq (INTERRUPT_CMD | (dev->dev_neterr_intop.op_cookie << E4_MAIN_INT_SHIFT),            cq->cq_mapping);
31625 +
31626 +    pioflush_reg (dev);
31627 +}
31628 +
31629 +int
31630 +elan4_neterr_init (ELAN4_DEV *dev)
31631 +{
31632 +    unsigned int intqaddr;
31633 +    E4_Addr     qfirst, qlast;
31634 +    
31635 +    if ((dev->dev_neterr_inputq = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE)) == 0)
31636 +       return 0;
31637 +
31638 +    if ((dev->dev_neterr_slots = elan4_sdram_alloc (dev, roundup (NETERR_NSLOTS * ELAN4_NETERR_MSG_SIZE, SDRAM_PAGE_SIZE))) == 0)
31639 +       return 0;
31640 +
31641 +    if ((dev->dev_neterr_msgcq = elan4_alloccq (&dev->dev_ctxt, NETERR_CQ_SIZE, CQ_STENEnableBit | CQ_WriteEnableBit, CQ_Priority)) == NULL)
31642 +       return 0;
31643 +
31644 +    if ((dev->dev_neterr_intcq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_WaitEventEnableBit | CQ_InterruptEnableBit, CQ_Priority)) == NULL)
31645 +       return 0;
31646 +
31647 +    intqaddr = (dev->dev_cqoffset + elan4_cq2num (dev->dev_neterr_intcq)) * CQ_CommandMappingSize;
31648 +    qfirst   = DEVICE_NETERR_SLOTS_ADDR;
31649 +    qlast    = qfirst + (NETERR_NSLOTS-1) * ELAN4_NETERR_MSG_SIZE;
31650 +
31651 +    spin_lock_init (&dev->dev_neterr_lock);
31652 +
31653 +    /* Register an interrupt operation */
31654 +    dev->dev_neterr_intop.op_function = elan4_neterr_interrupt;
31655 +    dev->dev_neterr_intop.op_arg      = NULL;
31656 +
31657 +    elan4_register_intop (dev, &dev->dev_neterr_intop);
31658 +
31659 +    /* Initialise the inputq descriptor and event */
31660 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr), qfirst);
31661 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_bptr), qfirst);
31662 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_control), E4_InputQueueControl (qfirst, qlast, ELAN4_NETERR_MSG_SIZE));
31663 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_event), DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent));
31664 +    
31665 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_CountAndType), E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
31666 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_WritePtr), DEVICE_NETERR_INTCQ_ADDR);
31667 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_WriteValue), (dev->dev_neterr_intop.op_cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD);
31668 +
31669 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, sent), 0);
31670 +
31671 +    /* Map them all into the device context */
31672 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_INPUTQ_ADDR, (dev->dev_neterr_inputq >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_RemoteAll));
31673 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_INTCQ_ADDR,  (intqaddr >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocDataWrite) | PTE_CommandQueue);
31674 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_SLOTS_ADDR, (dev->dev_neterr_slots >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_DataReadWrite));
31675 +
31676 +    /* finally attach to the neterr context */
31677 +    if (elan4_attach_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM) != 0)
31678 +       panic ("elan4_neterr_init: failed to attach to neterr context\n");
31679 +
31680 +    /* and drop the context filter */
31681 +    elan4_set_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM, E4_FILTER_HIGH_PRI);
31682 +
31683 +    return 1;
31684 +}
31685 +
31686 +void
31687 +elan4_neterr_destroy (ELAN4_DEV *dev)
31688 +{
31689 +    if (dev->dev_neterr_intcq)
31690 +    {
31691 +       elan4_detach_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM);
31692 +       
31693 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_SLOTS_ADDR,  1 << dev->dev_pageshift[0]);
31694 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_INTCQ_ADDR,  1 << dev->dev_pageshift[0]);
31695 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_INPUTQ_ADDR, 1 << dev->dev_pageshift[0]);
31696 +
31697 +       spin_lock_destroy (&dev->dev_neterr_lock);
31698 +    }
31699 +
31700 +    if (dev->dev_neterr_intcq)
31701 +       elan4_freecq (&dev->dev_ctxt, dev->dev_neterr_intcq);
31702 +    dev->dev_neterr_intcq = NULL;
31703 +
31704 +    if (dev->dev_neterr_msgcq)
31705 +       elan4_freecq (&dev->dev_ctxt, dev->dev_neterr_msgcq);
31706 +    dev->dev_neterr_msgcq = NULL;
31707 +
31708 +    if (dev->dev_neterr_slots)
31709 +       elan4_sdram_free (dev, dev->dev_neterr_slots, roundup (NETERR_NSLOTS * ELAN4_NETERR_MSG_SIZE, SDRAM_PAGE_SIZE));
31710 +    dev->dev_neterr_slots = 0;
31711 +    
31712 +    if (dev->dev_neterr_inputq)
31713 +       elan4_sdram_free (dev, dev->dev_neterr_inputq, SDRAM_PAGE_SIZE);
31714 +    dev->dev_neterr_inputq = 0;
31715 +}
31716 +
31717 +int
31718 +elan4_neterr_sendmsg (ELAN4_DEV *dev, unsigned int nodeid, unsigned int retries, ELAN4_NETERR_MSG *msg)
31719 +{
31720 +    ELAN4_CQ  *cq = dev->dev_neterr_msgcq;
31721 +    E4_uint64  sent;
31722 +    E4_VirtualProcessEntry route;
31723 +    unsigned int vp;
31724 +    unsigned long flags;
31725 +
31726 +    spin_lock_irqsave (&dev->dev_neterr_lock, flags);
31727 +
31728 +    sent = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, sent));
31729 +
31730 +    PRINTF (DBG_DEVICE, DBG_NETERR, "elan4_neterr_sendmsg: nodeid=%d retries=%d cookie=%llx sender=%d,%d%s\n", 
31731 +           nodeid, retries, msg->msg_cookies[0], msg->msg_sender.loc_node, msg->msg_sender.loc_context,
31732 +           (dev->dev_neterr_queued - sent) >= NETERR_CQ_MSGS ? " - no cq space" : "");
31733 +
31734 +    if ((dev->dev_neterr_queued - sent) >= NETERR_CQ_MSGS)
31735 +    {
31736 +       spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
31737 +       return 0;
31738 +    }
31739 +
31740 +    vp = NETERR_VP_BASE + (dev->dev_neterr_queued % NETERR_VP_COUNT);
31741 +
31742 +    if (elan4_generate_route (&dev->dev_position, &route, ELAN4_NETERR_CONTEXT_NUM, nodeid, nodeid, FIRST_SYSTEM_PACKET | FIRST_HIGH_PRI) < 0)
31743 +    {
31744 +       spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
31745 +       return 0;
31746 +    }
31747 +
31748 +    elan4_write_route (dev, dev->dev_routetable, vp, &route);
31749 +
31750 +    writeq ((GUARD_CMD | GUARD_CHANNEL(0) | GUARD_RESET(retries)),                             cq->cq_mapping);
31751 +    writeq (NOP_CMD,                                                                           cq->cq_mapping);
31752 +    
31753 +    writeq (OPEN_STEN_PKT_CMD | OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, vp),             cq->cq_mapping);
31754 +    writeq (SEND_TRANS_CMD | (TR_INPUT_Q_GETINDEX << 16),                                      cq->cq_mapping);
31755 +    writeq (  DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq),                    cq->cq_mapping);
31756 +
31757 +    writeq (SEND_TRANS_CMD | (TR_WRITE (64 >> 3, 0, TR_DATATYPE_DWORD) << 16),                 cq->cq_mapping);
31758 +    writeq (  0 /* address */,                                                                 cq->cq_mapping);
31759 +    writeq (  ((E4_uint64 *) msg)[0],                                                          cq->cq_mapping);
31760 +    writeq (  ((E4_uint64 *) msg)[1],                                                          cq->cq_mapping);
31761 +    writeq (  ((E4_uint64 *) msg)[2],                                                          cq->cq_mapping);
31762 +    writeq (  ((E4_uint64 *) msg)[3],                                                          cq->cq_mapping);
31763 +    writeq (  ((E4_uint64 *) msg)[4],                                                          cq->cq_mapping);
31764 +    writeq (  ((E4_uint64 *) msg)[5],                                                          cq->cq_mapping);
31765 +    writeq (  ((E4_uint64 *) msg)[6],                                                          cq->cq_mapping);
31766 +    writeq (  ((E4_uint64 *) msg)[7],                                                          cq->cq_mapping);
31767 +
31768 +    writeq (SEND_TRANS_CMD | (TR_INPUT_Q_COMMIT << 16),                                                cq->cq_mapping);
31769 +    writeq (  DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq),                    cq->cq_mapping);
31770 +    writeq (  0 /* cookie */,                                                                  cq->cq_mapping);
31771 +    
31772 +    writeq (GUARD_CMD | GUARD_CHANNEL(0) | GUARD_RESET(NETERR_RETRIES),                                cq->cq_mapping);
31773 +    writeq (WRITE_DWORD_CMD | (DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, sent)),    cq->cq_mapping);
31774 +    writeq (  ++dev->dev_neterr_queued,                                                                cq->cq_mapping);
31775 +
31776 +    pioflush_reg (dev);
31777 +
31778 +    spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
31779 +
31780 +    return 1;
31781 +}
31782 +
31783 +int
31784 +elan4_neterr_iproc_trap (ELAN4_DEV *dev, ELAN4_IPROC_TRAP *trap)
31785 +{
31786 +    E4_IprocTrapHeader *hdrp = &trap->tr_transactions[trap->tr_trappedTrans];
31787 +    unsigned long flags;
31788 +
31789 +    switch (IPROC_TrapValue (hdrp->IProcStatusCntxAndTrType))
31790 +    {
31791 +    case InputEopErrorOnWaitForEop:
31792 +    case InputEopErrorTrap:
31793 +    case InputCrcErrorAfterPAckOk:
31794 +       return 1;
31795 +
31796 +    case InputEventEngineTrapped:
31797 +       printk ("elan%d: device_iproc_trap: InputEventEngineTrapped - Trans=%x TrAddr=%llx\n", 
31798 +               dev->dev_instance, (int)IPROC_TransactionType (hdrp->IProcStatusCntxAndTrType), (long long) hdrp->TrAddr);
31799 +
31800 +       if ((IPROC_TransactionType (hdrp->IProcStatusCntxAndTrType) & TR_OPCODE_MASK) == (TR_INPUT_Q_COMMIT & TR_OPCODE_MASK) &&
31801 +           hdrp->TrAddr == DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq))
31802 +       {
31803 +           spin_lock_irqsave (&dev->dev_neterr_lock, flags);
31804 +           writeq ((DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)) | SET_EVENT_CMD, dev->dev_neterr_msgcq->cq_mapping);
31805 +           spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
31806 +           return 1;
31807 +       }
31808 +       
31809 +    default:
31810 +       return 0;
31811 +    }
31812 +}
31813 +/*
31814 + * Local variables:
31815 + * c-file-style: "stroustrup"
31816 + * End:
31817 + */
31818 Index: linux-2.4.21/drivers/net/qsnet/elan4/procfs_Linux.c
31819 ===================================================================
31820 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/procfs_Linux.c    2004-02-23 16:02:56.000000000 -0500
31821 +++ linux-2.4.21/drivers/net/qsnet/elan4/procfs_Linux.c 2005-06-01 23:12:54.613436976 -0400
31822 @@ -0,0 +1,1041 @@
31823 +/*
31824 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
31825 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
31826 + *
31827 + *    For licensing information please see the supplied COPYING file
31828 + *
31829 + */
31830 +
31831 +#ident "@(#)$Id: procfs_Linux.c,v 1.27.2.5 2005/01/18 14:36:17 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
31832 +/*      $Source: /cvs/master/quadrics/elan4mod/procfs_Linux.c,v $*/
31833 +
31834 +#include <qsnet/kernel.h>
31835 +
31836 +#include <linux/module.h>
31837 +#include <linux/proc_fs.h>
31838 +#include <linux/ctype.h>
31839 +
31840 +#include <qsnet/procfs_linux.h>
31841 +
31842 +#include <elan4/i2c.h>
31843 +#include <elan4/debug.h>
31844 +#include <elan4/device.h>
31845 +#include <elan4/user.h>
31846 +
31847 +/*
31848 + *
31849 + * procfs format for elan4:
31850 + *
31851 + * /proc/qsnet/elan4/config
31852 + *    elan4_debug
31853 + *    elan4_debug_toconsole
31854 + *    elan4_debug_tobuffer
31855 + *    elan4_debug_display_ctxt
31856 + *    elan4_debug_ignore_ctxt
31857 + *    elan4_debug_ignore_type
31858 + *    elan4_debug_mmu
31859 + *    elan4_mainint_punt_loops
31860 + *    user_p2p_route_options
31861 + *    user_bcast_route_options
31862 + *
31863 + * /proc/qsnet/elan4/deviceN
31864 + *    stats
31865 + *    position
31866 + *    vpd
31867 + */
31868 +
31869 +struct proc_dir_entry *elan4_procfs_root;
31870 +struct proc_dir_entry *elan4_config_root;
31871 +
31872 +/* borrowed from fs/proc/proc_misc - helper for proc_read_int */
31873 +static int 
31874 +proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len)
31875 +{
31876 +    if (len <= off+count) *eof = 1;
31877 +    *start = page + off;
31878 +    len -= off;
31879 +    if (len>count) len = count;
31880 +    if (len<0) len = 0;
31881 +    return len;
31882 +}
31883 +
31884 +static int
31885 +proc_read_devinfo (char *page, char **start, off_t off,
31886 +                   int count, int *eof, void *data)
31887 +{
31888 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
31889 +    int        len = 0;
31890 +
31891 +    if (! dev)
31892 +       len = sprintf (page, "<unknown>\n");
31893 +    else
31894 +    {
31895 +       len += sprintf (page + len, "dev_vendor_id            0x%x\n", dev->dev_devinfo.dev_vendor_id);
31896 +       len += sprintf (page + len, "dev_device_id            0x%x\n", dev->dev_devinfo.dev_vendor_id);
31897 +       len += sprintf (page + len, "dev_revision_id          0x%x\n", dev->dev_devinfo.dev_revision_id);
31898 +       len += sprintf (page + len, "dev_instance             0x%x\n", dev->dev_devinfo.dev_instance);
31899 +       len += sprintf (page + len, "dev_rail                 0x%x\n", dev->dev_devinfo.dev_rail);
31900 +       len += sprintf (page + len, "dev_driver_version       0x%x\n", dev->dev_devinfo.dev_driver_version);
31901 +       len += sprintf (page + len, "dev_params_mask          0x%x\n", dev->dev_devinfo.dev_params_mask);
31902 +       len += sprintf (page + len, "dev_params:                  \n");
31903 +       len += sprintf (page + len, " 0  - PciCmdQPadFlag     0x%x\n", dev->dev_devinfo.dev_params.values[0]);
31904 +       len += sprintf (page + len, " 1  - EventCopyWinPt     0x%x\n", dev->dev_devinfo.dev_params.values[1]);
31905 +       len += sprintf (page + len, " 2  - PciWriteCombining  0x%x\n", dev->dev_devinfo.dev_params.values[2]);
31906 +       len += sprintf (page + len, " 3  -                    0x%x\n", dev->dev_devinfo.dev_params.values[3]);
31907 +       len += sprintf (page + len, " 4  -                    0x%x\n", dev->dev_devinfo.dev_params.values[4]);
31908 +       len += sprintf (page + len, " 5  -                    0x%x\n", dev->dev_devinfo.dev_params.values[5]);
31909 +       len += sprintf (page + len, " 6  -                    0x%x\n", dev->dev_devinfo.dev_params.values[6]);
31910 +       len += sprintf (page + len, " 7  -                    0x%x\n", dev->dev_devinfo.dev_params.values[7]);
31911 +       len += sprintf (page + len, " 8  -                    0x%x\n", dev->dev_devinfo.dev_params.values[8]);
31912 +       len += sprintf (page + len, " 9  -                    0x%x\n", dev->dev_devinfo.dev_params.values[9]);
31913 +       len += sprintf (page + len, " 10 -                    0x%x\n", dev->dev_devinfo.dev_params.values[10]);
31914 +       len += sprintf (page + len, " 11 -                    0x%x\n", dev->dev_devinfo.dev_params.values[11]);
31915 +       len += sprintf (page + len, "dev_num_down_links_value 0x%x\n", dev->dev_devinfo.dev_num_down_links_value);
31916 +
31917 +       len += sprintf (page + len, "features                 0x%x\n", dev->dev_features);
31918 +    }
31919 +
31920 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
31921 +}
31922 +
31923 +static int
31924 +proc_read_position (char *page, char **start, off_t off,
31925 +                   int count, int *eof, void *data)
31926 +{
31927 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
31928 +    int        len;
31929 +
31930 +    if (dev->dev_position.pos_mode == ELAN_POS_UNKNOWN)
31931 +       len = sprintf (page, "<unknown>\n");
31932 +    else
31933 +       len = sprintf (page, 
31934 +                      "NodeId                 %d\n"
31935 +                      "NumLevels              %d\n"
31936 +                      "NumNodes               %d\n",
31937 +                      dev->dev_position.pos_nodeid, 
31938 +                      dev->dev_position.pos_levels, 
31939 +                      dev->dev_position.pos_nodes);
31940 +
31941 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
31942 +}
31943 +
31944 +static int
31945 +proc_write_position (struct file *file, const char *buf, unsigned long count, void *data)
31946 +{
31947 +    ELAN4_DEV *dev     = (ELAN4_DEV *) data;
31948 +    unsigned  nodeid   = ELAN_INVALID_NODE;
31949 +    unsigned  numnodes = 0;
31950 +    char     *page, *p;
31951 +    int       res;
31952 +    ELAN_POSITION pos;
31953 +
31954 +    if (count == 0)
31955 +       return (0);
31956 +
31957 +    if (count >= PAGE_SIZE)
31958 +       return (-EINVAL);
31959 +
31960 +    if ((page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
31961 +       return (-ENOMEM);
31962 +
31963 +    MOD_INC_USE_COUNT;
31964 +
31965 +    if (copy_from_user (page, buf, count))
31966 +       res = -EFAULT;
31967 +    else
31968 +    {
31969 +       page[count] = '\0';
31970 +       
31971 +       if (page[count-1] == '\n')
31972 +           page[count-1] = '\0';
31973 +
31974 +       if (! strcmp (page, "<unknown>"))
31975 +       {
31976 +           pos.pos_mode      = ELAN_POS_UNKNOWN;
31977 +           pos.pos_nodeid    = ELAN_INVALID_NODE;
31978 +           pos.pos_nodes     = 0;
31979 +           pos.pos_levels    = 0;
31980 +       }
31981 +       else
31982 +       {
31983 +           for (p = page; *p; )
31984 +           {
31985 +               while (isspace (*p))
31986 +                   p++;
31987 +               
31988 +               if (! strncmp (p, "NodeId=", strlen("NodeId=")))
31989 +                   nodeid   = simple_strtoul (p + strlen ("NodeId="), NULL, 0);
31990 +               if (! strncmp (p, "NumNodes=", strlen ("NumNodes=")))
31991 +                   numnodes = simple_strtoul (p + strlen ("NumNodes="), NULL, 0);
31992 +               
31993 +               while (*p && !isspace(*p))
31994 +                   p++;
31995 +           }
31996 +
31997 +           if (elan4_compute_position (&pos, nodeid, numnodes, dev->dev_devinfo.dev_num_down_links_value) != 0)
31998 +               printk ("elan%d: invalid values for NodeId=%d NumNodes=%d\n", dev->dev_instance, nodeid, numnodes);
31999 +           else
32000 +           {
32001 +               printk ("elan%d: setting NodeId=%d NumNodes=%d NumLevels=%d\n", dev->dev_instance, pos.pos_nodeid,
32002 +                       pos.pos_nodes, pos.pos_levels);
32003 +
32004 +               if (elan4_set_position (dev, &pos) < 0)
32005 +                   printk ("elan%d: failed to set device position\n", dev->dev_instance);
32006 +           }
32007 +       }
32008 +    }
32009 +
32010 +    MOD_DEC_USE_COUNT;
32011 +    free_page ((unsigned long) page);
32012 +
32013 +    return (count);
32014 +}
32015 +
32016 +static int
32017 +proc_read_temp (char *page, char **start, off_t off,
32018 +               int count, int *eof, void *data)
32019 +{
32020 +    ELAN4_DEV    *dev = (ELAN4_DEV *) data;
32021 +    unsigned char values[2];
32022 +    int           len;
32023 +
32024 +    if (i2c_disable_auto_led_update (dev) < 0)
32025 +       len = sprintf (page, "<unknown>");
32026 +    else
32027 +    {
32028 +       if (i2c_read (dev, I2C_TEMP_ADDR, 2, values) < 0)
32029 +           len = sprintf (page, "<not-present>");
32030 +       else
32031 +           len = sprintf (page, "%s%d%s\n", (values[0] & 0x80) ? "-" : "",
32032 +                          (values[0] & 0x80) ? -((signed char)values[0]) - 1 : values[0],
32033 +                          (values[1] & 0x80) ? ".5" : ".0");
32034 +
32035 +       i2c_enable_auto_led_update (dev);
32036 +    }
32037 +
32038 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32039 +}
32040 +
32041 +static int
32042 +proc_read_eccerr (char *page, char **start, off_t off,
32043 +                 int count, int *eof, void *data)
32044 +{
32045 +    ELAN4_DEV    *dev = (ELAN4_DEV *) data;
32046 +    char          errstr[200];
32047 +    register int  i, len = 0;
32048 +
32049 +    *page = '\0';
32050 +
32051 +    for (i = 0; i < sizeof (dev->dev_sdramerrs)/sizeof(dev->dev_sdramerrs[0]); i++)
32052 +       if (dev->dev_sdramerrs[i])
32053 +           len += sprintf (page + len, "%s occured %0d times\n",
32054 +                           elan4_sdramerr2str (dev, dev->dev_sdramerrs[i] & 0x000fffffffffffffULL, errstr),
32055 +                           (int) (dev->dev_sdramerrs[i] >> 52) + 1);
32056 +
32057 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32058 +}
32059 +
32060 +static int
32061 +proc_read_vpd (char *page, char **start, off_t off,
32062 +              int count, int *eof, void *data)
32063 +{
32064 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32065 +    int        len;
32066 +
32067 +    if ( elan4_read_vpd (dev, NULL, page) )
32068 +       len = sprintf (page, "no vpd tags found\n");
32069 +    else
32070 +       len = strlen(page)+1;
32071 +
32072 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32073 +}
32074 +
32075 +static struct device_info 
32076 +{
32077 +    char *name;
32078 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
32079 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
32080 +    unsigned minrev;
32081 +} device_info[] = {
32082 +    {"devinfo",   proc_read_devinfo,  NULL,                0},
32083 +    {"position",  proc_read_position, proc_write_position, 0},
32084 +    {"temp",      proc_read_temp,     NULL,                1},
32085 +    {"eccerr",    proc_read_eccerr,   NULL,                0},
32086 +    {"vpd",       proc_read_vpd,      NULL,                0},
32087 +};
32088 +
32089 +static int
32090 +proc_read_link_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32091 +{
32092 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32093 +    char      *p   = page;
32094 +
32095 +    p += sprintf (p, "%20s %ld\n", "link_errors", dev->dev_stats.s_link_errors);
32096 +    p += sprintf (p, "%20s %ld\n", "lock_errors", dev->dev_stats.s_lock_errors);
32097 +    p += sprintf (p, "%20s %ld\n", "deskew_errors", dev->dev_stats.s_deskew_errors);
32098 +    p += sprintf (p, "%20s %ld\n", "phase_errors", dev->dev_stats.s_phase_errors);
32099 +
32100 +    p += sprintf (p, "%20s %ld\n", "data_errors", dev->dev_stats.s_data_errors);
32101 +    p += sprintf (p, "%20s %ld\n", "fifo_overflow0", dev->dev_stats.s_fifo_overflow0);
32102 +    p += sprintf (p, "%20s %ld\n", "fifo_overflow1", dev->dev_stats.s_fifo_overflow1);
32103 +    p += sprintf (p, "%20s %ld\n", "mod45changed", dev->dev_stats.s_mod45changed);
32104 +    p += sprintf (p, "%20s %ld\n", "pack_not_seen", dev->dev_stats.s_pack_not_seen);
32105 +
32106 +    p += sprintf (p, "%20s %ld\n", "linkport_keyfail", dev->dev_stats.s_linkport_keyfail);
32107 +    p += sprintf (p, "%20s %ld\n", "eop_reset", dev->dev_stats.s_eop_reset);
32108 +    p += sprintf (p, "%20s %ld\n", "bad_length", dev->dev_stats.s_bad_length);
32109 +    p += sprintf (p, "%20s %ld\n", "crc_error", dev->dev_stats.s_crc_error);
32110 +    p += sprintf (p, "%20s %ld\n", "crc_bad", dev->dev_stats.s_crc_bad);
32111 +
32112 +    p += sprintf (p, "%20s %ld\n", "cproc_timeout", dev->dev_stats.s_cproc_timeout);
32113 +    p += sprintf (p, "%20s %ld\n", "dproc_timeout", dev->dev_stats.s_dproc_timeout);
32114 +
32115 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32116 +}
32117 +
32118 +static char *
32119 +proc_sprintf_bucket_stat (char *p, char *name, unsigned long *stats, int *buckets)
32120 +{
32121 +    int i;
32122 +
32123 +    p += sprintf (p, "%20s ", name);
32124 +
32125 +    for (i = 0; i < ELAN4_DEV_STATS_BUCKETS-1; i++)
32126 +       p += sprintf (p, "%ld(<=%d) ", stats[i], buckets[i]);
32127 +    p += sprintf (p, "%ld(>%d)\n", stats[i], buckets[i-1]);
32128 +
32129 +    return p;
32130 +}
32131 +
32132 +static int
32133 +proc_read_intr_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32134 +{
32135 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32136 +    char      *p   = page;
32137 +
32138 +    p += sprintf (p, "%20s %ld\n", "interrupts",       dev->dev_stats.s_interrupts);
32139 +    p += sprintf (p, "%20s %ld\n", "haltints",         dev->dev_stats.s_haltints);
32140 +
32141 +    p += sprintf (p, "%20s %ld\n", "mainint_punts",    dev->dev_stats.s_mainint_punts);
32142 +    p += sprintf (p, "%20s %ld\n", "mainint_rescheds", dev->dev_stats.s_mainint_rescheds);
32143 +
32144 +    p  = proc_sprintf_bucket_stat (p, "mainints", dev->dev_stats.s_mainints, MainIntBuckets);
32145 +
32146 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32147 +}
32148 +
32149 +static int
32150 +proc_read_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32151 +{
32152 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32153 +    char      *p   = page;
32154 +
32155 +    p += sprintf (p, "%20s %ld\n", "cproc_traps", dev->dev_stats.s_cproc_traps);
32156 +    p += sprintf (p, "%20s %ld\n", "dproc_traps", dev->dev_stats.s_dproc_traps);
32157 +    p += sprintf (p, "%20s %ld\n", "eproc_traps", dev->dev_stats.s_eproc_traps);
32158 +    p += sprintf (p, "%20s %ld\n", "iproc_traps", dev->dev_stats.s_iproc_traps);
32159 +    p += sprintf (p, "%20s %ld\n", "tproc_traps", dev->dev_stats.s_tproc_traps);
32160 +
32161 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32162 +}
32163 +
32164 +static int
32165 +proc_read_cproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32166 +{
32167 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32168 +    char      *p   = page;
32169 +    int        i;
32170 +    extern char *const CProcTrapNames[];
32171 +
32172 +    for (i = 0; i < sizeof (dev->dev_stats.s_cproc_trap_types)/sizeof(dev->dev_stats.s_cproc_trap_types[0]); i++)
32173 +       p += sprintf (p, "%-40s %ld\n", CProcTrapNames[i], dev->dev_stats.s_cproc_trap_types[i]);
32174 +
32175 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32176 +}
32177 +
32178 +static int
32179 +proc_read_dproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32180 +{
32181 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32182 +    char      *p   = page;
32183 +    int        i;
32184 +    extern char *const DProcTrapNames[];
32185 +
32186 +    for (i = 0; i < sizeof (dev->dev_stats.s_dproc_trap_types)/sizeof(dev->dev_stats.s_dproc_trap_types[0]); i++)
32187 +       p += sprintf (p, "%-40s %ld\n", DProcTrapNames[i], dev->dev_stats.s_dproc_trap_types[i]);
32188 +
32189 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32190 +}
32191 +
32192 +static int
32193 +proc_read_eproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32194 +{
32195 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32196 +    char      *p   = page;
32197 +    int        i;
32198 +    extern char *const EProcTrapNames[];
32199 +
32200 +    for (i = 0; i < sizeof (dev->dev_stats.s_eproc_trap_types)/sizeof(dev->dev_stats.s_eproc_trap_types[0]); i++)
32201 +       p += sprintf (p, "%-40s %ld\n", EProcTrapNames[i], dev->dev_stats.s_eproc_trap_types[i]);
32202 +
32203 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32204 +}
32205 +
32206 +static int
32207 +proc_read_iproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32208 +{
32209 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32210 +    char      *p   = page;
32211 +    int        i;
32212 +    extern char *const IProcTrapNames[];
32213 +
32214 +    for (i = 0; i < sizeof (dev->dev_stats.s_iproc_trap_types)/sizeof(dev->dev_stats.s_iproc_trap_types[0]); i++)
32215 +       p += sprintf (p, "%-40s %ld\n", IProcTrapNames[i], dev->dev_stats.s_iproc_trap_types[i]);
32216 +
32217 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32218 +}
32219 +
32220 +static int
32221 +proc_read_tproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32222 +{
32223 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32224 +    char      *p   = page;
32225 +    int        i;
32226 +    extern char *const TProcTrapNames[];
32227 +
32228 +    for (i = 0; i < sizeof (dev->dev_stats.s_tproc_trap_types)/sizeof(dev->dev_stats.s_tproc_trap_types[0]); i++)
32229 +       p += sprintf (p, "%-40s %ld\n", TProcTrapNames[i], dev->dev_stats.s_tproc_trap_types[i]);
32230 +
32231 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32232 +}
32233 +
32234 +static int
32235 +proc_read_sdram_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32236 +{
32237 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32238 +    char      *p   = page;
32239 +
32240 +    p += sprintf (p, "%20s %ld\n",  "correctable_errors", dev->dev_stats.s_correctable_errors);
32241 +    p += sprintf (p, "%20s %ld\n",  "multiple_errors",    dev->dev_stats.s_multiple_errors);
32242 +    p += sprintf (p, "%20s %ldK\n", "sdram_bytes_free",   dev->dev_stats.s_sdram_bytes_free/1024);
32243 +
32244 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32245 +}
32246 +
32247 +void
32248 +elan4_ringbuf_store(ELAN4_ROUTE_RINGBUF *ringbuf, E4_VirtualProcessEntry *route, ELAN4_DEV *dev)
32249 +{
32250 +    int newend;
32251 +    unsigned long flags;
32252 +
32253 +    spin_lock_irqsave(&dev->dev_error_routes_lock, flags);
32254 +    bcopy(route, &ringbuf->routes[ringbuf->end], sizeof(E4_VirtualProcessEntry));
32255 +    newend = ringbuf->end + 1;
32256 +    if (newend >= DEV_STASH_ROUTE_COUNT)
32257 +        newend -= DEV_STASH_ROUTE_COUNT;
32258 +    if (newend == ringbuf->start)
32259 +        ringbuf->start += 1;
32260 +    if (ringbuf->start >= DEV_STASH_ROUTE_COUNT)
32261 +        ringbuf->start -= DEV_STASH_ROUTE_COUNT;
32262 +    ringbuf->end = newend;
32263 +    spin_unlock_irqrestore(&dev->dev_error_routes_lock, flags);
32264 +}
32265 +       
32266 +static int
32267 +proc_read_dproc_timeout_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32268 +{
32269 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
32270 +       char      *p   = page;
32271 +       unsigned int *dproc_timeout;
32272 +
32273 +       dproc_timeout = dev->dev_dproc_timeout;
32274 +
32275 +       if (!dproc_timeout) 
32276 +               p += sprintf (p, "No stats available\n");
32277 +       else
32278 +       {
32279 +               int i;
32280 +
32281 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
32282 +                       if (dproc_timeout[i] != 0) 
32283 +                               p += sprintf (p, "Node %d: %u errors\n", i, dproc_timeout[i]);
32284 +       }
32285 +
32286 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
32287 +}
32288 +
32289 +static int
32290 +elan4_route2str (E4_VirtualProcessEntry *route, char *routeStr)
32291 +{
32292 +    int        part = 0;
32293 +    int        shift;
32294 +    int        broadcast;
32295 +    E4_uint64  value;
32296 +    char      *ptr = routeStr;
32297 +    int        b;
32298 +
32299 +    /* unpack first */
32300 +    value = route->Values[part] & 0x7f;
32301 +    if ( (value & 0x78) == 0) {
32302 +        /* empty route */
32303 +        strcpy(routeStr,"Invalid lead route");
32304 +        return (-EINVAL);
32305 +    }
32306 +
32307 +    if ( value & 0x40 ) {
32308 +        /* broad cast */
32309 +       strcpy(routeStr,"Broadcast");
32310 +       return (-EINVAL);
32311 +    } else {
32312 +        switch ((value  & 0x30) >> 4) {
32313 +        case 0: { *ptr++ = '0' + (value & 0x7); break; }
32314 +        case 1: { *ptr++ = 'M';                 break; }
32315 +        case 2: { *ptr++ = 'U';                 break; }
32316 +        case 3: { *ptr++ = 'A';                 break; }
32317 +        }
32318 +    }
32319 +
32320 +    shift = 16;
32321 +    broadcast = 0;
32322 +    while ( 1 ) {
32323 +        b =  (route->Values[part] >> shift) & 0xf;
32324 +
32325 +        if ( broadcast ) {
32326 +            /* about to pick up the second byte of a broadcast pair */
32327 +            broadcast = 0;
32328 +        } else {
32329 +            if ( b & 0x8) {
32330 +                /*  output link */
32331 +                 *ptr++ = '0' + (b & 0x7);
32332 +            } else {
32333 +                if ( b & 0x4) {
32334 +                    /* broad cast */
32335 +                    broadcast = 1;
32336 +                } else {
32337 +                    switch ( b & 0x3 ) {
32338 +                    case 0: { *ptr++ =  0 ; return (0);     break; }
32339 +                    case 1: { *ptr++ = 'M';                 break; }
32340 +                    case 2: { *ptr++ = 'U';                 break; }
32341 +                    case 3: { *ptr++ = 'A';                 break; }
32342 +                    }
32343 +                }
32344 +            }
32345 +        }
32346 +
32347 +        shift += 4; 
32348 +        if ( part != 0 ) {
32349 +            if ( shift > 36) {
32350 +                /* too far, now in the crc value */
32351 +                strcpy(routeStr,"Invalid route length");
32352 +                return (-EINVAL);
32353 +            }
32354 +        } else { 
32355 +            if ( shift >= 64) { 
32356 +                /* move to the next 64 bits */
32357 +                part = 1;
32358 +                shift = 2;
32359 +            }
32360 +        }
32361 +    }
32362 +
32363 +    /* never reached */
32364 +    return (-EINVAL);
32365 +}
32366 +
32367 +
32368 +static int
32369 +proc_read_dproc_timeout_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
32370 +{
32371 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
32372 +       char      *p   = page;
32373 +       ELAN4_ROUTE_RINGBUF *ringbuf;
32374 +       char      routestr[33];
32375 +
32376 +       ringbuf = &dev->dev_dproc_timeout_routes;
32377 +
32378 +       if (!ringbuf) 
32379 +               p += sprintf (p, "No stats available\n");
32380 +       else
32381 +       {
32382 +               int start;
32383 +               int end;
32384 +               int i;
32385 +               unsigned long flags;
32386 +
32387 +               memset(&routestr, 0, 33);
32388 +
32389 +               spin_lock_irqsave(&dev->dev_error_routes_lock, flags);
32390 +               
32391 +               start = ringbuf->start;
32392 +               end = ringbuf->end;
32393 +
32394 +               if (end < start)
32395 +                       end = DEV_STASH_ROUTE_COUNT;
32396 +
32397 +               for (i=start; i<end; i++) 
32398 +               {
32399 +                       elan4_route2str (&ringbuf->routes[i], routestr);
32400 +                       p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
32401 +               }
32402 +
32403 +               if (ringbuf->end < start)
32404 +               {
32405 +                       start = 0;
32406 +                       end = ringbuf->end;
32407 +                       for (i=start; i<end; i++)
32408 +                       {
32409 +                               elan4_route2str (&ringbuf->routes[i], routestr);
32410 +                               p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
32411 +                       }
32412 +               }
32413 +
32414 +               spin_unlock_irqrestore(&dev->dev_error_routes_lock, flags);
32415 +       }
32416 +
32417 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
32418 +}
32419 +
32420 +
32421 +static int
32422 +proc_read_cproc_timeout_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32423 +{
32424 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
32425 +       char      *p   = page;
32426 +       unsigned int *cproc_timeout;
32427 +
32428 +       cproc_timeout = dev->dev_cproc_timeout;
32429 +
32430 +       if (!cproc_timeout) 
32431 +               p += sprintf (p, "No stats available\n");
32432 +       else
32433 +       {
32434 +               int i;
32435 +
32436 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
32437 +                       if (cproc_timeout[i] != 0) 
32438 +                               p += sprintf (p, "Node %d: %u errors\n", i, cproc_timeout[i]);
32439 +       }
32440 +
32441 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
32442 +}
32443 +
32444 +static int
32445 +proc_read_cproc_timeout_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
32446 +{
32447 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
32448 +       char      *p   = page;
32449 +       ELAN4_ROUTE_RINGBUF *ringbuf;
32450 +       char      routestr[33];
32451 +
32452 +       ringbuf = &dev->dev_cproc_timeout_routes;
32453 +
32454 +       if (!ringbuf) 
32455 +               p += sprintf (p, "No stats available\n");
32456 +       else
32457 +       {
32458 +               int start;
32459 +               int end;
32460 +               int i;
32461 +               unsigned long flags;
32462 +
32463 +               memset(routestr, 0, sizeof(routestr));  /* sizeof, not magic 33 */
32464 +
32465 +               spin_lock_irqsave(&dev->dev_error_routes_lock, flags);
32466 +               
32467 +               start = ringbuf->start;
32468 +               end = ringbuf->end;
32469 +
32470 +               if (end < start)
32471 +                       end = DEV_STASH_ROUTE_COUNT;
32472 +
32473 +               for (i=start; i<end; i++) 
32474 +               {
32475 +                       elan4_route2str (&ringbuf->routes[i], routestr);
32476 +                       p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
32477 +               }
32478 +
32479 +               if (ringbuf->end < start)
32480 +               {
32481 +                       start = 0;
32482 +                       end = ringbuf->end;
32483 +                       for (i=start; i<end; i++)
32484 +                       {
32485 +                               elan4_route2str (&ringbuf->routes[i], routestr);
32486 +                               p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
32487 +                       }
32488 +               }
32489 +
32490 +               spin_unlock_irqrestore(&dev->dev_error_routes_lock, flags);
32491 +       }
32492 +
32493 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
32494 +}
32495 +
32496 +static int
32497 +proc_read_traperr_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32498 +{
32499 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
32500 +       char      *p   = page;
32501 +       unsigned int *ack_errors;
32502 +
32503 +       ack_errors = dev->dev_ack_errors;
32504 +
32505 +       if (!ack_errors) 
32506 +               p += sprintf (p, "No stats available\n");
32507 +       else
32508 +       {
32509 +               int i;
32510 +
32511 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
32512 +                       if (ack_errors[i] != 0) 
32513 +                               p += sprintf (p, "Node %d: %u errors\n", i, ack_errors[i]);
32514 +       }
32515 +
32516 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
32517 +}
32518 +
32519 +static int
32520 +proc_read_ackerror_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
32521 +{
32522 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
32523 +       char      *p   = page;
32524 +       ELAN4_ROUTE_RINGBUF *ringbuf;
32525 +       char      routestr[33];
32526 +
32527 +       ringbuf = &dev->dev_ack_error_routes;
32528 +
32529 +       if (!ringbuf) 
32530 +               p += sprintf (p, "No stats available\n");
32531 +       else
32532 +       {
32533 +               int start;
32534 +               int end;
32535 +               int i;
32536 +               unsigned long flags;
32537 +
32538 +               memset(routestr, 0, sizeof(routestr));  /* sizeof, not magic 33 */
32539 +
32540 +               spin_lock_irqsave(&dev->dev_error_routes_lock, flags);
32541 +               
32542 +               start = ringbuf->start;
32543 +               end = ringbuf->end;
32544 +
32545 +               if (end < start)
32546 +                       end = DEV_STASH_ROUTE_COUNT;
32547 +
32548 +               for (i=start; i<end; i++) 
32549 +               {
32550 +                       elan4_route2str (&ringbuf->routes[i], routestr);
32551 +                       p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
32552 +               }
32553 +
32554 +               if (ringbuf->end < start)
32555 +               {
32556 +                       start = 0;
32557 +                       end = ringbuf->end;
32558 +                       for (i=start; i<end; i++)
32559 +                       {
32560 +                               elan4_route2str (&ringbuf->routes[i], routestr);
32561 +                               p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
32562 +                       }
32563 +               }
32564 +
32565 +               spin_unlock_irqrestore(&dev->dev_error_routes_lock, flags);
32566 +       }
32567 +
32568 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
32569 +}
32570 +
32571 +static struct stats_info 
32572 +{
32573 +    char *name;
32574 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
32575 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); 
32576 +} stats_info[] = {
32577 +    {"link",     proc_read_link_stats, NULL},
32578 +    {"intr",     proc_read_intr_stats, NULL},
32579 +    {"trap",     proc_read_trap_stats, NULL},
32580 +    {"cproc",    proc_read_cproc_trap_stats, NULL},
32581 +    {"dproc",    proc_read_dproc_trap_stats, NULL},
32582 +    {"eproc",    proc_read_eproc_trap_stats, NULL},
32583 +    {"iproc",    proc_read_iproc_trap_stats, NULL},
32584 +    {"tproc",    proc_read_tproc_trap_stats, NULL},
32585 +    {"sdram",    proc_read_sdram_stats, NULL},
32586 +    {"trapdmaerr", proc_read_traperr_stats, NULL},
32587 +    {"dproctimeout", proc_read_dproc_timeout_stats, NULL},
32588 +    {"cproctimeout", proc_read_cproc_timeout_stats, NULL},
32589 +    {"dproctimeoutroutes", proc_read_dproc_timeout_routes, NULL},
32590 +    {"cproctimeoutroutes", proc_read_cproc_timeout_routes, NULL},
32591 +    {"ackerrroutes", proc_read_ackerror_routes, NULL},
32592 +};
32593 +
32594 +static int
32595 +proc_read_sysconfig (char *page, char **start, off_t off, int count, int *eof, void *data)
32596 +{
32597 +    ELAN4_DEV *dev        = (ELAN4_DEV *) data;
32598 +    E4_uint32  syscontrol = dev->dev_syscontrol;
32599 +    int               len       = 0;
32600 +
32601 +   *eof = 1;
32602 +   if (off != 0)
32603 +      return (0);
32604 +
32605 +    if (syscontrol & CONT_EN_ALL_SETS)
32606 +       len += sprintf (page + len, "%sEN_ALL_SETS", len == 0 ? "" : " ");
32607 +    if (syscontrol & CONT_MMU_ENABLE)
32608 +       len += sprintf (page + len, "%sMMU_ENABLE", len == 0 ? "" : " ");
32609 +    if (syscontrol & CONT_CACHE_HASH_TABLE)
32610 +       len += sprintf (page + len, "%sCACHE_HASH_TABLE", len == 0 ? "" : " ");
32611 +    if (syscontrol & CONT_CACHE_CHAINS)
32612 +       len += sprintf (page + len, "%sCACHE_CHAINS", len == 0 ? "" : " ");
32613 +    if (syscontrol & CONT_CACHE_ROOT_CNTX)
32614 +       len += sprintf (page + len, "%sCACHE_ROOT_CNTX", len == 0 ? "" : " ");
32615 +    if (syscontrol & CONT_CACHE_STEN_ROUTES)
32616 +       len += sprintf (page + len, "%sCACHE_STEN_ROUTES", len == 0 ? "" : " ");
32617 +    if (syscontrol & CONT_CACHE_DMA_ROUTES)
32618 +       len += sprintf (page + len, "%sCACHE_DMA_ROUTES", len == 0 ? "" : " ");
32619 +    if (syscontrol & CONT_INHIBIT_MAX_CHAIN_ITEMS)
32620 +       len += sprintf (page + len, "%sINHIBIT_MAX_CHAIN_ITEMS", len == 0 ? "" : " ");
32621 +
32622 +    len += sprintf (page + len, "%sTABLE0_MASK_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE0_MASK_SIZE_SHIFT) & PAGE_MASK_MASK);
32623 +    len += sprintf (page + len, "%sTABLE0_PAGE_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE0_PAGE_SIZE_SHIFT) & PAGE_SIZE_MASK);
32624 +    len += sprintf (page + len, "%sTABLE1_MASK_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE1_MASK_SIZE_SHIFT) & PAGE_MASK_MASK);
32625 +    len += sprintf (page + len, "%sTABLE1_PAGE_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE1_PAGE_SIZE_SHIFT) & PAGE_SIZE_MASK);
32626 +
32627 +    if (syscontrol & CONT_2K_NOT_1K_DMA_PACKETS)
32628 +       len += sprintf (page + len, "%s2K_NOT_1K_DMA_PACKETS", len == 0 ? "" : " ");
32629 +    if (syscontrol & CONT_ALIGN_ALL_DMA_PACKETS)
32630 +       len += sprintf (page + len, "%sALIGN_ALL_DMA_PACKETS", len == 0 ? "" : " ");
32631 +    if (syscontrol & CONT_DIRECT_MAP_PCI_WRITES)
32632 +       len += sprintf (page + len, "%sDIRECT_MAP_PCI_WRITES", len == 0 ? "" : " ");
32633 +
32634 +    len += sprintf (page + len, "\n");
32635 +
32636 +   *start = page;
32637 +   return (len);
32638 +}
32639 +
32640 +static int
32641 +proc_write_sysconfig (struct file *file, const char *ubuffer, unsigned long count, void *data)
32642 +{
32643 +    ELAN4_DEV *dev       = (ELAN4_DEV *) data;
32644 +    unsigned long page   = __get_free_page (GFP_KERNEL);
32645 +    char         *buffer = (char *)page;
32646 +    int            add   = 0;
32647 +    int            sub   = 0;
32648 +    
32649 +    count = MIN (count, PAGE_SIZE - 1);
32650 +    if (copy_from_user (buffer, ubuffer, count))
32651 +    {
32652 +       free_page (page);
32653 +       return (-EFAULT);
32654 +    }
32655 +   
32656 +    buffer[count] = 0;                         /* terminate string */
32657 +
32658 +    while (*buffer != 0)
32659 +    {
32660 +       char *ptr;
32661 +       char *end;
32662 +       int   ch;
32663 +       int   val;
32664 +       int   op;
32665 +
32666 +       ch = *buffer;
32667 +       if (ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n')
32668 +       {
32669 +           buffer++;
32670 +           continue;
32671 +       }
32672 +      
32673 +       op = *buffer;
32674 +       if (op == '+' || op == '-')
32675 +           buffer++;
32676 +
32677 +       for (end = buffer; *end != 0; end++)
32678 +           if (*end == ' ' || *end == '\t' ||
32679 +               *end == '\r' || *end == '\n')
32680 +               break;
32681 +      
32682 +       if (end == buffer)
32683 +           break;
32684 +      
32685 +       ch = *end;
32686 +       *end = 0;
32687 +
32688 +       for (ptr = buffer; *ptr != 0; ptr++)
32689 +           if ('a' <= *ptr && *ptr <= 'z')
32690 +               *ptr = *ptr + 'A' - 'a';
32691 +      
32692 +       if (!strcmp (buffer, "EN_ALL_SETS"))
32693 +           val = CONT_EN_ALL_SETS;
32694 +       else if (!strcmp (buffer, "CACHE_HASH_TABLE")) /* else: or trailing "else val = 0" clobbers EN_ALL_SETS */
32695 +           val = CONT_CACHE_HASH_TABLE;
32696 +       else if (!strcmp (buffer, "CACHE_CHAINS"))
32697 +           val = CONT_CACHE_CHAINS;
32698 +       else if (!strcmp (buffer, "CACHE_ROOT_CNTX"))
32699 +           val = CONT_CACHE_ROOT_CNTX;
32700 +       else if (!strcmp (buffer, "CACHE_STEN_ROUTES"))
32701 +           val = CONT_CACHE_STEN_ROUTES;
32702 +       else if (!strcmp (buffer, "CACHE_DMA_ROUTES"))
32703 +           val = CONT_CACHE_DMA_ROUTES;
32704 +       else if (!strcmp (buffer, "2K_NOT_1K_DMA_PACKETS"))
32705 +           val = CONT_2K_NOT_1K_DMA_PACKETS;
32706 +       else if (!strcmp (buffer, "ALIGN_ALL_DMA_PACKETS"))
32707 +           val = CONT_ALIGN_ALL_DMA_PACKETS;
32708 +       else
32709 +           val = 0;
32710 +
32711 +       if (op == '+')
32712 +           add |= val;
32713 +       else if (op == '-')
32714 +           sub |= val;
32715 +
32716 +       *end = ch;
32717 +       buffer = end;
32718 +    }
32719 +
32720 +    if ((add | sub) & CONT_EN_ALL_SETS)
32721 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
32722 +
32723 +    CHANGE_SYSCONTROL (dev, add, sub);
32724 +   
32725 +    if ((add | sub) & CONT_EN_ALL_SETS)
32726 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
32727 +
32728 +    free_page (page);
32729 +    return (count);
32730 +}
32731 +
32732 +static struct config_info 
32733 +{
32734 +    char *name;
32735 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
32736 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); 
32737 +} config_info[] = {
32738 +    {"sysconfig",  proc_read_sysconfig, proc_write_sysconfig},
32739 +};
32740 +
32741 +void
32742 +elan4_procfs_device_init (ELAN4_DEV *dev)
32743 +{
32744 +    struct proc_dir_entry *p;
32745 +    char name[NAME_MAX];
32746 +    int i;
32747 +
32748 +    sprintf (name, "device%d", dev->dev_instance);
32749 +    dev->dev_osdep.procdir  = proc_mkdir (name, elan4_procfs_root);
32750 +
32751 +    for (i = 0; i < sizeof (device_info)/sizeof (device_info[0]); i++)
32752 +    {
32753 +       if (dev->dev_devinfo.dev_revision_id < device_info[i].minrev)
32754 +           continue;
32755 +
32756 +       if ((p = create_proc_entry (device_info[i].name, 0, dev->dev_osdep.procdir)) != NULL)
32757 +       {
32758 +           p->read_proc  = device_info[i].read_func;
32759 +           p->write_proc = device_info[i].write_func;
32760 +           p->data       = dev;
32761 +           p->owner      = THIS_MODULE;
32762 +       }
32763 +    }
32764 +
32765 +    dev->dev_osdep.configdir = proc_mkdir ("config", dev->dev_osdep.procdir);
32766 +    for (i = 0; i < sizeof (config_info)/sizeof (config_info[0]); i++)
32767 +    {
32768 +       if ((p = create_proc_entry (config_info[i].name, 0, dev->dev_osdep.configdir)) != NULL)
32769 +       {
32770 +           p->read_proc  = config_info[i].read_func;
32771 +           p->write_proc = config_info[i].write_func;
32772 +           p->data       = dev;
32773 +           p->owner      = THIS_MODULE;
32774 +       }
32775 +    }
32776 +
32777 +    dev->dev_osdep.statsdir = proc_mkdir ("stats", dev->dev_osdep.procdir);
32778 +    for (i = 0; i < sizeof (stats_info)/sizeof (stats_info[0]); i++)
32779 +    {
32780 +       if ((p = create_proc_entry (stats_info[i].name, 0, dev->dev_osdep.statsdir)) != NULL)
32781 +       {
32782 +           p->read_proc  = stats_info[i].read_func;
32783 +           p->write_proc = stats_info[i].write_func;
32784 +           p->data       = dev;
32785 +           p->owner      = THIS_MODULE;
32786 +       }
32787 +    }
32788 +}
32789 +
32790 +void
32791 +elan4_procfs_device_fini (ELAN4_DEV *dev)
32792 +{
32793 +    char name[NAME_MAX];
32794 +    int i;
32795 +
32796 +    for (i = 0; i < sizeof (stats_info)/sizeof (stats_info[0]); i++)
32797 +       remove_proc_entry (stats_info[i].name, dev->dev_osdep.statsdir);
32798 +    remove_proc_entry ("stats", dev->dev_osdep.procdir);
32799 +
32800 +    for (i = 0; i < sizeof (config_info)/sizeof (config_info[0]); i++)
32801 +       remove_proc_entry (config_info[i].name, dev->dev_osdep.configdir);
32802 +    remove_proc_entry ("config", dev->dev_osdep.procdir);
32803 +
32804 +    for (i = 0; i < sizeof (device_info)/sizeof (device_info[0]); i++)
32805 +    {
32806 +       if (dev->dev_devinfo.dev_revision_id < device_info[i].minrev)
32807 +           continue;
32808 +       
32809 +       remove_proc_entry (device_info[i].name, dev->dev_osdep.procdir);
32810 +    }
32811 +
32812 +    sprintf (name, "device%d", dev->dev_instance);
32813 +    remove_proc_entry (name, elan4_procfs_root);
32814 +}
32815 +
32816 +void
32817 +elan4_procfs_init(void)
32818 +{
32819 +    elan4_procfs_root = proc_mkdir("elan4", qsnet_procfs_root);
32820 +    elan4_config_root = proc_mkdir("config", elan4_procfs_root);
32821 +
32822 +    qsnet_proc_register_hex (elan4_config_root, "elan4_debug",              &elan4_debug,              0);
32823 +    qsnet_proc_register_hex (elan4_config_root, "elan4_debug_toconsole",    &elan4_debug_toconsole,    0);
32824 +    qsnet_proc_register_hex (elan4_config_root, "elan4_debug_tobuffer",     &elan4_debug_tobuffer,     0);
32825 +    qsnet_proc_register_int (elan4_config_root, "elan4_debug_mmu",          &elan4_debug_mmu,          0);
32826 +    qsnet_proc_register_int (elan4_config_root, "elan4_mainint_punt_loops", &elan4_mainint_punt_loops, 0);
32827 +    qsnet_proc_register_hex (elan4_config_root, "user_p2p_route_options",   &user_p2p_route_options,   0);
32828 +    qsnet_proc_register_hex (elan4_config_root, "user_bcast_route_options", &user_bcast_route_options, 0);
32829 +    qsnet_proc_register_int (elan4_config_root, "user_dproc_retry_count",   &user_dproc_retry_count,    0);
32830 +    qsnet_proc_register_int (elan4_config_root, "user_cproc_retry_count",   &user_cproc_retry_count,    0);
32831 +    qsnet_proc_register_int (elan4_config_root, "num_fault_save",   &num_fault_save,    0);
32832 +    qsnet_proc_register_int (elan4_config_root, "min_fault_pages",   &min_fault_pages,    0);
32833 +    qsnet_proc_register_int (elan4_config_root, "max_fault_pages",   &max_fault_pages,    0);
32834 +}
32835 +
32836 +void
32837 +elan4_procfs_fini(void)
32838 +{
32839 +    remove_proc_entry ("max_fault_pages",          elan4_config_root);
32840 +    remove_proc_entry ("min_fault_pages",          elan4_config_root);
32841 +    remove_proc_entry ("num_fault_save",           elan4_config_root);
32842 +    remove_proc_entry ("user_cproc_retry_count",   elan4_config_root);
32843 +    remove_proc_entry ("user_dproc_retry_count",   elan4_config_root);
32844 +    remove_proc_entry ("user_bcast_route_options", elan4_config_root);
32845 +    remove_proc_entry ("user_p2p_route_options",   elan4_config_root);
32846 +    remove_proc_entry ("elan4_mainint_punt_loops", elan4_config_root);
32847 +    remove_proc_entry ("elan4_debug_mmu",          elan4_config_root);
32848 +    remove_proc_entry ("elan4_debug_tobuffer",     elan4_config_root);
32849 +    remove_proc_entry ("elan4_debug_toconsole",    elan4_config_root);
32850 +    remove_proc_entry ("elan4_debug",              elan4_config_root);
32851 +
32852 +    remove_proc_entry ("config", elan4_procfs_root);
32853 +    remove_proc_entry ("elan4", qsnet_procfs_root);
32854 +}
32855 +
32856 +EXPORT_SYMBOL(elan4_procfs_root);
32857 +EXPORT_SYMBOL(elan4_config_root);
32858 +
32859 +/*
32860 + * Local variables:
32861 + * c-file-style: "stroustrup"
32862 + * End:
32863 + */
32864 Index: linux-2.4.21/drivers/net/qsnet/elan4/quadrics_version.h
32865 ===================================================================
32866 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/quadrics_version.h        2004-02-23 16:02:56.000000000 -0500
32867 +++ linux-2.4.21/drivers/net/qsnet/elan4/quadrics_version.h     2005-06-01 23:12:54.614436824 -0400
32868 @@ -0,0 +1 @@
32869 +#define QUADRICS_VERSION "4.30qsnet"
32870 Index: linux-2.4.21/drivers/net/qsnet/elan4/regions.c
32871 ===================================================================
32872 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/regions.c 2004-02-23 16:02:56.000000000 -0500
32873 +++ linux-2.4.21/drivers/net/qsnet/elan4/regions.c      2005-06-01 23:12:54.615436672 -0400
32874 @@ -0,0 +1,609 @@
32875 +/*
32876 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
32877 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
32878 + * 
32879 + *    For licensing information please see the supplied COPYING file
32880 + *
32881 + */
32882 +
32883 +#ident "@(#)$Id: regions.c,v 1.18.2.1 2004/11/18 11:31:08 david Exp $"
32884 +/*      $Source: /cvs/master/quadrics/elan4mod/regions.c,v $*/
32885 +
32886 +#include <qsnet/kernel.h>
32887 +
32888 +#include <elan4/debug.h>
32889 +#include <elan4/device.h>
32890 +#include <elan4/user.h>
32891 +
32892 +/*================================================================================*/
32893 +/* elan address region management */
32894 +USER_RGN *
32895 +user_findrgn_elan (USER_CTXT *uctx, E4_Addr addr, int tail)
32896 +{
32897 +    USER_RGN *rgn;
32898 +    USER_RGN *hirgn;
32899 +    USER_RGN *lorgn;
32900 +    E4_Addr   base;
32901 +    E4_Addr   lastaddr;
32902 +    int              forward;
32903 +    
32904 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) || kmutex_is_locked (&uctx->uctx_rgnmutex));
32905 +
32906 +    if (uctx->uctx_ergns == NULL)
32907 +       return (NULL);
32908 +
32909 +    rgn = uctx->uctx_ergnlast;
32910 +    if (rgn == NULL)
32911 +       rgn = uctx->uctx_ergns;
32912 +
32913 +    forward = 0;
32914 +    if ((base = rgn->rgn_ebase) < addr)
32915 +    {
32916 +       if (addr <= (base + rgn->rgn_len - 1))
32917 +           return (rgn);                                       /* ergnlast contained addr */
32918 +       
32919 +       hirgn = uctx->uctx_etail;
32920 +       
32921 +       if ((lastaddr = (hirgn->rgn_ebase + hirgn->rgn_len - 1)) < addr)
32922 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
32923 +       
32924 +       if ((addr - base) > (lastaddr - addr))
32925 +           rgn = hirgn;
32926 +       else
32927 +       {
32928 +           rgn = rgn->rgn_enext;
32929 +           forward++;
32930 +       }
32931 +    }
32932 +    else
32933 +    {
32934 +       lorgn = uctx->uctx_ergns;
32935 +
32936 +       if (lorgn->rgn_ebase > addr)
32937 +           return (lorgn);                                     /* lowest regions is higher than addr */
32938 +       if ((addr - lorgn->rgn_ebase) < (base - addr))
32939 +       {
32940 +           rgn = lorgn;                                        /* search forward from head */
32941 +           forward++;
32942 +       }
32943 +    }
32944 +    if (forward)
32945 +    {
32946 +       while ((rgn->rgn_ebase + rgn->rgn_len - 1) < addr)
32947 +           rgn = rgn->rgn_enext;
32948 +
32949 +       if (rgn->rgn_ebase <= addr)
32950 +           uctx->uctx_ergnlast = rgn;
32951 +       return (rgn);
32952 +    }
32953 +    else
32954 +    {
32955 +       while (rgn->rgn_ebase > addr)
32956 +           rgn = rgn->rgn_eprev;
32957 +
32958 +       if ((rgn->rgn_ebase + rgn->rgn_len - 1) < addr)
32959 +           return (rgn->rgn_enext);
32960 +       else
32961 +       {
32962 +           uctx->uctx_ergnlast = rgn;
32963 +           return (rgn);
32964 +       }
32965 +    }
32966 +}
32967 +
32968 +static int
32969 +user_addrgn_elan (USER_CTXT *uctx, USER_RGN  *nrgn)
32970 +{
32971 +    USER_RGN *rgn   = user_findrgn_elan (uctx, nrgn->rgn_ebase, 1);
32972 +    E4_Addr   nbase = nrgn->rgn_ebase;
32973 +    E4_Addr   ntop  = nbase + nrgn->rgn_len - 1;
32974 +    E4_Addr   base;
32975 +
32976 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
32977 +
32978 +    if (rgn == NULL)
32979 +    {
32980 +       uctx->uctx_ergns = uctx->uctx_etail = nrgn;
32981 +       nrgn->rgn_enext = nrgn->rgn_eprev = NULL;
32982 +    }
32983 +    else
32984 +    {
32985 +       base = rgn->rgn_ebase;
32986 +       
32987 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
32988 +       {                                                       /* so insert after region (and hence at end */
32989 +           nrgn->rgn_eprev = rgn;                              /* of list */
32990 +           nrgn->rgn_enext = NULL;
32991 +           rgn->rgn_enext = uctx->uctx_etail = nrgn;
32992 +       }
32993 +       else
32994 +       {
32995 +           if (nbase >= base || ntop >= base)                  /* overlapping region */
32996 +               return (-1);
32997 +           
32998 +           nrgn->rgn_enext = rgn;                              /* insert before region */
32999 +           nrgn->rgn_eprev = rgn->rgn_eprev;
33000 +           rgn->rgn_eprev  = nrgn;
33001 +           if (uctx->uctx_ergns == rgn)
33002 +               uctx->uctx_ergns = nrgn;
33003 +           else
33004 +               nrgn->rgn_eprev->rgn_enext = nrgn;
33005 +       }
33006 +    }
33007 +    uctx->uctx_ergnlast = nrgn;
33008 +    
33009 +    return (0);
33010 +}
33011 +
33012 +static USER_RGN *
33013 +user_removergn_elan (USER_CTXT *uctx, USER_RGN  *rgn)
33014 +{
33015 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
33016 +
33017 +    uctx->uctx_ergnlast = rgn->rgn_enext;
33018 +    if (rgn == uctx->uctx_etail)
33019 +       uctx->uctx_etail = rgn->rgn_eprev;
33020 +    else
33021 +       rgn->rgn_enext->rgn_eprev = rgn->rgn_eprev;
33022 +    
33023 +    if (rgn == uctx->uctx_ergns)
33024 +       uctx->uctx_ergns = rgn->rgn_enext;
33025 +    else
33026 +       rgn->rgn_eprev->rgn_enext = rgn->rgn_enext;
33027 +
33028 +    return (rgn);
33029 +}
33030 +
33031 +USER_RGN *
33032 +user_rgnat_elan (USER_CTXT *uctx, E4_Addr addr)
33033 +{
33034 +    USER_RGN *rgn = user_findrgn_elan (uctx, addr, 0);
33035 +
33036 +    if (rgn != NULL && rgn->rgn_ebase <= addr && addr <= (rgn->rgn_ebase + rgn->rgn_len - 1))
33037 +       return (rgn);
33038 +
33039 +    return (NULL);
33040 +}
33041 +
33042 +/* main address region management */
33043 +USER_RGN *
33044 +user_findrgn_main (USER_CTXT *uctx, virtaddr_t addr, int tail)
33045 +{
33046 +    USER_RGN  *rgn;
33047 +    USER_RGN  *hirgn;
33048 +    USER_RGN  *lorgn;
33049 +    virtaddr_t lastaddr;
33050 +    virtaddr_t base;
33051 +    int               forward;
33052 +    
33053 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) || kmutex_is_locked (&uctx->uctx_rgnmutex));
33054 +
33055 +    if (uctx->uctx_mrgns == NULL)
33056 +       return (NULL);
33057 +    
33058 +    rgn = uctx->uctx_mrgnlast;
33059 +    if (rgn == NULL)
33060 +       rgn = uctx->uctx_mrgns;
33061 +    
33062 +    forward = 0;
33063 +    if ((base = rgn->rgn_mbase) < addr)
33064 +    {
33065 +       if (addr <= (base + rgn->rgn_len - 1))
33066 +           return (rgn);                                       /* ergnlast contained addr */
33067 +       
33068 +       hirgn = uctx->uctx_mtail;
33069 +       if ((lastaddr = hirgn->rgn_mbase + hirgn->rgn_len - 1) < addr)
33070 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
33071 +       
33072 +       if ((addr - base) > (lastaddr - addr))
33073 +           rgn = hirgn;
33074 +       else
33075 +       {
33076 +           rgn = rgn->rgn_mnext;
33077 +           forward++;
33078 +       }
33079 +    }
33080 +    else
33081 +    {
33082 +       lorgn = uctx->uctx_mrgns;
33083 +       if (lorgn->rgn_mbase > addr)
33084 +           return (lorgn);                                     /* lowest regions is higher than addr */
33085 +       if ((addr - lorgn->rgn_mbase) < (base - addr))
33086 +       {
33087 +           rgn = lorgn;                                        /* search forward from head */
33088 +           forward++;
33089 +       }
33090 +    }
33091 +    if (forward)
33092 +    {
33093 +       while ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
33094 +           rgn = rgn->rgn_mnext;
33095 +
33096 +       if (rgn->rgn_mbase <= addr)
33097 +           uctx->uctx_mrgnlast = rgn;
33098 +       return (rgn);
33099 +    }
33100 +    else
33101 +    {
33102 +       while (rgn->rgn_mbase > addr)
33103 +           rgn = rgn->rgn_mprev;
33104 +
33105 +       if ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
33106 +           return (rgn->rgn_mnext);
33107 +       else
33108 +       {
33109 +           uctx->uctx_mrgnlast = rgn;
33110 +           return (rgn);
33111 +       }
33112 +    }
33113 +}
33114 +
33115 +static int
33116 +user_addrgn_main (USER_CTXT *uctx, USER_RGN *nrgn)
33117 +{
33118 +    USER_RGN  *rgn   = user_findrgn_main (uctx, nrgn->rgn_mbase, 1);
33119 +    virtaddr_t nbase = nrgn->rgn_mbase;
33120 +    virtaddr_t ntop  = nbase + nrgn->rgn_len - 1;
33121 +    virtaddr_t base;
33122 +
33123 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
33124 +
33125 +    if (rgn == NULL)
33126 +    {
33127 +       uctx->uctx_mrgns = uctx->uctx_mtail = nrgn;
33128 +       nrgn->rgn_mnext = nrgn->rgn_mprev = NULL;
33129 +    }
33130 +    else
33131 +    {
33132 +       base = rgn->rgn_mbase;
33133 +
33134 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
33135 +       {                                                       /* so insert after region (and hence at end */
33136 +           nrgn->rgn_mprev = rgn;                              /* of list */
33137 +           nrgn->rgn_mnext = NULL;
33138 +           rgn->rgn_mnext = uctx->uctx_mtail = nrgn;
33139 +       }
33140 +       else
33141 +       {
33142 +           if (nbase >= base || ntop >= base)                  /* overlapping region */
33143 +               return (-1);
33144 +
33145 +           nrgn->rgn_mnext = rgn;                              /* insert before region */
33146 +           nrgn->rgn_mprev = rgn->rgn_mprev;
33147 +           rgn->rgn_mprev  = nrgn;
33148 +           if (uctx->uctx_mrgns == rgn)
33149 +               uctx->uctx_mrgns = nrgn;
33150 +           else
33151 +               nrgn->rgn_mprev->rgn_mnext = nrgn;
33152 +       }
33153 +    }
33154 +    uctx->uctx_mrgnlast = nrgn;
33155 +    
33156 +    return (0);
33157 +}
33158 +
33159 +static USER_RGN *
33160 +user_removergn_main (USER_CTXT *uctx, USER_RGN *rgn)
33161 +{
33162 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
33163 +
33164 +    uctx->uctx_mrgnlast = rgn->rgn_mnext;
33165 +    if (rgn == uctx->uctx_mtail)
33166 +       uctx->uctx_mtail = rgn->rgn_mprev;
33167 +    else
33168 +       rgn->rgn_mnext->rgn_mprev = rgn->rgn_mprev;
33169 +    
33170 +    if (rgn == uctx->uctx_mrgns)
33171 +       uctx->uctx_mrgns = rgn->rgn_mnext;
33172 +    else
33173 +       rgn->rgn_mprev->rgn_mnext = rgn->rgn_mnext;
33174 +
33175 +    return (rgn);
33176 +}
33177 +
33178 +/* Remove whole region from both lists */
33179 +static void
33180 +user_removergn (USER_CTXT *uctx, USER_RGN *rgn)
33181 +{
33182 +    spin_lock (&uctx->uctx_rgnlock);
33183 +
33184 +    elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, rgn->rgn_ebase, rgn->rgn_len);
33185 +           
33186 +    user_removergn_elan (uctx, rgn);
33187 +    user_removergn_main (uctx, rgn);
33188 +    
33189 +    spin_unlock (&uctx->uctx_rgnlock);
33190 +    
33191 +    KMEM_FREE (rgn, sizeof (USER_RGN));
33192 +}
33193 +
33194 +/* Remove all allocated regions */
33195 +void
33196 +user_freergns (USER_CTXT *uctx)
33197 +{
33198 +    kmutex_lock (&uctx->uctx_rgnmutex);
33199 +
33200 +    while (uctx->uctx_mrgns)
33201 +       user_removergn(uctx, uctx->uctx_mrgns);
33202 +
33203 +    kmutex_unlock (&uctx->uctx_rgnmutex);
33204 +
33205 +    ASSERT (uctx->uctx_ergns == NULL);
33206 +}
33207 +
33208 +USER_RGN *
33209 +user_rgnat_main (USER_CTXT *uctx, virtaddr_t addr)
33210 +{
33211 +    USER_RGN *rgn = user_findrgn_main (uctx, addr, 0);
33212 +    
33213 +    if (rgn != NULL && rgn->rgn_mbase <= addr && addr <= (rgn->rgn_mbase + rgn->rgn_len - 1))
33214 +       return (rgn);
33215 +    return (NULL);
33216 +}
33217 +
33218 +int
33219 +user_setperm (USER_CTXT *uctx, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, unsigned perm)
33220 +{
33221 +    USER_RGN *nrgn;
33222 +
33223 +    PRINTF4 (uctx, DBG_PERM, "user_setperm: user %lx elan %llx len %lx perm %x\n", maddr, (long long) eaddr, len, perm);
33224 +
33225 +    if ((maddr & PAGEOFFSET) || (eaddr & PAGEOFFSET) || (len & PAGEOFFSET))
33226 +    {
33227 +        PRINTF0 (uctx, DBG_PERM, "user_setperm:  alignment failure\n");
33228 +       return (-EINVAL);
33229 +    }
33230 +    
33231 +    if ((maddr + len - 1) <= maddr || (eaddr + len - 1) <= eaddr) 
33232 +    {
33233 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  range failure\n");
33234 +       return (-EINVAL);
33235 +    }
33236 +
33237 +    KMEM_ALLOC (nrgn, USER_RGN *, sizeof (USER_RGN), 1);
33238 +    
33239 +    if (nrgn == NULL)
33240 +       return (-ENOMEM);
33241 +
33242 +    nrgn->rgn_mbase = maddr;
33243 +    nrgn->rgn_ebase = eaddr;
33244 +    nrgn->rgn_len   = len;
33245 +    nrgn->rgn_perm  = perm;
33246 +
33247 +    kmutex_lock (&uctx->uctx_rgnmutex);
33248 +    spin_lock (&uctx->uctx_rgnlock);
33249 +
33250 +    if (user_addrgn_elan (uctx, nrgn) < 0)
33251 +    {
33252 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  elan address exists\n");
33253 +       spin_unlock (&uctx->uctx_rgnlock);
33254 +       kmutex_unlock (&uctx->uctx_rgnmutex);
33255 +
33256 +       KMEM_FREE (nrgn, sizeof (USER_RGN));
33257 +       return (-EINVAL);
33258 +    }
33259 +    
33260 +    if (user_addrgn_main (uctx, nrgn) < 0)
33261 +    {
33262 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  main address exists\n");
33263 +       user_removergn_elan (uctx, nrgn);
33264 +       
33265 +       spin_unlock (&uctx->uctx_rgnlock);
33266 +       kmutex_unlock (&uctx->uctx_rgnmutex);
33267 +
33268 +       KMEM_FREE (nrgn, sizeof (USER_RGN));
33269 +       return (-EINVAL);
33270 +    }
33271 +    spin_unlock (&uctx->uctx_rgnlock);
33272 +
33273 +    if ((perm & PERM_Preload))
33274 +       user_preload_main (uctx, maddr, len);
33275 +
33276 +    kmutex_unlock (&uctx->uctx_rgnmutex);
33277 +
33278 +    return (0);
33279 +}
33280 +
33281 +void
33282 +user_clrperm (USER_CTXT *uctx, E4_Addr addr, unsigned long len)
33283 +{
33284 +    E4_Addr       raddr;
33285 +    E4_Addr       rtop;
33286 +    USER_RGN     *nrgn;
33287 +    USER_RGN     *rgn;
33288 +    USER_RGN     *rgn_next;
33289 +    unsigned long ssize;
33290 +    int                  res;
33291 +
33292 +    PRINTF2 (uctx, DBG_PERM, "user_clrperm: elan %llx len %lx\n", addr, len);
33293 +
33294 +    raddr = (addr & PAGEMASK);
33295 +    rtop = ((addr + len - 1) & PAGEMASK) + (PAGESIZE-1);
33296 +
33297 +    kmutex_lock (&uctx->uctx_rgnmutex);
33298 +    
33299 +    for (rgn = user_findrgn_elan (uctx, addr, 0); rgn != NULL; rgn = rgn_next)
33300 +    {
33301 +       if (rtop < rgn->rgn_ebase)                              /* rtop was in a gap */
33302 +           break;
33303 +       
33304 +       rgn_next = rgn->rgn_enext;                              /* Save next region pointer */
33305 +       
33306 +       PRINTF (uctx, DBG_PERM, "              elan %llx->%llx main %p->%p\n", 
33307 +               rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1,
33308 +               rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len-1);
33309 +
33310 +       if (raddr <= rgn->rgn_ebase && rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
33311 +       {
33312 +           /* whole region is cleared */
33313 +
33314 +           PRINTF (uctx, DBG_PERM, "              whole region\n");
33315 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1);
33316 +           user_removergn (uctx, rgn);
33317 +       }
33318 +       else if (raddr <= rgn->rgn_ebase)
33319 +       {
33320 +           /* clearing at beginning, so shrink size and increment base ptrs */
33321 +           ssize = rtop - rgn->rgn_ebase + 1;
33322 +           
33323 +           PRINTF (uctx, DBG_PERM, "              clear at beginning %x\n", ssize);
33324 +
33325 +           spin_lock (&uctx->uctx_rgnlock);
33326 +
33327 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", rgn->rgn_ebase, rgn->rgn_ebase + ssize-1);
33328 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, rgn->rgn_ebase, ssize);
33329 +
33330 +           rgn->rgn_mbase += ssize;
33331 +           rgn->rgn_ebase += ssize;
33332 +           rgn->rgn_len   -= ssize;
33333 +           
33334 +           spin_unlock(&uctx->uctx_rgnlock);
33335 +       }
33336 +       else if (rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
33337 +       {
33338 +           /* clearing at end, so just shrink length of region */
33339 +           ssize = (rgn->rgn_ebase + rgn->rgn_len - 1) - raddr + 1;
33340 +
33341 +           PRINTF (uctx, DBG_PERM, "              clear at end %x\n", ssize);
33342 +
33343 +           spin_lock (&uctx->uctx_rgnlock);
33344 +
33345 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", raddr, raddr+ssize-1);
33346 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, raddr, ssize);
33347 +
33348 +           rgn->rgn_len -= ssize;
33349 +
33350 +           spin_unlock(&uctx->uctx_rgnlock);
33351 +       }
33352 +       else
33353 +       {
33354 +           /* the section to go is in the middle,  so need to  */
33355 +           /* split it into two regions */
33356 +           KMEM_ALLOC (nrgn, USER_RGN *, sizeof (USER_RGN), 1);
33357 +
33358 +           spin_lock (&uctx->uctx_rgnlock);
33359 +
33360 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", raddr, rtop);
33361 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, raddr, rtop - raddr + 1);
33362 +
33363 +           nrgn->rgn_mbase = rgn->rgn_mbase + (rtop - rgn->rgn_ebase + 1);
33364 +           nrgn->rgn_ebase = rtop + 1;
33365 +           nrgn->rgn_len   = (rgn->rgn_ebase + rgn->rgn_len - 1) - rtop;
33366 +           nrgn->rgn_perm  = rgn->rgn_perm;
33367 +
33368 +           PRINTF (uctx, DBG_PERM, "              new elan %llx->%llx main %p->%p\n", 
33369 +                   nrgn->rgn_ebase, nrgn->rgn_ebase + nrgn->rgn_len-1,
33370 +                   nrgn->rgn_mbase, nrgn->rgn_mbase + nrgn->rgn_len-1);
33371 +
33372 +           rgn->rgn_len = (raddr - rgn->rgn_ebase);            /* shrink original region */
33373 +
33374 +           PRINTF (uctx, DBG_PERM, "              old elan %llx->%llx main %p->%p\n", 
33375 +                   rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1,
33376 +                   rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len-1);
33377 +
33378 +           res = user_addrgn_elan (uctx, nrgn);                /* insert new region */
33379 +           ASSERT (res == 0);                                  /* which cannot fail */
33380 +
33381 +           res = user_addrgn_main (uctx, nrgn);        
33382 +           ASSERT (res == 0);
33383 +
33384 +           spin_unlock(&uctx->uctx_rgnlock);
33385 +       }
33386 +    }
33387 +    kmutex_unlock (&uctx->uctx_rgnmutex);
33388 +}
33389 +
33390 +int
33391 +user_checkperm (USER_CTXT *uctx, E4_Addr raddr, unsigned long rsize, unsigned access)
33392 +{
33393 +    USER_RGN *rgn;
33394 +
33395 +    PRINTF3 (uctx, DBG_PERM, "user_checkperm: elan %lx len %lx access %x\n", raddr, rsize, access);
33396 +    
33397 +    if ((raddr + rsize - 1) < raddr)
33398 +       return (-ENOMEM);
33399 +    
33400 +    kmutex_lock (&uctx->uctx_rgnmutex);
33401 +    if ((rgn = user_rgnat_elan (uctx, raddr)) == (USER_RGN *) NULL)
33402 +    {
33403 +       kmutex_unlock (&uctx->uctx_rgnmutex);
33404 +       return (-ENOMEM);
33405 +    }
33406 +    else
33407 +    {
33408 +       register int ssize;
33409 +       
33410 +       for (; rsize != 0; rsize -= ssize, raddr += ssize)
33411 +       {
33412 +           if (raddr > (rgn->rgn_ebase + rgn->rgn_len - 1))
33413 +           {
33414 +               rgn  = rgn->rgn_enext;
33415 +               
33416 +               if (rgn == NULL || raddr != rgn->rgn_ebase)
33417 +               {
33418 +                   kmutex_unlock (&uctx->uctx_rgnmutex);
33419 +                   return (-ENOMEM);
33420 +               }
33421 +           }
33422 +           if ((raddr + rsize - 1) > (rgn->rgn_ebase + rgn->rgn_len - 1))
33423 +               ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
33424 +           else
33425 +               ssize = rsize;
33426 +           
33427 +           PRINTF4 (uctx, DBG_PERM, "user_checkperm : rgn %lx -> %lx perm %x access %x\n",
33428 +                    rgn->rgn_ebase, rgn->rgn_ebase + (E4_Addr)rgn->rgn_len, rgn->rgn_perm, access);
33429 +
33430 +           if (ELAN4_INCOMPAT_ACCESS (rgn->rgn_perm, access))
33431 +           {
33432 +               kmutex_unlock (&uctx->uctx_rgnmutex);
33433 +               return (-EACCES);
33434 +           }
33435 +       }
33436 +    }
33437 +    
33438 +    kmutex_unlock (&uctx->uctx_rgnmutex);
33439 +    
33440 +    return (0);
33441 +}
33442 +
33443 +virtaddr_t
33444 +user_elan2main (USER_CTXT *uctx, E4_Addr addr)
33445 +{
33446 +    USER_RGN  *rgn;
33447 +    virtaddr_t raddr;
33448 +    
33449 +    spin_lock (&uctx->uctx_rgnlock);
33450 +    
33451 +    if ((rgn = user_rgnat_elan (uctx, addr)) == (USER_RGN *) NULL)
33452 +       raddr = (virtaddr_t) 0;
33453 +    else
33454 +       raddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
33455 +
33456 +    spin_unlock (&uctx->uctx_rgnlock);
33457 +
33458 +    return (raddr);
33459 +}
33460 +
33461 +E4_Addr
33462 +user_main2elan (USER_CTXT *uctx, virtaddr_t addr)
33463 +{
33464 +    USER_RGN *rgn;
33465 +    E4_Addr   raddr;
33466 +
33467 +    spin_lock (&uctx->uctx_rgnlock);
33468 +    
33469 +    if ((rgn = user_rgnat_main (uctx, addr)) == (USER_RGN *) NULL)
33470 +       raddr = (virtaddr_t) 0;
33471 +    else
33472 +       raddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
33473 +    
33474 +    spin_unlock (&uctx->uctx_rgnlock);
33475 +
33476 +    return (raddr);
33477 +}
33478 +
33479 +/*
33480 + * Local variables:
33481 + * c-file-style: "stroustrup"
33482 + * End:
33483 + */
33484 Index: linux-2.4.21/drivers/net/qsnet/elan4/routetable.c
33485 ===================================================================
33486 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/routetable.c      2004-02-23 16:02:56.000000000 -0500
33487 +++ linux-2.4.21/drivers/net/qsnet/elan4/routetable.c   2005-06-01 23:12:54.615436672 -0400
33488 @@ -0,0 +1,249 @@
33489 +/*
33490 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
33491 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
33492 + * 
33493 + *    For licensing information please see the supplied COPYING file
33494 + *
33495 + */
33496 +
33497 +#ident "@(#)$Id: routetable.c,v 1.15 2004/07/20 09:29:40 david Exp $"
33498 +/*      $Source: /cvs/master/quadrics/elan4mod/routetable.c,v $*/
33499 +
33500 +#include <qsnet/kernel.h>
33501 +
33502 +#include <elan4/sdram.h>
33503 +#include <elan4/debug.h>
33504 +#include <elan4/device.h>
33505 +
33506 +ELAN4_ROUTE_TABLE *
33507 +elan4_alloc_routetable (ELAN4_DEV *dev, unsigned size)
33508 +{
33509 +    ELAN4_ROUTE_TABLE *tbl;
33510 +
33511 +    KMEM_ZALLOC (tbl, ELAN4_ROUTE_TABLE *, sizeof (ELAN4_ROUTE_TABLE), 1);
33512 +
33513 +    if (tbl == (ELAN4_ROUTE_TABLE *) NULL)
33514 +       return (NULL);
33515 +    
33516 +    tbl->tbl_size    = (size & E4_VPT_SIZE_MASK);
33517 +    tbl->tbl_entries = elan4_sdram_alloc (dev, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
33518 +
33519 +    if (tbl->tbl_entries == 0)
33520 +    {
33521 +       KMEM_FREE (tbl, sizeof (ELAN4_ROUTE_TABLE));
33522 +       return ((ELAN4_ROUTE_TABLE *) NULL);
33523 +    }
33524 +
33525 +    spin_lock_init (&tbl->tbl_lock);
33526 +
33527 +    /* zero the route table */
33528 +    elan4_sdram_zeroq_sdram (dev, tbl->tbl_entries, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
33529 +
33530 +    return (tbl);
33531 +}
33532 +
33533 +void
33534 +elan4_free_routetable (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl)
33535 +{
33536 +    elan4_sdram_free (dev, tbl->tbl_entries, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
33537 +    
33538 +    spin_lock_destroy (&tbl->tbl_lock);
33539 +
33540 +    KMEM_FREE (tbl, sizeof (ELAN4_ROUTE_TABLE));
33541 +}
33542 +
33543 +void
33544 +elan4_write_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry)
33545 +{
33546 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
33547 +    
33548 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]), entry->Values[1]);
33549 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]), entry->Values[0]);
33550 +    pioflush_sdram (dev);
33551 +}
33552 +
33553 +void
33554 +elan4_read_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry)
33555 +{
33556 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
33557 +    
33558 +    entry->Values[0] = elan4_sdram_readq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]));
33559 +    entry->Values[1] = elan4_sdram_readq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]));
33560 +}
33561 +
33562 +void
33563 +elan4_invalidate_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp)
33564 +{
33565 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
33566 +
33567 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]), 0);
33568 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]), 0);
33569 +    pioflush_sdram (dev);
33570 +}
33571 +
33572 +static void
33573 +pack_them_routes (E4_VirtualProcessEntry *entry, E4_uint16 first, E4_uint8 *packed, unsigned ctx)
33574 +{
33575 +    E4_uint64 value0 = first;
33576 +    E4_uint64 value1 = ROUTE_CTXT_VALUE(ctx);
33577 +    E4_uint32 ThirdRouteBCastVal;
33578 +    register int i;
33579 +
33580 +    for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
33581 +    {
33582 +       value0 |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
33583 +       value1 |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
33584 +    }
33585 +
33586 +    /* DMA fix for large broadcast route values that fall into the double issue of route value 3 bug. */
33587 +    /* NOTE - this is only required when the link is running in Mod45 mode, it could be automatically
33588 +     *        disabled when Mod44 is detected */
33589 +    
33590 +    /* First seach for the alignment type. The bug is only sensitive to an odd bcast aligment on the 3rd word. */
33591 +    for (i=4;i<16;i++)
33592 +       if (((value0 >> (i*4)) & 0xc) == 4)
33593 +           i++;
33594 +    
33595 +    if (i == 17)
33596 +    {
33597 +       ThirdRouteBCastVal = value1 & 0xcccccccc;
33598 +       if      (((value1 & 0xfffff0000000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x04444444))
33599 +           value1 |= 0x140000000ULL;
33600 +       else if (((value1 & 0xfffffff00000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00044444))
33601 +           value1 |= 0x1400000ULL;
33602 +       else if (((value1 & 0xfffffffff000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00000444))
33603 +           value1 |= 0x14000ULL;
33604 +       else if (((value1 & 0xfffffffffff0ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00000004))
33605 +           value1 |= 0x140ULL;
33606 +    }
33607 +    
33608 +    entry->Values[0] = value0;
33609 +    entry->Values[1] = value1;
33610 +}
33611 +
33612 +int
33613 +elan4_generate_route (ELAN_POSITION *pos, E4_VirtualProcessEntry *route, unsigned ctx, unsigned lowid, unsigned highid, unsigned options)
33614 +{
33615 +    unsigned int broadcast    = (lowid != highid);
33616 +    unsigned int noadaptive   = 0;
33617 +    int          padbcast     = 0;
33618 +    E4_uint16    first;
33619 +    int                 rb;
33620 +    E4_uint8     packed[ROUTE_NUM_PACKED];
33621 +    int                 level, llink, hlink;
33622 +
33623 + regenerate_routes:
33624 +    first = 0;
33625 +    rb    = 0;
33626 +
33627 +    switch (pos->pos_mode)
33628 +    {
33629 +    case ELAN_POS_MODE_LOOPBACK:
33630 +       if (lowid != highid || lowid != pos->pos_nodeid)
33631 +           return (-EINVAL);
33632 +       
33633 +       route->Values[0] = FIRST_MYLINK;
33634 +       route->Values[1] = ROUTE_CTXT_VALUE (ctx);
33635 +       return (0);
33636 +
33637 +    case ELAN_POS_MODE_BACKTOBACK:
33638 +       if (lowid != highid || lowid == pos->pos_nodeid)
33639 +           return (-EINVAL);
33640 +       
33641 +       route->Values[0] = FIRST_MYLINK;
33642 +       route->Values[1] = ROUTE_CTXT_VALUE (ctx);
33643 +       return (0);
33644 +
33645 +    case ELAN_POS_MODE_SWITCHED:
33646 +    {
33647 +       unsigned char *arityp  = &pos->pos_arity[pos->pos_levels - 1];
33648 +       unsigned int   spanned = *arityp;
33649 +       unsigned int   broadcasting = 0;
33650 +       
33651 +       bzero (packed, sizeof (packed));
33652 +
33653 +       /* XXXX compute noadaptive ? */
33654 +
33655 +       for (level = 0; 
33656 +            level < pos->pos_levels && ! ((pos->pos_nodeid / spanned) == (lowid / spanned) &&
33657 +                                          (pos->pos_nodeid / spanned) ==  (highid / spanned)); 
33658 +            level++, spanned *= *(--arityp))
33659 +       {
33660 +           if (first == 0)
33661 +               first = (broadcast || noadaptive) ? FIRST_BCAST_TREE : FIRST_ADAPTIVE;
33662 +           else if (broadcast && padbcast)
33663 +           {
33664 +               padbcast = 0;
33665 +               packed[rb++] = PACKED_BCAST0(4, 4);
33666 +               packed[rb++] = PACKED_BCAST1(4, 4);
33667 +           }
33668 +           else
33669 +               packed[rb++] = (broadcast || noadaptive) ? PACKED_BCAST_TREE : PACKED_ADAPTIVE;    
33670 +       }
33671 +
33672 +       while (level >= 0)
33673 +       {
33674 +           spanned /= *arityp;
33675 +           
33676 +           llink = (lowid  / spanned) % *arityp;
33677 +           hlink = (highid / spanned) % *arityp;
33678 +           
33679 +           if (llink != hlink || broadcasting)
33680 +           {
33681 +               broadcasting = 1;
33682 +               
33683 +               if (first == 0)
33684 +                   first = FIRST_BCAST (hlink, llink);
33685 +               else
33686 +               {
33687 +                   packed[rb++] = PACKED_BCAST0(hlink, llink);
33688 +                   
33689 +                   if ((rb % 4) == 0 && PACKED_BCAST1(hlink, llink) == 0)
33690 +                   {
33691 +                       padbcast = 1;
33692 +                       goto regenerate_routes;
33693 +                   }
33694 +                   
33695 +                   packed[rb++] = PACKED_BCAST1(hlink, llink);
33696 +               }
33697 +           }
33698 +           else
33699 +           {
33700 +               if (first == 0)
33701 +                   first = FIRST_ROUTE(llink);
33702 +               else
33703 +                   packed[rb++] = PACKED_ROUTE(llink);
33704 +           }
33705 +           
33706 +           level--;
33707 +           arityp++;
33708 +       }
33709 +
33710 +       pack_them_routes (route, first | (options & FIRST_OPTIONS_MASK), packed, ctx);
33711 +       return (0);
33712 +    }
33713 +    }
33714 +
33715 +    return (-EINVAL);
33716 +}
33717 +
33718 +int
33719 +elan4_check_route (ELAN_POSITION *position, ELAN_LOCATION location, E4_VirtualProcessEntry *route, unsigned flags)
33720 +{
33721 +    /* XXXX - TBD */
33722 +    return (0);
33723 +}
33724 +
33725 +EXPORT_SYMBOL(elan4_alloc_routetable);
33726 +EXPORT_SYMBOL(elan4_free_routetable);
33727 +EXPORT_SYMBOL(elan4_write_route);
33728 +EXPORT_SYMBOL(elan4_read_route);
33729 +EXPORT_SYMBOL(elan4_invalidate_route);
33730 +EXPORT_SYMBOL(elan4_generate_route);
33731 +EXPORT_SYMBOL(elan4_check_route);
33732 +
33733 +/*
33734 + * Local variables:
33735 + * c-file-style: "stroustrup"
33736 + * End:
33737 + */
33738 Index: linux-2.4.21/drivers/net/qsnet/elan4/sdram.c
33739 ===================================================================
33740 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/sdram.c   2004-02-23 16:02:56.000000000 -0500
33741 +++ linux-2.4.21/drivers/net/qsnet/elan4/sdram.c        2005-06-01 23:12:54.617436368 -0400
33742 @@ -0,0 +1,1034 @@
33743 +/*
33744 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
33745 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
33746 + * 
33747 + *    For licensing information please see the supplied COPYING file
33748 + *
33749 + */
33750 +
33751 +#ident "@(#)$Id: sdram.c,v 1.29.6.1 2004/11/29 11:39:13 addy Exp $"
33752 +/*      $Source: /cvs/master/quadrics/elan4mod/sdram.c,v $*/
33753 +
33754 +#include <qsnet/kernel.h>
33755 +
33756 +#include <elan4/debug.h>
33757 +#include <elan4/device.h>
33758 +
33759 +EXPORT_SYMBOL_GPL(elan4_sdram_readb);
33760 +EXPORT_SYMBOL_GPL(elan4_sdram_readw);
33761 +EXPORT_SYMBOL_GPL(elan4_sdram_readl);
33762 +EXPORT_SYMBOL_GPL(elan4_sdram_readq);
33763 +EXPORT_SYMBOL_GPL(elan4_sdram_writeb);
33764 +EXPORT_SYMBOL_GPL(elan4_sdram_writew);
33765 +EXPORT_SYMBOL_GPL(elan4_sdram_writel);
33766 +EXPORT_SYMBOL_GPL(elan4_sdram_writeq);
33767 +EXPORT_SYMBOL_GPL(elan4_sdram_zerob_sdram);
33768 +EXPORT_SYMBOL_GPL(elan4_sdram_zerow_sdram);
33769 +EXPORT_SYMBOL_GPL(elan4_sdram_zerol_sdram);
33770 +EXPORT_SYMBOL_GPL(elan4_sdram_zeroq_sdram);
33771 +EXPORT_SYMBOL_GPL(elan4_sdram_copyb_from_sdram);
33772 +EXPORT_SYMBOL_GPL(elan4_sdram_copyw_from_sdram);
33773 +EXPORT_SYMBOL_GPL(elan4_sdram_copyl_from_sdram);
33774 +EXPORT_SYMBOL_GPL(elan4_sdram_copyq_from_sdram);
33775 +EXPORT_SYMBOL_GPL(elan4_sdram_copyb_to_sdram);
33776 +EXPORT_SYMBOL_GPL(elan4_sdram_copyw_to_sdram);
33777 +EXPORT_SYMBOL_GPL(elan4_sdram_copyl_to_sdram);
33778 +EXPORT_SYMBOL_GPL(elan4_sdram_copyq_to_sdram);
33779 +EXPORT_SYMBOL_GPL(elan4_sdram_alloc);
33780 +EXPORT_SYMBOL_GPL(elan4_sdram_free);
33781 +EXPORT_SYMBOL_GPL(elan4_sdram_flushcache);
33782 +
33783 +#define SDRAM_MIN_BANK_SIZE            ((1 << 15) * 8)         /* 256 Kbytes */
33784 +
33785 +static inline ELAN4_SDRAM_BANK *
33786 +sdramaddr_to_bank (ELAN4_DEV *dev, sdramaddr_t saddr)
33787 +{
33788 +    register int i;
33789 +    
33790 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
33791 +    {
33792 +       ELAN4_SDRAM_BANK *bank = &dev->dev_sdram_banks[i];
33793 +
33794 +       if (saddr >= bank->b_base && saddr < (bank->b_base + bank->b_size))
33795 +           return (bank);
33796 +    }
33797 +    printk ("sdramaddr_to_bank: sdram address %lx not in a sdram bank\n", saddr);
33798 +    BUG();
33799 +
33800 +    return (NULL);     /* NOTREACHED */
33801 +}
33802 +
33803 +static inline int
33804 +sdramaddr_to_bankoffset (ELAN4_DEV *dev, sdramaddr_t saddr)
33805 +{
33806 +    return (saddr & (sdramaddr_to_bank (dev, saddr)->b_size-1));
33807 +}
33808 +
33809 +static inline int
33810 +sdramaddr_to_bit(ELAN4_DEV *dev, int indx, sdramaddr_t saddr)
33811 +{
33812 +    return (sdramaddr_to_bankoffset(dev, saddr) >> (SDRAM_MIN_BLOCK_SHIFT+(indx)));
33813 +}
33814 +
33815 +static inline ioaddr_t
33816 +sdramaddr_to_ioaddr (ELAN4_DEV *dev, sdramaddr_t saddr)
33817 +{
33818 +    ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, saddr);
33819 +
33820 +    return (bank->b_ioaddr + (saddr - bank->b_base));
33821 +}
33822 +
33823 +unsigned char
33824 +elan4_sdram_readb (ELAN4_DEV *dev, sdramaddr_t off)
33825 +{
33826 +    return (__elan4_readb (dev, sdramaddr_to_ioaddr(dev, off)));
33827 +}
33828 +
33829 +unsigned short
33830 +elan4_sdram_readw (ELAN4_DEV *dev, sdramaddr_t off)
33831 +{
33832 +    return (__elan4_readw (dev, sdramaddr_to_ioaddr(dev, off)));
33833 +}
33834 +
33835 +unsigned int
33836 +elan4_sdram_readl (ELAN4_DEV *dev, sdramaddr_t off)
33837 +{
33838 +    return (__elan4_readl (dev, sdramaddr_to_ioaddr(dev, off)));
33839 +}
33840 +
33841 +unsigned long long
33842 +elan4_sdram_readq (ELAN4_DEV *dev, sdramaddr_t off)
33843 +{
33844 +    return (readq (sdramaddr_to_ioaddr(dev, off)));
33845 +}
33846 +
33847 +void
33848 +elan4_sdram_writeb (ELAN4_DEV *dev, sdramaddr_t off, unsigned char val)
33849 +{
33850 +    writeb (val, sdramaddr_to_ioaddr(dev, off));
33851 +
33852 +    mb();
33853 +}
33854 +
33855 +void
33856 +elan4_sdram_writew (ELAN4_DEV *dev, sdramaddr_t off, unsigned short val)
33857 +{
33858 +    writew (val, sdramaddr_to_ioaddr(dev, off));
33859 +
33860 +    mb();
33861 +}
33862 +
33863 +void
33864 +elan4_sdram_writel (ELAN4_DEV *dev, sdramaddr_t off, unsigned int val)
33865 +{
33866 +    writel (val, sdramaddr_to_ioaddr(dev, off));
33867 +
33868 +    mb();
33869 +}
33870 +
33871 +void
33872 +elan4_sdram_writeq (ELAN4_DEV *dev, sdramaddr_t off, unsigned long long val)
33873 +{
33874 +    writeq (val, sdramaddr_to_ioaddr(dev, off));
33875 +
33876 +    mb();
33877 +}
33878 +
33879 +void
33880 +elan4_sdram_zerob_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
33881 +{
33882 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
33883 +    ioaddr_t lim  = dest + nbytes;
33884 +
33885 +    for (; dest < lim; dest += sizeof (u8))
33886 +       writeb (0, dest);
33887 +}
33888 +
33889 +void
33890 +elan4_sdram_zerow_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
33891 +{
33892 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
33893 +    ioaddr_t lim  = dest + nbytes;
33894 +
33895 +    for (; dest < lim; dest += sizeof (u16))    /* 16-bit stores to match "w" width */
33896 +       writew (0, dest);
33897 +}
33898 +
33899 +void
33900 +elan4_sdram_zerol_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
33901 +{
33902 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
33903 +    ioaddr_t lim  = dest + nbytes;
33904 +
33905 +    for (; dest < lim; dest += sizeof (u32))
33906 +       writel (0, dest);
33907 +}
33908 +
33909 +void
33910 +elan4_sdram_zeroq_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
33911 +{
33912 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
33913 +    ioaddr_t lim  = dest + nbytes;
33914 +
33915 +#ifdef CONFIG_MPSAS
33916 +    if (sas_memset_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, to, 0, nbytes) == 0)
33917 +       return;
33918 +#endif
33919 +
33920 +    for (; dest < lim; dest += sizeof (u64))
33921 +       writeq (0, dest);
33922 +}
33923 +
33924 +void
33925 +elan4_sdram_copyb_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
33926 +{
33927 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
33928 +    u8      *dest = (u8 *) to;
33929 +    ioaddr_t lim  = src + nbytes;
33930 +
33931 +    for (; src < lim; src += sizeof (u8))
33932 +       *dest++ = __elan4_readb (dev, src);
33933 +}
33934 +
33935 +void
33936 +elan4_sdram_copyw_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
33937 +{
33938 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
33939 +    u16     *dest = (u16 *) to;
33940 +    ioaddr_t lim  = src + nbytes;
33941 +
33942 +    for (; src < lim; src += sizeof (u16))
33943 +       *dest++ = __elan4_readw (dev, src);
33944 +}
33945 +
33946 +void
33947 +elan4_sdram_copyl_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
33948 +{
33949 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
33950 +    u32     *dest = (u32 *) to;
33951 +    ioaddr_t lim  = src + nbytes;
33952 +
33953 +    for (; src < lim; src += sizeof (u32))
33954 +       *dest++ = __elan4_readl (dev, src);
33955 +}
33956 +
33957 +void
33958 +elan4_sdram_copyq_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
33959 +{
33960 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
33961 +    u64     *dest = (u64 *) to;
33962 +    ioaddr_t lim  = src + nbytes;
33963 +
33964 +#ifdef CONFIG_MPSAS
33965 +    if (sas_copyfrom_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, from, (unsigned long) to, nbytes) == 0)
33966 +       return;
33967 +#endif
33968 +
33969 +    for (; src < lim; src += sizeof (u64))
33970 +       *dest++ = readq (src);
33971 +}
33972 +
33973 +void
33974 +elan4_sdram_copyb_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
33975 +{
33976 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
33977 +    u8      *src  = (u8 *) from;
33978 +    ioaddr_t lim  = dest + nbytes;
33979 +
33980 +    for (; dest < lim; dest += sizeof (u8))
33981 +       writeb (*src++, dest);
33982 +
33983 +    mb();
33984 +}
33985 +
33986 +void
33987 +elan4_sdram_copyw_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
33988 +{
33989 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
33990 +    u16     *src  = (u16 *) from;
33991 +    ioaddr_t lim  = dest + nbytes;
33992 +
33993 +    for (; dest < lim; dest += sizeof (u16))
33994 +       writew (*src++, dest);
33995 +
33996 +    mb();
33997 +}
33998 +
33999 +void
34000 +elan4_sdram_copyl_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
34001 +{
34002 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34003 +    u32     *src  = (u32 *) from;
34004 +    ioaddr_t lim  = dest + nbytes;
34005 +
34006 +    for (; dest < lim; dest += sizeof (u32))    /* 32-bit stores; src steps 4 bytes per store */
34007 +       writel (*src++, dest);
34008 +
34009 +    mb();
34010 +}
34011 +
34012 +void
34013 +elan4_sdram_copyq_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
34014 +{
34015 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34016 +    u64     *src  = (u64 *) from;
34017 +    ioaddr_t lim  = dest + nbytes;
34018 +
34019 +#ifdef CONFIG_MPSAS
34020 +    if (sas_copyto_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, to, (unsigned long) from, nbytes) == 0)
34021 +       return;
34022 +#endif
34023 +
34024 +    for (; dest < lim; dest += sizeof (u64))
34025 +       writeq (*src++, dest);
34026 +
34027 +    mb();
34028 +}
34029 +
34030 +/* sdram buddy allocator */
34031 +typedef struct sdramblock
34032 +{
34033 +    sdramaddr_t        next;
34034 +    sdramaddr_t prev;
34035 +} sdramblock_t;
34036 +
34037 +static inline sdramaddr_t
34038 +read_next (ELAN4_DEV *dev, sdramaddr_t block)
34039 +{
34040 +    return __elan4_readl (dev, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, next)));
34041 +}
34042 +
34043 +static inline sdramaddr_t
34044 +read_prev (ELAN4_DEV *dev, sdramaddr_t block)
34045 +{
34046 +    return __elan4_readl (dev, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, prev)));
34047 +}
34048 +
34049 +static inline void
34050 +write_next (ELAN4_DEV *dev, sdramaddr_t block, sdramaddr_t val)
34051 +{
34052 +    writel (val, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, next)));
34053 +}
34054 +
34055 +static inline void
34056 +write_prev (ELAN4_DEV *dev, sdramaddr_t block, sdramaddr_t val)
34057 +{
34058 +    writel (val, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, prev)));
34059 +}
34060 +
34061 +static inline void
34062 +freelist_insert (ELAN4_DEV *dev, int idx, sdramaddr_t block)
34063 +{
34064 +    sdramaddr_t next = dev->dev_sdram_freelists[(idx)];
34065 +
34066 +    /*
34067 +     * block->prev = NULL;
34068 +     * block->next = next;
34069 +     * if (next != NULL)
34070 +     *    next->prev = block;
34071 +     * freelist = block;
34072 +     */
34073 +    write_prev (dev, block, (sdramaddr_t) 0);
34074 +    write_next (dev, block, next);
34075 +    if (next != (sdramaddr_t) 0)
34076 +       write_prev (dev, next, block);
34077 +    dev->dev_sdram_freelists[idx] = block;
34078 +
34079 +    dev->dev_sdram_freecounts[idx]++;
34080 +    dev->dev_stats.s_sdram_bytes_free += (SDRAM_MIN_BLOCK_SIZE << idx);
34081 +
34082 +    mb();
34083 +}
34084 +
34085 +static inline void
34086 +freelist_remove (ELAN4_DEV *dev,int idx, sdramaddr_t block)
34087 +{
34088 +    /*
34089 +     * if (block->prev)
34090 +     *     block->prev->next = block->next;
34091 +     * else
34092 +     *     dev->dev_sdram_freelists[idx] = block->next;
34093 +     * if (block->next)
34094 +     *     block->next->prev = block->prev;
34095 +     */
34096 +    sdramaddr_t blocknext = read_next (dev, block);
34097 +    sdramaddr_t blockprev = read_prev (dev, block);
34098 +
34099 +    if (blockprev)
34100 +       write_next (dev, blockprev, blocknext);
34101 +    else
34102 +       dev->dev_sdram_freelists[idx] = blocknext;
34103 +    if (blocknext)
34104 +       write_prev (dev, blocknext, blockprev);
34105 +
34106 +    dev->dev_sdram_freecounts[idx]--;
34107 +    dev->dev_stats.s_sdram_bytes_free -= (SDRAM_MIN_BLOCK_SIZE << idx);
34108 +
34109 +    mb();
34110 +}
34111 +
34112 +static inline void
34113 +freelist_removehead(ELAN4_DEV *dev, int idx, sdramaddr_t block)
34114 +{
34115 +    sdramaddr_t blocknext = read_next (dev, block);
34116 +
34117 +    if ((dev->dev_sdram_freelists[idx] = blocknext) != 0)
34118 +       write_prev (dev, blocknext, 0);
34119 +
34120 +    dev->dev_sdram_freecounts[idx]--;
34121 +    dev->dev_stats.s_sdram_bytes_free -= (SDRAM_MIN_BLOCK_SIZE << idx);
34122 +
34123 +    mb();
34124 +}
34125 +
34126 +#ifdef DEBUG
34127 +static int
34128 +display_blocks (ELAN4_DEV *dev, int indx, char *string)
34129 +{
34130 +    sdramaddr_t block;
34131 +    int nbytes = 0;
34132 +
34133 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "%s - indx %d\n", string, indx);
34134 +    for (block = dev->dev_sdram_freelists[indx]; block != (sdramaddr_t) 0; block = read_next (dev, block))
34135 +    {
34136 +       PRINTF (DBG_DEVICE, DBG_SDRAM, "  %x\n", block);
34137 +       nbytes += (SDRAM_MIN_BLOCK_SIZE << indx);
34138 +    }
34139 +
34140 +    return (nbytes);
34141 +}
34142 +
34143 +void
34144 +elan4_sdram_display (ELAN4_DEV *dev, char *string)
34145 +{
34146 +    int indx;
34147 +    int nbytes = 0;
34148 +    
34149 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_display: dev=%p\n", dev);
34150 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
34151 +       if (dev->dev_sdram_freelists[indx] != (sdramaddr_t) 0)
34152 +           nbytes += display_blocks (dev, indx, string);
34153 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "\n%d bytes free - %d pages free\n", nbytes, nbytes/SDRAM_PAGE_SIZE);
34154 +}
34155 +
34156 +void
34157 +elan4_sdram_verify (ELAN4_DEV *dev)
34158 +{
34159 +    int indx, size, nbits, i, b;
34160 +    sdramaddr_t block;
34161 +
34162 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
34163 +    {
34164 +       unsigned count = 0;
34165 +
34166 +       for (block = dev->dev_sdram_freelists[indx]; block; block = read_next (dev, block), count++)
34167 +       {
34168 +           ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, block);
34169 +           unsigned         off  = sdramaddr_to_bankoffset (dev, block);
34170 +           int              bit  = sdramaddr_to_bit (dev, indx, block);
34171 +
34172 +           if ((block & (size-1)) != 0)
34173 +               printk ("elan4_sdram_verify: block=%lx indx=%x - not aligned\n", block, indx);
34174 +           
34175 +           if (bank == NULL || off > bank->b_size)
34176 +               printk ("elan4_sdram_verify: block=%lx indx=%x - outside bank\n", block, indx);
34177 +           else if (BT_TEST (bank->b_bitmaps[indx], bit) == 0)
34178 +               printk ("elan4_sdram_verify: block=%lx indx=%x - bit not set\n", block, indx);
34179 +           else
34180 +           {
34181 +               for (i = indx-1, nbits = 2; i >= 0; i--, nbits <<= 1)
34182 +               {
34183 +                   bit = sdramaddr_to_bit (dev, i, block);
34184 +
34185 +                   for (b = 0; b < nbits; b++)
34186 +                       if (BT_TEST(bank->b_bitmaps[i], bit + b))
34187 +                           printk ("elan4_sdram_verify: block=%lx indx=%x - also free i=%d bit=%x\n", block, indx, i, bit+b);
34188 +               }
34189 +           }
34190 +       }
34191 +
34192 +       if (dev->dev_sdram_freecounts[indx] != count)
34193 +           printk ("elan4_sdram_verify: indx=%x expected %d got %d\n", indx, dev->dev_sdram_freecounts[indx], count);
34194 +    }
34195 +}
34196 +
34197 +#endif
34198 +
34199 +static void
34200 +free_block (ELAN4_DEV *dev, sdramaddr_t block, int indx)
34201 +{
34202 +    ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, block);
34203 +    unsigned         bit  = sdramaddr_to_bit (dev, indx, block);
34204 +    unsigned         size = SDRAM_MIN_BLOCK_SIZE << indx;
34205 +
34206 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: block=%x indx=%d bit=%x\n", block, indx, bit);
34207 +
34208 +    ASSERT ((block & (size-1)) == 0);
34209 +    ASSERT (BT_TEST (bank->b_bitmaps[indx], bit) == 0);
34210 +
34211 +    while (BT_TEST (bank->b_bitmaps[indx], bit ^ 1))
34212 +    {
34213 +       sdramaddr_t buddy = block ^ size;
34214 +       
34215 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: merge block=%x buddy=%x indx=%d\n", block, buddy, indx);
34216 +       
34217 +       BT_CLEAR (bank->b_bitmaps[indx], bit ^ 1);
34218 +       
34219 +       freelist_remove (dev, indx, buddy);
34220 +       
34221 +       block = (block < buddy) ? block : buddy;
34222 +       indx++;
34223 +       size <<= 1;
34224 +       bit >>= 1;
34225 +    }
34226 +    
34227 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: free block=%x indx=%d bit=%x\n", block, indx, bit);
34228 +    
34229 +    freelist_insert (dev, indx, block);
34230 +    
34231 +    BT_SET (bank->b_bitmaps[indx], bit);
34232 +}
34233 +
34234 +void
34235 +elan4_sdram_init (ELAN4_DEV *dev)
34236 +{
34237 +    int indx;
34238 +
34239 +    spin_lock_init (&dev->dev_sdram_lock);
34240 +
34241 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
34242 +    {
34243 +       dev->dev_sdram_freelists[indx]  = (sdramaddr_t) 0;
34244 +       dev->dev_sdram_freecounts[indx] = 0;
34245 +    }
34246 +}
34247 +
34248 +void
34249 +elan4_sdram_fini (ELAN4_DEV *dev)
34250 +{
34251 +    spin_lock_destroy (&dev->dev_sdram_lock);
34252 +}
34253 +
34254 +#ifdef CONFIG_MPSAS
34255 +/* size of Elan SDRAM in simulation */
34256 +#define SDRAM_used_addr_bits           (16)
34257 +#define SDRAM_SIMULATION_BANK_SIZE     ((1 << SDRAM_used_addr_bits) * 8)       /* 128 kbytes */
34258 +
34259 +static int
34260 +elan4_sdram_probe_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
34261 +{
34262 +    printk ("elan%d: memory bank %d is %d Kb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (SDRAM_SIMULATION_BANK_SIZE / 1024));
34263 +
34264 +    bank->b_size = SDRAM_SIMULATION_BANK_SIZE;
34265 +
34266 +    return 1;
34267 +}
34268 +
34269 +#else
34270 +
34271 +static void
34272 +initialise_cache_tags (ELAN4_DEV *dev, unsigned addr)
34273 +{
34274 +    register int set, line;
34275 +
34276 +    mb();
34277 +
34278 +    /* Initialise the whole cache to hold sdram at "addr" as direct mapped */
34279 +
34280 +    for (set = 0; set < E4_NumCacheSets; set++)
34281 +       for (line = 0; line < E4_NumCacheLines; line++)
34282 +           write_tag (dev, Tags[set][line], addr | (set << 13) | (1 << 11));
34283 +
34284 +    read_tag (dev, Tags[set][line]);   /* read it back to guarantee the memory system is quiet again */
34285 +    mb();
34286 +}
34287 +
34288 +static __inline__ int
34289 +sdram_GreyToBinary(int GreyVal, int NoOfBits)
34290 +{
34291 +    int Bit;
34292 +    int BinaryVal=0;
34293 +    for (Bit=(1 << (NoOfBits-1)); Bit != 0; Bit >>= 1)
34294 +       BinaryVal ^= (GreyVal & Bit) ^ ((BinaryVal >> 1) & Bit);
34295 +    return (BinaryVal);
34296 +}
34297 +
34298 +static __inline__ int
34299 +sdram_BinaryToGrey(int BinaryVal)
34300 +{
34301 +    return (BinaryVal ^ (BinaryVal >> 1));
34302 +}
34303 +
34304 +void
34305 +elan4_sdram_setup_delay_lines (ELAN4_DEV *dev)
34306 +{
34307 +    /* This is used to fix the SDRAM delay line values */
34308 +    int i, AutoGenDelayValue=0;
34309 +    int NewDelayValue;
34310 +
34311 +    if (dev->dev_sdram_cfg & SDRAM_FIXED_DELAY_ENABLE)   /* already setup. */
34312 +       return;
34313 +
34314 +    /* now get an average of 10 dll values */
34315 +    for (i=0;i<10;i++)
34316 +       AutoGenDelayValue += sdram_GreyToBinary(SDRAM_GET_DLL_DELAY(read_reg64 (dev, SDRamConfigReg)),
34317 +                                              SDRAM_FIXED_DLL_DELAY_BITS);
34318 +
34319 +    NewDelayValue = SDRAM_DLL_CORRECTION_FACTOR + (AutoGenDelayValue / 10); /* Mean of 10 values */
34320 +
34321 +    dev->dev_sdram_cfg = (dev->dev_sdram_cfg & ~(SDRAM_FIXED_DLL_DELAY_MASK << SDRAM_FIXED_DLL_DELAY_SHIFT)) |
34322 +                         SDRAM_FIXED_DELAY_ENABLE | SDRAM_FIXED_DLL_DELAY(sdram_BinaryToGrey(NewDelayValue));
34323 +
34324 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg);     /* Put back the new value */
34325 +
34326 +    pioflush_reg (dev);
34327 +}
34328 +
34329 +static int
34330 +elan4_sdram_probe_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
34331 +{
34332 +    unsigned long      mappedsize = bank->b_size;
34333 +    ioaddr_t           ioaddr;
34334 +    unsigned long long value, size;
34335 +    register int       i;
34336 +    extern int         sdram_bank_limit;
34337 +
34338 +    if (mappedsize > SDRAM_MAX_BLOCK_SIZE)
34339 +       mappedsize = SDRAM_MAX_BLOCK_SIZE;
34340 +
34341 +    while ((ioaddr = elan4_map_device (dev, ELAN4_BAR_SDRAM, bank->b_base, mappedsize, &bank->b_handle)) == 0)
34342 +    {
34343 +       if (mappedsize <= (64*1024*1024))                       /* boards normally populated with 64mb, so winge if we can't see this much */
34344 +           printk ("elan%d: could not map bank %d size %dMb\n", dev->dev_instance, (int)(bank - dev->dev_sdram_banks), (int)mappedsize/(1024*1024));
34345 +
34346 +       if ((mappedsize >>= 1) < (1024*1024))
34347 +           return 0;
34348 +    }
34349 +
34350 +    /* first probe to see if the memory bank is present */
34351 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
34352 +       initialise_cache_tags (dev, E4_CacheSize);
34353 +
34354 +    for (i = 0; i < 64; i++)
34355 +    {
34356 +       unsigned long long pattern = (1ull << i);
34357 +
34358 +       writeq (pattern, ioaddr);                                       /* write pattern at base  */
34359 +
34360 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
34361 +           initialise_cache_tags (dev, 0);
34362 +
34363 +       writeq (~pattern, ioaddr + E4_CacheSize);                       /* write ~pattern at cachesize */
34364 +
34365 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
34366 +          initialise_cache_tags (dev, E4_CacheSize);
34367 +       
34368 +       writeq (~pattern, ioaddr + 2*E4_CacheSize);                     /* write ~pattern at 2*cachesize */
34369 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
34370 +           initialise_cache_tags (dev, 2*E4_CacheSize);
34371 +       
34372 +       value = readq (ioaddr);                                 /* read pattern back at 0 */
34373 +       
34374 +       if (value != pattern)
34375 +       {
34376 +           printk ("elan%d: sdram bank %d not present\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
34377 +           elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
34378 +           return 0;
34379 +       }
34380 +    }
34381 +    
34382 +    /* sdram bank is present, so work out its size.  We store the maximum size at the base
34383 +     * and then store the address at each address on every power of two address until
34384 +     * we reach the minimum mappable size (PAGESIZE), we then read back the value at the
34385 +     * base to determine the bank size */
34386 +    writeq (mappedsize, ioaddr);
34387 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
34388 +        initialise_cache_tags (dev, 0);
34389 +
34390 +    for (size = mappedsize >> 1; size > PAGE_SIZE; size >>= 1)
34391 +    {
34392 +       writeq (size, ioaddr + size);
34393 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
34394 +           initialise_cache_tags (dev, size);
34395 +    }
34396 +
34397 +    if ((size = readq (ioaddr)) < SDRAM_MIN_BANK_SIZE)
34398 +    {
34399 +       printk ("elan%d: memory bank %d dubious\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
34400 +       elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
34401 +       return 0;
34402 +    }
34403 +
34404 +    if (sdram_bank_limit == 0 || size <= (sdram_bank_limit * 1024 * 1024))
34405 +       printk ("elan%d: memory bank %d is %d Mb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (size / (1024*1024)));
34406 +    else
34407 +    {
34408 +       size = (sdram_bank_limit * 1024 * 1024);
34409 +       printk ("elan%d: limit bank %d to %d Mb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (size / (1024*1024)));
34410 +    }
34411 +
34412 +    bank->b_size = size;
34413 +
34414 +    elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
34415 +    return 1;
34416 +}
34417 +#endif
34418 +
34419 +int
34420 +elan4_sdram_init_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
34421 +{
34422 +    int indx, size;
34423 +
34424 +    bank->b_ioaddr = 0;
34425 +
34426 +    if (! elan4_sdram_probe_bank (dev, bank))
34427 +       return 0;
34428 +
34429 +    if ((bank->b_ioaddr = elan4_map_device (dev, ELAN4_BAR_SDRAM, bank->b_base, bank->b_size, &bank->b_handle)) == (ioaddr_t) 0)
34430 +    {
34431 +       printk ("elan%d: could not map sdrambank %d\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
34432 +       return 0;
34433 +    }
34434 +
34435 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->b_size; indx++, size <<= 1) /* allocate the buddy allocator bitmaps */
34436 +       KMEM_ZALLOC (bank->b_bitmaps[indx], bitmap_t *, sizeof (bitmap_t) * BT_BITOUL(bank->b_size/size), 1);
34437 +    
34438 +    return 1;
34439 +}
34440 +
34441 +void
34442 +elan4_sdram_fini_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
34443 +{
34444 +    int indx, size;
34445 +
34446 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->b_size; indx++, size <<= 1)
34447 +       KMEM_FREE (bank->b_bitmaps[indx], sizeof (bitmap_t) * BT_BITOUL(bank->b_size/size));
34448 +    
34449 +    elan4_unmap_device (dev, bank->b_ioaddr, bank->b_size, &bank->b_handle);
34450 +}
34451 +
34452 +void
34453 +elan4_sdram_add_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
34454 +{
34455 +    sdramaddr_t base = bank->b_base;
34456 +    sdramaddr_t top  = bank->b_base + bank->b_size;
34457 +    register int indx;
34458 +    register unsigned long size;
34459 +
34460 +    /* align to the minimum block size */
34461 +    base = (base + SDRAM_MIN_BLOCK_SIZE - 1) & ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
34462 +    top &= ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
34463 +
34464 +    /* don't allow 0 as a valid "base" */
34465 +    if (base == 0)
34466 +       base = SDRAM_MIN_BLOCK_SIZE;
34467 +
34468 +    /* carve the bottom to the biggest boundary */
34469 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
34470 +    {
34471 +       if ((base & size) == 0)
34472 +           continue;
34473 +
34474 +       if ((base + size) > top)
34475 +           break;
34476 +
34477 +       free_block (dev, base, indx);
34478 +       
34479 +       base += size;
34480 +    }
34481 +
34482 +    /* carve the top down to the biggest boundary */
34483 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
34484 +    {
34485 +       if ((top & size) == 0)
34486 +           continue;
34487 +
34488 +       if ((top - size) < base)
34489 +           break;
34490 +
34491 +       free_block (dev, (top - size), indx);
34492 +       
34493 +       top -= size;
34494 +    }
34495 +
34496 +    /* now free the space in between */
34497 +    while (base < top)
34498 +    {
34499 +       free_block (dev, base, (SDRAM_NUM_FREE_LISTS-1));
34500 +
34501 +       base += SDRAM_MAX_BLOCK_SIZE;
34502 +    }
34503 +}
34504 +
34505 +sdramaddr_t
34506 +elan4_sdram_alloc (ELAN4_DEV *dev, int nbytes)
34507 +{
34508 +    sdramaddr_t block;
34509 +    register int i, indx;
34510 +    unsigned long size;
34511 +    unsigned long flags;
34512 +
34513 +    spin_lock_irqsave (&dev->dev_sdram_lock, flags);
34514 +
34515 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
34516 +       ;
34517 +
34518 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_alloc: nbytes=%d indx=%d\n", nbytes, indx);
34519 +
34520 +    /* need to split a bigger block up */
34521 +    for (i = indx; i < SDRAM_NUM_FREE_LISTS; i++, size <<= 1)
34522 +       if (dev->dev_sdram_freelists[i])
34523 +           break;
34524 +    
34525 +    if (i == SDRAM_NUM_FREE_LISTS)
34526 +    {
34527 +       spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
34528 +       printk ("elan4_sdram_alloc: %d bytes failed\n", nbytes);
34529 +       return ((sdramaddr_t) 0);
34530 +    }
34531 +    
34532 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_alloc: use block=%x indx=%d\n", dev->dev_sdram_freelists[i], i);
34533 +
34534 +    /* remove the block from the free list */
34535 +    freelist_removehead (dev, i, (block = dev->dev_sdram_freelists[i]));
34536 +
34537 +    /* clear the appropriate bit in the bitmap */
34538 +    BT_CLEAR (sdramaddr_to_bank (dev, block)->b_bitmaps[i], sdramaddr_to_bit (dev,i, block));
34539 +
34540 +    /* and split it up as required */
34541 +    while (i-- > indx)
34542 +       free_block (dev, block + (size >>= 1), i);
34543 +
34544 +    spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
34545 +
34546 +    ASSERT ((block & ((SDRAM_MIN_BLOCK_SIZE << (indx))-1)) == 0);
34547 +
34548 +#ifdef CONFIG_MPSAS
34549 +    elan4_sdram_zeroq_sdram (dev, block, sizeof (sdramblock_t));
34550 +#endif
34551 +
34552 +    return ((sdramaddr_t) block);
34553 +}
34554 +
34555 +void
34556 +elan4_sdram_free (ELAN4_DEV *dev, sdramaddr_t block, int nbytes)
34557 +{
34558 +    register int indx;
34559 +    unsigned long size;
34560 +    unsigned long flags;
34561 +
34562 +    spin_lock_irqsave (&dev->dev_sdram_lock, flags);
34563 +
34564 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
34565 +       ;
34566 +
34567 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_free: indx=%d block=%x\n", indx, block);
34568 +
34569 +    free_block (dev, block, indx);
34570 +
34571 +    spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
34572 +}
34573 +
34574 +void
34575 +elan4_sdram_flushcache (ELAN4_DEV *dev, sdramaddr_t addr, int len)
34576 +{
34577 +    int set, off;
34578 +
34579 +    SET_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
34580 +
34581 +    /*
34582 +     * if flushing more than a single set (8K), then you have to flush the whole cache.
34583 +     *   NOTE - in the real world we will probably want to generate a burst across
34584 +     *          the pci bus.
34585 +     */
34586 +    if (len >= E4_CacheSetSize)
34587 +    {
34588 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => whole cache\n", addr, len, addr + len);
34589 +
34590 +#ifdef CONFIG_MPSAS
34591 +       elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space, E4_CacheSize);
34592 +#else
34593 +       for (set = 0; set < E4_NumCacheSets; set++)
34594 +           for (off = 0; off < E4_CacheSetSize; off += E4_CacheLineSize)
34595 +               elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
34596 +#endif
34597 +    }
34598 +    else
34599 +    {
34600 +       unsigned base    = addr & ~(E4_CACHELINE_SIZE-1);
34601 +       unsigned top     = (addr + len + (E4_CACHELINE_SIZE-1)) & ~(E4_CACHELINE_SIZE-1);
34602 +       unsigned baseoff = base & (E4_CacheSetSize-1);
34603 +       unsigned topoff  = top  & (E4_CacheSetSize-1);
34604 +
34605 +       if ((base ^ top) & E4_CacheSetSize)                     /* wraps */
34606 +       {
34607 +           PRINTF7 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => split cache (%x,%x %x,%x)\n", 
34608 +                    addr, len, addr + len, 0, topoff, baseoff, E4_CacheSetSize);
34609 +
34610 +#ifdef CONFIG_MPSAS
34611 +           for (set = 0; set < E4_NumCacheSets; set++)
34612 +           {
34613 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize), topoff);
34614 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + baseoff, E4_CacheSetSize - baseoff);
34615 +           }
34616 +#else
34617 +           for (set = 0; set < E4_NumCacheSets; set++)
34618 +           {
34619 +               for (off = 0; off < (top & (E4_CacheSetSize-1)); off += E4_CACHELINE_SIZE)
34620 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
34621 +               
34622 +               for (off = (base & (E4_CacheSetSize-1)); off < E4_CacheSetSize; off += E4_CACHELINE_SIZE)
34623 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
34624 +           }
34625 +#endif
34626 +       }
34627 +       else
34628 +       {
34629 +           PRINTF5 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => part cache (%x,%x)\n", 
34630 +                    addr, len, addr + len, baseoff, topoff);
34631 +
34632 +#ifdef CONFIG_MPSAS
34633 +           for (set = 0; set < E4_NumCacheSets; set++)
34634 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + baseoff, topoff - baseoff);
34635 +#else
34636 +           for (set = 0; set < E4_NumCacheSets; set++)
34637 +               for (off = (base & (E4_CacheSetSize-1)); off < (top & (E4_CacheSetSize-1)); off += E4_CACHELINE_SIZE)
34638 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
34639 +#endif
34640 +       }
34641 +    }
34642 +    pioflush_sdram (dev);
34643 +    
34644 +    CLEAR_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
34645 +}
34646 +
34647 +static char *
34648 +get_correctableErr_bitpos(uint SyndromeBits)
34649 +{
34650 +    switch (SyndromeBits)
34651 +    {
34652 +    case 0x00: return ("NoErr");
34653 +    case 0x31: return ("00"); 
34654 +    case 0x32: return ("01"); 
34655 +    case 0xc4: return ("02"); 
34656 +    case 0xc8: return ("03"); 
34657 +    case 0x26: return ("04"); 
34658 +    case 0x91: return ("05"); 
34659 +    case 0x89: return ("06"); 
34660 +    case 0x64: return ("07"); 
34661 +    case 0xc1: return ("08"); 
34662 +    case 0xf2: return ("09"); 
34663 +    case 0x34: return ("10"); 
34664 +    case 0xf8: return ("11"); 
34665 +    case 0xf1: return ("12"); 
34666 +    case 0xc2: return ("13"); 
34667 +    case 0xf4: return ("14"); 
34668 +    case 0x38: return ("15"); 
34669 +    case 0xd6: return ("16"); 
34670 +    case 0xa1: return ("17"); 
34671 +    case 0x79: return ("18"); 
34672 +    case 0xa4: return ("19"); 
34673 +    case 0xd9: return ("20"); 
34674 +    case 0xa2: return ("21"); 
34675 +    case 0x76: return ("22"); 
34676 +    case 0xa8: return ("23"); 
34677 +    case 0xe6: return ("24"); 
34678 +    case 0x51: return ("25"); 
34679 +    case 0xb9: return ("26"); 
34680 +    case 0x54: return ("27"); 
34681 +    case 0xe9: return ("28"); 
34682 +    case 0x52: return ("29"); 
34683 +    case 0xb6: return ("30"); 
34684 +    case 0x58: return ("31"); 
34685 +    case 0x13: return ("32"); 
34686 +    case 0x23: return ("33"); 
34687 +    case 0x4c: return ("34"); 
34688 +    case 0x8c: return ("35"); 
34689 +    case 0x62: return ("36"); 
34690 +    case 0x19: return ("37"); 
34691 +    case 0x98: return ("38"); 
34692 +    case 0x46: return ("39"); 
34693 +    case 0x1c: return ("40"); 
34694 +    case 0x2f: return ("41"); 
34695 +    case 0x43: return ("42"); 
34696 +    case 0x8f: return ("43"); 
34697 +    case 0x1f: return ("44"); 
34698 +    case 0x2c: return ("45"); 
34699 +    case 0x4f: return ("46"); 
34700 +    case 0x83: return ("47"); 
34701 +    case 0x6d: return ("48"); 
34702 +    case 0x1a: return ("49"); 
34703 +    case 0x97: return ("50"); 
34704 +    case 0x4a: return ("51"); 
34705 +    case 0x9d: return ("52"); 
34706 +    case 0x2a: return ("53"); 
34707 +    case 0x67: return ("54"); 
34708 +    case 0x8a: return ("55"); 
34709 +    case 0x6e: return ("56"); 
34710 +    case 0x15: return ("57"); 
34711 +    case 0x9b: return ("58"); 
34712 +    case 0x45: return ("59"); 
34713 +    case 0x9e: return ("60"); 
34714 +    case 0x25: return ("61"); 
34715 +    case 0x6b: return ("62"); 
34716 +    case 0x85: return ("63"); 
34717 +    case 0x01: return ("C0"); 
34718 +    case 0x02: return ("C1"); 
34719 +    case 0x04: return ("C2"); 
34720 +    case 0x08: return ("C3"); 
34721 +    case 0x10: return ("C4"); 
34722 +    case 0x20: return ("C5"); 
34723 +    case 0x40: return ("C6"); 
34724 +    case 0x80: return ("C7"); 
34725 +
34726 +    case 0x07: case 0x0b: case 0x0d: case 0x0e: case 0x3d: case 0x3e: case 0x70: case 0x7c: // T  
34727 +    case 0xb0: case 0xbc: case 0xc7: case 0xcb: case 0xd0: case 0xd3: case 0xe0: case 0xe3: // T  
34728 +       return ("triple");
34729 +
34730 +    case 0x0f: case 0x55: case 0x5a: case 0xa5: case 0xaa: case 0xf0: case 0xff: // Q  
34731 +       return ("quadruple");
34732 +
34733 +    case 0x16: case 0x29: case 0x37: case 0x3b: case 0x49: case 0x57: case 0x5b: case 0x5d: case 0x5e: case 0x61: // M  
34734 +    case 0x68: case 0x73: case 0x75: case 0x7a: case 0x7f: case 0x86: case 0x92: case 0x94: case 0xa7: case 0xab: // M  
34735 +    case 0xad: case 0xae: case 0xb3: case 0xb5: case 0xba: case 0xbf: case 0xcd: case 0xce: case 0xd5: case 0xda: // M  
34736 +    case 0xdc: case 0xdf: case 0xe5: case 0xea: case 0xec: case 0xef: case 0xf7: case 0xfb: case 0xfd: case 0xfe: // M  
34737 +       return ("multiple");
34738 +
34739 +    default:   // all other cases
34740 +       return ("double");
34741 +    }
34742 +}
34743 +
34744 +char *
34745 +elan4_sdramerr2str (ELAN4_DEV *dev, E4_uint64 status, char *str)
34746 +{
34747 +    E4_uint64 StartupSyndrome    = dev->dev_sdram_initial_ecc_val;
34748 +    int       RisingDQSsyndrome  = ((ECC_RisingDQSSyndrome(status) == ECC_RisingDQSSyndrome(StartupSyndrome)) ?
34749 +                                   0 : ECC_RisingDQSSyndrome(status));
34750 +    int              FallingDQSsyndrome = ((ECC_FallingDQSSyndrome(status) == ECC_FallingDQSSyndrome(StartupSyndrome)) ?
34751 +                                   0 : ECC_FallingDQSSyndrome(status));
34752 +    E4_uint64 Addr = ECC_Addr(status);
34753 +    int       Bank = (Addr >> 6) & 3;
34754 +    int       Cas  = ((Addr >> 3) & 7) | ((Addr >> (8 - 3)) & 0xf8) | ((Addr >> (25 - 8)) & 0x100) |
34755 +                    ((Addr >> (27 - 9)) & 0x200) | ((Addr >> (29 - 10)) & 0xc00);
34756 +    int       Ras  = ((Addr >> 13) & 0xfff) | ((Addr >> (26 - 12)) & 0x1000) | ((Addr >> (28 - 13)) & 0x2000) |
34757 +                    ((Addr >> (30 - 14)) & 0x4000);
34758 +
34759 +    sprintf (str, "Addr=%07llx Bank=%x Ras=%x Cas=%x Falling DQS=%s Rising DQS=%s Syndrome=%x%s%s%s%s",                /* 41 + 16 + 8 + 15 + 24 + 13 + 22 + 10 + 10 == 151 */
34760 +            (long long)Addr, Bank, Ras, Cas,
34761 +            get_correctableErr_bitpos(FallingDQSsyndrome),
34762 +            get_correctableErr_bitpos(RisingDQSsyndrome),
34763 +            (int)ECC_Syndrome(status),
34764 +            ECC_UncorrectableErr(status)   ? " Uncorrectable" : "",
34765 +            ECC_MultUncorrectErrs(status)  ? " Multiple-Uncorrectable" : "",
34766 +            ECC_CorrectableErr(status)     ? " Correctable" : "",
34767 +            ECC_MultCorrectErrs(status)    ? " Multiple-Correctable" : "");
34768 +
34769 +    return str;
34770 +}
34771 +
34772 +/*
34773 + * Local variables:
34774 + * c-file-style: "stroustrup"
34775 + * End:
34776 + */
34777 Index: linux-2.4.21/drivers/net/qsnet/elan4/trap.c
34778 ===================================================================
34779 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/trap.c    2004-02-23 16:02:56.000000000 -0500
34780 +++ linux-2.4.21/drivers/net/qsnet/elan4/trap.c 2005-06-01 23:12:54.619436064 -0400
34781 @@ -0,0 +1,778 @@
34782 +/*
34783 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
34784 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
34785 + * 
34786 + *    For licensing information please see the supplied COPYING file
34787 + *
34788 + */
34789 +
34790 +#ident "@(#)$Id: trap.c,v 1.19.10.2 2004/11/03 14:24:32 duncant Exp $"
34791 +/*      $Source: /cvs/master/quadrics/elan4mod/trap.c,v $*/
34792 +
34793 +#include <qsnet/kernel.h>
34794 +
34795 +#include <elan4/debug.h>
34796 +#include <elan4/device.h>
34797 +
34798 +#include <elan4/trtype.h>
34799 +#include <elan4/commands.h>
34800 +
34801 +char * const PermTypes[16] = 
34802 +{
34803 +    "Disabled",       "Unused",          "LocalDataRead", "LocalDataWrite",
34804 +    "LocalRead",      "LocalExecute",    "ReadOnly",      "LocalWrite",
34805 +    "LocalEventOnly", "LocalEventWrite", "RemoteEvent",   "RemoteAll",
34806 +    "RemoteReadOnly", "RemoteWriteOnly", "DataReadWrite", "NoFault",
34807 +};
34808 +
34809 +char * const AccTypes[] =
34810 +{
34811 +    "LocalDataRead ", "LocalDataWrite", "RemoteRead    ", "RemoteWrite   ",
34812 +    "Execute       ", "LocalEvent    ", "Unused        ", "RemoteEvent   "
34813 +};
34814 +char * const DataTypes[] = {"Byte ", "HWord", "Word ", "DWord"};
34815 +char * const PhysTypes[] = {"Special Read", "Special Write", "Physical Read", "Physical Write"};
34816 +    
34817 +char * const EProcTrapNames[] = {
34818 +    "EventProcNoFault",
34819 +    "EventProcAddressAlignment",
34820 +    "EventProcMemoryFault",
34821 +    "EventProcCountWrapError",
34822 +};
34823 +
34824 +char * const CProcTrapNames[] = {
34825 +    "CommandProcNoFault",
34826 +    "CommandProcInserterError",
34827 +    "CommandProcPermissionTrap",
34828 +    "CommandProcSendTransInvalid",
34829 +    "CommandProcSendTransExpected",
34830 +    "CommandProcDmaQueueOverflow",
34831 +    "CommandProcInterruptQueueOverflow",
34832 +    "CommandProcMemoryFault",
34833 +    "CommandProcRouteFetchFault",
34834 +    "CommandProcFailCountZero",
34835 +    "CommandProcAddressAlignment",
34836 +    "CommandProcWaitTrap",
34837 +    "CommandProcMultipleGuards",
34838 +    "CommandProcOpenOnGuardedChan",
34839 +    "CommandProcThreadQueueOverflow",
34840 +    "CommandProcBadData",
34841 +};
34842 +
34843 +char *const CProcInsertError[] = {
34844 +    "No Error",
34845 +    "Overflowed",
34846 +    "Invalid Write Size",
34847 +    "Invalid Write Order",
34848 +};
34849 +
34850 +char * const DProcTrapNames[] = {
34851 +    "DmaProcNoFault",
34852 +    "DmaProcRouteFetchFault",
34853 +    "DmaProcFailCountError",
34854 +    "DmaProcPacketAckError",
34855 +    "DmaProcRunQueueReadFault",
34856 +    "DmaProcQueueOverFlow",
34857 +};
34858 +
34859 +char *const IProcTrapNames[] = {
34860 +    "InputNoFault",
34861 +    "InputAddressAlignment",
34862 +    "InputMemoryFault",
34863 +    "InputInvalidTransType",
34864 +    "InputDmaQueueOverflow",
34865 +    "InputEventEngineTrapped",
34866 +    "InputCrcErrorAfterPAckOk",
34867 +    "InputEopErrorOnWaitForEop",
34868 +    "InputEopErrorTrap",
34869 +    "InputDiscardAfterAckOk",
34870 +};
34871 +
34872 +char *const TProcTrapNames[] = {
34873 +    "HaltThread",
34874 +    "TrapForTooManyInstructions",
34875 +    "InstAccessException",
34876 +    "Unimplemented",
34877 +    "DataAccessException",
34878 +    "DataAlignmentError",
34879 +    "TrapForUsingBadData",
34880 +};
34881 +
34882 +#define declare_spaces(space, str)             char space[64]; do { int i; for (i = 0; i < strlen(str); i++) spaces[i] = ' '; space[i] = '\0'; } while (0)
34883 +#define declare_prefix(space, spaces, str)     char space[64]; do { strcpy (space, spaces); strcat (space, str); } while (0)
34884 +
34885 +void
34886 +elan4_display_farea (void *type, int mode, char *str, E4_FaultSave *farea)
34887 +{
34888 +    E4_uint32 FSR = FaultSaveFSR(farea->FSRAndFaultContext);
34889 +
34890 +    declare_spaces(spaces, str);
34891 +    
34892 +    elan4_debugf (type, mode, "%s Fault occurred at %016llx for context %4x\n", str,
34893 +                 farea->FaultAddress, FaultSaveContext(farea->FSRAndFaultContext));
34894 +    
34895 +    if (FSR & AT_VirtualWriteAccBit)                           /* Virtual write access */
34896 +       elan4_debugf (type, mode, "%s FSR=%x: Virtual Write. DWSize=0x%x EndP=0x%x Access=%s DT=%s\n",
34897 +                     spaces, FSR, FSR & AT_VirtualWriteSizeMask,
34898 +                     (FSR >> AT_VirtualWriteEndPtrShift) & AT_VirtualWriteEndPtrMask,
34899 +                     AccTypes[(FSR >> AT_PermBitsShift) & AT_PermBitsMask],
34900 +                     DataTypes[(FSR >> AT_BlkDataTyShift) & AT_BlkDataTyMask]);
34901 +    else if (FSR & AT_VirtualReadAccBit)                       /* Virtual read access */
34902 +       elan4_debugf (type, mode, "%s FSR=%x: Virtual Read. DWSize=0x%x Access=%s DT=%s\n",
34903 +                     spaces, FSR, FSR & AT_VirtualReadSizeMask,
34904 +                     AccTypes[(FSR >> AT_PermBitsShift) & AT_PermBitsMask],
34905 +                     DataTypes[(FSR >> AT_BlkDataTyShift) & AT_BlkDataTyMask]);
34906 +    else
34907 +       elan4_debugf (type, mode, "%s FSR=%x: %s. Size=0x%x\n", spaces,
34908 +                     FSR, PhysTypes[(FSR >> AT_SelBitsShift) & AT_SelBitsMask],
34909 +                     FSR & AT_OtherSizeMask);
34910 +    elan4_debugf (type, mode, "%s FSR: %s %s%s %sWalking\n", spaces,
34911 +                 (FSR & AT_NonAlloc) ? "NonAlloc" : "Alloc",
34912 +                 (FSR & AT_DmaData) ? "Dma " : "",
34913 +                 (FSR & FSR_WalkForThread) ? "ThreadAcc" : "UnitsAcc",
34914 +                 (FSR & FSR_Walking) ? "" : "Not");
34915 +    PRINTF (type, mode, "%s FSR: %s%sHashTable=%s\n", spaces,
34916 +           (FSR & FSR_NoTranslationsFound) ? "NoTranslationsFound " : "",
34917 +           (FSR & FSR_WalkingProtectionFault) ? "WalkingProtectionFault " : "",
34918 +           (FSR & FSR_HashTable1) ? "1" : "0");
34919 +    if (FSR & (FSR_RouteVProcErr | FSR_FaultForBadData))
34920 +       elan4_debugf (type, mode, "%s FSR: %s%s\n", spaces,
34921 +                     (FSR & FSR_RouteVProcErr) ? "RouteVProcErr " : "",
34922 +                     (FSR & FSR_FaultForBadData) ? "FaultForBadData " : "");
34923 +}
34924 +
34925 +void
34926 +elan4_display_eproc_trap (void *type, int mode, char *str, ELAN4_EPROC_TRAP *trap)
34927 +{
34928 +    declare_spaces (spaces, str);
34929 +
34930 +    elan4_debugf (type, mode, "%s Status=%016llx %s EventAddr=%016llx CountAndType=%016llx\n", str,
34931 +                 trap->tr_status, EProcTrapNames[EPROC_TrapType(trap->tr_status)],
34932 +                 trap->tr_eventaddr, trap->tr_event.ev_CountAndType);
34933 +    elan4_debugf (type, mode, "%s Param=%016llx.%016llx\n", spaces,
34934 +                 trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
34935 +
34936 +    elan4_display_farea (type, mode, strcat (spaces, EPROC_Port0Fault(trap->tr_status) ? " EPROC0" : " EPROC1"), &trap->tr_faultarea);
34937 +}
34938 +
34939 +void
34940 +elan4_display_cproc_trap (void *type, int mode, char *str, ELAN4_CPROC_TRAP *trap)
34941 +{
34942 +    declare_spaces(spaces, str);
34943 +
34944 +    elan4_debugf (type, mode, "%s Status=%llx %s Command=%llx\n", str, trap->tr_status, 
34945 +                 CProcTrapNames[CPROC_TrapType(trap->tr_status)], trap->tr_command);
34946 +    elan4_debugf (type, mode, "%s Desc=%016llx %016llx %016llx %016llx\n", str,
34947 +                 trap->tr_qdesc.CQ_QueuePtrs, trap->tr_qdesc.CQ_HoldingValue,
34948 +                 trap->tr_qdesc.CQ_AckBuffers, trap->tr_qdesc.CQ_Control);
34949 +
34950 +    switch (CPROC_TrapType (trap->tr_status))
34951 +    {
34952 +    case CommandProcInserterError:
34953 +       elan4_debugf (type, mode, "%s   %s\n", str, CProcInsertError[CQ_RevB_ErrorType(trap->tr_qdesc.CQ_QueuePtrs)]);
34954 +       break;
34955 +
34956 +    case CommandProcWaitTrap:
34957 +       elan4_display_eproc_trap (type, mode, spaces, &trap->tr_eventtrap);
34958 +       break;
34959 +
34960 +    default:
34961 +       elan4_display_farea (type, mode, spaces, &trap->tr_faultarea);
34962 +       break;
34963 +    }
34964 +}
34965 +
34966 +void
34967 +elan4_display_dproc_trap (void *type, int mode, char *str, ELAN4_DPROC_TRAP *trap)
34968 +{
34969 +    declare_spaces (spaces, str);
34970 +
34971 +    elan4_debugf (type, mode, "%s status %llx - %s\n", str,
34972 +                 trap->tr_status, DProcTrapNames[DPROC_TrapType(trap->tr_status)]);
34973 +
34974 +    elan4_debugf (type, mode, "%s DESC %016llx %016llx %016llx %016llx\n", spaces, trap->tr_desc.dma_typeSize, 
34975 +                 trap->tr_desc.dma_cookie, trap->tr_desc.dma_vproc, trap->tr_desc.dma_srcAddr);
34976 +    elan4_debugf (type, mode, "%s      %016llx %016llx %016llx\n", spaces, trap->tr_desc.dma_dstAddr, 
34977 +                 trap->tr_desc.dma_srcEvent, trap->tr_desc.dma_dstEvent);
34978 +
34979 +    if (DPROC_PrefetcherFault (trap->tr_status))
34980 +       elan4_display_farea (type, mode, spaces, &trap->tr_prefetchFault);
34981 +}
34982 +
34983 +void
34984 +elan4_display_tproc_trap (void *type, int mode, char *str, ELAN4_TPROC_TRAP *trap)
34985 +{
34986 +    register int i;
34987 +    declare_spaces (spaces, str);
34988 +
34989 +    elan4_debugf (type, mode, "%s PC=%016llx nPC=%016llx State=%016llx Status=%016llx -%s%s%s%s\n", str,
34990 +                 trap->tr_pc, trap->tr_npc, trap->tr_state, trap->tr_status, 
34991 +                 (trap->tr_state & TS_TrapForTooManyInstructions) ? " TrapForTooManyInstructions" : "",
34992 +                 (trap->tr_state & TS_Unimplemented)              ? " Unimplemented"              : "",
34993 +                 (trap->tr_state & TS_DataAlignmentError)         ? " DataAlignmentError"         : "",
34994 +                 (trap->tr_state & TS_InstAccessException)        ? " InstAccessException"        : "",
34995 +                 (trap->tr_state & TS_DataAccessException)        ? " DataAccessException"        : "");
34996 +    
34997 +    for (i = 0; i < 64; i += 4)
34998 +       elan4_debugf (type, mode, "%s r%d - %016llx %016llx %016llx %016llx\n", spaces, i,
34999 +                     trap->tr_regs[i], trap->tr_regs[i+1], trap->tr_regs[i+2], trap->tr_regs[i+3]);
35000 +    
35001 +    if (trap->tr_state & TS_InstAccessException)
35002 +    {
35003 +       declare_prefix (prefix, spaces, "Inst");
35004 +
35005 +       elan4_display_farea (type, mode, prefix, &trap->tr_instFault);
35006 +    }
35007 +
35008 +    if (trap->tr_state & TS_DataAccessException)
35009 +    {
35010 +       declare_prefix (prefix, spaces, "Data");
35011 +       elan4_display_farea (type, mode, prefix, &trap->tr_dataFault);
35012 +    }
35013 +}
35014 +
35015 +void
35016 +elan4_display_iproc_trap (void *type, int mode, char *str, ELAN4_IPROC_TRAP *trap)
35017 +{
35018 +    register int i;
35019 +    declare_spaces (spaces, str);
35020 +
35021 +    for (i = 0; i < trap->tr_numTransactions; i++)
35022 +    {
35023 +       E4_IprocTrapHeader *hdrp    = &trap->tr_transactions[i];
35024 +       E4_uint64           status  = hdrp->IProcStatusCntxAndTrType;
35025 +       E4_Addr             addr    = hdrp->TrAddr;
35026 +       char               *typeString;
35027 +       char                buffer[256];
35028 +       char               *ptr = buffer;
35029 +       
35030 +       if (IPROC_EOPTrap(status))
35031 +       {
35032 +           switch (IPROC_EOPType(status))
35033 +           {
35034 +           case EOP_GOOD:        typeString = "EopGood";   break;
35035 +           case EOP_BADACK:      typeString = "EopBadAck"; break;
35036 +           case EOP_ERROR_RESET: typeString = "EopReset";  break;
35037 +           default:              typeString = "EopBad";    break;
35038 +           }
35039 +           
35040 +           ptr += sprintf (ptr, "%15s Cntx=%-6d", typeString, IPROC_NetworkContext(status));
35041 +       }
35042 +       else
35043 +       {
35044 +           if (IPROC_BadLength(status))
35045 +               typeString = "BadLength";
35046 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_DISCARD)
35047 +               typeString = "DiscardCrc";
35048 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_ERROR)
35049 +               typeString = "ErrorCrc Remote Network error";
35050 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_BAD)
35051 +               typeString = "BadCrc Cable error into this node.";
35052 +           else
35053 +           {
35054 +               if ((IPROC_TransactionType(status) & TR_BLOCK_OPCODE_MASK) == TR_WRITEBLOCK)
35055 +                   typeString = "WriteBlock";
35056 +               else
35057 +               {
35058 +                   switch (IPROC_TransactionType(status) & TR_OPCODE_MASK)
35059 +                   {
35060 +                   case TR_SETEVENT_IDENTIFY & TR_OPCODE_MASK: typeString = "SetEvent";        break;
35061 +                   case TR_REMOTEDMA & TR_OPCODE_MASK:         typeString = "RemoteDma";       break;
35062 +                   case TR_SENDDISCARD & TR_OPCODE_MASK:       typeString = "SendDiscard";     break;
35063 +                   case TR_GTE & TR_OPCODE_MASK:               typeString = "GTE";             break;
35064 +                   case TR_LT & TR_OPCODE_MASK:                typeString = "LT";              break;
35065 +                   case TR_EQ & TR_OPCODE_MASK:                typeString = "EQ";              break;
35066 +                   case TR_NEQ & TR_OPCODE_MASK:               typeString = "NEQ";             break;
35067 +                   case TR_IDENTIFY & TR_OPCODE_MASK:          typeString = "Identify";        break;
35068 +                   case TR_ADDWORD & TR_OPCODE_MASK:           typeString = "AddWord";         break;
35069 +                   case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:    typeString = "InputQCommit";    break;
35070 +                   case TR_TESTANDWRITE & TR_OPCODE_MASK:      typeString = "TestAndWrite";    break;
35071 +                   case TR_INPUT_Q_GETINDEX & TR_OPCODE_MASK:  typeString = "InputQGetIndex";  break;
35072 +                   case TR_TRACEROUTE_TRANS & TR_OPCODE_MASK:  typeString = "TraceRoute";      break;
35073 +                   default:                                    typeString = "Unknown";         break;
35074 +                   }
35075 +               }
35076 +           }
35077 +
35078 +           ptr += sprintf (ptr, "%15s Cntx=%-6d Addr=%016llx", typeString, IPROC_NetworkContext(status), (unsigned long long) addr);
35079 +       }
35080 +       
35081 +       
35082 +       if (IPROC_TrapValue(status) != InputNoFault)
35083 +       {
35084 +           ptr += sprintf (ptr, " TrType=%2d ChanTrapped=%x GoodAck=%x BadAck=%x InputterChan=%d", IPROC_TrapValue(status),
35085 +                           IPROC_ChannelTrapped(status), IPROC_GoodAckSent(status), IPROC_BadAckSent(status),
35086 +                           IPROC_InputterChan(status));
35087 +           if (IPROC_EOPTrap(status))
35088 +               ptr += sprintf (ptr, " EOPType=%d", IPROC_EOPType(status));
35089 +           else
35090 +               ptr += sprintf (ptr, " %s%s%s%s", 
35091 +                               IPROC_FirstTrans(status) ? " FirstTrans" : "",
35092 +                               IPROC_LastTrans(status) ? " LastTrans" : "",
35093 +                               (IPROC_TransactionType(status) & TR_WAIT_FOR_EOP) ? " WaitForEop" : "",
35094 +                               (IPROC_GoodAckSent(status) &  (1 << IPROC_Channel(status))) ? " AckSent" : "");
35095 +       }
35096 +       
35097 +       elan4_debugf (type, mode, "%s %s\n", str, buffer);
35098 +
35099 +       str = spaces;
35100 +    }
35101 +
35102 +    elan4_display_farea (type, mode, spaces, &trap->tr_faultarea);
35103 +}
35104 +
35105 +#define elan4_sdram_copy_faultarea(dev, unit, farea) \
35106 +    elan4_sdram_copyq_from_sdram ((dev), (dev)->dev_faultarea + (unit) * sizeof (E4_FaultSave), (E4_uint64 *) farea, sizeof (E4_FaultSave));
35107 +
35108 +void
35109 +elan4_extract_eproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_EPROC_TRAP *trap, int iswaitevent)
35110 +{
35111 +    /* only one of the memory ports can fault at a time */
35112 +    ASSERT (EPROC_TrapType(status) != EventProcMemoryFault || (EPROC_Port0Fault(status) ^ EPROC_Port1Fault(status)) == 1);
35113 +
35114 +    trap->tr_status = status;
35115 +    
35116 +    if (EPROC_Port0Fault(status))
35117 +       elan4_sdram_copy_faultarea (dev, CUN_EventProc0, &trap->tr_faultarea);
35118 +    if (EPROC_Port1Fault(status))
35119 +       elan4_sdram_copy_faultarea (dev, CUN_EventProc1, &trap->tr_faultarea);
35120 +
35121 +    if (iswaitevent)
35122 +    {
35123 +       /*
35124 +        * for waitevents the Event address is always taken from the command processor
35125 +        * 
35126 +        * if we trapped during the copy then we take the "Event" from the event processor
35127 +        * since we need to complete the copy.  Otherwise we'll be reissuing the original
35128 +        * command again
35129 +        */
35130 +       E4_uint32 fsr = FaultSaveFSR(trap->tr_faultarea.FSRAndFaultContext);
35131 +
35132 +       trap->tr_eventaddr = read_reg64 (dev, CommandHold) ^ WAIT_EVENT_CMD;
35133 +
35134 +       if (EPROC_TrapType(trap->tr_status) == EventProcMemoryFault && 
35135 +           (AT_Perm(fsr) == AT_PermLocalDataRead || AT_Perm(fsr) == AT_PermLocalDataWrite))
35136 +       {
35137 +           trap->tr_event.ev_CountAndType = read_reg64 (dev, EventCountAndType);
35138 +           trap->tr_event.ev_Params[0]    = read_reg64 (dev, EventParameters[0]);
35139 +           trap->tr_event.ev_Params[1]    = read_reg64 (dev, EventParameters[1]);
35140 +       }
35141 +       else
35142 +       {
35143 +           trap->tr_event.ev_Params[0]    = read_reg64 (dev, CommandCopy[5]);
35144 +           trap->tr_event.ev_CountAndType = read_reg64 (dev, CommandCopy[4]);
35145 +           trap->tr_event.ev_Params[1]    = read_reg64 (dev, CommandCopy[6]);
35146 +
35147 +       }
35148 +    }
35149 +    else
35150 +    {
35151 +       trap->tr_eventaddr             = read_reg64 (dev, EventAddress);
35152 +       trap->tr_event.ev_CountAndType = read_reg64 (dev, EventCountAndType);
35153 +       trap->tr_event.ev_Params[0]    = read_reg64 (dev, EventParameters[0]);
35154 +       trap->tr_event.ev_Params[1]    = read_reg64 (dev, EventParameters[1]);
35155 +    }
35156 +
35157 +    BumpDevStat (dev, s_eproc_trap_types[EPROC_TrapType(status)]);
35158 +}
35159 +
35160 +int 
35161 +cproc_open_extract_vp (ELAN4_DEV *dev, ELAN4_CQ *cq)
35162 +{
35163 +       /* cq = ucq->ucq_cq */
35164 +       if ((cq->cq_perm & CQ_STENEnableBit) != 0)
35165 +       {
35166 +            sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
35167 +           E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
35168 +           sdramaddr_t   insertPtr    = (queuePtrs & CQ_PtrMask);
35169 +           sdramaddr_t   commandPtr   = CQ_CompletedPtr (queuePtrs);
35170 +           unsigned int  cqSize       = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
35171 +           E4_uint64     openCommand  = 0;
35172 +
35173 +           if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && (queuePtrs & CQ_RevB_ReorderingQueue))
35174 +           {
35175 +               E4_uint32 oooMask = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue));
35176 +
35177 +               for (; (oooMask & 1) != 0; oooMask >>= 1)
35178 +                   insertPtr = (insertPtr & ~(cqSize-1)) | ((insertPtr + sizeof (E4_uint64)) & (cqSize-1));
35179 +           }
35180 +
35181 +           while (commandPtr != insertPtr)
35182 +           {
35183 +               E4_uint64    command = elan4_sdram_readq (dev, commandPtr);
35184 +               unsigned int cmdSize;
35185 +
35186 +                switch (__categorise_command (command, &cmdSize))
35187 +               {
35188 +               case 0:
35189 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
35190 +                   break;
35191 +
35192 +               case 1: /* open */
35193 +                   return (command >> 32);
35194 +                           
35195 +                   break; /* Not reached */
35196 +
35197 +               case 2:
35198 +                   if (openCommand == 0)
35199 +                       (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
35200 +                   /* Else we should have stopped by now */
35201 +                   else ASSERT(1==2);
35202 +               case 3:
35203 +                   printk ("cproc_open_extract_vp: invalid command %llx\n", command);
35204 +                   return -1;
35205 +               }
35206 +           } /* while */
35207 +       }
35208 +
35209 +       return -1;
35210 +}
35211 +
35212 +void
35213 +elan4_extract_cproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_CPROC_TRAP *trap, unsigned cqnum)
35214 +{
35215 +    /* extract the state from the device */
35216 +    elan4_sdram_copy_faultarea (dev, CUN_CommandProc, &trap->tr_faultarea);
35217 +
35218 +    trap->tr_status  = status;
35219 +    trap->tr_command = read_reg64 (dev, CommandHold);
35220 +    
35221 +    elan4_sdram_copyq_from_sdram (dev, dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc)), &trap->tr_qdesc, sizeof (E4_CommandQueueDesc));
35222 +
35223 +    if (CPROC_TrapType (status) == CommandProcWaitTrap)
35224 +       elan4_extract_eproc_trap (dev, read_reg64 (dev, EProcStatus), &trap->tr_eventtrap, 1);
35225 +
35226 +    BumpDevStat (dev, s_cproc_trap_types[CPROC_TrapType(status)]);
35227 +
35228 +    if (PackValue(trap->tr_qdesc.CQ_AckBuffers, 0) == PackTimeout || PackValue(trap->tr_qdesc.CQ_AckBuffers, 1) == PackTimeout)
35229 +       BumpDevStat (dev, s_cproc_timeout);
35230 +}
35231 +
35232 +void
35233 +elan4_extract_dproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_DPROC_TRAP *trap, unsigned unit)
35234 +{
35235 +    trap->tr_status = status;
35236 +    
35237 +    if (unit == 0)
35238 +    {
35239 +       trap->tr_desc.dma_typeSize   = read_reg64 (dev, Dma0Desc.dma_typeSize);
35240 +       trap->tr_desc.dma_cookie     = read_reg64 (dev, Dma0Desc.dma_cookie);
35241 +       trap->tr_desc.dma_vproc      = read_reg64 (dev, Dma0Desc.dma_vproc);
35242 +       trap->tr_desc.dma_srcAddr    = read_reg64 (dev, Dma0Desc.dma_srcAddr);
35243 +       trap->tr_desc.dma_dstAddr    = read_reg64 (dev, Dma0Desc.dma_dstAddr);
35244 +       trap->tr_desc.dma_srcEvent   = read_reg64 (dev, Dma0Desc.dma_srcEvent);
35245 +       trap->tr_desc.dma_dstEvent   = read_reg64 (dev, Dma0Desc.dma_dstEvent);
35246 +       
35247 +       elan4_sdram_copy_faultarea (dev, CUN_DProcPA0, &trap->tr_packAssemFault);
35248 +    }
35249 +    else
35250 +    {
35251 +       trap->tr_desc.dma_typeSize   = read_reg64 (dev, Dma1Desc.dma_typeSize);
35252 +       trap->tr_desc.dma_cookie     = read_reg64 (dev, Dma1Desc.dma_cookie);
35253 +       trap->tr_desc.dma_vproc      = read_reg64 (dev, Dma1Desc.dma_vproc);
35254 +       trap->tr_desc.dma_srcAddr    = read_reg64 (dev, Dma1Desc.dma_srcAddr);
35255 +       trap->tr_desc.dma_dstAddr    = read_reg64 (dev, Dma1Desc.dma_dstAddr);
35256 +       trap->tr_desc.dma_srcEvent   = read_reg64 (dev, Dma1Desc.dma_srcEvent);
35257 +       trap->tr_desc.dma_dstEvent   = read_reg64 (dev, Dma1Desc.dma_dstEvent);
35258 +       
35259 +       elan4_sdram_copy_faultarea (dev, CUN_DProcPA1, &trap->tr_packAssemFault);
35260 +    }
35261 +    
35262 +    if (DPROC_PrefetcherFault (trap->tr_status))
35263 +       elan4_sdram_copy_faultarea (dev, (CUN_DProcData0 | DPROC_FaultUnitNo(trap->tr_status)), &trap->tr_prefetchFault);
35264 +
35265 +    if (DPROC_PacketTimeout (trap->tr_status))
35266 +       BumpDevStat (dev, s_dproc_timeout);
35267 +
35268 +    BumpDevStat (dev, s_dproc_trap_types[DPROC_TrapType(status)]);
35269 +}    
35270 +
35271 +void
35272 +elan4_extract_tproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_TPROC_TRAP *trap)
35273 +{
35274 +    int i;
35275 +
35276 +    trap->tr_status = status;
35277 +    trap->tr_state  = read_reg64 (dev, Thread_Trap_State);
35278 +    trap->tr_pc     = read_reg64 (dev, PC_W);
35279 +    trap->tr_npc    = read_reg64 (dev, nPC_W);
35280 +    trap->tr_dirty  = read_reg64 (dev, DirtyBits);
35281 +    trap->tr_bad    = read_reg64 (dev, BadBits);
35282 +
35283 +#ifdef CONFIG_MPSAS
35284 +    if (sas_copyfrom_dev (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS, 
35285 +                         ((dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) ? ELAN4_REVA_REG_OFFSET : ELAN4_REVB_REG_OFFSET) +
35286 +                         offsetof (E4_Registers, Regs.TProcRegs), (unsigned long) &trap->tr_regs, 64*sizeof (E4_uint64)) < 0)
35287 +    {
35288 +       for (i = 0; i < 64; i++)
35289 +           if (trap->tr_dirty & ((E4_uint64) 1 << i))
35290 +               trap->tr_regs[i] = read_reg64 (dev, TProcRegs[i]);
35291 +    }
35292 +
35293 +    for (i = 0; i < 64; i++)
35294 +       if (! (trap->tr_dirty & ((E4_uint64) 1 << i)))
35295 +           trap->tr_regs[i] = 0xdeadbabedeadbabeULL;
35296 +#else
35297 +    for (i = 0; i < 64; i++)
35298 +    {
35299 +       if (trap->tr_dirty & ((E4_uint64) 1 << i))
35300 +           trap->tr_regs[i] = read_reg64 (dev, TProcRegs[i]);
35301 +       else
35302 +           trap->tr_regs[i] = 0xdeadbabedeadbabeULL;
35303 +    }
35304 +#endif
35305 +    
35306 +    if (trap->tr_state & TS_DataAccessException)
35307 +       elan4_sdram_copy_faultarea (dev, CUN_TProcData0 | TS_DataPortNo (trap->tr_state), &trap->tr_dataFault);
35308 +
35309 +    if (trap->tr_state & TS_InstAccessException)
35310 +       elan4_sdram_copy_faultarea (dev, CUN_TProcInst, &trap->tr_instFault);
35311 +
35312 +    for (i = 0; i < 7; i++)
35313 +       if (trap->tr_state & (1 << i))
35314 +           BumpDevStat (dev, s_tproc_trap_types[i]);
35315 +}
35316 +
35317 +void
35318 +elan4_extract_iproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_IPROC_TRAP *trap, unsigned unit)
35319 +{
35320 +    sdramaddr_t hdroff  = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrHeader[0][unit]);
35321 +    sdramaddr_t dataoff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrData[0][unit]);
35322 +    register int i, j;
35323 +    int                  CurrUnitNo    = (unit >= 2) ? CUN_IProcHighPri : CUN_IProcLowPri;
35324 +    sdramaddr_t CurrFaultArea = dev->dev_faultarea + (CurrUnitNo * sizeof (E4_FaultSave));
35325 +
35326 +    /* Finally copy the fault area */
35327 +    elan4_sdram_copy_faultarea (dev, CurrUnitNo, &trap->tr_faultarea);
35328 +
35329 +    /*
35330 +     * Clear out the fault save area after reading to allow a fault on the write of the back pointer of
35331 +     * an InputQCommit to be observed if a simultaneous event proc trap occurs.
35332 +     */
35333 +    elan4_sdram_writeq (dev, CurrFaultArea + offsetof(E4_FaultSave, FSRAndFaultContext), 0x0ULL);
35334 +    elan4_sdram_writeq (dev, CurrFaultArea + offsetof(E4_FaultSave, FaultAddress), 0x0ULL);
35335 +
35336 +    /* copy the transaction headers */
35337 +    trap->tr_transactions[0].IProcStatusCntxAndTrType = status;
35338 +    trap->tr_transactions[0].TrAddr                   = elan4_sdram_readq (dev, hdroff + offsetof (E4_IprocTrapHeader, TrAddr));
35339 +    
35340 +    for (i = 0; !IPROC_EOPTrap(trap->tr_transactions[i].IProcStatusCntxAndTrType);)
35341 +    {
35342 +       if (IPROC_BadLength (trap->tr_transactions[i].IProcStatusCntxAndTrType))
35343 +           BumpDevStat (dev, s_bad_length);
35344 +       else if (IPROC_TransCRCStatus (trap->tr_transactions[i].IProcStatusCntxAndTrType) == CRC_STATUS_BAD)
35345 +           BumpDevStat (dev, s_crc_bad);
35346 +       else if (IPROC_TransCRCStatus (trap->tr_transactions[i].IProcStatusCntxAndTrType) == CRC_STATUS_ERROR)
35347 +           BumpDevStat (dev, s_crc_error);
35348 +
35349 +       BumpDevStat (dev, s_iproc_trap_types[IPROC_TrapValue (trap->tr_transactions[i].IProcStatusCntxAndTrType)]);
35350 +
35351 +       hdroff += NO_OF_INPUT_CHANNELS*sizeof (E4_IprocTrapHeader);
35352 +
35353 +       if (++i == MAX_TRAPPED_TRANS)
35354 +           break;
35355 +
35356 +       elan4_sdram_copyq_from_sdram (dev, hdroff, &trap->tr_transactions[i], sizeof (E4_IprocTrapHeader));
35357 +    }
35358 +    
35359 +    if (IPROC_EOPType (trap->tr_transactions[i].IProcStatusCntxAndTrType) == EOP_ERROR_RESET)
35360 +       BumpDevStat (dev, s_eop_reset);
35361 +
35362 +    /* Remember the number of transactions we've copied */
35363 +    trap->tr_numTransactions = i + 1;
35364 +    
35365 +    /* Copy all the data blocks in one go */
35366 +    for (i = 0; i < MIN (trap->tr_numTransactions, MAX_TRAPPED_TRANS); i++, dataoff += NO_OF_INPUT_CHANNELS*sizeof (E4_IprocTrapData))
35367 +    {
35368 +       if (IPROC_BadLength(status) || IPROC_TransCRCStatus (status) != CRC_STATUS_GOOD)
35369 +           elan4_sdram_copyq_from_sdram (dev, dataoff, trap->tr_dataBuffers[i].Data, TRANS_DATA_DWORDS*sizeof(E4_uint64));
35370 +       else
35371 +       {
35372 +           int trtype  = IPROC_TransactionType(trap->tr_transactions[i].IProcStatusCntxAndTrType);
35373 +           int ndwords = (trtype & TR_SIZE_MASK) >> TR_SIZE_SHIFT;
35374 +
35375 +           elan4_sdram_copyq_from_sdram (dev, dataoff, trap->tr_dataBuffers[i].Data, ndwords*sizeof(E4_uint64));
35376 +
35377 +           for (j = ndwords; j < TRANS_DATA_DWORDS; j++)
35378 +               trap->tr_dataBuffers[i].Data[j] = 0xbeec0f212345678ull;
35379 +       }
35380 +    }
35381 +    
35382 +}
35383 +
35384 +void
35385 +elan4_inspect_iproc_trap (ELAN4_IPROC_TRAP *trap)
35386 +{
35387 +    int i;
35388 +
35389 +    trap->tr_flags          = 0;
35390 +    trap->tr_trappedTrans    = TR_TRANS_INVALID;
35391 +    trap->tr_waitForEopTrans = TR_TRANS_INVALID;
35392 +    trap->tr_identifyTrans   = TR_TRANS_INVALID;
35393 +
35394 +    if (trap->tr_numTransactions > MAX_TRAPPED_TRANS)
35395 +       trap->tr_flags = TR_FLAG_TOOMANY_TRANS;
35396 +
35397 +    /*
35398 +     * Now scan all the transactions received 
35399 +     */
35400 +    for (i = 0; i < MIN(trap->tr_numTransactions, MAX_TRAPPED_TRANS) ; i++)
35401 +    {
35402 +       E4_IprocTrapHeader *hdrp   = &trap->tr_transactions[i];
35403 +       E4_uint64           status = hdrp->IProcStatusCntxAndTrType;
35404 +
35405 +       if (trap->tr_identifyTrans == TR_TRANS_INVALID)
35406 +       {
35407 +           switch (IPROC_TransactionType (status) & (TR_OPCODE_MASK | TR_SIZE_MASK))
35408 +           {
35409 +           case TR_IDENTIFY          & (TR_OPCODE_MASK | TR_SIZE_MASK):
35410 +           case TR_REMOTEDMA         & (TR_OPCODE_MASK | TR_SIZE_MASK):
35411 +           case TR_SETEVENT_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK):
35412 +           case TR_INPUT_Q_COMMIT    & (TR_OPCODE_MASK | TR_SIZE_MASK):
35413 +           case TR_ADDWORD           & (TR_OPCODE_MASK | TR_SIZE_MASK):
35414 +           case TR_TESTANDWRITE      & (TR_OPCODE_MASK | TR_SIZE_MASK):
35415 +               trap->tr_identifyTrans = i;
35416 +               break;
35417 +           }
35418 +       }
35419 +
35420 +       if (IPROC_TrapValue(status) == InputNoFault)            /* We're looking at transactions stored before the trap */
35421 +           continue;                                           /* these should only be identifies */
35422 +       
35423 +       if (trap->tr_trappedTrans == TR_TRANS_INVALID)          /* Remember the transaction which caused the */
35424 +           trap->tr_trappedTrans = i;                          /* trap */
35425 +
35426 +       if (IPROC_GoodAckSent (status) & (1 << IPROC_InputterChan (status)))
35427 +           trap->tr_flags |= TR_FLAG_ACK_SENT;
35428 +           
35429 +       if (IPROC_EOPTrap(status))                              /* Check for EOP */
35430 +       {
35431 +           ASSERT (i == trap->tr_numTransactions - 1);
35432 +
35433 +           switch (IPROC_EOPType(status))
35434 +           {
35435 +           case EOP_GOOD:
35436 +               /* if we get an EOP_GOOD then the outputer should have received a PAckOk. */  
35437 +               /* unless it was a flood, in which case someone must have sent an ack */
35438 +               /* but not necessarily us */
35439 +               break;
35440 +
35441 +           case EOP_BADACK:
35442 +               /* if we get an EOP_BADACK then the outputer did not receive a PAckOk even if
35443 +                * we sent a PAckOk. We flag this to ignore the AckSent. */
35444 +               trap->tr_flags |= TR_FLAG_EOP_BAD;
35445 +               break;
35446 +
35447 +           case EOP_ERROR_RESET:
35448 +               /* if we get an EOP_ERROR_RESET then the outputer may or may not have got a PAckOk. */
35449 +               trap->tr_flags |= TR_FLAG_EOP_ERROR;
35450 +               break;
35451 +
35452 +           default:
35453 +               printk ("elan4_inspect_iproc_trap: unknown eop type %d\n", IPROC_EOPType(status));
35454 +               BUG();
35455 +               /* NOTREACHED */
35456 +           }
35457 +           continue;
35458 +       }
35459 +       else
35460 +       {
35461 +           if (IPROC_BadLength(status) || (IPROC_TransCRCStatus (status) == CRC_STATUS_ERROR ||
35462 +                                           IPROC_TransCRCStatus (status) == CRC_STATUS_BAD))
35463 +           {
35464 +               {
35465 +                   register int j;
35466 +                   if (IPROC_BadLength(status))
35467 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: Trapped on bad length data. status=%016llx Address=%016llx\n",
35468 +                                status, hdrp->TrAddr);
35469 +                   else
35470 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: Trapped with bad CRC. status=%016llx Address=%016llx\n",
35471 +                                status, hdrp->TrAddr);
35472 +                   for (j = 0; j < TRANS_DATA_DWORDS; j++)
35473 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: DataBuffers[%d] : %016llx\n", j, trap->tr_dataBuffers[i].Data[j]);
35474 +               }
35475 +
35476 +               trap->tr_flags |= TR_FLAG_BAD_TRANS;
35477 +               continue;
35478 +           }
35479 +           
35480 +           if (IPROC_TransCRCStatus (status) == CRC_STATUS_DISCARD)
35481 +               continue;
35482 +
35483 +           if ((((IPROC_TransactionType(status) & TR_BLOCK_OPCODE_MASK) == TR_WRITEBLOCK) ||
35484 +                (IPROC_TransactionType(status) == TR_TRACEROUTE_TRANS)) &&
35485 +               (trap->tr_flags & TR_FLAG_ACK_SENT) && trap->tr_identifyTrans == TR_TRANS_INVALID)
35486 +           {
35487 +               /* 
35488 +                * Writeblock after the ack is sent without an identify transaction - this is 
35489 +                * considered to be a DMA packet and requires the next packet to be nacked - since 
35490 +                * the DMA processor will send this in a deterministic time and there's an upper 
35491 +                * limit on the network latency (the output timeout) we just need to hold the context 
35492 +                * filter up for a while.
35493 +                */
35494 +               trap->tr_flags |= TR_FLAG_DMA_PACKET;
35495 +           }
35496 +           
35497 +           if (IPROC_LastTrans(status) && (IPROC_TransactionType(status) & TR_WAIT_FOR_EOP))
35498 +           {
35499 +               /*
35500 +                * WaitForEop transactions - if we have to do network error fixup
35501 +                * then we may need to execute/ignore this transaction dependent
35502 +                * on whether the source will be resending it.
35503 +                */
35504 +               trap->tr_waitForEopTrans = i;
35505 +           }
35506 +
35507 +           /*
35508 +            * This is a special case caused by a minor input processor bug.
35509 +            * If simultaneous InputMemoryFault and InputEventEngineTrapped occur then the chip will probably return
35510 +            * InputEventEngineTrapped even though the write of the back pointer has not occurred and must be done by
35511 +            * the trap handler.
35512 +            * In this case the fault address will equal q->q_bptr. If there has been only EventEngineTrap then
35513 +            * the fault address should be zero as the trap handler now always zeros this after every input trap.
35514 +            */
35515 +           if ((IPROC_TransactionType (status) & TR_OPCODE_MASK) == (TR_INPUT_Q_COMMIT & TR_OPCODE_MASK) &&
35516 +               trap->tr_faultarea.FaultAddress == hdrp->TrAddr + offsetof(E4_InputQueue, q_bptr) &&
35517 +               IPROC_TrapValue(status) == InputEventEngineTrapped)
35518 +           {
35519 +               hdrp->IProcStatusCntxAndTrType = (status & 0xFFFFFFF0FFFFFFFFull) | ((E4_uint64) InputMemoryFault << 32);
35520 +           }
35521 +       }
35522 +
35523 +       PRINTF (DBG_DEVICE, DBG_INTR, "inspect[%d] status=%llx TrapValue=%d -> flags %x\n", i, status, IPROC_TrapValue(status), trap->tr_flags);
35524 +    }
35525 +}
35526 +
35527 +E4_uint64
35528 +elan4_trapped_open_command (ELAN4_DEV *dev, ELAN4_CQ *cq)
35529 +{
35530 +    sdramaddr_t cqdesc     = dev->dev_cqaddr + elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc);
35531 +    E4_uint64   cqcontrol  = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
35532 +    E4_uint32   extractOff = CQ_ExtractPtr (cqcontrol) & (CQ_Size(cq->cq_size)-1);
35533 +    
35534 +    if (extractOff == 0)
35535 +       extractOff = CQ_Size(cq->cq_size) - sizeof (E4_uint64);
35536 +    else
35537 +       extractOff -= sizeof (E4_uint64);
35538 +
35539 +    return (elan4_sdram_readq (dev, cq->cq_space + extractOff));
35540 +}
35541 +
35542 +EXPORT_SYMBOL(elan4_extract_eproc_trap);
35543 +EXPORT_SYMBOL(elan4_display_eproc_trap);
35544 +EXPORT_SYMBOL(elan4_extract_cproc_trap);
35545 +EXPORT_SYMBOL(elan4_display_cproc_trap);
35546 +EXPORT_SYMBOL(elan4_extract_dproc_trap);
35547 +EXPORT_SYMBOL(elan4_display_dproc_trap);
35548 +EXPORT_SYMBOL(elan4_extract_tproc_trap);
35549 +EXPORT_SYMBOL(elan4_display_tproc_trap);
35550 +EXPORT_SYMBOL(elan4_extract_iproc_trap);
35551 +EXPORT_SYMBOL(elan4_inspect_iproc_trap);
35552 +EXPORT_SYMBOL(elan4_display_iproc_trap);
35553 +
35554 +
35555 +/*
35556 + * Local variables:
35557 + * c-file-style: "stroustrup"
35558 + * End:
35559 + */
35560 Index: linux-2.4.21/drivers/net/qsnet/elan4/user.c
35561 ===================================================================
35562 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/user.c    2004-02-23 16:02:56.000000000 -0500
35563 +++ linux-2.4.21/drivers/net/qsnet/elan4/user.c 2005-06-01 23:12:54.624435304 -0400
35564 @@ -0,0 +1,3352 @@
35565 +/*
35566 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
35567 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
35568 + * 
35569 + *    For licensing information please see the supplied COPYING file
35570 + *
35571 + */
35572 +
35573 +#ident "@(#)$Id: user.c,v 1.68.2.9 2004/12/20 16:56:51 mike Exp $"
35574 +/*      $Source: /cvs/master/quadrics/elan4mod/user.c,v $*/
35575 +
35576 +#include <qsnet/kernel.h>
35577 +#include <qsnet/kpte.h>
35578 +
35579 +#include <elan/elanmod.h>
35580 +#include <elan4/debug.h>
35581 +#include <elan4/device.h>
35582 +#include <elan4/user.h>
35583 +
35584 +#include <elan4/trtype.h>
35585 +#include <elan4/commands.h>
35586 +
35587 +#include <stdarg.h>
35588 +
35589 +/* allow this code to compile against an Eagle elanmod */
35590 +#ifdef __ELANMOD_DEVICE_H
35591 +#define elan_attach_cap(cap,rnum,args,func)    elanmod_attach_cap(cap,args,func)
35592 +#define elan_detach_cap(cap,rnum)              elanmod_detach_cap(cap)
35593 +#endif
35594 +
35595 +#define NETERR_MSGS    16
35596 +
35597 +int user_p2p_route_options   = FIRST_TIMEOUT(3);
35598 +int user_bcast_route_options = FIRST_TIMEOUT(3);
35599 +int user_dproc_retry_count   = 15;
35600 +int user_cproc_retry_count   = 2;
35601 +
35602 +int num_fault_save           = 30;
35603 +int min_fault_pages          = 1;
35604 +int max_fault_pages          = 128;
35605 +
35606 +static int
35607 +user_validate_cap (USER_CTXT *uctx, ELAN_CAPABILITY *cap, unsigned use)
35608 +{
35609 +    /* Don't allow a user process to attach to system context */
35610 +    if (ELAN4_SYSTEM_CONTEXT (cap->cap_lowcontext) || ELAN4_SYSTEM_CONTEXT (cap->cap_highcontext))
35611 +    {
35612 +       PRINTF3 (DBG_DEVICE, DBG_VP,"user_validate_cap: lctx %x hctx %x high %x\n", cap->cap_lowcontext, cap->cap_highcontext, ELAN4_KCOMM_BASE_CONTEXT_NUM);
35613 +       PRINTF0 (DBG_DEVICE, DBG_VP,"user_validate_cap: user process cant attach to system cap\n");
35614 +       return (EINVAL);
35615 +    }
35616 +    
35617 +    return elanmod_classify_cap(&uctx->uctx_position, cap, use);
35618 +}
35619 +
35620 +static __inline__ void
35621 +__user_signal_trap (USER_CTXT *uctx)
35622 +{
35623 +    switch (uctx->uctx_trap_state)
35624 +    {
35625 +    case UCTX_TRAP_IDLE:
35626 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: deliver signal %d to pid %d\n", uctx->uctx_trap_signo, uctx->uctx_trap_pid);
35627 +
35628 +       if (uctx->uctx_trap_signo)
35629 +           kill_proc (uctx->uctx_trap_pid, uctx->uctx_trap_signo, 1);
35630 +       break;
35631 +
35632 +    case UCTX_TRAP_SLEEPING:
35633 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: wakeup sleeping trap handler\n");
35634 +
35635 +       kcondvar_wakeupone (&uctx->uctx_wait, &uctx->uctx_spinlock);
35636 +       break;
35637 +    }
35638 +    uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
35639 +}
35640 +
35641 +static void
35642 +user_signal_timer (unsigned long arg)
35643 +{
35644 +    USER_CTXT    *uctx = (USER_CTXT *) arg;
35645 +    unsigned long flags;
35646 +
35647 +    PRINTF (uctx, DBG_TRAP, "user_signal_timer: state=%d pid=%d signal=%d (now %d start %d)\n",
35648 +           uctx->uctx_trap_state, uctx->uctx_trap_pid, uctx->uctx_trap_signo, jiffies,
35649 +           uctx->uctx_int_start);
35650 +
35651 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
35652 +    __user_signal_trap (uctx);
35653 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
35654 +}
35655 +
35656 +#define MAX_INTS_PER_TICK      50
35657 +#define MIN_INTS_PER_TICK      20
35658 +
35659 +static void
35660 +user_signal_trap (USER_CTXT *uctx)
35661 +{
35662 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
35663 +
35664 +    PRINTF (uctx, DBG_TRAP, "user_signal_trap: state=%d pid=%d signal=%d%s\n", uctx->uctx_trap_state,
35665 +           uctx->uctx_trap_pid, uctx->uctx_trap_signo, timer_pending(&uctx->uctx_int_timer) ? " (timer-pending)" : "");
35666 +
35667 +    uctx->uctx_int_count++;
35668 +
35669 +    if (timer_pending (&uctx->uctx_int_timer))
35670 +       return;
35671 +
35672 +    if (uctx->uctx_int_count > ((int)(jiffies - uctx->uctx_int_start) * MAX_INTS_PER_TICK))
35673 +    {
35674 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: deferring signal for %d ticks (count %d ticks %d -> %d)\n", 
35675 +               uctx->uctx_int_delay + 1, uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start),
35676 +               ((int)(jiffies - uctx->uctx_int_start) * MAX_INTS_PER_TICK));
35677 +
35678 +       /* We're interrupting too fast, so defer this signal */
35679 +       uctx->uctx_int_timer.expires = jiffies + (++uctx->uctx_int_delay);
35680 +
35681 +       add_timer (&uctx->uctx_int_timer);
35682 +    }
35683 +    else
35684 +    {
35685 +       __user_signal_trap (uctx);
35686 +
35687 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: check signal for %d ticks (count %d ticks %d -> %d)\n", 
35688 +               uctx->uctx_int_delay + 1, uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start),
35689 +               (int)(jiffies - uctx->uctx_int_start) * MIN_INTS_PER_TICK);
35690 +           
35691 +       if (uctx->uctx_int_count < ((int) (jiffies - uctx->uctx_int_start)) * MIN_INTS_PER_TICK)
35692 +       {
35693 +           PRINTF (uctx, DBG_TRAP, "user_signal_trap: reset interrupt throttle (count %d ticks %d)\n", 
35694 +                   uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start));
35695 +
35696 +           uctx->uctx_int_start = jiffies;
35697 +           uctx->uctx_int_count = 0;
35698 +           uctx->uctx_int_delay = 0;
35699 +       }
35700 +    }
35701 +}
35702 +
35703 +static void
35704 +user_neterr_timer (unsigned long arg)
35705 +{
35706 +    USER_CTXT *uctx = (USER_CTXT *) arg;
35707 +    unsigned long flags;
35708 +    
35709 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
35710 +
35711 +    uctx->uctx_status |= UCTX_NETERR_TIMER;
35712 +    
35713 +    user_signal_trap (uctx);
35714 +
35715 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
35716 +}
35717 +
35718 +static void
35719 +user_flush_dma_runqueue (ELAN4_DEV *dev, USER_CTXT *uctx, int qfull)
35720 +{
35721 +    E4_uint64          qptrs = read_reg64 (dev, DProcLowPriPtrs);
35722 +    E4_uint32          qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
35723 +    E4_uint32          qfptr = E4_QueueFrontPointer (qptrs);
35724 +    E4_uint32          qbptr = E4_QueueBackPointer (qptrs);
35725 +    E4_DProcQueueEntry qentry;
35726 +
35727 +    while ((qfptr != qbptr) || qfull)
35728 +    {
35729 +       E4_uint64 typeSize = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_typeSize));
35730 +
35731 +       if (DMA_Context (typeSize) == uctx->uctx_ctxt.ctxt_num)
35732 +       {
35733 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_DProcQueueEntry));
35734 +
35735 +           PRINTF4 (uctx, DBG_SWAP, "user_flush_dma_runqueue: %016llx %016llx %016llx %016llx\n", qentry.Desc.dma_typeSize, 
35736 +                    qentry.Desc.dma_cookie, qentry.Desc.dma_vproc, qentry.Desc.dma_srcAddr);
35737 +           PRINTF3 (uctx, DBG_SWAP, "                         %016llx %016llx %016llx\n", qentry.Desc.dma_dstAddr, 
35738 +                    qentry.Desc.dma_srcEvent, qentry.Desc.dma_dstEvent);
35739 +
35740 +           if (RING_QUEUE_REALLY_FULL (uctx->uctx_dmaQ))
35741 +               uctx->uctx_status |= UCTX_DPROC_QUEUE_OVERFLOW;
35742 +           else
35743 +           {
35744 +               *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = qentry.Desc;
35745 +               (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
35746 +           }
35747 +           
35748 +           qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
35749 +           qentry.Desc.dma_cookie   = 0;
35750 +           qentry.Desc.dma_vproc    = 0;
35751 +           qentry.Desc.dma_srcAddr  = 0;
35752 +           qentry.Desc.dma_dstAddr  = 0;
35753 +           qentry.Desc.dma_srcEvent = 0;
35754 +           qentry.Desc.dma_dstEvent = 0;
35755 +
35756 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
35757 +       }
35758 +
35759 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
35760 +       qfull = 0;
35761 +    }
35762 +}
35763 +
35764 +static void
35765 +user_flush_thread_runqueue (ELAN4_DEV *dev, USER_CTXT *uctx, int qfull)
35766 +{
35767 +    E4_uint64          qptrs = read_reg64 (dev, TProcLowPriPtrs);
35768 +    E4_uint32          qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
35769 +    E4_uint32          qfptr = E4_QueueFrontPointer (qptrs);
35770 +    E4_uint32          qbptr = E4_QueueBackPointer (qptrs);
35771 +    E4_TProcQueueEntry qentry;
35772 +
35773 +    while ((qfptr != qbptr) || qfull)
35774 +    {
35775 +       E4_uint64 context = elan4_sdram_readq (dev, qfptr + offsetof (E4_TProcQueueEntry, Context));
35776 +
35777 +       if (TPROC_Context (context) == uctx->uctx_ctxt.ctxt_num)
35778 +       {
35779 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_TProcQueueEntry));
35780 +
35781 +           PRINTF (uctx, DBG_SWAP, "user_flush_thread_runqueue: %016llx %016llx %016llx %016llx\n", qentry.Regs.Registers[0],
35782 +                   qentry.Regs.Registers[1], qentry.Regs.Registers[2], qentry.Regs.Registers[3]);
35783 +           PRINTF (uctx, DBG_SWAP, "                            %016llx %016llx %016llx\n", 
35784 +                   qentry.Regs.Registers[4], qentry.Regs.Registers[5], qentry.Regs.Registers[6]);
35785 +
35786 +           if (RING_QUEUE_REALLY_FULL (uctx->uctx_threadQ))
35787 +               uctx->uctx_status |= UCTX_TPROC_QUEUE_OVERFLOW;
35788 +           else
35789 +           {
35790 +               *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = qentry.Regs;
35791 +               (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
35792 +           }
35793 +           
35794 +           /* change the thread to execute the suspend sequence */
35795 +           qentry.Regs.Registers[0] = dev->dev_tproc_suspend;
35796 +           qentry.Regs.Registers[1] = dev->dev_tproc_space;
35797 +           qentry.Context           = dev->dev_ctxt.ctxt_num;
35798 +
35799 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_TProcQueueEntry));
35800 +       }
35801 +       
35802 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_TProcQueueEntry)) & (qsize-1));
35803 +       qfull = 0;
35804 +    }
35805 +}
35806 +
35807 +static void
35808 +user_flush_dmas (ELAN4_DEV *dev, void *arg, int qfull)
35809 +{
35810 +    USER_CTXT        *uctx = (USER_CTXT *) arg;
35811 +    unsigned long     flags;
35812 +    
35813 +    ASSERT ((read_reg32 (dev, InterruptReg) & INT_DProcHalted) != 0);
35814 +
35815 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
35816 +
35817 +    if ((uctx->uctx_status & (UCTX_SWAPPED_REASONS|UCTX_STOPPED_REASONS)) == 0)
35818 +    {
35819 +       PRINTF1 (uctx, DBG_SWAP, "user_flush_dmas: status %x - no more reasons\n", uctx->uctx_status);
35820 +
35821 +       uctx->uctx_status &= ~UCTX_STOPPING;
35822 +
35823 +       user_signal_trap (uctx);
35824 +    }
35825 +    else
35826 +    {
35827 +       user_flush_dma_runqueue (dev, uctx, qfull);
35828 +
35829 +       uctx->uctx_status = (uctx->uctx_status | UCTX_STOPPED) & ~UCTX_STOPPING;
35830 +    
35831 +       PRINTF1 (uctx, DBG_SWAP, "user_flush_dmas: statux %x - stopped\n", uctx->uctx_status);
35832 +
35833 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
35834 +    }
35835 +
35836 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
35837 +}
35838 +
35839 +static void
35840 +user_flush (ELAN4_DEV *dev, void *arg)
35841 +{
35842 +    USER_CTXT        *uctx = (USER_CTXT *) arg;
35843 +    struct list_head *entry;
35844 +    unsigned long     flags;
35845 +
35846 +    ASSERT ((read_reg32 (dev, InterruptReg) & (INT_Halted|INT_Discarding)) == (INT_Halted|INT_Discarding));
35847 +
35848 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
35849 +
35850 +    if ((uctx->uctx_status & (UCTX_SWAPPED_REASONS|UCTX_STOPPED_REASONS)) == 0)
35851 +    {
35852 +       PRINTF1 (uctx, DBG_SWAP, "user_flush: status %x - no more reasons\n", uctx->uctx_status);
35853 +
35854 +       uctx->uctx_status &= ~UCTX_STOPPING;
35855 +
35856 +       user_signal_trap (uctx);
35857 +    }
35858 +    else
35859 +    {
35860 +       PRINTF1 (uctx, DBG_SWAP, "user_flush: status %x - flushing context\n", uctx->uctx_status);
35861 +
35862 +       list_for_each (entry, &uctx->uctx_cqlist) {
35863 +           USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
35864 +
35865 +           if (ucq->ucq_state == UCQ_RUNNING)
35866 +           {
35867 +               /* NOTE: since the inserter can still be running we modify the permissions
35868 +                *       to zero then when the extractor starts up again it will trap */
35869 +               PRINTF1 (uctx, DBG_SWAP, "user_flush: stopping cq indx=%d\n", elan4_cq2idx(ucq->ucq_cq));
35870 +
35871 +               elan4_updatecq (dev, ucq->ucq_cq, 0, 0);
35872 +           }
35873 +       }
35874 +       
35875 +       user_flush_thread_runqueue (dev, uctx, TPROC_LowRunQueueFull(read_reg64 (dev, TProcStatus)));
35876 +
35877 +       /* since we can't determine whether the dma run queue is full or empty, we use a dma
35878 +        * halt operation to do the flushing - as the reason for halting the dma processor 
35879 +        * will be released when we return, we keep it halted until the flush has completed */
35880 +       elan4_queue_dma_flushop (dev, &uctx->uctx_dma_flushop, 0);
35881 +
35882 +       if (uctx->uctx_status & UCTX_EXITING)
35883 +           elan4_flush_icache_halted (&uctx->uctx_ctxt);
35884 +    }
35885 +
35886 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
35887 +}
35888 +
35889 +static void
35890 +user_set_filter (USER_CTXT *uctx, E4_uint32 state)
35891 +{
35892 +    struct list_head *entry;
35893 +
35894 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
35895 +
35896 +    list_for_each (entry, &uctx->uctx_cent_list) {
35897 +       USER_CTXT_ENTRY *cent = list_entry (entry, USER_CTXT_ENTRY, cent_link);
35898 +
35899 +       elan4_set_filter (&uctx->uctx_ctxt, cent->cent_cap->cap_mycontext, state);
35900 +    }
35901 +}
35902 +
35903 +static void
35904 +user_start_nacking (USER_CTXT *uctx, unsigned reason)
35905 +{
35906 +    PRINTF2 (uctx, DBG_SWAP, "user_start_nacking: status %x reason %x\n", uctx->uctx_status, reason);
35907 +
35908 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
35909 +
35910 +    if (UCTX_NACKING(uctx))
35911 +       uctx->uctx_status |= reason;
35912 +    else
35913 +    {
35914 +       uctx->uctx_status |= reason;
35915 +
35916 +       user_set_filter (uctx, E4_FILTER_STATS | E4_FILTER_DISCARD_ALL);
35917 +    }
35918 +}
35919 +
35920 +static void
35921 +user_stop_nacking (USER_CTXT *uctx, unsigned reason)
35922 +{
35923 +    PRINTF2 (uctx, DBG_SWAP, "user_stop_nacking: status %x reason %x\n", uctx->uctx_status, reason);
35924 +    
35925 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
35926 +    
35927 +    uctx->uctx_status &= ~reason;
35928 +    
35929 +    if (! UCTX_NACKING (uctx))
35930 +       user_set_filter (uctx, E4_FILTER_STATS);
35931 +}
35932 +
35933 +static void
35934 +user_start_stopping (USER_CTXT *uctx, unsigned reason)
35935 +{
35936 +    ELAN4_DEV *dev =uctx->uctx_ctxt.ctxt_dev;
35937 +
35938 +    PRINTF2 (uctx, DBG_SWAP, "user_start_stopping: status %x reason %x\n", uctx->uctx_status, reason);
35939 +
35940 +    ASSERT (! (uctx->uctx_status & UCTX_STOPPED));
35941 +
35942 +    user_start_nacking (uctx, reason);
35943 +    
35944 +    if ((uctx->uctx_status & UCTX_STOPPING) != 0)
35945 +       return;
35946 +    
35947 +    uctx->uctx_status |= UCTX_STOPPING;
35948 +
35949 +    /* queue the halt operation to  remove all threads/dmas/cqs from the run queues */
35950 +    /*    and also flush through the context filter change */
35951 +    elan4_queue_haltop (dev, &uctx->uctx_haltop);
35952 +}
35953 +
35954 +static void
35955 +user_stop_stopping (USER_CTXT *uctx, unsigned reason)
35956 +{
35957 +    PRINTF2 (uctx, DBG_SWAP, "user_stop_stopping: status %x reason %x\n", uctx->uctx_status, reason);
35958 +    
35959 +    user_stop_nacking (uctx, reason);
35960 +
35961 +    if (UCTX_RUNNABLE (uctx))
35962 +    {
35963 +       uctx->uctx_status &= ~UCTX_STOPPED;
35964 +
35965 +       PRINTF1 (uctx, DBG_SWAP, "user_stop_stopping: no more reasons => %x\n", uctx->uctx_status);
35966 +
35967 +       user_signal_trap (uctx);
35968 +    }
35969 +}
35970 +
35971 +void
35972 +user_swapout (USER_CTXT *uctx, unsigned reason)
35973 +{
35974 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
35975 +    unsigned long flags;
35976 +    
35977 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
35978 +    
35979 +    PRINTF2 (uctx, DBG_SWAP, "user_swapout: status %x reason %x\n", uctx->uctx_status, reason);
35980 +    
35981 +    user_start_nacking (uctx, reason);
35982 +    
35983 +    while (uctx->uctx_status & (UCTX_SWAPPING|UCTX_STOPPING) &&                /* wait for someone else to finish */
35984 +          uctx->uctx_trap_count > 0)                                           /* and for trap handlers to notice */
35985 +    {                                                                          /* and exit */
35986 +       PRINTF1 (uctx, DBG_SWAP, "user_swapout: waiting for %d trap handlers to exit/previous swapout\n", uctx->uctx_trap_count);
35987 +
35988 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
35989 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);
35990 +    }
35991 +
35992 +    if (uctx->uctx_status & UCTX_SWAPPED)                                      /* already swapped out */
35993 +    {
35994 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
35995 +       return;
35996 +    }
35997 +    
35998 +    uctx->uctx_status |= (UCTX_SWAPPING|UCTX_STOPPING);                                /* mark the context as swapping & stopping */
35999 +    
36000 +    /* queue the halt operation to  remove all threads/dmas/cqs from the run queues */
36001 +    /*    and also flush through the context filter change */
36002 +    elan4_queue_haltop (dev, &uctx->uctx_haltop);
36003 +    
36004 +    while (! (uctx->uctx_status & UCTX_STOPPED))
36005 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);
36006 +
36007 +    /* all state has been removed from the elan - we can now "tidy" it up */
36008 +
36009 +    PRINTF0 (uctx, DBG_SWAP, "user_swapout: swapped out\n");
36010 +    
36011 +    uctx->uctx_status = (uctx->uctx_status & ~UCTX_SWAPPING) | UCTX_SWAPPED;
36012 +    
36013 +    kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
36014 +
36015 +    PRINTF1 (uctx, DBG_SWAP, "user_swapout: all done - status %x\n", uctx->uctx_status);
36016 +
36017 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36018 +}
36019 +
36020 +void
36021 +user_swapin (USER_CTXT *uctx, unsigned reason)
36022 +{
36023 +    unsigned long flags;
36024 +
36025 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36026 +
36027 +    ASSERT (uctx->uctx_status & UCTX_SWAPPED_REASONS);
36028 +
36029 +    PRINTF2 (uctx, DBG_SWAP, "user_swapin: status %x reason %x\n", uctx->uctx_status, reason);
36030 +
36031 +    while (uctx->uctx_status & (UCTX_SWAPPING|UCTX_STOPPING))                  /* wait until other threads have */
36032 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);         /* completed their swap operation */
36033 +
36034 +    ASSERT (uctx->uctx_status & (UCTX_SWAPPED | UCTX_STOPPED));
36035 +
36036 +    user_stop_nacking (uctx, reason);
36037 +
36038 +    if (! (uctx->uctx_status & UCTX_SWAPPED_REASONS))
36039 +    {
36040 +       uctx->uctx_status &= ~UCTX_SWAPPED;
36041 +
36042 +       /* no longer swapped out - wakeup anyone sleeping waiting for swapin */
36043 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
36044 +
36045 +       if (! (uctx->uctx_status & UCTX_STOPPED_REASONS))
36046 +       {
36047 +           uctx->uctx_status &= ~UCTX_STOPPED;
36048 +           user_signal_trap (uctx);
36049 +       }
36050 +    }
36051 +
36052 +    PRINTF1 (uctx, DBG_SWAP, "user_swapin: all done - status %x\n", uctx->uctx_status);
36053 +
36054 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36055 +}
36056 +
36057 +void
36058 +user_destroy_callback (void *arg, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
36059 +{
36060 +    USER_CTXT *uctx = (USER_CTXT *) arg;
36061 +
36062 +    PRINTF (uctx, DBG_VP, "user_destroy_callback: %s\n", map == NULL ? "cap destoyed" : "map destroyed");
36063 +}
36064 +
36065 +int
36066 +user_attach (USER_CTXT *uctx, ELAN_CAPABILITY *cap)
36067 +{
36068 +    ELAN4_DEV       *dev = uctx->uctx_ctxt.ctxt_dev;
36069 +    USER_CTXT_ENTRY *cent;
36070 +    unsigned long flags;
36071 +    int ctype, res;
36072 +    
36073 +    if ((ctype = user_validate_cap (uctx, cap, ELAN_USER_ATTACH)) < 0)
36074 +       return ctype;
36075 +
36076 +    if ((ctype == ELAN_CAP_RMS) && (res = elan_attach_cap (cap, dev->dev_devinfo.dev_rail, uctx, user_destroy_callback)) != 0)
36077 +    {
36078 +       /* NOTE: elan_attach_cap returns +ve errnos */
36079 +       return -res;
36080 +    }
36081 +
36082 +    KMEM_ALLOC (cent, USER_CTXT_ENTRY *, sizeof (USER_CTXT_ENTRY), 1);
36083 +    if (cent == NULL)
36084 +    {
36085 +       if (ctype == ELAN_CAP_RMS)
36086 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
36087 +
36088 +       return -ENOMEM;
36089 +    }
36090 +
36091 +    KMEM_ALLOC (cent->cent_cap, ELAN_CAPABILITY *, ELAN_CAP_SIZE(cap), 1);
36092 +    if (cent->cent_cap == NULL)
36093 +    {
36094 +       if (ctype == ELAN_CAP_RMS)
36095 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
36096 +
36097 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
36098 +       return -ENOMEM;
36099 +    }
36100 +
36101 +    bcopy (cap, cent->cent_cap, ELAN_CAP_SIZE(cap));
36102 +
36103 +    if ((res = elan4_attach_filter (&uctx->uctx_ctxt, cap->cap_mycontext)) != 0)
36104 +    {
36105 +       if (ctype == ELAN_CAP_RMS)
36106 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
36107 +       
36108 +       KMEM_FREE (cent->cent_cap, ELAN_CAP_SIZE (cap));
36109 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
36110 +
36111 +       return res;
36112 +    }
36113 +
36114 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36115 +
36116 +    list_add_tail (&cent->cent_link, &uctx->uctx_cent_list);
36117 +
36118 +    if (! UCTX_NACKING (uctx))
36119 +       user_set_filter (uctx, E4_FILTER_STATS);
36120 +
36121 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36122 +
36123 +    return (0);
36124 +    
36125 +}
36126 +
36127 +void
36128 +user_detach (USER_CTXT *uctx, ELAN_CAPABILITY *cap)
36129 +{
36130 +    ELAN4_DEV         *dev = uctx->uctx_ctxt.ctxt_dev;
36131 +    struct list_head  *entry;
36132 +    struct list_head  *next;
36133 +    struct list_head   list;
36134 +    unsigned long      flags;
36135 +
36136 +    INIT_LIST_HEAD (&list);
36137 +
36138 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36139 +    
36140 +    PRINTF (uctx, DBG_NETWORK_CTX, cap ? "user_detach: network context %d\n" : "user_detach: all network contexts\n", cap ? cap->cap_mycontext : 0);
36141 +
36142 +    list_for_each_safe (entry, next, &uctx->uctx_cent_list) {
36143 +       USER_CTXT_ENTRY *cent = list_entry (entry, USER_CTXT_ENTRY, cent_link);
36144 +
36145 +       if (cap == NULL || ELAN_CAP_MATCH (cap, cent->cent_cap))
36146 +       {
36147 +           PRINTF1 (uctx, DBG_NETWORK_CTX, "user_detach: detach from network context %d\n", cent->cent_cap->cap_mycontext);
36148 +           
36149 +           elan4_detach_filter (&uctx->uctx_ctxt, cent->cent_cap->cap_mycontext);
36150 +
36151 +           list_del (&cent->cent_link);
36152 +           list_add_tail (&cent->cent_link, &list);
36153 +       }
36154 +    }
36155 +
36156 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36157 +
36158 +    while (! list_empty (&list))
36159 +    {
36160 +       USER_CTXT_ENTRY *cent = list_entry (list.next, USER_CTXT_ENTRY, cent_link);
36161 +
36162 +       list_del (&cent->cent_link);
36163 +
36164 +       if (user_validate_cap (uctx, cent->cent_cap, ELAN_USER_DETACH) == ELAN_CAP_RMS)
36165 +           elan_detach_cap (cent->cent_cap, dev->dev_devinfo.dev_rail); 
36166 +       
36167 +       KMEM_FREE (cent->cent_cap, ELAN_CAP_SIZE (cent->cent_cap));
36168 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
36169 +    }
36170 +}
36171 +
36172 +void
36173 +user_block_inputter (USER_CTXT *uctx, unsigned blocked)
36174 +{
36175 +    unsigned long flags;
36176 +    int isblocked;
36177 +
36178 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36179 +    
36180 +    isblocked = (uctx->uctx_status & UCTX_USER_FILTERING);
36181 +
36182 +    if (blocked && !isblocked)
36183 +       user_start_nacking (uctx, UCTX_USER_FILTERING);
36184 +
36185 +    if (!blocked && isblocked)
36186 +       user_stop_nacking (uctx, UCTX_USER_FILTERING);
36187 +
36188 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36189 +}
36190 +
36191 +static USER_VPSEG *
36192 +user_install_vpseg (USER_CTXT *uctx, unsigned process, unsigned entries)
36193 +{
36194 +    struct list_head *entry;
36195 +    USER_VPSEG       *seg;
36196 +
36197 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
36198 +
36199 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
36200 +       seg = list_entry (entry, USER_VPSEG, vps_link);
36201 +
36202 +       if (process <= (seg->vps_process + seg->vps_entries-1) && 
36203 +           (process + entries - 1) >= seg->vps_process)
36204 +           return ((USER_VPSEG *) NULL);
36205 +    }
36206 +
36207 +    KMEM_ZALLOC (seg, USER_VPSEG *, sizeof (USER_VPSEG), 1);
36208 +    
36209 +    if (seg == (USER_VPSEG *) NULL)
36210 +       return ((USER_VPSEG *) NULL);
36211 +
36212 +    seg->vps_process = process;
36213 +    seg->vps_entries = entries;
36214 +
36215 +    list_add_tail (&seg->vps_link, &uctx->uctx_vpseg_list);
36216 +
36217 +    return (seg);
36218 +}
36219 +
36220 +static void
36221 +user_remove_vpseg (USER_CTXT *uctx, USER_VPSEG *seg)
36222 +{
36223 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
36224 +
36225 +    list_del (&seg->vps_link);
36226 +    
36227 +    switch (seg->vps_type)
36228 +    {
36229 +    case USER_VPSEG_P2P:
36230 +       /* These pointers (union) are only valid for P2P segs */
36231 +       if (seg->vps_p2p_routes)
36232 +           KMEM_FREE (seg->vps_p2p_routes, sizeof (E4_VirtualProcessEntry) * seg->vps_entries);
36233 +       
36234 +       if (seg->vps_p2p_cap)
36235 +           KMEM_FREE (seg->vps_p2p_cap, ELAN_CAP_SIZE(seg->vps_p2p_cap));
36236 +
36237 +       break;
36238 +       
36239 +    case USER_VPSEG_BCAST:
36240 +       ;
36241 +    }
36242 +
36243 +    KMEM_FREE (seg, sizeof (USER_VPSEG));
36244 +}
36245 +
36246 +static USER_VPSEG *
36247 +user_find_vpseg (USER_CTXT *uctx, unsigned low, unsigned high)
36248 +{
36249 +    struct list_head *entry;
36250 +
36251 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
36252 +
36253 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
36254 +       USER_VPSEG *seg = list_entry (entry, USER_VPSEG, vps_link);
36255 +
36256 +       if (seg->vps_process <= low && (seg->vps_process + seg->vps_entries) > high)
36257 +           return (seg);
36258 +    }
36259 +
36260 +    return ((USER_VPSEG *) NULL);
36261 +}
36262 +
36263 +static ELAN_LOCATION 
36264 +user_process2location (USER_CTXT *uctx, USER_VPSEG *seg, unsigned process)
36265 +{
36266 +    ELAN_LOCATION location;
36267 +    int           nnodes, nctxs;
36268 +    int           nodeOff, ctxOff, vpOff;
36269 +
36270 +    location.loc_node    = ELAN_INVALID_NODE;
36271 +    location.loc_context = -1;
36272 +
36273 +    if (seg == NULL)
36274 +       seg = user_find_vpseg (uctx, process, process);
36275 +
36276 +    if (seg == NULL || (seg->vps_type != USER_VPSEG_P2P))
36277 +       return (location);
36278 +
36279 +    nnodes = ELAN_CAP_NUM_NODES (seg->vps_p2p_cap);
36280 +    nctxs  = ELAN_CAP_NUM_CONTEXTS (seg->vps_p2p_cap);
36281 +
36282 +    switch (seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_MASK)
36283 +    {
36284 +    case ELAN_CAP_TYPE_BLOCK:
36285 +       for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
36286 +       {
36287 +           for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
36288 +           {
36289 +               if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
36290 +               {
36291 +                   if (vpOff++ == (process - seg->vps_process))
36292 +                   { 
36293 +                       location.loc_node    = seg->vps_p2p_cap->cap_lownode + nodeOff;
36294 +                       location.loc_context = seg->vps_p2p_cap->cap_lowcontext + ctxOff;
36295 +                       goto found;
36296 +                   }
36297 +               }
36298 +           }
36299 +       }
36300 +       break;
36301 +       
36302 +    case ELAN_CAP_TYPE_CYCLIC:
36303 +       for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
36304 +       {
36305 +           for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
36306 +           {
36307 +               if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
36308 +               {                                   
36309 +                   if (vpOff++ ==  (process - seg->vps_process))
36310 +                   { 
36311 +                       location.loc_node    = seg->vps_p2p_cap->cap_lownode + nodeOff;
36312 +                       location.loc_context = seg->vps_p2p_cap->cap_lowcontext + ctxOff;
36313 +                       goto found;
36314 +                   }
36315 +               }
36316 +           }
36317 +       }
36318 +       break;  
36319 +    }
36320 +       
36321 + found:
36322 +    return (location);
36323 +}
36324 +
36325 +static unsigned 
36326 +user_location2process (USER_CTXT *uctx, ELAN_LOCATION location)
36327 +{
36328 +    unsigned int      process = ELAN_INVALID_PROCESS;
36329 +    struct list_head *entry;
36330 +    int               nnodes, nctxs;
36331 +    int               nodeOff, ctxOff, vpOff;
36332 +
36333 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36334 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
36335 +       USER_VPSEG *seg = list_entry (entry, USER_VPSEG, vps_link);
36336 +
36337 +       if (seg->vps_type != USER_VPSEG_P2P)
36338 +           continue;
36339 +
36340 +       if (location.loc_node >= seg->vps_p2p_cap->cap_lownode && location.loc_node <= seg->vps_p2p_cap->cap_highnode &&
36341 +           location.loc_context >= seg->vps_p2p_cap->cap_lowcontext && location.loc_context <= seg->vps_p2p_cap->cap_highcontext)
36342 +       {
36343 +           nnodes = ELAN_CAP_NUM_NODES (seg->vps_p2p_cap);
36344 +           nctxs  = ELAN_CAP_NUM_CONTEXTS (seg->vps_p2p_cap);
36345 +
36346 +           switch (seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_MASK)
36347 +           {
36348 +           case ELAN_CAP_TYPE_BLOCK:
36349 +               for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
36350 +               {
36351 +                   for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
36352 +                   {
36353 +                       if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
36354 +                       {
36355 +                           if (location.loc_node == seg->vps_p2p_cap->cap_lownode + nodeOff &&
36356 +                               location.loc_context == seg->vps_p2p_cap->cap_lowcontext + ctxOff)
36357 +                           {
36358 +                               process = seg->vps_process + vpOff;
36359 +                               goto found;
36360 +                           }
36361 +                           vpOff++;
36362 +                       }
36363 +                   }
36364 +               }
36365 +               break;
36366 +       
36367 +           case ELAN_CAP_TYPE_CYCLIC:
36368 +               for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
36369 +               {
36370 +                   for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
36371 +                   {
36372 +                       if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
36373 +                       {
36374 +                           if (location.loc_node == seg->vps_p2p_cap->cap_lownode + nodeOff &&
36375 +                               location.loc_context == seg->vps_p2p_cap->cap_lowcontext + ctxOff)
36376 +                           {
36377 +                               process = seg->vps_process + vpOff;
36378 +                               goto found;
36379 +                           }
36380 +                           vpOff++;
36381 +                       }
36382 +                   }
36383 +               }
36384 +               break;
36385 +           }
36386 +       }
36387 +    }
36388 + found:
36389 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36390 +
36391 +    return (process);
36392 +}
36393 +
36394 +static void
36395 +user_loadroute_vpseg (USER_CTXT *uctx, USER_VPSEG *seg, ELAN_POSITION *pos)
36396 +{
36397 +    ELAN4_DEV             *dev    = uctx->uctx_ctxt.ctxt_dev;
36398 +    ELAN_CAPABILITY       *cap    = seg->vps_p2p_cap;
36399 +    unsigned               nnodes = ELAN_CAP_NUM_NODES (cap);
36400 +    unsigned               nctxs  = ELAN_CAP_NUM_CONTEXTS (cap);
36401 +    E4_VirtualProcessEntry route;
36402 +    unsigned              nodeOff;
36403 +    unsigned              ctxOff;
36404 +    unsigned              vpOff;
36405 +
36406 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
36407 +    {
36408 +    case ELAN_CAP_TYPE_BLOCK:
36409 +       for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
36410 +       {
36411 +           for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
36412 +           {
36413 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
36414 +               {
36415 +                   if (seg->vps_p2p_routes != NULL)
36416 +                       route = seg->vps_p2p_routes[vpOff];
36417 +                   else if (elan4_generate_route (&uctx->uctx_position, &route, cap->cap_lowcontext + ctxOff,
36418 +                                                  cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, user_p2p_route_options) < 0)
36419 +                   {
36420 +                       vpOff++;
36421 +                       continue;
36422 +                   }
36423 +
36424 +                   PRINTF5 (uctx, DBG_VP, "user_loadroute_vpseg: virtual process %d -> node %d context %d [%016llx.%016llx]\n",
36425 +                            seg->vps_process + vpOff, cap->cap_lownode + nodeOff, cap->cap_lowcontext + ctxOff,
36426 +                            route.Values[0], route.Values[1]);
36427 +                   
36428 +                   elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process + vpOff, &route);
36429 +                                             
36430 +                   vpOff++;
36431 +               }
36432 +           }
36433 +       }
36434 +       break;
36435 +
36436 +    case ELAN_CAP_TYPE_CYCLIC:
36437 +       for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
36438 +       {
36439 +           for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
36440 +           {
36441 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
36442 +               {
36443 +                   if (seg->vps_p2p_routes != NULL)
36444 +                       route = seg->vps_p2p_routes[vpOff];
36445 +                   else if (elan4_generate_route (&uctx->uctx_position, &route, cap->cap_lowcontext + ctxOff,
36446 +                                                  cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, user_p2p_route_options) < 0)
36447 +                   {
36448 +                       vpOff++;
36449 +                       continue;
36450 +                   }
36451 +
36452 +                   PRINTF5 (uctx, DBG_VP, "user_loadroute_vpseg: virtual process %d -> node %d context %d [%016llx.%016llx]\n",
36453 +                            seg->vps_process + vpOff, cap->cap_lownode + nodeOff, cap->cap_lowcontext + ctxOff,
36454 +                            route.Values[0], route.Values[1]);
36455 +                   
36456 +                   elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process + vpOff, &route);
36457 +                                             
36458 +                   vpOff++;
36459 +               }
36460 +           }
36461 +       }
36462 +       break;
36463 +    }
36464 +}
36465 +
36466 +static int
36467 +user_loadroute_bcast (USER_CTXT *uctx, USER_VPSEG *seg)
36468 +{
36469 +    ELAN4_DEV             *dev = uctx->uctx_ctxt.ctxt_dev;
36470 +    ELAN_POSITION         *pos = &uctx->uctx_position;
36471 +    E4_VirtualProcessEntry route;
36472 +    USER_VPSEG            *aseg;
36473 +    int                    res;
36474 +    ELAN_LOCATION          low;
36475 +    ELAN_LOCATION          high;
36476 +
36477 +    if ((aseg = user_find_vpseg (uctx, seg->vps_bcast_lowvp, seg->vps_bcast_highvp)) == NULL || aseg->vps_type != USER_VPSEG_P2P)
36478 +       return (-EINVAL);
36479 +    
36480 +#ifdef use_elanmod
36481 +    if ((res = user_validate_cap (dev, aseg->vps_p2p_cap, ELAN_USER_BROADCAST)) < 0)
36482 +       return (res);
36483 +#endif
36484 +    
36485 +    low  = user_process2location (uctx, aseg, seg->vps_bcast_lowvp);
36486 +    high = user_process2location (uctx, aseg, seg->vps_bcast_highvp);
36487 +
36488 +    if (low.loc_context != high.loc_context)
36489 +       return (-EINVAL);
36490 +
36491 +    /* NOTE: if loopback can only broadcast to ourself - 
36492 +     *       if back-to-back can only broadcast to other node */
36493 +    if ((pos->pos_mode == ELAN_POS_MODE_LOOPBACK   && low.loc_node != high.loc_node && low.loc_node != pos->pos_nodeid) ||
36494 +       (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && low.loc_node != high.loc_node && low.loc_node == pos->pos_nodeid))
36495 +    {
36496 +       return (-EINVAL);
36497 +    }
36498 +    
36499 +    if ((res = elan4_generate_route (pos, &route, low.loc_context, low.loc_node, high.loc_node, user_bcast_route_options)) < 0)
36500 +       return (res);
36501 +
36502 +    PRINTF (uctx, DBG_VP, "user_loadroute_bcast: virtual process %d -> nodes %d.%d context %d [%016llx.%016llx]\n",
36503 +           seg->vps_process, low.loc_node, high.loc_node, low.loc_context, route.Values[0], route.Values[1]);
36504 +    
36505 +    elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process, &route);
36506 +    return (0);
36507 +}
36508 +
36509 +int
36510 +user_add_p2pvp (USER_CTXT *uctx, unsigned process, ELAN_CAPABILITY *cap)
36511 +{
36512 +    USER_VPSEG      *seg;
36513 +    ELAN_CAPABILITY *ncap;
36514 +    unsigned         entries;
36515 +
36516 +    if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) == 0)
36517 +       entries = bt_nbits (cap->cap_bitmap , ELAN_CAP_BITMAPSIZE(cap));
36518 +    else
36519 +       entries = ELAN_CAP_BITMAPSIZE(cap);
36520 +    
36521 +    if ((process + entries) > (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size))
36522 +       return (-EINVAL);
36523 +
36524 +    KMEM_ALLOC (ncap, ELAN_CAPABILITY *, ELAN_CAP_SIZE (cap), 1);
36525 +
36526 +    if (ncap == NULL)
36527 +       return (-ENOMEM);
36528 +    
36529 +    bcopy (cap, ncap, ELAN_CAP_SIZE (cap));
36530 +
36531 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36532 +
36533 +    if ((seg = user_install_vpseg (uctx, process, entries)) == NULL)
36534 +    {
36535 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36536 +       return (-EINVAL);
36537 +    }
36538 +    
36539 +    seg->vps_type       = USER_VPSEG_P2P;
36540 +    seg->vps_p2p_cap    = ncap;
36541 +    seg->vps_p2p_routes = NULL;
36542 +
36543 +    user_loadroute_vpseg (uctx, seg, &uctx->uctx_position);
36544 +    
36545 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36546 +
36547 +    return (0);
36548 +}
36549 +
36550 +int
36551 +user_add_bcastvp (USER_CTXT *uctx, unsigned process, unsigned lowvp, unsigned highvp)
36552 +{
36553 +    USER_VPSEG *seg;
36554 +    int         res;
36555 +
36556 +    if (lowvp > highvp || process >= (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size))
36557 +       return (-EINVAL);
36558 +
36559 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36560 +
36561 +    if ((seg = user_install_vpseg (uctx, process, 1)) == NULL)
36562 +    {
36563 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36564 +       return (-EINVAL);
36565 +    }
36566 +
36567 +    seg->vps_type         = USER_VPSEG_BCAST;
36568 +    seg->vps_bcast_lowvp  = lowvp;
36569 +    seg->vps_bcast_highvp = highvp;
36570 +
36571 +    if ((res = user_loadroute_bcast (uctx, seg)) < 0)
36572 +       user_remove_vpseg (uctx, seg);
36573 +
36574 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36575 +    return (res);
36576 +}
36577 +
36578 +int
36579 +user_removevp (USER_CTXT *uctx, unsigned process)
36580 +{
36581 +    USER_VPSEG *seg;
36582 +
36583 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36584 +    
36585 +    if (process == ELAN_INVALID_PROCESS)
36586 +       seg = list_empty (&uctx->uctx_vpseg_list) ? NULL : list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link);
36587 +    else
36588 +       seg = user_find_vpseg (uctx, process, process);
36589 +
36590 +    if (seg == NULL)
36591 +    {
36592 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36593 +       return (-EINVAL);
36594 +    }
36595 +
36596 +    do {
36597 +       ELAN4_DEV    *dev = uctx->uctx_ctxt.ctxt_dev;
36598 +       int i;
36599 +
36600 +       for (i = 0; i < seg->vps_entries; i++)
36601 +           elan4_invalidate_route (dev, uctx->uctx_routetable, seg->vps_process + i);
36602 +
36603 +       user_remove_vpseg (uctx, seg);
36604 +
36605 +    } while (process == ELAN_INVALID_PROCESS && !list_empty (&uctx->uctx_vpseg_list) && (seg = list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link)) != NULL);
36606 +
36607 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36608 +
36609 +    return (0);
36610 +}
36611 +
36612 +int
36613 +user_set_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route)
36614 +{
36615 +    ELAN4_DEV    *dev = uctx->uctx_ctxt.ctxt_dev;
36616 +    USER_VPSEG   *seg;
36617 +    ELAN_LOCATION location;
36618 +
36619 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36620 +
36621 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
36622 +    {
36623 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36624 +       return (-EINVAL);
36625 +    }
36626 +
36627 +    /* check that the route supplied is valid and goes to the correct place */
36628 +    location = user_process2location (uctx, seg, process);
36629 +
36630 +    if (elan4_check_route (&uctx->uctx_position, location, route, 0) != 0)
36631 +    {
36632 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36633 +       return (-EINVAL);
36634 +    }
36635 +
36636 +    if (seg->vps_p2p_routes == NULL)
36637 +       KMEM_ZALLOC (seg->vps_p2p_routes, E4_VirtualProcessEntry *, sizeof (E4_VirtualProcessEntry) * seg->vps_entries, 1);
36638 +    
36639 +    if (seg->vps_p2p_routes == NULL)
36640 +    {
36641 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36642 +       return (-ENOMEM);
36643 +    }
36644 +    
36645 +    seg->vps_p2p_routes[process - seg->vps_process].Values[0] = route->Values[0];
36646 +    seg->vps_p2p_routes[process - seg->vps_process].Values[1] = ROUTE_CTXT_VALUE(location.loc_context) | (route->Values[1] & ~ROUTE_CTXT_MASK);
36647 +    
36648 +    PRINTF (uctx, DBG_ROUTE, "user_set_route: vp=%d -> %016llx%016llx\n", process, 
36649 +           seg->vps_p2p_routes[process - seg->vps_process].Values[1], seg->vps_p2p_routes[process - seg->vps_process].Values[0]);
36650 +
36651 +    elan4_write_route (dev, uctx->uctx_routetable, process, &seg->vps_p2p_routes[process - seg->vps_process]);
36652 +
36653 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36654 +
36655 +    return (0);
36656 +}
36657 +
36658 +int
36659 +user_reset_route (USER_CTXT *uctx, unsigned process)
36660 +{
36661 +    ELAN4_DEV             *dev = uctx->uctx_ctxt.ctxt_dev;
36662 +    E4_VirtualProcessEntry route;
36663 +    ELAN_LOCATION          location;
36664 +    USER_VPSEG            *seg;
36665 +
36666 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36667 +
36668 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
36669 +    {
36670 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36671 +       return (-EINVAL);
36672 +    }
36673 +
36674 +    if (seg->vps_p2p_routes != NULL)
36675 +    {
36676 +       seg->vps_p2p_routes[process - seg->vps_process].Values[0] = 0;
36677 +       seg->vps_p2p_routes[process - seg->vps_process].Values[1] = 0;
36678 +    }
36679 +    
36680 +    /* generate the default route to this location */
36681 +    location = user_process2location (uctx, seg, process);
36682 +
36683 +    PRINTF (uctx, DBG_ROUTE, "user_reset_route: vp=%d\n", process);
36684 +
36685 +    if (elan4_generate_route (&uctx->uctx_position, &route, location.loc_context, location.loc_node, location.loc_node, 0) < 0)
36686 +       elan4_invalidate_route (dev, uctx->uctx_routetable, process);
36687 +    else
36688 +       elan4_write_route (dev, uctx->uctx_routetable, process, &route);
36689 +
36690 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36691 +
36692 +    return (0);
36693 +}
36694 +
36695 +int
36696 +user_get_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route)
36697 +{
36698 +    ELAN4_DEV  *dev = uctx->uctx_ctxt.ctxt_dev;
36699 +    USER_VPSEG   *seg;
36700 +    
36701 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36702 +
36703 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
36704 +    {
36705 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36706 +       return (-EINVAL);
36707 +    }
36708 +
36709 +    elan4_read_route (dev, uctx->uctx_routetable, process, route);
36710 +
36711 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36712 +    return (0);
36713 +}
36714 +
36715 +int
36716 +user_check_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route, unsigned *error)
36717 +{
36718 +    ELAN4_DEV  *dev = uctx->uctx_ctxt.ctxt_dev;
36719 +    USER_VPSEG *seg;
36720 +    
36721 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36722 +
36723 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
36724 +    {
36725 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36726 +       return (-EINVAL);
36727 +    }
36728 +
36729 +    elan4_read_route (dev, uctx->uctx_routetable, process, route);
36730 +
36731 +    *error = elan4_check_route (&uctx->uctx_position, user_process2location (uctx, seg, process), route, 0);
36732 +
36733 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36734 +    return (0);
36735 +}
36736 +
36737 +int
36738 +user_send_neterr_msg (USER_CTXT *uctx, unsigned int vp, unsigned int nctx, unsigned int retries, ELAN4_NETERR_MSG *msg)
36739 +{
36740 +    USER_VPSEG   *seg;
36741 +    ELAN_LOCATION location;
36742 +    unsigned long flags;
36743 +    int                  res, found = 0;
36744 +    struct list_head *el;
36745 +
36746 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36747 +    /* determine the location of the virtual process */
36748 +    if ((seg = user_find_vpseg (uctx, vp, vp)) == NULL)
36749 +    {
36750 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: vp=%d has no vpseg\n", vp);
36751 +
36752 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36753 +       return -EINVAL;
36754 +    }
36755 +
36756 +    switch (seg->vps_type)     /* NOTE(review): no default case — 'location' stays uninitialized if vps_type is neither P2P nor BCAST; confirm those are the only types */
36757 +    {
36758 +    case USER_VPSEG_P2P:
36759 +       location = user_process2location (uctx, seg, vp);
36760 +       break;
36761 +
36762 +    case USER_VPSEG_BCAST:
36763 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: vp=%d is a bcast vp\n", vp);
36764 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36765 +       return -EINVAL;
36766 +    }
36767 +
36768 +    /*  check that we're attached to the network context */
36769 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36770 +    list_for_each (el , &uctx->uctx_cent_list) {
36771 +       USER_CTXT_ENTRY *cent = list_entry (el, USER_CTXT_ENTRY, cent_link);
36772 +       
36773 +       if (cent->cent_cap->cap_mycontext == nctx)
36774 +           found++;
36775 +    }
36776 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36777 +    
36778 +    if (! found)
36779 +    {
36780 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: nctx=%d not attached\n", nctx);
36781 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36782 +
36783 +       return -EINVAL;
36784 +    }
36785 +
36786 +    /* Update the fields which the user might have "faked" */
36787 +    msg->msg_context            = location.loc_context;
36788 +    msg->msg_sender.loc_node    = uctx->uctx_position.pos_nodeid;
36789 +    msg->msg_sender.loc_context = nctx;
36790 +
36791 +    res = elan4_neterr_sendmsg (uctx->uctx_ctxt.ctxt_dev, location.loc_node, retries, msg);
36792 +
36793 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36794 +
36795 +    return (res);
36796 +}
36797 +
36798 +
36799 +static int
36800 +user_resolvevp (USER_CTXT *uctx, unsigned process)
36801 +{
36802 +    int                    res = 0;
36803 +    USER_VPSEG            *seg;
36804 +    ELAN_LOCATION          location;
36805 +    E4_VirtualProcessEntry route;
36806 +
36807 +    PRINTF1 (uctx, DBG_VP, "user_resolvevp: process=%d\n", process);
36808 +
36809 +    kmutex_lock (&uctx->uctx_vpseg_lock);
36810 +
36811 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL)
36812 +    {
36813 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
36814 +       return (-EINVAL);
36815 +    }
36816 +
36817 +    switch (seg->vps_type)
36818 +    {
36819 +    case USER_VPSEG_P2P:
36820 +#ifdef use_elanmod
36821 +       if ((res = user_validate_cap (uctx, seg->vps_p2p_cap, ELAN_USER_P2P)) != 0)
36822 +           break;
36823 +#endif
36824 +
36825 +       location = user_process2location (uctx, seg, process);
36826 +
36827 +       PRINTF (uctx, DBG_VP, "user_resolvevp: vp=%d -> node=%d ctx=%d\n", process, location.loc_node, location.loc_context);
36828 +       
36829 +       if (seg->vps_p2p_routes != NULL && seg->vps_p2p_routes[process - seg->vps_process].Values[0] != 0)
36830 +           route = seg->vps_p2p_routes[process - seg->vps_process];
36831 +       else if ((res = elan4_generate_route (&uctx->uctx_position, &route, location.loc_context, location.loc_node, location.loc_node, user_p2p_route_options)) < 0)
36832 +           break;
36833 +       
36834 +       elan4_write_route (uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, process, &route);
36835 +       break;
36836 +
36837 +    case USER_VPSEG_BCAST:
36838 +       res = user_loadroute_bcast (uctx, seg);
36839 +       break;
36840 +       
36841 +    default:
36842 +       res = -EINVAL;
36843 +       break;
36844 +    }
36845 +
36846 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
36847 +    return (res);
36848 +}
36849 +
36850 +static void
36851 +user_eproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
36852 +{
36853 +    USER_CTXT    *uctx = (USER_CTXT *) ctxt;
36854 +    unsigned long flags;
36855 +
36856 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36857 +
36858 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_eprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
36859 +    {
36860 +       PRINTF (uctx, DBG_EPROC, "user_eproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
36861 +
36862 +       uctx->uctx_status |= UCTX_EPROC_QUEUE_ERROR;
36863 +    }
36864 +    else
36865 +    {
36866 +       elan4_extract_eproc_trap (ctxt->ctxt_dev, status, RING_QUEUE_BACK (uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps), 0);
36867 +       
36868 +       DBGCMD (ctxt, DBG_EPROC, elan4_display_eproc_trap (ctxt, DBG_EPROC, "user_eproc_trap", RING_QUEUE_BACK(uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps)));
36869 +       
36870 +       if (RING_QUEUE_ADD (uctx->uctx_eprocTrapQ))
36871 +           user_start_stopping (uctx, UCTX_EPROC_QUEUE_FULL);
36872 +    }
36873 +
36874 +    user_signal_trap (uctx);
36875 +
36876 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36877 +}
36878 +
36879 +static void
36880 +user_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
36881 +{
36882 +    USER_CTXT        *uctx = (USER_CTXT *) ctxt;
36883 +    USER_CQ          *ucq  = NULL;
36884 +    struct list_head *entry;
36885 +    unsigned long     flags;
36886 +
36887 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36888 +    
36889 +    list_for_each (entry, &uctx->uctx_cqlist) {
36890 +       ucq = list_entry (entry, USER_CQ, ucq_link);
36891 +
36892 +       if (elan4_cq2num(ucq->ucq_cq) == cqnum)
36893 +           break;
36894 +    }
36895 +
36896 +    ASSERT (ucq != NULL);
36897 +
36898 +    if (ucq->ucq_state != UCQ_RUNNING && CPROC_TrapType (status) == CommandProcInserterError)
36899 +    {
36900 +       PRINTF (ctxt, DBG_TRAP, "user_cproc_trap CommandProcInserterError\n");
36901 +       ucq->ucq_errored++;
36902 +    }
36903 +    else
36904 +    {
36905 +       int vp;
36906 +       
36907 +       ASSERT (ucq->ucq_state == UCQ_RUNNING);
36908 +
36909 +       elan4_extract_cproc_trap (ctxt->ctxt_dev, status, &ucq->ucq_trap, cqnum);
36910 +
36911 +       DBGCMD (ctxt, DBG_CPROC, elan4_display_cproc_trap (ctxt, DBG_CPROC, "user_cproc_trap", &ucq->ucq_trap));
36912 +
36913 +       ucq->ucq_state = UCQ_TRAPPED;
36914 +
36915 +       if ((vp = cproc_open_extract_vp(uctx->uctx_ctxt.ctxt_dev, ucq->ucq_cq)) != -1)
36916 +       {
36917 +           E4_VirtualProcessEntry route;
36918 +
36919 +           elan4_read_route(uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, vp,  &route);
36920 +           elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_cproc_timeout_routes, &route, uctx->uctx_ctxt.ctxt_dev);
36921 +       }
36922 +    }
36923 +
36924 +    user_signal_trap (uctx);
36925 +       
36926 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36927 +}
36928 +
36929 +static void
36930 +user_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
36931 +{
36932 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
36933 +    unsigned long flags;
36934 +
36935 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36936 +
36937 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_dprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
36938 +    {
36939 +       PRINTF (uctx, DBG_DPROC, "user_dproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
36940 +
36941 +       uctx->uctx_status |= UCTX_DPROC_QUEUE_ERROR;
36942 +    }
36943 +    else
36944 +    {
36945 +       ELAN4_DPROC_TRAP *trap = RING_QUEUE_BACK (uctx->uctx_dprocTrapQ, uctx->uctx_dprocTraps);
36946 +       
36947 +       elan4_extract_dproc_trap (ctxt->ctxt_dev, status, trap, unit);
36948 +
36949 +       if ((DPROC_PacketTimeout (trap->tr_status)) || (DPROC_TrapType (trap->tr_status) == DmaProcFailCountError))
36950 +       {
36951 +           E4_VirtualProcessEntry route;
36952 +
36953 +            elan4_read_route(ctxt->ctxt_dev, uctx->uctx_routetable, trap->tr_desc.dma_vproc, &route);
36954 +
36955 +           if ((route.Values[0] != 0) || (route.Values[1] != 0))
36956 +            {
36957 +               elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_dproc_timeout_routes, &route, uctx->uctx_ctxt.ctxt_dev);
36958 +           }
36959 +       }
36960 +       else if (DPROC_TrapType (trap->tr_status) == DmaProcPacketAckError)
36961 +       {
36962 +           E4_VirtualProcessEntry route;
36963 +
36964 +            elan4_read_route(ctxt->ctxt_dev, uctx->uctx_routetable, trap->tr_desc.dma_vproc, &route);
36965 +
36966 +           if ((route.Values[0] != 0) || (route.Values[1] != 0))
36967 +            {
36968 +               elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_ack_error_routes, &route, uctx->uctx_ctxt.ctxt_dev);
36969 +            }
36970 +       } 
36971 +       else /* NOTE(review): dangling 'else' — the DBGCMD below runs only when neither route-store branch above was taken; confirm this is intended */
36972 +       
36973 +       DBGCMD (ctxt, DBG_DPROC, elan4_display_dproc_trap (ctxt, DBG_DPROC, "user_dproc_trap", trap));
36974 +
36975 +       if (!DPROC_PrefetcherFault (status) && DPROC_TrapType(status) == DmaProcFailCountError && !RING_QUEUE_FULL (uctx->uctx_dmaQ))
36976 +       {
36977 +           trap->tr_desc.dma_typeSize |= DMA_FailCount (user_dproc_retry_count);
36978 +
36979 +           *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = trap->tr_desc;
36980 +    
36981 +           (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
36982 +       }
36983 +       else
36984 +       {
36985 +           if (RING_QUEUE_ADD (uctx->uctx_dprocTrapQ))
36986 +               user_start_stopping (uctx, UCTX_DPROC_QUEUE_FULL);
36987 +       }
36988 +    }
36989 +
36990 +    user_signal_trap (uctx);
36991 +
36992 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36993 +}
36994 +
36995 +static void
36996 +user_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
36997 +{
36998 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
36999 +    unsigned long flags;
37000 +
37001 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37002 +
37003 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_tprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
37004 +    {
37005 +       PRINTF (uctx, DBG_TPROC, "user_tproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
37006 +
37007 +       uctx->uctx_status |= UCTX_TPROC_QUEUE_ERROR;
37008 +    }
37009 +    else
37010 +    {
37011 +       elan4_extract_tproc_trap (ctxt->ctxt_dev, status, RING_QUEUE_BACK (uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps));
37012 +       
37013 +       DBGCMD (ctxt, DBG_TPROC, elan4_display_tproc_trap (ctxt, DBG_TPROC, "user_tproc_trap", RING_QUEUE_BACK (uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps)));
37014 +       
37015 +       if (RING_QUEUE_ADD (uctx->uctx_tprocTrapQ))
37016 +           user_start_stopping (uctx, UCTX_TPROC_QUEUE_FULL);
37017 +    }
37018 +    user_signal_trap (uctx);
37019 +
37020 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37021 +}
37022 +
37023 +static void
37024 +user_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
37025 +{
37026 +    USER_CTXT       *uctx  = (USER_CTXT *) ctxt;
37027 +    USER_IPROC_TRAP *utrap = &uctx->uctx_iprocTrap[unit & 1];
37028 +    unsigned long    flags;
37029 +
37030 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37031 +
37032 +    ASSERT (utrap->ut_state == UTS_IPROC_RUNNING);
37033 +
37034 +    elan4_extract_iproc_trap (ctxt->ctxt_dev, status, &utrap->ut_trap, unit);
37035 +    DBGCMD (ctxt, DBG_IPROC, elan4_display_iproc_trap (ctxt, DBG_IPROC, "user_iproc_trap", &utrap->ut_trap));
37036 +
37037 +    utrap->ut_state = UTS_IPROC_TRAPPED;
37038 +
37039 +    user_start_nacking (uctx, unit ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED); /* NOTE(review): unit==0 selects uctx_iprocTrap[0] yet flags CH1_TRAPPED — verify the channel/flag pairing against the resume path */
37040 +
37041 +    user_signal_trap (uctx);
37042 +
37043 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37044 +}
37045 +
37046 +static void
37047 +user_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
37048 +{
37049 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
37050 +    unsigned long flags;
37051 +
37052 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37053 +
37054 +    PRINTF1 (uctx, DBG_TRAP, "user_interrupt: cookie=%llx\n", cookie);
37055 +
37056 +    switch (cookie)
37057 +    {
37058 +    case ELAN4_INT_COOKIE_DDCQ:
37059 +       uctx->uctx_ddcq_intr--;
37060 +
37061 +       user_signal_trap (uctx);
37062 +       break;
37063 +
37064 +    default:
37065 +       if (uctx->uctx_intcookie_table == NULL || intcookie_fire (uctx->uctx_intcookie_table, cookie) != 0)
37066 +       {
37067 +           PRINTF2 (uctx, DBG_TRAP, "user_interrupt: cookie=%llx %s\n", cookie, uctx->uctx_intcookie_table ? "not found" : "no table");
37068 +           uctx->uctx_status |= UCTX_EPROC_QUEUE_ERROR;
37069 +           user_signal_trap (uctx);
37070 +       }
37071 +       break;
37072 +    }
37073 +
37074 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37075 +}
37076 +
37077 +static void
37078 +user_neterrmsg (ELAN4_CTXT *ctxt, ELAN4_NETERR_MSG *msg)
37079 +{
37080 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
37081 +    unsigned long flags;
37082 +
37083 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37084 +    
37085 +    if (! RING_QUEUE_FULL (uctx->uctx_msgQ))
37086 +    {
37087 +       memcpy (RING_QUEUE_BACK (uctx->uctx_msgQ, uctx->uctx_msgs), msg, sizeof (ELAN4_NETERR_MSG));
37088 +
37089 +       (void) RING_QUEUE_ADD (uctx->uctx_msgQ);
37090 +    
37091 +       user_signal_trap (uctx);
37092 +    }
37093 +    
37094 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37095 +}
37096 +
37097 +ELAN4_TRAP_OPS user_trap_ops = 
37098 +{
37099 +    user_eproc_trap,
37100 +    user_cproc_trap,
37101 +    user_dproc_trap,
37102 +    user_tproc_trap,
37103 +    user_iproc_trap,
37104 +    user_interrupt,
37105 +    user_neterrmsg,
37106 +};
37107 +
37108 +static int
37109 +deliver_trap (ELAN4_USER_TRAP *utrapp, int type, unsigned proc, void *trap, ...)
37110 +{
37111 +    register int i, len;
37112 +    va_list ap;
37113 +
37114 +    PRINTF (NULL, DBG_TRAP, "deliver_trap: type=%d proc=%d\n", type, proc);
37115 +
37116 +    switch (proc)
37117 +    {
37118 +    case UTS_CPROC:      len = sizeof (ELAN4_CPROC_TRAP); break;
37119 +    case UTS_DPROC:      len = sizeof (ELAN4_DPROC_TRAP); break;
37120 +    case UTS_EPROC:      len = sizeof (ELAN4_EPROC_TRAP); break;
37121 +    case UTS_IPROC:      len = sizeof (ELAN4_IPROC_TRAP); break;
37122 +    case UTS_TPROC:      len = sizeof (ELAN4_TPROC_TRAP); break;
37123 +    case UTS_NETERR_MSG: len = sizeof (ELAN4_NETERR_MSG); break;
37124 +    default:             len = 0; break;
37125 +    }
37126 +
37127 +    if (put_user (type, &utrapp->ut_type) || put_user (proc, &utrapp->ut_proc) || copy_to_user (&utrapp->ut_trap, trap, len))
37128 +       return (UTS_EFAULT);
37129 +
37130 +    va_start (ap, trap);
37131 +    for (i = 0; i < sizeof (utrapp->ut_args)/sizeof (utrapp->ut_args[0]); i++)
37132 +       if (put_user (va_arg (ap, unsigned long), &utrapp->ut_args[i]))
37133 +           return (UTS_EFAULT);
37134 +    va_end (ap);
37135 +
37136 +    return (type);
37137 +}
37138 +
/*
 * user_pagefault: resolve an Elan page fault described by 'farea' by
 * loading translations for the faulting address range.
 *
 * Maintains a small per-context MRU list of FAULT_SAVE records used to
 * detect sequential faulting: each record stores the *predicted* next
 * fault page (last fault page + current prefetch window).  When a fault
 * lands exactly on the prediction the window doubles (capped at
 * max_fault_pages); otherwise it resets to min_fault_pages.
 *
 * Returns 0 on success, -EFAULT on unrecoverable walk errors or when
 * even the single faulting page cannot be loaded.
 */
static int
user_pagefault (USER_CTXT *uctx, E4_FaultSave *farea)
{
    E4_Addr      addr = farea->FaultAddress;
    E4_uint32    fsr  = FaultSaveFSR(farea->FSRAndFaultContext);
    FAULT_SAVE  *entry;
    FAULT_SAVE **predp;
    int count;

    PRINTF2 (uctx, DBG_FAULT, "user_pagefault: addr=%llx fsr %x\n", (unsigned long long) addr, fsr);

    if ((fsr & FSR_FaultForBadData) != 0)			/* Memory ECC error during walk */
    {
	PRINTF0 (uctx, DBG_FAULT, "user_pagefault: ECC error during walk\n");
	return (-EFAULT);
    }

    if ((fsr & FSR_FaultForMaxChainCount) != 0)			/* Have walked a chain of 1024 items */
    {
	PRINTF0 (uctx, DBG_FAULT, "user_pagefault: pte chain too long\n");
	return (-EFAULT);
    }

    if (uctx->uctx_num_fault_save)
    {
	spin_lock (&uctx->uctx_fault_lock);
	/* Search for a record predicting this fault page; the loop condition
	 * stops at the LAST list element, so when no record matches the
	 * least-recently-used (tail) entry is recycled below. */
	for( predp = &uctx->uctx_fault_list; (entry = *predp)->next != NULL; predp = &entry->next)
	{
	    if (entry->addr == (addr & ~((E4_Addr) PAGE_SIZE-1)))
		break;
	}

	/* Unlink 'entry' and move it to the head of the list (MRU order) */
	*predp = entry->next;
	entry->next = uctx->uctx_fault_list;
	uctx->uctx_fault_list = entry;

	if (entry->addr == (addr & ~((E4_Addr) PAGE_SIZE-1)))
	{
	    /* Sequential fault detected - double the prefetch window */
	    if ((entry->count <<= 1) > max_fault_pages)
		entry->count = max_fault_pages;
	}
	else
	    entry->count = min_fault_pages;

	/* Record the predicted next fault page for this access stream */
	entry->addr = (addr & ~((E4_Addr) PAGE_SIZE-1))+(entry->count * PAGE_SIZE);
	count = entry->count;
	spin_unlock (&uctx->uctx_fault_lock);

	/* NOTE(review): PAGESIZE here vs PAGE_SIZE everywhere else in this
	 * function - presumably the same value via a qsnet header; confirm */
	if (user_load_range (uctx, addr & ~((E4_Addr) PAGE_SIZE-1), count * PAGESIZE, fsr) == 0)
	    return 0;

	/* else pre-faulting has failed, try just this page */
    }

    return (user_load_range (uctx, addr & ~((E4_Addr) PAGE_SIZE-1), PAGE_SIZE, fsr));

}
37196 +
37197 +static int
37198 +queue_dma_for_retry (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, E4_DMA *dma)
37199 +{
37200 +    unsigned long flags;
37201 +
37202 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37203 +
37204 +    if (RING_QUEUE_FULL (uctx->uctx_dmaQ))
37205 +    {
37206 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37207 +       
37208 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, UCTX_DPROC_QUEUE_OVERFLOW));
37209 +    }
37210 +
37211 +    *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = *dma;
37212 +    
37213 +    (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
37214 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37215 +
37216 +    return (UTS_FINISHED);
37217 +}
37218 +
37219 +static int
37220 +queue_thread_for_retry (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, E4_ThreadRegs *regs)
37221 +{
37222 +    unsigned long flags;
37223 +    
37224 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37225 +
37226 +    if (RING_QUEUE_FULL (uctx->uctx_threadQ))
37227 +    {
37228 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37229 +
37230 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, UCTX_TPROC_QUEUE_OVERFLOW));
37231 +    }
37232 +
37233 +    *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = *regs;
37234 +    (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
37235 +    
37236 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37237 +
37238 +    return (UTS_FINISHED);
37239 +}
37240 +
/*
 * fixup_eproc_trap: complete the work a trapped event-processor operation
 * left unfinished by issuing an equivalent command on the user's device
 * driver command queue (ddcq).
 *
 * trap      - the event processor trap state (fault area + saved event).
 * waitevent - non-zero when the original operation was a waitevent (so a
 *             reissue must be a waitevent rather than a setevent).
 *
 * Returns 1 once a fixup command has been issued, or 0 when the ddcq has
 * no room (caller must requeue the trap and retry later).
 */
static int
fixup_eproc_trap (USER_CTXT *uctx, ELAN4_EPROC_TRAP *trap, int waitevent)
{
    E4_FaultSave *farea = &trap->tr_faultarea;
    E4_uint32     fsr   = FaultSaveFSR(farea->FSRAndFaultContext);
    E4_uint64     CountAndType;
    E4_uint64     CopySource;
    E4_uint64     CopyDest;

    /*
     * Event processor can trap as follows :
     *   1) Event location read		(faddr == event location & Event Permission)
     *   2) Event location write	(faddr == event location & Event Permission)
     *   3) Copy Source read		Read Access
     *   4) Copy/Write dest write	other
     *
     *  NOTE - it is possible to see both 3) and 4) together - but only with physical errors.
     */
    if (AT_Perm(fsr) == AT_PermLocalDataRead || AT_Perm(fsr) == AT_PermLocalDataWrite)
    {
	/* 
	 * We complete the copy/write by issuing a waitevent 0 of the approriate type.
	 *   - NB mask off bottom bits of EventAddr in case of partial setevent
	 */
	E4_uint64 EventAddr = trap->tr_eventaddr & ~((E4_uint64) E4_EVENT_ALIGN-1);

	/* need 4 command slots for a waitevent */
	if (! user_ddcq_check (uctx, 4))
	    return (0);

	if ((trap->tr_event.ev_CountAndType & E4_EVENT_COPY_TYPE_MASK) == E4_EVENT_WRITE)
	{
	    /* case 4) faulted on write word to destination */

	    CountAndType = trap->tr_event.ev_CountAndType & E4_EVENT_TYPE_MASK;

	    PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: write Event=%llx CountAndType=%llx\n", EventAddr, CountAndType);
	    PRINTF (uctx, DBG_TRAP, "                  WritePtr=%llx WriteValue=%llx\n", 
		    trap->tr_event.ev_WritePtr, trap->tr_event.ev_WriteValue);

	    user_ddcq_waitevent (uctx, EventAddr, CountAndType, trap->tr_event.ev_WritePtr, trap->tr_event.ev_WriteValue);
	}
	else
	{
	    /* case 3) or case 4) faulted on read/write of copy */
	    if (AT_Perm (fsr) == AT_PermLocalDataRead)
	    {
		/* faulted reading the source: back the source pointer up one
		 * block and reissue with the remaining copy size */
		CountAndType = (trap->tr_event.ev_CountAndType & E4_EVENT_DATA_TYPE_MASK) | EPROC_CopySize(trap->tr_status);
		CopySource   = trap->tr_event.ev_CopySource - EVENT_COPY_BLOCK_SIZE;
		CopyDest     = trap->tr_event.ev_CopyDest;
	    }
	    else
	    {
		/* faulted writing the destination: back BOTH pointers up one
		 * block and add that block back into the remaining size */
		CountAndType = ((trap->tr_event.ev_CountAndType & E4_EVENT_DATA_TYPE_MASK) | 
				((EPROC_CopySize(trap->tr_status) + EVENT_COPY_NDWORDS) & E4_EVENT_COPY_SIZE_MASK));
		CopySource   = trap->tr_event.ev_CopySource - EVENT_COPY_BLOCK_SIZE;
		CopyDest     = trap->tr_event.ev_CopyDest - EVENT_COPY_BLOCK_SIZE;
	    }

	    PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: copy Event=%llx CountAndType=%llx\n", EventAddr, CountAndType);
	    PRINTF (uctx, DBG_TRAP, "                  CopySource=%llx CopyDest=%llx\n", CopySource, CopyDest);

	    user_ddcq_waitevent (uctx, EventAddr, CountAndType, CopySource, CopyDest);
	}
    }
    else
    {
	/* low bits of the trapped address hold the (partial) event count */
	E4_uint64 EventAddr  = trap->tr_eventaddr & ~((E4_uint64) E4_EVENT_ALIGN-1);
	E4_uint32 EventCount = trap->tr_eventaddr & (E4_EVENT_ALIGN-1);

	/* case 1) or 2) - just reissue the event */
	if (! waitevent)
	    PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: setevent EventAddr=%llx EventCount=%x\n", EventAddr, EventCount);
	else
	{
	    PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: waitevent Event=%llx CountAndType=%llx\n", EventAddr, trap->tr_event.ev_CountAndType);
	    PRINTF (uctx, DBG_TRAP, "                  Param[0]=%llx Param[1]=%llx\n",
		     trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
	}

	/* waitevent needs 4 command slots, setevent only 2 */
	if (! user_ddcq_check (uctx, waitevent ? 4 : 2))
	    return (0);

	if (waitevent)
	    user_ddcq_waitevent (uctx, EventAddr, trap->tr_event.ev_CountAndType, 
				  trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
	else
	    user_ddcq_seteventn (uctx, EventAddr, EventCount);
    }

    return (1);
}
37332 +
37333 +
37334 +static int
37335 +resolve_eproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_EPROC_TRAP *trap)
37336 +{
37337 +    switch (EPROC_TrapType (trap->tr_status))
37338 +    {
37339 +    case EventProcNoFault:
37340 +       PRINTF (uctx, DBG_TRAP, "resolve_eproc_trap: EventProcNoFault\n");
37341 +
37342 +       return (UTS_FINISHED);
37343 +       
37344 +    case EventProcAddressAlignment:
37345 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_EPROC, trap));
37346 +
37347 +    case EventProcMemoryFault:
37348 +       PRINTF (uctx, DBG_TRAP, "resolve_eproc_trap: EventProcMemoryFault @ %llx\n", trap->tr_faultarea.FaultAddress);
37349 +
37350 +       if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
37351 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_EPROC, trap));
37352 +
37353 +       return (UTS_FINISHED);
37354 +       
37355 +    case EventProcCountWrapError:
37356 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_EPROC, trap));
37357 +
37358 +    default:
37359 +       printk ("resolve_eproc_trap: bad trap type %d\n", EPROC_TrapType (trap->tr_status));
37360 +       BUG();
37361 +    }
37362 +
37363 +    return (UTS_FINISHED);
37364 +}
37365 +
/*
 * resolve_cproc_trap: handle a trap on one user command queue (ucq).
 *
 * On return the queue is left in one of the states:
 *   UCQ_NEEDS_RESTART - trap resolved, queue can be restarted
 *   UCQ_STOPPED       - unrecoverable; the trap is delivered to user space
 *
 * Returns UTS_FINISHED when resolved, UTS_RESCHEDULE when the event fixup
 * must be retried later, or the status from deliver_trap().
 */
static int
resolve_cproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, USER_CQ *ucq)
{
    ELAN4_DEV        *dev    = uctx->uctx_ctxt.ctxt_dev;
    ELAN4_CPROC_TRAP *trap   = &ucq->ucq_trap;
    E4_uint64         command;
    int               res;

    PRINTF2 (uctx, DBG_CPROC, "resolve_cproc_trap: cq %p is trapped - Status %lx\n", ucq, trap->tr_status);

    switch (CPROC_TrapType (trap->tr_status))
    {
    case CommandProcDmaQueueOverflow:
	PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcDmaQueueOverflow\n");
	/*
	 * XXXX: should wait for the queue to become emptier if we're 
	 *       responsible for it being very full
	 */
	ucq->ucq_state = UCQ_NEEDS_RESTART;
	break;

    case CommandProcInterruptQueueOverflow:
	PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcInterruptQueueOverflow\n");
	/*
	 * XXXX: should wait for the queue to become emptier if we're
	 *       responsible for it being very full
	 */
	ucq->ucq_state = UCQ_NEEDS_RESTART;
	break;

    case CommandProcWaitTrap:
	PRINTF0 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcWaitTrap\n");

	/* a waitevent trapped: resolve the embedded event trap, then reissue
	 * the waitevent via the ddcq */
	if ((res = resolve_eproc_trap (uctx, utrapp, &trap->tr_eventtrap)) != UTS_FINISHED)
	{
	    ucq->ucq_state = UCQ_STOPPED;

	    return (res);
	}

	/* no ddcq space - come back later */
	if (fixup_eproc_trap (uctx, &trap->tr_eventtrap, 1) == 0)
	    return UTS_RESCHEDULE;

	ucq->ucq_state = UCQ_NEEDS_RESTART;
	break;

    case CommandProcMemoryFault:
	PRINTF1 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcMemoryFault at %llx\n", trap->tr_faultarea.FaultAddress);
	if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
	{
	    ucq->ucq_state = UCQ_STOPPED;

	    return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
	}

	ucq->ucq_state = UCQ_NEEDS_RESTART;
	break;

    case CommandProcRouteFetchFault:
	/* extract the virtual process from the trapped open command and
	 * (re)load a route for it */
	command = elan4_trapped_open_command (dev, ucq->ucq_cq);

	PRINTF1 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcRouteFetchFault to vp %d\n", (int) (command >> 32));

	if (user_resolvevp (uctx, (unsigned) (command >> 32)) != 0)
	{
	    ucq->ucq_state = UCQ_STOPPED;

	    return (deliver_trap (utrapp, UTS_INVALID_VPROC, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq), (long) (command >> 32)));
	}

	ucq->ucq_state = UCQ_NEEDS_RESTART;
	break;

    case CommandProcFailCountZero:
	PRINTF0 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcFailCountZero - reset failcount\n");

	/* NOTE - we must not modify the ChannelNotCompleted bits - so modify */
	/*        the restart count with a part-word store */
	elan4_updatecq (dev, ucq->ucq_cq, ucq->ucq_cq->cq_perm, user_cproc_retry_count);

	ucq->ucq_state = UCQ_NEEDS_RESTART;
	break;

    case CommandProcAddressAlignment:
	ucq->ucq_state = UCQ_STOPPED;

	return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));

    case CommandProcPermissionTrap:
    {
	/* read the queue's current control word from SDRAM to distinguish a
	 * "resume from stop" (permission was changed) from a real denial */
	sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(ucq->ucq_cq) * sizeof (E4_CommandQueueDesc));
	E4_uint64   control = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));

	PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcPermissionTrap - %s\n",
		(control & CQ_PermissionMask) != ucq->ucq_cq->cq_perm ? "resume from stop" : "permission denied");

	if ((control & CQ_PermissionMask) == ucq->ucq_cq->cq_perm)
	    return (deliver_trap (utrapp, UTS_PERMISSION_DENIED, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));

	elan4_updatecq (dev, ucq->ucq_cq, ucq->ucq_cq->cq_perm, 0);

	ucq->ucq_state = UCQ_NEEDS_RESTART;
	break;
    }

    case CommandProcBadData:
	ucq->ucq_state = UCQ_STOPPED;

	return (deliver_trap (utrapp, UTS_INVALID_COMMAND, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));

    default:
	ucq->ucq_state = UCQ_STOPPED;

	return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
    }

    return (UTS_FINISHED);
}
37484 +
/*
 * resolve_dproc_trap: handle a DMA processor trap.
 *
 * Recoverable cases (page fault resolved, route loaded, retry/ack errors,
 * queue overflow) requeue the DMA descriptor for retry; unrecoverable
 * cases are delivered to user space.  Per-node timeout/ack-error counters
 * are bumped for diagnostic purposes.
 */
static int
resolve_dproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_DPROC_TRAP *trap)
{
    ELAN_LOCATION location;
    int node;

    if (DPROC_PrefetcherFault (trap->tr_status))
    {
	PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: PrefetcherFault at %llx\n", trap->tr_prefetchFault.FaultAddress);

	if (user_pagefault (uctx, &trap->tr_prefetchFault) != 0)
	    return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_DPROC, trap));

	return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc));
    }

    switch (DPROC_TrapType (trap->tr_status))
    {
    case DmaProcRouteFetchFault:
	PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcRouteFetchFault vp %d\n", trap->tr_desc.dma_vproc);

	if (user_resolvevp (uctx, trap->tr_desc.dma_vproc) != 0)
	    return (deliver_trap (utrapp, UTS_INVALID_VPROC, UTS_DPROC, trap, trap->tr_desc.dma_vproc));

	return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* immediate */));

    case DmaProcFailCountError:
	PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcFailCountError - vp %d cookie %llx\n",
		trap->tr_desc.dma_vproc, trap->tr_desc.dma_cookie);

	/* rearm the descriptor's retry budget before requeueing */
	trap->tr_desc.dma_typeSize |= DMA_FailCount (user_dproc_retry_count);

	kmutex_lock (&uctx->uctx_vpseg_lock);
	location = user_process2location(uctx, NULL, trap->tr_desc.dma_vproc);
	kmutex_unlock (&uctx->uctx_vpseg_lock);
	node = location.loc_node;

	/* NOTE(review): bound uses <= pos_nodes - confirm the counter arrays
	 * are sized pos_nodes+1, otherwise this is off-by-one */
	if ((node >= 0) && (node <= uctx->uctx_ctxt.ctxt_dev->dev_position.pos_nodes))
	{ 
	    kmutex_lock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
	    uctx->uctx_ctxt.ctxt_dev->dev_dproc_timeout[node]++;
	    kmutex_unlock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
	}

	return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));

    case DmaProcPacketAckError:
	PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcPacketAckError - %d%s\n", DPROC_PacketAckValue (trap->tr_status), 
		DPROC_PacketTimeout (trap->tr_status) ? " timeout" : "");

	kmutex_lock (&uctx->uctx_vpseg_lock);
	location = user_process2location(uctx, NULL, trap->tr_desc.dma_vproc);
	kmutex_unlock (&uctx->uctx_vpseg_lock);
	node = location.loc_node;

	/* NOTE(review): same <= bound as above - confirm array sizing */
	if ((node >= 0) && (node <= uctx->uctx_ctxt.ctxt_dev->dev_position.pos_nodes))
	{
	    kmutex_lock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
	    uctx->uctx_ctxt.ctxt_dev->dev_ack_errors[node]++;
	    kmutex_unlock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
	}

	return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));

    case DmaProcQueueOverflow:
	PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcQueueOverflow\n");
	return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));

    case DmaProcRunQueueReadFault:
	return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_DPROC, trap));

    default:
	/* unknown hardware trap type - fatal driver inconsistency */
	printk ("resolve_dproc_trap: unknown trap type : %d\n", DPROC_TrapType(trap->tr_status));
	BUG();
    }
    return UTS_FINISHED;
}
37562 +
/*
 * resolve_tproc_trap: handle a thread processor trap.
 *
 * Unrecoverable conditions are delivered to user space; otherwise the
 * thread's dirty registers are spilled to its Elan stack and the thread
 * is queued for restart through the trap-restart trampoline
 * (uctx_trestart_addr), which reloads state from regs[2..5].
 */
int
resolve_tproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_TPROC_TRAP *trap)
{
    PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: trap state = %lx\n", trap->tr_state);

    if (trap->tr_state & TS_TrapForTooManyInstructions)
	return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_TPROC, trap));

    if (trap->tr_state & TS_Unimplemented)
	return (deliver_trap (utrapp, UTS_UNIMP_INSTR, UTS_TPROC, trap));

    if (trap->tr_state & TS_DataAlignmentError)
	return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_TPROC, trap));

    if ((trap->tr_state & TS_InstAccessException) && user_pagefault (uctx, &trap->tr_instFault) != 0)
	return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_TPROC, trap));

    if ((trap->tr_state & TS_DataAccessException) && user_pagefault (uctx, &trap->tr_dataFault) != 0)
	return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_TPROC, trap));

    /* If we're restarting from trap - then just need to re-issue it */
    if (trap->tr_pc == uctx->uctx_trestart_addr || (trap->tr_state & TS_TrappedFlag))
    {
	PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: trapped in trap code PC=%llx SP=%llx\n", trap->tr_pc, trap->tr_regs[1]);

	trap->tr_regs[0] = uctx->uctx_trestart_addr;
    }
    else
    {
	/* regs[1] is the thread's stack pointer (Elan address) - map it so
	 * we can spill the dirty registers into the trampoline's save area */
	E4_uint64 *sp = (E4_uint64 *) user_elan2main (uctx, trap->tr_regs[1]);
	int        i, reload;

	/* need to store the register on the stack see */
	/*  lib_tproc_trampoline_elan4_thread.S for stack layout */
#define TS_STACK_OFF(REG)	((((REG)&7)) - (((REG)>>3)*8) - 8)
	for (reload = 0, i = 0; i < 64; i++)
	{
	    if (trap->tr_dirty & ((E4_uint64) 1 << i))
	    {
		PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: %%r%d  [%016llx] -> %p\n", i, trap->tr_regs[i], &sp[TS_STACK_OFF(i)]);

		sulonglong ((u64 *) &sp[TS_STACK_OFF(i)], trap->tr_regs[i]);

		/* record which register window (group of 8) needs reloading */
		reload |= (1 << (i >> 3));
	    }
	}
#undef TS_STACK_OFF

	PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: pc %llx npc %llx\n", trap->tr_pc, trap->tr_npc);
	PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: CC %x reload %x\n", (int) (trap->tr_state >> TS_XCCshift), reload);

	/* hand the trampoline: entry PC, trapped pc/npc, condition codes and
	 * the reload mask (layout dictated by the trampoline assembly) */
	trap->tr_regs[0] = uctx->uctx_trestart_addr;
	trap->tr_regs[2] = trap->tr_pc;
	trap->tr_regs[3] = trap->tr_npc;
	trap->tr_regs[4] = (trap->tr_state >> TS_XCCshift) & TS_XCCmask;
	trap->tr_regs[5] = reload;
    }

    return (queue_thread_for_retry (uctx, utrapp, (E4_ThreadRegs *) trap->tr_regs));
}
37623 +
/*
 * resolve_iproc_trap: handle an input (network receive) processor trap on
 * the given channel (0 or 1).
 *
 * After classifying the trapped transaction, decides the new utrap state:
 *   UTS_IPROC_RUNNING        - trap cleared, stop NACKing the channel
 *   UTS_IPROC_EXECUTE_PACKET - remaining transactions must be simulated
 *   UTS_IPROC_NETWORK_ERROR  - EOP error with an identify: needs the
 *                              network error recovery protocol
 *   UTS_IPROC_STOPPED        - unrecoverable, trap delivered to user space
 */
static int
resolve_iproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int channel)
{
    USER_IPROC_TRAP  *utrap = &uctx->uctx_iprocTrap[channel];
    ELAN4_IPROC_TRAP *trap  = &utrap->ut_trap;
    unsigned long     flags;

    /* decode the raw transaction state before classifying */
    elan4_inspect_iproc_trap (trap);

    if (trap->tr_flags & TR_FLAG_TOOMANY_TRANS)
	return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));

    ASSERT (trap->tr_trappedTrans >= 0 && trap->tr_trappedTrans < trap->tr_numTransactions);

    switch (IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType))
    {
    case InputMemoryFault:
	if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
	{
	    utrap->ut_state = UTS_IPROC_STOPPED;

	    return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_IPROC, trap, channel));
	}
	break;

    case InputDmaQueueOverflow:
    case InputEventEngineTrapped:
	/* nothing to do for these 2 - restarting will simulate the transactions */
	break;

    case InputEopErrorOnWaitForEop:
    case InputEopErrorTrap:
	break;

    case InputCrcErrorAfterPAckOk:
	PRINTF (DBG_DEVICE, DBG_IPROC, "InputCrcErrorAfterPAckOk: flags %x\n", trap->tr_flags);

	ASSERT ((trap->tr_flags & TR_FLAG_ACK_SENT) && ((trap->tr_flags & (TR_FLAG_DMA_PACKET|TR_FLAG_BAD_TRANS)) ||
							((trap->tr_flags & TR_FLAG_EOP_ERROR) && trap->tr_identifyTrans == TR_TRANS_INVALID)));
	break;

    case InputDiscardAfterAckOk:
	return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));

    case InputAddressAlignment:
	return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_IPROC, trap, channel));

    case InputInvalidTransType:
	return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));

    default:
	/* unknown hardware trap type - fatal driver inconsistency */
	printk ("resolve_iproc_trap: unknown trap type %d\n", IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType));
	BUG();
	/* NOTREACHED */
    }

    if (! (trap->tr_flags & TR_FLAG_ACK_SENT) || (trap->tr_flags & TR_FLAG_EOP_BAD))
    {
	/* sender will retransmit - safe to just resume input on the channel.
	 * NOTE(review): 'channel ? CH0 : CH1' looks inverted, but the same
	 * mapping is used consistently in all three call sites here - confirm
	 * against user_stop_nacking()'s flag definitions */
	spin_lock_irqsave (&uctx->uctx_spinlock, flags);

	utrap->ut_state = UTS_IPROC_RUNNING;

	user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);

	spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
    }
    else if ((trap->tr_flags & (TR_FLAG_DMA_PACKET | TR_FLAG_BAD_TRANS)) || ((trap->tr_flags & TR_FLAG_EOP_ERROR) && (trap->tr_identifyTrans == TR_TRANS_INVALID)))
    {
	/* 
	 * TR_FLAG_DMA_PACKET	means a DMA packet has faulted.
	 *
	 * TR_FLAG_BAD_TRANS	means we have a transaction with a bad CRC after the transaction
	 *			which sent the ack - this implies it's an overlapped ack DMA packet
	 *
	 * TR_FLAG_EOP_ERROR	means we've received an EOP reset - if we hadn't seen an identify
	 *			transaction then it's a DMA packet.
	 *
	 * To ensure that the DMA processor works correctly the next packet must be NACKed to 
	 * cause it to resend this one.
	 */
	PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: %s during DMA packet\n",
		(trap->tr_flags & TR_FLAG_BAD_TRANS) ? "BadTransaction" : (trap->tr_flags & TR_FLAG_EOP_ERROR) ? "EopError" : "trap");

	spin_lock_irqsave (&uctx->uctx_spinlock, flags);

	if (trap->tr_flags & TR_FLAG_DMA_PACKET)
	{
	    if (! (trap->tr_flags & TR_FLAG_BAD_TRANS))
		utrap->ut_state = UTS_IPROC_EXECUTE_PACKET;
	    else
	    {
		kcondvar_t waithere;

		/* We must ensure that the next packet is always nacked, so
		 * we wait here for an output timeout before dropping the 
		 * context filter - we just pause here for 4 mS */
		kcondvar_init (&waithere);
		kcondvar_timedwait (&waithere, &uctx->uctx_spinlock, &flags, lbolt + (HZ/250) + 1);;
		kcondvar_destroy (&waithere);

		utrap->ut_state = UTS_IPROC_RUNNING;

		user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
	    }
	}
	else
	{
	    utrap->ut_state = UTS_IPROC_RUNNING;

	    user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
	}

	spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
    }
    else if (trap->tr_flags & TR_FLAG_EOP_ERROR)
    {
	/* ack sent and an identify seen: peer must run network error fixup */
	PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: EopError with identify\n");

	utrap->ut_state = UTS_IPROC_NETWORK_ERROR;
    }
    else
    {
	/* ack sent, good EOP: simulate the remaining transactions */
	PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: execute packet\n");

	utrap->ut_state = UTS_IPROC_EXECUTE_PACKET;
    }

    return UTS_FINISHED;
}
37753 +
37754 +
37755 +static int
37756 +resolve_cproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
37757 +{
37758 +    struct list_head *entry;
37759 +    int res = UTS_FINISHED;
37760 +
37761 +    kmutex_lock (&uctx->uctx_cqlock);
37762 +    list_for_each (entry, &uctx->uctx_cqlist) {
37763 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
37764 +
37765 +       if (ucq->ucq_state == UCQ_TRAPPED)
37766 +       {
37767 +           res = resolve_cproc_trap (uctx, utrapp, ucq);
37768 +
37769 +           if (res != UTS_FINISHED)
37770 +               break;
37771 +       }
37772 +
37773 +       if (ucq->ucq_errored)
37774 +       {
37775 +           ucq->ucq_errored = 0;
37776 +           res = deliver_trap (utrapp, UTS_CPROC_ERROR, UTS_CPROC, &ucq->ucq_trap, elan4_cq2idx(ucq->ucq_cq));
37777 +           break;
37778 +       }
37779 +    }
37780 +    kmutex_unlock (&uctx->uctx_cqlock);
37781 +
37782 +    return (res);
37783 +}
37784 +
/*
 * resolve_eproc_traps: drain the context's event processor trap queue.
 *
 * Each trap is copied out under the spinlock, then resolved and fixed up
 * with the lock dropped.  If the ddcq has no room for the fixup command
 * the trap is pushed back on the FRONT of the queue (preserving order)
 * and UTS_RESCHEDULE is returned so the caller retries later.
 */
static int
resolve_eproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
{
    unsigned long flags;
    int res;

    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
    while (! RING_QUEUE_EMPTY (uctx->uctx_eprocTrapQ))
    {
	/* take a private copy so the lock can be dropped while resolving */
	ELAN4_EPROC_TRAP trap = *RING_QUEUE_FRONT (uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps);

	(void) RING_QUEUE_REMOVE (uctx->uctx_eprocTrapQ);

	spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);

	if ((res = resolve_eproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
	    return (res);

	if (fixup_eproc_trap (uctx, &trap, 0) == 0)
	{
	    PRINTF (uctx, DBG_EPROC, "resolve_eproc_trap: could not fixup eproc trap - requeue it\n");

	    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
	    if (RING_QUEUE_REALLY_FULL(uctx->uctx_eprocTrapQ))
		uctx->uctx_status |= UCTX_EPROC_QUEUE_OVERFLOW;
	    else
	    {
		/* requeue at the front so trap ordering is preserved */
		*RING_QUEUE_FRONT(uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps) = trap;

		(void) RING_QUEUE_ADD_FRONT(uctx->uctx_eprocTrapQ);
	    }
	    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);

	    return UTS_RESCHEDULE;
	}

	spin_lock_irqsave (&uctx->uctx_spinlock, flags);
    }

    /* queue drained - lift the back-pressure stop if it was applied */
    if (uctx->uctx_status & UCTX_EPROC_QUEUE_FULL)
	user_stop_stopping (uctx, UCTX_EPROC_QUEUE_FULL);

    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
    return (UTS_FINISHED);
}
37830 +           
37831 +static int
37832 +resolve_dproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
37833 +{
37834 +    unsigned long flags;
37835 +    int res;
37836 +    
37837 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37838 +    while (! RING_QUEUE_EMPTY (uctx->uctx_dprocTrapQ))
37839 +    {
37840 +       ELAN4_DPROC_TRAP trap = *RING_QUEUE_FRONT(uctx->uctx_dprocTrapQ, uctx->uctx_dprocTraps);
37841 +       
37842 +       (void) RING_QUEUE_REMOVE (uctx->uctx_dprocTrapQ);
37843 +
37844 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37845 +
37846 +       if ((res = resolve_dproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
37847 +           return (res);
37848 +       
37849 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37850 +    }
37851 +
37852 +    if (uctx->uctx_status & UCTX_DPROC_QUEUE_FULL)
37853 +       user_stop_stopping (uctx, UCTX_DPROC_QUEUE_FULL);
37854 +    
37855 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37856 +    return (UTS_FINISHED);
37857 +}
37858 +
37859 +static int
37860 +resolve_tproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
37861 +{
37862 +    unsigned long flags;
37863 +    int res;
37864 +    
37865 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37866 +    while (! RING_QUEUE_EMPTY (uctx->uctx_tprocTrapQ))
37867 +    {
37868 +       ELAN4_TPROC_TRAP trap = *RING_QUEUE_FRONT(uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps);
37869 +       
37870 +       (void) RING_QUEUE_REMOVE (uctx->uctx_tprocTrapQ);
37871 +
37872 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37873 +
37874 +       if ((res = resolve_tproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
37875 +           return (res);
37876 +       
37877 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37878 +    }
37879 +
37880 +    if (uctx->uctx_status & UCTX_TPROC_QUEUE_FULL)
37881 +       user_stop_stopping (uctx, UCTX_TPROC_QUEUE_FULL);
37882 +
37883 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37884 +    return (UTS_FINISHED);
37885 +}
37886 +
37887 +static int
37888 +resolve_iproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
37889 +{
37890 +    unsigned long flags;
37891 +    int i, res;
37892 +
37893 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37894 +    for (i = 0; i < 2; i++)
37895 +       if (uctx->uctx_iprocTrap[i].ut_state == UTS_IPROC_TRAPPED)
37896 +       {
37897 +           uctx->uctx_iprocTrap[i].ut_state = UTS_IPROC_RESOLVING;
37898 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
37899 +           
37900 +           if ((res = resolve_iproc_trap (uctx, utrapp, i)) != UTS_FINISHED)
37901 +               return (res);
37902 +           
37903 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37904 +       }
37905 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37906 +    
37907 +    return (UTS_FINISHED);
37908 +}
37909 +
37910 +static int
37911 +resolve_all_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
37912 +{
37913 +    int res;
37914 +
37915 +    if ((res = resolve_iproc_traps (uctx, utrapp)) != UTS_FINISHED ||
37916 +       (res = resolve_cproc_traps (uctx, utrapp)) != UTS_FINISHED ||
37917 +       (res = resolve_eproc_traps (uctx, utrapp)) != UTS_FINISHED ||
37918 +       (res = resolve_dproc_traps (uctx, utrapp)) != UTS_FINISHED ||
37919 +       (res = resolve_tproc_traps (uctx, utrapp)) != UTS_FINISHED)
37920 +       return (res);
37921 +
37922 +    if (uctx->uctx_status & UCTX_OVERFLOW_REASONS)
37923 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, uctx->uctx_status));
37924 +
37925 +    if (uctx->uctx_status & UCTX_ERROR_REASONS)
37926 +       return (deliver_trap (utrapp, UTS_QUEUE_ERROR, UTS_NOPROC, NULL, uctx->uctx_status));
37927 +
37928 +    return (UTS_FINISHED);
37929 +}
37930 +
37931 +static int
37932 +execute_iproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
37933 +{
37934 +    unsigned long flags;
37935 +    int i;
37936 +
37937 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37938 +    for (i = 0; i < 2; i++)
37939 +       switch (uctx->uctx_iprocTrap[i].ut_state)
37940 +       {
37941 +       case UTS_IPROC_EXECUTE_PACKET:
37942 +           uctx->uctx_iprocTrap[i].ut_state = UTS_IPROC_EXECUTING;
37943 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
37944 +           
37945 +           return (deliver_trap (utrapp, UTS_EXECUTE_PACKET, UTS_IPROC, &uctx->uctx_iprocTrap[i].ut_trap, i));
37946 +
37947 +       case UTS_IPROC_NETWORK_ERROR:
37948 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
37949 +           
37950 +           return (deliver_trap (utrapp, UTS_NETWORK_ERROR_TRAP, UTS_IPROC, &uctx->uctx_iprocTrap[i].ut_trap, i));
37951 +       }
37952 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37953 +    
37954 +    return (UTS_FINISHED);
37955 +}
37956 +
37957 +static int
37958 +progress_neterr (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
37959 +{
37960 +    unsigned long flags;
37961 +
37962 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37963 +    if (! RING_QUEUE_EMPTY (uctx->uctx_msgQ))
37964 +    {
37965 +       ELAN4_NETERR_MSG msg = *RING_QUEUE_FRONT (uctx->uctx_msgQ, uctx->uctx_msgs);
37966 +       
37967 +       (void) RING_QUEUE_REMOVE (uctx->uctx_msgQ);
37968 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37969 +       
37970 +       return deliver_trap (utrapp, UTS_NETWORK_ERROR_MSG, UTS_NETERR_MSG, &msg, user_location2process (uctx, msg.msg_sender));
37971 +    }
37972 +    
37973 +    if (uctx->uctx_status & UCTX_NETERR_TIMER)
37974 +    {
37975 +       uctx->uctx_status &= ~UCTX_NETERR_TIMER;
37976 +
37977 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37978 +       
37979 +       return deliver_trap (utrapp, UTS_NETWORK_ERROR_TIMER, UTS_NOPROC, NULL);
37980 +    }
37981 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37982 +    
37983 +    return (UTS_FINISHED);
37984 +}
37985 +
37986 +static void
37987 +restart_command_queues (USER_CTXT *uctx)
37988 +{
37989 +    struct list_head *entry;
37990 +
37991 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
37992 +    
37993 +    list_for_each (entry, &uctx->uctx_cqlist) {
37994 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
37995 +       
37996 +       if (ucq->ucq_state == UCQ_NEEDS_RESTART)
37997 +       {
37998 +           ucq->ucq_state = UCQ_RUNNING;
37999 +           
38000 +           elan4_restartcq (uctx->uctx_ctxt.ctxt_dev, ucq->ucq_cq);
38001 +       }
38002 +    }
38003 +}
38004 +
38005 +static int
38006 +restart_dmas (USER_CTXT *uctx)
38007 +{
38008 +    PRINTF (uctx, DBG_TRAP, "restart_dmas: back=%d front=%d\n", uctx->uctx_dmaQ.q_back, uctx->uctx_dmaQ.q_front);
38009 +
38010 +    while (! RING_QUEUE_EMPTY (uctx->uctx_dmaQ))
38011 +    {
38012 +       if (! user_ddcq_check (uctx, 7))
38013 +           return (0);
38014 +
38015 +       user_ddcq_run_dma (uctx, RING_QUEUE_FRONT(uctx->uctx_dmaQ, uctx->uctx_dmas));
38016 +       
38017 +       (void) RING_QUEUE_REMOVE (uctx->uctx_dmaQ);
38018 +    }
38019 +
38020 +    return (1);
38021 +}
38022 +
38023 +static int
38024 +restart_threads (USER_CTXT *uctx)
38025 +{
38026 +    PRINTF (uctx, DBG_TRAP, "restart_threads: back=%d front=%d\n", uctx->uctx_threadQ.q_back, uctx->uctx_threadQ.q_front);
38027 +
38028 +    while (! RING_QUEUE_EMPTY (uctx->uctx_threadQ))
38029 +    {
38030 +       if (! user_ddcq_check (uctx, 7))
38031 +           return (0);
38032 +
38033 +       user_ddcq_run_thread (uctx, RING_QUEUE_FRONT(uctx->uctx_threadQ, uctx->uctx_threads));
38034 +       
38035 +       (void) RING_QUEUE_REMOVE (uctx->uctx_threadQ);
38036 +    }
38037 +
38038 +    return (1);
38039 +}
38040 +
38041 +int
38042 +user_resume_eproc_trap (USER_CTXT *uctx, E4_Addr addr)
38043 +{
38044 +    PRINTF2 (uctx, DBG_RESUME, "user_resume_eproc_trap: addr=%llx -> %s\n", addr, user_ddcq_check(uctx, 2) ? "success" : "EAGAIN");
38045 +
38046 +    if (! user_ddcq_check (uctx, 2))
38047 +       return (-EAGAIN);
38048 +
38049 +    user_ddcq_setevent (uctx, addr);
38050 +
38051 +    return (0);
38052 +}
38053 +
38054 +int
38055 +user_resume_cproc_trap (USER_CTXT *uctx, unsigned indx)
38056 +{
38057 +    struct list_head *entry;
38058 +    unsigned long flags;
38059 +
38060 +    PRINTF1 (uctx, DBG_RESUME, "user_resume_cproc_trap: indx=%d\n", indx);
38061 +
38062 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38063 +
38064 +    list_for_each (entry, &uctx->uctx_cqlist) {
38065 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
38066 +       
38067 +       if (elan4_cq2idx(ucq->ucq_cq) == indx && ucq->ucq_state == UCQ_STOPPED && !(ucq->ucq_flags & UCQ_SYSTEM))
38068 +       {
38069 +           ucq->ucq_state = UCQ_NEEDS_RESTART;
38070 +           
38071 +           user_signal_trap (uctx);
38072 +
38073 +           spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38074 +           return (0);
38075 +       }
38076 +    }
38077 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38078 +
38079 +    return (-EINVAL);
38080 +}
38081 +
38082 +int
38083 +user_resume_dproc_trap (USER_CTXT *uctx, E4_DMA *dma)
38084 +{
38085 +    unsigned long flags;
38086 +    int res = 0;
38087 +
38088 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38089 +    if (RING_QUEUE_FULL (uctx->uctx_dmaQ))
38090 +       res = -ENOMEM;
38091 +    else
38092 +    {
38093 +       *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = *dma;
38094 +       (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
38095 +
38096 +       user_signal_trap (uctx);
38097 +    }
38098 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38099 +    return (res);
38100 +}
38101 +
38102 +int
38103 +user_resume_tproc_trap (USER_CTXT *uctx, E4_ThreadRegs *regs)
38104 +{
38105 +    unsigned long flags;
38106 +    int res = 0;
38107 +
38108 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38109 +    if (RING_QUEUE_FULL (uctx->uctx_threadQ))
38110 +       res = -ENOMEM;
38111 +    else
38112 +    {
38113 +       *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = *regs;
38114 +       (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
38115 +
38116 +       user_signal_trap (uctx);
38117 +    }
38118 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38119 +    return (res);
38120 +}
38121 +
38122 +int
38123 +user_resume_iproc_trap (USER_CTXT *uctx, unsigned channel, unsigned trans,
38124 +                       E4_IprocTrapHeader *hdrp, E4_IprocTrapData *datap)
38125 +{
38126 +    unsigned long flags;
38127 +    int res = 0;
38128 +
38129 +    if (channel >= 2)
38130 +       return (-EINVAL);
38131 +
38132 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38133 +    if (uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_STOPPED &&
38134 +       uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_EXECUTING &&
38135 +       uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_NETWORK_ERROR)
38136 +       res = -EINVAL;
38137 +    else
38138 +    {
38139 +       ELAN4_IPROC_TRAP *trap = &uctx->uctx_iprocTrap[channel].ut_trap;
38140 +
38141 +       if (trans < trap->tr_numTransactions)
38142 +       {
38143 +           PRINTF1 (uctx, DBG_RESUME, "user_resume_iproc_trap: trans=%d -> execute\n", trans);
38144 +
38145 +           uctx->uctx_iprocTrap[channel].ut_state = UTS_IPROC_EXECUTE_PACKET;
38146 +           trap->tr_trappedTrans                  = trans;
38147 +           trap->tr_transactions[trans]           = *hdrp;
38148 +           trap->tr_dataBuffers[trans]            = *datap;
38149 +       }
38150 +       else
38151 +       {
38152 +           PRINTF1 (uctx, DBG_RESUME, "user_resume_iproc_trap: trans=%d -> running\n", trans);
38153 +
38154 +           uctx->uctx_iprocTrap[channel].ut_state = UTS_IPROC_RUNNING;
38155 +       
38156 +           user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
38157 +       }
38158 +    }
38159 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38160 +    
38161 +    return (res);
38162 +}
38163 +
38164 +int
38165 +__categorise_command (E4_uint64 command, int *cmdSize)
38166 +{
38167 +    switch (command & 0x3)
38168 +    {
38169 +    case RUN_THREAD_CMD: *cmdSize = 7; break;
38170 +       
38171 +    default:
38172 +       switch (command & 0x7)
38173 +       {
38174 +       case WRITE_DWORD_CMD: *cmdSize = 2; break;
38175 +       case ADD_DWORD_CMD:   *cmdSize = 2; break;
38176 +           
38177 +       default:
38178 +           switch (command & 0xF)
38179 +           {
38180 +           case OPEN_STEN_PKT_CMD:
38181 +               *cmdSize = 1;
38182 +               return 1;
38183 +               
38184 +           case COPY64_CMD:    *cmdSize = 2; break;
38185 +           case GUARD_CMD:     *cmdSize = 1; break;
38186 +           case INTERRUPT_CMD: *cmdSize = 1; break;
38187 +           case RUN_DMA_CMD:   *cmdSize = 7; break;
38188 +               
38189 +           default:
38190 +               switch (command & 0x1f)
38191 +               {
38192 +               case SEND_TRANS_CMD:
38193 +                   *cmdSize = 2 + (((command >> 16) & TR_SIZE_MASK) >> TR_SIZE_SHIFT);
38194 +                   return 2;
38195 +                   
38196 +               case SET_EVENT_CMD:    *cmdSize = 1; break;
38197 +               case SET_EVENTN_CMD:   *cmdSize = 2; break;
38198 +               case WAIT_EVENT_CMD:   *cmdSize = 4; break;
38199 +
38200 +               default:
38201 +                   switch (command & 0x3f)
38202 +                   {
38203 +                   case NOP_CMD:            *cmdSize = 1; break;
38204 +                   case MAKE_EXT_CLEAN_CMD: *cmdSize = 1; break;
38205 +                   default:
38206 +                       return 3;
38207 +                   }
38208 +                   break;
38209 +               }
38210 +           }
38211 +       }
38212 +    }
38213 +
38214 +    return 0;
38215 +}
38216 +
38217 +int
38218 +__whole_command (sdramaddr_t *commandPtr, sdramaddr_t insertPtr, unsigned int cqSize, unsigned int cmdSize)
38219 +{
38220 +    /* Move onto next command */
38221 +    while (cmdSize-- && (*commandPtr) != insertPtr)
38222 +       *commandPtr = ((*commandPtr) & ~(cqSize-1)) | (((*commandPtr) + sizeof (E4_uint64)) & (cqSize-1));
38223 +
38224 +    return cmdSize == -1;
38225 +}
38226 +
38227 +int
38228 +user_neterr_sten (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop)
38229 +{
38230 +    ELAN4_DEV        *dev   = uctx->uctx_ctxt.ctxt_dev;
38231 +    int                      found = 0;
38232 +    struct list_head *el;
38233 +
38234 +    user_swapout (uctx, UCTX_NETERR_FIXUP);
38235 +
38236 +    kmutex_lock (&uctx->uctx_cqlock);
38237 +    list_for_each (el, &uctx->uctx_cqlist) {
38238 +       USER_CQ *ucq = list_entry (el, USER_CQ, ucq_link);
38239 +       
38240 +       if ((ucq->ucq_cq->cq_perm & CQ_STENEnableBit) != 0)
38241 +       {
38242 +           sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(ucq->ucq_cq) * sizeof (E4_CommandQueueDesc));
38243 +           E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
38244 +           sdramaddr_t   insertPtr    = (queuePtrs & CQ_PtrMask);
38245 +           sdramaddr_t   commandPtr   = CQ_CompletedPtr (queuePtrs);
38246 +           unsigned int  cqSize       = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
38247 +           E4_uint64     openCommand  = 0;
38248 +
38249 +           if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && (queuePtrs & CQ_RevB_ReorderingQueue))
38250 +           {
38251 +               E4_uint32 oooMask = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue));
38252 +               
38253 +               for (; (oooMask & 1) != 0; oooMask >>= 1)
38254 +                   insertPtr = (insertPtr & ~(cqSize-1)) | ((insertPtr + sizeof (E4_uint64)) & (cqSize-1));
38255 +           }
38256 +
38257 +           while (commandPtr != insertPtr)
38258 +           {
38259 +               E4_uint64    command = elan4_sdram_readq (dev, commandPtr);
38260 +               sdramaddr_t  identifyPtr;
38261 +               unsigned int cmdSize;
38262 +               
38263 +               switch (__categorise_command (command, &cmdSize))
38264 +               {
38265 +               case 0:
38266 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
38267 +                   break;
38268 +                   
38269 +               case 1:
38270 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cq=%d OPEN %llx\n", elan4_cq2num (ucq->ucq_cq), command);
38271 +                   
38272 +                   if ((command >> 32) == vp)
38273 +                       openCommand = command;
38274 +
38275 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
38276 +                   break;
38277 +                   
38278 +               case 2:
38279 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cq=%d SENDTRANS %llx\n", elan4_cq2num (ucq->ucq_cq), command);
38280 +                   
38281 +                   if (openCommand == 0)
38282 +                       (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
38283 +                   else
38284 +                   {
38285 +                       switch ((command >> 16) & (TR_OPCODE_MASK | TR_SIZE_MASK))
38286 +                       {
38287 +                       case TR_IDENTIFY  & (TR_OPCODE_MASK | TR_SIZE_MASK):
38288 +                       case TR_REMOTEDMA & (TR_OPCODE_MASK | TR_SIZE_MASK):
38289 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_IDENTIFY/TR_REMOTEDMA\n");
38290 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + sizeof (E4_uint64)) & (cqSize-1));
38291 +                           break;
38292 +                           
38293 +                       case TR_SETEVENT_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK):
38294 +                       case TR_INPUT_Q_COMMIT    & (TR_OPCODE_MASK | TR_SIZE_MASK):
38295 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_SETEVENT_IDENTIFY/TR_INPUT_Q_COMMIT\n");
38296 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 2*sizeof (E4_uint64)) & (cqSize-1));
38297 +                           break;
38298 +                           
38299 +                       case TR_ADDWORD & (TR_OPCODE_MASK | TR_SIZE_MASK):
38300 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_ADDWORD\n");
38301 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 3*sizeof (E4_uint64)) & (cqSize-1));
38302 +                           break;
38303 +                           
38304 +                       case TR_TESTANDWRITE & (TR_OPCODE_MASK | TR_SIZE_MASK):
38305 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_TESTANDWRITE\n");
38306 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 4*sizeof (E4_uint64)) & (cqSize-1));
38307 +                           break;
38308 +                           
38309 +                       default:
38310 +                           identifyPtr = 0;
38311 +                       }
38312 +                       
38313 +                       if (! __whole_command (&commandPtr, insertPtr, cqSize, cmdSize))
38314 +                       {
38315 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: not whole command\n");
38316 +                           openCommand = 0;
38317 +                       }
38318 +
38319 +                       else if (identifyPtr)
38320 +                       {
38321 +                           E4_uint64 tcookie = elan4_sdram_readq (dev, identifyPtr);
38322 +                           
38323 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cookie=%llx [%llx]\n", tcookie, cookie);
38324 +                           
38325 +                           if (tcookie == cookie)
38326 +                           {
38327 +                               unsigned int vchan = (openCommand >> 4) & 0x1f;
38328 +                               
38329 +                               PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cookie matches - vchan=%d\n", vchan);
38330 +                               
38331 +                               if (! waitforeop)
38332 +                               {
38333 +                                   /* Alter the CQ_AckBuffer for this channel to indicate an
38334 +                                    * ack was received */
38335 +                                   E4_uint64 value  = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers));
38336 +                                   E4_uint64 nvalue = ((value & ~((E4_uint64)0xf << ((vchan & 0xf) << 2))) |
38337 +                                                       ((E4_uint64) PackOk << ((vchan & 0xf) << 2)));
38338 +                                   
38339 +                                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: CQ_AckBuffers %llx -> %llx\n", value, nvalue);
38340 +                                   
38341 +                                   elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers), nvalue);
38342 +                                   pioflush_sdram (dev);
38343 +                               }
38344 +                               
38345 +                               found++;
38346 +                           }
38347 +                           openCommand = 0;
38348 +                       }
38349 +                       
38350 +                       if ((command >> 16) & TR_LAST_AND_SEND_ACK)
38351 +                           openCommand = 0;
38352 +                   }
38353 +                   break;
38354 +                   
38355 +               case 3:
38356 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: invalid command %llx\n", command);
38357 +                   kmutex_unlock (&uctx->uctx_cqlock);
38358 +                   return -EINVAL;
38359 +               }
38360 +               
38361 +           }
38362 +       }
38363 +    }
38364 +    kmutex_unlock (&uctx->uctx_cqlock);
38365 +
38366 +    user_swapin (uctx, UCTX_NETERR_FIXUP);
38367 +
38368 +    return found;
38369 +}
38370 +
38371 +int
38372 +user_neterr_dma (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop)
38373 +{
38374 +    unsigned long flags;
38375 +    int found = 0;
38376 +    int idx;
38377 +
38378 +    user_swapout (uctx, UCTX_NETERR_FIXUP);
38379 +
38380 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38381 +    RING_QUEUE_ITERATE (uctx->uctx_dmaQ, idx) {
38382 +       E4_DMA *dma = &uctx->uctx_dmas[idx];
38383 +
38384 +       if (dma->dma_vproc == vp && dma->dma_cookie == cookie)
38385 +       {
38386 +           PRINTF (uctx, DBG_NETERR, "user_neterr_dma: dmaQ matches %s\n", waitforeop ? "waitforeop" : "remove remoteness");
38387 +
38388 +           if (! waitforeop) 
38389 +           {
38390 +               dma->dma_dstEvent = 0;
38391 +               dma->dma_typeSize = DMA_ShMemWrite | DMA_Context (dma->dma_typeSize);
38392 +           }
38393 +           found++;
38394 +       }
38395 +    }
38396 +
38397 +    RING_QUEUE_ITERATE (uctx->uctx_dprocTrapQ, idx) {
38398 +       ELAN4_DPROC_TRAP *trap = &uctx->uctx_dprocTraps[idx];
38399 +
38400 +       if (trap->tr_desc.dma_vproc == vp && trap->tr_desc.dma_cookie == cookie)
38401 +       {
38402 +           PRINTF (uctx, DBG_NETERR, "user_neterr_dma: dmaTrapQ matches %s\n", waitforeop ? "waitforeop" : "remove remoteness");
38403 +
38404 +           if (! waitforeop) 
38405 +           {
38406 +               trap->tr_desc.dma_dstEvent = 0;
38407 +               trap->tr_desc.dma_typeSize = DMA_ShMemWrite | DMA_Context (trap->tr_desc.dma_typeSize);
38408 +           }
38409 +           found++;
38410 +       }
38411 +    }
38412 +
38413 +    /* The device driver command queue should be empty at this point ! */
38414 +    if (user_ddcq_flush (uctx) == 0)
38415 +       found = -EAGAIN;
38416 +
38417 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38418 +
38419 +    /* The device driver command queue should be empty at this point ! */
38420 +    if (user_ddcq_flush (uctx) == 0)
38421 +       found = -EAGAIN;
38422 +    
38423 +    user_swapin (uctx, UCTX_NETERR_FIXUP);
38424 +
38425 +    return found;
38426 +}
38427 +
38428 +int
38429 +user_trap_handler (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int nticks)
38430 +{
38431 +    unsigned long entered = jiffies;
38432 +    unsigned int  need_reenter = 0;
38433 +    unsigned long flags;
38434 +    int           res;
38435 +
38436 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38437 +
38438 +    PRINTF1 (uctx, DBG_TRAP, "user_trap_handler: entered state=%d\n", uctx->uctx_trap_state);
38439 +
38440 +    uctx->uctx_trap_count++;
38441 +    
38442 +    for (;;)
38443 +    {
38444 +       if (uctx->uctx_status & UCTX_SWAPPED_REASONS)
38445 +       {
38446 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting on swapped reasons\n");
38447 +           
38448 +           res = UTS_FINISHED;
38449 +           goto no_more_to_do;
38450 +       }
38451 +
38452 +       if ((long) (jiffies - entered) > HZ)
38453 +       {
38454 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting for reschedule\n");
38455 +           res = UTS_RESCHEDULE;
38456 +           goto no_more_to_do;
38457 +       }
38458 +       
38459 +       switch (uctx->uctx_trap_state)
38460 +       {
38461 +       case UCTX_TRAP_ACTIVE:
38462 +           uctx->uctx_trap_state = UCTX_TRAP_SLEEPING;
38463 +           
38464 +           if (nticks == 0 || need_reenter || kcondvar_timedwaitsig (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags, lbolt + nticks) != CV_RET_NORMAL)
38465 +           {
38466 +               PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting by kcondvar_timedwaitsig\n");
38467 +
38468 +               res = UTS_FINISHED;
38469 +               goto no_more_to_do;
38470 +           }
38471 +
38472 +           /* Have slept above, so resample entered */
38473 +           entered = jiffies;
38474 +           
38475 +           uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
38476 +           continue;
38477 +
38478 +       case UCTX_TRAP_IDLE:
38479 +       case UCTX_TRAP_SIGNALLED:
38480 +           uctx->uctx_trap_state = UCTX_TRAP_ACTIVE;
38481 +           break;
38482 +       }
38483 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38484 +
38485 +       PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: resolve traps - state=%d status=%x\n", uctx->uctx_trap_state, uctx->uctx_status);
38486 +
38487 +       switch ((res = resolve_all_traps (uctx, utrapp)))
38488 +       {
38489 +       case UTS_FINISHED:
38490 +           break;
38491 +           
38492 +       case UTS_RESCHEDULE:
38493 +           need_reenter++;
38494 +           break;
38495 +
38496 +       default:
38497 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38498 +           goto no_more_to_do;
38499 +       }
38500 +
38501 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38502 +       if (! user_ddcq_flush (uctx))
38503 +       {
38504 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: ddcq not flushed - re-enter\n");
38505 +           need_reenter++;
38506 +           continue;
38507 +       }
38508 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38509 +
38510 +       if ((res = progress_neterr (uctx, utrapp)) != UTS_FINISHED)
38511 +       {
38512 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38513 +           goto no_more_to_do;
38514 +       }
38515 +
38516 +       if ((res = execute_iproc_traps (uctx, utrapp)) != UTS_FINISHED)
38517 +       {
38518 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38519 +           goto no_more_to_do;
38520 +       }
38521 +
38522 +       PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: restart items - state=%d status=%x\n", uctx->uctx_trap_state, uctx->uctx_status);
38523 +
38524 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38525 +       if (UCTX_RUNNABLE (uctx))
38526 +       {
38527 +           restart_command_queues (uctx);
38528 +
38529 +           if (! restart_threads (uctx) || ! restart_dmas (uctx))
38530 +           {
38531 +               PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: ddcq full - re-enter\n");
38532 +               need_reenter++;
38533 +           }
38534 +       }
38535 +    }
38536 + no_more_to_do:
38537 +    uctx->uctx_trap_state = UCTX_TRAP_IDLE;
38538 +
38539 +    /*
38540 +     * Always ensure that the command queue is flushed with a flow control
38541 +     * write, so that on the next trap we (hopefully) find it empty and so
38542 +     * can immediately restart the context.   Also if we need to be re-enter
38543 +     * the trap handler and don't have an interrupt outstanding, then issue
38544 +     * one now.
38545 +     */
38546 +    user_ddcq_flush (uctx);
38547 +    if (need_reenter && uctx->uctx_ddcq_intr == 0)
38548 +    {
38549 +       uctx->uctx_ddcq_intr++;
38550 +       user_ddcq_intr (uctx);
38551 +    }
38552 +
38553 +    if (--uctx->uctx_trap_count == 0 && (uctx->uctx_status & UCTX_SWAPPING))
38554 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
38555 +
38556 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38557 +
38558 +    PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: finished state=%d res=%d\n", uctx->uctx_trap_state, res);
38559 +
38560 +    return (res == UTS_EFAULT ? -EFAULT : 0);
38561 +}
38562 +
38563 +USER_CQ *
38564 +user_alloccq (USER_CTXT *uctx, unsigned cqsize, unsigned perm, unsigned uflags)
38565 +{
38566 +    USER_CQ      *ucq;
38567 +    unsigned long flags;
38568 +
38569 +    KMEM_ZALLOC (ucq, USER_CQ *, sizeof (USER_CQ), 1);
38570 +
38571 +    if (ucq == (USER_CQ *) NULL)
38572 +       return ERR_PTR(-ENOMEM);
38573 +    
38574 +    /* NOTE - do not allow the user to create high-priority queues as we only flush through the low-priority run queues */
38575 +    if ((ucq->ucq_cq = elan4_alloccq (&uctx->uctx_ctxt, cqsize, perm, (uflags & UCQ_REORDER) ? CQ_Reorder : 0)) == NULL)
38576 +    {
38577 +       KMEM_FREE (ucq, sizeof (USER_CQ));
38578 +       
38579 +       PRINTF2 (uctx, DBG_CQ, "user_alloccq: failed elan4_allocq cqsize %d uflags %x\n", cqsize, uflags);
38580 +
38581 +       return ERR_PTR(-ENOMEM);
38582 +    }
38583 +    
38584 +    atomic_set (&ucq->ucq_ref, 1);
38585 +
38586 +    ucq->ucq_state = UCQ_RUNNING;
38587 +    ucq->ucq_flags = uflags;
38588 +    
38589 +    PRINTF3 (uctx, DBG_CQ, "user_alloccq: ucq=%p idx=%d cqnum=%d\n", ucq, elan4_cq2idx (ucq->ucq_cq), elan4_cq2num(ucq->ucq_cq));
38590 +
38591 +    /* chain it onto the context */
38592 +    kmutex_lock (&uctx->uctx_cqlock);
38593 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38594 +    list_add (&ucq->ucq_link, &uctx->uctx_cqlist);
38595 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38596 +    kmutex_unlock (&uctx->uctx_cqlock);
38597 +
38598 +    return (ucq);
38599 +}
38600 +
38601 +USER_CQ *
38602 +user_findcq (USER_CTXT *uctx, unsigned idx)
38603 +{
38604 +    struct list_head *entry;
38605 +    unsigned long flags;
38606 +
38607 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38608 +    list_for_each (entry, &uctx->uctx_cqlist) {
38609 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
38610 +
38611 +       if (elan4_cq2idx(ucq->ucq_cq) == idx)
38612 +       {
38613 +           atomic_inc (&ucq->ucq_ref);
38614 +           spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38615 +           return (ucq);
38616 +       }
38617 +    }
38618 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38619 +
38620 +    return (NULL);
38621 +}
38622 +
38623 +void
38624 +user_dropcq (USER_CTXT *uctx, USER_CQ *ucq)
38625 +{
38626 +    unsigned long flags;
38627 +
38628 +    PRINTF2 (uctx, DBG_CQ, "user_dropcq: ucq=%p ref=%d\n", ucq, atomic_read (&ucq->ucq_ref));
38629 +
38630 +    kmutex_lock (&uctx->uctx_cqlock);
38631 +    if (! atomic_dec_and_test (&ucq->ucq_ref))
38632 +    {
38633 +       kmutex_unlock (&uctx->uctx_cqlock);
38634 +       return;
38635 +    }
38636 +
38637 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38638 +    list_del (&ucq->ucq_link);
38639 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38640 +
38641 +    kmutex_unlock (&uctx->uctx_cqlock);
38642 +
38643 +    elan4_freecq (&uctx->uctx_ctxt, ucq->ucq_cq);
38644 +
38645 +    KMEM_FREE (ucq, sizeof (USER_CQ));
38646 +}
38647 +
38648 +int
38649 +user_alloc_trap_queues (USER_CTXT *uctx, unsigned ndproc_traps, unsigned neproc_traps, 
38650 +                       unsigned ntproc_traps, unsigned nthreads, unsigned ndmas)
38651 +{
38652 +    ELAN4_DPROC_TRAP *dprocs;
38653 +    ELAN4_EPROC_TRAP *eprocs;
38654 +    ELAN4_TPROC_TRAP *tprocs;
38655 +    E4_DMA           *dmas;
38656 +    E4_ThreadRegs    *threads;
38657 +    ELAN4_NETERR_MSG *msgs;
38658 +    unsigned long     flags;
38659 +
38660 +    int nmsgs = NETERR_MSGS;
38661 +
38662 +    /* bounds check the values that have been passed in */
38663 +    if (ndproc_traps < 2 || ndproc_traps > 10000 ||
38664 +       ntproc_traps < 1 || ntproc_traps > 100   ||
38665 +       neproc_traps < 6 || neproc_traps > 10000 ||
38666 +       nthreads     < 2 || nthreads     > 10000 ||
38667 +       ndmas        < 2 || ndmas        > 10000)
38668 +       return -EINVAL;
38669 +
38670 +    if (uctx->uctx_dmas != NULL)
38671 +       return -EBUSY;
38672 +
38673 +    KMEM_ZALLOC (dprocs, ELAN4_DPROC_TRAP *, ndproc_traps * sizeof (ELAN4_DPROC_TRAP), 1);
38674 +    KMEM_ZALLOC (eprocs, ELAN4_EPROC_TRAP *, neproc_traps * sizeof (ELAN4_EPROC_TRAP), 1);
38675 +    KMEM_ZALLOC (tprocs, ELAN4_TPROC_TRAP *, ntproc_traps * sizeof (ELAN4_TPROC_TRAP), 1);
38676 +    KMEM_ZALLOC (threads, E4_ThreadRegs *, nthreads * sizeof (E4_ThreadRegs), 1);
38677 +    KMEM_ZALLOC (dmas, E4_DMA *, ndmas * sizeof (E4_DMA), 1);
38678 +    KMEM_ZALLOC (msgs, ELAN4_NETERR_MSG *, nmsgs * sizeof (ELAN4_NETERR_MSG), 1);
38679 +
38680 +    if (dprocs == NULL || eprocs == NULL || tprocs == NULL || dmas == NULL || threads == NULL || msgs == NULL)
38681 +    {
38682 +       if (dprocs != NULL) KMEM_FREE (dprocs, ndproc_traps * sizeof (ELAN4_DPROC_TRAP));
38683 +       if (eprocs != NULL) KMEM_FREE (eprocs, neproc_traps * sizeof (ELAN4_EPROC_TRAP));
38684 +       if (tprocs != NULL) KMEM_FREE (tprocs, ntproc_traps * sizeof (ELAN4_TPROC_TRAP));
38685 +       if (threads != NULL) KMEM_FREE (threads, nthreads * sizeof (E4_ThreadRegs));
38686 +       if (dmas != NULL) KMEM_FREE (dmas, ndmas * sizeof (E4_DMA));
38687 +       if (msgs != NULL) KMEM_FREE (msgs, nmsgs * sizeof (ELAN4_NETERR_MSG));
38688 +       
38689 +       return -ENOMEM;
38690 +    }
38691 +    
38692 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38693 +
38694 +    uctx->uctx_dprocTraps = dprocs;
38695 +    uctx->uctx_eprocTraps = eprocs;
38696 +    uctx->uctx_tprocTraps = tprocs;
38697 +    uctx->uctx_threads    = threads;
38698 +    uctx->uctx_dmas       = dmas;
38699 +    uctx->uctx_msgs       = msgs;
38700 +
38701 +    RING_QUEUE_INIT (uctx->uctx_dprocTrapQ, ndproc_traps, 1 /* 1 for 2nd dma */);
38702 +    RING_QUEUE_INIT (uctx->uctx_tprocTrapQ, ntproc_traps, 0);
38703 +    RING_QUEUE_INIT (uctx->uctx_eprocTrapQ, neproc_traps, 5 /* 1 for command, 2 for dma, 2 for inputter */);
38704 +    RING_QUEUE_INIT (uctx->uctx_threadQ,    nthreads,     1);
38705 +    RING_QUEUE_INIT (uctx->uctx_dmaQ,       ndmas,        1);
38706 +    RING_QUEUE_INIT (uctx->uctx_msgQ,       nmsgs,        0);
38707 +
38708 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38709 +    
38710 +    return 0;
38711 +}
38712 +
38713 +USER_CTXT *
38714 +user_alloc (ELAN4_DEV *dev)
38715 +{
38716 +    USER_CTXT *uctx;
38717 +    int res;
38718 +    int i;
38719 +
38720 +    /* Allocate and initialise the context private data */
38721 +    KMEM_ZALLOC (uctx, USER_CTXT *, sizeof  (USER_CTXT), 1);
38722 +
38723 +    if (uctx == NULL)
38724 +       return ERR_PTR(-ENOMEM);
38725 +
38726 +    if (elan4_get_position (dev, &uctx->uctx_position) == ELAN_POS_UNKNOWN)
38727 +    {
38728 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
38729 +       return ERR_PTR(-EAGAIN);
38730 +    }
38731 +    
38732 +    if ((res = elan4_insertctxt (dev, &uctx->uctx_ctxt, &user_trap_ops)) != 0)
38733 +    {
38734 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
38735 +       return ERR_PTR(res);
38736 +    }
38737 +
38738 +    KMEM_GETPAGES (uctx->uctx_upage, ELAN4_USER_PAGE *, btopr (sizeof (ELAN4_USER_PAGE)), 1);
38739 +    if (uctx->uctx_upage == NULL)
38740 +    {
38741 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
38742 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
38743 +       return ERR_PTR(-ENOMEM);
38744 +    }
38745 +    
38746 +    if ((uctx->uctx_trampoline = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE)) == 0)
38747 +    {
38748 +       KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
38749 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
38750 +
38751 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
38752 +       return ERR_PTR(-ENOMEM);
38753 +    }
38754 +    
38755 +    if ((uctx->uctx_routetable = elan4_alloc_routetable (dev, 4 /* 512 << 4 == 8192 entries */)) == NULL)
38756 +    {
38757 +       elan4_sdram_free (dev, uctx->uctx_trampoline, SDRAM_PAGE_SIZE);
38758 +       KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
38759 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
38760 +
38761 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
38762 +       return ERR_PTR(-ENOMEM);
38763 +    }
38764 +
38765 +    elan4_set_routetable (&uctx->uctx_ctxt, uctx->uctx_routetable);
38766 +
38767 +    /* initialise the trap and swap queues to be really full */
38768 +    RING_QUEUE_INIT (uctx->uctx_dprocTrapQ, 0, 1);
38769 +    RING_QUEUE_INIT (uctx->uctx_tprocTrapQ, 0, 1);
38770 +    RING_QUEUE_INIT (uctx->uctx_eprocTrapQ, 0, 1);
38771 +    RING_QUEUE_INIT (uctx->uctx_threadQ, 0, 1);
38772 +    RING_QUEUE_INIT (uctx->uctx_dmaQ, 0, 1);
38773 +
38774 +    INIT_LIST_HEAD (&uctx->uctx_cent_list);
38775 +    INIT_LIST_HEAD (&uctx->uctx_vpseg_list);
38776 +    INIT_LIST_HEAD (&uctx->uctx_cqlist);
38777 +
38778 +    uctx->uctx_haltop.op_function = user_flush;
38779 +    uctx->uctx_haltop.op_arg      = uctx;
38780 +    uctx->uctx_haltop.op_mask     = INT_Halted|INT_Discarding;
38781 +
38782 +    uctx->uctx_dma_flushop.op_function = user_flush_dmas;
38783 +    uctx->uctx_dma_flushop.op_arg      = uctx;
38784 +
38785 +    kmutex_init (&uctx->uctx_vpseg_lock);
38786 +    kmutex_init (&uctx->uctx_cqlock);
38787 +    kmutex_init (&uctx->uctx_rgnmutex);
38788 +
38789 +    spin_lock_init (&uctx->uctx_spinlock);
38790 +    spin_lock_init (&uctx->uctx_rgnlock);
38791 +    spin_lock_init (&uctx->uctx_fault_lock);
38792 +
38793 +    kcondvar_init (&uctx->uctx_wait);
38794 +
38795 +    if ((uctx->uctx_ddcq = user_alloccq (uctx, CQ_Size1K, CQ_EnableAllBits, UCQ_SYSTEM)) == NULL)
38796 +    {
38797 +       user_free (uctx);
38798 +       return ERR_PTR(-ENOMEM);
38799 +    }
38800 +
38801 +    uctx->uctx_trap_count = 0;
38802 +    uctx->uctx_trap_state = UCTX_TRAP_IDLE;
38803 +    uctx->uctx_status     = 0 /* UCTX_DETACHED | UCTX_SWAPPED | UCTX_STOPPED */;
38804 +
38805 +    init_timer (&uctx->uctx_int_timer);
38806 +
38807 +    uctx->uctx_int_timer.function = user_signal_timer;
38808 +    uctx->uctx_int_timer.data     = (unsigned long) uctx;
38809 +    uctx->uctx_int_start          = jiffies;
38810 +    uctx->uctx_int_count          = 0;
38811 +    uctx->uctx_int_delay          = 0;
38812 +
38813 +    init_timer (&uctx->uctx_neterr_timer);
38814 +    uctx->uctx_neterr_timer.function = user_neterr_timer;
38815 +    uctx->uctx_neterr_timer.data     = (unsigned long) uctx;
38816 +
38817 +    uctx->uctx_upage->upage_ddcq_completed = 0;
38818 +    uctx->uctx_ddcq_completed              = 0;
38819 +    uctx->uctx_ddcq_insertcnt              = 0;
38820 +
38821 +    uctx->uctx_num_fault_save = num_fault_save;
38822 +    if (uctx->uctx_num_fault_save) 
38823 +    {  
38824 +       KMEM_ZALLOC (uctx->uctx_faults, FAULT_SAVE *, (sizeof(FAULT_SAVE) * uctx->uctx_num_fault_save), 1);
38825 +        if ( uctx->uctx_faults == NULL) 
38826 +       {
38827 +           user_free (uctx);
38828 +           return ERR_PTR(-ENOMEM);
38829 +        }
38830 +    
38831 +        for (i = 0; i < uctx->uctx_num_fault_save; i++)
38832 +           uctx->uctx_faults[i].next = (i == (uctx->uctx_num_fault_save-1) ? NULL : &uctx->uctx_faults[i+1]);
38833 +
38834 +    }
38835 +    uctx->uctx_fault_list = uctx->uctx_faults;
38836 +
38837 +    return (uctx);
38838 +}
38839 +
38840 +void
38841 +user_free (USER_CTXT *uctx)
38842 +{
38843 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
38844 +
38845 +    user_swapout (uctx, UCTX_EXITING);
38846 +
38847 +    /* Detach from all input contexts */
38848 +    user_detach (uctx, NULL);
38849 +
38850 +    /* since we're single threaded here - (called from close()) */
38851 +    /* we don't need to hold the lock to drop the command queues */
38852 +    /* since they cannot be mapped into user space */
38853 +    while (! list_empty (&uctx->uctx_cqlist))
38854 +       user_dropcq (uctx, list_entry (uctx->uctx_cqlist.next, USER_CQ, ucq_link));
38855 +
38856 +    /* Free off all of vpseg_list */
38857 +    kmutex_lock (&uctx->uctx_vpseg_lock);
38858 +    while (! list_empty (&uctx->uctx_vpseg_list))
38859 +       user_remove_vpseg(uctx, list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link));
38860 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
38861 +    
38862 +    if (timer_pending (&uctx->uctx_int_timer))
38863 +       del_timer_sync (&uctx->uctx_int_timer);
38864 +    
38865 +    if (timer_pending (&uctx->uctx_neterr_timer))
38866 +       del_timer_sync (&uctx->uctx_neterr_timer);
38867 +
38868 +    if (uctx->uctx_dprocTraps)
38869 +       KMEM_FREE (uctx->uctx_dprocTraps, uctx->uctx_dprocTrapQ.q_size * sizeof (ELAN4_DPROC_TRAP));
38870 +    if (uctx->uctx_tprocTraps)
38871 +       KMEM_FREE (uctx->uctx_tprocTraps, uctx->uctx_tprocTrapQ.q_size * sizeof (ELAN4_TPROC_TRAP));
38872 +    if (uctx->uctx_eprocTraps)
38873 +       KMEM_FREE (uctx->uctx_eprocTraps, uctx->uctx_eprocTrapQ.q_size * sizeof (ELAN4_EPROC_TRAP));
38874 +    if (uctx->uctx_dmas)
38875 +       KMEM_FREE (uctx->uctx_dmas, uctx->uctx_dmaQ.q_size * sizeof (E4_DMA));
38876 +    if (uctx->uctx_msgs)
38877 +       KMEM_FREE (uctx->uctx_msgs, NETERR_MSGS * sizeof (ELAN4_NETERR_MSG));
38878 +    if (uctx->uctx_threads)
38879 +       KMEM_FREE (uctx->uctx_threads, uctx->uctx_threadQ.q_size * sizeof (E4_ThreadRegs));
38880 +    if (uctx->uctx_faults)
38881 +       KMEM_FREE (uctx->uctx_faults, (sizeof(FAULT_SAVE) * uctx->uctx_num_fault_save));
38882 +
38883 +    if (uctx->uctx_intcookie_table)
38884 +       intcookie_free_table (uctx->uctx_intcookie_table);
38885 +
38886 +    elan4_set_routetable (&uctx->uctx_ctxt, NULL);
38887 +    elan4_free_routetable (dev, uctx->uctx_routetable);
38888 +
38889 +    /* Free off all USER_RGNs */
38890 +    user_freergns(uctx);
38891 +
38892 +    elan4_sdram_free (dev, uctx->uctx_trampoline, SDRAM_PAGE_SIZE);
38893 +
38894 +    /* Clear the PG_Reserved bit before free to avoid a memory leak */
38895 +    ClearPageReserved(pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)));
38896 +    KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
38897 +
38898 +    elan4_removectxt (dev, &uctx->uctx_ctxt);
38899 +
38900 +    kcondvar_destroy (&uctx->uctx_wait);
38901 +
38902 +    spin_lock_destroy (&uctx->uctx_rgnlock);
38903 +    spin_lock_destroy (&uctx->uctx_spinlock);
38904 +
38905 +    kmutex_destroy (&uctx->uctx_rgnmutex);
38906 +    kmutex_destroy (&uctx->uctx_cqlock);
38907 +    kmutex_destroy (&uctx->uctx_vpseg_lock);
38908 +
38909 +    KMEM_FREE (uctx, sizeof (USER_CTXT));
38910 +}
38911 +
38912 +/*
38913 + * Local variables:
38914 + * c-file-style: "stroustrup"
38915 + * End:
38916 + */
38917 Index: linux-2.4.21/drivers/net/qsnet/elan4/user_ddcq.c
38918 ===================================================================
38919 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/user_ddcq.c       2004-02-23 16:02:56.000000000 -0500
38920 +++ linux-2.4.21/drivers/net/qsnet/elan4/user_ddcq.c    2005-06-01 23:12:54.625435152 -0400
38921 @@ -0,0 +1,226 @@
38922 +/*
38923 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
38924 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
38925 + * 
38926 + *    For licensing information please see the supplied COPYING file
38927 + *
38928 + */
38929 +
38930 +#ident "@(#)$Id: user_ddcq.c,v 1.15 2004/06/23 11:06:05 addy Exp $"
38931 +/*      $Source: /cvs/master/quadrics/elan4mod/user_ddcq.c,v $*/
38932 +
38933 +#include <qsnet/kernel.h>
38934 +
38935 +#include <elan4/debug.h>
38936 +#include <elan4/device.h>
38937 +#include <elan4/user.h>
38938 +#include <elan4/commands.h>
38939 +
38940 +#if PAGE_SIZE < CQ_CommandMappingSize
38941 +#  define ELAN4_COMMAND_QUEUE_MAPPING  PAGE_SIZE
38942 +#else
38943 +#  define ELAN4_COMMAND_QUEUE_MAPPING  CQ_CommandMappingSize
38944 +#endif
38945 +
38946 +/* The user device driver command queue is used for re-issuing 
38947 + * trapped items.  It is allocated as a 1K command queue, and
38948 + * we insert command flow writes every 256 words.
38949 + */
38950 +#define USER_CTRLFLOW_COUNT    256
38951 +
38952 +/* Flow control of the device driver command queue is handled by periodically 
38953 + * inserting dword writes into the command stream.   When you need to know 
38954 + * that the queue has been flushed, then you insert an extra control flow
38955 + * write into the command queue.   Should the queue not be flushed, but the
38956 + * trap handler be returning to user space, then it will also insert an
38957 + * extra interrupt command to ensure that it is re-entered after the queue
38958 + * has been flushed.
38959 + *
38960 + * Note - we account the space for the interrupt command on each control 
38961 + * flow write so that we do not overflow the queue even if we end up 
38962 + * inserting an interrupt for every command flow write.  In general only
38963 + * a single interrupt should get inserted....
38964 + */
38965 +
38966 +#define user_ddcq_command_write(value,off) do { \
38967 +    PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_write: cmdptr=%x off=%d value=%llx\n", cmdptr, off, value);\
38968 +    writeq(value, cmdptr + (off << 3)); \
38969 +} while (0)
38970 +
38971 +#define user_ddcq_command_space(uctx)  \
38972 +    ((CQ_Size (uctx->uctx_ddcq->ucq_cq->cq_size)>>3) - ((uctx)->uctx_ddcq_insertcnt - (uctx)->uctx_upage->upage_ddcq_completed))
38973 +
38974 +#define user_ddcq_command_flow_write(uctx) do { \
38975 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
38976 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
38977 +\
38978 +    (uctx)->uctx_ddcq_completed = ((uctx)->uctx_ddcq_insertcnt += 3);\
38979 +\
38980 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_command_flow_write: completed=%llx [%llx] addr=%llx\n", (uctx)->uctx_ddcq_completed, \
38981 +           (uctx)->uctx_upage->upage_ddcq_completed, (uctx)->uctx_upage_addr); \
38982 +    user_ddcq_command_write (GUARD_CMD       | GUARD_ALL_CHANNELS,      0);\
38983 +    user_ddcq_command_write (WRITE_DWORD_CMD | (uctx)->uctx_upage_addr, 1);\
38984 +    user_ddcq_command_write ((uctx)->uctx_ddcq_completed,               2);\
38985 +} while (0)
38986 +
38987 +#define user_ddcq_command_flow_intr(uctx) do { \
38988 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
38989 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
38990 +\
38991 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_command_flow_intr: completed=%llx [%llx] addr=%llx\n", (uctx)->uctx_ddcq_completed, \
38992 +           (uctx)->uctx_upage->upage_ddcq_completed, (uctx)->uctx_upage_addr); \
38993 +    user_ddcq_command_write (INTERRUPT_CMD   | ELAN4_INT_COOKIE_DDCQ,   3);\
38994 +} while (0)
38995 +
38996 +#define user_ddcq_command_prologue(uctx, count) do { \
38997 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
38998 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
38999 +   PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_prologue: iptr=%llx cmdptr=%x\n", iptr, cmdptr);
39000 +
39001 +#define user_ddcq_command_epilogue(uctx, count, extra) \
39002 +   (uctx)->uctx_ddcq_insertcnt = iptr + (count);\
39003 +\
39004 +   PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_epilogue: iptr=%llx + %x + %x - completed %llx\n", iptr, count, extra, (uctx)->uctx_ddcq_completed);\
39005 +   if (((iptr) + (count) + (extra)) > ((uctx)->uctx_ddcq_completed + USER_CTRLFLOW_COUNT))\
39006 +       user_ddcq_command_flow_write(uctx); \
39007 +} while (0)
39008 +
39009 +int
39010 +user_ddcq_check (USER_CTXT *uctx, unsigned num)
39011 +{
39012 +    PRINTF (uctx, DBG_DDCQ, "user_check_ddcq: insert=%llx completed=%llx num=%d\n", 
39013 +           uctx->uctx_ddcq_insertcnt, uctx->uctx_upage->upage_ddcq_completed, num);
39014 +
39015 +    /* Ensure that there is enough space for the command we want to issue,
39016 +     * PLUS the guard/writeword for the control flow flush.
39017 +     * PLUS the interrupt command for rescheduling */
39018 +    if (user_ddcq_command_space (uctx) > (num + 4))
39019 +    {
39020 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: loads of space\n");
39021 +
39022 +       return (1);
39023 +    }
39024 +    
39025 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: not enough space - reschedule\n");
39026 +
39027 +    uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
39028 +    return (0);
39029 +}
39030 +
39031 +int
39032 +user_ddcq_flush (USER_CTXT *uctx)
39033 +{
39034 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
39035 +    USER_CQ   *ucq = uctx->uctx_ddcq;
39036 +
39037 +    switch (ucq->ucq_state)
39038 +    {
39039 +    case UCQ_TRAPPED:
39040 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: command queue is trapped\n");
39041 +       return (0);
39042 +       
39043 +    case UCQ_NEEDS_RESTART:
39044 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: restarting command queue\n");
39045 +
39046 +       if (UCTX_RUNNABLE (uctx))
39047 +       {
39048 +           ucq->ucq_state = UCQ_RUNNING;
39049 +           elan4_restartcq (dev, ucq->ucq_cq);
39050 +       }
39051 +       break;
39052 +    }
39053 +
39054 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: insertcnt=%llx completed=%llx [%llx]\n", 
39055 +           uctx->uctx_ddcq_insertcnt, uctx->uctx_ddcq_completed, uctx->uctx_upage->upage_ddcq_completed);
39056 +
39057 +    if (uctx->uctx_ddcq_completed != uctx->uctx_ddcq_insertcnt)
39058 +       user_ddcq_command_flow_write (uctx);
39059 +
39060 +    return (uctx->uctx_ddcq_completed == uctx->uctx_upage->upage_ddcq_completed);
39061 +}
39062 +
39063 +void
39064 +user_ddcq_intr (USER_CTXT *uctx)
39065 +{
39066 +    user_ddcq_command_flow_intr (uctx);
39067 +}
39068 +
39069 +void
39070 +user_ddcq_run_dma (USER_CTXT *uctx, E4_DMA *dma)
39071 +{
39072 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_run_dma: cookie=%llx vproc=%llx\n",  dma->dma_cookie, dma->dma_vproc);
39073 +
39074 +    user_ddcq_command_prologue(uctx, 7) {
39075 +
39076 +       user_ddcq_command_write ((dma->dma_typeSize & ~DMA_ContextMask) | RUN_DMA_CMD, 0);
39077 +       user_ddcq_command_write (dma->dma_cookie,                                      1);
39078 +       user_ddcq_command_write (dma->dma_vproc,                                       2);
39079 +       user_ddcq_command_write (dma->dma_srcAddr,                                     3);
39080 +       user_ddcq_command_write (dma->dma_dstAddr,                                     4);
39081 +       user_ddcq_command_write (dma->dma_srcEvent,                                    5);
39082 +       user_ddcq_command_write (dma->dma_dstEvent,                                    6);
39083 +
39084 +    } user_ddcq_command_epilogue (uctx, 7, 0);
39085 +}
39086 +
39087 +void
39088 +user_ddcq_run_thread (USER_CTXT *uctx, E4_ThreadRegs *regs)
39089 +{
39090 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_run_thread: PC=%llx SP=%llx\n", regs->Registers[0], regs->Registers[1]);
39091 +
39092 +    user_ddcq_command_prologue(uctx, 7) {
39093 +
39094 +       user_ddcq_command_write (regs->Registers[0] | RUN_THREAD_CMD, 0);
39095 +       user_ddcq_command_write (regs->Registers[1],                  1);
39096 +       user_ddcq_command_write (regs->Registers[2],                  2);
39097 +       user_ddcq_command_write (regs->Registers[3],                  3);
39098 +       user_ddcq_command_write (regs->Registers[4],                  4);
39099 +       user_ddcq_command_write (regs->Registers[5],                  5);
39100 +       user_ddcq_command_write (regs->Registers[6],                  6);
39101 +       
39102 +    } user_ddcq_command_epilogue (uctx, 7, 0);
39103 +}
39104 +
39105 +void
39106 +user_ddcq_setevent (USER_CTXT *uctx, E4_Addr addr)
39107 +{
39108 +    user_ddcq_command_prologue (uctx, 1) {
39109 +
39110 +       user_ddcq_command_write (SET_EVENT_CMD | addr, 0);
39111 +    
39112 +    } user_ddcq_command_epilogue (uctx, 1, 0);
39113 +}
39114 +
39115 +void
39116 +user_ddcq_seteventn (USER_CTXT *uctx, E4_Addr addr, E4_uint32 count)
39117 +{
39118 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_seteventn: addr=%llx count=%lx\n", addr, count);
39119 +
39120 +    user_ddcq_command_prologue (uctx, 2) {
39121 +
39122 +       user_ddcq_command_write (SET_EVENTN_CMD, 0);
39123 +       user_ddcq_command_write (addr | count,   1);
39124 +
39125 +    } user_ddcq_command_epilogue (uctx, 2, 0);
39126 +}
39127 +
39128 +void
39129 +user_ddcq_waitevent (USER_CTXT *uctx, E4_Addr addr, E4_uint64 CountAndType, E4_uint64 Param0, E4_uint64 Param1)
39130 +{
39131 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_waitevent: addr=%llx CountAndType=%llx Param=%llx,%llx\n", addr, CountAndType, Param0, Param1);
39132 +
39133 +    user_ddcq_command_prologue (uctx, 4) {
39134 +
39135 +       user_ddcq_command_write (WAIT_EVENT_CMD | addr, 0);
39136 +       user_ddcq_command_write (CountAndType,          1);
39137 +       user_ddcq_command_write (Param0,                2);
39138 +       user_ddcq_command_write (Param1,                3);
39139 +
39140 +    } user_ddcq_command_epilogue (uctx, 4, 0);
39141 +}
39142 +
39143 +/*
39144 + * Local variables:
39145 + * c-file-style: "stroustrup"
39146 + * End:
39147 + */
39148 Index: linux-2.4.21/drivers/net/qsnet/elan4/user_Linux.c
39149 ===================================================================
39150 --- linux-2.4.21.orig/drivers/net/qsnet/elan4/user_Linux.c      2004-02-23 16:02:56.000000000 -0500
39151 +++ linux-2.4.21/drivers/net/qsnet/elan4/user_Linux.c   2005-06-01 23:12:54.626435000 -0400
39152 @@ -0,0 +1,377 @@
39153 +/*
39154 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
39155 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
39156 + * 
39157 + *    For licensing information please see the supplied COPYING file
39158 + *
39159 + */
39160 +
39161 +#ident "@(#)$Id: user_Linux.c,v 1.25.2.4 2005/01/18 14:36:10 david Exp $"
39162 +/*      $Source: /cvs/master/quadrics/elan4mod/user_Linux.c,v $*/
39163 +
39164 +#include <qsnet/kernel.h>
39165 +#include <qsnet/kpte.h>
39166 +
39167 +#include <linux/pci.h>
39168 +
39169 +#include <elan4/debug.h>
39170 +#include <elan4/device.h>
39171 +#include <elan4/user.h>
39172 +
39173 +static int
39174 +user_pteload (USER_CTXT *uctx, E4_Addr addr, physaddr_t phys, int perm)
39175 +{
39176 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
39177 +    E4_uint64  newpte = elan4mmu_phys2pte (dev, phys, perm);
39178 +    
39179 +    /*
39180 +     * On MPSAS we don't allocate a large enough context table, so 
39181 +     * if we see an address/context pair which would "alias" because
39182 +     * they differ in unchecked hash bits to a previous pteload, 
39183 +     * then we kill the application.
39184 +     */
39185 +    {
39186 +       unsigned hashval = (E4MMU_SHIFT_ADDR(addr, (dev->dev_pageshift[0]) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(uctx->uctx_ctxt.ctxt_num));
39187 +       
39188 +       if (dev->dev_rsvd_hashval[0] == 0xFFFFFFFF)
39189 +           dev->dev_rsvd_hashval[0] = hashval & dev->dev_rsvd_hashmask[0];
39190 +       
39191 +       if ((hashval & dev->dev_rsvd_hashmask[0]) != dev->dev_rsvd_hashval[0])
39192 +       {
39193 +           printk ("user_pteload: vaddr=%016llx ctxnum=%x -> [%x] overlaps %x - %x [hashidx=%x]\n", (unsigned long long) addr, 
39194 +                   uctx->uctx_ctxt.ctxt_num, hashval, hashval & dev->dev_rsvd_hashmask[0], dev->dev_rsvd_hashval[0],
39195 +                   E4MMU_HASH_INDEX (uctx->uctx_ctxt.ctxt_num, addr, dev->dev_pageshift[0], dev->dev_hashsize[0]-1));
39196 +           
39197 +           return -EFAULT;
39198 +       }
39199 +    }
39200 +
39201 +    if ((newpte & (PTE_PciNotLocal | PTE_CommandQueue)) == 0 && 
39202 +       ((addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (phys & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT))))
39203 +    {
39204 +       printk ("user_pteload: vaddr=%016llx incorrectly alias sdram at %lx\n", (unsigned long long) addr, 
39205 +               phys ^ pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
39206 +       return -EFAULT;
39207 +    }
39208 +
39209 +    if (newpte & PTE_PciNotLocal)
39210 +       PRINTF (uctx, DBG_FAULT, "user_pteload: addr=%llx -> pte=%llx (pci)\n", addr, newpte);
39211 +    else if (newpte & PTE_CommandQueue)
39212 +       PRINTF (uctx, DBG_FAULT, "user_pteload: addr=%llx -> pte=%llx (command)\n", addr, newpte);
39213 +    else
39214 +       PRINTF (uctx, DBG_FAULT, "user_pteload: addr=%llx -> pte=%llx (sdram)\n", addr, newpte);
39215 +
39216 +    elan4mmu_pteload (&uctx->uctx_ctxt, 0, addr, newpte);
39217 +
39218 +    return (0);
39219 +}
39220 +
39221 +int
39222 +user_load_range (USER_CTXT *uctx, E4_Addr eaddr, unsigned long nbytes, E4_uint32 fsr)
39223 +{
39224 +    ELAN4_DEV             *dev       = uctx->uctx_ctxt.ctxt_dev;
39225 +    struct mm_struct      *mm        = current->mm;
39226 +    int                    writeable = (AT_Perm(fsr) == AT_PermLocalDataWrite ||
39227 +                                       AT_Perm(fsr) == AT_PermRemoteWrite    ||
39228 +                                       AT_Perm(fsr) == AT_PermLocalEvent     ||
39229 +                                       AT_Perm(fsr) == AT_PermRemoteEvent);
39230 +    struct vm_area_struct *vma;
39231 +    int                    i, perm;
39232 +    unsigned long          len;
39233 +    unsigned long          maddr;
39234 +    physaddr_t            phys;
39235 +
39236 +    kmutex_lock (&uctx->uctx_rgnmutex);
39237 +
39238 +    while (nbytes > 0)
39239 +    {
39240 +       USER_RGN *rgn = user_rgnat_elan (uctx, eaddr);
39241 +
39242 +       if (rgn == NULL || ELAN4_INCOMPAT_ACCESS (rgn->rgn_perm, AT_Perm (fsr)))
39243 +       {
39244 +           PRINTF (uctx, DBG_FAULT, "user_load_range: eaddr=%llx -> %s\n", eaddr, rgn == NULL ? "no mapping" : "no permission");
39245 +
39246 +           kmutex_unlock (&uctx->uctx_rgnmutex);
39247 +           return (rgn == NULL ? -EFAULT : -EPERM);
39248 +       }
39249 +
39250 +       if (writeable)
39251 +           perm = rgn->rgn_perm;
39252 +/* This is the correct code but it breaks the Eagle libraries (1.6.X) - backed out (addy 24.08.04)
39253 +       else if (AT_Perm(fsr) == AT_PermExecute && (rgn->rgn_perm & PERM_Mask) != PERM_LocExecute)
39254 +*/
39255 +       else if (AT_Perm(fsr) == AT_PermExecute)
39256 +           perm = PERM_LocRead | (rgn->rgn_perm & ~PERM_Mask);
39257 +       else
39258 +           perm = ELAN4_PERM_READONLY (rgn->rgn_perm & PERM_Mask) | (rgn->rgn_perm & ~PERM_Mask);
39259 +
39260 +       PRINTF (uctx, DBG_FAULT, "user_load_range: rgn=%p [%llx.%lx.%x]\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len);
39261 +
39262 +       len = ((rgn->rgn_ebase + rgn->rgn_len) - eaddr);
39263 +       if (len > nbytes)
39264 +           len = nbytes;
39265 +       nbytes -= len;
39266 +       
39267 +       maddr = rgn->rgn_mbase + (eaddr - rgn->rgn_ebase);
39268 +
39269 +       PRINTF (uctx, DBG_FAULT, "user_load_range: eaddr=%llx->%llx -> %lx->%lx len=%x perm=%x\n", eaddr, 
39270 +               eaddr + len, maddr, maddr + len, len, perm);
39271 +
39272 +       down_read (&mm->mmap_sem);
39273 +       while (len > 0)
39274 +       {
39275 +           if ((vma = find_vma_intersection (mm, maddr, maddr + PAGE_SIZE)) == NULL ||
39276 +               (writeable && !(vma->vm_flags & VM_WRITE)))
39277 +           {
39278 +               PRINTF (DBG_USER, DBG_FAULT, "ctxt_pagefault: %s %lx\n", vma ? "no writeble at" : "no vma for", maddr);
39279 +               up_read (&mm->mmap_sem);
39280 +               kmutex_unlock (&uctx->uctx_rgnmutex);
39281 +               return (-EFAULT);
39282 +           }
39283 +
39284 +           spin_lock (&mm->page_table_lock);
39285 +           {
39286 +               pte_t *ptep_ptr;
39287 +               pte_t  ptep_value;
39288 +
39289 +               ptep_ptr = find_pte_map (mm, maddr);
39290 +               if (ptep_ptr) {
39291 +                   ptep_value = *ptep_ptr;
39292 +                   pte_unmap(ptep_ptr);
39293 +               }
39294 +
39295 +               PRINTF (uctx, DBG_FAULT, "user_load_range: %lx %s %s\n", maddr, writeable ? "writeable" : "readonly", 
39296 +                       !ptep_ptr ? "invalid" : pte_none(ptep_value) ? "none " : !pte_present(ptep_value) ? "swapped " : 
39297 +                       writeable && !pte_write(ptep_value) ? "COW" : "OK");
39298 +               
39299 +               if (ptep_ptr == NULL || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value)) || !pte_read (ptep_value))
39300 +               {
39301 +                   spin_unlock (&mm->page_table_lock);
39302 +                   
39303 +                   make_pages_present(maddr, maddr + PAGE_SIZE);
39304 +                   
39305 +                   spin_lock (&mm->page_table_lock);
39306 +
39307 +                   ptep_ptr = find_pte_map (mm, maddr);
39308 +                   if (ptep_ptr) {
39309 +                       ptep_value = *ptep_ptr;
39310 +                       pte_unmap(ptep_ptr);
39311 +                   }
39312 +                   
39313 +                   if (ptep_ptr == NULL || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value)) || !pte_read (ptep_value))
39314 +                   {   
39315 +                       spin_unlock (&mm->page_table_lock);
39316 +                       up_read (&mm->mmap_sem);
39317 +                       kmutex_unlock (&uctx->uctx_rgnmutex);
39318 +                       return (-EFAULT);
39319 +                   }
39320 +               } 
39321 +               
39322 +               if (writeable)
39323 +                   pte_mkdirty(ptep_value);
39324 +               pte_mkyoung (ptep_value);
39325 +
39326 +               phys = pte_phys (ptep_value);
39327 +
39328 +               for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
39329 +               {
39330 +                   if (user_pteload (uctx, eaddr, phys, perm) < 0)
39331 +                   {
39332 +                       spin_unlock (&mm->page_table_lock);
39333 +                       up_read (&mm->mmap_sem);
39334 +                       kmutex_unlock (&uctx->uctx_rgnmutex);
39335 +                       return (-EFAULT);
39336 +                   }
39337 +                   
39338 +                   eaddr += (1 << dev->dev_pageshift[0]);
39339 +                   phys  += (1 << dev->dev_pageshift[0]);
39340 +               }
39341 +           }
39342 +           spin_unlock (&mm->page_table_lock);
39343 +               
39344 +           maddr += PAGE_SIZE;
39345 +           len   -= PAGE_SIZE;
39346 +       }
39347 +       up_read (&mm->mmap_sem);
39348 +    }
39349 +    kmutex_unlock (&uctx->uctx_rgnmutex);
39350 +
39351 +    PRINTF (uctx, DBG_FAULT, "user_load_range: alldone\n");
39352 +
39353 +    return (0);
39354 +}
39355 +
39356 +void
39357 +user_preload_main (USER_CTXT *uctx, virtaddr_t addr, unsigned long len)
39358 +{
39359 +    virtaddr_t             lim = addr + len - 1;
39360 +    struct vm_area_struct *vma;
39361 +
39362 +    down_read (&current->mm->mmap_sem);
39363 +
39364 +    if ((vma = find_vma (current->mm, addr)) != NULL)
39365 +    {
39366 +       do {
39367 +           unsigned long start = vma->vm_start;
39368 +           unsigned long end   = vma->vm_end;
39369 +
39370 +           if ((start-1) >= lim)
39371 +               break;
39372 +
39373 +           if (start < addr) start = addr;
39374 +           if ((end-1) > lim) end = lim+1;
39375 +               
39376 +           if (vma->vm_flags & VM_IO)
39377 +               continue;
39378 +
39379 +           user_unload_main (uctx, start, end - start);
39380 +
39381 +           make_pages_present (start, end);
39382 +
39383 +           user_update_main (uctx, current->mm, start, end - start);
39384 +
39385 +       } while ((vma = find_vma (current->mm, vma->vm_end)) != NULL);
39386 +    }
39387 +    up_read (&current->mm->mmap_sem);
39388 +}
39389 +
39390 +static void
39391 +user_update_range (USER_CTXT *uctx, int tbl, struct mm_struct *mm, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, int perm)
39392 +{
39393 +    ELAN4_DEV *dev    = uctx->uctx_ctxt.ctxt_dev;
39394 +    int        roperm = ELAN4_PERM_READONLY(perm & PERM_Mask) | (perm & ~PERM_Mask);
39395 +    int        nbytes;
39396 +
39397 +    while (len > 0)
39398 +    {
39399 +       pte_t *ptep_ptr;
39400 +       pte_t  ptep_value;
39401 +       
39402 +       ptep_ptr = find_pte_map (mm, maddr);
39403 +       if (ptep_ptr) {
39404 +           ptep_value = *ptep_ptr;
39405 +           pte_unmap(ptep_ptr);
39406 +       }
39407 +
39408 +       PRINTF (uctx, DBG_IOPROC, "user_update_range: %llx (%lx) %s\n", eaddr, maddr, 
39409 +               !ptep_ptr ? "invalid" : pte_none(ptep_value) ? "none " : !pte_present(ptep_value) ? "swapped " : 
39410 +               !pte_write(ptep_value) ? "RO/COW" : "OK");
39411 +       
39412 +       if (ptep_ptr && !pte_none(ptep_value) && pte_present(ptep_value) && pte_read (ptep_value)) {
39413 +           physaddr_t phys_value = pte_phys(ptep_value);
39414 +           for (nbytes = 0; nbytes < PAGE_SIZE; nbytes += (1 << dev->dev_pageshift[0]))
39415 +           {
39416 +               user_pteload (uctx, eaddr, phys_value, pte_write (ptep_value) ? perm : roperm);
39417 +
39418 +               eaddr       += (1 << dev->dev_pageshift[0]);
39419 +               phys_value  += (1 << dev->dev_pageshift[0]);
39420 +           }
39421 +       }
39422 +
39423 +       maddr += PAGE_SIZE;
39424 +       len   -= PAGE_SIZE;
39425 +    }
39426 +}
39427 +
39428 +void
39429 +user_update_main (USER_CTXT *uctx, struct mm_struct *mm, virtaddr_t start, unsigned long len)
39430 +{
39431 +    USER_RGN     *rgn;
39432 +    unsigned long ssize;
39433 +    virtaddr_t    end = start + len - 1;
39434 +
39435 +    spin_lock (&uctx->uctx_rgnlock);
39436 +
39437 +    PRINTF (uctx, DBG_IOPROC, "user_update_main: start=%lx end=%lx\n", start, end);
39438 +
39439 +    for (rgn = user_findrgn_main (uctx, start, 0); rgn != NULL; rgn = rgn->rgn_mnext)
39440 +    {
39441 +       if (end < rgn->rgn_mbase)
39442 +           break;
39443 +       
39444 +       if (start <= rgn->rgn_mbase && end >= (rgn->rgn_mbase + rgn->rgn_len - 1)) 
39445 +       {
39446 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: whole %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len - 1);
39447 +
39448 +           user_update_range (uctx, 0 /* tbl */, mm, rgn->rgn_mbase, rgn->rgn_ebase, rgn->rgn_len, rgn->rgn_perm);
39449 +       }
39450 +       else if (start <= rgn->rgn_mbase)
39451 +       {
39452 +           ssize = end - rgn->rgn_mbase + 1;
39453 +
39454 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: start %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + ssize);
39455 +
39456 +           user_update_range (uctx, 0 /* tbl */, mm, rgn->rgn_mbase, rgn->rgn_ebase, ssize, rgn->rgn_perm);
39457 +       }
39458 +       else if (end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
39459 +       {
39460 +           ssize = (rgn->rgn_mbase + rgn->rgn_len) - start;
39461 +
39462 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: end   %lx -> %lx\n", start, start + ssize);
39463 +
39464 +           user_update_range (uctx, 0 /* tbl */, mm, start, rgn->rgn_ebase + (start - rgn->rgn_mbase), ssize, rgn->rgn_perm);
39465 +       }
39466 +       else
39467 +       {
39468 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: middle %lx -> %lx\n", start, end);
39469 +
39470 +           user_update_range (uctx, 0 /* tbl */, mm, start, rgn->rgn_ebase + (start - rgn->rgn_mbase), len, rgn->rgn_perm);
39471 +       }
39472 +    }
39473 +    spin_unlock (&uctx->uctx_rgnlock);
39474 +}
39475 +
39476 +void
39477 +user_unload_main (USER_CTXT *uctx, virtaddr_t start, unsigned long len)
39478 +{
39479 +    USER_RGN     *rgn;
39480 +    unsigned long ssize;
39481 +    virtaddr_t    end = start + len - 1;
39482 +
39483 +    spin_lock (&uctx->uctx_rgnlock);
39484 +
39485 +    PRINTF (uctx, DBG_IOPROC, "user_unload_main: start=%lx end=%lx\n", start, end);
39486 +
39487 +    for (rgn = user_findrgn_main (uctx, start, 0); rgn != NULL; rgn = rgn->rgn_mnext)
39488 +    {
39489 +       if (end < rgn->rgn_mbase)
39490 +           break;
39491 +       
39492 +       if (start <= rgn->rgn_mbase && end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
39493 +       {
39494 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: whole %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len - 1);
39495 +
39496 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase, rgn->rgn_len);
39497 +       }
39498 +       else if (start <= rgn->rgn_mbase)
39499 +       {
39500 +           ssize = end - rgn->rgn_mbase + 1;
39501 +
39502 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: start %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + ssize);
39503 +
39504 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase, ssize);
39505 +       }
39506 +       else if (end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
39507 +       {
39508 +           ssize = (rgn->rgn_mbase + rgn->rgn_len) - start;
39509 +           
39510 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: end   %lx -> %lx\n", start, start + ssize);
39511 +           
39512 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase + (start - rgn->rgn_mbase), ssize);
39513 +       }
39514 +       else
39515 +       {
39516 +
39517 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: middle %lx -> %lx\n", start, end);
39518 +
39519 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase + (start - rgn->rgn_mbase), len);
39520 +       }
39521 +    }
39522 +    spin_unlock (&uctx->uctx_rgnlock);
39523 +}
39524 +
39525 +/*
39526 + * Local variables:
39527 + * c-file-style: "stroustrup"
39528 + * End:
39529 + */
39530 Index: linux-2.4.21/drivers/net/qsnet/ep/asm_elan4_thread.S
39531 ===================================================================
39532 --- linux-2.4.21.orig/drivers/net/qsnet/ep/asm_elan4_thread.S   2004-02-23 16:02:56.000000000 -0500
39533 +++ linux-2.4.21/drivers/net/qsnet/ep/asm_elan4_thread.S        2005-06-01 23:12:54.626435000 -0400
39534 @@ -0,0 +1,78 @@
39535 +/*
39536 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
39537 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
39538 + *
39539 + *    For licensing information please see the supplied COPYING file
39540 + *
39541 + */
39542 +
39543 +#ident "@(#)$Id: asm_elan4_thread.S,v 1.1 2003/09/23 13:55:11 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
39544 +/*      $Source: /cvs/master/quadrics/epmod/asm_elan4_thread.S,v $*/
39545 +
39546 +#include <elan4/events.h>
39547 +#include <elan4/commands.h>
39548 +
39549 +/*
39550 + * c_reschedule (E4_uint64 *commandport)
39551 + */            
39552 +       .global c_reschedule
39553 +c_reschedule:
39554 +       add             %sp, -128, %sp
39555 +       st64            %r16, [%sp]                     // preserve call preserved registers
39556 +       st64            %r24, [%sp + 64]                // - see CALL_USED_REGISTERS.
39557 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
39558 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
39559 +       nop                                             // BUG FIX: E4 RevA
39560 +       nop                                             // BUG FIX: E4 RevA
39561 +       
39562 +       mov             %r7, %r18                       // (%r2) return pc
39563 +1:     call            2f
39564 +        mov            %sp, %r17                       // (%r1) SP
39565 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
39566 +       mov             NOP_CMD, %r23                   // "nop" command
39567 +       st64suspend     %r16, [%r8]
39568 +3:     ld64            [%sp], %r16
39569 +       ld64            [%sp + 64], %r24                // restore call preserved register
39570 +       jmpl            %r2+8, %r0                      // and return
39571 +        add            %sp, 128, %sp
39572 +       
39573 +
39574 +/*
39575 + * c_waitevent (E4_uint64 *commandport, E4_Event *event, E4_uint64 count)
39576 + */
39577 +       .global c_waitevent
39578 +c_waitevent:
39579 +       add             %sp, -192, %sp
39580 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
39581 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
39582 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
39583 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
39584 +       nop                                             // BUG FIX: E4 RevA
39585 +       nop                                             // BUG FIX: E4 RevA
39586 +
39587 +       mov             %r7, %r18                       // (%r2) return pc
39588 +1:     call            2f
39589 +        mov            %sp, %r17                       // (%r1) SP
39590 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
39591 +       st32            %r16, [%sp]                     // event source block
39592 +       mov             MAKE_EXT_CLEAN_CMD, %r23        // "flush command queue desc" command
39593 +       st8             %r23, [%sp+56]                  // event source block
39594 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
39595 +       mov             %r23,%r23                       // BUG FIX: E4 RevA
39596 +       nop                                             // BUG FIX: E4 RevA
39597 +       nop                                             // BUG FIX: E4 RevA
39598 +       
39599 +
39600 +       or              %r9, WAIT_EVENT_CMD, %r16
39601 +       sll8            %r10, 32, %r17
39602 +       or              %r17, E4_EVENT_TYPE_VALUE(E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), %r17
39603 +       mov             %sp, %r18
39604 +       mov             %r8, %r19
39605 +       
39606 +       st32suspend     %r16, [%r8]
39607 +       
39608 +3:     ld64            [%sp + 64], %r16                // restore call preserved register
39609 +       ld64            [%sp + 128], %r24
39610 +       jmpl            %r2+8, %r0                      // and return
39611 +        add            %sp, 192, %sp
39612 +
39613 Index: linux-2.4.21/drivers/net/qsnet/ep/assym_elan4.h
39614 ===================================================================
39615 --- linux-2.4.21.orig/drivers/net/qsnet/ep/assym_elan4.h        2004-02-23 16:02:56.000000000 -0500
39616 +++ linux-2.4.21/drivers/net/qsnet/ep/assym_elan4.h     2005-06-01 23:12:54.627434848 -0400
39617 @@ -0,0 +1,20 @@
39618 +/*
39619 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
39620 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
39621 + *
39622 + *    For licensing information please see the supplied COPYING file
39623 + *
39624 + */
39625 +
39626 +#ident "@(#)$Id: genassym_elan4.c,v 1.3 2004/04/25 11:26:07 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
39627 +/*      $Source: /cvs/master/quadrics/epmod/genassym_elan4.c,v $*/
39628 +
39629 +/* Generated by genassym_elan4 - do not modify */
39630 +
39631 +#define EP4_RCVR_THREAD_STALL  0
39632 +#define EP4_RCVR_PENDING_TAILP 128
39633 +#define EP4_RCVR_PENDING_HEAD  136
39634 +#define EP4_RCVR_DEBUG         176
39635 +#define EP4_RXD_NEXT           664
39636 +#define EP4_RXD_QUEUED         728
39637 +#define EP4_RXD_DEBUG          944
39638 Index: linux-2.4.21/drivers/net/qsnet/ep/cm.c
39639 ===================================================================
39640 --- linux-2.4.21.orig/drivers/net/qsnet/ep/cm.c 2004-02-23 16:02:56.000000000 -0500
39641 +++ linux-2.4.21/drivers/net/qsnet/ep/cm.c      2005-06-01 23:12:54.632434088 -0400
39642 @@ -0,0 +1,3000 @@
39643 +/*
39644 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
39645 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
39646 + *
39647 + *    For licensing information please see the supplied COPYING file
39648 + *
39649 + */
39650 +
39651 +#ident "@(#)$Id: cm.c,v 1.83.2.6 2005/01/13 12:37:57 mike Exp $"
39652 +/*      $Source: /cvs/master/quadrics/epmod/cm.c,v $ */
39653 +
39654 +#include <qsnet/kernel.h>
39655 +
39656 +#include <elan/kcomm.h>
39657 +
39658 +#include "kcomm_vp.h"
39659 +#include "debug.h"
39660 +#include "cm.h"
39661 +#include <elan/epsvc.h>
39662 +
39663 +#include <qsnet/procfs_linux.h>
39664 +
39665 +#if defined(LINUX)
39666 +#include "conf_linux.h"
39667 +#endif
39668 +
39669 +int BranchingRatios[CM_MAX_LEVELS];
39670 +
39671 +int MachineId      = -1;
39672 +int BrokenLevel    = -1;                       /* Simulates Broken Network */
39673 +int RejoinCheck    = 1;
39674 +int RejoinPanic    = 0;
39675 +
39676 +static int
39677 +SegmentNo (CM_RAIL *cmRail, u_int nodeid, u_int lvl)
39678 +{
39679 +    int i;
39680 +
39681 +    ASSERT (lvl < cmRail->NumLevels);
39682 +    
39683 +    for (i = 0; i < lvl; i++)
39684 +       nodeid /= cmRail->Levels[i].NumSegs;
39685 +    
39686 +    return (nodeid % cmRail->Levels[lvl].NumSegs);
39687 +}
39688 +
39689 +static int
39690 +ClusterIds (CM_RAIL *cmRail, int clvl, int *clmin, int *clmax)
39691 +{
39692 +    int clid  = cmRail->Rail->Position.pos_nodeid - cmRail->Levels[clvl].MinNodeId;
39693 +
39694 +    if (clvl == 0)
39695 +       *clmin = *clmax = clid;
39696 +    else
39697 +    {
39698 +       *clmin = cmRail->Levels[clvl - 1].MinNodeId - cmRail->Levels[clvl].MinNodeId;
39699 +       *clmax = *clmin + cmRail->Levels[clvl - 1].NumNodes - 1;
39700 +    }
39701 +    return (clid);
39702 +}
39703 +
39704 +#if defined(PER_CPU_TIMEOUT)
39705 +static void
39706 +__Schedule_Discovery (CM_RAIL *cmRail)         /* we urgently need to schedule discovery */
39707 +{
39708 +    cmRail->NextDiscoverTime = lbolt;
39709 +
39710 +    if (cmRail->NextRunTime == 0 || AFTER (cmRail->NextRunTime, cmRail->NextDiscoverTime))
39711 +       cmRail->NextRunTime = cmRail->NextDiscoverTime;
39712 +}
39713 +
39714 +static void
39715 +__Schedule_Heartbeat (CM_RAIL *cmRail)
39716 +{
39717 +    cmRail->NextHeartbeatTime = lbolt;
39718 +
39719 +    if (cmRail->NextRunTime == 0 || AFTER (cmRail->NextRunTime, cmRail->NextHeartbeatTime))
39720 +       cmRail->NextRunTime = cmRail->NextHeartbeatTime;
39721 +}
39722 +#else
39723 +
39724 +static void
39725 +__Schedule_Timer (CM_RAIL *cmRail, long tick)
39726 +{
39727 +    if (! timer_pending (&cmRail->HeartbeatTimer) || AFTER (cmRail->NextRunTime, tick))
39728 +    {
39729 +       cmRail->NextRunTime = tick;
39730 +
39731 +       mod_timer (&cmRail->HeartbeatTimer, tick);
39732 +    }
39733 +}
39734 +
39735 +static void
39736 +__Schedule_Discovery (CM_RAIL *cmRail)         /* we urgently need to schedule discovery */
39737 +{
39738 +    __Schedule_Timer (cmRail, cmRail->NextDiscoverTime = lbolt);
39739 +}
39740 +
39741 +static void
39742 +__Schedule_Heartbeat (CM_RAIL *cmRail)
39743 +{
39744 +    __Schedule_Timer (cmRail, cmRail->NextHeartbeatTime = lbolt);
39745 +}
39746 +#endif
39747 +
39748 +static int
39749 +MsgBusy (CM_RAIL *cmRail, int msgNumber)
39750 +{
39751 +    switch (ep_outputq_state (cmRail->Rail, cmRail->MsgQueue, msgNumber))
39752 +    {
39753 +    case EP_OUTPUTQ_BUSY:                      /* still busy */
39754 +       return 1;
39755 +       
39756 +    case EP_OUTPUTQ_FAILED:                    /* NACKed */
39757 +    {
39758 +#if defined(DEBUG_PRINTF)
39759 +       CM_MSG  *msg  = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, msgNumber);
39760 +       uint8_t  type  = msg->Hdr.Type;
39761 +       uint16_t nmaps = msg->Hdr.NumMaps;
39762 +       int16_t  off   = msg->Payload.Statemaps[CM_MSG_MAP(0)].offset;
39763 +       
39764 +       CPRINTF4 (((type == CM_MSG_TYPE_DISCOVER_LEADER) || (type == CM_MSG_TYPE_DISCOVER_SUBORDINATE))  ? 6 : 3, /* we expect broadcasts to be NACKed */
39765 +                 "%s: msg %d type %d  failed%s\n", cmRail->Rail->Name, msgNumber, type, 
39766 +                 (type != CM_MSG_TYPE_HEARTBEAT) ? "" : nmaps == 0 ? ": null heartbeat" :
39767 +                 off == STATEMAP_RESET ? ": heartbeat with R statemaps" : ": heartbeat with statemaps");
39768 +#endif
39769 +       return 0;
39770 +    }
39771 +    
39772 +    case EP_OUTPUTQ_FINISHED:
39773 +       return 0;
39774 +
39775 +    default:
39776 +       panic ("MsgBusy - bad return code from ep_outputq_state\n");
39777 +       /* NOTREACHED */
39778 +    }
39779 +    return 0;
39780 +}
39781 +
39782 +static void
39783 +LaunchMessage (CM_RAIL *cmRail, int msgNumber, int vp, int qnum, int retries, int type, int lvl, int nmaps)
39784 +{
39785 +    CM_MSG *msg = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, msgNumber);
39786 +    CM_HDR *hdr = &msg->Hdr;
39787 +
39788 +    ASSERT (nmaps >= 0 && nmaps <= CM_MSG_MAXMAPS);
39789 +    ASSERT (SPINLOCK_HELD (&cmRail->Lock));
39790 +
39791 +    hdr->Version   = CM_MSG_VERSION;
39792 +    hdr->ParamHash = cmRail->ParamHash;
39793 +    hdr->Timestamp = cmRail->Timestamp;
39794 +    hdr->Checksum  = 0;
39795 +    hdr->NodeId    = cmRail->Rail->Position.pos_nodeid;
39796 +    hdr->MachineId = MachineId;
39797 +    hdr->NumMaps   = nmaps;
39798 +    hdr->Level     = lvl;
39799 +    hdr->Type      = type;
39800 +    hdr->Checksum  = CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps));
39801 +
39802 +    if (BrokenLevel != -1 && (lvl >= ((BrokenLevel >> (cmRail->Rail->Number*4)) & 0xf)))                       /* Simulate broken network? */
39803 +       return;
39804 +
39805 +    if (ep_outputq_send (cmRail->Rail, cmRail->MsgQueue, msgNumber, 
39806 +                        CM_MSG_SIZE(nmaps), vp, qnum, retries))
39807 +       IncrStat (cmRail, LaunchMessageFail);
39808 +}
39809 +
39810 +static int
39811 +SendMessage (CM_RAIL *cmRail, int nodeId, int lvl, int type)
39812 +{
39813 +    int        msgNumber = CM_NUM_NODE_MSG_BUFFERS + cmRail->NextSpareMsg;
39814 +    int n         = CM_NUM_SPARE_MSG_BUFFERS;
39815 +    int retries;
39816 +
39817 +    ASSERT (type == CM_MSG_TYPE_IMCOMING ||    /* other types must use SendToSgmt */
39818 +           type == CM_MSG_TYPE_REJOIN);
39819 +   
39820 +    while (n-- > 0 && MsgBusy (cmRail, msgNumber))     /* search for idle "spare" buffer */
39821 +    {
39822 +       if (++(cmRail->NextSpareMsg) == CM_NUM_SPARE_MSG_BUFFERS)
39823 +           cmRail->NextSpareMsg = 0;
39824 +      
39825 +       msgNumber = CM_NUM_NODE_MSG_BUFFERS + cmRail->NextSpareMsg;
39826 +    }
39827 +
39828 +    if (n < 0)                                 /* all "spare" message buffers busy */
39829 +    {
39830 +       CPRINTF3 (3, "%s: all spare message buffers busy: trying to send type %d to %d\n",
39831 +                 cmRail->Rail->Name, type, nodeId);
39832 +       return (0);
39833 +    }
39834 +
39835 +    /* NB IMCOMING may be echoed by MANY nodes, so we don't (and mustn't) have any retries */
39836 +    retries = (type == CM_MSG_TYPE_IMCOMING) ? 0 : CM_P2P_DMA_RETRIES;
39837 +   
39838 +    LaunchMessage (cmRail, msgNumber, EP_VP_NODE (nodeId), EP_SYSTEMQ_INTR, /* eager receive */
39839 +                  retries, type, lvl, 0);
39840 +   
39841 +    if (++(cmRail->NextSpareMsg) == CM_NUM_SPARE_MSG_BUFFERS) /* check this one last next time */
39842 +       cmRail->NextSpareMsg = 0;
39843 +
39844 +    return (1);
39845 +}
39846 +
39847 +static int
39848 +SendToSgmt (CM_RAIL *cmRail, CM_SGMT *sgmt, int type)
39849 +{    
39850 +    bitmap_t         seg;
39851 +    int              offset;
39852 +    int              nmaps;
39853 +    int              sidx;
39854 +    int              clvl;
39855 +    
39856 +    ASSERT (sgmt->Level <= cmRail->TopLevel);
39857 +
39858 +    if (MsgBusy (cmRail, sgmt->MsgNumber))             /* previous message still busy */
39859 +    {
39860 +       CPRINTF3 (3, "%s: node message buffer busy: trying to send type %d to %d\n",
39861 +                 cmRail->Rail->Name, type, sgmt->NodeId);
39862 +      
39863 +       return (0);
39864 +    }
39865 +
39866 +    switch (type)
39867 +    {
39868 +    case CM_MSG_TYPE_RESOLVE_LEADER:
39869 +    case CM_MSG_TYPE_DISCOVER_LEADER:
39870 +       ASSERT (sgmt->State == CM_SGMT_ABSENT);
39871 +       ASSERT (sgmt->Level == ((cmRail->Role == CM_ROLE_LEADER_CANDIDATE) ? cmRail->TopLevel : cmRail->TopLevel - 1));
39872 +       ASSERT (sgmt->Level < cmRail->NumLevels);
39873 +       ASSERT (sgmt->Sgmt == cmRail->Levels[sgmt->Level].MySgmt);
39874 +      
39875 +       /* broadcast to me and all my peers at this level (== my segment in the level above) */
39876 +       sidx = (sgmt->Level == cmRail->NumLevels - 1) ? 0 : cmRail->Levels[sgmt->Level + 1].MySgmt;
39877 +
39878 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_BCAST (sgmt->Level + 1, sidx), 
39879 +                      EP_SYSTEMQ_INTR, 0,              /* eager rx; no retries */
39880 +                      type, sgmt->Level, 0);
39881 +       return (1);
39882 +      
39883 +    case CM_MSG_TYPE_DISCOVER_SUBORDINATE:
39884 +       ASSERT (sgmt->Sgmt != cmRail->Levels[sgmt->Level].MySgmt);
39885 +       ASSERT (sgmt->State == CM_SGMT_WAITING);
39886 +       ASSERT (sgmt->Level > 0);                       /* broadcasting just to subtree */
39887 +      
39888 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_BCAST (sgmt->Level, sgmt->Sgmt), 
39889 +                      EP_SYSTEMQ_INTR, 0,              /* eager rx; no retries */
39890 +                      CM_MSG_TYPE_DISCOVER_SUBORDINATE, sgmt->Level, 0);
39891 +       return (1);
39892 +      
39893 +    case CM_MSG_TYPE_NOTIFY:
39894 +       ASSERT (sgmt->State == CM_SGMT_PRESENT);
39895 +      
39896 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_NODE (sgmt->NodeId),
39897 +                      EP_SYSTEMQ_INTR, CM_P2P_DMA_RETRIES, /* eager rx; lots of retries */
39898 +                      CM_MSG_TYPE_NOTIFY, sgmt->Level, 0);
39899 +       return (1);
39900 +      
39901 +    case CM_MSG_TYPE_HEARTBEAT:
39902 +    {
39903 +       CM_MSG *msg = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, sgmt->MsgNumber);
39904 +       CM_HDR *hdr = &msg->Hdr;
39905 +
39906 +       ASSERT (sgmt->State == CM_SGMT_PRESENT);
39907 +       
39908 +       hdr->AckSeq = sgmt->AckSeq;
39909 +   
39910 +       if (!sgmt->MsgAcked)                    /* Current message not acknowledged */
39911 +       {
39912 +           /* must have been something significant to require an ack */
39913 +           ASSERT (sgmt->SendMaps);
39914 +           ASSERT (sgmt->NumMaps > 0);
39915 +           
39916 +           CPRINTF3 (3, "%s: retrying heartbeat to %d (%d entries)\n", cmRail->Rail->Name, sgmt->NodeId, sgmt->NumMaps);
39917 +
39918 +           IncrStat (cmRail, RetryHeartbeat);
39919 +
39920 +           nmaps = sgmt->NumMaps;
39921 +       }
39922 +       else
39923 +       {
39924 +           nmaps = 0;
39925 +      
39926 +           if (sgmt->SendMaps)                 /* can send maps */
39927 +           {
39928 +               for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
39929 +               {
39930 +                   if (!sgmt->Maps[clvl].OutputMapValid)
39931 +                       continue;
39932 +                   
39933 +                   while ((offset = statemap_findchange (sgmt->Maps[clvl].OutputMap, &seg, 1)) >= 0)
39934 +                   {
39935 +                       CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(nmaps)];
39936 +
39937 +                       sgmt->Maps[clvl].SentChanges = 1;
39938 +                       
39939 +                       map->level  = clvl;
39940 +                       map->offset = offset;
39941 +                       map->seg[0] = seg & 0xffff;
39942 +                       map->seg[1] = (seg >> 16) & 0xffff;
39943 +#if (BT_ULSHIFT == 6)
39944 +                       map->seg[2] = (seg >> 32) & 0xffff;
39945 +                       map->seg[3] = (seg >> 48) & 0xffff;
39946 +#elif (BT_ULSHIFT != 5)
39947 +#error "Bad value for BT_ULSHIFT"
39948 +#endif
39949 +                       if (++nmaps == CM_MSG_MAXMAPS)
39950 +                           goto msg_full;
39951 +                   }
39952 +
39953 +                   if (sgmt->Maps[clvl].SentChanges)
39954 +                   {
39955 +                       CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(nmaps)];
39956 +
39957 +                       sgmt->Maps[clvl].SentChanges = 0;
39958 +
39959 +                       map->level  = clvl;
39960 +                       map->offset = STATEMAP_NOMORECHANGES;
39961 +                       
39962 +                       if (++nmaps == CM_MSG_MAXMAPS)
39963 +                           goto msg_full;
39964 +                   }
39965 +               }
39966 +           }
39967 +           
39968 +           ASSERT (nmaps < CM_MSG_MAXMAPS);
39969 +
39970 +       msg_full:
39971 +           sgmt->NumMaps = nmaps;              /* remember how many in case we retry */
39972 +
39973 +           if (nmaps == 0)                     /* no changes to send */
39974 +               hdr->Seq = sgmt->MsgSeq;        /* this one can be dropped */
39975 +           else
39976 +           {
39977 +               hdr->Seq = ++(sgmt->MsgSeq);    /* on to next message number */
39978 +               sgmt->MsgAcked = 0;             /* need this one to be acked before I can send another */
39979 +
39980 +               IncrStat (cmRail, MapChangesSent);
39981 +           }
39982 +       }
39983 +
39984 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_NODE (sgmt->NodeId), 
39985 +                      EP_SYSTEMQ_POLLED,  CM_P2P_DMA_RETRIES, /* polled receive, lots of retries */
39986 +                      CM_MSG_TYPE_HEARTBEAT, sgmt->Level, nmaps);
39987 +
39988 +       IncrStat (cmRail, HeartbeatsSent);
39989 +
39990 +       return (1);
39991 +    }
39992 +
39993 +    default:                                   /* other types must use SendMessage */
39994 +       printk ("SendToSgmt: invalid type %d\n", type);
39995 +       ASSERT (0);
39996 +
39997 +       return (1);
39998 +    }
39999 +}
40000 +
40001 +static char *
40002 +GlobalStatusString (statemap_t *map, int idx)
40003 +{
40004 +    char *strings[] = {"....", "S...", "C...", "R...", 
40005 +                      ".s..", "Ss..", "Cs..", "Rs..", 
40006 +                      "..r.", "S.r.", "C.r.", "R.r.", 
40007 +                      ".sr.", "Ssr.", "Csr.", "Rsr.", 
40008 +                      "...R", "S..R", "C..R", "R..R", 
40009 +                      ".s.R", "Ss.R", "Cs.R", "Rs.R", 
40010 +                      "..rR", "S.rR", "C.rR", "R.rR", 
40011 +                      ".srR", "SsrR", "CsrR", "RsrR"};
40012 +    
40013 +    return (strings[statemap_getbits (map, idx * CM_GSTATUS_BITS, CM_GSTATUS_BITS)]);
40014 +}
40015 +
40016 +static char *
40017 +MapString (char *name, statemap_t *map, int nnodes, char *trailer)
40018 +{
40019 +    static char *space;
40020 +    int          i;
40021 +
40022 +    if (space == NULL)
40023 +       KMEM_ALLOC (space, char *, EP_MAX_NODES*(CM_GSTATUS_BITS+1), 0);
40024 +
40025 +    if (space == NULL)
40026 +       return ("<cannot allocate memory>");
40027 +    else
40028 +    {
40029 +       char *ptr = space;
40030 +
40031 +       sprintf (space, "%s ", name); ptr += strlen (ptr);
40032 +       for (i = 0; i < nnodes; i++, ptr += strlen (ptr))
40033 +           sprintf (ptr, "%s%s", i == 0 ? "" : ",", GlobalStatusString (map, i));
40034 +       sprintf (ptr, " %s", trailer);
40035 +       return (space);
40036 +    }
40037 +}
40038 +
40039 +void
40040 +DisplayMap (DisplayInfo *di, CM_RAIL *cmRail, char *name, statemap_t *map, int nnodes, char *trailer)
40041 +{
40042 +    char  linebuf[256];
40043 +    char *ptr = linebuf;
40044 +    int   i;
40045 +
40046 +#define NODES_PER_LINE 32
40047 +    for (i = 0; i < nnodes; i++)
40048 +    {
40049 +       if (ptr == linebuf)
40050 +       {
40051 +           sprintf (ptr, "%4d", i);
40052 +           ptr += strlen (ptr);
40053 +       }
40054 +       
40055 +       sprintf (ptr, ",%s", GlobalStatusString (map, i));
40056 +       ptr += strlen (ptr);
40057 +
40058 +       if ((i % NODES_PER_LINE) == (NODES_PER_LINE-1) || (i == (nnodes-1)))
40059 +       {
40060 +           (di->func)(di->arg, "%s: %s %s %s\n", cmRail->Rail->Name, name, linebuf, trailer);
40061 +           ptr = linebuf;
40062 +       }
40063 +    }
40064 +#undef NODES_PER_LINE
40065 +}
40066 +
40067 +void
40068 +DisplayNodeMaps (DisplayInfo *di, CM_RAIL *cmRail)
40069 +{
40070 +    int   lvl;
40071 +    int   clvl;
40072 +    char  mapname[128];
40073 +    
40074 +    (di->func)(di->arg, "%s: Node %d maps...\n", cmRail->Rail->Name, cmRail->Rail->Position.pos_nodeid);
40075 +
40076 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
40077 +    {
40078 +       int nnodes = cmRail->Levels[clvl].NumNodes;
40079 +
40080 +       (di->func)(di->arg, "%s: Cluster level %d: Connected %ld - %s%s\n", 
40081 +                  cmRail->Rail->Name, clvl, cmRail->Levels[clvl].Connected,
40082 +                  cmRail->Levels[clvl].Online     ? "Online" : "Offline",
40083 +                  cmRail->Levels[clvl].Restarting ? ", Restarting" : "");
40084 +
40085 +       for (lvl = 0; lvl < cmRail->TopLevel && lvl <= clvl; lvl++)
40086 +       {
40087 +           CM_LEVEL *level = &cmRail->Levels[lvl];
40088 +
40089 +           sprintf (mapname, "%10s%2d", "Level", lvl);
40090 +           DisplayMap (di, cmRail, mapname, level->SubordinateMap[clvl], nnodes,
40091 +                       level->SubordinateMapValid[clvl] ? "" : "(invalid)");
40092 +       }
40093 +
40094 +       sprintf (mapname, "%12s", "Local");
40095 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].LocalMap, nnodes, "");
40096 +
40097 +       sprintf (mapname, "%12s", "Subtree");
40098 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].SubTreeMap, nnodes, 
40099 +                   cmRail->Levels[clvl].SubTreeMapValid ? "" : "(invalid)");
40100 +
40101 +       sprintf (mapname, "%12s", "Global");
40102 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].GlobalMap, nnodes, 
40103 +                   cmRail->Levels[clvl].GlobalMapValid ? "" : "(invalid)");
40104 +
40105 +       sprintf (mapname, "%12s", "LastGlobal");
40106 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].LastGlobalMap, nnodes, "");
40107 +    }
40108 +}
40109 +
40110 +void
40111 +DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail)
40112 +{
40113 +    int   lvl;
40114 +    int   sidx;
40115 +    
40116 +    (di->func)(di->arg, "%s: Node %d segments...\n", cmRail->Rail->Name, cmRail->NodeId);
40117 +    
40118 +    for (lvl = 0; lvl <= cmRail->TopLevel && lvl < cmRail->NumLevels; lvl++)
40119 +    {
40120 +       (di->func)(di->arg, "   level %d: ", lvl);
40121 +       
40122 +       for (sidx = 0; sidx < ((lvl == cmRail->TopLevel) ? 1 : cmRail->Levels[lvl].NumSegs); sidx++)
40123 +       {
40124 +           CM_SGMT *sgmt = &cmRail->Levels[lvl].Sgmts[sidx];
40125 +               
40126 +           if (sgmt->State == CM_SGMT_PRESENT)
40127 +               (di->func)(di->arg, "[%d, in: %d out: %d %s%s]", 
40128 +                       sgmt->NodeId,
40129 +                       sgmt->AckSeq,
40130 +                       sgmt->MsgSeq,
40131 +                       sgmt->MsgAcked ? "A" : "-",
40132 +                       sgmt->SendMaps ? "!" : "-");
40133 +           else
40134 +               (di->func)(di->arg, "[%s]", (sgmt->State == CM_SGMT_ABSENT ? "absent" :
40135 +                                sgmt->State == CM_SGMT_WAITING ? "waiting" :
40136 +                                sgmt->State == CM_SGMT_COMING ? "coming" : "UNKNOWN"));
40137 +       }
40138 +       (di->func)(di->arg, "\n");
40139 +    }
40140 +}
40141 +
40142 +
40143 +static void
40144 +StartConnecting (CM_RAIL *cmRail, CM_SGMT *sgmt, int NodeId, int Timestamp)
40145 +{
40146 +    int clvl;
40147 +
40148 +    CPRINTF4 (2, "%s: lvl %d subtree %d node %d -> connecting\n", cmRail->Rail->Name, sgmt->Level, sgmt->Sgmt, NodeId);
40149 +
40150 +    /* Only reconnect the same guy if he was reborn */
40151 +    ASSERT (sgmt->State != CM_SGMT_PRESENT ||
40152 +           (sgmt->NodeId == NodeId && sgmt->Timestamp != Timestamp));
40153 +   
40154 +    /* After we've connected to a new peer, we wait to receive
40155 +     * STATEMAP_RESET before we accumulate changes and we wait for a
40156 +     * complete map to be received before we propagate changes to other
40157 +     * nodes.
40158 +     *
40159 +     * If I'm the subordinate, I can start sending maps right away, since
40160 +     * the leader is ready for them already.  If I'm the leader, I hold off
40161 +     * sending maps until I've seen the subordinate's first heartbeat,
40162 +     * because the subordinate might miss my NOTIFY message, still think
40163 +     * she's a leader candidate and ignore my heartbeats.
40164 +     */
40165 +    sgmt->SendMaps = (sgmt->Level == cmRail->TopLevel); /* I can send maps to my leader (she NOTIFIED me) */
40166 +
40167 +    for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
40168 +    {
40169 +       statemap_reset (sgmt->Maps[clvl].CurrentInputMap);
40170 +       statemap_reset (sgmt->Maps[clvl].InputMap);
40171 +       statemap_reset (sgmt->Maps[clvl].OutputMap);
40172 +       
40173 +       sgmt->Maps[clvl].InputMapValid = 0;
40174 +       sgmt->Maps[clvl].OutputMapValid = 0;
40175 +       sgmt->Maps[clvl].SentChanges = 0;
40176 +
40177 +       if (sgmt->Level == cmRail->TopLevel)    /* connection to leader */
40178 +       {
40179 +           ASSERT (sgmt->Sgmt == 0);
40180 +           ASSERT (cmRail->Role == CM_ROLE_SUBORDINATE);
40181 +
40182 +           if (cmRail->Levels[clvl].SubTreeMapValid) /* already got a subtree map to send up */
40183 +           {
40184 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].SubTreeMap);
40185 +               sgmt->Maps[clvl].OutputMapValid = 1;
40186 +
40187 +               statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
40188 +           }
40189 +       }
40190 +       else                                    /* connection to subordinate */
40191 +       {
40192 +           ASSERT (sgmt->Sgmt != cmRail->Levels[sgmt->Level].MySgmt);
40193 +
40194 +           if (cmRail->Levels[clvl].GlobalMapValid) /* already got a global map to broadcast */
40195 +           {
40196 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].GlobalMap);
40197 +               sgmt->Maps[clvl].OutputMapValid = 1;
40198 +           }
40199 +       }
40200 +    }
40201 +    
40202 +    /* Initialise sequence counters */
40203 +    sgmt->MsgSeq = sgmt->AckSeq = 0;
40204 +    sgmt->MsgAcked = 1;                        /* ready to send a new sequenced message */
40205 +   
40206 +    sgmt->State      = CM_SGMT_PRESENT;
40207 +    sgmt->NodeId     = NodeId;
40208 +    sgmt->UpdateTick = lbolt;
40209 +    sgmt->Timestamp  = Timestamp;
40210 +}
40211 +
40212 +static void
40213 +StartSubTreeDiscovery (CM_RAIL *cmRail, CM_SGMT *sgmt)
40214 +{
40215 +    sgmt->State = CM_SGMT_WAITING;
40216 +    sgmt->UpdateTick = lbolt;
40217 +    sgmt->WaitingTick = lbolt;
40218 +
40219 +    if (sgmt->Level > 0)
40220 +       __Schedule_Discovery (cmRail);
40221 +}
40222 +
40223 +void
40224 +StartSubordinateDiscovery (CM_RAIL *cmRail)
40225 +{
40226 +    int       i;
40227 +    int       lvl = cmRail->TopLevel - 1;
40228 +    CM_LEVEL *level = &cmRail->Levels[lvl];
40229 +
40230 +    ASSERT (lvl >= 0 && lvl < cmRail->NumLevels);
40231 +
40232 +    for (i = 0; i < level->NumSegs; i++)
40233 +    {
40234 +        CM_SGMT *sgmt = &level->Sgmts[i];
40235 +       
40236 +       if (i != level->MySgmt)         /* No-one should connect here */
40237 +           StartSubTreeDiscovery (cmRail, sgmt);
40238 +    }
40239 +}
40240 +
40241 +void
40242 +StartLeaderDiscovery (CM_RAIL *cmRail)
40243 +{
40244 +    int       i;
40245 +    int       clvl;
40246 +    CM_LEVEL *level = &cmRail->Levels[cmRail->TopLevel];
40247 +
40248 +    ASSERT (cmRail->TopLevel < cmRail->NumLevels);
40249 +
40250 +    for (clvl = cmRail->TopLevel; clvl < cmRail->NumLevels; clvl++)
40251 +    {
40252 +        cmRail->Levels[clvl].GlobalMapValid = 0;
40253 +       cmRail->Levels[clvl].SubTreeMapValid = 0;
40254 +        level->SubordinateMapValid[clvl] = 0;
40255 +    }
40256 +
40257 +    for (i = 0; i < level->NumSegs; i++)
40258 +    {
40259 +        CM_SGMT *sgmt = &level->Sgmts[i];
40260 +       
40261 +       sgmt->State = CM_SGMT_ABSENT;
40262 +    }
40263 +
40264 +    cmRail->DiscoverStartTick = lbolt;
40265 +    cmRail->Role = CM_ROLE_LEADER_CANDIDATE;
40266 +   
40267 +    __Schedule_Discovery (cmRail);
40268 +}
40269 +
40270 +static void
40271 +RaiseTopLevel (CM_RAIL *cmRail)
40272 +{
40273 +    ASSERT (cmRail->NumLevels != 0);
40274 +    ASSERT (cmRail->TopLevel < cmRail->NumLevels);
40275 +
40276 +    CPRINTF2 (2, "%s: RaiseTopLevel %d\n", cmRail->Rail->Name, cmRail->TopLevel + 1);
40277 +
40278 +    if (++cmRail->TopLevel == cmRail->NumLevels)       /* whole machine leader? */
40279 +       cmRail->Role = CM_ROLE_LEADER;
40280 +    else
40281 +       StartLeaderDiscovery (cmRail);          /* look for my leader */
40282 +
40283 +    StartSubordinateDiscovery (cmRail);                /* and any direct subordinates */
40284 +}
40285 +
40286 +static void
40287 +LowerTopLevel (CM_RAIL *cmRail, int lvl)
40288 +{
40289 +    ASSERT (cmRail->NumLevels != 0);
40290 +    ASSERT (lvl < cmRail->NumLevels);
40291 +
40292 +    CPRINTF2 (2, "%s: LowerTopLevel %d\n", cmRail->Rail->Name, lvl);
40293 +
40294 +    if (lvl == 0)
40295 +       cmRail->Timestamp = lbolt;
40296 +
40297 +    cmRail->TopLevel = lvl;
40298 +
40299 +    StartLeaderDiscovery (cmRail);             /* look for my leader */
40300 +}
40301 +
40302 +static int
40303 +IShouldLead (CM_RAIL *cmRail, CM_MSG *msg)
40304 +{
40305 +    /* NB, this function MUST be consistently calculated on any nodes, just
40306 +     * from the info supplied in the message.  Otherwise leadership
40307 +     * arbitration during concurrent discovery will fail.
40308 +     */
40309 +    return (cmRail->NodeId < msg->Hdr.NodeId);
40310 +}
40311 +
40312 +static int
40313 +SumCheck (CM_MSG *msg)
40314 +{
40315 +    CM_HDR   *hdr   = &msg->Hdr;
40316 +    uint16_t  sum   = hdr->Checksum;
40317 +    uint16_t  nmaps = hdr->NumMaps;
40318 +
40319 +    if (nmaps > CM_MSG_MAXMAPS) {
40320 +       printk ("SumCheck: nmaps %d > CM_MSG_MAXMAPS\n", nmaps);
40321 +       return 0;
40322 +    }
40323 +    
40324 +    if ((hdr->Type != CM_MSG_TYPE_HEARTBEAT) && nmaps != 0) {
40325 +       printk ("SumCheck: type(%d) not HEARTBEAT and nmaps(%d) != 0\n", hdr->Type, nmaps);
40326 +       return 0;
40327 +    }
40328 +
40329 +    hdr->Checksum = 0;
40330 +    
40331 +    if (CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps)) != sum) {
40332 +       printk ("SumCheck: checksum failed %x %x\n", CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps)), sum);
40333 +
40334 +       return 0;
40335 +    }
40336 +       
40337 +    return 1;
40338 +}
40339 +
40340 +static void
40341 +ProcessMessage (EP_RAIL *rail, void *arg, void *msgbuf)
40342 +{
40343 +    CM_RAIL       *cmRail = (CM_RAIL *) arg;
40344 +    CM_MSG         *msg    = (CM_MSG *) msgbuf;
40345 +    CM_HDR         *hdr    = &msg->Hdr;
40346 +    int             lvl;
40347 +    int             sidx;
40348 +    CM_LEVEL       *level;
40349 +    CM_SGMT        *sgmt;
40350 +    bitmap_t        seg;
40351 +    int             i;
40352 +    int            delay;
40353 +    static long    tlast;
40354 +    static int     count;
40355 +
40356 +    /* Poll the message Version field until the message has completely
40357 +     * arrived in main memory. */
40358 +    for (delay = 1; hdr->Version == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
40359 +       DELAY (delay);
40360 +
40361 +    /* Display a message every 60 seconds if we see an "old" format message */
40362 +    if (hdr->Version == EP_SYSTEMQ_UNRECEIVED && (((lbolt - tlast) > 60*HZ) ? (count = 0) : ++count) < 1)
40363 +    {
40364 +       printk ("%s: received old protocol message (type %d from node %d)\n", cmRail->Rail->Name, 
40365 +               ((uint8_t *) msg)[20], ((uint16_t *) msg)[4]);
40366 +
40367 +       tlast = lbolt;
40368 +       goto finished;
40369 +    }
40370 +
40371 +    if (hdr->Version != CM_MSG_VERSION || hdr->ParamHash != cmRail->ParamHash || hdr->MachineId != MachineId)
40372 +    {
40373 +       CPRINTF8 (1, "%s: invalid message : Version %08x (%08x) ParamHash %08x (%08x) MachineId %04x (%04x) Nodeid %d\n", cmRail->Rail->Name,
40374 +                 hdr->Version, CM_MSG_VERSION, hdr->ParamHash, cmRail->ParamHash, hdr->MachineId, MachineId, hdr->NodeId);
40375 +       goto finished;
40376 +    }
40377 +
40378 +    if (!SumCheck (msg))
40379 +    {
40380 +       printk ("%s: checksum failed on msg from %d?\n", cmRail->Rail->Name, hdr->NodeId);
40381 +       goto finished;
40382 +    }
40383 +    
40384 +    if (hdr->NodeId == cmRail->NodeId)         /* ignore my own broadcast */       
40385 +    {
40386 +       CPRINTF3 (6, "%s: node %d type %d: ignored (MESSAGE FROM ME)\n", 
40387 +                 cmRail->Rail->Name, hdr->NodeId, hdr->Type);
40388 +
40389 +       if (hdr->Type != CM_MSG_TYPE_DISCOVER_LEADER && hdr->Type != CM_MSG_TYPE_RESOLVE_LEADER)
40390 +           printk ("%s: node %d type %d: ignored (MESSAGE FROM ME)\n", 
40391 +                   cmRail->Rail->Name, hdr->NodeId, hdr->Type);
40392 +       goto finished;
40393 +    }
40394 +
40395 +    lvl = hdr->Level;
40396 +    level = &cmRail->Levels[lvl];
40397 +
40398 +    if (BrokenLevel != -1 && (lvl >= ((BrokenLevel >> (cmRail->Rail->Number*4)) & 0xf)))                       /* Simulate broken network? */
40399 +       goto finished;
40400 +    
40401 +    if (lvl >= cmRail->NumLevels ||            /* from outer space  */
40402 +       hdr->NodeId < level->MinNodeId ||       /* from outside this level's subtree */
40403 +       hdr->NodeId >= level->MinNodeId + level->NumNodes)
40404 +    {
40405 +       printk ("%s: lvl %d node %d type %d: ignored (%s)\n", 
40406 +               cmRail->Rail->Name, lvl, hdr->NodeId, hdr->Type, 
40407 +               lvl >= cmRail->NumLevels ? "level too big for machine" : "outside subtree");
40408 +       goto finished;
40409 +    }
40410 +
40411 +    sidx = SegmentNo (cmRail, hdr->NodeId, lvl);
40412 +    sgmt = &level->Sgmts[sidx];
40413 +    
40414 +    switch (hdr->Type)
40415 +    {
40416 +    case CM_MSG_TYPE_RESOLVE_LEADER:
40417 +       if (lvl >= cmRail->TopLevel)
40418 +       {
40419 +           CPRINTF4 (6, "%s: lvl %d sidx %d node %d RESOLVE_LEADER: ignored (above my level)\n", 
40420 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40421 +           break;
40422 +       }
40423 +
40424 +       /* someone else thinks they lead at the same level as me */
40425 +       CPRINTF4 (1, "%s: lvl %d sidx %d node %d RESOLVE_LEADER: !REJOIN (putsch)\n", 
40426 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40427 +       
40428 +       printk ("%s: lvl %d sidx %d node %d RESOLVE_LEADER: !REJOIN (putsch)\n", 
40429 +               cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40430 +       
40431 +
40432 +       SendMessage (cmRail, hdr->NodeId, lvl, CM_MSG_TYPE_REJOIN);
40433 +       break;
40434 +       
40435 +    case CM_MSG_TYPE_DISCOVER_LEADER:
40436 +       if (lvl > cmRail->TopLevel)
40437 +       {
40438 +           CPRINTF4 (6, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: ignored (above my level)\n", 
40439 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40440 +           break;
40441 +       }
40442 +
40443 +       if (sidx == level->MySgmt)              /* someone I led thinks they lead some of my subtrees */
40444 +       {
40445 +           CPRINTF4 (1, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: !REJOIN (putsch)\n", 
40446 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40447 +
40448 +           printk ("%s: lvl %d sidx %d node %d DISCOVER_LEADER: !REJOIN (putsch)\n", 
40449 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40450 +
40451 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
40452 +           break;
40453 +       }       
40454 +
40455 +       if (lvl < cmRail->TopLevel)                     /* I'm the leader of this level */
40456 +       {
40457 +           if (sgmt->State == CM_SGMT_PRESENT &&       /* someone thinks someone I lead is dead */
40458 +               sgmt->NodeId != hdr->NodeId)
40459 +           {
40460 +               /* My subordinate's death could be noticed by one of her peers
40461 +                * before I do.  If she _is_ dead, I'll notice before long and
40462 +                * NOTIFY this discover.  If this discover completes before I
40463 +                * detect my subordinate's death, the discovering node will
40464 +                * try to take over from me, and then I'll RESET her.
40465 +                */
40466 +               CPRINTF4 (6, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: ignored (got established subordinate)\n", 
40467 +                         cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40468 +               return;
40469 +           }
40470 +
40471 +           if (sgmt->State != CM_SGMT_PRESENT || /* New connection */
40472 +               sgmt->Timestamp != hdr->Timestamp) /* new incarnation */
40473 +               StartConnecting (cmRail, sgmt, hdr->NodeId, hdr->Timestamp);
40474 +         
40475 +           CPRINTF4 (2, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: !NOTIFY)\n", 
40476 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40477 +         
40478 +           SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_NOTIFY);
40479 +           break;
40480 +       }
40481 +
40482 +       ASSERT (lvl == cmRail->TopLevel);
40483 +
40484 +       if (cmRail->Role == CM_ROLE_SUBORDINATE)
40485 +       {
40486 +           /* I think my leader is alive, in which case she'll NOTIFY this
40487 +            * DISCOVER.  If she's dead, I'll start to become a leader
40488 +            * candidate and handle this appropriately.
40489 +            */
40490 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: ignored (I'm a subordinate)\n", 
40491 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
40492 +           break;
40493 +       }
40494 +       
40495 +       ASSERT (cmRail->Role == CM_ROLE_LEADER_CANDIDATE);
40496 +
40497 +       /* A peer at this level is bidding for leadership along with me */
40498 +       if (IShouldLead (cmRail, msg))
40499 +       {
40500 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: but I should lead\n", 
40501 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
40502 +
40503 +           /* So there _is_ someone there; She'll be seeing my DISCOVER
40504 +            * messages and extending her discovery period, so that when I
40505 +            * become leader, I'll NOTIFY her.  In the meantime I'll flag her
40506 +            * activity, so she remains WAITING.
40507 +            */
40508 +           sgmt->UpdateTick = lbolt;
40509 +           break;
40510 +       }
40511 +       
40512 +       /* Defer to sender... */
40513 +       CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: delaying me becoming leader\n", 
40514 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
40515 +       
40516 +       StartLeaderDiscovery (cmRail);
40517 +       break;
40518 +
40519 +    case CM_MSG_TYPE_DISCOVER_SUBORDINATE:
40520 +       if (lvl <= cmRail->TopLevel)
40521 +       {
40522 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (from my subtree)\n", 
40523 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
40524 +           break;
40525 +       }
40526 +       
40527 +       if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE)
40528 +       {
40529 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (I'm not looking for a leader)\n", 
40530 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
40531 +           break;
40532 +       }
40533 +       
40534 +       if (hdr->Level > cmRail->BroadcastLevel && AFTER (lbolt, cmRail->BroadcastLevelTick + EP_WITHDRAW_TIMEOUT))
40535 +       {
40536 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (broadcast level too low)\n",
40537 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
40538 +           break;
40539 +       }
40540 +
40541 +       CPRINTF3 (2, "%s: lvl %d node %d DISCOVER_SUBORDINATE: !IMCOMING\n", 
40542 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
40543 +       
40544 +       SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_IMCOMING);
40545 +       break;
40546 +
40547 +    case CM_MSG_TYPE_IMCOMING:
40548 +       if (lvl > cmRail->TopLevel ||           /* from peer or node above me */
40549 +           sgmt->State == CM_SGMT_PRESENT ||   /* already got a subtree */
40550 +           sgmt->State == CM_SGMT_ABSENT)      /* already written off this subtree */
40551 +       {
40552 +           CPRINTF4 (2, "%s: lvl %d sidx %d node %d IMCOMING: ignored\n", cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40553 +           break;
40554 +       }
40555 +
40556 +       CPRINTF4 (2, "%s: lvl %d sidx %d node %d IMCOMING: waiting...\n", cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40557 +
40558 +       sgmt->State = CM_SGMT_COMING;
40559 +       sgmt->UpdateTick = lbolt;
40560 +       break;
40561 +       
40562 +    case CM_MSG_TYPE_NOTIFY:
40563 +       if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE || /* I'm not looking for a leader */
40564 +           lvl != cmRail->TopLevel)            /* at this level */
40565 +       {
40566 +           /* If this person really should be my leader, my existing leader
40567 +            * will time out, and I'll discover this one. */
40568 +           CPRINTF4 (2, "%s: lvl %d node %d NOTIFY: ignored (%s)\n", 
40569 +                     cmRail->Rail->Name, lvl, hdr->NodeId,
40570 +                     lvl < cmRail->TopLevel ? "already leader" : 
40571 +                     lvl > cmRail->TopLevel ? "lvl too high" : "already subordinate");
40572 +           break;
40573 +       }
40574 +
40575 +       CPRINTF3 (2, "%s: lvl %d node %d NOTIFY: becoming subordinate\n", 
40576 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
40577 +       
40578 +       cmRail->Role = CM_ROLE_SUBORDINATE;             /* Now I've found my level */
40579 +       StartConnecting (cmRail, &level->Sgmts[0], hdr->NodeId, hdr->Timestamp);
40580 +       break;
40581 +
40582 +    case CM_MSG_TYPE_HEARTBEAT:
40583 +       if (lvl > cmRail->TopLevel)
40584 +       {
40585 +           CPRINTF3 (2, "%s: lvl %d node %d H/BEAT: ignored (lvl too high)\n", 
40586 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
40587 +           break;
40588 +       }
40589 +
40590 +       if (lvl == cmRail->TopLevel)                    /* heartbeat from my leader */
40591 +       {
40592 +           if (cmRail->Role == CM_ROLE_LEADER_CANDIDATE) /* but I've not got one */
40593 +           {
40594 +               /* I'm probably a new incarnation of myself; I'll keep doing
40595 +                * discovery until my previous existence's leader NOTIFY's me.
40596 +                * If I was this node's leader, she'll time me out (I'm not
40597 +                * sending heartbeats to her) and we'll fight it out for
40598 +                * leadership. */
40599 +               CPRINTF3 (2, "%s: lvl %d node %d H/BEAT ignored (no leader)\n", 
40600 +                         cmRail->Rail->Name, lvl, hdr->NodeId);
40601 +               break;
40602 +           }
40603 +           sidx = 0;
40604 +           sgmt = &level->Sgmts[0];
40605 +       }
40606 +      
40607 +       if (sgmt->State != CM_SGMT_PRESENT ||   /* not fully connected with this guy */
40608 +           sgmt->NodeId != hdr->NodeId ||      /* someone else impersonating my peer */
40609 +           sgmt->Timestamp != hdr->Timestamp)  /* new incarnation of my peer */
40610 +       {
40611 +           CPRINTF4 (1, "%s: lvl %d sidx %d node %d H/BEAT: !REJOIN\n", 
40612 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
40613 +         
40614 +           printk ("%s: lvl %d sidx %d node %d H/BEAT: !REJOIN %s\n",
40615 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId,
40616 +                   sgmt->State != CM_SGMT_PRESENT ? "not present" :
40617 +                   sgmt->NodeId != hdr->NodeId ? "someone else" : "new incarnation");
40618 +           
40619 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
40620 +           break;
40621 +       }
40622 +
40623 +       if (!((hdr->Seq == sgmt->AckSeq) ||     /* NOT duplicate message or */
40624 +             (hdr->Seq == (CM_SEQ)(sgmt->AckSeq + 1))) || /* expected message */
40625 +           !((hdr->AckSeq == sgmt->MsgSeq) ||  /* NOT expected ack or */
40626 +             (hdr->AckSeq == (CM_SEQ)(sgmt->MsgSeq - 1)))) /* duplicate ack */
40627 +       {
40628 +           CPRINTF9 (1, "%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (out-of-seq) M(%d,a%d) S%d,A%d\n", 
40629 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, 
40630 +                     (int)hdr->Seq, (int)hdr->AckSeq, (int)sgmt->MsgSeq, (int)sgmt->AckSeq);
40631 +        
40632 +           printk ("%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (out-of-seq) M(%d,a%d) S%d,A%d\n", 
40633 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, 
40634 +                   (int)hdr->Seq, (int)hdr->AckSeq, (int)sgmt->MsgSeq, (int)sgmt->AckSeq);
40635 +        
40636 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
40637 +           break;
40638 +       }
40639 +
40640 +       IncrStat (cmRail, HeartbeatsRcvd);
40641 +
40642 +       sgmt->UpdateTick = lbolt;
40643 +       sgmt->SendMaps = 1;
40644 +
40645 +       if (sgmt->MsgSeq == hdr->AckSeq)                /* acking current message */
40646 +           sgmt->MsgAcked = 1;                 /* can send the next one */
40647 +
40648 +       if (hdr->Seq == sgmt->AckSeq)           /* discard duplicate (or NULL heartbeat) */
40649 +       {
40650 +           CPRINTF6 (6, "%s: lvl %d sidx %d node %d type %d: %s H/BEAT\n", 
40651 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type,
40652 +                     hdr->NumMaps == 0 ? "null" : "duplicate");
40653 +           break;
40654 +       }
40655 +
40656 +       CPRINTF7 (6, "%s: lvl %d sidx %d node %d type %d: seq %d maps %d H/BEAT\n", 
40657 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, hdr->Seq, hdr->NumMaps);
40658 +
40659 +       sgmt->AckSeq = hdr->Seq;                        /* ready to receive next one */
40660 +       
40661 +       for (i = 0; i < hdr->NumMaps; i++)
40662 +       {
40663 +           CM_STATEMAP_ENTRY *map  = &msg->Payload.Statemaps[CM_MSG_MAP(i)];
40664 +           int                clvl = map->level;
40665 +           
40666 +           if (clvl < 0)                       /* end of message */
40667 +               break;
40668 +
40669 +           if (clvl < sgmt->Level)             /* bad level */
40670 +           {
40671 +               CPRINTF6 (1, "%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (bad clevel %d)\n", 
40672 +                         cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, clvl);
40673 +
40674 +               SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
40675 +               goto finished;
40676 +           }
40677 +
40678 +           if (map->offset == STATEMAP_NOMORECHANGES) /* end of atomic changes */
40679 +           {
40680 +               if (!sgmt->Maps[clvl].InputMapValid || /* not set InputMap yet */
40681 +                   statemap_changed (sgmt->Maps[clvl].CurrentInputMap)) /* previously applied changes */
40682 +               {
40683 +                   CPRINTF3 (4, "%s: received new clvl %d map from %d\n", cmRail->Rail->Name, clvl, sgmt->NodeId);
40684 +
40685 +                   statemap_setmap (sgmt->Maps[clvl].InputMap, sgmt->Maps[clvl].CurrentInputMap);
40686 +                   sgmt->Maps[clvl].InputMapValid = 1;
40687 +
40688 +                   statemap_clearchanges (sgmt->Maps[clvl].CurrentInputMap);
40689 +               }
40690 +               continue;
40691 +           }
40692 +           
40693 +           seg = ((bitmap_t)map->seg[0])
40694 +               | (((bitmap_t)map->seg[1]) << 16)
40695 +#if (BT_ULSHIFT == 6)
40696 +               | (((bitmap_t)map->seg[2]) << 32)
40697 +               | (((bitmap_t)map->seg[3]) << 48)
40698 +#elif (BT_ULSHIFT != 5)
40699 +#error "Bad value for BT_ULSHIFT"
40700 +#endif
40701 +               ;
40702 +           statemap_setseg (sgmt->Maps[clvl].CurrentInputMap, map->offset, seg);
40703 +       }
40704 +       break;
40705 +
40706 +    case CM_MSG_TYPE_REJOIN:
40707 +       CPRINTF5 (1, "%s: lvl %d sidx %d node %d type %d: REJOIN\n",
40708 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type);
40709 +       printk ("%s: lvl %d sidx %d node %d type %d: REJOIN\n", 
40710 +               cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type);
40711 +
40712 +       LowerTopLevel (cmRail, 0);
40713 +
40714 +       IncrStat (cmRail, RejoinRequest);
40715 +       break;
40716 +
40717 +    default:
40718 +       printk ("%s: lvl=%d unknown message type %d\n", cmRail->Rail->Name, lvl, hdr->Type);
40719 +       break;
40720 +    }
40721 + finished:
40722 +    hdr->Version = EP_SYSTEMQ_UNRECEIVED;
40723 +}
40724 +
40725 +static void
40726 +PollInputQueues (CM_RAIL *cmRail)
40727 +{
40728 +    ep_poll_inputq (cmRail->Rail, cmRail->IntrQueue, 0, ProcessMessage, cmRail);
40729 +    ep_poll_inputq (cmRail->Rail, cmRail->PolledQueue, 0, ProcessMessage, cmRail);
40730 +}
40731 +
40732 +static void
40733 +IntrQueueCallback (EP_RAIL *rail, void *arg)
40734 +{
40735 +    CM_RAIL *cmRail = (CM_RAIL *) arg;
40736 +    unsigned long flags;
40737 +
40738 +    /* If the lock is held, then don't bother spinning for it, 
40739 +     * since the messages will be received at this, or the
40740 +     * next heartbeat */
40741 +    local_irq_save (flags);
40742 +    if (spin_trylock (&cmRail->Lock))
40743 +    {
40744 +       if (AFTER (lbolt, cmRail->NextRunTime + MSEC2TICKS(CM_TIMER_SCHEDULE_TIMEOUT)))
40745 +           printk ("%s: heartbeat timer stuck - scheduled\n", cmRail->Rail->Name);
40746 +       else
40747 +           ep_poll_inputq (rail, cmRail->IntrQueue, 0, ProcessMessage, cmRail);
40748 +       spin_unlock (&cmRail->Lock);
40749 +    }
40750 +    local_irq_restore (flags);
40751 +}
40752 +
40753 +char *
40754 +sprintClPeers (char *str, CM_RAIL *cmRail, int clvl)
40755 +{
40756 +   int clLo     = cmRail->Levels[clvl].MinNodeId;
40757 +   int clHi     = clLo + cmRail->Levels[clvl].NumNodes - 1;
40758 +   int subClLo  = (clvl == 0) ? cmRail->NodeId : cmRail->Levels[clvl - 1].MinNodeId;
40759 +   int subClHi  = subClLo + ((clvl == 0) ? 0 : cmRail->Levels[clvl - 1].NumNodes - 1);
40760 +   
40761 +   if (subClHi == clHi)
40762 +      sprintf (str, "[%d-%d]", clLo, subClLo - 1);
40763 +   else if (subClLo == clLo)
40764 +      sprintf (str, "[%d-%d]", subClHi + 1, clHi);
40765 +   else
40766 +      sprintf (str, "[%d-%d][%d-%d]", clLo, subClLo - 1, subClHi + 1, clHi);
40767 +
40768 +   return (str);
40769 +}
40770 +
40771 +static void
40772 +RestartComms (CM_RAIL *cmRail, int clvl)
40773 +{
40774 +    int             base;
40775 +    int             nodeId;
40776 +    int             lstat;
40777 +    int             numClNodes;
40778 +    int             subClMin;
40779 +    int             subClMax;
40780 +    int             myClId;
40781 +    int             thisClId;
40782 +    
40783 +    myClId     = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
40784 +    base       = myClId * CM_GSTATUS_BITS;
40785 +    numClNodes = cmRail->Levels[clvl].NumNodes;
40786 +
40787 +    statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
40788 +                     CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
40789 +    cmRail->Levels[clvl].Restarting = 1;
40790 +
40791 +    if (cmRail->Levels[clvl].Online)
40792 +    {
40793 +       cmRail->Levels[clvl].Online = 0;
40794 +       
40795 +       for (thisClId = 0; thisClId < numClNodes; thisClId++)
40796 +       {
40797 +           if (thisClId == subClMin)   /* skip sub-cluster; it's just someone in this cluster */
40798 +           {                           /* that wants me to restart */
40799 +               thisClId = subClMax;
40800 +               continue;
40801 +           }
40802 +           
40803 +           nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
40804 +           base   = thisClId * CM_GSTATUS_BITS;
40805 +           lstat  = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
40806 +           
40807 +           if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN)
40808 +           {
40809 +               switch (ep_disconnect_node (cmRail->Rail, nodeId))
40810 +               {
40811 +               case EP_NODE_CONNECTING:
40812 +                   /* gstat must == RUNNING */
40813 +                   cmRail->Levels[clvl].Connected--;
40814 +                   break;
40815 +               case EP_NODE_DISCONNECTED:
40816 +                   /* CLOSING || STARTING || (lstat & RESTART) */
40817 +                   break;
40818 +               }
40819 +           }
40820 +       }
40821 +    }
40822 +}
40823 +
40824 +static void
40825 +UpdateGlobalStatus (CM_RAIL *cmRail)
40826 +{
40827 +    char            clNodeStr[32];                             /* [%d-%d][%d-%d] */
40828 +    int             nodeId;
40829 +    int             offset;
40830 +    int             base;
40831 +    bitmap_t        gstat;
40832 +    bitmap_t        lgstat;
40833 +    bitmap_t        lstat;
40834 +    int             clvl;
40835 +    int             numClNodes;
40836 +    int             subClMin;
40837 +    int             subClMax;
40838 +    int             myClId;
40839 +    int             thisClId;
40840 +    int             lastClId;
40841 +
40842 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
40843 +    {
40844 +       if (!cmRail->Levels[clvl].GlobalMapValid || /* not got the global map yet */
40845 +           !statemap_changed (cmRail->Levels[clvl].GlobalMap)) /* no changes to respond to */
40846 +       {
40847 +           CPRINTF2 (6, "%s: Got invalid or unchanged clvl %d global map\n", cmRail->Rail->Name, clvl);
40848 +           continue;
40849 +       }
40850 +       
40851 +       CPRINTF2 (5, "%s: Got valid changed clvl %d global map\n", cmRail->Rail->Name, clvl);
40852 +       
40853 +       lastClId = -1;
40854 +       myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
40855 +       numClNodes = cmRail->Levels[clvl].NumNodes;
40856 +       
40857 +       while ((offset = statemap_findchange (cmRail->Levels[clvl].GlobalMap, &gstat, 1)) >= 0)
40858 +       {
40859 +           /*
40860 +            * Check every node that this segment covers - however
40861 +            * if the last node we checked in the previous segmemt
40862 +            * is also the first node in this segment, then skip
40863 +            * it.
40864 +            */
40865 +           if ((thisClId = (offset/CM_GSTATUS_BITS)) == lastClId)
40866 +               thisClId++;
40867 +           lastClId = (offset + BT_NBIPUL - 1)/CM_GSTATUS_BITS;
40868 +           
40869 +           /* check each node that might have changed */
40870 +           for ( ; thisClId <= lastClId && thisClId < numClNodes; thisClId++)
40871 +           {
40872 +               base = thisClId * CM_GSTATUS_BITS;
40873 +               nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
40874 +
40875 +               if (thisClId >= subClMin && thisClId <= subClMax) /* skip sub-cluster */
40876 +                   continue;
40877 +
40878 +               /* This isn't me; I need to sense what this node is driving
40879 +                * (just the starting and running bits) and respond
40880 +                * appropriately...
40881 +                */
40882 +               lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
40883 +               gstat  = statemap_getbits (cmRail->Levels[clvl].GlobalMap,     base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
40884 +
40885 +               if (lgstat == gstat)            /* no change in peer state */
40886 +                   continue;
40887 +
40888 +               CPRINTF5 (3, "%s: Node %d: lgstat %s, gstat %s, lstat %s\n", cmRail->Rail->Name, nodeId,
40889 +                         GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
40890 +                         GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
40891 +                         GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
40892 +
40893 +               /* What I'm currently driving as my acknowledgement */
40894 +               lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
40895 +
40896 +               switch (gstat)
40897 +               {
40898 +               case CM_GSTATUS_STARTING:
40899 +                   if ((lgstat == CM_GSTATUS_ABSENT || lgstat == CM_GSTATUS_CLOSING) && lstat == CM_GSTATUS_MAY_START)
40900 +                   {
40901 +                       CPRINTF2 (1, "%s: ===================node %d STARTING\n", cmRail->Rail->Name, nodeId);
40902 +                       
40903 +                       ASSERT (cmRail->Rail->Nodes[nodeId].State == EP_NODE_DISCONNECTED);
40904 +
40905 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
40906 +                       continue;
40907 +                   }
40908 +                   break;
40909 +                   
40910 +               case CM_GSTATUS_RUNNING:
40911 +                   if ((lgstat == CM_GSTATUS_ABSENT   && lstat == CM_GSTATUS_MAY_START) ||
40912 +                       (lgstat == CM_GSTATUS_STARTING && lstat == CM_GSTATUS_MAY_RUN))
40913 +                   {
40914 +                       CPRINTF3 (1, "%s: ===================node %d%s RUNNING\n", cmRail->Rail->Name, nodeId,
40915 +                                 lgstat == CM_GSTATUS_ABSENT ? " Already" : "");
40916 +
40917 +                       ASSERT (cmRail->Rail->Nodes[nodeId].State == EP_NODE_DISCONNECTED);
40918 +
40919 +                       if (cmRail->Levels[clvl].Online)
40920 +                       {
40921 +                           ep_connect_node (cmRail->Rail, nodeId);
40922 +
40923 +                           cmRail->Levels[clvl].Connected++;
40924 +                       }
40925 +
40926 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
40927 +                       continue;
40928 +                   }
40929 +                   break;
40930 +                   
40931 +               case CM_GSTATUS_CLOSING:
40932 +                   CPRINTF4 (1, "%s: ===================node %d CLOSING%s%s\n", cmRail->Rail->Name, nodeId,
40933 +                             (lstat & CM_GSTATUS_RESTART) ? " for Restart" : "",
40934 +                             cmRail->Levels[clvl].Online ? "" : " (offline)");
40935 +
40936 +                   if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN)
40937 +                   {
40938 +                       switch (ep_disconnect_node (cmRail->Rail, nodeId))
40939 +                       {
40940 +                       case EP_NODE_CONNECTING:
40941 +                           cmRail->Levels[clvl].Connected--;
40942 +                           /* DROPTHROUGH */
40943 +                       case EP_NODE_DISCONNECTED:
40944 +                           lstat = CM_GSTATUS_MAY_START;
40945 +                           break;
40946 +                       }
40947 +                   }
40948 +
40949 +                   if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_START) /* clear restart if we've disconnected */
40950 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
40951 +                   continue;
40952 +                   
40953 +               default:
40954 +                   break;
40955 +               }
40956 +
40957 +               /* "unexpected" state change forces me to ask her to restart */
40958 +               if (! (lstat & CM_GSTATUS_RESTART))             /* not requesting restart already */
40959 +               {
40960 +                   CPRINTF5 (1, "%s: ===================node %d %s, old %s new %s\n", cmRail->Rail->Name, nodeId,
40961 +                             (gstat == CM_GSTATUS_ABSENT)  ? "ABSENT" : "REQUEST RESTART", 
40962 +                             GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
40963 +                             GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId));
40964 +
40965 +                   /* request restart */
40966 +                   if (cmRail->Levels[clvl].Online && lstat == CM_GSTATUS_MAY_RUN)
40967 +                   {
40968 +                       switch (ep_disconnect_node (cmRail->Rail, nodeId))
40969 +                       {
40970 +                       case EP_NODE_CONNECTING:
40971 +                           cmRail->Levels[clvl].Connected--;
40972 +                           /* DROPTHROUGH */
40973 +                       case EP_NODE_DISCONNECTED:
40974 +                           lstat = CM_GSTATUS_MAY_START;
40975 +                           break;
40976 +                       }
40977 +                   }
40978 +
40979 +                   statemap_setbits (cmRail->Levels[clvl].LocalMap, base, lstat | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
40980 +                   continue;
40981 +               }
40982 +
40983 +               continue;
40984 +           }
40985 +       }
40986 +    
40987 +       /* Now check myself - see what everyone else thinks I'm doing */
40988 +       base   = myClId * CM_GSTATUS_BITS;
40989 +       lstat  = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
40990 +       gstat  = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS);
40991 +       lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS);
40992 +
40993 +       if (lgstat == gstat)                    /* my state in this cluster hasn't changed */
40994 +       {
40995 +           CPRINTF3 (6, "%s: my clvl %d global status unchanged from %s\n", cmRail->Rail->Name,
40996 +                     clvl, GlobalStatusString (cmRail->Levels[clvl].GlobalMap, myClId));
40997 +           goto all_done;
40998 +       }
40999 +
41000 +       if ((gstat & CM_GSTATUS_RESTART) != 0)  /* someone wants me to restart */
41001 +       {
41002 +           if ((lstat & CM_GSTATUS_STATUS_MASK) == CM_GSTATUS_CLOSING) /* I'm already restarting */
41003 +               goto all_done;
41004 +           
41005 +           CPRINTF2 (1, "%s: ===================RESTART REQUEST from %s\n", cmRail->Rail->Name,
41006 +                     sprintClPeers (clNodeStr, cmRail, clvl));
41007 +           
41008 +           printk ("%s: Restart Request from %s\n", cmRail->Rail->Name,
41009 +                   sprintClPeers (clNodeStr, cmRail, clvl));
41010 +           
41011 +           RestartComms (cmRail, clvl);
41012 +           goto all_done;
41013 +       }
41014 +       
41015 +       CPRINTF6 (5, "%s: clvl %d: lgstat %s gstat %s, lstat %s%s\n", cmRail->Rail->Name, clvl,
41016 +                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, myClId),
41017 +                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, myClId),
41018 +                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, myClId),
41019 +                 (gstat != lstat) ? " (IGNORED)" : "");
41020 +                       
41021 +       if (gstat != lstat)                     /* not everyone agrees with me */
41022 +           goto all_done;
41023 +
41024 +       switch (lstat)
41025 +       {
41026 +       default:
41027 +           ASSERT (0);                         /* I never drive this */
41028 +           
41029 +       case CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START: /* I can restart now (have seen restart go away) */
41030 +           ASSERT (!cmRail->Levels[clvl].Online);
41031 +           
41032 +           CPRINTF2 (1,"%s: ===================NODES %s AGREE I MAY START\n", cmRail->Rail->Name,
41033 +                     sprintClPeers (clNodeStr, cmRail, clvl));
41034 +           printk ("%s: ===================NODES %s AGREE I MAY START\n", cmRail->Rail->Name,
41035 +                   sprintClPeers (clNodeStr, cmRail, clvl));
41036 +           
41037 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
41038 +                             CM_GSTATUS_STARTING | CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
41039 +           goto all_done;
41040 +           
41041 +       case CM_GSTATUS_STARTING | CM_GSTATUS_MAY_RUN:
41042 +           ASSERT (!cmRail->Levels[clvl].Online);
41043 +           
41044 +           CPRINTF2 (1, "%s: ===================NODES %s AGREE I MAY RUN\n", cmRail->Rail->Name,
41045 +                     sprintClPeers (clNodeStr, cmRail, clvl));
41046 +           printk ("%s: ===================NODES %s AGREE I MAY RUN\n", cmRail->Rail->Name,
41047 +                   sprintClPeers (clNodeStr, cmRail, clvl));
41048 +           
41049 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
41050 +                             CM_GSTATUS_RUNNING | CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
41051 +           goto all_done;
41052 +           
41053 +       case CM_GSTATUS_RUNNING | CM_GSTATUS_MAY_RUN:
41054 +           if (! cmRail->Levels[clvl].Online)
41055 +           {
41056 +               CPRINTF2 (1, "%s: ===================NODES %s AGREE I'M RUNNING\n", cmRail->Rail->Name,
41057 +                         sprintClPeers (clNodeStr, cmRail, clvl));
41058 +               printk ("%s: ===================NODES %s AGREE I'M RUNNING\n", cmRail->Rail->Name,
41059 +                       sprintClPeers (clNodeStr, cmRail, clvl));
41060 +               
41061 +               cmRail->Levels[clvl].Online = 1;
41062 +               
41063 +               for (thisClId = 0; thisClId < numClNodes; thisClId++)
41064 +               {
41065 +                   if (thisClId == subClMin)   /* skip sub-cluster */
41066 +                   {
41067 +                       thisClId = subClMax;
41068 +                       continue;
41069 +                   }
41070 +                   
41071 +                   nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
41072 +                   
41073 +                   base  = thisClId * CM_GSTATUS_BITS;
41074 +                   lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
41075 +                   gstat = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
41076 +                   
41077 +                   /* Only connect to her if I see her as running and I'm not requesting her 
41078 +                    * to restart - this means that I was offline when I saw her transition
41079 +                    * to running and haven't seen her in a "bad" state since. */
41080 +                   if (gstat == CM_GSTATUS_RUNNING && ! (lstat & CM_GSTATUS_RESTART))
41081 +                   {
41082 +                       CPRINTF5 (1, "%s: node %d lgstat %s gstat %s, lstat %s -> CONNECT\n", cmRail->Rail->Name, nodeId,
41083 +                                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
41084 +                                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
41085 +                                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
41086 +                       
41087 +                       if (lstat == CM_GSTATUS_MAY_START)
41088 +                           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
41089 +
41090 +                       ep_connect_node (cmRail->Rail, nodeId);
41091 +
41092 +                       cmRail->Levels[clvl].Connected++;
41093 +                   }
41094 +               }
41095 +           }
41096 +           goto all_done;
41097 +       }
41098 +
41099 +    all_done:
41100 +       statemap_setmap (cmRail->Levels[clvl].LastGlobalMap, cmRail->Levels[clvl].GlobalMap);
41101 +    }
41102 +}
41103 +
41104 +static void
41105 +ReduceGlobalMap (CM_RAIL *cmRail, int clvl)
41106 +{
41107 +    int       lvl;
41108 +    int       sidx;
41109 +    int       recompute;
41110 +    CM_LEVEL *level;
41111 +    int       cTopLevel;
41112 +    int       cRole;
41113 +
41114 +    if (clvl < cmRail->TopLevel)
41115 +    {
41116 +       cTopLevel = clvl + 1;
41117 +       cRole = CM_ROLE_LEADER;
41118 +    }
41119 +    else
41120 +    {
41121 +       cTopLevel = cmRail->TopLevel;
41122 +       cRole = cmRail->Role;
41123 +    }
41124 +    
41125 +    /* Update cmRail->Levels[*].SubordinateMap[clvl] for all subordinate levels */
41126 +    for (lvl = 0; lvl < cTopLevel; lvl++)
41127 +    {
41128 +       level = &cmRail->Levels[lvl];
41129 +
41130 +       /* We need to recompute this level's statemap if...
41131 +        * . Previous level's statemap has changes to propagate OR
41132 +        * . This level's statemap has not been computed yet OR
41133 +        * . A subordinate at this level has sent me a change.
41134 +        * Note that we can only do this if all subordinates from this
41135 +        * level down are present with valid statemaps, or absent (i.e. not
41136 +        * timing out).
41137 +        */
41138 +
41139 +       ASSERT (lvl == 0 || cmRail->Levels[lvl - 1].SubordinateMapValid[clvl]);
41140 +
41141 +       recompute = !level->SubordinateMapValid[clvl] ||
41142 +                   (lvl > 0 && statemap_changed (cmRail->Levels[lvl - 1].SubordinateMap[clvl]));
41143 +         
41144 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
41145 +       {
41146 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
41147 +
41148 +           if (!(sgmt->State == CM_SGMT_ABSENT || /* absent nodes contribute zeros */
41149 +                 (sgmt->State == CM_SGMT_PRESENT && /* present nodes MUST have received a map to contribute */
41150 +                  sgmt->Maps[clvl].InputMapValid)))
41151 +           {
41152 +               CPRINTF5 (5, "%s: waiting for clvl %d lvl %d seg %d node %d\n", cmRail->Rail->Name,
41153 +                         clvl, lvl, sidx, sgmt->NodeId);
41154 +
41155 +               /* Gotta wait for this guy, so we can't compute this level,
41156 +                * or any higher levels. */
41157 +               return;
41158 +           }
41159 +
41160 +           if (statemap_changed (sgmt->Maps[clvl].InputMap))
41161 +           {
41162 +               ASSERT (sgmt->Maps[clvl].InputMapValid);
41163 +
41164 +               recompute = 1;
41165 +
41166 +               CPRINTF7 (5, "%s: %s clvl %d map from @ %d %d (%d) - %s\n",
41167 +                         cmRail->Rail->Name, sgmt->State == CM_SGMT_ABSENT ? "newly absent" : "got new",
41168 +                         clvl, lvl, sidx, sgmt->NodeId, 
41169 +                         MapString ("Input", sgmt->Maps[clvl].InputMap, cmRail->Levels[clvl].NumNodes, ""));
41170 +           }
41171 +       }
41172 +
41173 +       if (recompute)
41174 +       {
41175 +           if (lvl == 0)
41176 +               statemap_reset (cmRail->Levels[clvl].TmpMap);
41177 +           else
41178 +           {
41179 +               ASSERT (cmRail->Levels[lvl - 1].SubordinateMapValid[clvl]);
41180 +               
41181 +               statemap_copy (cmRail->Levels[clvl].TmpMap, cmRail->Levels[lvl - 1].SubordinateMap[clvl]);
41182 +               statemap_clearchanges (cmRail->Levels[lvl - 1].SubordinateMap[clvl]);
41183 +           }
41184 +        
41185 +           for (sidx = 0; sidx < level->NumSegs; sidx++)
41186 +           {
41187 +               CM_SGMT *sgmt = &level->Sgmts[sidx];
41188 +               
41189 +               if (sgmt->State != CM_SGMT_ABSENT)      /* absent nodes contribute zeroes */
41190 +               {
41191 +                   ASSERT (sgmt->State == CM_SGMT_PRESENT);
41192 +                   ASSERT (sgmt->Maps[clvl].InputMapValid);
41193 +                   statemap_ormap (cmRail->Levels[clvl].TmpMap, sgmt->Maps[clvl].InputMap);
41194 +               }
41195 +               statemap_clearchanges (sgmt->Maps[clvl].InputMap);
41196 +           }
41197 +
41198 +           statemap_setmap (level->SubordinateMap[clvl], cmRail->Levels[clvl].TmpMap);
41199 +           level->SubordinateMapValid[clvl] = 1;
41200 +
41201 +           CPRINTF4 (5, "%s: recompute clvl %d level %d statemap - %s\n", cmRail->Rail->Name, clvl, lvl,
41202 +                     MapString ("level", level->SubordinateMap[clvl], cmRail->Levels[clvl].NumNodes, ""));
41203 +       }
41204 +    }
41205 +
41206 +    if (cRole == CM_ROLE_LEADER_CANDIDATE)     /* don't know this cluster's leader yet */
41207 +       return;
41208 +
41209 +    ASSERT (cTopLevel == 0 || cmRail->Levels[cTopLevel - 1].SubordinateMapValid[clvl]);
41210 +
41211 +    /* Update SubTreeMap */
41212 +    
41213 +    if (!cmRail->Levels[clvl].SubTreeMapValid || 
41214 +       statemap_changed (cmRail->Levels[clvl].LocalMap) ||
41215 +       (cTopLevel > 0 && statemap_changed (cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl])))
41216 +    {
41217 +       statemap_copy (cmRail->Levels[clvl].TmpMap, cmRail->Levels[clvl].LocalMap);
41218 +       statemap_clearchanges (cmRail->Levels[clvl].LocalMap);
41219 +
41220 +       if (cTopLevel > 0)
41221 +       {
41222 +           statemap_ormap (cmRail->Levels[clvl].TmpMap, cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]);
41223 +           statemap_clearchanges (cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]);
41224 +       }
41225 +
41226 +       statemap_setmap (cmRail->Levels[clvl].SubTreeMap, cmRail->Levels[clvl].TmpMap);
41227 +       cmRail->Levels[clvl].SubTreeMapValid = 1;
41228 +
41229 +       CPRINTF3 (5, "%s: recompute clvl %d subtree map - %s\n", cmRail->Rail->Name, clvl,
41230 +                 MapString ("subtree", cmRail->Levels[clvl].SubTreeMap, cmRail->Levels[clvl].NumNodes, ""));
41231 +    }
41232 +
41233 +    if (cRole == CM_ROLE_SUBORDINATE)          /* got a leader (Not me) */
41234 +    {                                          /* => send SubTreeMap to her */
41235 +       CM_SGMT *leader = &cmRail->Levels[cmRail->TopLevel].Sgmts[0];
41236 +
41237 +       ASSERT (leader->State == CM_SGMT_PRESENT);
41238 +       ASSERT (cmRail->Levels[clvl].SubTreeMapValid);
41239 +
41240 +       if (!leader->Maps[clvl].OutputMapValid ||
41241 +           statemap_changed (cmRail->Levels[clvl].SubTreeMap))
41242 +       {
41243 +           statemap_setmap (leader->Maps[clvl].OutputMap, cmRail->Levels[clvl].SubTreeMap);
41244 +           leader->Maps[clvl].OutputMapValid = 1;
41245 +
41246 +           statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
41247 +
41248 +           CPRINTF3 (5, "%s: sending clvl %d subtree map to leader (%d)\n", cmRail->Rail->Name, clvl, leader->NodeId);
41249 +       }
41250 +    }
41251 +}
41252 +
41253 +void
41254 +BroadcastGlobalMap (CM_RAIL *cmRail, int clvl)
41255 +{
41256 +    int       lvl;
41257 +    int       sidx;
41258 +    CM_LEVEL *level;
41259 +    CM_SGMT  *leader;
41260 +    int       cTopLevel;
41261 +    int       cRole;
41262 +
41263 +    if (clvl < cmRail->TopLevel)
41264 +    {
41265 +       cTopLevel = clvl + 1;
41266 +       cRole = CM_ROLE_LEADER;
41267 +    }
41268 +    else
41269 +    {
41270 +       cTopLevel = cmRail->TopLevel;
41271 +       cRole = cmRail->Role;
41272 +    }
41273 +
41274 +    switch (cRole)
41275 +    {
41276 +    default:
41277 +       ASSERT (0);
41278 +       
41279 +    case CM_ROLE_LEADER_CANDIDATE:             /* don't know this cluster's leader yet */
41280 +       return;
41281 +
41282 +    case CM_ROLE_LEADER:                       /* cluster leader: */
41283 +       ASSERT (clvl < cmRail->TopLevel);               /* set GlobalMap from SubTreeMap */
41284 +       
41285 +       if (!cmRail->Levels[clvl].SubTreeMapValid)      /* can't set global map */
41286 +           return;
41287 +
41288 +       if (cmRail->Levels[clvl].GlobalMapValid &&      /* already set global map */
41289 +           !statemap_changed (cmRail->Levels[clvl].SubTreeMap)) /* no changes to propagate */
41290 +           return;
41291 +       
41292 +       statemap_setmap (cmRail->Levels[clvl].GlobalMap, cmRail->Levels[clvl].SubTreeMap);
41293 +       cmRail->Levels[clvl].GlobalMapValid = 1;
41294 +       statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
41295 +
41296 +       CPRINTF2 (5, "%s: whole cluster %d leader setting global map\n", cmRail->Rail->Name, clvl);
41297 +
41298 +       UpdateGlobalStatus (cmRail);
41299 +       break;
41300 +       
41301 +    case CM_ROLE_SUBORDINATE:                  /* cluster subordinate: */
41302 +       ASSERT (clvl >= cmRail->TopLevel);              /* receive GlobalMap from leader */
41303 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
41304 +       
41305 +       leader = &cmRail->Levels[cmRail->TopLevel].Sgmts[0];
41306 +       ASSERT (leader->State == CM_SGMT_PRESENT);
41307 +
41308 +       if (!leader->Maps[clvl].InputMapValid)  /* can't set global map */
41309 +           return;
41310 +       
41311 +       if (cmRail->Levels[clvl].GlobalMapValid &&      /* already set global map */
41312 +           !statemap_changed (leader->Maps[clvl].InputMap)) /* no changes to propagate */
41313 +           return;
41314 +
41315 +       statemap_setmap (cmRail->Levels[clvl].GlobalMap, leader->Maps[clvl].InputMap);
41316 +       cmRail->Levels[clvl].GlobalMapValid = 1;
41317 +       statemap_clearchanges (leader->Maps[clvl].InputMap);
41318 +
41319 +       CPRINTF3 (5, "%s: getting clvl %d global map from leader (%d)\n", cmRail->Rail->Name, clvl, leader->NodeId);
41320 +
41321 +       UpdateGlobalStatus (cmRail);
41322 +       break;
41323 +    }
41324 +
41325 +    CPRINTF3 (5, "%s: clvl %d %s\n", cmRail->Rail->Name, clvl,
41326 +             MapString ("global", cmRail->Levels[clvl].GlobalMap, cmRail->Levels[clvl].NumNodes, ""));
41327 +    
41328 +    /* Broadcast global map to all subordinates */
41329 +    for (lvl = 0; lvl < cTopLevel; lvl++)
41330 +    {
41331 +       level = &cmRail->Levels[lvl];
41332 +       
41333 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
41334 +       {
41335 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
41336 +           
41337 +           if (sgmt->State == CM_SGMT_PRESENT)
41338 +           {
41339 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].GlobalMap);
41340 +               sgmt->Maps[clvl].OutputMapValid = 1;
41341 +               
41342 +               CPRINTF5 (5, "%s: sending clvl %d global map to subordinate %d %d (%d)\n", 
41343 +                         cmRail->Rail->Name, clvl, lvl, sidx, sgmt->NodeId);
41344 +           }
41345 +       }
41346 +    }
41347 +}
41348 +
41349 +static void
41350 +CheckPeerPulse (CM_RAIL *cmRail, CM_SGMT *sgmt)
41351 +{
41352 +    int clvl, sendRejoin;
41353 +    
41354 +    switch (sgmt->State)
41355 +    {
41356 +    case CM_SGMT_ABSENT:
41357 +       break;
41358 +
41359 +    case CM_SGMT_WAITING:                      /* waiting for a subtree */
41360 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_DISCOVER_TIMEOUT)))
41361 +           break;
41362 +      
41363 +       CPRINTF3 (2, "%s: lvl %d subtree %d contains no live nodes\n", cmRail->Rail->Name, 
41364 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
41365 +
41366 +       sgmt->State = CM_SGMT_ABSENT;
41367 +       for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
41368 +       {
41369 +           statemap_zero (sgmt->Maps[clvl].InputMap);          /* need to start propagating zeros (flags change) */
41370 +           sgmt->Maps[clvl].InputMapValid = 1;         /* and must indicate that the map is now valid */
41371 +       }
41372 +       break;
41373 +
41374 +    case CM_SGMT_COMING:                               /* lost/waiting subtree sent me IMCOMING */
41375 +       ASSERT (sgmt->Level > 0);                       /* we only do subtree discovery below our own level */
41376 +
41377 +       if (AFTER (lbolt, sgmt->WaitingTick + MSEC2TICKS(CM_WAITING_TIMEOUT)))
41378 +       {
41379 +           CPRINTF3 (1, "%s: lvl %d subtree %d waiting too long\n", cmRail->Rail->Name,
41380 +                     sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
41381 +           printk ("%s: lvl %d subtree %d waiting too long\n", cmRail->Rail->Name,
41382 +                   sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
41383 +
41384 +           sgmt->State = CM_SGMT_ABSENT;
41385 +           for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
41386 +           {
41387 +               statemap_zero (sgmt->Maps[clvl].InputMap);              /* need to start propagating zeros (flags change) */
41388 +               sgmt->Maps[clvl].InputMapValid = 1;             /* and must indicate that the map is now valid */
41389 +           }
41390 +           break;
41391 +       }
41392 +
41393 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_DISCOVER_TIMEOUT)))
41394 +           break;
41395 +
41396 +       CPRINTF3 (2, "%s: lvl %d subtree %d hasn't connected yet\n", cmRail->Rail->Name,
41397 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
41398 +
41399 +       sgmt->State = CM_SGMT_WAITING;
41400 +       sgmt->UpdateTick = lbolt;
41401 +
41402 +       if (sgmt->Level > 0)
41403 +           __Schedule_Discovery (cmRail);
41404 +       break;
41405 +      
41406 +    case CM_SGMT_PRESENT:
41407 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_HEARTBEAT_TIMEOUT)))
41408 +           break;
41409 +
41410 +       if (sgmt->Level == cmRail->TopLevel)            /* leader died */
41411 +       {
41412 +           sendRejoin = (sgmt->State == CM_SGMT_PRESENT && sgmt->AckSeq == 0);
41413 +
41414 +           CPRINTF4 (1, "%s: leader (%d) node %d JUST DIED%s\n", 
41415 +                     cmRail->Rail->Name, sgmt->Level, sgmt->NodeId,
41416 +                     sendRejoin ? ": !REJOIN" : "");
41417 +           
41418 +           printk ("%s: lvl %d leader (%d) JUST DIED%s\n", 
41419 +                   cmRail->Rail->Name, sgmt->Level, sgmt->NodeId,
41420 +                   sendRejoin ? ": !REJOIN" : "");
41421 +       
41422 +           if (sendRejoin)
41423 +           {
41424 +               /* she's not sent us any heartbeats even though she responded to a discover
41425 +                * so tell her to rejoin the tree at the bottom, this will mean that she 
41426 +                * has to run the heartbeat timer before being able to rejoin the tree. */
41427 +               SendMessage (cmRail, sgmt->NodeId, sgmt->Level, CM_MSG_TYPE_REJOIN);
41428 +           }
41429 +
41430 +           StartLeaderDiscovery (cmRail);
41431 +           break;
41432 +       }
41433 +
41434 +       sendRejoin = (sgmt->State == CM_SGMT_PRESENT && sgmt->AckSeq == 0);
41435 +
41436 +       CPRINTF5 (2, "%s: lvl %d subordinate %d (%d) JUST DIED%s\n", cmRail->Rail->Name, 
41437 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]), sgmt->NodeId,
41438 +                 sendRejoin ? ": !REJOIN" : "");
41439 +       printk ("%s: lvl %d subordinate %d (%d) JUST DIED%s\n", cmRail->Rail->Name, 
41440 +               sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]), sgmt->NodeId,
41441 +               sendRejoin ? ": !REJOIN" : "");
41442 +
41443 +       if (sendRejoin)
41444 +       {
41445 +           /* she's not sent us any heartbeats even though she responded to a discover
41446 +            * so tell her to rejoin the tree at the bottom, this will mean that she 
41447 +            * has to run the heartbeat timer before being able to rejoin the tree. */
41448 +           SendMessage (cmRail, sgmt->NodeId, sgmt->Level, CM_MSG_TYPE_REJOIN);
41449 +       }
41450 +
41451 +       StartSubTreeDiscovery (cmRail, sgmt);
41452 +       break;
41453 +        
41454 +    default:
41455 +       ASSERT (0);
41456 +    }
41457 +}
41458 +
41459 +static void
41460 +CheckPeerPulses (CM_RAIL *cmRail)
41461 +{
41462 +    int lvl;
41463 +    int sidx;
41464 +   
41465 +    /* check children are alive */
41466 +    for (lvl = 0; lvl < cmRail->TopLevel; lvl++)
41467 +       for (sidx = 0; sidx < cmRail->Levels[lvl].NumSegs; sidx++)
41468 +           CheckPeerPulse (cmRail, &cmRail->Levels[lvl].Sgmts[sidx]);
41469 +
41470 +    /* check leader is alive */
41471 +    if (cmRail->Role == CM_ROLE_SUBORDINATE)
41472 +    {
41473 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
41474 +       ASSERT (cmRail->Levels[cmRail->TopLevel].Sgmts[0].State == CM_SGMT_PRESENT);
41475 +      
41476 +       CheckPeerPulse (cmRail, &cmRail->Levels[cmRail->TopLevel].Sgmts[0]);
41477 +    }
41478 +}
41479 +
41480 +static void
41481 +SendHeartbeats (CM_RAIL *cmRail)
41482 +{
41483 +    int lvl;
41484 +
41485 +    /* Send heartbeats to my children */
41486 +    for (lvl = 0; lvl < cmRail->TopLevel; lvl++)
41487 +    {
41488 +       CM_LEVEL *level = &cmRail->Levels[lvl];
41489 +       int       sidx;
41490 +       
41491 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
41492 +       {
41493 +           CM_SGMT *sgmt = &cmRail->Levels[lvl].Sgmts[sidx];
41494 +
41495 +           if (sgmt->State == CM_SGMT_PRESENT)
41496 +               SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_HEARTBEAT);
41497 +       }
41498 +    }
41499 +
41500 +    /* Send heartbeat to my leader */
41501 +    if (cmRail->Role == CM_ROLE_SUBORDINATE)
41502 +    {
41503 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
41504 +       SendToSgmt (cmRail, &cmRail->Levels[cmRail->TopLevel].Sgmts[0], CM_MSG_TYPE_HEARTBEAT);
41505 +    }
41506 +}
41507 +
41508 +static int
41509 +BroadcastDiscover (CM_RAIL *cmRail)
41510 +{
41511 +    int       sidx;
41512 +    int              lvl;
41513 +    int       msgType;
41514 +    CM_LEVEL *level;
41515 +    int       urgent;
41516 +
41517 +    ASSERT (cmRail->TopLevel <= cmRail->NumLevels);
41518 +    ASSERT ((cmRail->Role == CM_ROLE_LEADER) ? (cmRail->TopLevel == cmRail->NumLevels) :
41519 +           (cmRail->Role == CM_ROLE_SUBORDINATE) ? (cmRail->Levels[cmRail->TopLevel].Sgmts[0].State == CM_SGMT_PRESENT) :
41520 +           (cmRail->Role == CM_ROLE_LEADER_CANDIDATE));
41521 +
41522 +    if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE)      /* got a leader/lead whole machine */
41523 +    {
41524 +       urgent = 0;                             /* non-urgent leader discovery */
41525 +       lvl = cmRail->TopLevel - 1;             /* on nodes I lead (resolves leader conflicts) */
41526 +       msgType = CM_MSG_TYPE_RESOLVE_LEADER;
41527 +    }
41528 +    else
41529 +    {
41530 +       urgent = 1;                             /* urgent leader discovery */
41531 +       lvl = cmRail->TopLevel;                 /* on nodes I'd like to lead */
41532 +       msgType = CM_MSG_TYPE_DISCOVER_LEADER;
41533 +    }
41534 +
41535 +    if (lvl >= 0)
41536 +    {
41537 +       if (lvl > cmRail->BroadcastLevel)
41538 +       {
41539 +           /* Unable to broadcast at this level in the spanning tree, so we 
41540 +            * just continue doing discovery until we are able to broadcast */
41541 +           CPRINTF4 (6, "%s: broadcast level %d too low to discover %d at level %d\n",
41542 +                     cmRail->Rail->Name, cmRail->BroadcastLevel, msgType, lvl);
41543 +
41544 +           cmRail->DiscoverStartTick = lbolt;
41545 +       }
41546 +       else
41547 +       {
41548 +           level = &cmRail->Levels[lvl];
41549 +           SendToSgmt (cmRail, &level->Sgmts[level->MySgmt], msgType);
41550 +       }
41551 +    }
41552 +    
41553 +    while (lvl > 0)
41554 +    {
41555 +       level = &cmRail->Levels[lvl];
41556 +      
41557 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
41558 +       {
41559 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
41560 +        
41561 +           if (sgmt->State == CM_SGMT_WAITING)
41562 +           {
41563 +               ASSERT (sidx != level->MySgmt);
41564 +               /* Do subordinate discovery.  Existing subordinates will
41565 +                * ignore it, but leader candidates will send IMCOMING.
41566 +                * This is always urgent since we'll assume a subtree is
41567 +                * absent if I don't get IMCOMING within the timeout.
41568 +                */
41569 +               SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_DISCOVER_SUBORDINATE);
41570 +               urgent = 1;
41571 +           }
41572 +       }
41573 +       lvl--;
41574 +    }
41575 +   
41576 +    return (urgent);
41577 +}
41578 +
41579 +static void
41580 +CheckBroadcast (CM_RAIL *cmRail)
41581 +{
41582 +    int  clvl;
41583 +
41584 +    for (clvl = cmRail->NumLevels-1; clvl >= 0 && cmRail->Rail->SwitchBroadcastLevel < cmRail->Levels[clvl].SwitchLevel; clvl--)
41585 +       ;
41586 +
41587 +    if (cmRail->OfflineReasons || cmRail->Rail->System->Shutdown)
41588 +       clvl = -1;
41589 +
41590 +    /* if the level at which we can broadcast drops, then we must rejoin the
41591 +     * spanning tree at the highest level for which broadcast is good. */
41592 +    if (cmRail->BroadcastLevel > clvl && clvl < (int)(cmRail->Role == CM_ROLE_LEADER ? cmRail->TopLevel - 1 : cmRail->TopLevel))
41593 +    {
41594 +       printk ("%s: REJOINING at level %d because %s\n", cmRail->Rail->Name, clvl+1, 
41595 +               (cmRail->OfflineReasons & CM_OFFLINE_MANAGER) ? "of manager thread" :
41596 +               (cmRail->OfflineReasons & CM_OFFLINE_PROCFS)  ? "force offline"  : 
41597 +               cmRail->Rail->System->Shutdown ? "system shutdown" : "broadcast level changed");
41598 +       LowerTopLevel (cmRail, clvl+1);
41599 +    }
41600 +    
41601 +    if (cmRail->BroadcastLevel != clvl)
41602 +    {
41603 +       cmRail->BroadcastLevel     = clvl;
41604 +       cmRail->BroadcastLevelTick = lbolt;
41605 +    }
41606 +
41607 +    /* schedule the update thread, to withdraw from comms with 
41608 +     * nodes "outside" of the valid broadcastable range. */
41609 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
41610 +    {
41611 +       if (cmRail->BroadcastLevel < clvl)
41612 +       {
41613 +           if (AFTER (lbolt, cmRail->BroadcastLevelTick + EP_WITHDRAW_TIMEOUT) && 
41614 +               !(cmRail->Levels[clvl].OfflineReasons & CM_OFFLINE_BROADCAST))
41615 +           {
41616 +               printk ("%s: Withdraw at Level %d\n", cmRail->Rail->Name, clvl);
41617 +               cmRail->Levels[clvl].OfflineReasons |= CM_OFFLINE_BROADCAST;
41618 +           }
41619 +       }
41620 +       else
41621 +       {
41622 +           if (cmRail->Levels[clvl].OfflineReasons & CM_OFFLINE_BROADCAST)
41623 +           {
41624 +               printk ("%s: Rejoin at Level %d\n", cmRail->Rail->Name, clvl);
41625 +               cmRail->Levels[clvl].OfflineReasons &= ~CM_OFFLINE_BROADCAST;
41626 +           }
41627 +       }
41628 +    }
41629 +       
41630 +}
41631 +
41632 +static void
41633 +CheckManager (CM_RAIL *cmRail)
41634 +{
41635 +    long time,  state = ep_kthread_state (&cmRail->Rail->System->ManagerThread, &time);
41636 +
41637 +    if (state == KT_STATE_RUNNING && BEFORE (lbolt, time + MSEC2TICKS(CM_THREAD_RUNNING_TIMEOUT)))
41638 +       state = KT_STATE_SLEEPING;
41639 +    if (state != KT_STATE_SLEEPING && BEFORE (lbolt, time + MSEC2TICKS(CM_THREAD_SCHEDULE_TIMEOUT)))
41640 +       state = KT_STATE_SLEEPING;
41641 +
41642 +    if ((cmRail->OfflineReasons & CM_OFFLINE_MANAGER) && state == KT_STATE_SLEEPING)
41643 +    {
41644 +       printk ("%s: manager thread unstuck\n", cmRail->Rail->Name);
41645 +
41646 +       cmRail->OfflineReasons &= ~CM_OFFLINE_MANAGER;
41647 +    }
41648 +
41649 +    if (!(cmRail->OfflineReasons & CM_OFFLINE_MANAGER) && state != KT_STATE_SLEEPING)
41650 +    {
41651 +       printk ("%s: manager thread stuck - %s\n", cmRail->Rail->Name,
41652 +               state == KT_STATE_SCHEDULED ? "scheduled" : 
41653 +               state == KT_STATE_RUNNING ? "running" : 
41654 +               state == KT_STATE_STALLED ? "stalled" : "unknown");
41655 +
41656 +       cmRail->OfflineReasons |= CM_OFFLINE_MANAGER;
41657 +    }
41658 +}
41659 +
41660 +static void
41661 +CheckOfflineReasons (CM_RAIL *cmRail, int clvl)
41662 +{
41663 +    int subClMin, subClMax, myClId;
41664 +    char clNodeStr[32];                                /* [%d-%d][%d-%d] */
41665 +
41666 +    if (cmRail->Levels[clvl].OfflineReasons)
41667 +    {
41668 +       if (cmRail->Levels[clvl].Online)
41669 +       {
41670 +           printk ("%s: Withdraw from %s\n", cmRail->Rail->Name, sprintClPeers (clNodeStr, cmRail, clvl));
41671 +           
41672 +           RestartComms (cmRail, clvl);
41673 +       }
41674 +    }
41675 +    else
41676 +    {
41677 +       if (cmRail->Levels[clvl].Restarting && cmRail->Levels[clvl].Connected == 0)
41678 +       {
41679 +           printk ("%s: Rejoin with %s\n", cmRail->Rail->Name, sprintClPeers (clNodeStr, cmRail, clvl));
41680 +
41681 +           myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
41682 +           
41683 +           ASSERT (statemap_getbits (cmRail->Levels[clvl].LocalMap, myClId * CM_GSTATUS_BITS, CM_GSTATUS_BITS) == 
41684 +                   (CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START | CM_GSTATUS_RESTART));
41685 +    
41686 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, myClId * CM_GSTATUS_BITS,
41687 +                             CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
41688 +
41689 +           cmRail->Levels[clvl].Restarting = 0;
41690 +       }
41691 +    }
41692 +}
41693 +
41694 +void
41695 +DoHeartbeatWork (CM_RAIL *cmRail)
41696 +{
41697 +    long now = lbolt;
41698 +    int  clvl;
41699 +
41700 +    if ((RejoinCheck || RejoinPanic) &&
41701 +       AFTER (now, cmRail->NextRunTime + MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT))) /* If I've been unresponsive for too long */
41702 +    {
41703 +       /* I'd better reconnect to the network because I've not been playing the game */
41704 +       CPRINTF4 (1, "%s: REJOINING because I was too slow (heartbeat) [%ld,%ld,(%ld)]\n", cmRail->Rail->Name, now,  cmRail->NextRunTime, (long int)MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT));
41705 +       printk ("%s: REJOINING because I was too slow (heartbeat) [%ld,%ld,(%ld)]\n", cmRail->Rail->Name, now,  cmRail->NextRunTime, (long int)MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT));
41706 +       
41707 +       LowerTopLevel (cmRail, 0);
41708 +       
41709 +       IncrStat (cmRail, RejoinTooSlow);
41710 +       
41711 +       if (RejoinPanic)
41712 +           panic ("ep: REJOINING because I was too slow (heartbeat)\n");
41713 +    }
41714 +    
41715 +    PollInputQueues (cmRail);
41716 +    
41717 +    if (cmRail->NextDiscoverTime && ! BEFORE (now, cmRail->NextDiscoverTime))
41718 +    {
41719 +       if (BroadcastDiscover (cmRail))         /* urgent discovery required? */
41720 +           cmRail->NextDiscoverTime = now + MSEC2TICKS (CM_URGENT_DISCOVER_INTERVAL);
41721 +       else
41722 +           cmRail->NextDiscoverTime = now + MSEC2TICKS (CM_PERIODIC_DISCOVER_INTERVAL);
41723 +       
41724 +       if (cmRail->Role == CM_ROLE_LEADER_CANDIDATE && AFTER (now, cmRail->DiscoverStartTick + MSEC2TICKS (CM_DISCOVER_TIMEOUT)))
41725 +           RaiseTopLevel (cmRail);
41726 +    }
41727 +    
41728 +    if (cmRail->NextHeartbeatTime && ! BEFORE (now, cmRail->NextHeartbeatTime))
41729 +    {
41730 +       CheckPosition (cmRail->Rail);
41731 +       CheckPeerPulses (cmRail);
41732 +       CheckBroadcast (cmRail);
41733 +       CheckManager (cmRail);
41734 +       
41735 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
41736 +       {
41737 +           CheckOfflineReasons (cmRail, clvl);
41738 +           ReduceGlobalMap (cmRail, clvl);
41739 +           BroadcastGlobalMap (cmRail, clvl);
41740 +       }
41741 +       
41742 +       SendHeartbeats (cmRail);
41743 +       
41744 +       /* Compute the next heartbeat time, but "drift" it towards the last
41745 +        * periodic discovery time we saw from the whole machine leader */
41746 +       cmRail->NextHeartbeatTime = now + MSEC2TICKS (CM_HEARTBEAT_INTERVAL);
41747 +    }
41748 +
41749 +    if (cmRail->NextDiscoverTime && AFTER (cmRail->NextHeartbeatTime, cmRail->NextDiscoverTime))
41750 +       cmRail->NextRunTime = cmRail->NextDiscoverTime;
41751 +    else 
41752 +       cmRail->NextRunTime = cmRail->NextHeartbeatTime;
41753 +}
41754 +
41755 +#define CM_SVC_INDICATOR_OFFSET(CMRAIL,CLVL,IND,NODEID)     ( ( CMRAIL->Levels[CLVL].NumNodes * CM_GSTATUS_BITS ) \
41756 +                                                               + ( CMRAIL->Levels[CLVL].NumNodes * IND ) \
41757 +                                                               + ( NODEID - CMRAIL->Levels[CLVL].MinNodeId ) )
41758 +int
41759 +cm_svc_indicator_set (EP_RAIL *rail, int svc_indicator)
41760 +{
41761 +    CM_RAIL *cmRail = rail->ClusterRail;
41762 +    unsigned long flags;
41763 +    int           clvl;
41764 +
41765 +    EPRINTF2 (DBG_SVC,"cm_svc_indicator_set: rail %p ind %d\n", rail, svc_indicator);
41766 +
41767 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
41768 +    {
41769 +       EPRINTF1 (DBG_SVC,"cm_svc_indicator_set: service indicator %d not registered\n", svc_indicator);
41770 +       return (-1);
41771 +    }
41772 +
41773 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
41774 +       return (-2);
41775 +    
41776 +    spin_lock_irqsave (&cmRail->Lock, flags);
41777 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)  {
41778 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId), 1, 1); 
41779 +       EPRINTF3 (DBG_SVC,"cm_svc_indicator_set: clvl %d nodeId %d offset %d\n", clvl, cmRail->NodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId));
41780 +    }
41781 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
41782 +
41783 +    return (0);
41784 +}
41785 +
41786 +int
41787 +cm_svc_indicator_clear (EP_RAIL *rail, int svc_indicator)
41788 +{
41789 +    CM_RAIL *cmRail = rail->ClusterRail;
41790 +    unsigned long flags;
41791 +    int           clvl;
41792 +
41793 +    EPRINTF2 (DBG_SVC, "cm_svc_indicator_clear: rail %p ind %d\n", rail, svc_indicator);
41794 +
41795 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
41796 +    {
41797 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_clear: service indicator %d not registered\n", svc_indicator);
41798 +       return (-1);
41799 +    }
41800 +
41801 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
41802 +       return (-2);
41803 +
41804 +    spin_lock_irqsave (&cmRail->Lock, flags);
41805 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)  {
41806 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId), 0, 1); 
41807 +       EPRINTF3 (DBG_SVC, "cm_svc_indicator_clear: clvl %d nodeId %d offset %d\n", clvl, cmRail->NodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId));
41808 +    }
41809 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
41810 +
41811 +    return (0);
41812 +}
41813 +
41814 +int
41815 +cm_svc_indicator_is_set (EP_RAIL *rail, int svc_indicator, int nodeId)
41816 +{
41817 +    CM_RAIL *cmRail = rail->ClusterRail;
41818 +    unsigned long flags;
41819 +    int           clvl;
41820 +    bitmap_t      bits;
41821 +
41822 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_is_set: rail %p ind %d nodeId %d (me=%d)\n", rail, svc_indicator, nodeId, cmRail->NodeId);
41823 +
41824 +    if (svc_indicator < 0 || svc_indicator > EP_SVC_NUM_INDICATORS)
41825 +    {
41826 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_is_set: service indicator %d not registered\n", svc_indicator);
41827 +       return (0);
41828 +    }
41829 +
41830 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
41831 +       return (0);
41832 +    
41833 +    spin_lock_irqsave (&cmRail->Lock, flags);
41834 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
41835 +       if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
41836 +           break;
41837 +
41838 +    if ( clvl == cmRail->NumLevels) { 
41839 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_is_set: node out of range %d \n", nodeId); 
41840 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
41841 +       return (0);
41842 +    }
41843 +
41844 +    if ( cmRail->NodeId == nodeId ) 
41845 +       bits = statemap_getbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
41846 +    else
41847 +       bits = statemap_getbits (cmRail->Levels[clvl].GlobalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
41848 +
41849 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_is_set: clvl %d nodeId %d offset %d %x\n", clvl, nodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), bits);
41850 +
41851 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
41852 +
41853 +    return  ( (bits == 0) ? (0) : (1) );
41854 +}
41855 +
41856 +int
41857 +cm_svc_indicator_bitmap (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes)
41858 +{
41859 +    /* or in the bit map */  
41860 +    CM_RAIL      *cmRail = rail->ClusterRail;
41861 +    int           nodeId, clvl;
41862 +    bitmap_t      bits;
41863 +    unsigned long flags;
41864 +    int           clip_out_low, clip_out_high;
41865 +    int           curr_low, curr_high;
41866 +    int           check_low, check_high;
41867 +
41868 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_bitmap: rail %p ind %d low %d high %d\n", rail, svc_indicator, low, (low + nnodes));
41869 +
41870 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
41871 +    {
41872 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_bitmap: service indicator %d not registered\n", svc_indicator);
41873 +       return (-1);
41874 +    }
41875 +
41876 +    if (rail->State != EP_RAIL_STATE_RUNNING) 
41877 +       return (-2);
41878 +
41879 +    spin_lock_irqsave (&cmRail->Lock, flags);
41880 +    
41881 +    clip_out_low = clip_out_high = -1; /* all in */
41882 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++) {
41883 +
41884 +       /* curr_high/low is the range of the current lvl */
41885 +       curr_low  = cmRail->Levels[clvl].MinNodeId;
41886 +       curr_high = cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes;
41887 +
41888 +       /* find out how much of low high is in this range and only check that part */
41889 +       check_low  = ( low  < curr_low)  ? curr_low  : low; 
41890 +       check_high = ( (low + nnodes) > curr_high) ? curr_high : (low + nnodes);
41891 +
41892 +       EPRINTF6 (DBG_SVC, "cm_svc_indicator_bitmap: curr(%d,%d) check(%d,%d) clip(%d,%d)\n", curr_low, curr_high, check_low, check_high, clip_out_low, clip_out_high);
41893 +
41894 +       for(nodeId = check_low; nodeId < check_high; nodeId++) {
41895 +
41896 +           if (  (clip_out_low <= nodeId) && (nodeId <= clip_out_high))
41897 +               nodeId = clip_out_high; /* step over the cliped out section */
41898 +           else {
41899 +
41900 +               if ( cmRail->NodeId == nodeId ) 
41901 +                   bits = statemap_getbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
41902 +               else
41903 +                   bits = statemap_getbits (cmRail->Levels[clvl].GlobalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
41904 +               
41905 +               if ( bits ) {
41906 +                   EPRINTF2 (DBG_SVC, "cm_svc_indicator_bitmap: its set nodeId %d (clvl %d)\n", nodeId, clvl);
41907 +                   BT_SET ( bitmap , nodeId - low );
41908 +               }
41909 +           }
41910 +       }
41911 +
41912 +       /* widen the clip out range */
41913 +       clip_out_low  = curr_low;
41914 +       clip_out_high = curr_high -1; 
41915 +    }
41916 +
41917 +    spin_unlock_irqrestore (&cmRail->Lock, flags);      
41918 +
41919 +    return (0);
41920 +}
41921 +
41922 +#if defined(PER_CPU_TIMEOUT)
41923 +static void
41924 +cm_percpu_timeout (void *arg)
41925 +{
41926 +    CM_RAIL          *cmRail = (CM_RAIL *) arg;
41927 +    CM_TIMEOUT_DATA *hbd     = &cmRail->HeartbeatTimeoutsData[current_cpu()];
41928 +    long             now     = lbolt;
41929 +    unsigned         delay   = now - hbd->ScheduledAt;
41930 +    unsigned long    flags;
41931 +
41932 +    if (delay > hbd->WorstDelay)
41933 +       hbd->WorstDelay = delay;
41934 +    if (hbd->BestDelay == 0 || delay < hbd->BestDelay)
41935 +       hbd->BestDelay = delay;
41936 +
41937 +    if (cmRail->HeartbeatTimeoutsShouldStop)
41938 +    {
41939 +       spin_lock_irqsave (&cmRail->Lock, flags);
41940 +       cmRail->HeartbeatTimeoutsStopped |= (1 << current_cpu());
41941 +       kcondvar_wakeupall (&cmRail->HeartbeatTimeoutsWait, &cmRail->Lock);
41942 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
41943 +       return;
41944 +    }
41945 +
41946 +    if (cmRail->NextRunTime == 0 || AFTER (cmRail->NextRunTime, lbolt))
41947 +       hbd->EarlyCount++;
41948 +    else if (cmRail->HeartbeatTimeoutRunning)
41949 +       hbd->MissedCount++;
41950 +    else
41951 +    {
41952 +       local_irq_save (flags);
41953 +       
41954 +       if (! spin_trylock (&cmRail->HeartbeatTimeoutsLock))
41955 +           hbd->WastedCount++;
41956 +       else
41957 +       {
41958 +           cmRail->HeartbeatTimeoutRunning = 1;
41959 +           hbd->WorkCount++;
41960 +
41961 +           spin_lock (&cmRail->Lock);
41962 +
41963 +           if ((delay = (lbolt - cmRail->NextRunTime)) > hbd->WorstHearbeatDelay)
41964 +               hbd->WorstHearbeatDelay = delay;
41965 +           if ((delay = (lbolt - now)) > hbd->WorstLockDelay)
41966 +               hbd->WorstLockDelay = delay;
41967 +
41968 +           DoHeartbeatWork (cmRail);
41969 +
41970 +           spin_unlock (&cmRail->Lock);
41971 +           spin_unlock (&cmRail->HeartbeatTimeoutsLock);
41972 +
41973 +           cmRail->HeartbeatTimeoutRunning = 0;
41974 +       }
41975 +       local_irq_restore (flags);
41976 +    }
41977 +
41978 +    hbd->ScheduledAt = lbolt + MSEC2TICKS (CM_PERCPU_TIMEOUT_INTERVAL);
41979 +    timeout_cpu (cm_percpu_timeout, cmRail, MSECS2TICKS (CM_PERCPU_TIMEOUT_INTERVAL), CALLOUT_TYPE|CALLOUT_NOMALLOC);
41980 +}
41981 +
41982 +static void
41983 +StartPerCpuTimeouts (CM_RAIL *cmRail)
41984 +{
41985 +    register int c;
41986 +
41987 +    spin_lock_init (&cmRail->HeartbeatTimeoutsLock);
41988 +
41989 +    KMEM_ZALLOC (cmRail->HeartbeatTimeoutsData, CM_TIMEOUT_DATA *, ncpus * sizeof (CM_TIMEOUT_DATA), 1);
41990 +
41991 +    for (c = 0; c < cpus_in_box; c++)
41992 +    {
41993 +       if (cpu_to_processor (c))
41994 +       {       
41995 +           if (current_cpu() != c)
41996 +           {
41997 +               thread_bind (current_thread(), cpu_to_processor(c));
41998 +               mpsleep (current_thread(), 0, "StartPerCpuTimeouts", 1, NULL, 0);
41999 +
42000 +               if (current_cpu() != c)
42001 +                   panic ("ep: StartPerCpuTimeouts - failed to switch cpu\n");
42002 +           }
42003 +           
42004 +           cmRail->HeartbeatTimeoutsStarted |= (1 << c);
42005 +           cmRail->HeartbeatTimeoutsData[c].ScheduledAt = lbolt + c;
42006 +
42007 +           timeout_cpu (cm_percpu_timeout, cmRail, c, CALLOUT_TYPE|CALLOUT_NOMALLOC);
42008 +       }
42009 +    }
42010 +
42011 +    thread_bind(current_thread(), NULL);
42012 +}
42013 +
42014 +static void
42015 +StopPerCpuTimeouts (CM_RAIL *cmRail)
42016 +{
42017 +    register int c;
42018 +    unsigned long flags;
42019 +
42020 +    cmRail->HeartbeatTimeoutsShouldStop = 1;
42021 +
42022 +    for (c = 0; c < cpus_in_box; c++)
42023 +    {
42024 +       if (cmRail->HeartbeatTimeoutsStarted & (1 << c))
42025 +       {
42026 +           printk ("%s: stopping cpu_timeout on cpu %d\n", cmRail->Rail->Name, c);
42027 +
42028 +           if (untimeout_cpu (cm_percpu_timeout, cmRail, c, CALLOUT_TYPE|CALLOUT_NOMALLOC, NULL))
42029 +               cmRail->HeartbeatTimeoutsStopped |= (1 << c);
42030 +       }
42031 +    }
42032 +    thread_bind(current_thread(), NULL);
42033 +
42034 +    spin_lock_irqsave (&cmRail->Lock, flags);
42035 +    while (cmRail->HeartbeatTimeoutsStopped != cmRail->HeartbeatTimeoutsStarted)
42036 +       kcondvar_wait (&cmRail->HeartbeatTimeoutsWait, &cmRail->Lock, &flags);
42037 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42038 +
42039 +    cmRail->HeartbeatTimeoutsStarted    = 0;
42040 +    cmRail->HeartbeatTimeoutsStopped    = 0;
42041 +    cmRail->HeartbeatTimeoutsShouldStop = 0;
42042 +
42043 +    KMEM_FREE (cmRail->HeartbeatTimeoutsData, ncpus * sizeof (CM_TIMEOUT_DATA));
42044 +
42045 +    spin_lock_destroy (&cmRail->HeartbeatTimeoutsLock);
42046 +}
42047 +
42048 +#else
42049 +
42050 +static void
42051 +cm_heartbeat_timer (unsigned long arg)
42052 +{
42053 +    CM_RAIL *cmRail = (CM_RAIL *) arg;
42054 +    unsigned long flags;
42055 +
42056 +    spin_lock_irqsave (&cmRail->Lock, flags);
42057 +
42058 +    ASSERT (cmRail->Rail->State == EP_RAIL_STATE_RUNNING);
42059 +
42060 +    DoHeartbeatWork (cmRail);
42061 +    
42062 +    __Schedule_Timer (cmRail, cmRail->NextRunTime);
42063 +
42064 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42065 +}
42066 +
42067 +#endif /* defined(PER_CPU_TIMEOUT) */
42068 +
42069 +
42070 +
42071 +void
42072 +DisplayRailDo (DisplayInfo *di, EP_RAIL *rail)
42073 +{
42074 +    CM_RAIL *cmRail = rail->ClusterRail;
42075 +    unsigned long flags;
42076 +    int  i, j;
42077 +
42078 +    if (rail->State != EP_RAIL_STATE_RUNNING)
42079 +       return;
42080 +
42081 +    spin_lock_irqsave (&cmRail->Lock, flags);
42082 +
42083 +    (di->func)(di->arg, "NodeId=%d NodeLevel=%d NumLevels=%d NumNodes=%d\n", 
42084 +           cmRail->NodeId, cmRail->TopLevel, cmRail->NumLevels, cmRail->Rail->Position.pos_nodes);
42085 +    
42086 +    (di->func)(di->arg, "[");
42087 +
42088 +    for (i = 0; i < cmRail->NumLevels; i++)
42089 +    {
42090 +       if (i > 0)
42091 +           (di->func)(di->arg, ",");
42092 +       
42093 +       if (i < cmRail->TopLevel)
42094 +       {
42095 +           (di->func)(di->arg, "L ");
42096 +         
42097 +           for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
42098 +               switch (cmRail->Levels[i].Sgmts[j].State)
42099 +               {
42100 +               case CM_SGMT_PRESENT: (di->func)(di->arg, "p%-4d", cmRail->Levels[i].Sgmts[j].NodeId); break;
42101 +               case CM_SGMT_WAITING: (di->func)(di->arg, "w%4s", ""); break;
42102 +               case CM_SGMT_COMING:  (di->func)(di->arg, "c%4s", ""); break;
42103 +               case CM_SGMT_ABSENT:  (di->func)(di->arg, ".%4s", ""); break;
42104 +               default:              (di->func)(di->arg, "?%4s", ""); break;
42105 +               }
42106 +       }
42107 +       else
42108 +           switch (cmRail->Role)
42109 +           {
42110 +           case CM_ROLE_LEADER_CANDIDATE:      
42111 +               (di->func)(di->arg,"l "); 
42112 +               for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
42113 +                   (di->func)(di->arg,"     ");
42114 +               break;
42115 +         
42116 +           case CM_ROLE_SUBORDINATE:       
42117 +               switch (cmRail->Levels[i].Sgmts[0].State)
42118 +               {
42119 +               case CM_SGMT_PRESENT: (di->func)(di->arg, "p%-4d", cmRail->Levels[i].Sgmts[0].NodeId); break;
42120 +               case CM_SGMT_WAITING: (di->func)(di->arg, "w%4s", ""); break;
42121 +               case CM_SGMT_COMING:  (di->func)(di->arg, "c%4s", ""); break;
42122 +               case CM_SGMT_ABSENT:  (di->func)(di->arg, ".%4s", ""); break;
42123 +               default:              (di->func)(di->arg, "?%4s", ""); break;
42124 +               }
42125 +               for (j = 1; j < cmRail->Levels[i].NumSegs; j++)
42126 +                   (di->func)(di->arg, "     ");
42127 +               break;
42128 +         
42129 +           default:
42130 +               (di->func)(di->arg, "####");
42131 +               break;
42132 +           }
42133 +    }
42134 +    (di->func)(di->arg, "]\n");
42135 +
42136 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42137 +}
42138 +
42139 +void
42140 +DisplayRail (EP_RAIL *rail) 
42141 +{
42142 +    if (rail->State == EP_RAIL_STATE_RUNNING)
42143 +       DisplayRailDo (&di_ep_debug, rail);
42144 +}
42145 +
42146 +void
42147 +DisplayStatus (EP_RAIL *rail)
42148 +{
42149 +    if (rail->State == EP_RAIL_STATE_RUNNING)
42150 +    {
42151 +       CM_RAIL *cmRail = rail->ClusterRail;
42152 +       unsigned long flags;
42153 +       
42154 +       spin_lock_irqsave (&cmRail->Lock, flags);
42155 +       
42156 +       DisplayNodeMaps (&di_ep_debug, cmRail);
42157 +       
42158 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
42159 +    }
42160 +}
42161 +
42162 +void
42163 +DisplaySegs (EP_RAIL *rail)
42164 +{
42165 +    if (rail->State == EP_RAIL_STATE_RUNNING)
42166 +    {
42167 +       CM_RAIL *cmRail = rail->ClusterRail;
42168 +       unsigned long flags;
42169 +       
42170 +       spin_lock_irqsave (&cmRail->Lock, flags);
42171 +       
42172 +       DisplayNodeSgmts (&di_ep_debug, cmRail);
42173 +       
42174 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
42175 +    }
42176 +}
42177 +
42178 +static void
42179 +LoadBroadcastRoute (CM_RAIL *cmRail, int lvl, int sidx)
42180 +{
42181 +    EP_RAIL *rail  = cmRail->Rail;
42182 +    int      nsegs = cmRail->Levels[0].NumSegs;
42183 +    int      vp    = EP_VP_BCAST(lvl, sidx);
42184 +    int      nodes = 1;
42185 +    int      baseNode;
42186 +    int      i;
42187 +
42188 +    ASSERT (lvl > 0 && lvl <= cmRail->NumLevels);
42189 +    ASSERT (sidx == 0 || lvl < cmRail->NumLevels);
42190 +
42191 +    ASSERT (vp >= EP_VP_BCAST_BASE && vp < EP_VP_BCAST_BASE + EP_VP_BCAST_COUNT);
42192 +
42193 +    for (i = 1; i <= lvl; i++)
42194 +    {
42195 +       nodes *= nsegs;
42196 +       nsegs = (i == cmRail->NumLevels) ? 1 : cmRail->Levels[i].NumSegs;
42197 +    }
42198 +
42199 +    baseNode = ((cmRail->NodeId / (nodes * nsegs)) * nsegs + sidx) * nodes;
42200 +
42201 +    CPRINTF5 (2, "%s: broadcast vp lvl %d sidx %d [%d,%d]\n", 
42202 +             cmRail->Rail->Name, lvl, sidx, baseNode, baseNode + nodes - 1);
42203 +    
42204 +    rail->Operations.LoadSystemRoute (rail, vp, baseNode, baseNode + nodes - 1);
42205 +}
42206 +
42207 +static void
42208 +LoadRouteTable (CM_RAIL *cmRail)
42209 +{
42210 +    EP_RAIL *rail = cmRail->Rail;
42211 +    int      i, j;
42212 +   
42213 +   if (cmRail->NumNodes > EP_MAX_NODES)
42214 +   {
42215 +       printk ("More nodes (%d) than point-to-point virtual process table entries (%d)\n", cmRail->NumNodes, EP_MAX_NODES);
42216 +       panic ("LoadRouteTable\n");
42217 +   }
42218 +
42219 +   for (i = 0; i < cmRail->NumNodes; i++)
42220 +       rail->Operations.LoadSystemRoute (rail, EP_VP_NODE(i), i, i);
42221 +
42222 +   /* Generate broadcast routes for subtrees */
42223 +   for (i = 1; i < cmRail->NumLevels; i++)
42224 +      for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
42225 +         LoadBroadcastRoute (cmRail, i, j);
42226 +
42227 +   /* Generate broadcast route for whole machine */
42228 +   LoadBroadcastRoute (cmRail, cmRail->NumLevels, 0);
42229 +
42230 +   /* Finally invalidate all the data routes */
42231 +   for (i = 0; i < cmRail->NumNodes; i++)
42232 +       rail->Operations.UnloadNodeRoute (cmRail->Rail, i);
42233 +}
42234 +
42235 +void
42236 +cm_node_disconnected (EP_RAIL *rail, unsigned nodeId)
42237 +{
42238 +    CM_RAIL *cmRail = rail->ClusterRail;
42239 +    int      base, lstat, lgstat;
42240 +    int             clvl, subClMin, subClMax;
42241 +    int      thisClId, myClId;
42242 +    unsigned long flags;
42243 +
42244 +    ASSERT (nodeId != cmRail->NodeId);
42245 +
42246 +    spin_lock_irqsave (&cmRail->Lock, flags);
42247 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42248 +       if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
42249 +           break;
42250 +
42251 +    myClId   = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
42252 +    thisClId = nodeId - cmRail->Levels[clvl].MinNodeId;
42253 +    base     = thisClId * CM_GSTATUS_BITS;
42254 +    lstat    = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
42255 +    lgstat   = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
42256 +
42257 +    ASSERT ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN);
42258 +
42259 +    CPRINTF7 (2, "%s: cm_node_disconnected: Node %d: clvl %d, lgstat %s, gstat %s, lstat %s -> %sMAY_START\n",
42260 +             cmRail->Rail->Name, nodeId, clvl,
42261 +             GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
42262 +             GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
42263 +             GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId),
42264 +             ((lgstat != CM_GSTATUS_CLOSING) && (lstat & CM_GSTATUS_RESTART)) ? "RESTART|" : "");
42265 +    
42266 +    switch (lgstat)
42267 +    {
42268 +    case CM_GSTATUS_CLOSING:
42269 +       /* delayed ack of closing - set MAY_START and clear RESTART */
42270 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
42271 +       break;
42272 +    case CM_GSTATUS_STARTING:
42273 +    case CM_GSTATUS_RUNNING:
42274 +       IASSERT (! cmRail->Levels[clvl].Online || lstat & CM_GSTATUS_RESTART);
42275 +       break;
42276 +    case CM_GSTATUS_ABSENT:
42277 +       IASSERT (lstat & CM_GSTATUS_RESTART);
42278 +    }
42279 +
42280 +    cmRail->Levels[clvl].Connected--;
42281 +
42282 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42283 +}
42284 +
42285 +void
42286 +cm_restart_node (EP_RAIL *rail, unsigned nodeId)
42287 +{
42288 +    CM_RAIL *cmRail = rail->ClusterRail;
42289 +    int      base, lstat, lgstat;
42290 +    int             clvl, subClMin, subClMax;
42291 +    int      thisClId, myClId;
42292 +    unsigned long flags;
42293 +
42294 +    spin_lock_irqsave (&cmRail->Lock, flags);
42295 +    if (nodeId == rail->Position.pos_nodeid)
42296 +    {
42297 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42298 +           RestartComms (cmRail, clvl);
42299 +    }
42300 +    else
42301 +    {
42302 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42303 +           if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
42304 +               break;
42305 +       
42306 +       myClId   = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
42307 +       thisClId = nodeId - cmRail->Levels[clvl].MinNodeId;
42308 +       base     = thisClId * CM_GSTATUS_BITS;
42309 +       lstat    = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
42310 +       lgstat   = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap,  base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
42311 +
42312 +       CPRINTF6 (2, "%s: cm_restart_node: Node %d: clvl %d, lgstat %s, gstat %s, lstat %s\n",
42313 +                 cmRail->Rail->Name, nodeId, clvl,
42314 +                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
42315 +                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
42316 +                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
42317 +       
42318 +       if (lgstat != CM_GSTATUS_CLOSING)
42319 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, lstat | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
42320 +    }
42321 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42322 +}
42323 +
42324 +void
42325 +cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason)
42326 +{
42327 +    CM_RAIL *cmRail = rail->ClusterRail;
42328 +    unsigned long flags;
42329 +
42330 +    spin_lock_irqsave (&cmRail->Lock, flags);
42331 +    if (offline)
42332 +       cmRail->OfflineReasons |= reason;
42333 +    else
42334 +       cmRail->OfflineReasons &= ~reason;
42335 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42336 +}
42337 +
42338 +static void
42339 +cm_remove_rail (EP_SUBSYS *subsys, EP_SYS *epsys, EP_RAIL *rail)
42340 +{
42341 +    CM_SUBSYS  *sys    = (CM_SUBSYS *) subsys;
42342 +    CM_RAIL    *cmRail = sys->Rails[rail->Number];
42343 +    int i, lvl, clvl;
42344 +
42345 +    cm_procfs_rail_fini (cmRail);
42346 +
42347 +    sys->Rails[rail->Number] = NULL;
42348 +    rail->ClusterRail        = NULL;
42349 +
42350 +#if defined(PER_CPU_TIMEOUT)
42351 +    StopPerCpuTimeouts (cmRail);
42352 +#else
42353 +    del_timer_sync (&cmRail->HeartbeatTimer);
42354 +#endif
42355 +    cmRail->NextRunTime      = 0;
42356 +    cmRail->NextDiscoverTime = 0;
42357 +    cmRail->NextHeartbeatTime = 0;
42358 +    
42359 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42360 +    {
42361 +       for (lvl = 0; lvl <= clvl; lvl++)
42362 +       {
42363 +           CM_LEVEL *level = &cmRail->Levels[lvl];
42364 +           
42365 +           statemap_destroy (level->SubordinateMap[clvl]);
42366 +           
42367 +           for (i = 0; i < level->NumSegs; i++)
42368 +           {
42369 +               statemap_destroy (level->Sgmts[i].Maps[clvl].CurrentInputMap);
42370 +               statemap_destroy (level->Sgmts[i].Maps[clvl].InputMap);
42371 +               statemap_destroy (level->Sgmts[i].Maps[clvl].OutputMap);
42372 +           }
42373 +       }
42374 +       
42375 +       cmRail->Levels[clvl].Online = 0;
42376 +       
42377 +       statemap_destroy (cmRail->Levels[clvl].TmpMap);
42378 +       statemap_destroy (cmRail->Levels[clvl].GlobalMap);
42379 +       statemap_destroy (cmRail->Levels[clvl].LastGlobalMap);
42380 +       statemap_destroy (cmRail->Levels[clvl].SubTreeMap);
42381 +       statemap_destroy (cmRail->Levels[clvl].LocalMap);
42382 +    }
42383 +    
42384 +    spin_lock_destroy (&cmRail->Lock);
42385 +
42386 +    ep_free_inputq (cmRail->Rail, cmRail->PolledQueue);
42387 +    ep_free_inputq (cmRail->Rail, cmRail->IntrQueue);
42388 +    ep_free_outputq (cmRail->Rail, cmRail->MsgQueue);
42389 +
42390 +    KMEM_FREE (cmRail, sizeof (CM_RAIL));
42391 +}
42392 +
42393 +static int
42394 +cm_add_rail (EP_SUBSYS *subsys, EP_SYS *epsys, EP_RAIL *rail)
42395 +{
42396 +    CM_SUBSYS     *sys = (CM_SUBSYS *) subsys;
42397 +    ELAN_POSITION *pos = &rail->Position;
42398 +    CM_RAIL       *cmRail;
42399 +    int lvl, n, nn, clvl, span, i;
42400 +    unsigned long flags;
42401 +
42402 +    KMEM_ZALLOC (cmRail, CM_RAIL *, sizeof (CM_RAIL), 1);
42403 +
42404 +    if (cmRail == NULL)
42405 +       return (ENOMEM);
42406 +    
42407 +    cmRail->Rail     = rail;
42408 +    cmRail->NodeId   = pos->pos_nodeid;
42409 +    cmRail->NumNodes = pos->pos_nodes;
42410 +
42411 +    spin_lock_init (&cmRail->Lock);
42412 +
42413 +    if ((cmRail->IntrQueue   = ep_alloc_inputq (rail, EP_SYSTEMQ_INTR,   sizeof (CM_MSG), CM_INPUTQ_ENTRIES, IntrQueueCallback, cmRail)) == NULL ||
42414 +       (cmRail->PolledQueue = ep_alloc_inputq (rail, EP_SYSTEMQ_POLLED, sizeof (CM_MSG), CM_INPUTQ_ENTRIES, NULL, 0)) == NULL ||
42415 +       (cmRail->MsgQueue    = ep_alloc_outputq (rail, sizeof (CM_MSG), CM_NUM_MSG_BUFFERS)) == NULL)
42416 +    {
42417 +       goto failed;
42418 +    }
42419 +
42420 +    /* point to first "spare" message buffer */
42421 +    cmRail->NextSpareMsg = 0;
42422 +
42423 +    /* Compute the branching ratios from the switch arity */
42424 +    for (lvl = 0; lvl < CM_MAX_LEVELS; lvl++)
42425 +       BranchingRatios[lvl] = (lvl < pos->pos_levels) ? pos->pos_arity[pos->pos_levels - lvl - 1] : 4;
42426 +    
42427 +    /* now determine the number of levels of hierarchy we have */
42428 +    /* and how many nodes per level there are */
42429 +    for (lvl = 0, nn = 1, n = pos->pos_nodes; 
42430 +        n > 1; 
42431 +        nn *= BranchingRatios[lvl], n = n / BranchingRatios[lvl], lvl++)
42432 +    {
42433 +       int       nSegs = (n > BranchingRatios[lvl]) ? BranchingRatios[lvl] : n;
42434 +       int       nNodes = nn * nSegs;
42435 +       CM_LEVEL *level = &cmRail->Levels[lvl];
42436 +
42437 +       for (clvl = 0, span = pos->pos_arity[pos->pos_levels - clvl - 1]; 
42438 +            span < nNodes && clvl < pos->pos_levels - 1;
42439 +            clvl++, span *= pos->pos_arity[pos->pos_levels - clvl - 1])
42440 +           ;
42441 +       
42442 +       level->SwitchLevel = clvl;
42443 +       level->MinNodeId = (pos->pos_nodeid / nNodes) * nNodes;
42444 +       level->NumNodes = nNodes;
42445 +       level->NumSegs = nSegs;
42446 +    }
42447 +    
42448 +    cmRail->NumLevels      = lvl;
42449 +    cmRail->BroadcastLevel = lvl-1;
42450 +
42451 +    CPRINTF4 (2, "%s: NodeId=%d NumNodes=%d NumLevels=%d\n", 
42452 +             rail->Name, pos->pos_nodeid, pos->pos_nodes, cmRail->NumLevels);
42453 +
42454 +    LoadRouteTable (cmRail);
42455 +    
42456 +    /* Init SGMT constants */
42457 +    for (lvl = 0; lvl < cmRail->NumLevels; lvl++)
42458 +    {
42459 +       CM_LEVEL *level = &cmRail->Levels[lvl];
42460 +
42461 +       level->MySgmt = SegmentNo (cmRail, cmRail->NodeId, lvl);
42462 +       
42463 +       for (i = 0; i < CM_SGMTS_PER_LEVEL; i++)
42464 +       {
42465 +           CM_SGMT *sgmt = &level->Sgmts[i];
42466 +         
42467 +           sgmt->MsgNumber = lvl * CM_SGMTS_PER_LEVEL + i;
42468 +           sgmt->Level = lvl;
42469 +           sgmt->Sgmt = i;
42470 +       }
42471 +    }
42472 +
42473 +    /* Init maps for each cluster level */
42474 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42475 +    {
42476 +       int nNodes = cmRail->Levels[clvl].NumNodes;
42477 +       int mapBits = (nNodes * CM_GSTATUS_BITS) + (nNodes * EP_SVC_NUM_INDICATORS);
42478 +       int clmin;
42479 +       int clmax;
42480 +       int clid = ClusterIds (cmRail, clvl, &clmin, &clmax);
42481 +
42482 +       for (lvl = 0; lvl <= clvl; lvl++)
42483 +       {
42484 +           CM_LEVEL *level = &cmRail->Levels[lvl];
42485 +
42486 +           level->SubordinateMap[clvl] = statemap_create (mapBits);
42487 +
42488 +           for (i = 0; i < level->NumSegs; i++)
42489 +           {
42490 +               level->Sgmts[i].Maps[clvl].CurrentInputMap = statemap_create (mapBits);
42491 +               level->Sgmts[i].Maps[clvl].InputMap        = statemap_create (mapBits);
42492 +               level->Sgmts[i].Maps[clvl].OutputMap       = statemap_create (mapBits);
42493 +           }
42494 +       }
42495 +       
42496 +       cmRail->Levels[clvl].Online = 0;
42497 +
42498 +       cmRail->Levels[clvl].TmpMap        = statemap_create (mapBits);
42499 +       cmRail->Levels[clvl].GlobalMap     = statemap_create (mapBits);
42500 +       cmRail->Levels[clvl].LastGlobalMap = statemap_create (mapBits);
42501 +       cmRail->Levels[clvl].SubTreeMap    = statemap_create (mapBits);
42502 +       cmRail->Levels[clvl].LocalMap      = statemap_create (mapBits);
42503 +
42504 +       /* Flag everyone outside my next lower cluster as sensed offline... */
42505 +       for (i = 0; i < clmin; i++)
42506 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, i * CM_GSTATUS_BITS, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
42507 +       
42508 +       for (i = clmax + 1; i < nNodes; i++)
42509 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, i * CM_GSTATUS_BITS, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
42510 +       
42511 +       /* ...and set my own state */
42512 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, clid * CM_GSTATUS_BITS,
42513 +                         CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
42514 +    }
42515 +    
42516 +    /* compute parameter hash to add to messages */
42517 +    cmRail->ParamHash = EP_PROTOCOL_VERSION;
42518 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_PERIODIC_DISCOVER_INTERVAL;
42519 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_URGENT_DISCOVER_INTERVAL;
42520 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_HEARTBEAT_INTERVAL;
42521 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_P2P_DMA_RETRIES;
42522 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_P2P_MSG_RETRIES;
42523 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_BCAST_MSG_RETRIES;
42524 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_TIMER_SCHEDULE_TIMEOUT;
42525 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_HEARTBEAT_TIMEOUT;
42526 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_DISCOVER_TIMEOUT;
42527 +    cmRail->ParamHash = cmRail->ParamHash * 127 + BT_NBIPUL;
42528 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_GSTATUS_BITS;
42529 +    cmRail->ParamHash = cmRail->ParamHash * 127 + EP_SVC_NUM_INDICATORS;
42530 +    cmRail->ParamHash = cmRail->ParamHash * 127 + cmRail->NumLevels;
42531 +    cmRail->ParamHash = cmRail->ParamHash * 127 + cmRail->NumNodes;
42532 +    for (i = 0; i < cmRail->NumLevels; i++)
42533 +       cmRail->ParamHash = cmRail->ParamHash * 127 + BranchingRatios[i];
42534 +    
42535 +#if defined(PER_CPU_TIMEOUT)
42536 +    StartPerCpuTimeouts (cmRail);
42537 +#endif
42538 +
42539 +    spin_lock_irqsave (&cmRail->Lock, flags);
42540 +
42541 +#if !defined(PER_CPU_TIMEOUT)
42542 +    /* Initialise the timer, but don't add it yet, since
42543 +     * __Schedule_Heartbeat() will do this. */
42544 +
42545 +    init_timer (&cmRail->HeartbeatTimer);
42546 +    
42547 +    cmRail->HeartbeatTimer.function = cm_heartbeat_timer;
42548 +    cmRail->HeartbeatTimer.data     = (unsigned long) cmRail;
42549 +    cmRail->HeartbeatTimer.expires  = lbolt + hz;
42550 +#endif
42551 +
42552 +    /* start sending heartbeats */
42553 +    __Schedule_Heartbeat (cmRail);
42554 +
42555 +    /* start discovering who else is out there */
42556 +    LowerTopLevel (cmRail, 0);
42557 +
42558 +    /* connect to myself straight away - I know I'm here */
42559 +    ep_connect_node (rail, cmRail->NodeId);
42560 +    
42561 +    /* add to all rails */
42562 +    sys->Rails[rail->Number] = cmRail;
42563 +    rail->ClusterRail = (void *) cmRail;
42564 +
42565 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42566 +
42567 +    /* Enable the input queues */
42568 +    ep_enable_inputq (rail, cmRail->PolledQueue);
42569 +    ep_enable_inputq (rail, cmRail->IntrQueue);
42570 +
42571 +    /* Create the procfs entries */
42572 +    cm_procfs_rail_init (cmRail);
42573 +
42574 +    return 0;
42575 +
42576 + failed:
42577 +    cm_remove_rail (subsys, epsys, rail);
42578 +    return -ENOMEM;
42579 +}
42580 +
42581 +static void
42582 +cm_fini (EP_SUBSYS *subsys, EP_SYS *epsys)
42583 +{
42584 +    CM_SUBSYS *sys = (CM_SUBSYS *) subsys;
42585 +
42586 +    cm_procfs_fini(sys);
42587 +    
42588 +    KMEM_FREE (sys, sizeof (CM_SUBSYS));
42589 +}
42590 +
42591 +int
42592 +cm_init (EP_SYS *sys)
42593 +{
42594 +    CM_SUBSYS *subsys;
42595 +
42596 +    KMEM_ZALLOC (subsys, CM_SUBSYS *, sizeof (CM_SUBSYS), 1);
42597 +
42598 +    if (subsys == NULL)
42599 +       return (ENOMEM);
42600 +
42601 +    subsys->Subsys.Sys        = sys;
42602 +    subsys->Subsys.Name              = "cm";
42603 +    subsys->Subsys.Destroy    = cm_fini;
42604 +    subsys->Subsys.AddRail    = cm_add_rail;
42605 +    subsys->Subsys.RemoveRail = cm_remove_rail;
42606 +
42607 +    ep_subsys_add (sys, &subsys->Subsys);
42608 +
42609 +    cm_procfs_init (subsys);
42610 +
42611 +    /*
42612 +     * Initialise the machineid if it wasn't specified by
42613 +     * the modules.conf file - otherwise truncate it to 
42614 +     * 16 bits.
42615 +     */
42616 +    if (MachineId != -1)
42617 +       MachineId = (uint16_t) MachineId;
42618 +    else
42619 +    {
42620 +#if defined(LINUX_ALPHA)
42621 +       MachineId = (uint16_t)((5 << 12) | HZ);
42622 +#elif defined(LINUX_SPARC)
42623 +       MachineId = (uint16_t)((4 << 12) | HZ);
42624 +#elif defined(LINUX_I386)
42625 +       MachineId = (uint16_t)((3 << 12) | HZ);
42626 +#elif defined( LINUX_IA64)
42627 +       MachineId = (uint16_t)((2 << 12) | HZ);
42628 +#elif defined(LINUX_X86_64)
42629 +       MachineId = (uint16_t)((1 << 12) | HZ);
42630 +#else
42631 +       MachineId = (uint16_t)((0 << 12) | HZ);
42632 +#endif
42633 +    }
42634 +
42635 +    return (0);
42636 +}
42637 +
42638 +/*
42639 + * Local variables:
42640 + * c-file-style: "stroustrup"
42641 + * End:
42642 + */
42643 Index: linux-2.4.21/drivers/net/qsnet/ep/cm.h
42644 ===================================================================
42645 --- linux-2.4.21.orig/drivers/net/qsnet/ep/cm.h 2004-02-23 16:02:56.000000000 -0500
42646 +++ linux-2.4.21/drivers/net/qsnet/ep/cm.h      2005-06-01 23:12:54.633433936 -0400
42647 @@ -0,0 +1,412 @@
42648 +/*
42649 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
42650 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
42651 + *
42652 + *    For licensing information please see the supplied COPYING file
42653 + *
42654 + */
42655 +
42656 +#ifndef __ELAN_CM_H
42657 +#define __ELAN_CM_H
42658 +
42659 +#ident "@(#)$Id: cm.h,v 1.14.2.1 2004/11/12 10:54:50 mike Exp $"
42660 +/*      $Source: /cvs/master/quadrics/epmod/cm.h,v $*/
42661 +
42662 +#include <elan/statemap.h>
42663 +
42664 +#if defined(DIGITAL_UNIX)
42665 +/*
42666 + * On Tru64 - SMP doesn't mean Symmetric - cpu 0 is a master cpu and is responsible
42667 + * for handling all PCI interrupts and "funneled" operations.  When a kernel thread
42668 + * is made runnable, the scheduler will choose which cpu it will run on at that time,
42669 + * and will only execute a higher priority thread from another cpu's run queue when 
42670 + * it becomes totally idle (apparently also including user processes).  Also the 
42671 + * assert_wait_mesg_timo function uses a per-cpu timeout - these can only get executed
42672 + * at "preemptable" places - so again have no guarantee on when they will execute if
42673 + * they happen to be queued on a "hogged" cpu. The combination of these mean that the Tru64
42674 + * is incapable of scheduling a high priority kernel  thread within a deterministic time
42675 + * of when it should have become runnable - wonderful.
42676 + *
42677 + * Hence the solution Compaq have proposed it to schedule a timeout onto all of the
42678 + * cpu's timeouts lists at the maximum frequency that we could want to execute code,
42679 + * then to handle the scheduling of work between these ourselves.  With a bit of luck
42680 + * ..... at least one cpu will be sufficiently unloaded to allow us to get a chance
42681 + * to do our important work.
42682 + *
42683 + * However ..... this still is not reliable, since timeouts under Tru64 are still 
42684 + * only run when the currently running kernel thread "co-operates" by calling one
42685 + * of a number of functions which is permitted to run the "lwc"s AND is not holding
42686 + * any spinlocks AND is running at IPL 0.   However Compaq are unable to provide
42687 + * any upper limit on the time between the "lwc"'s being run and so it is possible
42688 + * for all 4 cpus to not run them for an unbounded time.
42689 + *
42690 + * The solution proposed is to use the RM_TEMP_BACKDOOR hook which was added to 
42691 + * hardclock() to "solve" this problem for Memory Channel.  However, since it
42692 + * is called within the clock interrupt it is not permissible to acquire any
42693 + * spinlocks, nor to run for "too long".  This means that it is not possible to
42694 + * call the heartbeat algorithm from this hook.  
42695 + *
42696 + * Our solution to these limitations is to use the hook to cause an elan interrupt 
42697 + * to be delivered, by issuing a mis-aligned SetEvent command - this causes the device 
42698 + * to trap and ep_cprocTrap() can then run the heartbeat code.  However there is a lock 
42699 + * order violation between the elan_dev::IntrLock and ep_dev::Lock, so we have to 
42700 + * use a trylock and if we fail, then hope that when the interrupt is delivered again
42701 + * some time later we will succeed.
42702 + *
42703 + * However this only works if the kernel is able to respond to the Elan interrupt,
42704 + * so we panic inside the RM_TEMP_BACKDOOR hook if the SetEvent's interrupt has
42705 + * not been taken for more than an CM_TIMER_SCHEDULE_TIMEOUT interval.
42706 + *
42707 + * In fact this is exactly the mechanism that other operating systems use to
42708 + * execute timeouts, since the hardclock interrupt posts a low priority 
42709 + * "soft interrupt" which "pre-empts" the currently running thread and then
42710 + * executes the timeouts. To block timeouts you use splsoftclock() the same as 
42711 + * in Tru64.
42712 + */
42713 +#define PER_CPU_TIMEOUT                        TRUE
42714 +#endif
42715 +
42716 +
42717 +#define CM_SGMTS_PER_LEVEL             8                       /* maximum nodes in each segment */
42718 +#define CM_MAX_LEVELS                  6                       /* maximum depth of tree */
42719 +
42720 +/* message buffers/dmas/events etc */
42721 +#define CM_NUM_NODE_MSG_BUFFERS                (CM_MAX_LEVELS * CM_SGMTS_PER_LEVEL) /* subordinates and leader */
42722 +#define CM_NUM_SPARE_MSG_BUFFERS       8                       /* spare msg buffers for non-connected nodes */
42723 +#define CM_NUM_MSG_BUFFERS             (CM_NUM_NODE_MSG_BUFFERS + CM_NUM_SPARE_MSG_BUFFERS)
42724 +
42725 +#define CM_INPUTQ_ENTRIES              128                     /* # entries in input queue */
42726 +
42727 +#define CM_PERIODIC_DISCOVER_INTERVAL  (5000)          /* 5s (infrequent resolution of established leader conflicts) */
42728 +#define CM_URGENT_DISCOVER_INTERVAL    (50)            /* 0.05s (more frequently than heartbeats 'cause they don't retry) */
42729 +#define CM_HEARTBEAT_INTERVAL          (125)           /* 0.125s */
42730 +#define CM_TIMER_SCHEDULE_TIMEOUT      (4000)          /* 4s     Maximum time before a timer that's scheduled to run gets to run (eg blocked in interrupt handlers etc) */
42731 +#define CM_THREAD_SCHEDULE_TIMEOUT     (30000)         /* 30s    Maximum time before a thread that's scheduled to run gets to run */
42732 +#define CM_THREAD_RUNNING_TIMEOUT      (30000)         /* 30s    Don't expect the manager thread to be running longer than this */
42733 +
42734 +#ifdef PER_CPU_TIMEOUT
42735 +#define CM_PERCPU_TIMEOUT_INTERVAL     (50)            /* 0.05s (must be less than all above intervals) */
42736 +#define CM_PACEMAKER_INTERVAL          (500)           /* 0.5s */
42737 +
42738 +#define CM_HEARTBEAT_OVERDUE           (250)           /* 0.25s Maximum time a timeout can be overdue before taking extreme action */
42739 +#endif
42740 +
42741 +#define CM_P2P_DMA_RETRIES             31
42742 +
42743 +/* We expect at least 1 point-to-point message in CM_P2P_MSG_RETRIES
42744 + * attempts to send one to be successfully received */
42745 +#define CM_P2P_MSG_RETRIES             8
42746 +
42747 +/* We expect at least 1 broadcast message in CM_BCAST_MSG_RETRIES attempts
42748 + * to send one to be successfully received. */
42749 +#define CM_BCAST_MSG_RETRIES           40
42750 +
42751 +/* Heartbeat timeout allows for a node stalling and still getting its
42752 + * heartbeat. The 2 is to allow for unsynchronised polling times. */
42753 +#define CM_HEARTBEAT_TIMEOUT           (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_P2P_MSG_RETRIES) * CM_HEARTBEAT_INTERVAL)
42754 +
42755 +/* Discover timeout must be > CM_HEARTBEAT_TIMEOUT to guarantee that people
42756 + * who don't see discovery are considered dead by their leader.  This
42757 + * ensures that by the time a node "discovers" it is a leader of a segment,
42758 + * the previous leader of that segment will have been deemed to be dead by
42759 + * the parent segment's leader */
42760 +#define CM_DISCOVER_TIMEOUT            (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_BCAST_MSG_RETRIES) * CM_URGENT_DISCOVER_INTERVAL)
42761 +
42762 +#define CM_WAITING_TIMEOUT             (CM_DISCOVER_TIMEOUT * 100)
42763 +
42764 +/*
42765 + * Convert all timeouts specified in mS into "ticks"
42766 + */
42767 +#define MSEC2TICKS(MSEC)               (((MSEC)*HZ)/1000)
42768 +
42769 +
42770 +/* statemap entry */
42771 +typedef struct cm_state_entry
42772 +{
42773 +    int16_t           level;                   /* cluster level to apply to */
42774 +    int16_t          offset;                   /* from statemap_findchange() */
42775 +    uint16_t          seg[BT_NBIPUL/16];       /* ditto */
42776 +} CM_STATEMAP_ENTRY;
42777 +
42778 +/* offset is >= 0 for a change to apply and */
42779 +#define STATEMAP_NOMORECHANGES (-1)            /* end of a set of updates */
42780 +#define STATEMAP_RESET         (-2)            /* reset the target map */
42781 +#define STATEMAP_NOOP          (-3)            /* null token */
42782 +
42783 +/* CM message format */
42784 +typedef int8_t CM_SEQ;                         /* heartbeat sequence numbers; at least 2 bits, signed */
42785 +
42786 +/*
42787 + * The message header is received into the last 64 byte block of 
42788 + * the input queue and the Version *MUST* be the last word of the 
42789 + * block to ensure that we can see that the whole of the message
42790 + * has reached main memory after we've seen the input queue pointer
42791 + * have been updated.
42792 + */
42793 +typedef struct ep_cm_hdr
42794 +{
42795 +    uint32_t          Pad0;
42796 +    uint32_t          Pad1;
42797 +
42798 +    uint8_t           Type;
42799 +    uint8_t           Level;
42800 +    CM_SEQ            Seq;                     /* precision at least 2 bits each*/
42801 +    CM_SEQ            AckSeq;
42802 +    
42803 +    uint16_t          NumMaps;
42804 +    uint16_t          MachineId;
42805 +
42806 +    uint16_t          NodeId;
42807 +    uint16_t          Checksum;
42808 +
42809 +    uint32_t           Timestamp;
42810 +    uint32_t           ParamHash;
42811 +    uint32_t          Version;
42812 +} CM_HDR;
42813 +
42814 +#define CM_HDR_SIZE        sizeof (CM_HDR)
42815 +
42816 +typedef struct cm_msg
42817 +{
42818 +    union {
42819 +       CM_STATEMAP_ENTRY   Statemaps[1];               /* piggy-backed statemap updates start here */
42820 +       uint8_t             Space[EP_SYSTEMQ_MSG_MAX - CM_HDR_SIZE];
42821 +    } Payload;
42822 +    
42823 +    CM_HDR                 Hdr;
42824 +} CM_MSG;
42825 +
42826 +/* The maximum number of statemap entries that can fit within an EP_CM_MSG_BUFFER */
42827 +#define CM_MSG_MAXMAPS         (offsetof (CM_MSG, Hdr) / sizeof (CM_STATEMAP_ENTRY))
42828 +#define CM_MSG_MAP(mapno)      (CM_MSG_MAXMAPS - (mapno) - 1)
42829 +
42830 +/* The actual special message base & size, including 'nmaps' piggy-backed statemap entries */
42831 +#define CM_MSG_BASE(nmaps)     (nmaps == 0 ? offsetof (CM_MSG, Hdr) : offsetof (CM_MSG, Payload.Statemaps[CM_MSG_MAXMAPS - nmaps]))
42832 +#define CM_MSG_SIZE(nmaps)     (sizeof (CM_MSG) - CM_MSG_BASE(nmaps))
42833 +
42834 +#define CM_MSG_VERSION                         0xcad00005
42835 +#define CM_MSG_TYPE_RESOLVE_LEADER             0
42836 +#define CM_MSG_TYPE_DISCOVER_LEADER            1
42837 +#define CM_MSG_TYPE_NOTIFY                     2
42838 +#define CM_MSG_TYPE_DISCOVER_SUBORDINATE       3
42839 +#define CM_MSG_TYPE_IMCOMING                   4
42840 +#define CM_MSG_TYPE_HEARTBEAT                  5
42841 +#define CM_MSG_TYPE_REJOIN                     6
42842 +
42843 +/* CM machine segment */
42844 +typedef struct cm_sgmtMaps
42845 +{
42846 +    u_char       InputMapValid;                        /* Input map has been set */
42847 +    u_char       OutputMapValid;               /* Output map has been set */
42848 +    u_char       SentChanges;                  /* got an outstanding STATEMAP_NOMORECHANGES to send */
42849 +    statemap_t  *OutputMap;                    /* state to send */
42850 +    statemap_t  *InputMap;                     /* state received */
42851 +    statemap_t  *CurrentInputMap;              /* state being received */
42852 +} CM_SGMTMAPS;
42853 +
42854 +typedef struct cm_sgmt
42855 +{
42856 +   u_char       State;
42857 +   u_char       SendMaps;
42858 +   u_char       MsgAcked;
42859 +   CM_SEQ      MsgSeq;
42860 +   CM_SEQ      AckSeq;
42861 +   u_int       NodeId;
42862 +   long                UpdateTick;
42863 +   long                WaitingTick;
42864 +   uint32_t    Timestamp;
42865 +   CM_SGMTMAPS  Maps[CM_MAX_LEVELS];           /* Maps[i] == state for cluster level i */
42866 +   u_short      MsgNumber;                     /* msg buffer to use */
42867 +   u_short     NumMaps;                        /* # maps in message buffer */
42868 +   u_short      Level;
42869 +   u_short      Sgmt;
42870 +} CM_SGMT;
42871 +
42872 +#define CM_SGMT_ABSENT         0               /* no one there at all */
42873 +#define CM_SGMT_WAITING                1               /* waiting for subtree to connect */
42874 +#define CM_SGMT_COMING         2               /* expecting a subtree to reconnect */
42875 +#define CM_SGMT_PRESENT                3               /* connected */
42876 +
42877 +typedef struct cm_level
42878 +{
42879 +    int               SwitchLevel;
42880 +    u_int             MinNodeId;
42881 +    u_int              NumNodes;
42882 +    u_int              NumSegs;
42883 +    u_int              MySgmt;
42884 +   
42885 +    /* SubordinateMap[i] == OR of all subordinate maps on this level and down for cluster level i */
42886 +    u_char             SubordinateMapValid[CM_MAX_LEVELS];
42887 +    statemap_t        *SubordinateMap[CM_MAX_LEVELS];
42888 +
42889 +    /* maps/flags for this cluster level */
42890 +    u_int              Online:1;                               /* I've gone online (seen myself running) */
42891 +    u_int             Restarting:1;                            /* driving my own restart bit */
42892 +    u_char            OfflineReasons;                          /* forced offline by broadcast */
42893 +
42894 +    u_char             GlobalMapValid;
42895 +    u_char             SubTreeMapValid;
42896 +    u_long            Connected;
42897 +
42898 +    statemap_t        *LocalMap;               /* state bits I drive */
42899 +    statemap_t        *SubTreeMap;             /* OR of my and my subtree states */
42900 +    statemap_t        *GlobalMap;              /* OR of all node states */
42901 +    statemap_t        *LastGlobalMap;          /* last map I saw */
42902 +    statemap_t        *TmpMap;                 /* scratchpad */
42903 +
42904 +    CM_SGMT           Sgmts[CM_SGMTS_PER_LEVEL];
42905 +} CM_LEVEL;
42906 +
42907 +#define CM_ROLE_LEADER_CANDIDATE       0
42908 +#define CM_ROLE_LEADER                 1
42909 +#define CM_ROLE_SUBORDINATE            2
42910 +
42911 +/* global status bits */
42912 +#define CM_GSTATUS_STATUS_MASK         0x03    /* bits nodes drive to broadcast their status */
42913 +#define CM_GSTATUS_ABSENT              0x00    /* Off the network */
42914 +#define CM_GSTATUS_STARTING            0x01    /* I'm waiting for everyone to see me online */
42915 +#define CM_GSTATUS_RUNNING              0x03   /* up and running */
42916 +#define CM_GSTATUS_CLOSING             0x02    /* I'm waiting for everyone to see me offline */
42917 +
42918 +#define CM_GSTATUS_ACK_MASK            0x0c    /* bits node drive to ack other status */
42919 +#define CM_GSTATUS_MAY_START           0x04    /* Everyone thinks I may not start */
42920 +#define CM_GSTATUS_MAY_RUN             0x08    /* Everyone thinks I may not run */
42921 +
42922 +#define CM_GSTATUS_RESTART             0x10    /* Someone thinks I should restart */
42923 +#define CM_GSTATUS_BITS                        5
42924 +
42925 +#define CM_GSTATUS_BASE(node)          ((node) * CM_GSTATUS_BITS)
42926 +
42927 +#if defined(PER_CPU_TIMEOUT)
42928 +typedef struct cm_timeout_data
42929 +{
42930 +    long               ScheduledAt;                            /* lbolt timeout was scheduled to run at */
42931 +
42932 +    unsigned long       EarlyCount;                            /* # times run earlier than NextRun */
42933 +    unsigned long      MissedCount;                            /* # times run on time - but someone else was running it */
42934 +    unsigned long       WastedCount;                           /* # times we failed to get the spinlock */
42935 +    unsigned long      WorkCount;                              /* # times we're the one running */
42936 +
42937 +    unsigned long      WorstDelay;                             /* worst scheduling delay */
42938 +    unsigned long      BestDelay;                              /* best scheduling delay */
42939 +
42940 +    unsigned long      WorstLockDelay;                         /* worst delay before getting rail->Lock */
42941 +
42942 +    unsigned long      WorstHearbeatDelay;                     /* worst delay before calling DoHeartbeatWork */
42943 +} CM_TIMEOUT_DATA;
42944 +#endif
42945 +
42946 +typedef struct cm_rail
42947 +{
42948 +    EP_RAIL          *Rail;                                    /* rail we're associated with */
42949 +    struct list_head   Link;                                   /*   and linked on the CM_SUBSYS */
42950 +
42951 +    uint32_t          ParamHash;                               /* hash of critical parameters */
42952 +    uint32_t           Timestamp;
42953 +    long              DiscoverStartTick;                       /* when discovery started */
42954 +
42955 +    unsigned int       NodeId;                                 /* my node id */
42956 +    unsigned int       NumNodes;                               /*   and number of nodes */
42957 +    unsigned int       NumLevels;                              /* number of levels computed from machine size */
42958 +    int                       BroadcastLevel;
42959 +    long              BroadcastLevelTick;
42960 +    unsigned int       TopLevel;                               /* level at which I'm not a leader */
42961 +    unsigned char      Role;                                   /* state at TopLevel */
42962 +
42963 +    EP_INPUTQ        *PolledQueue;                             /* polled input queue */
42964 +    EP_INPUTQ        *IntrQueue;                               /* intr input queue */
42965 +    EP_OUTPUTQ       *MsgQueue;                                /* message  */
42966 +    unsigned int       NextSpareMsg;                           /* next "spare" message buffer to use */
42967 +
42968 +    EP_CM_RAIL_STATS   Stats;                                  /* statistics */
42969 +
42970 +    kmutex_t          Mutex;
42971 +    spinlock_t        Lock;
42972 +    
42973 +    long              NextHeartbeatTime;                       /* next time to check/send heartbeats */
42974 +    long              NextDiscoverTime;                        /* next time to progress discovery  */
42975 +    long              NextRunTime;                             /* the earlier of the above two or intr requires inputq poll*/
42976 +
42977 +    unsigned int       OfflineReasons;                         /* forced offline by procfs/manager thread stuck */
42978 +
42979 +#if defined(PER_CPU_TIMEOUT)
42980 +    spinlock_t        HeartbeatTimeoutsLock;                   /* spinlock to sequentialise per-cpu timeouts */
42981 +    long              HeartbeatTimeoutsStarted;                /* bitmap of which timeouts have started */
42982 +    long              HeartbeatTimeoutsStopped;                /* bitmap of which timeouts have stopped */
42983 +    long              HeartbeatTimeoutsShouldStop;             /* flag to indicate timeouts should stop */
42984 +    kcondvar_t        HeartbeatTimeoutsWait;                   /* place to sleep waiting for timeouts to stop */
42985 +    long              HeartbeatTimeoutRunning;                 /* someone is running the timeout - don't try for the lock */
42986 +
42987 +    long              HeartbeatTimeoutOverdue;                 /* heartbeat seen as overdue - interrupt requested */
42988 +
42989 +    CM_TIMEOUT_DATA   *HeartbeatTimeoutsData;                  /* per timeout data */
42990 +#else
42991 +    struct timer_list  HeartbeatTimer;                         /* timer for heartbeat/discovery */
42992 +#endif
42993 +
42994 +    CM_LEVEL           Levels[CM_MAX_LEVELS];
42995 +} CM_RAIL;
42996 +
42997 +/* OfflineReasons (both per-rail and per-level) */
42998 +#define CM_OFFLINE_BROADCAST           (1 << 0)
42999 +#define CM_OFFLINE_PROCFS              (1 << 1)
43000 +#define CM_OFFLINE_MANAGER             (1 << 2)
43001 +
43002 +typedef struct cm_subsys
43003 +{
43004 +    EP_SUBSYS          Subsys;
43005 +    CM_RAIL            *Rails[EP_MAX_RAILS];
43006 +} CM_SUBSYS;
43007 +
43008 +extern int  MachineId;
43009 +
43010 +extern void cm_node_disconnected (EP_RAIL *rail, unsigned nodeId);
43011 +extern void cm_restart_node (EP_RAIL *rail, unsigned nodeId);
43012 +extern void cm_restart_comms (CM_RAIL *cmRail);
43013 +extern int  cm_init (EP_SYS *sys);
43014 +
43015 +extern void DisplayRail(EP_RAIL *rail);
43016 +extern void DisplaySegs (EP_RAIL *rail);
43017 +extern void DisplayStatus (EP_RAIL *rail);
43018 +
43019 +typedef struct proc_private
43020 +{
43021 +    struct nodeset_private *pr_next;
43022 +    EP_RAIL                *pr_rail;
43023 +    char                  *pr_data;
43024 +    int                     pr_data_len;
43025 +    unsigned               pr_off;
43026 +    unsigned               pr_len;
43027 +    DisplayInfo             pr_di;
43028 +} PROC_PRIVATE;
43029 +
43030 +extern void    proc_character_fill (long mode, char *fmt, ...);
43031 +extern int     proc_release (struct inode *inode, struct file *file);
43032 +extern ssize_t proc_read (struct file *file, char *buf, size_t count, loff_t *ppos);
43033 +
43034 +
43035 +extern void DisplayNodeMaps  (DisplayInfo *di, CM_RAIL *cmRail);
43036 +extern void DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail);
43037 +extern void DisplayRailDo    (DisplayInfo *di, EP_RAIL *rail);
43038 +
43039 +extern int    cm_read_cluster(EP_RAIL *rail,char *page);
43040 +extern void   cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason);
43041 +
43042 +extern int    cm_svc_indicator_set      (EP_RAIL *rail, int svc_indicator);
43043 +extern int    cm_svc_indicator_clear    (EP_RAIL *rail, int svc_indicator);
43044 +extern int    cm_svc_indicator_is_set   (EP_RAIL *rail, int svc_indicator, int nodeId);
43045 +extern int    cm_svc_indicator_bitmap   (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
43046 +
43047 +/* cm_procfs.c */
43048 +extern void   cm_procfs_init (CM_SUBSYS *subsys);
43049 +extern void   cm_procfs_fini (CM_SUBSYS *subsys);
43050 +extern void   cm_procfs_rail_init (CM_RAIL *rail);
43051 +extern void   cm_procfs_rail_fini (CM_RAIL *rail);
43052 +
43053 +/*
43054 + * Local variables:
43055 + * c-file-style: "stroustrup"
43056 + * End:
43057 + */
43058 +#endif /* __ELAN_CM_H */
43059 +
43060 Index: linux-2.4.21/drivers/net/qsnet/ep/cm_procfs.c
43061 ===================================================================
43062 --- linux-2.4.21.orig/drivers/net/qsnet/ep/cm_procfs.c  2004-02-23 16:02:56.000000000 -0500
43063 +++ linux-2.4.21/drivers/net/qsnet/ep/cm_procfs.c       2005-06-01 23:12:54.633433936 -0400
43064 @@ -0,0 +1,254 @@
43065 +/*
43066 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
43067 + *    Copyright (c) 2002-2005 by Quadrics Ltd.
43068 + *
43069 + *    For licensing information please see the supplied COPYING file
43070 + *
43071 + */
43072 +
43073 +#ident "@(#)$Id: cm_procfs.c,v 1.5 2004/05/14 09:23:13 daniel Exp $"
43074 +/*      $Source: /cvs/master/quadrics/epmod/cm_procfs.c,v $ */
43075 +
43076 +#include <qsnet/kernel.h>
43077 +
43078 +#include <elan/kcomm.h>
43079 +
43080 +#include "kcomm_vp.h"
43081 +#include "debug.h"
43082 +#include "cm.h"
43083 +#include <elan/epsvc.h>
43084 +
43085 +#include <qsnet/procfs_linux.h>
43086 +
43087 +extern char *sprintClPeers (char *str, CM_RAIL *cmRail, int clvl);
43088 +
43089 +static int
43090 +proc_read_cluster(char *page, char **start, off_t off,
43091 +               int count, int *eof, void *data)
43092 +{
43093 +    CM_RAIL *cmRail = (CM_RAIL *) data;
43094 +    char    *p      = page;
43095 +
43096 +    page[0] = 0;
43097 +
43098 +    if (cmRail->Rail->State != EP_RAIL_STATE_RUNNING)
43099 +       p += sprintf(p, "<not running>\n");
43100 +    else
43101 +    {
43102 +       CM_LEVEL *cmLevel;
43103 +       unsigned long flags;
43104 +       int  i, j;
43105 +       char clNodeStr[32]; /* [%d-%d][%d-%d] */
43106 +       char seperate_with;
43107 +
43108 +       struct { int val; char *name; } bitvals[] = {
43109 +           {CM_OFFLINE_BROADCAST, "Broadcast"},
43110 +           {CM_OFFLINE_PROCFS,    "Offline"},
43111 +           {CM_OFFLINE_MANAGER,   "Manager"}};
43112 +       
43113 +       spin_lock_irqsave (&cmRail->Lock, flags);
43114 +       
43115 +       for (i = 0; i < cmRail->NumLevels; i++)
43116 +       {
43117 +           cmLevel = &cmRail->Levels[i];
43118 +           
43119 +           p += sprintf(p, "%23s %7s ", sprintClPeers (clNodeStr, cmRail, i), cmLevel->Online?"Online":"Offline");
43120 +           
43121 +           if ((cmLevel->Online ) | ( cmLevel->Connected > 0))
43122 +               p += sprintf(p, "Connected=%lu ", cmLevel->Connected);
43123 +           
43124 +           seperate_with = '<';
43125 +           
43126 +           if ( cmLevel->Restarting ) {
43127 +               p += sprintf(p, "%cRestarting", seperate_with);
43128 +               seperate_with = ',';
43129 +           }
43130 +           
43131 +           if ( ! (cmLevel->GlobalMapValid & cmLevel->SubTreeMapValid )) {
43132 +               p += sprintf(p, "%cMap Not Valid", seperate_with);
43133 +               seperate_with = ',';
43134 +           }
43135 +           
43136 +           if ( cmLevel->OfflineReasons ) {
43137 +               for (j = 0; j < sizeof (bitvals)/sizeof(bitvals[0]); j++)
43138 +                   if (cmLevel->OfflineReasons & bitvals[j].val) {
43139 +                       p += sprintf(p, "%c%s", seperate_with, bitvals[j].name);
43140 +                       seperate_with = ',';
43141 +                   }
43142 +           }
43143 +           if ( cmRail->OfflineReasons ) {
43144 +               for (j = 0; j < sizeof (bitvals)/sizeof(bitvals[0]); j++)
43145 +                   if (cmRail->OfflineReasons & bitvals[j].val) {
43146 +                       p += sprintf(p, "%c%s", seperate_with, bitvals[j].name);
43147 +                       seperate_with = ',';
43148 +                   }
43149 +           }
43150 +           
43151 +           if ( seperate_with != '<' ) 
43152 +               p += sprintf(p,">\n");
43153 +           else
43154 +               p += sprintf(p,"\n");
43155 +       }
43156 +       
43157 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
43158 +    }
43159 +
43160 +    return qsnet_proc_calc_metrics (page, start, off, count, eof, p - page);
43161 +}
43162 +
43163 +static struct rail_info
43164 +{
43165 +    char *name;
43166 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
43167 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
43168 +} rail_info[] = {
43169 +    {"cluster", proc_read_cluster, NULL},
43170 +};
43171 +
43172 +struct proc_dir_entry *svc_indicators_root;
43173 +
43174 +typedef struct svc_indicator_data
43175 +{
43176 +    int       svc_indicator;
43177 +    EP_RAIL  *rail;
43178 +} SVC_INDICATOR_DATA;
43179 +
43180 +static SVC_INDICATOR_DATA svc_indicator_data[EP_SVC_NUM_INDICATORS][EP_MAX_RAILS];
43181 +static char              *svc_indicator_names[EP_SVC_NUM_INDICATORS] = EP_SVC_NAMES;
43182 +
43183 +static int
43184 +proc_read_svc_indicator_rail_bitmap (char *page, char **start, off_t off,
43185 +                                    int count, int *eof, void *data)
43186 +{
43187 +    SVC_INDICATOR_DATA  *svc_data = (SVC_INDICATOR_DATA  *)data;
43188 +    unsigned int        nnodes   = ep_numnodes (ep_system());
43189 +    bitmap_t           *bitmap;
43190 +
43191 +    KMEM_ZALLOC (bitmap, bitmap_t *, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)), 1);
43192 +
43193 +    cm_svc_indicator_bitmap (svc_data->rail, svc_data->svc_indicator, bitmap, 0, nnodes);
43194 +
43195 +    ep_sprintf_bitmap (page, PAGESIZE, bitmap, 0, 0, nnodes);
43196 +    
43197 +    KMEM_FREE (bitmap, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)));
43198 +    
43199 +    strcat (page, "\n");
43200 +
43201 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
43202 +}
43203 +
43204 +static int
43205 +proc_read_svc_indicator_bitmap(char *page, char **start, off_t off,
43206 +                              int count, int *eof, void *data)
43207 +{
43208 +    unsigned int         num      = (unsigned long) data;
43209 +    EP_SYS              *sys      = ep_system();
43210 +    unsigned int         nnodes   = ep_numnodes (sys);
43211 +    bitmap_t            *bitmap;
43212 +
43213 +    KMEM_ALLOC(bitmap, bitmap_t *, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)), 1);
43214 +     
43215 +    ep_svc_indicator_bitmap (sys, num, bitmap, 0, nnodes);
43216 +
43217 +    ep_sprintf_bitmap (page, PAGESIZE, bitmap, 0, 0, nnodes);
43218 +    
43219 +    KMEM_FREE (bitmap, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)));
43220 +    
43221 +    strcat (page, "\n");
43222 +
43223 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
43224 +}
43225 +
43226 +void
43227 +cm_procfs_rail_init (CM_RAIL *cmRail)
43228 +{
43229 +    EP_RAIL *rail = cmRail->Rail;
43230 +    struct proc_dir_entry *p;
43231 +    int i;
43232 +    
43233 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
43234 +    {
43235 +       if ((p = create_proc_entry (rail_info[i].name, 0, cmRail->Rail->ProcDir)) != NULL)
43236 +       {
43237 +           p->read_proc  = rail_info[i].read_func;
43238 +           p->write_proc = rail_info[i].write_func;
43239 +           p->data       = cmRail;
43240 +           p->owner      = THIS_MODULE;
43241 +       }
43242 +    }
43243 +
43244 +    if ((rail->SvcIndicatorDir = proc_mkdir ("svc_indicators", cmRail->Rail->ProcDir)) != NULL)
43245 +    {
43246 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
43247 +       {
43248 +           if ((p = create_proc_entry (svc_indicator_names[i], 0, rail->SvcIndicatorDir)) != NULL)
43249 +           {
43250 +               svc_indicator_data[i][rail->Number].svc_indicator = i;
43251 +               svc_indicator_data[i][rail->Number].rail          = rail; 
43252 +               
43253 +               p->write_proc = NULL;
43254 +               p->read_proc  = proc_read_svc_indicator_rail_bitmap;
43255 +               p->data       = (void *)&svc_indicator_data[i][rail->Number];
43256 +               p->owner      = THIS_MODULE;
43257 +           }
43258 +       }
43259 +    }
43260 +}
43261 +
43262 +void
43263 +cm_procfs_rail_fini (CM_RAIL *cmRail)
43264 +{
43265 +    EP_RAIL *rail = cmRail->Rail;
43266 +    int i;
43267 +
43268 +    if (rail->SvcIndicatorDir)
43269 +    {
43270 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
43271 +           remove_proc_entry (svc_indicator_names[i], rail->SvcIndicatorDir);
43272 +
43273 +       remove_proc_entry ("svc_indicators", cmRail->Rail->ProcDir);
43274 +    }
43275 +
43276 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
43277 +       remove_proc_entry (rail_info[i].name, cmRail->Rail->ProcDir);
43278 +}
43279 +
43280 +void
43281 +cm_procfs_init (CM_SUBSYS *subsys)
43282 +{
43283 +    struct proc_dir_entry *p;
43284 +    int i;
43285 +
43286 +    qsnet_proc_register_hex (ep_config_root, "machine_id",      &MachineId,      0);
43287 +
43288 +    if ((svc_indicators_root = proc_mkdir("svc_indicators", ep_procfs_root)) != NULL)
43289 +    {
43290 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
43291 +       {
43292 +           if ((p = create_proc_entry (svc_indicator_names[i], 0, svc_indicators_root)) != NULL)
43293 +           {
43294 +               p->write_proc = NULL;
43295 +               p->read_proc  = proc_read_svc_indicator_bitmap;
43296 +               p->data       = (void *)(long) i;
43297 +               p->owner      = THIS_MODULE;
43298 +           }
43299 +       }
43300 +       
43301 +    }
43302 +}
43303 +
43304 +void
43305 +cm_procfs_fini (CM_SUBSYS *subsys)
43306 +{
43307 +    int i;
43308 +
43309 +    if (svc_indicators_root)
43310 +    {
43311 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
43312 +           remove_proc_entry (svc_indicator_names[i], svc_indicators_root);
43313 +       
43314 +       remove_proc_entry ("svc_indicators",   ep_procfs_root);
43315 +    }
43316 +
43317 +    remove_proc_entry ("machine_id",      ep_config_root);
43318 +}
43319 Index: linux-2.4.21/drivers/net/qsnet/ep/commands_elan4.c
43320 ===================================================================
43321 --- linux-2.4.21.orig/drivers/net/qsnet/ep/commands_elan4.c     2004-02-23 16:02:56.000000000 -0500
43322 +++ linux-2.4.21/drivers/net/qsnet/ep/commands_elan4.c  2005-06-01 23:12:54.634433784 -0400
43323 @@ -0,0 +1,173 @@
43324 +/*
43325 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
43326 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
43327 + *
43328 + *    For licensing information please see the supplied COPYING file
43329 + *
43330 + */
43331 +
43332 +#ident "@(#)$Id: commands_elan4.c,v 1.2 2003/10/23 15:07:53 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
43333 +/*      $Source: /cvs/master/quadrics/epmod/commands_elan4.c,v $*/
43334 +
43335 +#include <qsnet/kernel.h>
43336 +
43337 +#include <elan/kcomm.h>
43338 +
43339 +#include "kcomm_vp.h"
43340 +#include "kcomm_elan4.h"
43341 +#include "debug.h"
43342 +
43343 +#include <elan4/trtype.h>
43344 +
43345 +static __inline__ void
43346 +elan4_command_write (ELAN4_CQ *cq, E4_uint64 val, unsigned off)
43347 +{
43348 +    writeq (val, cq->cq_mapping + offsetof (E4_CommandPort, Command[off]));
43349 +}
43350 +
43351 +void
43352 +elan4_nop_cmd (ELAN4_CQ *cq, E4_uint64 tag)
43353 +{
43354 +    elan4_command_write (cq, tag | NOP_CMD, 0);
43355 +}
43356 +
43357 +void
43358 +elan4_write_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data)
43359 +{
43360 +    elan4_command_write (cq, addr | WRITE_DWORD_CMD, 0);
43361 +    elan4_command_write (cq, data, 1);
43362 +}
43363 +
43364 +void
43365 +elan4_add_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data)
43366 +{
43367 +    elan4_command_write (cq, addr | ADD_DWORD_CMD, 0);
43368 +    elan4_command_write (cq, data,                 1);
43369 +}
43370 +
43371 +void
43372 +elan4_copy64_cmd (ELAN4_CQ *cq, E4_Addr from, E4_Addr to, E4_uint32 datatype)
43373 +{
43374 +    elan4_command_write (cq, from | (datatype << COPY64_DATA_TYPE_SHIFT) | COPY64_CMD, 0);
43375 +    elan4_command_write (cq, to   | (datatype << COPY64_DATA_TYPE_SHIFT),              1);
43376 +}
43377 +
43378 +void
43379 +elan4_interrupt_cmd (ELAN4_CQ *cq, E4_uint64 cookie)
43380 +{
43381 +    elan4_command_write (cq, (cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD, 0);
43382 +}
43383 +
43384 +
43385 +void 
43386 +elan4_run_thread_cmd (ELAN4_CQ *cq, E4_ThreadRegs *regs)
43387 +{
43388 +    elan4_command_write (cq, regs->Registers[0] | RUN_THREAD_CMD, 0);
43389 +    elan4_command_write (cq, regs->Registers[1],                  1);
43390 +    elan4_command_write (cq, regs->Registers[2],                  2);
43391 +    elan4_command_write (cq, regs->Registers[3],                  3);
43392 +    elan4_command_write (cq, regs->Registers[4],                  4);
43393 +    elan4_command_write (cq, regs->Registers[5],                  5);
43394 +    elan4_command_write (cq, regs->Registers[6],                  6);
43395 +}
43396 +
43397 +void
43398 +elan4_run_dma_cmd (ELAN4_CQ *cq, E4_DMA *dma)
43399 +{
43400 +    E4_uint64 *dmaptr = (E4_uint64 *) dma;
43401 +
43402 +    elan4_command_write (cq, dmaptr[0] | RUN_DMA_CMD, 0);
43403 +    elan4_command_write (cq, dmaptr[1],               1);
43404 +    elan4_command_write (cq, dmaptr[2],               2);
43405 +    elan4_command_write (cq, dmaptr[3],               3);
43406 +    elan4_command_write (cq, dmaptr[4],               4);
43407 +    elan4_command_write (cq, dmaptr[5],               5);
43408 +    elan4_command_write (cq, dmaptr[6],               6);
43409 +}
43410 +
43411 +void
43412 +elan4_set_event_cmd (ELAN4_CQ *cq, E4_Addr event)
43413 +{
43414 +    elan4_command_write (cq, event | SET_EVENT_CMD, 0);
43415 +}
43416 +
43417 +void
43418 +elan4_set_eventn_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint32 count)
43419 +{
43420 +    elan4_command_write (cq, SET_EVENTN_CMD,0);
43421 +    elan4_command_write (cq, event | count, 1);
43422 +}
43423 +    
43424 +void
43425 +elan4_wait_event_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1)
43426 +{
43427 +    elan4_command_write (cq, event | WAIT_EVENT_CMD, 0);
43428 +    elan4_command_write (cq, candt,                  1);
43429 +    elan4_command_write (cq, param0,                 2);
43430 +    elan4_command_write (cq, param1,                 3);
43431 +}
43432 +
43433 +void
43434 +elan4_open_packet (ELAN4_CQ *cq, E4_uint64 command)
43435 +{
43436 +    elan4_command_write (cq, command | OPEN_STEN_PKT_CMD, 0);
43437 +}
43438 +
43439 +void
43440 +elan4_guard (ELAN4_CQ *cq, E4_uint64 command)
43441 +{
43442 +    elan4_command_write (cq, command | GUARD_CMD, 0);
43443 +}
43444 +
43445 +void
43446 +elan4_sendtrans0 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr)
43447 +{
43448 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
43449 +    elan4_command_write (cq, addr,                               1);
43450 +}
43451 +
43452 +void
43453 +elan4_sendtrans1 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0)
43454 +{
43455 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
43456 +    elan4_command_write (cq, addr,                               1);
43457 +    elan4_command_write (cq, p0,                                 2);
43458 +}
43459 +
43460 +void
43461 +elan4_sendtrans2 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0, E4_uint64 p1)
43462 +{
43463 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
43464 +    elan4_command_write (cq, addr,                               1);
43465 +    elan4_command_write (cq, p0,                                 2);
43466 +    elan4_command_write (cq, p1,                                 3);
43467 +}
43468 +
43469 +void
43470 +elan4_sendtransn (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, ...)
43471 +{
43472 +    E4_uint32    ndword = ((trtype & TR_SIZE_MASK) >> TR_SIZE_SHIFT);
43473 +    va_list      ap;
43474 +    register int i;
43475 +
43476 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
43477 +    elan4_command_write (cq, addr,                               1);
43478 +    
43479 +    va_start (ap, addr);
43480 +    for (i = 2; i < ndword+2; i++) 
43481 +       elan4_command_write (cq, va_arg (ap, E4_uint64), i);
43482 +    va_end (ap);
43483 +}
43484 +
43485 +void
43486 +elan4_sendtransp (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 *ptr)
43487 +{
43488 +    E4_uint32    ndword = ((trtype &TR_SIZE_MASK) >> TR_SIZE_SHIFT);
43489 +    register int i;
43490 +
43491 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
43492 +    elan4_command_write (cq, addr,                            1);
43493 +    for (i = 2; i < ndword+2; i++)
43494 +       elan4_command_write (cq, *ptr++, i);
43495 +}
43496 +
43497 Index: linux-2.4.21/drivers/net/qsnet/ep/conf_linux.c
43498 ===================================================================
43499 --- linux-2.4.21.orig/drivers/net/qsnet/ep/conf_linux.c 2004-02-23 16:02:56.000000000 -0500
43500 +++ linux-2.4.21/drivers/net/qsnet/ep/conf_linux.c      2005-06-01 23:12:54.635433632 -0400
43501 @@ -0,0 +1,309 @@
43502 +/*
43503 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
43504 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
43505 + *
43506 + *    For licensing information please see the supplied COPYING file
43507 + *
43508 + */
43509 +
43510 +#ident "@(#)$Id: conf_linux.c,v 1.37.2.3 2005/01/18 14:47:35 david Exp $"
43511 +/*      $Source: /cvs/master/quadrics/epmod/conf_linux.c,v $ */
43512 +
43513 +#include <qsnet/kernel.h>
43514 +#include <qsnet/autoconf.h>
43515 +
43516 +#include <elan/kcomm.h>
43517 +#include <elan/epsvc.h>
43518 +#include <elan/epcomms.h>
43519 +
43520 +#include "cm.h"
43521 +
43522 +#include "conf_linux.h"
43523 +
43524 +#include <linux/init.h>
43525 +#include <linux/module.h>
43526 +#include <linux/reboot.h>
43527 +#include <linux/notifier.h>
43528 +
43529 +/* Module parameters */
43530 +unsigned int epdebug        = 0;
43531 +unsigned int epdebug_console = 0;
43532 +unsigned int epdebug_cmlevel = 0;
43533 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
43534 +unsigned int epdebug_check_sum = 0;
43535 +#endif
43536 +int         disabled        = 0;
43537 +int          sdram_assert    = 0;
43538 +int          assfail_mode    = 0;
43539 +int         txd_stabilise   = 7;
43540 +int         portals_envelopes = 0;
43541 +
43542 +/* External module parameters */
43543 +extern int     MaxSwitchLevels;
43544 +extern int      RejoinCheck;
43545 +extern int      RejoinPanic;
43546 +extern int      PositionCheck;
43547 +extern int      MachineId;
43548 +
43549 +/* Module globals */
43550 +EP_SYS          epsys;
43551 +
43552 +#ifdef MODULE
43553 +MODULE_AUTHOR("Quadrics Ltd");
43554 +MODULE_DESCRIPTION("Elan Kernel Comms");
43555 +
43556 +MODULE_LICENSE("GPL");
43557 +
43558 +MODULE_PARM(epdebug,         "i");
43559 +MODULE_PARM(epdebug_console, "i");
43560 +MODULE_PARM(epdebug_cmlevel, "i");
43561 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
43562 +MODULE_PARM(epdebug_check_sum, "i");
43563 +#endif
43564 +MODULE_PARM(disabled,        "i");
43565 +
43566 +MODULE_PARM(MachineId,       "i");
43567 +MODULE_PARM(RejoinPanic,     "i");
43568 +MODULE_PARM(RejoinCheck,     "i");
43569 +MODULE_PARM(PositionCheck,   "i");
43570 +MODULE_PARM(MaxSwitchLevels, "i");
43571 +
43572 +MODULE_PARM(sdram_assert,    "i");
43573 +MODULE_PARM(assfail_mode,    "i");
43574 +MODULE_PARM(txd_stabilise,   "i");
43575 +MODULE_PARM(portals_envelopes,"i");
43576 +
43577 +/* epcomms.c large message service functions */
43578 +EXPORT_SYMBOL(ep_alloc_xmtr);
43579 +EXPORT_SYMBOL(ep_free_xmtr);
43580 +EXPORT_SYMBOL(ep_transmit_message);
43581 +EXPORT_SYMBOL(ep_multicast_message);
43582 +EXPORT_SYMBOL(ep_transmit_rpc);
43583 +
43584 +EXPORT_SYMBOL(ep_alloc_rcvr);
43585 +EXPORT_SYMBOL(ep_free_rcvr);
43586 +EXPORT_SYMBOL(ep_queue_receive);
43587 +EXPORT_SYMBOL(ep_requeue_receive);
43588 +EXPORT_SYMBOL(ep_rpc_put);
43589 +EXPORT_SYMBOL(ep_rpc_get);
43590 +EXPORT_SYMBOL(ep_complete_rpc);
43591 +EXPORT_SYMBOL(ep_complete_receive);
43592 +
43593 +EXPORT_SYMBOL(ep_poll_transmits);
43594 +EXPORT_SYMBOL(ep_enable_txcallbacks);
43595 +EXPORT_SYMBOL(ep_disable_txcallbacks);
43596 +
43597 +/* epcomms.c functions for accessing fields of rxds/txds */
43598 +EXPORT_SYMBOL(ep_rxd_arg);
43599 +EXPORT_SYMBOL(ep_rxd_len);
43600 +EXPORT_SYMBOL(ep_rxd_isrpc);
43601 +EXPORT_SYMBOL(ep_rxd_envelope);
43602 +EXPORT_SYMBOL(ep_rxd_payload);
43603 +EXPORT_SYMBOL(ep_rxd_node);
43604 +EXPORT_SYMBOL(ep_rxd_status);
43605 +EXPORT_SYMBOL(ep_rxd_statusblk);
43606 +EXPORT_SYMBOL(ep_txd_node);
43607 +EXPORT_SYMBOL(ep_txd_statusblk);
43608 +
43609 +/* kmap.c, nmh.c - handling mapping of pages into network memory */
43610 +EXPORT_SYMBOL(ep_dvma_reserve);
43611 +EXPORT_SYMBOL(ep_dvma_release);
43612 +EXPORT_SYMBOL(ep_dvma_load);
43613 +EXPORT_SYMBOL(ep_dvma_unload);
43614 +EXPORT_SYMBOL(ep_nmd_subset);
43615 +EXPORT_SYMBOL(ep_nmd_merge);
43616 +
43617 +EXPORT_SYMBOL(ep_system);
43618 +
43619 +/* kcomm.c */
43620 +EXPORT_SYMBOL(ep_nodeid);
43621 +EXPORT_SYMBOL(ep_numnodes);
43622 +EXPORT_SYMBOL(ep_waitfor_nodeid);
43623 +
43624 +/* railhints.c */
43625 +EXPORT_SYMBOL(ep_pickRail);
43626 +EXPORT_SYMBOL(ep_xmtr_bcastrail);
43627 +EXPORT_SYMBOL(ep_xmtr_prefrail);
43628 +EXPORT_SYMBOL(ep_xmtr_availrails);
43629 +EXPORT_SYMBOL(ep_xmtr_noderails);
43630 +EXPORT_SYMBOL(ep_rcvr_prefrail);
43631 +EXPORT_SYMBOL(ep_rcvr_availrails);
43632 +EXPORT_SYMBOL(ep_rxd_railmask);
43633 +
43634 +EXPORT_SYMBOL(ep_svc_indicator_bitmap);
43635 +EXPORT_SYMBOL(ep_svc_indicator_is_set);
43636 +EXPORT_SYMBOL(ep_svc_indicator_clear);
43637 +EXPORT_SYMBOL(ep_svc_indicator_set);
43638 +
43639 +/* cm.c */
43640 +EXPORT_SYMBOL(cm_svc_indicator_clear);
43641 +EXPORT_SYMBOL(cm_svc_indicator_set);
43642 +EXPORT_SYMBOL(cm_svc_indicator_is_set);
43643 +EXPORT_SYMBOL(cm_svc_indicator_bitmap);
43644 +
43645 +#endif
43646 +
43647 +EP_SYS *
43648 +ep_system()
43649 +{
43650 +    return (&epsys);
43651 +}
43652 +
43653 +void
43654 +ep_mod_inc_usecount()
43655 +{
43656 +    MOD_INC_USE_COUNT;
43657 +} 
43658 +
43659 +void
43660 +ep_mod_dec_usecount()
43661 +{
43662 +    MOD_DEC_USE_COUNT;
43663 +}
43664 +
43665 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
43666 +
43667 +#include <linux/dump.h>
43668 +
43669 +static int
43670 +ep_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
43671 +{
43672 +    if (event == DUMP_BEGIN)
43673 +       ep_shutdown (&epsys);
43674 +
43675 +    return (NOTIFY_DONE);
43676 +}
43677 +static struct notifier_block ep_dump_notifier = 
43678 +{
43679 +    notifier_call:     ep_dump_event,
43680 +    priority:          0,
43681 +};
43682 +
43683 +#endif
43684 +
43685 +static int
43686 +ep_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
43687 +{
43688 +    if ((event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
43689 +       ep_shutdown (&epsys);
43690 +
43691 +    return (NOTIFY_DONE);
43692 +}
43693 +
43694 +static struct notifier_block ep_reboot_notifier = 
43695 +{
43696 +    notifier_call:     ep_reboot_event,
43697 +    priority:          0,
43698 +};
43699 +
43700 +static int
43701 +ep_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
43702 +{
43703 +    ep_shutdown (&epsys);
43704 +
43705 +    return (NOTIFY_DONE);
43706 +}
43707 +
43708 +static struct notifier_block ep_panic_notifier = 
43709 +{
43710 +    notifier_call:     ep_panic_event,
43711 +    priority:          0,
43712 +};
43713 +
43714 +/*
43715 + * Module configuration. 
43716 + */
43717 +#ifdef MODULE
43718 +static int __init ep_init(void)
43719 +#else
43720 +__initfunc(int ep_init(void))
43721 +#endif
43722 +{
43723 +    register int rmask = 0;
43724 +
43725 +    ep_procfs_init ();
43726 +
43727 +    ep_sys_init (&epsys);
43728 +
43729 +#if defined(CONFIG_ELAN4) || defined(CONFIG_ELAN4_MODULE)
43730 +    rmask = ep4_create_rails (&epsys, disabled);
43731 +#endif
43732 +    
43733 +    /* If we've brought up an elan4 rail, then disable all elan3 rails. */
43734 +    if ((rmask & ~disabled) != 0)
43735 +       disabled = ~rmask;
43736 +
43737 +#if defined(CONFIG_ELAN3) || defined(CONFIG_ELAN3_MODULE)
43738 +    rmask = ep3_create_rails (&epsys, disabled);
43739 +#endif
43740 +
43741 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
43742 +    register_dump_notifier (&ep_dump_notifier);
43743 +#endif
43744 +    register_reboot_notifier (&ep_reboot_notifier);
43745 +
43746 +#if !defined(NO_PANIC_NOTIFIER)
43747 +    notifier_chain_register (&panic_notifier_list, &ep_panic_notifier);
43748 +#endif
43749 +
43750 +    return (0);
43751 +}
43752 +
43753 +/*
43754 + * Module removal.
43755 + */
43756 +#ifdef MODULE
43757 +static void
43758 +__exit ep_exit(void)
43759 +{
43760 +    register int i;
43761 +
43762 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
43763 +    unregister_dump_notifier (&ep_dump_notifier);
43764 +#endif
43765 +    unregister_reboot_notifier (&ep_reboot_notifier);
43766 +
43767 +#if !defined(NO_PANIC_NOTIFIER)
43768 +    notifier_chain_unregister (&panic_notifier_list, &ep_panic_notifier);
43769 +#endif
43770 +
43771 +    for (i = 0; i < EP_MAX_RAILS; i++)
43772 +    {
43773 +       if (epsys.Rails[i])
43774 +       {
43775 +           switch (epsys.Rails[i]->State)
43776 +           {
43777 +           case EP_RAIL_STATE_UNINITIALISED:
43778 +               break;
43779 +
43780 +           case EP_RAIL_STATE_STARTED:
43781 +           case EP_RAIL_STATE_RUNNING:
43782 +           case EP_RAIL_STATE_INCOMPATIBLE:
43783 +               /* remove per-rail CM proc entries */
43784 +               ep_stop_rail (epsys.Rails[i]);
43785 +               break;
43786 +           }
43787 +
43788 +           /* remove EP proc rail entries after per-rail CM entries */
43789 +           ep_procfs_rail_fini (epsys.Rails[i]);
43790 +           ep_destroy_rail (epsys.Rails[i]);
43791 +       }
43792 +    }
43793 +
43794 +    ep_sys_fini (&epsys);
43795 +
43796 +    ep_procfs_fini ();
43797 +}
43798 +
43799 +/* Declare the module init and exit functions */
43800 +module_init(ep_init);
43801 +module_exit(ep_exit);
43802 +
43803 +#endif
43804 +
43805 +
43806 +/*
43807 + * Local variables:
43808 + * c-file-style: "stroustrup"
43809 + * End:
43810 + */
43811 Index: linux-2.4.21/drivers/net/qsnet/ep/conf_linux.h
43812 ===================================================================
43813 --- linux-2.4.21.orig/drivers/net/qsnet/ep/conf_linux.h 2004-02-23 16:02:56.000000000 -0500
43814 +++ linux-2.4.21/drivers/net/qsnet/ep/conf_linux.h      2005-06-01 23:12:54.635433632 -0400
43815 @@ -0,0 +1,29 @@
43816 +/*
43817 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
43818 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
43819 + *
43820 + *    For licensing information please see the supplied COPYING file
43821 + *
43822 + */
43823 +
43824 +#ident "@(#)$Id: conf_linux.h,v 1.6 2003/10/02 14:16:07 mike Exp $"
43825 +/*      $Source: /cvs/master/quadrics/epmod/conf_linux.h,v $*/
43826 +
43827 +#ifndef __ELAN_CONF_LINUX_H
43828 +#define __ELAN_CONF_LINUX_H
43829 +
43830 +extern void ep_procfs_init(void);
43831 +extern void ep_procfs_fini(void);
43832 +extern void ep_procfs_rail_init(EP_RAIL *rail);
43833 +extern void ep_procfs_rail_fini(EP_RAIL *rail);
43834 +
43835 +extern void ep_procfs_svc_indicator_create(int svc_indicator, char *name);
43836 +extern void ep_procfs_svc_indicator_remove(int svc_indicator, char *name);
43837 +
43838 +#endif /* __ELAN_CONF_LINUX_H */
43839 +
43840 +/*
43841 + * Local variables:
43842 + * c-file-style: "stroustrup"
43843 + * End:
43844 + */
43845 Index: linux-2.4.21/drivers/net/qsnet/ep/debug.c
43846 ===================================================================
43847 --- linux-2.4.21.orig/drivers/net/qsnet/ep/debug.c      2004-02-23 16:02:56.000000000 -0500
43848 +++ linux-2.4.21/drivers/net/qsnet/ep/debug.c   2005-06-01 23:12:54.635433632 -0400
43849 @@ -0,0 +1,145 @@
43850 +/*
43851 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
43852 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
43853 + *
43854 + *    For licensing information please see the supplied COPYING file
43855 + *
43856 + */
43857 +
43858 +#ident "@(#)$Id: debug.c,v 1.28.2.1 2004/11/12 10:54:50 mike Exp $"
43859 +/*      $Source: /cvs/master/quadrics/epmod/debug.c,v $*/
43860 +
43861 +#include <qsnet/kernel.h>
43862 +
43863 +#include <elan/kcomm.h>
43864 +
43865 +#include "debug.h"
43866 +
43867 +DisplayInfo di_ep_debug = {ep_debugf, DBG_DEBUG};
43868 +
43869 +/*
43870 + * Generate a partial bitmap string, for the bitmap from offset "off" for "count" bits,
43871 + * to allow for displaying of subsets, treat entry 0 of the bitmap as having value "base".
43872 + */
43873 +int
43874 +ep_sprintf_bitmap (char *str, unsigned nbytes, bitmap_t *bitmap, int base, int off, int nbits)
43875 +{
43876 +    char entry[12];                                            /* space for N-N */
43877 +    register int i, j, len;
43878 +    register int notstart = off;
43879 +    register int notfirst = 0;
43880 +    char *p = str;
43881 +    
43882 +    for (i = off; i < nbits; i++)
43883 +    {
43884 +       if (BT_TEST (bitmap, i))
43885 +       {
43886 +           for (j = i+1; j < nbits; j++)
43887 +               if (! BT_TEST (bitmap, j))
43888 +                   break;
43889 +
43890 +           if (j == (i+1))
43891 +               len = (int)sprintf (entry, "%d", base + i);
43892 +           else
43893 +               len = (int)sprintf (entry, "%d-%d", base + i, base + j-1);
43894 +           
43895 +           /* NOTE the 2 is for: one for comma, one for (possible) closing bracket */
43896 +           if ((p - str) <= (nbytes - (len+3)))
43897 +               p += (int)sprintf (p, "%c%s", notfirst++ ? ',' : notstart ? ' ' : '[', entry);
43898 +           else
43899 +           {
43900 +               /* no more space on this line, so move onto next */
43901 +               sprintf (p, "%c", notfirst++ ? ',' : '[');
43902 +
43903 +               return (i);
43904 +           }
43905 +
43906 +           i = j;
43907 +       }
43908 +    }
43909 +    
43910 +    if (!notfirst)
43911 +       sprintf (str, "<empty>");
43912 +    else
43913 +       strcpy (p, "]");
43914 +
43915 +    return (-1);
43916 +}
43917 +
43918 +void
43919 +ep_display_bitmap (char *prefix, char *tag, bitmap_t *bitmap, unsigned base, unsigned nbits)
43920 +{
43921 +    /* Tru64 kernel printf() truncates lines at 128 bytes - the man pages for printf (9)
43922 +     * do not mention this restriction, nor that it does not terminate the line with a
43923 +     * carriage return, this  is pretty naff. 
43924 +     * Linux has a similar limit though is much more generous at 1024 - and you can just 
43925 +     * look at the code to see why this has been done.
43926 +     *
43927 +     * Our nodeset information could well be longer than 128 characters,  so we're going to 
43928 +     * have to split it into a number of lines. */
43929 +
43930 +#define LINEBUF_SIZE           128
43931 +    char *p, linebuf[LINEBUF_SIZE+1];                          /* +1 for null termination */
43932 +    int i, noff, off = 0;
43933 +
43934 +    do {
43935 +       if (off == 0)
43936 +           p = linebuf + (int)sprintf (linebuf, "%s: %s ", prefix, tag);
43937 +       else
43938 +       {
43939 +           p = linebuf + (int)sprintf (linebuf, "%s:  ", prefix);
43940 +           for (i = 0; tag[i] != '\0'; i++)
43941 +               *p++ = ' ';
43942 +       }
43943 +
43944 +       noff = ep_sprintf_bitmap (p, &linebuf[LINEBUF_SIZE-1]-p, bitmap, base, off, nbits);
43945 +
43946 +       printk ("%s\n", linebuf);
43947 +
43948 +    } while ((off = noff) != -1);
43949 +
43950 +#undef LINEBUF_SIZE
43951 +}
43952 +
43953 +void
43954 +ep_debugf (long mode, char *fmt, ...)
43955 +{
43956 +   va_list ap;
43957 +   char prefix[32];
43958 +   
43959 +   va_start (ap, fmt);
43960 +#if defined(LINUX)
43961 +   sprintf (prefix, "[%08d.%04d] ", (int) lbolt, current->pid);
43962 +#else
43963 +   sprintf (prefix, "[%08d.----] ", (int) lbolt);
43964 +#endif
43965 +   qsnet_vdebugf ((mode & epdebug_console ? QSNET_DEBUG_CONSOLE: 0) | QSNET_DEBUG_BUFFER, prefix, fmt, ap);
43966 +   va_end (ap);
43967 +}
43968 +
43969 +int
43970 +ep_assfail (EP_RAIL *rail, const char *ex, const char *func, const char *file, const int line)
43971 +{
43972 +    qsnet_debugf (QSNET_DEBUG_BUFFER, "ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
43973 +    
43974 +    printk (KERN_EMERG "ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
43975 +    
43976 +    if (panicstr)
43977 +       return (0);
43978 +    
43979 +    if (assfail_mode & 1)                              /* return to BUG() */
43980 +       return 1;
43981 +    
43982 +    if (assfail_mode & 2)
43983 +       panic ("ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
43984 +    if (assfail_mode & 4)
43985 +       epdebug = 0;
43986 +    
43987 +    return 0;
43988 +}
43989 +
43990 +/*
43991 + * Local variables:
43992 + * c-file-style: "stroustrup"
43993 + * End:
43994 + */
43995 Index: linux-2.4.21/drivers/net/qsnet/ep/debug_elan4.c
43996 ===================================================================
43997 --- linux-2.4.21.orig/drivers/net/qsnet/ep/debug_elan4.c        2004-02-23 16:02:56.000000000 -0500
43998 +++ linux-2.4.21/drivers/net/qsnet/ep/debug_elan4.c     2005-06-01 23:12:54.636433480 -0400
43999 @@ -0,0 +1,59 @@
44000 +/*
44001 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44002 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44003 + *
44004 + *    For licensing information please see the supplied COPYING file
44005 + *
44006 + */
44007 +
44008 +#ident "@(#)$Id: debug_elan4.c,v 1.1 2004/05/19 10:21:04 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
44009 +/*      $Source: /cvs/master/quadrics/epmod/debug_elan4.c,v $*/
44010 +
44011 +#include <qsnet/kernel.h>
44012 +
44013 +#include <elan/kcomm.h>
44014 +
44015 +#include "kcomm_vp.h"
44016 +#include "kcomm_elan4.h"
44017 +#include "conf_linux.h"
44018 +#include "debug.h"
44019 +
44020 +static void
44021 +ep4_display_ecqs (EP4_RAIL *rail)
44022 +{
44023 +    struct list_head *el;
44024 +    unsigned long flags;
44025 +    int i;
44026 +
44027 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
44028 +    for (i = 0; i <EP4_NUM_ECQ; i++)
44029 +    {
44030 +       list_for_each (el, &rail->r_ecq_list[i]) {
44031 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
44032 +
44033 +           ep_debugf (DBG_DEBUG, "ECQ: type %d: avail %d cqnum %d\n", i, ecq->ecq_avail, elan4_cq2num (ecq->ecq_cq));
44034 +       }
44035 +    }
44036 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
44037 +}
44038 +
44039 +void
44040 +ep4_debug_rail (EP_RAIL *r)
44041 +{
44042 +    EP4_RAIL *rail = (EP4_RAIL *) r;
44043 +    EP_SYS   *sys  = rail->r_generic.System;
44044 +
44045 +    ep_debugf (DBG_DEBUG, "ep%d: is elan4 %d rev %c\n", rail->r_generic.Number,
44046 +              rail->r_generic.Devinfo.dev_instance, 'a' + rail->r_generic.Devinfo.dev_revision_id);
44047 +
44048 +    ep4_display_ecqs (rail);
44049 +
44050 +    ep_display_alloc (&sys->Allocator);
44051 +    ep_display_rmap (sys->Allocator.ResourceMap);
44052 +
44053 +    ep_display_alloc (&rail->r_generic.ElanAllocator);
44054 +    ep_display_alloc (&rail->r_generic.MainAllocator);
44055 +
44056 +    ep_display_rmap (rail->r_generic.ElanAllocator.ResourceMap);
44057 +}
44058 +
44059 Index: linux-2.4.21/drivers/net/qsnet/ep/debug.h
44060 ===================================================================
44061 --- linux-2.4.21.orig/drivers/net/qsnet/ep/debug.h      2004-02-23 16:02:56.000000000 -0500
44062 +++ linux-2.4.21/drivers/net/qsnet/ep/debug.h   2005-06-01 23:12:54.636433480 -0400
44063 @@ -0,0 +1,109 @@
44064 +/*
44065 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44066 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44067 + *
44068 + *    For licensing information please see the supplied COPYING file
44069 + *
44070 + */
44071 +
44072 +#ifndef _ELAN3_EPDEBUG_H
44073 +#define _ELAN3_EPDEBUG_H
44074 +
44075 +#ident "$Id: debug.h,v 1.18.2.1 2004/11/12 10:54:50 mike Exp $"
44076 +/*      $Source: /cvs/master/quadrics/epmod/debug.h,v $ */
44077 +
44078 +extern unsigned int epdebug;
44079 +extern unsigned int epdebug_console;
44080 +extern unsigned int epdebug_cmlevel;
44081 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
44082 +extern unsigned int epdebug_check_sum;
44083 +#endif
44084 +#define DBG_CONFIG             0x00000001                      /* Module configuration */
44085 +#define DBG_PROBE              0x00000002
44086 +#define DBG_ROUTETABLE         0x00000004
44087 +#define DBG_STATEMAP           0x00000008
44088 +
44089 +#define DBG_CM                 0x00000020
44090 +#define DBG_XMTR               0x00000040
44091 +#define DBG_RCVR               0x00000080
44092 +#define DBG_FORWARD            0x00000100
44093 +#define DBG_DISCON             0x00000200
44094 +#define DBG_EPTRAP             0x00000400
44095 +#define DBG_COMMAND            0x00000800
44096 +#define DBG_RETRY              0x00001000
44097 +#define DBG_DEBUG              0x00002000
44098 +#define DBG_NETWORK_ERROR      0x00004000
44099 +#define DBG_MSGSYS             0x00008000
44100 +#define DBG_MANAGER            0x00010000
44101 +#define DBG_KMAP               0x00020000
44102 +#define DBG_FAILOVER           0x00040000
44103 +#define DBG_MAPNMD             0x00080000
44104 +#define DBG_KMSG               0x00100000
44105 +#define DBG_SVC                 0x00200000
44106 +#define DBG_STABILISE          0x00400000
44107 +
44108 +#if defined(DEBUG_PRINTF)
44109 +
44110 +#  define EPRINTF0(m,fmt)                      ((epdebug&(m)) ? ep_debugf(m,fmt)                     : (void)0)
44111 +#  define EPRINTF1(m,fmt,a)                    ((epdebug&(m)) ? ep_debugf(m,fmt,a)                   : (void)0)
44112 +#  define EPRINTF2(m,fmt,a,b)                  ((epdebug&(m)) ? ep_debugf(m,fmt,a,b)                 : (void)0)
44113 +#  define EPRINTF3(m,fmt,a,b,c)                        ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c)               : (void)0)
44114 +#  define EPRINTF4(m,fmt,a,b,c,d)              ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d)             : (void)0)
44115 +#  define EPRINTF5(m,fmt,a,b,c,d,e)            ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e)           : (void)0)
44116 +#  define EPRINTF6(m,fmt,a,b,c,d,e,f)          ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f)         : (void)0)
44117 +#  define EPRINTF7(m,fmt,a,b,c,d,e,f,g)                ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g)       : (void)0)
44118 +#  define EPRINTF8(m,fmt,a,b,c,d,e,f,g,h)      ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h)     : (void)0)
44119 +#  define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i)    ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h,i)   : (void)0)
44120 +#  define EPRINTF10(m,fmt,a,b,c,d,e,f,g,h,i,j) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h,i,j) : (void)0)
44121 +
44122 +#  define CPRINTF0(lvl,fmt)                    (((lvl) <= epdebug_cmlevel) ? EPRINTF0(DBG_CM,fmt)                   : (void)0)
44123 +#  define CPRINTF1(lvl,fmt,a)                  (((lvl) <= epdebug_cmlevel) ? EPRINTF1(DBG_CM,fmt,a)                 : (void)0)
44124 +#  define CPRINTF2(lvl,fmt,a,b)                        (((lvl) <= epdebug_cmlevel) ? EPRINTF2(DBG_CM,fmt,a,b)               : (void)0)
44125 +#  define CPRINTF3(lvl,fmt,a,b,c)              (((lvl) <= epdebug_cmlevel) ? EPRINTF3(DBG_CM,fmt,a,b,c)             : (void)0)
44126 +#  define CPRINTF4(lvl,fmt,a,b,c,d)            (((lvl) <= epdebug_cmlevel) ? EPRINTF4(DBG_CM,fmt,a,b,c,d)           : (void)0)
44127 +#  define CPRINTF5(lvl,fmt,a,b,c,d,e)          (((lvl) <= epdebug_cmlevel) ? EPRINTF5(DBG_CM,fmt,a,b,c,d,e)         : (void)0)
44128 +#  define CPRINTF6(lvl,fmt,a,b,c,d,e,f)                (((lvl) <= epdebug_cmlevel) ? EPRINTF6(DBG_CM,fmt,a,b,c,d,e,f)       : (void)0)
44129 +#  define CPRINTF7(lvl,fmt,a,b,c,d,e,f,g)      (((lvl) <= epdebug_cmlevel) ? EPRINTF7(DBG_CM,fmt,a,b,c,d,e,f,g)     : (void)0)
44130 +#  define CPRINTF8(lvl,fmt,a,b,c,d,e,f,g,h)    (((lvl) <= epdebug_cmlevel) ? EPRINTF8(DBG_CM,fmt,a,b,c,d,e,f,g,h)   : (void)0)
44131 +#  define CPRINTF9(lvl,fmt,a,b,c,d,e,f,g,h,i)  (((lvl) <= epdebug_cmlevel) ? EPRINTF9(DBG_CM,fmt,a,b,c,d,e,f,g,h,i) : (void)0)
44132 +
44133 +#if defined __GNUC__
44134 +extern void ep_debugf (long mode, char *fmt, ...) __attribute__ ((format (printf,2,3)));
44135 +#else
44136 +extern void ep_debugf (long mode, char *fmt, ...);
44137 +#endif
44138 +
44139 +#else
44140 +
44141 +#  define EPRINTF0(m,fmt)                      (0)
44142 +#  define EPRINTF1(m,fmt,a)                    (0)
44143 +#  define EPRINTF2(m,fmt,a,b)                  (0)
44144 +#  define EPRINTF3(m,fmt,a,b,c)                        (0)
44145 +#  define EPRINTF4(m,fmt,a,b,c,d)              (0)
44146 +#  define EPRINTF5(m,fmt,a,b,c,d,e)            (0)
44147 +#  define EPRINTF6(m,fmt,a,b,c,d,e,f)          (0)
44148 +#  define EPRINTF7(m,fmt,a,b,c,d,e,f,g)                (0)
44149 +#  define EPRINTF8(m,fmt,a,b,c,d,e,f,g,h)      (0)
44150 +#  define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i)    (0)
44151 +#  define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i,j)  (0)
44152 +
44153 +#  define CPRINTF0(lvl,fmt)                    (0)
44154 +#  define CPRINTF1(lvl,fmt,a)                  (0)
44155 +#  define CPRINTF2(lvl,fmt,a,b)                        (0)
44156 +#  define CPRINTF3(lvl,fmt,a,b,c)              (0)
44157 +#  define CPRINTF4(lvl,fmt,a,b,c,d)            (0)
44158 +#  define CPRINTF5(lvl,fmt,a,b,c,d,e)          (0)
44159 +#  define CPRINTF6(lvl,fmt,a,b,c,d,e,f)                (0)
44160 +#  define CPRINTF7(lvl,fmt,a,b,c,d,e,f,g)      (0)
44161 +#  define CPRINTF8(lvl,fmt,a,b,c,d,e,f,g,h)    (0)
44162 +#  define CPRINTF9(lvl,fmt,a,b,c,d,e,f,g,h,i)  (0)
44163 +
44164 +#endif /* DEBUG */
44165 +
44166 +/*
44167 + * Local variables:
44168 + * c-file-style: "stroustrup"
44169 + * End:
44170 + */
44171 +#endif /* _ELAN3_EPDEBUG_H */
44172 +
44173 Index: linux-2.4.21/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S
44174 ===================================================================
44175 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S   2004-02-23 16:02:56.000000000 -0500
44176 +++ linux-2.4.21/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S        2005-06-01 23:12:54.637433328 -0400
44177 @@ -0,0 +1,133 @@
44178 +/*
44179 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44180 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44181 + *
44182 + *    For licensing information please see the supplied COPYING file
44183 + *
44184 + */
44185 +
44186 +#ident "@(#)$Id: epcomms_asm_elan4_thread.S,v 1.5 2004/04/25 11:25:43 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
44187 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_asm_elan4_thread.S,v $*/
44188 +
44189 +#include <elan4/events.h>
44190 +#include <elan4/commands.h>
44191 +
44192 +#include "assym_elan4.h"
44193 +
44194 +/* XXXXX - registers.h */
44195 +#define E4_MAIN_INT_SHIFT              14
44196 +
44197 +/*
44198 + * c_waitevent_interrupt (E4_uint64 *commandport, E4_Event *event, E4_uint64 count, E4_uint64 intcookie)
44199 + */
44200 +       .global c_waitevent_interrupt
44201 +c_waitevent_interrupt:
44202 +       add             %sp, -192, %sp
44203 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
44204 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
44205 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
44206 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
44207 +       nop                                             // BUG FIX: E4 RevA
44208 +       nop                                             // BUG FIX: E4 RevA
44209 +
44210 +       mov             %r7, %r18                       // (%r2) return pc
44211 +1:     call            2f
44212 +        mov            %sp, %r17                       // (%r1) SP
44213 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
44214 +       st32            %r16, [%sp]                     // event source block
44215 +       mov             MAKE_EXT_CLEAN_CMD, %r23
44216 +       st8             %r23, [%sp+56]                  // event source block
44217 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
44218 +       mov             %r23,%r23                       // BUG FIX: E4 RevA
44219 +       nop                                             // BUG FIX: E4 RevA
44220 +       nop                                             // BUG FIX: E4 RevA
44221 +       
44222 +       or              %r9, WAIT_EVENT_CMD, %r16                                               ! WAIT_EVENT_CMD | event
44223 +       sll8            %r10, 32, %r17
44224 +       or              %r17, E4_EVENT_TYPE_VALUE(E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), %r17  !   ev_CountAndType
44225 +       mov             %sp, %r18                                                               !   ev_Source
44226 +       mov             %r8, %r19                                                               !   ev_Dest
44227 +       sll8            %r11, E4_MAIN_INT_SHIFT, %r20
44228 +       or              %r20, INTERRUPT_CMD, %r20                                               ! INTERRUPT_CMD | (cookie << E4_MAIN_INT_SHIFT)
44229 +       mov             NOP_CMD, %r21
44230 +       mov             NOP_CMD, %r22
44231 +       mov             NOP_CMD, %r23
44232 +
44233 +       st64suspend     %r16, [%r8]
44234 +       
44235 +3:     ld64            [%sp + 64], %r16                // restore call preserved register
44236 +       ld64            [%sp + 128], %r24
44237 +       jmpl            %r2+8, %r0                      // and return
44238 +        add            %sp, 192, %sp
44239 +
44240 +
44241 +#define EP4_RCVR_PENDING_STALLED               1               /* indicates thread has stalled for no descriptor (rcvr_pending_head) */
44242 +
44243 +#define RXD_DEBUG(VAL,RXD,TMP) \
44244 +       mov     VAL, TMP; \
44245 +       st8     TMP, [RXD + EP4_RXD_DEBUG]
44246 +
44247 +       
44248 +       /*
44249 +        * %r2  - rcvr elan
44250 +        * %r3  - rxd elan
44251 +        */
44252 +       .global c_queue_rxd
44253 +c_queue_rxd:
44254 +       RXD_DEBUG(1, %r3, %r23)
44255 +       
44256 +       ld16    [%r2 + EP4_RCVR_PENDING_TAILP], %r18    /* r18 == tailp, r19 = head */
44257 +       add     %r3, EP4_RXD_NEXT, %r4
44258 +       
44259 +       st8     %r0, [%r3 + EP4_RXD_NEXT]               /* rxd->rxd_next = NULL */
44260 +       st8     %r4, [%r2 + EP4_RCVR_PENDING_TAILP]     /* tailp = &rxd->rxd_next */
44261 +       st8     %r3, [%r18]                             /* *tailp = rxd */
44262 +
44263 +       cmp     %r19, EP4_RCVR_PENDING_STALLED          /* thread stalled ? */
44264 +       beq     1f
44265 +        mov    %r18, %r16                              /* must have used %r16, %r19, %r23 */
44266 +       mov     %r3, %r23
44267 +
44268 +       RXD_DEBUG(2, %r3, %r23)
44269 +       
44270 +       st8suspend %r16, [%r3 + EP4_RXD_QUEUED]         /* no - mark as queued - all done */
44271 +
44272 +1:     st8     %r16, [%r3 + EP4_RXD_QUEUED]            /* mark as queued */
44273 +
44274 +       RXD_DEBUG(3, %r3, %r23)
44275 +
44276 +       mov     %r3, %r8                                /* return rxd from c_stall_thread */
44277 +       ba      .epcomms_resume_thread                  /* resume the thread */
44278 +        ld64   [%r2 + EP4_RCVR_THREAD_STALL], %r0
44279 +
44280 +       /*
44281 +        *  c_stall_thread (EP4_RCVR_ELAN *rcvrElan)
44282 +        */
44283 +       .global c_stall_thread
44284 +c_stall_thread:
44285 +       add             %sp, -192, %sp
44286 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
44287 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
44288 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
44289 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
44290 +       nop                                             // BUG FIX: E4 RevA
44291 +       nop                                             // BUG FIX: E4 RevA
44292 +
44293 +       mov             EP4_RCVR_PENDING_STALLED, %r9   // Mark rcvr as stalled
44294 +       st8             %r9, [%r8 + EP4_RCVR_PENDING_HEAD]
44295 +
44296 +       // XXXX _ TBD should generate interrupt
44297 +
44298 +       mov             %r1, %r17                       // SP 
44299 +       mov             %r7, %r23                       // return pc
44300 +
44301 +       st64suspend     %r16, [%r8 + EP4_RCVR_THREAD_STALL]
44302 +       
44303 +.epcomms_resume_thread:
44304 +       /* %r8 == rxdElan */
44305 +       
44306 +       ld64            [%sp + 64], %r16                // restore call preserved register
44307 +       ld64            [%sp + 128], %r24
44308 +       jmpl            %r7+8, %r0                      // and return
44309 +        add            %sp, 192, %sp
44310 +
44311 Index: linux-2.4.21/drivers/net/qsnet/ep/epcomms.c
44312 ===================================================================
44313 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcomms.c    2004-02-23 16:02:56.000000000 -0500
44314 +++ linux-2.4.21/drivers/net/qsnet/ep/epcomms.c 2005-06-01 23:12:54.637433328 -0400
44315 @@ -0,0 +1,484 @@
44316 +/*
44317 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44318 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44319 + *
44320 + *    For licensing information please see the supplied COPYING file
44321 + *
44322 + */
44323 +
44324 +#ident "@(#)$Id: epcomms.c,v 1.71.2.6 2004/11/30 12:02:16 mike Exp $"
44325 +/*      $Source: /cvs/master/quadrics/epmod/epcomms.c,v $ */
44326 +
44327 +#include <qsnet/kernel.h>
44328 +#include <qsnet/kthread.h>
44329 +#include <qsnet/autoconf.h>
44330 +
44331 +#include <elan/kcomm.h>
44332 +#include <elan/epsvc.h>
44333 +#include <elan/epcomms.h>
44334 +#include "cm.h"
44335 +#include "debug.h"
44336 +
44337 +static void
44338 +ep_comms_thread (void *arg)
44339 +{
44340 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) arg;
44341 +    struct list_head *el;
44342 +
44343 +    kernel_thread_init ("ep_comms");
44344 +
44345 +    /* since ep_alloc_xmtr() has incremented the module use count,
44346 +     * we would be preventing the module from being unloaded, so
44347 +     * we decrement the use count since this thread must terminate
44348 +     * during unload of the module.
44349 +     */
44350 +    ep_mod_dec_usecount();
44351 +
44352 +    for (;;)
44353 +    {
44354 +       long nextRunTime = 0;
44355 +
44356 +       /* NOTE - subsys->Lock serializes us against flush/relocations
44357 +        *        caused by rail nodeset transitions.
44358 +        */
44359 +       kmutex_lock (&subsys->Lock);
44360 +       list_for_each (el, &subsys->Transmitters) {
44361 +           nextRunTime = ep_check_xmtr (list_entry (el, EP_XMTR, Link), nextRunTime);
44362 +       }
44363 +
44364 +       list_for_each (el, &subsys->Receivers) {
44365 +           nextRunTime = ep_check_rcvr (list_entry (el, EP_RCVR, Link), nextRunTime);
44366 +       }
44367 +       kmutex_unlock (&subsys->Lock);
44368 +
44369 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
44370 +       ep_csum_rxds (subsys);  
44371 +#endif
44372 +       nextRunTime = ep_forward_rxds (subsys, nextRunTime);
44373 +
44374 +       if (ep_kthread_sleep (&subsys->Thread, nextRunTime) < 0)
44375 +           break;
44376 +    }
44377 +
44378 +    ep_mod_inc_usecount();
44379 +
44380 +    ep_kthread_stopped (&subsys->Thread);
44381 +    kernel_thread_exit();
44382 +}
44383 +
44384 +int
44385 +ep_comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail)
44386 +{
44387 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) s;
44388 +    EP_COMMS_RAIL    *commsRail;
44389 +    struct list_head *el;
44390 +
44391 +    printk ("%s: vendorid=%x deviceid=%x\n", rail->Name, rail->Devinfo.dev_vendor_id, rail->Devinfo.dev_device_id);
44392 +
44393 +    switch (rail->Devinfo.dev_device_id)
44394 +    {
44395 +#if defined(CONFIG_ELAN3) || defined(CONFIG_ELAN3_MODULE)
44396 +    case PCI_DEVICE_ID_ELAN3:
44397 +       commsRail = ep3comms_add_rail (s, sys, rail);
44398 +       break;
44399 +#endif
44400 +#if defined(CONFIG_ELAN4) || defined(CONFIG_ELAN4_MODULE)
44401 +    case PCI_DEVICE_ID_ELAN4:
44402 +       commsRail = ep4comms_add_rail (s, sys, rail);
44403 +       break;
44404 +#endif
44405 +    default:
44406 +       return 0;
44407 +    }
44408 +
44409 +    if (commsRail == NULL)
44410 +       return 1;
44411 +
44412 +    commsRail->Rail   = rail;
44413 +    commsRail->Subsys = subsys;
44414 +
44415 +    kmutex_lock (&subsys->Lock);
44416 +    list_add_tail (&commsRail->Link, &subsys->Rails);
44417 +    
44418 +    list_for_each (el, &subsys->Receivers) {
44419 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
44420 +
44421 +       EP_RAIL_OP (commsRail, Rcvr.AddRail) (rcvr, commsRail);
44422 +    }
44423 +       
44424 +    list_for_each (el, &subsys->Transmitters) {
44425 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
44426 +
44427 +       EP_RAIL_OP (commsRail, Xmtr.AddRail) (xmtr, commsRail);
44428 +    }
44429 +
44430 +    kmutex_unlock (&subsys->Lock);
44431 +
44432 +    return 0;
44433 +}
44434 +
44435 +void
44436 +ep_comms_del_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail)
44437 +{
44438 +    EP_COMMS_SUBSYS  *subsys    = (EP_COMMS_SUBSYS *) s;
44439 +    EP_COMMS_RAIL    *commsRail = NULL;
44440 +    struct list_head *el;
44441 +
44442 +    kmutex_lock (&subsys->Lock);
44443 +    /* find out rail entry and remove from system list */
44444 +    list_for_each (el, &subsys->Rails) {
44445 +       if ((commsRail = list_entry (el, EP_COMMS_RAIL, Link))->Rail == rail)
44446 +           break;
44447 +    }
44448 +
44449 +    list_del (&commsRail->Link);
44450 +    
44451 +    list_for_each (el, &subsys->Receivers) {
44452 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
44453 +
44454 +       EP_RAIL_OP(commsRail, Rcvr.DelRail) (rcvr, commsRail);
44455 +    }
44456 +       
44457 +    list_for_each (el, &subsys->Transmitters) {
44458 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
44459 +
44460 +       EP_RAIL_OP(commsRail,Xmtr.DelRail) (xmtr, commsRail);
44461 +    }
44462 +
44463 +    kmutex_unlock (&subsys->Lock);
44464 +
44465 +    EP_RAIL_OP (commsRail, DelRail) (commsRail);
44466 +}
44467 +
44468 +void
44469 +ep_comms_fini (EP_SUBSYS *s, EP_SYS *sys)
44470 +{
44471 +    EP_COMMS_SUBSYS *subsys = (EP_COMMS_SUBSYS *) s;
44472 +
44473 +    ep_kthread_stop (&subsys->Thread);
44474 +    ep_kthread_destroy (&subsys->Thread);
44475 +
44476 +    if (subsys->ForwardXmtr)
44477 +       ep_free_xmtr (subsys->ForwardXmtr);
44478 +
44479 +    spin_lock_destroy (&subsys->ForwardDescLock);
44480 +
44481 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
44482 +    spin_lock_destroy (&subsys->CheckSumDescLock);
44483 +#endif
44484 +
44485 +    kmutex_destroy (&subsys->Lock);
44486 +
44487 +    KMEM_FREE (subsys, sizeof (EP_COMMS_SUBSYS));
44488 +}
44489 +
44490 +int
44491 +ep_comms_init (EP_SYS *sys)
44492 +{
44493 +    EP_COMMS_SUBSYS *subsys;
44494 +
44495 +    KMEM_ZALLOC (subsys, EP_COMMS_SUBSYS *, sizeof (EP_COMMS_SUBSYS), 1);
44496 +
44497 +    if (subsys == NULL)
44498 +       return (ENOMEM);
44499 +
44500 +    INIT_LIST_HEAD (&subsys->Rails);
44501 +    INIT_LIST_HEAD (&subsys->Receivers);
44502 +    INIT_LIST_HEAD (&subsys->Transmitters);
44503 +    INIT_LIST_HEAD (&subsys->ForwardDescList);
44504 +
44505 +    kmutex_init (&subsys->Lock);
44506 +    spin_lock_init (&subsys->ForwardDescLock);
44507 +
44508 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
44509 +    INIT_LIST_HEAD (&subsys->CheckSumDescList);
44510 +    spin_lock_init (&subsys->CheckSumDescLock);
44511 +#endif
44512 +
44513 +    subsys->Subsys.Sys        = sys;
44514 +    subsys->Subsys.Name              = "epcomms";
44515 +    subsys->Subsys.Destroy    = ep_comms_fini;
44516 +    subsys->Subsys.AddRail    = ep_comms_add_rail;
44517 +    subsys->Subsys.RemoveRail = ep_comms_del_rail;
44518 +
44519 +    ep_subsys_add (sys, &subsys->Subsys);
44520 +    ep_kthread_init (&subsys->Thread);
44521 +
44522 +    if ((subsys->ForwardXmtr = ep_alloc_xmtr (subsys->Subsys.Sys)) == NULL)
44523 +       goto failed;
44524 +
44525 +    if (kernel_thread_create (ep_comms_thread, subsys) == NULL)
44526 +       goto failed;
44527 +    ep_kthread_started (&subsys->Thread);
44528 +
44529 +    return (0);
44530 +
44531 + failed:
44532 +    ep_subsys_del (sys, &subsys->Subsys);
44533 +    ep_comms_fini (&subsys->Subsys, sys);
44534 +
44535 +    return (ENOMEM);
44536 +}
44537 +
44538 +void
44539 +ep_comms_display (EP_SYS *sys, char *how)
44540 +{
44541 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME);
44542 +    struct list_head *el;
44543 +
44544 +    if (how == NULL || !strncmp (how, "rail", 4))
44545 +    {
44546 +       kmutex_lock (&subsys->Lock);
44547 +       list_for_each (el, &subsys->Rails) {
44548 +           EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
44549 +
44550 +           EP_RAIL_OP(commsRail, DisplayRail) (commsRail);
44551 +       }
44552 +       kmutex_unlock (&subsys->Lock);
44553 +    }
44554 +           
44555 +    if (how == NULL || !strncmp (how, "xmtr", 4))
44556 +       list_for_each (el, &subsys->Transmitters)
44557 +           ep_display_xmtr (&di_ep_debug, list_entry (el, EP_XMTR, Link));
44558 +
44559 +    if (how == NULL || !strncmp (how, "rcvr", 4)) 
44560 +       list_for_each (el, &subsys->Receivers)
44561 +           ep_display_rcvr (&di_ep_debug, list_entry (el, EP_RCVR, Link), (how && how[4] == ',') ? 1 : 0);
44562 +}
44563 +
44564 +int
44565 +ep_svc_indicator_set (EP_SYS *epsys, int svc_indicator) 
44566 +{
44567 +    EP_COMMS_SUBSYS  *subsys;
44568 +    struct list_head *el;
44569 +
44570 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_set: %d \n",svc_indicator);
44571 +
44572 +    if (svc_indicator < 0 || svc_indicator > EP_SVC_NUM_INDICATORS)
44573 +       return (EP_EINVAL);
44574 +
44575 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
44576 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_set: ep_subsys_find failed\n");
44577 +       return (EP_EINVAL);
44578 +    }
44579 +
44580 +
44581 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
44582 +    list_for_each (el, &subsys->Rails) { 
44583 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
44584 +
44585 +       cm_svc_indicator_set(commsRail->Rail, svc_indicator);
44586 +    }
44587 +    kmutex_unlock (&subsys->Lock);
44588 +
44589 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_set: %d success\n",svc_indicator);
44590 +    return (EP_SUCCESS);
44591 +}
44592 +
44593 +int
44594 +ep_svc_indicator_clear (EP_SYS *epsys, int svc_indicator) 
44595 +{
44596 +    EP_COMMS_SUBSYS  *subsys;
44597 +    struct list_head *el;
44598 +
44599 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_clear: %d \n",svc_indicator);
44600 +
44601 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
44602 +       return (EP_EINVAL);
44603 +
44604 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
44605 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_clear: ep_subsys_find failed\n");
44606 +       return (EP_EINVAL);
44607 +    }
44608 +
44609 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
44610 +    list_for_each (el, &subsys->Rails) { 
44611 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
44612 +
44613 +       cm_svc_indicator_clear(commsRail->Rail, svc_indicator);
44614 +    }
44615 +    kmutex_unlock (&subsys->Lock);
44616 +
44617 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_clear: %d success\n",svc_indicator);
44618 +    return (EP_SUCCESS);
44619 +}
44620 +
44621 +int 
44622 +ep_svc_indicator_is_set (EP_SYS *epsys, int svc_indicator, int nodeId) 
44623 +{
44624 +    EP_COMMS_SUBSYS  *subsys;
44625 +    struct list_head *el;
44626 +    int               set = 0;
44627 +
44628 +    EPRINTF2 (DBG_SVC,"ep_svc_indicator_is_set: svc %d node %d \n", svc_indicator, nodeId);
44629 +
44630 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
44631 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_is_set: ep_subsys_find failed\n");
44632 +       return (0);
44633 +    }
44634 +
44635 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
44636 +    list_for_each (el, &subsys->Rails) { 
44637 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
44638 +
44639 +       set |= cm_svc_indicator_is_set(commsRail->Rail, svc_indicator, nodeId);
44640 +    }
44641 +    kmutex_unlock (&subsys->Lock);
44642 +
44643 +    EPRINTF3 (DBG_SVC,"ep_svc_indicator_is_set: svc %d node %d returning %d\n", svc_indicator, nodeId, set);
44644 +    return set;
44645 +}
44646 +
44647 +int
44648 +ep_svc_indicator_bitmap (EP_SYS *epsys, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) 
44649 +{
44650 +    EP_COMMS_SUBSYS  *subsys;
44651 +    struct list_head *el;
44652 +
44653 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_bitmap: svc %d\n", svc_indicator);
44654 +
44655 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
44656 +       return (-1);
44657 +
44658 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
44659 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_bitmap: ep_subsys_find failed\n");
44660 +       return (-2);
44661 +    }
44662 +
44663 +    /* clear bitmap */
44664 +    bt_zero (bitmap, nnodes);
44665 +
44666 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
44667 +    list_for_each (el, &subsys->Rails) { 
44668 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
44669 +
44670 +       /* this will or in each bit map */
44671 +       cm_svc_indicator_bitmap (commsRail->Rail, svc_indicator, bitmap, low, nnodes);
44672 +    }
44673 +    kmutex_unlock (&subsys->Lock);
44674 +
44675 +    return (0);
44676 +}
44677 +
44678 +int
44679 +ep_xmtr_svc_indicator_bitmap (EP_XMTR *xmtr, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) 
44680 +{
44681 +    int i;
44682 +
44683 +    EPRINTF1 (DBG_SVC,"ep_xmtr_svc_indicator_bitmap: svc %d\n", svc_indicator);
44684 +
44685 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
44686 +       return (-1);
44687 +
44688 +    /* clear bitmap */
44689 +    bt_zero (bitmap, nnodes);
44690 +
44691 +    for (i = 0; i < EP_MAX_RAILS; i++)
44692 +    {
44693 +       if (xmtr->RailMask & (1 << i) )
44694 +       {
44695 +           /* this will or in each bit map */
44696 +           cm_svc_indicator_bitmap (xmtr->Rails[i]->CommsRail->Rail, svc_indicator, bitmap, low, nnodes);
44697 +       }
44698 +    }
44699 +
44700 +    return (0);
44701 +}
44702 +
44703 +EP_RAILMASK
44704 +ep_svc_indicator_railmask (EP_SYS *epsys, int svc_indicator, int nodeId)
44705 +{
44706 +    EP_COMMS_SUBSYS  *subsys;
44707 +    struct list_head *el;
44708 +    EP_RAILMASK       rmask=0;
44709 +
44710 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL)
44711 +       return (rmask);
44712 +
44713 +    kmutex_lock (&subsys->Lock); /* walking rails list and reading info from Rail */
44714 +    list_for_each (el, &subsys->Rails) { 
44715 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
44716 +
44717 +       if ( cm_svc_indicator_is_set(commsRail->Rail, svc_indicator,nodeId))
44718 +            rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
44719 +    }
44720 +    kmutex_unlock (&subsys->Lock);
44721 +
44722 +    return (rmask);
44723 +}
44724 +
44725 +EP_RAILMASK
44726 +ep_xmtr_svc_indicator_railmask (EP_XMTR *xmtr, int svc_indicator, int nodeId)
44727 +{
44728 +    EP_RAILMASK    rmask=0;
44729 +    EP_COMMS_RAIL *commsRail;
44730 +    int            i;
44731 +
44732 +    for (i = 0; i < EP_MAX_RAILS; i++)
44733 +    {
44734 +       if (xmtr->RailMask & (1 << i) )
44735 +       {
44736 +           commsRail = xmtr->Rails[i]->CommsRail;
44737 +
44738 +           if ( cm_svc_indicator_is_set(commsRail->Rail, svc_indicator,nodeId))
44739 +               rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
44740 +       }
44741 +    }   
44742 +    
44743 +    EPRINTF3 (DBG_SVC, "ep_xmtr_svc_indicator_railmask: svc %d node %d mask 0x%x\n",  svc_indicator, nodeId, rmask);
44744 +
44745 +    return (rmask);
44746 +}
44747 +
44748 +EP_RAILMASK
44749 +ep_rcvr_railmask (EP_SYS *epsys, EP_SERVICE service)
44750 +{
44751 +    EP_COMMS_SUBSYS  *subsys;
44752 +    EP_RAILMASK       rmask=0;
44753 +    struct list_head *el;
44754 +    
44755 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL)
44756 +       return (rmask);
44757 +    
44758 +    kmutex_lock (&subsys->Lock);
44759 +    list_for_each (el, &subsys->Receivers) {
44760 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
44761
44762 +       if (rcvr->Service == service)
44763 +           rmask |= rcvr->RailMask; 
44764 +    }
44765 +    kmutex_unlock(&subsys->Lock);
44766 +
44767 +    return (rmask);
44768 +}
44769 +
44770 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
44771 +uint32_t
44772 +ep_calc_check_sum (EP_SYS *sys, EP_ENVELOPE *env, EP_NMD *nmd, int nFrags)
44773 +{
44774 +    EP_NMH   *nmh;
44775 +    int       i;
44776 +    uint16_t  check_data = 0;
44777 +    uint16_t  check_env  = 0;
44778 +
44779 +    for (i = 0; i < nFrags; i++) {
44780 +       /* find the nmh for this frag */
44781 +       nmh = ep_nmh_find (&sys->MappingTable, &nmd[i]);
44782 +
44783 +       ASSERT( nmh != NULL);
44784 +
44785 +       /* add the next frag to the check sum */
44786 +       check_data = nmh->nmh_ops->op_calc_check_sum (sys, nmh, &nmd[i], check_data);
44787 +    }
44788 +
44789 +    check_env = rolling_check_sum ((char *) env, offsetof(EP_ENVELOPE, CheckSum), 0);
44790 +
44791 +    return (EP_ENVELOPE_CHECK_SUM | ( (check_env & 0x7FFF) << 16) | (check_data & 0xFFFF));
44792 +}
44793 +#endif
44794 +
44795 +/*
44796 + * Local variables:
44797 + * c-file-style: "stroustrup"
44798 + * End:
44799 + */
44800 Index: linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan3.c
44801 ===================================================================
44802 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcomms_elan3.c      2004-02-23 16:02:56.000000000 -0500
44803 +++ linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan3.c   2005-06-01 23:12:54.638433176 -0400
44804 @@ -0,0 +1,191 @@
44805 +/*
44806 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44807 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44808 + *
44809 + *    For licensing information please see the supplied COPYING file
44810 + *
44811 + */
44812 +
44813 +#ident "@(#)$Id: epcomms_elan3.c,v 1.60 2004/08/03 11:34:34 david Exp $"
44814 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3.c,v $ */
44815 +
44816 +#include <qsnet/kernel.h>
44817 +
44818 +#include <elan/kcomm.h>
44819 +#include <elan/epsvc.h>
44820 +#include <elan/epcomms.h>
44821 +
44822 +#include "kcomm_elan3.h"
44823 +#include "epcomms_elan3.h"
44824 +
44825 +void
44826 +ep3comms_flush_callback (void *arg, statemap_t *map)
44827 +{
44828 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
44829 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
44830 +    struct list_head *el;
44831 +
44832 +    kmutex_lock (&subsys->Lock);
44833 +    list_for_each (el, &subsys->Transmitters) {
44834 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
44835 +       
44836 +       if (xmtr->Rails[commsRail->Rail->Number])
44837 +           ep3xmtr_flush_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
44838 +    }
44839 +
44840 +    list_for_each (el, &subsys->Receivers) {
44841 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
44842 +       
44843 +       if (rcvr->Rails[commsRail->Rail->Number])
44844 +           ep3rcvr_flush_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
44845 +    }
44846 +    kmutex_unlock (&subsys->Lock);
44847 +}
44848 +
44849 +void
44850 +ep3comms_failover_callback (void *arg, statemap_t *map)
44851 +{
44852 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
44853 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
44854 +    struct list_head *el;
44855 +
44856 +    kmutex_lock (&subsys->Lock);
44857 +    list_for_each (el, &subsys->Transmitters) {
44858 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
44859 +       
44860 +       if (xmtr->Rails[commsRail->Rail->Number])
44861 +           ep3xmtr_failover_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
44862 +    }
44863 +
44864 +    list_for_each (el, &subsys->Receivers) {
44865 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
44866 +       
44867 +       if (rcvr->Rails[commsRail->Rail->Number])
44868 +           ep3rcvr_failover_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
44869 +    }
44870 +    kmutex_unlock (&subsys->Lock);
44871 +}
44872 +
44873 +void
44874 +ep3comms_disconnect_callback (void *arg, statemap_t *map)
44875 +{
44876 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
44877 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
44878 +    struct list_head *el;
44879 +
44880 +    kmutex_lock (&subsys->Lock);
44881 +    list_for_each (el, &subsys->Transmitters) {
44882 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
44883 +       
44884 +       if (xmtr->Rails[commsRail->Rail->Number])
44885 +           ep3xmtr_disconnect_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
44886 +    }
44887 +
44888 +    list_for_each (el, &subsys->Receivers) {
44889 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
44890 +       
44891 +       if (rcvr->Rails[commsRail->Rail->Number])
44892 +           ep3rcvr_disconnect_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
44893 +    }
44894 +    kmutex_unlock (&subsys->Lock);
44895 +}
44896 +
44897 +EP_COMMS_RAIL *
44898 +ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r)
44899 +{
44900 +    EP3_RAIL         *rail   = (EP3_RAIL *) r;
44901 +    ELAN3_DEV        *dev    = rail->Device;
44902 +    EP3_COMMS_RAIL   *commsRail;
44903 +    EP3_InputQueue    qdesc;
44904 +    int i;
44905 +
44906 +    KMEM_ZALLOC (commsRail, EP3_COMMS_RAIL *, sizeof (EP3_COMMS_RAIL), TRUE);
44907 +
44908 +    if (commsRail == NULL)
44909 +       return NULL;
44910 +    
44911 +    commsRail->Generic.Ops.DelRail          = ep3comms_del_rail;
44912 +    commsRail->Generic.Ops.DisplayRail      = ep3comms_display_rail;
44913 +    commsRail->Generic.Ops.Rcvr.AddRail     = ep3rcvr_add_rail;
44914 +    commsRail->Generic.Ops.Rcvr.DelRail     = ep3rcvr_del_rail;
44915 +    commsRail->Generic.Ops.Rcvr.Check       = ep3rcvr_check;
44916 +    commsRail->Generic.Ops.Rcvr.QueueRxd    = ep3rcvr_queue_rxd;
44917 +    commsRail->Generic.Ops.Rcvr.RpcPut      = ep3rcvr_rpc_put;
44918 +    commsRail->Generic.Ops.Rcvr.RpcGet      = ep3rcvr_rpc_get;
44919 +    commsRail->Generic.Ops.Rcvr.RpcComplete = ep3rcvr_rpc_complete;
44920 +
44921 +    commsRail->Generic.Ops.Rcvr.StealRxd    = ep3rcvr_steal_rxd;
44922 +
44923 +    commsRail->Generic.Ops.Rcvr.FillOutRailStats = ep3rcvr_fillout_rail_stats;
44924 +
44925 +    commsRail->Generic.Ops.Rcvr.DisplayRcvr = ep3rcvr_display_rcvr;
44926 +    commsRail->Generic.Ops.Rcvr.DisplayRxd  = ep3rcvr_display_rxd;
44927 +
44928 +    commsRail->Generic.Ops.Xmtr.AddRail     = ep3xmtr_add_rail;
44929 +    commsRail->Generic.Ops.Xmtr.DelRail     = ep3xmtr_del_rail;
44930 +    commsRail->Generic.Ops.Xmtr.Check       = ep3xmtr_check;
44931 +    commsRail->Generic.Ops.Xmtr.BindTxd     = ep3xmtr_bind_txd;
44932 +    commsRail->Generic.Ops.Xmtr.UnbindTxd   = ep3xmtr_unbind_txd;
44933 +    commsRail->Generic.Ops.Xmtr.PollTxd     = ep3xmtr_poll_txd;
44934 +    commsRail->Generic.Ops.Xmtr.CheckTxdState = ep3xmtr_check_txd_state;
44935 +
44936 +    commsRail->Generic.Ops.Xmtr.DisplayXmtr = ep3xmtr_display_xmtr;
44937 +    commsRail->Generic.Ops.Xmtr.DisplayTxd  = ep3xmtr_display_txd;
44938 +
44939 +    commsRail->Generic.Ops.Xmtr.FillOutRailStats = ep3xmtr_fillout_rail_stats;
44940 +
44941 +    /* Allocate the input queues at their fixed elan address */
44942 +    if (! (commsRail->QueueDescs = ep_alloc_memory_elan (r, EP_EPCOMMS_QUEUE_BASE, roundup (EP_MSG_NSVC * sizeof (EP3_InputQueue), PAGESIZE), EP_PERM_ALL, 0)))
44943 +    {
44944 +       KMEM_FREE (commsRail, sizeof (EP3_COMMS_RAIL));
44945 +       return NULL;
44946 +    }
44947 +
44948 +    qdesc.q_state          = E3_QUEUE_FULL;
44949 +    qdesc.q_base           = 0;
44950 +    qdesc.q_top            = 0;
44951 +    qdesc.q_fptr           = 0;
44952 +    qdesc.q_bptr           = 0;
44953 +    qdesc.q_size           = 0;
44954 +    qdesc.q_event.ev_Count = 0;
44955 +    qdesc.q_event.ev_Type  = 0;
44956 +
44957 +    /* Initialise all queue entries to be full */
44958 +    for (i = 0; i < EP_MSG_NSVC; i++)
44959 +       elan3_sdram_copyl_to_sdram (dev, &qdesc, commsRail->QueueDescs + (i * sizeof (EP3_InputQueue)), sizeof (EP3_InputQueue));
44960 +
44961 +    ep_register_callback (r, EP_CB_FLUSH_FILTERING, ep3comms_flush_callback,      commsRail);
44962 +    ep_register_callback (r, EP_CB_FLUSH_FLUSHING,  ep3comms_flush_callback,      commsRail);
44963 +    ep_register_callback (r, EP_CB_FAILOVER,        ep3comms_failover_callback,   commsRail);
44964 +    ep_register_callback (r, EP_CB_DISCONNECTING,   ep3comms_disconnect_callback, commsRail);
44965 +
44966 +    return (EP_COMMS_RAIL *) commsRail;
44967 +}
44968 +
44969 +void
44970 +ep3comms_del_rail (EP_COMMS_RAIL *r)
44971 +{
44972 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) r;
44973 +    EP_RAIL        *rail      = commsRail->Generic.Rail;
44974 +
44975 +    ep_remove_callback (rail, EP_CB_FLUSH_FILTERING, ep3comms_flush_callback,      commsRail);
44976 +    ep_remove_callback (rail, EP_CB_FLUSH_FLUSHING,  ep3comms_flush_callback,      commsRail);
44977 +    ep_remove_callback (rail, EP_CB_FAILOVER,        ep3comms_failover_callback,   commsRail);
44978 +    ep_remove_callback (rail, EP_CB_DISCONNECTING,   ep3comms_disconnect_callback, commsRail);
44979 +
44980 +    ep_free_memory_elan (rail, EP_EPCOMMS_QUEUE_BASE);
44981 +
44982 +    KMEM_FREE (commsRail, sizeof (EP3_COMMS_RAIL));
44983 +}
44984 +
44985 +void
44986 +ep3comms_display_rail (EP_COMMS_RAIL *r)
44987 +{
44988 +    
44989 +}
44990 +
44991 +/*
44992 + * Local variables:
44993 + * c-file-style: "stroustrup"
44994 + * End:
44995 + */
44996 Index: linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan3.h
44997 ===================================================================
44998 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcomms_elan3.h      2004-02-23 16:02:56.000000000 -0500
44999 +++ linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan3.h   2005-06-01 23:12:54.639433024 -0400
45000 @@ -0,0 +1,330 @@
45001 +/*
45002 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45003 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
45004 + *
45005 + *    For licensing information please see the supplied COPYING file
45006 + *
45007 + */
45008 +
45009 +#ifndef __EPCOMMS_ELAN3_H
45010 +#define __EPCOMMS_ELAN3_H
45011 +
45012 +#ident "@(#)$Id: epcomms_elan3.h,v 1.27.2.1 2004/11/12 10:54:51 mike Exp $"
45013 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3.h,v $ */
45014 +
45015 +#define EP3_DMAFAILCOUNT               3
45016 +
45017 +
45018 +/* Main/Elan spinlock */
45019 +typedef struct ep3_spinlock_elan 
45020 +{
45021 +    volatile E3_uint32 sl_lock;                /* main wants a lock */
45022 +    volatile E3_uint32 sl_seq;                 /* thread owns this word */
45023 +    /* NOTE: The lock/seq words must be within the same 32-byte Elan cache-line */
45024 +    E3_uint64          sl_pad[14];             /* pad to 64-bytes */
45025 +} EP3_SPINLOCK_ELAN;
45026 +
45027 +/* Declare this as a main memory cache block for efficiency */
45028 +typedef struct ep3_spinlock_main {
45029 +    volatile E3_uint32 sl_seq;                 /* copy of seq number updated by Elan */
45030 +    volatile E3_uint32 sl_pad[15];             /* pad to 64-bytes */
45031 +} EP3_SPINLOCK_MAIN;
45032 +
45033 +#if defined (__ELAN3__)
45034 +
45035 +extern void ep3_spinblock (EP3_SPINLOCK_ELAN *, EP3_SPINLOCK_MAIN *);
45036 +
45037 +#define EP3_SPINENTER(SLE,SL) \
45038 +do {\
45039 +       (SLE)->sl_seq++; \
45040 +       if ((SLE)->sl_lock) \
45041 +               ep3_spinblock(SLE, SL);\
45042 +} while (0)
45043 +
45044 +#define EP3_SPINEXIT(SLE,SL) \
45045 +do {\
45046 +       (SL)->sl_seq = (SLE)->sl_seq;\
45047 +} while (0)
45048 +
45049 +#else
45050 +
45051 +#define EP3_SPINENTER(DEV,SLE,SL) do { \
45052 +    E3_uint32 seq; \
45053 +\
45054 +    mb();\
45055 +    elan3_sdram_writel (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 1);\
45056 +    mb();\
45057 +    seq = elan3_sdram_readl (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_seq));\
45058 +    while (seq != (SL)->sl_seq)\
45059 +    {\
45060 +       while ((SL)->sl_seq == (seq - 1))\
45061 +       {\
45062 +           mb();\
45063 +\
45064 +           DELAY (1); \
45065 +       }\
45066 +       seq = elan3_sdram_readl (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_seq));\
45067 +    }\
45068 +} while (0)
45069 +
45070 +#define EP3_SPINEXIT(DEV,SLE,SL) do { \
45071 +       wmb(); \
45072 +       elan3_sdram_writel (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 0);\
45073 +       mmiob(); \
45074 +} while (0)
45075 +
45076 +#endif /* ! __ELAN3__ */
45077 +
45078 +/* per-rail elan memory portion receive descriptor */
45079 +typedef struct ep3_rxd_rail_elan
45080 +{
45081 +    E3_DMA             Dmas[EP_MAXFRAG+1];                     /* Dma's for fetching data/putting data & status blk */
45082 +    E3_Event           ChainEvent[EP_MAXFRAG];                 /* Events to chain dmas */
45083 +    E3_BlockCopyEvent  DataEvent;                              /* message received block event */
45084 +    E3_BlockCopyEvent  DoneEvent;                              /* RPC status block event */
45085 +    
45086 +    EP_NMD             Data;                                   /* Network mapping handle for receive data */
45087 +
45088 +    E3_Addr            RxdMain;                                /* pointer to main memory portion */
45089 +
45090 +    E3_Addr            Next;                                   /* linked list when on pending list (elan address) */
45091 +
45092 +    E3_uint64          MainAddr;                               /* kernel address of ep_rxd_main */
45093 +} EP3_RXD_RAIL_ELAN;
45094 +
45095 +#define EP3_RXD_RAIL_ELAN_SIZE roundup (sizeof (EP3_RXD_RAIL_ELAN), E3_DMA_ALIGN)
45096 +
45097 +/* per-rail main memory portion of receive descriptor */
45098 +typedef struct ep3_rxd_rail_main
45099 +{
45100 +    E3_uint32          DataEvent;                              /* dest for done event */
45101 +    E3_uint32          DoneEvent;                              /* dest for done event */
45102 +} EP3_RXD_RAIL_MAIN;
45103 +
45104 +#define EP3_RXD_RAIL_MAIN_SIZE roundup (sizeof(EP3_RXD_RAIL_MAIN), sizeof (E3_uint32))
45105 +
45106 +#if !defined(__ELAN3__)
45107 +/* Kernel memory portion of per-rail receive descriptor */
45108 +typedef struct ep3_rxd_rail
45109 +{
45110 +    EP_RXD_RAIL                Generic;                                /* generic rxd rail */
45111 +
45112 +    EP3_COOKIE         DataCookie;                             /* Event cookie */
45113 +    EP3_COOKIE         DoneCookie;                             /* Event cookie */
45114 +    EP3_COOKIE         ChainCookie[EP_MAXFRAG];                /* Event cookie */
45115 +
45116 +    sdramaddr_t                RxdElan;                                /* per-rail elan receive descriptor */
45117 +    E3_Addr            RxdElanAddr;                            /*   and elan address */
45118 +
45119 +    EP3_RXD_RAIL_MAIN  *RxdMain;                               /* per-rail main receive descriptor */
45120 +    E3_Addr            RxdMainAddr;                            /*   and elan address */
45121 +
45122 +    EP_BACKOFF         Backoff;                                /* dma backoff */
45123 +} EP3_RXD_RAIL;
45124 +
45125 +#define EP3_NUM_RXD_PER_BLOCK  16
45126 +
45127 +typedef struct ep3_rxd_rail_block
45128 +{
45129 +    struct list_head   Link;
45130 +    
45131 +    EP3_RXD_RAIL        Rxd[EP3_NUM_RXD_PER_BLOCK];
45132 +} EP3_RXD_RAIL_BLOCK;
45133 +
45134 +#endif /* ! __ELAN3__ */
45135 +
45136 +typedef struct ep3_rcvr_rail_elan                              /* Elan memory service structure */
45137 +{
45138 +    EP3_SPINLOCK_ELAN  ThreadLock;                             /* elan memory portion of spin lock */
45139 +    EP3_SPINLOCK_ELAN  PendingLock;                            /* spin lock for pending rx list */
45140 +
45141 +    E3_Addr           PendingDescs;                            /* list of pending receive descriptors */
45142 +    E3_uint32          ThreadShouldHalt;                        /* marks that the thread should halt */
45143 +
45144 +    E3_uint64         MainAddr;                                /* kernel address of ep_rcvr (for StallThreadForNoDescs)*/
45145 +} EP3_RCVR_RAIL_ELAN;
45146 +
45147 +typedef struct ep3_rcvr_rail_main                              /* Main memory service strucure */
45148 +{
45149 +    EP3_SPINLOCK_MAIN  ThreadLock;                             /* main memory portion of spin lock */
45150 +    EP3_SPINLOCK_MAIN  PendingLock;                            /* spinlock for pending rx list */
45151 +
45152 +    volatile unsigned   PendingDescsTailp;                     /* next pointer of last receive descriptor on pending list */
45153 +} EP3_RCVR_RAIL_MAIN;
45154 +
45155 +#if !defined(__ELAN3__)
45156 +
45157 +typedef struct ep3_rcvr_rail_stats
45158 +{
45159 +    unsigned long some_stat;
45160 +} EP3_RCVR_RAIL_STATS;
45161 +
45162 +typedef struct ep3_rcvr_rail
45163 +{
45164 +    EP_RCVR_RAIL       Generic;                                /* generic portion */
45165 +    
45166 +    EP3_RCVR_RAIL_MAIN *RcvrMain;
45167 +    E3_Addr            RcvrMainAddr;
45168 +    sdramaddr_t         RcvrElan;
45169 +    E3_Addr             RcvrElanAddr;
45170 +
45171 +    sdramaddr_t                InputQueueBase;                         /* base of receive queue */
45172 +    E3_Addr            InputQueueAddr;                         /* elan address of receive queue */
45173 +
45174 +    E3_Addr            ThreadStack;                            /* Thread processor stack */
45175 +    E3_Addr            ThreadWaiting;                          /* Elan thread is waiting as no receive descriptors pending (sp stored here ) */
45176 +    E3_Addr            ThreadHalted;                           /* Elan thread is waiting as it was requested to halt */
45177 +
45178 +    struct list_head   FreeDescList;                           /* freelist of per-rail receive descriptors */
45179 +    unsigned int       FreeDescCount;                          /*   and number on free list */
45180 +    unsigned int        TotalDescCount;                                /*   total number created */
45181 +    spinlock_t         FreeDescLock;                           /*   and lock for free list */
45182 +    struct list_head    DescBlockList;                         /* list of receive descriptor blocks */
45183 +
45184 +    unsigned int        FreeDescWaiting;                       /* waiting for descriptors to be freed */
45185 +    kcondvar_t         FreeDescSleep;                          /*   and sleep here */
45186 +
45187 +    unsigned int       CleanupWaiting;                         /* waiting for cleanup */
45188 +    kcondvar_t         CleanupSleep;                           /*   and sleep here */
45189 +
45190 +    EP3_RCVR_RAIL_STATS stats;                                  /* elan3 specific rcvr_rail stats */
45191 +} EP3_RCVR_RAIL;
45192 +
45193 +#endif /* ! __ELAN3__ */
45194 +
45195 +/* per-rail portion of transmit descriptor */
45196 +typedef struct ep3_txd_rail_elan
45197 +{
45198 +    EP_ENVELOPE               Envelope;                                /* message envelope */
45199 +    EP_PAYLOAD        Payload;                                 /* message payload */
45200 +
45201 +    E3_BlockCopyEvent EnveEvent;                               /* envelope event */
45202 +    E3_BlockCopyEvent DataEvent;                               /* data transfer event */
45203 +    E3_BlockCopyEvent DoneEvent;                               /* rpc done event */
45204 +} EP3_TXD_RAIL_ELAN;
45205 +
45206 +#define EP3_TXD_RAIL_ELAN_SIZE roundup (sizeof (EP3_TXD_RAIL_ELAN), E3_BLK_ALIGN)
45207 +
45208 +typedef struct ep3_txd_rail_main
45209 +{
45210 +    E3_uint32         EnveEvent;                               /* dest for envelope event */
45211 +    E3_uint32         DataEvent;                               /* dest for data transfer event */
45212 +    E3_uint32        DoneEvent;                                /* dest for rpc done event */
45213 +} EP3_TXD_RAIL_MAIN;
45214 +
45215 +#define EP3_TXD_RAIL_MAIN_SIZE roundup (sizeof(EP3_TXD_RAIL_MAIN), E3_BLK_ALIGN)
45216 +
45217 +#if !defined(__ELAN3__)
45218 +
45219 +typedef struct ep3_txd_rail
45220 +{
45221 +    EP_TXD_RAIL               Generic;                                 /* generic txd rail */
45222 +
45223 +    EP3_COOKIE        EnveCookie;                              /* Event cookies */
45224 +    EP3_COOKIE        DataCookie;
45225 +    EP3_COOKIE        DoneCookie;
45226 +
45227 +    sdramaddr_t               TxdElan;                                 /* Elan TX descriptor */
45228 +    E3_Addr           TxdElanAddr;                             /*  and elan address */
45229 +
45230 +    EP3_TXD_RAIL_MAIN *TxdMain;                                        /* Elan Main memory tx descriptor */
45231 +    E3_Addr           TxdMainAddr;                             /*  and elan address */
45232 +
45233 +    EP_BACKOFF        Backoff;                                 /* dma backoff */
45234 +} EP3_TXD_RAIL;
45235 +
45236 +
45237 +#define EP3_NUM_TXD_PER_BLOCK  16
45238 +
45239 +typedef struct ep3_txd_rail_block
45240 +{
45241 +    struct list_head   Link;
45242 +    
45243 +    EP3_TXD_RAIL       Txd[EP3_NUM_TXD_PER_BLOCK];
45244 +} EP3_TXD_RAIL_BLOCK;
45245 +
45246 +typedef struct ep3_xmtr_rail_stats
45247 +{
45248 +    unsigned long some_stat;
45249 +} EP3_XMTR_RAIL_STATS;
45250 +
45251 +typedef struct ep3_xmtr_rail
45252 +{
45253 +    EP_XMTR_RAIL       Generic;                                /* generic portion */
45254 +
45255 +    struct list_head   FreeDescList;                           /* freelist of per-rail receive descriptors */
45256 +    unsigned int       FreeDescCount;                          /*   and number on free list */
45257 +    unsigned int        TotalDescCount;
45258 +    spinlock_t         FreeDescLock;                           /*   and lock for free list */
45259 +    struct list_head    DescBlockList;                         /* list of receive descriptor blocks */
45260 +
45261 +    unsigned int        FreeDescWaiting;                       /* waiting for descriptors to be freed */
45262 +    kcondvar_t          FreeDescSleep;                         /*   and sleep here */
45263 +
45264 +    EP3_XMTR_RAIL_STATS stats;                                  /* elan3 specific xmtr rail stats */
45265 +} EP3_XMTR_RAIL;
45266 +
45267 +typedef struct ep3_comms_rail
45268 +{
45269 +    EP_COMMS_RAIL      Generic;                                /* generic comms rail */
45270 +    sdramaddr_t                QueueDescs;                             /* input queue descriptors */
45271 +} EP3_COMMS_RAIL;
45272 +
45273 +/* epcommxTx_elan3.c */
45274 +extern void           ep3xmtr_flush_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
45275 +extern void           ep3xmtr_failover_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
45276 +extern void           ep3xmtr_disconnect_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
45277 +
45278 +/* epcommsRx_elan3.c */
45279 +extern void          CompleteEnvelope (EP3_RAIL *rail, E3_Addr rxdMainAddr, E3_uint32 PAckVal);
45280 +extern void           StallThreadForNoDescs (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp);
45281 +extern void           StallThreadForHalted  (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp);
45282 +
45283 +extern void           ep3rcvr_flush_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
45284 +extern void           ep3rcvr_failover_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
45285 +extern void           ep3rcvr_disconnect_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
45286 +
45287 +/* epcomms_elan3.c */
45288 +extern EP_COMMS_RAIL *ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r);
45289 +extern void           ep3comms_del_rail (EP_COMMS_RAIL *r);
45290 +extern void           ep3comms_display_rail (EP_COMMS_RAIL *r);
45291 +
45292 +/* epcommsTx_elan3.c */
45293 +extern int            ep3xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
45294 +extern void           ep3xmtr_unbind_txd (EP_TXD *txd, unsigned int phase);
45295 +extern int            ep3xmtr_poll_txd (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
45296 +extern long           ep3xmtr_check (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
45297 +extern void           ep3xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
45298 +extern void           ep3xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
45299 +extern int            ep3xmtr_check_txd_state(EP_TXD *txd);
45300 +
45301 +extern void           ep3xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
45302 +extern void           ep3xmtr_display_txd  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
45303 +
45304 +extern void           ep3xmtr_fillout_rail_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
45305 +
45306 +/* epcommsRx_elan3.c */
45307 +extern int           ep3rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
45308 +extern void          ep3rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
45309 +extern void          ep3rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
45310 +extern void          ep3rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
45311 +
45312 +extern EP_RXD       *ep3rcvr_steal_rxd (EP_RCVR_RAIL *rcvrRail);
45313 +
45314 +extern long          ep3rcvr_check (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
45315 +extern void           ep3rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
45316 +extern void           ep3rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
45317 +
45318 +extern void           ep3rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
45319 +extern void           ep3rcvr_display_rxd  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
45320 +
45321 +extern void           ep3rcvr_fillout_rail_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
45322 +
45323 +#endif /* !defined(__ELAN3__) */
45324 +
45325 +/*
45326 + * Local variables:
45327 + * c-file-style: "stroustrup"
45328 + * End:
45329 + */
45330 +#endif /* __EPCOMMS_ELAN3_H */
45331 Index: linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan3_thread.c
45332 ===================================================================
45333 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcomms_elan3_thread.c       2004-02-23 16:02:56.000000000 -0500
45334 +++ linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan3_thread.c    2005-06-01 23:12:54.640432872 -0400
45335 @@ -0,0 +1,296 @@
45336 +/*
45337 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45338 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
45339 + *
45340 + *    For licensing information please see the supplied COPYING file
45341 + *
45342 + */
45343 +
45344 +#ident "@(#)$Id: epcomms_elan3_thread.c,v 1.4 2004/01/20 11:03:15 david Exp $"
45345 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3_thread.c,v $ */
45346 +
45347 +//#include <qsnet/types.h>
45348 +
45349 +typedef char               int8_t;
45350 +typedef unsigned char      uint8_t;
45351 +typedef short              int16_t;
45352 +typedef unsigned short     uint16_t;
45353 +typedef int                int32_t;
45354 +typedef unsigned int       uint32_t;
45355 +typedef long long          int64_t;
45356 +typedef unsigned long long uint64_t;
45357 +
45358 +#include <elan3/e3types.h>
45359 +#include <elan3/events.h>
45360 +#include <elan3/elanregs.h>
45361 +#include <elan3/intrinsics.h>
45362 +
45363 +#include <elan/nmh.h>
45364 +#include <elan/kcomm.h>
45365 +#include <elan/epcomms.h>
45366 +
45367 +#include "kcomm_vp.h"
45368 +#include "kcomm_elan3.h"
45369 +#include "epcomms_elan3.h"
45370 +
45371 +#ifndef offsetof
45372 +#define offsetof(s, m)                 (unsigned long)(&(((s *)0)->m))
45373 +#endif
45374 +
45375 +EP3_RAIL_ELAN *rail;
45376 +EP3_RCVR_RAIL_ELAN *r;
45377 +EP3_RCVR_RAIL_MAIN *rm;
45378 +
45379 +void
45380 +ep3comms_rcvr (EP3_RAIL_ELAN *rail, EP3_RCVR_RAIL_ELAN *rcvrElan, EP3_RCVR_RAIL_MAIN *rcvrMain, 
45381 +             EP3_InputQueue *q, unsigned int *cookies)
45382 +{
45383 +    int           count = 1;
45384 +    E3_Addr       nfptr = q->q_fptr + q->q_size;
45385 +    E3_uint32     tmp;
45386 +    int           i;
45387 +    E3_Addr       buffer;
45388 +    int                  len;
45389 +    E3_DMA       *dma;
45390 +    E3_Event     *event;
45391 +
45392 +    /* clear the queue state to allow envelopes to arrive */
45393 +    q->q_state = 0;
45394 +
45395 +    for (;;)
45396 +    {
45397 +       if (! rcvrElan->ThreadShouldHalt)
45398 +           c_waitevent ((E3_Event *) &q->q_event, count);                                              /* HALT POINT */
45399 +
45400 +       if (rcvrElan->ThreadShouldHalt && nfptr == q->q_bptr)
45401 +       {
45402 +           asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rcvrElan));
45403 +           asm volatile ("ta %0"        : /* no outputs */ : "i" (EP3_UNIMP_THREAD_HALTED));           /* HALT POINT */
45404 +           continue;
45405 +       }
45406 +
45407 +       count = 0;
45408 +       do {
45409 +           /* Process the message at nfptr */
45410 +           EP_ENVELOPE      *env = (EP_ENVELOPE *) nfptr;
45411 +           EP3_RXD_RAIL_ELAN *rxd;
45412 +           int ack;
45413 +           
45414 +           EP3_SPINENTER(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);                                        /* HALT POINT */
45415 +           
45416 +           while ((rxd = (EP3_RXD_RAIL_ELAN *)rcvrElan->PendingDescs) == 0)
45417 +           {
45418 +               /* no receive descriptors, so trap to the kernel to wait
45419 +                * for receive descriptor to be queued, we pass the rcvr
45420 +                * in %g1, so that the trap handler can restart us. */
45421 +               EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
45422 +               asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rcvrElan));
45423 +               asm volatile ("ta %0"        : /* no outputs */ : "i" (EP3_UNIMP_TRAP_NO_DESCS));       /* HALT POINT */
45424 +               EP3_SPINENTER(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);                            /* HALT POINT */
45425 +           }
45426 +
45427 +           if (env->Version != EP_ENVELOPE_VERSION)
45428 +           {
45429 +               /* This envelope has been cancelled - so just consume it */
45430 +               EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
45431 +               goto consume_envelope;
45432 +           }
45433 +
45434 +           dma   = rxd->Dmas;
45435 +           event = rxd->ChainEvent;
45436 +
45437 +           if (EP_IS_MULTICAST(env->Attr))
45438 +           {
45439 +               dma->dma_type            = E3_DMA_TYPE (DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
45440 +               dma->dma_size            = BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t);
45441 +               dma->dma_source          = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, Bitmap);
45442 +               dma->dma_dest            = (E3_Addr) &((EP_RXD_MAIN *) rxd->RxdMain)->Bitmap;
45443 +               dma->dma_destEvent       = (E3_Addr) event;
45444 +               dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
45445 +               dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
45446 +               dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
45447 +               
45448 +               event->ev_Count = 1;
45449 +
45450 +               dma++; event++;
45451 +           }
45452 +
45453 +           if (env->nFrags == 0)
45454 +           {
45455 +               /* Generate a "get" DMA to accept the envelope and fire the rx handler */
45456 +               dma->dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
45457 +               dma->dma_size            = 0;
45458 +               dma->dma_destEvent       = (E3_Addr) &rxd->DataEvent;
45459 +               dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
45460 +               dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
45461 +               dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
45462 +               len = 0;
45463 +           }
45464 +           else
45465 +           {
45466 +               /* Generate the DMA chain to fetch the data */
45467 +               for (i = 0, buffer = rxd->Data.nmd_addr, len = 0; i < env->nFrags; i++, dma++, event++)
45468 +               {
45469 +                   dma->dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
45470 +                   dma->dma_size            = env->Frags[i].nmd_len;
45471 +                   dma->dma_source          = env->Frags[i].nmd_addr;
45472 +                   dma->dma_dest            = buffer;
45473 +                   dma->dma_destEvent       = (E3_Addr) event;
45474 +                   dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
45475 +                   dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
45476 +                   dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
45477 +                   
45478 +                   event->ev_Count = 1;
45479 +                   
45480 +                   buffer += dma->dma_size;
45481 +                   len    += dma->dma_size;
45482 +               }
45483 +               
45484 +               /* Point the last dma at the done event */
45485 +               (--dma)->dma_destEvent = (E3_Addr) &rxd->DataEvent;
45486 +               
45487 +               if (rxd->Data.nmd_len < len)
45488 +               {
45489 +                   /* The receive descriptor was too small for the message */
45490 +                   /* complete the message anyway,  but don't transfer any */
45491 +                   /* data,  we set the length to EP_MSG_TOO_BIG */
45492 +                   for (i = 0, dma = rxd->Dmas; i < env->nFrags; i++, dma++)
45493 +                       dma->dma_size = 0;
45494 +                   
45495 +                   len = EP_MSG_TOO_BIG;
45496 +               }
45497 +           }
45498 +           
45499 +           /* Store the received message length in the rxdElan for CompleteEnvelope */
45500 +           rxd->Data.nmd_len = len;
45501 +
45502 +           /* Initialise %g1 with the  "rxd" so the trap handler can
45503 +            * complete the envelope processing if we trap while sending the
45504 +            * packet */
45505 +           asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rxd));
45506 +
45507 +           /* Generate a packet to start the data transfer */
45508 +           c_open (EP_VP_DATA (env->NodeId));
45509 +           c_sendtrans2 (TR_THREADIDENTIFY, rxd->Dmas->dma_destCookieVProc, 0, 0);
45510 +           c_sendmem (TR_SENDACK | TR_REMOTEDMA, 0, rxd->Dmas); 
45511 +           ack = c_close();
45512 +           
45513 +           /*
45514 +            * If we trapped for an output timeout, then the trap handler will have
45515 +            * completed processing this envelope and cleared the spinlock, so we just
45516 +            * need to update the queue descriptor.
45517 +            */
45518 +           if (ack == EP3_PAckStolen)
45519 +               goto consume_envelope;
45520 +           
45521 +           if (ack != E3_PAckOk)
45522 +           {
45523 +               /* our packet got nacked, so trap into the kernel so that
45524 +                * it can complete processing of this envelope.
45525 +                */
45526 +               asm volatile ("ta %0" : /* no outputs */ : "i" (EP3_UNIMP_TRAP_PACKET_NACKED));         /* HALT POINT */
45527 +               goto consume_envelope;
45528 +           }
45529 +
45530 +           /* remove the RXD from the pending list */
45531 +           EP3_SPINENTER (&rcvrElan->PendingLock, &rcvrMain->PendingLock);
45532 +           if ((rcvrElan->PendingDescs = rxd->Next) == 0)
45533 +               rcvrMain->PendingDescsTailp = 0;
45534 +           EP3_SPINEXIT (&rcvrElan->PendingLock, &rcvrMain->PendingLock);
45535 +
45536 +           /* Copy the envelope information - as 5 64 byte chunks.
45537 +            * We force the parameters in g5, g6 so that they aren't
45538 +            * trashed by the loadblk32 into the locals/ins
45539 +            */
45540 +           if (EP_HAS_PAYLOAD(env->Attr))
45541 +           { 
45542 +               register void *src asm ("g5") = (void *) env;
45543 +               register void *dst asm ("g6") = (void *)  &((EP_RXD_MAIN *) rxd->RxdMain)->Envelope;
45544 +
45545 +               asm volatile (
45546 +                   "and     %%sp,63,%%g7               ! Calculate stack alignment\n"
45547 +                   "add     %%g7,64,%%g7               ! Space to save the registers\n"
45548 +                   "sub     %%sp,%%g7,%%sp             ! align stack\n" 
45549 +                   "stblock64 %%l0,[%%sp]              ! save the locals and ins\n"
45550 +
45551 +                   "ldblock64 [%0 + 0],%%l0            ! load 64-byte block into locals/ins\n"         /* copy envelope */
45552 +                   "stblock64 %%l0,[%1 + 0]            ! store 64-byte block from local/ins\n"
45553 +                   "ldblock64 [%0 + 64],%%l0           ! load 64-byte block into locals/ins\n"
45554 +                   "stblock64 %%l0,[%1 + 64]           ! store 64-byte block from local/ins\n"
45555 +
45556 +                   "ldblock64 [%0 + 128],%%l0          ! load 64-byte block into locals/ins\n"         /* copy payload */
45557 +                   "stblock64 %%l0,[%1 + 128]          ! store 64-byte block from local/ins\n"
45558 +                   "ldblock64 [%0 + 192],%%l0          ! load 64-byte block into locals/ins\n"
45559 +                   "stblock64 %%l0,[%1 + 192]          ! store 64-byte block from local/ins\n"
45560 +
45561 +                   "ldblock64 [%%sp],%%l0              ! restore locals and ins\n"
45562 +                   "add     %%sp,%%g7,%%sp             ! restore stack pointer\n"
45563 +                   : /* outputs */
45564 +                   : /* inputs */ "r" (src), "r" (dst)
45565 +                   : /* clobbered */ "g5", "g6", "g7" );
45566 +           }
45567 +           else
45568 +           { 
45569 +               register void *src asm ("g5") = (void *) env;
45570 +               register void *dst asm ("g6") = (void *)  &((EP_RXD_MAIN *) rxd->RxdMain)->Envelope;
45571 +
45572 +               asm volatile (
45573 +                   "and     %%sp,63,%%g7               ! Calculate stack alignment\n"
45574 +                   "add     %%g7,64,%%g7               ! Space to save the registers\n"
45575 +                   "sub     %%sp,%%g7,%%sp             ! align stack\n" 
45576 +                   "stblock64 %%l0,[%%sp]              ! save the locals and ins\n"
45577 +
45578 +                   "ldblock64 [%0 + 0],%%l0            ! load 64-byte block into locals/ins\n"
45579 +                   "stblock64 %%l0,[%1 + 0]            ! store 64-byte block from local/ins\n"
45580 +                   "ldblock64 [%0 + 64],%%l0           ! load 64-byte block into locals/ins\n"
45581 +                   "stblock64 %%l0,[%1 + 64]           ! store 64-byte block from local/ins\n"
45582 +
45583 +                   "ldblock64 [%%sp],%%l0              ! restore locals and ins\n"
45584 +                   "add     %%sp,%%g7,%%sp             ! restore stack pointer\n"
45585 +                   : /* outputs */
45586 +                   : /* inputs */ "r" (src), "r" (dst)
45587 +                   : /* clobbered */ "g5", "g6", "g7" );
45588 +           }
45589 +
45590 +           /* Store the message length to indicate that I've finished */
45591 +           ((EP_RXD_MAIN *) rxd->RxdMain)->Len = rxd->Data.nmd_len;                                    /* PCI write  */
45592 +           
45593 +           EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
45594 +
45595 +       consume_envelope:
45596 +           /* Sample the queue full bit *BEFORE* moving the fptr.
45597 +            * Then only clear it if it was full before, otherwise,
45598 +            * as soon as the fptr is moved on the queue could fill 
45599 +            * up, and so clearing it could mark a full queue as 
45600 +            * empty.
45601 +            *
45602 +            * While the full bit is set, the queue is in a 'steady
45603 +            * state', so it is safe to set the q_state
45604 +            * 
45605 +            */
45606 +           if (((tmp = q->q_state) & E3_QUEUE_FULL) == 0)
45607 +               q->q_fptr = nfptr;                              /* update queue */
45608 +           else
45609 +           {
45610 +               q->q_fptr = nfptr;                              /* update queue */
45611 +               q->q_state = tmp &~E3_QUEUE_FULL;               /* and clear full flag */
45612 +           }
45613 +
45614 +           count++;                                            /* bump message count */
45615 +           if (nfptr == q->q_top)                              /* queue wrap */
45616 +               nfptr = q->q_base;
45617 +           else
45618 +               nfptr += q->q_size;
45619 +
45620 +           c_break_busywait();                                 /* be nice              HALT POINT */
45621 +
45622 +       } while (nfptr != q->q_bptr);                           /* loop until Fptr == Bptr */
45623 +    }
45624 +}
45625 +
45626 +
45627 +/*
45628 + * Local variables:
45629 + * c-file-style: "stroustrup"
45630 + * End:
45631 + */
45632 Index: linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan4.c
45633 ===================================================================
45634 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcomms_elan4.c      2004-02-23 16:02:56.000000000 -0500
45635 +++ linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan4.c   2005-06-01 23:12:54.640432872 -0400
45636 @@ -0,0 +1,392 @@
45637 +/*
45638 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45639 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
45640 + *
45641 + *    For licensing information please see the supplied COPYING file
45642 + *
45643 + */
45644 +
45645 +#ident "@(#)$Id: epcomms_elan4.c,v 1.11.2.1 2004/10/28 11:53:28 david Exp $"
45646 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4.c,v $ */
45647 +
45648 +#include <qsnet/kernel.h>
45649 +
45650 +#include <elan/kcomm.h>
45651 +#include <elan/epsvc.h>
45652 +#include <elan/epcomms.h>
45653 +
45654 +#include "debug.h"
45655 +#include "kcomm_elan4.h"
45656 +#include "epcomms_elan4.h"
45657 +
45658 +static void
45659 +ep4comms_flush_interrupt (EP4_RAIL *rail, void *arg)
45660 +{
45661 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) arg;
45662 +    unsigned long  flags;
45663 +
45664 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags);
45665 +    commsRail->r_flush_count = 0;
45666 +    kcondvar_wakeupall (&commsRail->r_flush_sleep, &commsRail->r_flush_lock);
45667 +    spin_unlock_irqrestore  (&commsRail->r_flush_lock, flags);
45668 +}
45669 +
45670 +void
45671 +ep4comms_flush_start (EP4_COMMS_RAIL *commsRail)
45672 +{
45673 +    kmutex_lock (&commsRail->r_flush_mutex);
45674 +}
45675 +
45676 +void
45677 +ep4comms_flush_wait (EP4_COMMS_RAIL *commsRail)
45678 +{
45679 +    unsigned long flags;
45680 +
45681 +    ep4_wait_event_cmd (commsRail->r_flush_mcq, 
45682 +                       commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event),
45683 +                       E4_EVENT_INIT_VALUE (-32 * commsRail->r_flush_count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),
45684 +                       commsRail->r_flush_ecq->ecq_addr, 
45685 +                       INTERRUPT_CMD | (commsRail->r_flush_intcookie.int_val << E4_MAIN_INT_SHIFT));
45686 +
45687 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags);
45688 +    while (commsRail->r_flush_count != 0)
45689 +       kcondvar_wait (&commsRail->r_flush_sleep, &commsRail->r_flush_lock, &flags);
45690 +    spin_unlock_irqrestore (&commsRail->r_flush_lock, flags);
45691 +    
45692 +    kmutex_unlock (&commsRail->r_flush_mutex);
45693 +}
45694 +
45695 +void
45696 +ep4comms_flush_setevent (EP4_COMMS_RAIL *commsRail, ELAN4_CQ *cq)
45697 +{
45698 +    unsigned long flags;
45699 +
45700 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags);
45701 +
45702 +    elan4_set_event_cmd (cq, commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event));
45703 +    
45704 +    commsRail->r_flush_count++;
45705 +    
45706 +    spin_unlock_irqrestore (&commsRail->r_flush_lock, flags);
45707 +}
45708 +
45709 +void
45710 +ep4comms_flush_callback (void *arg, statemap_t *map)
45711 +{
45712 +    EP4_COMMS_RAIL   *commsRail = (EP4_COMMS_RAIL *) arg;
45713 +    EP_COMMS_SUBSYS  *subsys    = commsRail->r_generic.Subsys;
45714 +    EP4_RAIL        *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
45715 +    unsigned int     rnum       = rail->r_generic.Number;
45716 +    struct list_head *el;
45717 +
45718 +    /*
45719 +     * We stall the retry thread from CB_FLUSH_FILTERING until
45720 +     * we've finished CB_FLUSH_FLUSHING to ensure that sten 
45721 +     * packets cannot be retried while we flush them
45722 +     * through.
45723 +     */
45724 +    switch (rail->r_generic.CallbackStep)
45725 +    {
45726 +    case EP_CB_FLUSH_FILTERING:
45727 +       ep_kthread_stall (&rail->r_retry_thread);
45728 +
45729 +       ep4comms_flush_start (commsRail);
45730 +       break;
45731 +
45732 +    case EP_CB_FLUSH_FLUSHING:
45733 +       break;
45734 +    }
45735 +
45736 +    kmutex_lock (&subsys->Lock);
45737 +    list_for_each (el, &subsys->Transmitters) {
45738 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45739 +       
45740 +       if (xmtr->Rails[rnum])
45741 +           ep4xmtr_flush_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
45742 +    }
45743 +
45744 +    list_for_each (el, &subsys->Receivers) {
45745 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45746 +       
45747 +       if (rcvr->Rails[rnum])
45748 +           ep4rcvr_flush_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
45749 +    }
45750 +    kmutex_unlock (&subsys->Lock);
45751 +
45752 +    switch (rail->r_generic.CallbackStep)
45753 +    {
45754 +    case EP_CB_FLUSH_FILTERING:
45755 +       ep4comms_flush_wait (commsRail);
45756 +       break;
45757 +
45758 +    case EP_CB_FLUSH_FLUSHING:
45759 +       ep_kthread_resume (&rail->r_retry_thread);
45760 +       break;
45761 +    }
45762 +}
45763 +
45764 +void
45765 +ep4comms_failover_callback (void *arg, statemap_t *map)
45766 +{
45767 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
45768 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
45769 +    unsigned int     rnum       = commsRail->Rail->Number;
45770 +    struct list_head *el;
45771 +
45772 +    kmutex_lock (&subsys->Lock);
45773 +    list_for_each (el, &subsys->Transmitters) {
45774 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45775 +       
45776 +       if (xmtr->Rails[rnum])
45777 +           ep4xmtr_failover_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
45778 +    }
45779 +
45780 +    list_for_each (el, &subsys->Receivers) {
45781 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45782 +       
45783 +       if (rcvr->Rails[rnum])
45784 +           ep4rcvr_failover_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
45785 +    }
45786 +    kmutex_unlock (&subsys->Lock);
45787 +}
45788 +
45789 +void
45790 +ep4comms_disconnect_callback (void *arg, statemap_t *map)
45791 +{
45792 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
45793 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
45794 +    unsigned int     rnum       = commsRail->Rail->Number;
45795 +    struct list_head *el;
45796 +
45797 +    kmutex_lock (&subsys->Lock);
45798 +    list_for_each (el, &subsys->Transmitters) {
45799 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45800 +       
45801 +       if (xmtr->Rails[rnum])
45802 +           ep4xmtr_disconnect_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
45803 +    }
45804 +
45805 +    list_for_each (el, &subsys->Receivers) {
45806 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45807 +       
45808 +       if (rcvr->Rails[rnum])
45809 +           ep4rcvr_disconnect_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
45810 +    }
45811 +    kmutex_unlock (&subsys->Lock);
45812 +}
45813 +
45814 +void
45815 +ep4comms_neterr_callback (EP4_RAIL *rail, void *arg, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
45816 +{
45817 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
45818 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
45819 +    unsigned int     rnum       = commsRail->Rail->Number;
45820 +    struct list_head *el;
45821 +    
45822 +    /* First - stall the retry thread, so that it will no longer restart 
45823 +     *         any sten packets from the retry lists */
45824 +    ep_kthread_stall (&rail->r_retry_thread);
45825 +
45826 +    ep4comms_flush_start ((EP4_COMMS_RAIL *) commsRail);
45827 +
45828 +    /* Second - flush through all command queues for xmtrs and rcvrs */
45829 +    kmutex_lock (&subsys->Lock);
45830 +    list_for_each (el, &subsys->Transmitters) {
45831 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45832 +       
45833 +       if (xmtr->Rails[rnum])
45834 +           ep4xmtr_neterr_flush (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum], nodeId, cookies);
45835 +    }
45836 +    
45837 +    list_for_each (el, &subsys->Receivers) {
45838 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45839 +       
45840 +       if (rcvr->Rails[rnum])
45841 +           ep4rcvr_neterr_flush (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum], nodeId, cookies);
45842 +    }
45843 +    kmutex_unlock (&subsys->Lock);
45844 +
45845 +    /* Third - wait for flush to complete */
45846 +    ep4comms_flush_wait ((EP4_COMMS_RAIL *) commsRail);
45847 +    
45848 +    /* Fourth - flush through all command queues */
45849 +    ep4_flush_ecqs (rail);
45850 +    
45851 +    /* Fifth - search all the retry lists for the network error cookies */
45852 +    kmutex_lock (&subsys->Lock);
45853 +    list_for_each (el, &subsys->Transmitters) {
45854 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45855 +       
45856 +       if (xmtr->Rails[rnum])
45857 +           ep4xmtr_neterr_check (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum], nodeId, cookies);
45858 +    }
45859 +
45860 +    list_for_each (el, &subsys->Receivers) {
45861 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45862 +       
45863 +       if (rcvr->Rails[rnum])
45864 +           ep4rcvr_neterr_check (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum], nodeId, cookies);
45865 +    }
45866 +    kmutex_unlock (&subsys->Lock);
45867 +
45868 +    ep_kthread_resume (&rail->r_retry_thread);
45869 +}
45870 +
45871 +
45872 +EP_COMMS_RAIL *
45873 +ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r)
45874 +{
45875 +    EP4_RAIL       *rail = (EP4_RAIL *)r;
45876 +    ELAN4_DEV      *dev  = rail->r_ctxt.ctxt_dev;
45877 +    EP4_COMMS_RAIL *commsRail;
45878 +    E4_InputQueue   qdesc;
45879 +    int i;
45880 +
45881 +    KMEM_ZALLOC (commsRail, EP4_COMMS_RAIL *,sizeof (EP4_COMMS_RAIL), 1);
45882 +
45883 +    if (commsRail == NULL)
45884 +       return NULL;
45885 +    
45886 +    commsRail->r_generic.Ops.DelRail          = ep4comms_del_rail;
45887 +    commsRail->r_generic.Ops.DisplayRail      = ep4comms_display_rail;
45888 +    commsRail->r_generic.Ops.Rcvr.AddRail     = ep4rcvr_add_rail;
45889 +    commsRail->r_generic.Ops.Rcvr.DelRail     = ep4rcvr_del_rail;
45890 +    commsRail->r_generic.Ops.Rcvr.Check       = ep4rcvr_check;
45891 +    commsRail->r_generic.Ops.Rcvr.QueueRxd    = ep4rcvr_queue_rxd;
45892 +    commsRail->r_generic.Ops.Rcvr.RpcPut      = ep4rcvr_rpc_put;
45893 +    commsRail->r_generic.Ops.Rcvr.RpcGet      = ep4rcvr_rpc_get;
45894 +    commsRail->r_generic.Ops.Rcvr.RpcComplete = ep4rcvr_rpc_complete;
45895 +
45896 +    commsRail->r_generic.Ops.Rcvr.StealRxd    = ep4rcvr_steal_rxd;
45897 +
45898 +    commsRail->r_generic.Ops.Rcvr.DisplayRcvr = ep4rcvr_display_rcvr;
45899 +    commsRail->r_generic.Ops.Rcvr.DisplayRxd  = ep4rcvr_display_rxd;
45900 +
45901 +    commsRail->r_generic.Ops.Rcvr.FillOutRailStats = ep4rcvr_fillout_rail_stats;
45902 +
45903 +    commsRail->r_generic.Ops.Xmtr.AddRail     = ep4xmtr_add_rail;
45904 +    commsRail->r_generic.Ops.Xmtr.DelRail     = ep4xmtr_del_rail;
45905 +    commsRail->r_generic.Ops.Xmtr.Check       = ep4xmtr_check;
45906 +    commsRail->r_generic.Ops.Xmtr.BindTxd     = ep4xmtr_bind_txd;
45907 +    commsRail->r_generic.Ops.Xmtr.UnbindTxd   = ep4xmtr_unbind_txd;
45908 +    commsRail->r_generic.Ops.Xmtr.PollTxd     = ep4xmtr_poll_txd;
45909 +    commsRail->r_generic.Ops.Xmtr.CheckTxdState = ep4xmtr_check_txd_state;
45910 +
45911 +    commsRail->r_generic.Ops.Xmtr.DisplayXmtr = ep4xmtr_display_xmtr;
45912 +    commsRail->r_generic.Ops.Xmtr.DisplayTxd  = ep4xmtr_display_txd;
45913 +
45914 +    commsRail->r_generic.Ops.Xmtr.FillOutRailStats = ep4xmtr_fillout_rail_stats;
45915 +
45916 +    /* Allocate command queue space for flushing (1 dword for interrupt + 4 dwords for waitevent) */
45917 +    if ((commsRail->r_flush_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1)) == NULL)
45918 +    {
45919 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
45920 +       return NULL;
45921 +    }
45922 +
45923 +    if ((commsRail->r_flush_mcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4)) == NULL)
45924 +    {
45925 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
45926 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
45927 +       return NULL;
45928 +    }
45929 +
45930 +    /* Allocate and initialise the elan memory part */
45931 +    if ((commsRail->r_elan = ep_alloc_elan (r, EP4_COMMS_RAIL_ELAN_SIZE, 0, &commsRail->r_elan_addr)) == (sdramaddr_t) 0)
45932 +    {
45933 +       ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
45934 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
45935 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
45936 +       return NULL;
45937 +    }
45938 +
45939 +    ep4_register_intcookie (rail, &commsRail->r_flush_intcookie, commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event),
45940 +                           ep4comms_flush_interrupt, commsRail);
45941 +
45942 +    elan4_sdram_writeq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_CountAndType),
45943 +                       E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
45944 +
45945 +
45946 +    /* Allocate and initialise all the queue descriptors as "full" with no event */
45947 +    if ((commsRail->r_descs = ep_alloc_memory_elan (r, EP_EPCOMMS_QUEUE_BASE, roundup (EP_MSG_NSVC * EP_QUEUE_DESC_SIZE, SDRAM_PAGE_SIZE), EP_PERM_ALL, 0)) == (sdramaddr_t) 0)
45948 +    {
45949 +       ep_free_elan (r, commsRail->r_elan_addr, EP4_COMMS_RAIL_ELAN_SIZE);
45950 +       ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
45951 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
45952 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
45953 +       return NULL;
45954 +    }
45955 +
45956 +    qdesc.q_bptr    = 0;
45957 +    qdesc.q_fptr    = 8;
45958 +    qdesc.q_control = E4_InputQueueControl (qdesc.q_bptr,qdesc.q_fptr, 8);
45959 +    qdesc.q_event   = 0;
45960 +
45961 +    for (i = 0; i < EP_MSG_NSVC; i++)
45962 +       elan4_sdram_copyq_to_sdram (rail->r_ctxt.ctxt_dev, &qdesc, commsRail->r_descs + (i * EP_QUEUE_DESC_SIZE),
45963 +                                   sizeof (E4_InputQueue));
45964 +
45965 +    kmutex_init (&commsRail->r_flush_mutex);
45966 +    spin_lock_init (&commsRail->r_flush_lock);
45967 +    kcondvar_init (&commsRail->r_flush_sleep);
45968 +
45969 +    ep_register_callback (r, EP_CB_FLUSH_FILTERING, ep4comms_flush_callback,      commsRail);
45970 +    ep_register_callback (r, EP_CB_FLUSH_FLUSHING,  ep4comms_flush_callback,      commsRail);
45971 +    ep_register_callback (r, EP_CB_FAILOVER,        ep4comms_failover_callback,   commsRail);
45972 +    ep_register_callback (r, EP_CB_DISCONNECTING,   ep4comms_disconnect_callback, commsRail);
45973 +
45974 +    commsRail->r_neterr_ops.op_func = ep4comms_neterr_callback;
45975 +    commsRail->r_neterr_ops.op_arg  = commsRail;
45976 +    
45977 +    ep4_add_neterr_ops (rail, &commsRail->r_neterr_ops);
45978 +
45979 +    return (EP_COMMS_RAIL *) commsRail;
45980 +}
45981 +
45982 +void
45983 +ep4comms_del_rail (EP_COMMS_RAIL *r)
45984 +{
45985 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) r;
45986 +    EP4_RAIL       *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
45987 +
45988 +    ep_remove_callback (&rail->r_generic, EP_CB_FLUSH_FILTERING, ep4comms_flush_callback,      commsRail);
45989 +    ep_remove_callback (&rail->r_generic, EP_CB_FLUSH_FLUSHING,  ep4comms_flush_callback,      commsRail);
45990 +    ep_remove_callback (&rail->r_generic, EP_CB_FAILOVER,        ep4comms_failover_callback,   commsRail);
45991 +    ep_remove_callback (&rail->r_generic, EP_CB_DISCONNECTING,   ep4comms_disconnect_callback, commsRail);
45992 +
45993 +    kcondvar_destroy (&commsRail->r_flush_sleep);
45994 +    spin_lock_destroy (&commsRail->r_flush_lock);
45995 +    kmutex_destroy (&commsRail->r_flush_mutex);
45996 +
45997 +    ep_free_memory_elan (&rail->r_generic, EP_EPCOMMS_QUEUE_BASE);
45998 +    ep_free_elan (&rail->r_generic, commsRail->r_elan_addr, EP4_COMMS_RAIL_ELAN_SIZE);
45999 +
46000 +    ep4_deregister_intcookie (rail, &commsRail->r_flush_intcookie);
46001 +
46002 +    ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
46003 +    ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
46004 +
46005 +    KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
46006 +}
46007 +
46008 +void
46009 +ep4comms_display_rail (EP_COMMS_RAIL *r)
46010 +{
46011 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) r;
46012 +    EP4_RAIL       *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
46013 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
46014 +    
46015 +    ep4_display_rail (rail);
46016 +
46017 +    ep_debugf (DBG_DEBUG, "   flush count=%d mcq=%p ecq=%p event %llx.%llx.%llx\n", 
46018 +              commsRail->r_flush_count, commsRail->r_flush_mcq, commsRail->r_flush_ecq,
46019 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_CountAndType)),
46020 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_WritePtr)),
46021 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_WriteValue)));
46022 +}
46023 +
46024 +/*
46025 + * Local variables:
46026 + * c-file-style: "stroustrup"
46027 + * End:
46028 + */
46029 Index: linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan4.h
46030 ===================================================================
46031 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcomms_elan4.h      2004-02-23 16:02:56.000000000 -0500
46032 +++ linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan4.h   2005-06-01 23:12:54.641432720 -0400
46033 @@ -0,0 +1,470 @@
46034 +/*
46035 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46036 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46037 + *
46038 + *    For licensing information please see the supplied COPYING file
46039 + *
46040 + */
46041 +
46042 +#ifndef __EPCOMMS_ELAN4_H
46043 +#define __EPCOMMS_ELAN4_H
46044 +
46045 +#ident "@(#)$Id: epcomms_elan4.h,v 1.13.2.1 2004/11/12 10:54:51 mike Exp $"
46046 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4.h,v $ */
46047 +
46048 +
46049 +#include <elan4/types.h>
46050 +
46051 +/*
46052 + * Elan4 spinlocks are a pair of 64 bit words, one in elan sdram and one in main memory
46053 + * the sdram word holds the thread sequence number in the bottom 32 bits and the main
46054 + * lock in the top 32 bits.  The main memory word holds the sequence number only in
46055 + * its bottom 32 bits */
46056 +
46057 +typedef volatile E4_uint64 EP4_SPINLOCK_MAIN;
46058 +typedef volatile E4_uint64 EP4_SPINLOCK_ELAN;
46059 +
46060 +#define EP4_SPINLOCK_SEQ       0
46061 +#define EP4_SPINLOCK_MLOCK     4
46062 +
46063 +#if defined(__elan4__)
46064 +
46065 +#define EP4_SPINENTER(CPORT,SLE,SLM) \
46066 +do { \
46067 +    register long tmp; \
46068 +\
46069 +    asm volatile ("ld4         [%1], %0\n" \
46070 +                 "inc          %0\n" \
46071 +                 "st4          %0, [%1]\n" \
46072 +                 "ld4          [%1 + 4], %0\n" \
46073 +                 "srl8,byte    %0, 4, %0\n" \
46074 +                 : /* outputs */ "=r" (tmp)  \
46075 +                 : /* inputs */ "r" (SLE), "r" (SLM)); \
46076 +\
46077 +    if (tmp) \
46078 +       ep4_spinblock (CPORT,SLE, SLM); \
46079 +} while (0)
46080 +
46081 +extern void ep4_spinblock(E4_uint64 *cport, EP4_SPINLOCK_ELAN *sle, EP4_SPINLOCK_MAIN *slm);
46082 +
46083 +#define EP4_SPINEXIT(CPORT,SLE,SLM) \
46084 +do { \
46085 +    register long tmp; \
46086 +\
46087 +    asm volatile ("ld4         [%1], %0\n" \
46088 +                 "st4          %0, [%2]\n" \
46089 +                 : /* outputs */ "=r" (tmp) \
46090 +                 : /* inputs */ "r" (SLE), "r" (SLM)); \
46091 +} while (0)
46092 +
46093 +#else
46094 +
46095 +#define EP4_SPINENTER(DEV,SLE,SLM) \
46096 +do { \
46097 +    uint32_t seq; \
46098 +\
46099 +    mb(); \
46100 +    elan4_sdram_writel (DEV, (SLE) + EP4_SPINLOCK_MLOCK, 1); \
46101 +    mb(); \
46102 +    while ((seq = elan4_sdram_readl (DEV, (SLE) + EP4_SPINLOCK_SEQ)) != *((uint32_t *) (SLM))) \
46103 +    { \
46104 +       while (*((uint32_t *) (SLM)) == (seq - 1)) \
46105 +       { \
46106 +           mb(); \
46107 +           DELAY(1); \
46108 +       } \
46109 +    } \
46110 +} while (0)
46111 +
46112 +#define EP4_SPINEXIT(DEV,SLE,SLM) \
46113 +do { \
46114 +    wmb(); \
46115 +    elan4_sdram_writel (DEV, (SLE) + EP4_SPINLOCK_MLOCK, 0); \
46116 +} while (0)
46117 +
46118 +#endif /* !defined(__elan4__) */
46119 +
46120 +#define EP4_STEN_RETRYCOUNT    16
46121 +#define EP4_DMA_RETRYCOUNT     16
46122 +
46123 +typedef struct ep4_intr_cmd
46124 +{
46125 +    E4_uint64          c_write_cmd;
46126 +    E4_uint64          c_write_value;
46127 +    E4_uint64          c_intr_cmd;
46128 +} EP4_INTR_CMD;
46129 +
46130 +#define        EP4_INTR_CMD_NDWORDS    (sizeof (EP4_INTR_CMD) / 8)
46131 +
46132 +typedef struct ep4_rxd_sten_cmd
46133 +{
46134 +    E4_uint64          c_open;
46135 +
46136 +    E4_uint64          c_trans;
46137 +    E4_uint64          c_cookie;
46138 +    E4_uint64          c_dma_typeSize;
46139 +    E4_uint64          c_dma_cookie;
46140 +    E4_uint64          c_dma_vproc;
46141 +    E4_uint64          c_dma_srcAddr;
46142 +    E4_uint64          c_dma_dstAddr;
46143 +    E4_uint64          c_dma_srcEvent;
46144 +    E4_uint64          c_dma_dstEvent;
46145 +
46146 +    E4_uint64          c_ok_guard;
46147 +    E4_uint64          c_ok_write_cmd;
46148 +    E4_uint64          c_ok_write_value;
46149 +    
46150 +    E4_uint64          c_fail_guard;
46151 +    E4_uint64          c_fail_setevent;
46152 +
46153 +    E4_uint64          c_nop_cmd;
46154 +} EP4_RXD_STEN_CMD;
46155 +
46156 +#define EP4_RXD_STEN_CMD_NDWORDS       (sizeof (EP4_RXD_STEN_CMD) / 8)
46157 +
46158 +typedef struct ep4_rxd_dma_cmd
46159 +{
46160 +    E4_uint64          c_dma_typeSize;
46161 +    E4_uint64          c_dma_cookie;
46162 +    E4_uint64          c_dma_vproc;
46163 +    E4_uint64          c_dma_srcAddr;
46164 +    E4_uint64          c_dma_dstAddr;
46165 +    E4_uint64          c_dma_srcEvent;
46166 +    E4_uint64          c_dma_dstEvent;
46167 +    E4_uint64          c_nop_cmd;
46168 +} EP4_RXD_DMA_CMD;
46169 +
46170 +#define EP4_RXD_DMA_CMD_NDWORDS                (sizeof (EP4_RXD_DMA_CMD) / 8)
46171 +#define EP4_RXD_START_CMD_NDWORDS      (sizeof (E4_ThreadRegs) / 8)
46172 +
46173 +typedef struct ep4_rxd_rail_elan
46174 +{
46175 +    EP4_RXD_STEN_CMD    rxd_sten[EP_MAXFRAG+1];
46176 +
46177 +    EP4_INTR_CMD       rxd_done_cmd;                           /* command stream issued by done event (aligned to 64 bytes) */
46178 +    E4_Addr            rxd_next;                               /* linked list when on pending list (pad to 32 bytes)*/
46179 +    E4_Event32         rxd_failed;                             /* event set when sten packet fails */
46180 +
46181 +    EP4_INTR_CMD        rxd_failed_cmd;                                /* command stream issued by fail event (aligned to 64 bytes) */
46182 +    E4_uint64          rxd_queued;                             /* rxd queuing thread has executed (pad to 32 bytes)*/
46183 +
46184 +    E4_Event32         rxd_start;                              /* event to set to fire off an event chain (used as chain[0]) */
46185 +    E4_Event32         rxd_chain[EP_MAXFRAG];                  /* chained events (aligned to 32 bytes) */
46186 +    E4_Event32         rxd_done;                               /* event to fire done command stream causing interrupt (used as chain[EP_MAXFRAG]) */
46187 +
46188 +    E4_Addr            rxd_rxd;                                /* elan address of EP4_RXD_MAIN */
46189 +    E4_Addr            rxd_main;                               /* elan address of EP4_RXD_RAIL_MAIN */
46190 +    E4_uint64          rxd_debug;                              /* thread debug value */
46191 +
46192 +    EP_NMD             rxd_buffer;                             /* Network mapping descriptor for receive data */
46193 +} EP4_RXD_RAIL_ELAN;
46194 +
46195 +#define EP4_RXD_RAIL_ELAN_SIZE roundup(sizeof (EP4_RXD_RAIL_ELAN), 64)
46196 +
46197 +typedef struct ep4_rxd_rail_main
46198 +{
46199 +    E4_uint64          rxd_sent[EP_MAXFRAG+1];                 /* sten packet sent */
46200 +    E4_uint64          rxd_failed;                             /* sten packet failed */
46201 +    E4_uint64          rxd_done;                               /* operation complete */
46202 +
46203 +    E4_Addr            rxd_scq;                                /* command port for scq */
46204 +} EP4_RXD_RAIL_MAIN;
46205 +
46206 +#define EP4_RXD_RAIL_MAIN_SIZE roundup(sizeof (EP4_RXD_RAIL_MAIN), 8)
46207 +
46208 +#if !defined(__elan4__)
46209 +typedef struct ep4_rxd_rail
46210 +{
46211 +    EP_RXD_RAIL                rxd_generic;
46212 +
46213 +    struct list_head    rxd_retry_link;
46214 +    unsigned long       rxd_retry_time;
46215 +
46216 +    EP4_INTCOOKIE      rxd_intcookie;
46217 +
46218 +    sdramaddr_t                rxd_elan;
46219 +    EP_ADDR            rxd_elan_addr;
46220 +    
46221 +    EP4_RXD_RAIL_MAIN  *rxd_main;
46222 +    EP_ADDR            rxd_main_addr;
46223 +
46224 +    EP4_ECQ           *rxd_ecq;                                /* cq with 128 bytes targeted by event */
46225 +    EP4_ECQ           *rxd_scq;                                /* cq with 8 bytes targeted by main/thread store */
46226 +} EP4_RXD_RAIL;
46227 +
46228 +#define EP4_NUM_RXD_PER_BLOCK  16
46229 +
46230 +typedef struct ep4_rxd_rail_block
46231 +{
46232 +    struct list_head   blk_link;
46233 +    EP4_RXD_RAIL       blk_rxds[EP4_NUM_RXD_PER_BLOCK];
46234 +} EP4_RXD_RAIL_BLOCK;
46235 +
46236 +#endif /* !defined(__elan4__) */
46237 +
46238 +typedef struct ep4_rcvr_rail_elan
46239 +{
46240 +    E4_uint64          rcvr_thread_stall[8];                   /* place for thread to stall */
46241 +    E4_Event32         rcvr_qevent;                            /* Input queue event */
46242 +    E4_Event32         rcvr_thread_halt;                       /* place for thread to halt */
46243 +
46244 +    volatile E4_Addr    rcvr_pending_tailp;                    /* list of pending rxd's (elan addr) */
46245 +    volatile E4_Addr   rcvr_pending_head;                      /*   -- this pair aligned to 16 bytes */
46246 +
46247 +    EP4_SPINLOCK_ELAN  rcvr_thread_lock;                       /* spinlock for thread processing loop */
46248 +
46249 +    E4_uint64          rcvr_stall_intcookie;                   /* interrupt cookie to use when requested to halt */
46250 +
46251 +    E4_uint64          rcvr_qbase;                             /* base of input queue */
46252 +    E4_uint64          rcvr_qlast;                             /* last item in input queue */
46253 +
46254 +    E4_uint64          rcvr_debug;                             /* thread debug value */
46255 +} EP4_RCVR_RAIL_ELAN;
46256 +
46257 +typedef struct ep4_rcvr_rail_main
46258 +{
46259 +    EP4_SPINLOCK_MAIN   rcvr_thread_lock;                      /* spinlock for thread processing loop */
46260 +} EP4_RCVR_RAIL_MAIN;
46261 +
46262 +#if !defined(__elan4__)
46263 +
46264 +typedef struct ep4_rcvr_rail_stats
46265 +{
46266 +    unsigned long some_stat;
46267 +} EP4_RCVR_RAIL_STATS;
46268 +
46269 +typedef struct ep4_rcvr_rail
46270 +{
46271 +    EP_RCVR_RAIL       rcvr_generic;                           /* generic portion */
46272 +    
46273 +    sdramaddr_t                rcvr_elan;
46274 +    EP_ADDR            rcvr_elan_addr;
46275 +
46276 +    EP4_RCVR_RAIL_MAIN *rcvr_main;
46277 +    EP_ADDR            rcvr_main_addr;
46278 +
46279 +    sdramaddr_t                rcvr_slots;                             /* input queue slots */
46280 +    EP_ADDR            rcvr_slots_addr;                        /*   and elan address */
46281 +
46282 +    EP_ADDR            rcvr_stack;                             /* stack for thread */
46283 +
46284 +    EP4_ECQ           *rcvr_ecq;                               /* command queue space for thread STEN packets */
46285 +    EP4_ECQ           *rcvr_resched;                           /* command queue space to reschedule the thread */
46286 +
46287 +    struct list_head    rcvr_freelist;                         /* freelist of per-rail receive descriptors */
46288 +    unsigned int        rcvr_freecount;                                /*   and number on free list */
46289 +    unsigned int        rcvr_totalcount;                               /*   total number created */
46290 +    spinlock_t          rcvr_freelock;                         /*   and lock for free list */
46291 +    struct list_head    rcvr_blocklist;                                /* list of receive descriptor blocks */
46292 +
46293 +    unsigned int        rcvr_freewaiting;                      /* waiting for descriptors to be freed */
46294 +    kcondvar_t         rcvr_freesleep;                         /*   and sleep here */
46295 +
46296 +    EP4_INTCOOKIE      rcvr_stall_intcookie;                   /* interrupt cookie for thread halt */
46297 +    unsigned char      rcvr_thread_halted;                     /* thread has been halted */
46298 +    unsigned char       rcvr_cleanup_waiting;                  /* waiting for cleanup */
46299 +    kcondvar_t          rcvr_cleanup_sleep;                    /*   and sleep here */
46300 +
46301 +    EP4_RETRY_OPS      rcvr_retryops;
46302 +
46303 +    struct list_head    rcvr_retrylist;                                /* list of rxd's to retry envelopes for */
46304 +    struct list_head    rcvr_polllist;                         /* list of rxd's to poll for completion */
46305 +    spinlock_t          rcvr_retrylock;
46306 +    
46307 +    EP4_RCVR_RAIL_STATS rcvr_stats;                             /* elan4 specific rcvr_rail stats */
46308 +
46309 +} EP4_RCVR_RAIL;
46310 +
46311 +#endif /* !defined(__elan4__) */
46312 +
46313 +typedef struct ep4_txd_rail_elan
46314 +{
46315 +    EP4_INTR_CMD        txd_env_cmd;                           /* command stream for envelope event (64 byte aligned) */
46316 +    E4_uint64          txd_pad0;                               /*  pad to 32 bytes */
46317 +    E4_Event32         txd_env;                                /* event set when STEN packet fails */
46318 +
46319 +    EP4_INTR_CMD       txd_done_cmd;                           /* command stream for done event (64 byte aligned) */
46320 +    E4_uint64          txd_pad1;                               /*  pad to 32 bytes */
46321 +    E4_Event32         txd_done;                               /* event set when transmit complete */
46322 +
46323 +    E4_Event32         txd_data;                               /* event set when xmit completes (=> phase becomes passive) */
46324 +} EP4_TXD_RAIL_ELAN;
46325 +
46326 +#define EP4_TXD_RAIL_ELAN_SIZE         roundup(sizeof(EP4_TXD_RAIL_ELAN), 64)
46327 +
46328 +typedef struct ep4_txd_rail_main
46329 +{
46330 +    E4_uint64          txd_env;
46331 +    E4_uint64          txd_data;
46332 +    E4_uint64          txd_done;
46333 +} EP4_TXD_RAIL_MAIN;
46334 +
46335 +#define EP4_TXD_RAIL_MAIN_SIZE         roundup(sizeof(EP4_TXD_RAIL_MAIN), 8)
46336 +
46337 +#if !defined (__elan4__)
46338 +typedef struct ep4_txd_rail
46339 +{
46340 +    EP_TXD_RAIL                txd_generic;
46341 +
46342 +    struct list_head    txd_retry_link;
46343 +    unsigned long      txd_retry_time;
46344 +
46345 +    EP4_INTCOOKIE      txd_intcookie;
46346 +
46347 +    sdramaddr_t                txd_elan;
46348 +    EP_ADDR            txd_elan_addr;
46349 +    
46350 +    EP4_TXD_RAIL_MAIN  *txd_main;
46351 +    EP_ADDR            txd_main_addr;
46352 +
46353 +    EP4_ECQ           *txd_ecq;
46354 +
46355 +    E4_uint64          txd_cookie;
46356 +} EP4_TXD_RAIL;
46357 +
46358 +#define EP4_NUM_TXD_PER_BLOCK  21
46359 +
46360 +typedef struct ep4_txd_rail_block
46361 +{
46362 +    struct list_head   blk_link;
46363 +    EP4_TXD_RAIL       blk_txds[EP4_NUM_TXD_PER_BLOCK];
46364 +} EP4_TXD_RAIL_BLOCK;
46365 +
46366 +typedef struct ep4_xmtr_rail_main
46367 +{
46368 +    E4_int64           xmtr_flowcnt;
46369 +} EP4_XMTR_RAIL_MAIN;
46370 +
46371 +typedef struct ep4_xmtr_rail_stats
46372 +{
46373 +    unsigned long some_stat;
46374 +} EP4_XMTR_RAIL_STATS;
46375 +
46376 +#define EP4_TXD_LIST_POLL      0
46377 +#define EP4_TXD_LIST_STALLED   1
46378 +#define EP4_TXD_LIST_RETRY     2
46379 +#define EP4_TXD_NUM_LISTS      3
46380 +typedef struct ep4_xmtr_rail
46381 +{
46382 +    EP_XMTR_RAIL       xmtr_generic;
46383 +
46384 +    EP4_XMTR_RAIL_MAIN *xmtr_main;
46385 +    EP_ADDR            xmtr_main_addr;
46386 +
46387 +    struct list_head    xmtr_freelist;
46388 +    unsigned int        xmtr_freecount;
46389 +    unsigned int        xmtr_totalcount;
46390 +    spinlock_t          xmtr_freelock;
46391 +    struct list_head    xmtr_blocklist;
46392 +    unsigned int        xmtr_freewaiting;
46393 +    kcondvar_t         xmtr_freesleep;
46394 +
46395 +    EP4_INTCOOKIE      xmtr_intcookie;                         /* interrupt cookie for "polled" descriptors */
46396 +
46397 +    ELAN4_CQ           *xmtr_cq;
46398 +    E4_int64           xmtr_flowcnt;
46399 +
46400 +    EP4_RETRY_OPS      xmtr_retryops;
46401 +
46402 +    struct list_head    xmtr_retrylist[EP4_TXD_NUM_LISTS];     /* list of txd's to retry envelopes for */
46403 +    struct list_head    xmtr_polllist;                         /* list of txd's to poll for completion */
46404 +    spinlock_t          xmtr_retrylock;
46405 +
46406 +    EP4_XMTR_RAIL_STATS stats;                                  /* elan4 specific xmtr rail stats */
46407 +} EP4_XMTR_RAIL;
46408 +
46409 +#define EP4_XMTR_CQSIZE                CQ_Size64K                              /* size of command queue for xmtr */
46410 +#define EP4_XMTR_FLOWCNT       (CQ_Size(EP4_XMTR_CQSIZE) / 512)        /* # of STEN packets which can fit in */
46411 +
46412 +typedef struct ep4_comms_rail_elan
46413 +{
46414 +    E4_Event32         r_flush_event;
46415 +} EP4_COMMS_RAIL_ELAN;
46416 +
46417 +#define EP4_COMMS_RAIL_ELAN_SIZE       roundup(sizeof (EP4_COMMS_RAIL_ELAN), 32)
46418 +
46419 +typedef struct ep4_comms_rail
46420 +{
46421 +    EP_COMMS_RAIL      r_generic;                              /* generic comms rail */
46422 +    sdramaddr_t                r_descs;                                /* input queue descriptors */
46423 +
46424 +    sdramaddr_t                r_elan;                                 /* elan portion */
46425 +    EP_ADDR            r_elan_addr;
46426 +
46427 +    kmutex_t           r_flush_mutex;                          /* sequentialise flush usage */
46428 +    EP4_INTCOOKIE       r_flush_intcookie;                     /* interrupt cookie to generate */
46429 +
46430 +    kcondvar_t         r_flush_sleep;                          /* place to sleep waiting */
46431 +    spinlock_t         r_flush_lock;                           /*   and spinlock to use */
46432 +
46433 +    unsigned int       r_flush_count;                          /* # setevents issued */
46434 +    EP4_ECQ           *r_flush_ecq;                            /* command queue for interrupt */
46435 +    EP4_ECQ           *r_flush_mcq;                            /* command queue to issue waitevent */
46436 +
46437 +    EP4_NETERR_OPS      r_neterr_ops;                          /* network error fixup ops */
46438 +} EP4_COMMS_RAIL;
46439 +
46440 +/* epcommsTx_elan4.c */
46441 +extern void           ep4xmtr_flush_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
46442 +extern void           ep4xmtr_failover_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
46443 +extern void           ep4xmtr_disconnect_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
46444 +
46445 +extern void          ep4xmtr_neterr_flush (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
46446 +extern void          ep4xmtr_neterr_check (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
46447 +
46448 +/* epcommsRx_elan4.c */
46449 +extern void           ep4rcvr_flush_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
46450 +extern void           ep4rcvr_failover_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
46451 +extern void           ep4rcvr_disconnect_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
46452 +
46453 +extern void          ep4rcvr_neterr_flush (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
46454 +extern void          ep4rcvr_neterr_check (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
46455 +
46456 +/* epcomms_elan4.c */
46457 +extern void           ep4comms_flush_start (EP4_COMMS_RAIL *commsRail);
46458 +extern void           ep4comms_flush_wait (EP4_COMMS_RAIL *commsRail);
46459 +extern void           ep4comms_flush_setevent (EP4_COMMS_RAIL *commsRail, ELAN4_CQ *cq);
46460 +
46461 +extern EP_COMMS_RAIL *ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r);
46462 +extern void           ep4comms_del_rail (EP_COMMS_RAIL *r);
46463 +extern void          ep4comms_display_rail (EP_COMMS_RAIL *r);
46464 +
46465 +/* epcommsTx_elan4.c */
46466 +extern int            ep4xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
46467 +extern void           ep4xmtr_unbind_txd (EP_TXD *txd, unsigned int phase);
46468 +extern int            ep4xmtr_poll_txd (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
46469 +extern long           ep4xmtr_check (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
46470 +extern void           ep4xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
46471 +extern void           ep4xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
46472 +extern int            ep4xmtr_check_txd_state(EP_TXD *txd);
46473 +
46474 +extern void           ep4xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
46475 +extern void           ep4xmtr_display_txd  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
46476 +
46477 +extern void           ep4xmtr_fillout_rail_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
46478 +
46479 +/* epcommsRx_elan4.c */
46480 +extern int           ep4rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
46481 +extern void          ep4rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
46482 +extern void          ep4rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
46483 +extern void          ep4rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
46484 +
46485 +extern EP_RXD       *ep4rcvr_steal_rxd (EP_RCVR_RAIL *rcvrRail);
46486 +
46487 +extern long          ep4rcvr_check (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
46488 +extern void           ep4rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
46489 +extern void           ep4rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
46490 +
46491 +extern void           ep4rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
46492 +extern void           ep4rcvr_display_rxd  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
46493 +
46494 +extern void           ep4rcvr_fillout_rail_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
46495 +
46496 +#endif /* !defined(__elan4__) */
46497 +
46498 +/*
46499 + * Local variables:
46500 + * c-file-style: "stroustrup"
46501 + * End:
46502 + */
46503 +#endif /* __EPCOMMS_ELAN4_H */
46504 Index: linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan4_thread.c
46505 ===================================================================
46506 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcomms_elan4_thread.c       2004-02-23 16:02:56.000000000 -0500
46507 +++ linux-2.4.21/drivers/net/qsnet/ep/epcomms_elan4_thread.c    2005-06-01 23:12:54.642432568 -0400
46508 @@ -0,0 +1,346 @@
46509 +/*
46510 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46511 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46512 + *
46513 + *    For licensing information please see the supplied COPYING file
46514 + *
46515 + */
46516 +
46517 +#ident "@(#)$Id: epcomms_elan4_thread.c,v 1.10.8.2 2004/09/28 10:36:51 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
46518 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4_thread.c,v $*/
46519 +
46520 +//#include <qsnet/types.h>
46521 +
46522 +typedef char           int8_t;
46523 +typedef unsigned char  uint8_t;
46524 +typedef short          int16_t;
46525 +typedef unsigned short uint16_t;
46526 +typedef int            int32_t;
46527 +typedef unsigned int   uint32_t;
46528 +typedef long           int64_t;
46529 +typedef unsigned long  uint64_t;
46530 +
46531 +#include <elan/nmh.h>
46532 +#include <elan/kcomm.h>
46533 +#include <elan/epcomms.h>
46534 +
46535 +#include <elan4/registers.h>
46536 +
46537 +#include "kcomm_vp.h"
46538 +#include "kcomm_elan4.h"
46539 +#include "epcomms_elan4.h"
46540 +
46541 +#include <elan4/trtype.h>
46542 +
46543 +/* assembler in epcomms_asm_elan4_thread.S */
46544 +extern void               c_waitevent_interrupt (E4_uint64 *cport, E4_Event32 *event, E4_uint64 count, E4_uint64 intcookie);
46545 +extern EP4_RXD_RAIL_ELAN *c_stall_thread (EP4_RCVR_RAIL_ELAN *rcvrRail);
46546 +
46547 +#define R32_to_R47             "%r32", "%r33", "%r34", "%r35", "%r36", "%r37", "%r38", "%r39", \
46548 +                               "%r40", "%r41", "%r42", "%r43", "%r44", "%r45", "%r46", "%r47"
46549 +#define R48_to_R63             "%r48", "%r49", "%r50", "%r51", "%r52", "%r53", "%r54", "%r55", \
46550 +                               "%r56", "%r57", "%r58", "%r59", "%r60", "%r61", "%r62", "%r63"
46551 +
46552 +/* proto types for code in asm_elan4_thread.S */
46553 +extern void c_waitevent (E4_uint64 *commandport, E4_Addr event, E4_uint64 count);
46554 +extern void c_reschedule(E4_uint64 *commandport);
46555 +
46556 +static inline unsigned long
46557 +c_load_u16(unsigned short *ptr)
46558 +{
46559 +    unsigned long value;
46560 +
46561 +    asm volatile ("ld2         [%1], %%r2\n"
46562 +                 "srl8,byte    %%r2, %1, %0\n"
46563 +                 "sll8         %0, 48, %0\n"
46564 +                 "srl8         %0, 48, %0\n"
46565 +                 : /* outputs */ "=r" (value) 
46566 +                 : /* inputs */ "r" (ptr)
46567 +                 : /* clobbered */ "%r2");
46568 +    return value;
46569 +}
46570 +
46571 +static inline unsigned long
46572 +c_load_u32(unsigned int *ptr)
46573 +{
46574 +    unsigned long value;
46575 +
46576 +    asm volatile ("ld4         [%1], %%r2\n"
46577 +                 "srl8,byte    %%r2, %1, %0\n"
46578 +                 "sll8         %0, 32, %0\n"
46579 +                 "srl8         %0, 32, %0\n"
46580 +                 : /* outputs */ "=r" (value) 
46581 +                 : /* inputs */ "r" (ptr)
46582 +                 : /* clobbered */ "%r2");
46583 +    return value;
46584 +}
46585 +
46586 +static inline void
46587 +c_store_u32(unsigned int *ptr, unsigned long value)
46588 +{
46589 +    asm volatile ("sll8,byte   %0, %1, %%r2\n"
46590 +                 "st4          %%r2, [%1]\n"
46591 +                 : /* no outputs */ 
46592 +                 : /* inputs */ "r" (value), "r" (ptr)
46593 +                 : /* clobbered */ "%r2");
46594 +}
46595 +
46596 +/* Reschedule the current Elan thread to the back of the run queue 
46597 + * if there is another one ready to run */
46598 +static inline void
46599 +c_yield (E4_uint64 *commandport)
46600 +{
46601 +    unsigned long rval;
46602 +
46603 +    asm volatile ("breaktest %0" : /* outputs */ "=r" (rval) : /* inputs */);
46604 +
46605 +    if (rval  & ICC_SIGNED_BIT)
46606 +       c_reschedule(commandport);
46607 +}
46608 +
46609 +/* Reschedule the current thread if we're in danger of exceeding the 
46610 + * thread instruction count */
46611 +static inline void
46612 +c_insn_check(E4_uint64 *commandport)
46613 +{
46614 +    unsigned long rval;
46615 +
46616 +    asm volatile ("breaktest %0" : /* outputs */ "=r" (rval) : /* inputs */);
46617 +
46618 +    if (rval & ICC_ZERO_BIT)
46619 +       c_reschedule(commandport);
46620 +}
46621 +
46622 +void
46623 +ep4_spinblock (E4_uint64 *cport, EP4_SPINLOCK_ELAN *sle, EP4_SPINLOCK_MAIN *slm)
46624 +{
46625 +    do {
46626 +       unsigned long val = *sle & 0xfffffffff;
46627 +
46628 +       *slm = val;                                     /* Release my lock */
46629 +       
46630 +       while (*sle >> 32)                              /* Wait until the main */
46631 +           c_yield(cport);                             /* releases the lock */
46632 +       
46633 +       c_store_u32 ((unsigned int *) sle, val + 1);    /* and try and relock */
46634 +    } while (*sle >> 32);
46635 +}
46636 +
46637 +#define RESCHED_AFTER_PKTS     ((CQ_Size(CQ_Size64K) / 128) - 1)
46638 +
46639 +void
46640 +ep4comms_rcvr (EP4_RAIL_ELAN *rail, EP4_RCVR_RAIL_ELAN *rcvrElan, EP4_RCVR_RAIL_MAIN *rcvrMain,
46641 +              E4_InputQueue *inputq, E4_uint64 *cport, E4_uint64 *resched)
46642 +{
46643 +    long count = 1;
46644 +    long fptr  = inputq->q_fptr;
46645 +
46646 +    for (;;)
46647 +    {
46648 +       c_waitevent (cport, inputq->q_event, -count << 5);
46649 +
46650 +       count = 0;
46651 +
46652 +       while (fptr != inputq->q_bptr)
46653 +       {
46654 +           EP_ENVELOPE        *env      = (EP_ENVELOPE *) fptr;
46655 +           unsigned long       nodeid   = c_load_u32 (&env->NodeId);
46656 +           unsigned long       opencmd  = OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(nodeid));
46657 +           unsigned long       vproc    = EP_VP_DATA(rail->r_nodeid);
46658 +           EP_ATTRIBUTE        attr     = c_load_u32 (&env->Attr);
46659 +           unsigned long       txdRail  = c_load_u32 (&env->TxdRail);
46660 +           unsigned long       nFrags   = c_load_u32 (&env->nFrags);
46661 +           E4_uint64           cookie   = rail->r_cookies[nodeid];
46662 +           unsigned long       srcevent = (EP_IS_RPC(attr) ? txdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_data) :
46663 +                                           txdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_done));
46664 +           EP4_RXD_RAIL_ELAN  *rxdElan;
46665 +           EP4_RXD_RAIL_MAIN  *rxdMain;
46666 +           EP_RXD_MAIN        *rxd;
46667 +           EP4_RXD_STEN_CMD   *sten;
46668 +           E4_Event32         *event;
46669 +           unsigned long       first;
46670 +           unsigned long       buffer;
46671 +           unsigned long       len;
46672 +           unsigned long       i;
46673 +
46674 +           EP4_SPINENTER(resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
46675 +
46676 +           if ((rxdElan = (EP4_RXD_RAIL_ELAN *) rcvrElan->rcvr_pending_head) == 0)
46677 +           {
46678 +               EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
46679 +
46680 +               rxdElan = c_stall_thread (rcvrElan);
46681 +
46682 +               EP4_SPINENTER(resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
46683 +           }
46684 +           
46685 +           if (c_load_u32 (&env->Version) != EP_ENVELOPE_VERSION)              /* envelope has been cancelled */
46686 +           {
46687 +               EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
46688 +               goto consume_envelope;
46689 +           }
46690 +
46691 +           rxd     = (EP_RXD_MAIN *) rxdElan->rxd_rxd;
46692 +           rxdMain = (EP4_RXD_RAIL_MAIN *) rxdElan->rxd_main;
46693 +           first   = (EP_MAXFRAG+1) - (( EP_IS_MULTICAST(attr) ? 1 : 0) + (nFrags == 0 ? 1 : nFrags));
46694 +           sten    = &rxdElan->rxd_sten[first];
46695 +           event   = &rxdElan->rxd_chain[first];
46696 +
46697 +           if (EP_IS_MULTICAST(attr))                          /* need to fetch broadcast bitmap */
46698 +           {
46699 +               sten->c_open          = opencmd;
46700 +               sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
46701 +               sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
46702 +               sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t), DMA_DataTypeWord, 0, EP4_DMA_RETRYCOUNT);
46703 +               sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
46704 +               sten->c_dma_vproc     = vproc;
46705 +               sten->c_dma_srcAddr   = c_load_u32 (&env->TxdMain.nmd_addr) + offsetof(EP_TXD_MAIN, Bitmap);
46706 +               sten->c_dma_dstAddr   = (E4_Addr) &rxd->Bitmap;
46707 +               sten->c_dma_srcEvent  = srcevent;
46708 +               sten->c_dma_dstEvent  = (E4_Addr) event;
46709 +
46710 +               event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS);
46711 +
46712 +               cookie += (EP4_COOKIE_INC << 1);
46713 +
46714 +               sten++; event++;
46715 +           }
46716 +
46717 +           if (nFrags == 0)
46718 +           {
46719 +               /* Generate an empty "get" DMA to accept the envelope and fire the rx handler */
46720 +               sten->c_open          = opencmd;
46721 +               sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
46722 +               sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
46723 +               sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(0, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
46724 +               sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
46725 +               sten->c_dma_vproc     = vproc;
46726 +               sten->c_dma_srcEvent  = srcevent;
46727 +               sten->c_dma_dstEvent  = (E4_Addr) event;
46728 +
46729 +               event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS);
46730 +
46731 +               len = 0;
46732 +
46733 +               cookie += (EP4_COOKIE_INC << 1);
46734 +           }
46735 +           else
46736 +           {
46737 +               /* Generate the DMA chain to fetch the data */
46738 +               for (i = 0, buffer = c_load_u32 (&rxdElan->rxd_buffer.nmd_addr), len = 0; i < nFrags; i++)
46739 +               {
46740 +                   unsigned long fragLen = c_load_u32 (&env->Frags[i].nmd_len);
46741 +
46742 +                   sten->c_open          = opencmd;
46743 +                   sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
46744 +                   sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
46745 +                   sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(fragLen, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
46746 +                   sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
46747 +                   sten->c_dma_vproc     = vproc;
46748 +                   sten->c_dma_srcAddr   = c_load_u32 (&env->Frags[i].nmd_addr);
46749 +                   sten->c_dma_dstAddr   = buffer;
46750 +                   sten->c_dma_srcEvent  = srcevent;
46751 +                   sten->c_dma_dstEvent  = (E4_Addr) event;
46752 +                   
46753 +                   event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS);
46754 +                   
46755 +                   buffer += fragLen;
46756 +                   len    += fragLen;
46757 +
46758 +                   cookie += (EP4_COOKIE_INC << 1);
46759 +
46760 +                   sten++; event++;
46761 +               }
46762 +               
46763 +               (--event)->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS);
46764 +
46765 +               if (c_load_u32 (&rxdElan->rxd_buffer.nmd_len) < len)
46766 +               {
46767 +                   /* The receive descriptor was too small for the message */
46768 +                   /* complete the message anyway,  but don't transfer any */
46769 +                   /* data,  we set the length to EP_MSG_TOO_BIG */
46770 +                   for (i = first, sten = &rxdElan->rxd_sten[first]; i <= EP_MAXFRAG; i++, sten++)
46771 +                       sten->c_dma_typeSize = E4_DMA_TYPE_SIZE(0, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
46772 +                   
46773 +                   len = EP_MSG_TOO_BIG;
46774 +               }
46775 +           }
46776 +           
46777 +           /* Stuff the first STEN packet into the command queue, there's always enough space, 
46778 +            * since we will insert a waitevent at least once for the queue size */
46779 +           asm volatile ("ld64         [%0], %%r32\n"
46780 +                         "ld64         [%0 + 64], %%r48\n"
46781 +                         "st64         %%r32, [%1]\n"
46782 +                         "st64         %%r48, [%1]\n"
46783 +                         : /* no outputs */
46784 +                         : /* inputs */ "r" (&rxdElan->rxd_sten[first]), "r" (cport)
46785 +                         : /* clobbered */ R32_to_R47, R48_to_R63);
46786 +
46787 +           /* remove the RXD from the pending list */
46788 +           if ((rcvrElan->rcvr_pending_head = rxdElan->rxd_next) == 0)
46789 +               rcvrElan->rcvr_pending_tailp = (E4_Addr)&rcvrElan->rcvr_pending_head;
46790 +
46791 +           /* mark as not queued */
46792 +           rxdElan->rxd_queued = 0;
46793 +
46794 +           /* copy down the envelope */
46795 +           if (EP_HAS_PAYLOAD(attr))
46796 +               asm volatile ("ld64     [%0],    %%r32\n"
46797 +                             "ld64     [%0+64], %%r48\n"
46798 +                             "st64     %%r32, [%1]\n"
46799 +                             "ld64     [%0+128], %%r32\n"
46800 +                             "st64     %%r48, [%1+64]\n"
46801 +                             "ld64     [%0+192], %%r48\n"
46802 +                             "st64     %%r32, [%1 + 128]\n"
46803 +                             "st64     %%r48, [%1 + 192]\n"
46804 +                             : /* no outputs */
46805 +                             : /* inputs */    "r" (env), "r" (&rxd->Envelope)
46806 +                             : /* clobbered */ R32_to_R47, R48_to_R63);
46807
46808 +           else
46809 +               asm volatile ("ld64     [%0],    %%r32\n"
46810 +                             "ld64     [%0+64], %%r48\n"
46811 +                             "st64     %%r32, [%1]\n"
46812 +                             "st64     %%r48, [%1+64]\n"
46813 +                             : /* no outputs */
46814 +                             : /* inputs */    "r" (env), "r" (&rxd->Envelope)
46815 +                             : /* clobbered */ R32_to_R47, R48_to_R63);
46816 +
46817 +           /* Store the message length to indicate that I've finished */
46818 +           c_store_u32 (&rxd->Len, len);
46819 +           
46820 +           /* Finally update the network error cookie */
46821 +           rail->r_cookies[nodeid] = cookie;
46822 +
46823 +           EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
46824 +
46825 +       consume_envelope:
46826 +           if (fptr != rcvrElan->rcvr_qlast)
46827 +               fptr += EP_INPUTQ_SIZE;
46828 +           else
46829 +               fptr = rcvrElan->rcvr_qbase;
46830 +
46831 +           if (! rcvrElan->rcvr_stall_intcookie)
46832 +               inputq->q_fptr = fptr;
46833 +
46834 +           if (++count >= RESCHED_AFTER_PKTS)
46835 +               break;
46836 +
46837 +           c_insn_check (cport);
46838 +       }
46839 +       
46840 +       if (rcvrElan->rcvr_stall_intcookie)
46841 +       {
46842 +           c_waitevent_interrupt (cport, &rcvrElan->rcvr_thread_halt, -(1 << 5), rcvrElan->rcvr_stall_intcookie);
46843 +           inputq->q_fptr = fptr;
46844 +
46845 +           count++;                                            /* one extra as we were given an extra set to wake us up */
46846 +       }
46847 +    }
46848 +}
46849 +
46850 +/*
46851 + * Local variables:
46852 + * c-file-style: "stroustrup"
46853 + * End:
46854 + */
46855 Index: linux-2.4.21/drivers/net/qsnet/ep/epcommsFwd.c
46856 ===================================================================
46857 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcommsFwd.c 2004-02-23 16:02:56.000000000 -0500
46858 +++ linux-2.4.21/drivers/net/qsnet/ep/epcommsFwd.c      2005-06-01 23:12:54.643432416 -0400
46859 @@ -0,0 +1,310 @@
46860 +/*
46861 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46862 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46863 + *
46864 + *    For licensing information please see the supplied COPYING file
46865 + *
46866 + */
46867 +
46868 +#ident "@(#)$Id: epcommsFwd.c,v 1.12 2004/08/16 12:21:15 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
46869 +/*      $Source: /cvs/master/quadrics/epmod/epcommsFwd.c,v $*/
46870 +
46871 +#include <qsnet/kernel.h>
46872 +
46873 +#include <elan/kcomm.h>
46874 +#include <elan/epsvc.h>
46875 +#include <elan/epcomms.h>
46876 +
46877 +#include "debug.h"
46878 +
46879 +unsigned int epcomms_forward_limit = 8;
46880 +
46881 +static void
46882 +GenerateTree (unsigned nodeId, unsigned lowId, unsigned  highId, bitmap_t *bitmap, 
46883 +             unsigned *parentp, unsigned *childrenp, int *nchildrenp)
46884 +{
46885 +    int i;
46886 +    int count;
46887 +    int branch;
46888 +    int nSub;
46889 +    int branchIndex;
46890 +    int parent;
46891 +    int nBranch;
46892 +    int rem;
46893 +    int self;
46894 +    int branchRatio;
46895 +    int node;
46896 +    int x, y, z;
46897 +
46898 +
46899 +#ifdef DEBUG_PRINTF
46900 +    {
46901 +#define OVERFLOW "...]"
46902 +#define LINESZ  128
46903 +       char space[LINESZ+1];
46904 +
46905 +       if (ep_sprintf_bitmap (space, LINESZ-strlen(OVERFLOW), bitmap, 0, 0, (highId - lowId)+1) != -1)
46906 +           strcat (space, OVERFLOW);
46907 +
46908 +       EPRINTF3 (DBG_FORWARD, "GenerateTree; elan node low=%d node high=%d bitmap=%s\n", lowId, highId, space);
46909 +#undef OVERFLOW
46910 +#undef LINESZ
46911 +    }
46912 +#endif
46913 +
46914 +    /* Count the number of nodes in the partition */
46915 +    /* and work out which one I am */
46916 +    for (count = 0, self = ELAN_INVALID_NODE, i = lowId; i <= highId; i++)
46917 +    {
46918 +       if (BT_TEST (bitmap, i-lowId))
46919 +       {
46920 +           if (i == nodeId)
46921 +               self = count;
46922 +           count++;
46923 +       }
46924 +    }
46925 +
46926 +    EPRINTF2 (DBG_FORWARD, "GenerateTree: count=%d self=%d\n", count, self);
46927 +
46928 +    if (count == 0 || self == ELAN_INVALID_NODE)
46929 +    {
46930 +       *parentp    = ELAN_INVALID_NODE;
46931 +       *nchildrenp = 0;
46932 +       return;
46933 +    }
46934 +
46935 +    /* search for position in tree */
46936 +    branchRatio = EP_TREE_ARITY;               /* branching ratio */
46937 +    branch      = 0;                           /* start with process 0 */
46938 +    nSub        = count;                       /* and whole tree */
46939 +    branchIndex = -1;                          /* my branch # in parent */
46940 +    parent      = -1;                          /* my parent's group index # */
46941 +    
46942 +    while (branch != self)                     /* descend process tree */
46943 +    {                                          /* until I find myself */
46944 +       parent = branch;
46945 +       branch++;                               /* parent + 1 = first born */
46946 +       nSub--;                                 /* set # descendents */
46947 +       
46948 +       rem  = nSub % branchRatio;
46949 +       nSub = nSub / branchRatio + 1;
46950 +       x = rem * nSub;
46951 +       y = self - branch;
46952 +       
46953 +       if (y < x)                              /* my first 'rem' branches have */
46954 +       {                                       /* 1 more descendent... */
46955 +           branchIndex = y / nSub;
46956 +           branch += branchIndex * nSub;
46957 +       }
46958 +       else                                    /* than the rest of my branches */
46959 +       {
46960 +           nSub--;
46961 +           z = (y - x) / nSub;
46962 +           branchIndex = rem + z;
46963 +           branch += x + z * nSub;
46964 +       }
46965 +    }
46966 +
46967 +    branch++;                                  /* my first born */
46968 +    nSub--;                                    /* total # of my descendents */
46969 +    /* leaves + their parents may have # children < branchRatio */
46970 +    nBranch = (nSub < branchRatio) ? nSub : branchRatio;       
46971 +
46972 +    EPRINTF2 (DBG_FORWARD, "GenerateTree: parent=%d nBranch=%d\n", parent, nBranch);
46973 +
46974 +    /* Now calculate the real elan id's of the parent and my children */
46975 +    if (parent == -1)
46976 +       *parentp = ELAN_INVALID_NODE;
46977 +    else
46978 +    {
46979 +       for (i = lowId, node = 0; i <= highId; i++)
46980 +       {
46981 +           if (BT_TEST(bitmap, i-lowId))
46982 +               if (node++ == parent)
46983 +                   break;
46984 +       }
46985 +       *parentp = i;
46986 +    }
46987 +
46988 +    for (i = lowId, branchIndex = 0, node = 0; branchIndex < nBranch && i <= highId; i++)
46989 +    {
46990 +       if (BT_TEST(bitmap, i-lowId))
46991 +       {
46992 +           if (node == branch)
46993 +           {
46994 +               branch = branch + nSub / branchRatio + ((branchIndex < (nSub % branchRatio)) ? 1 : 0);
46995 +
46996 +               childrenp[branchIndex++] = i;
46997 +           }
46998 +           node++;
46999 +       }
47000 +    }
47001 +
47002 +    *nchildrenp = branchIndex;
47003 +}
47004 +
47005 +static void
47006 +ForwardTxDone (EP_TXD *txd, void *arg, EP_STATUS status)
47007 +{
47008 +    EP_FWD_DESC     *desc   = (EP_FWD_DESC *) arg;
47009 +    EP_RXD          *rxd    = desc->Rxd;
47010 +    EP_COMMS_SUBSYS *subsys = rxd->Rcvr->Subsys;
47011 +    unsigned long    flags;
47012 +
47013 +    /* XXXX: if transmit fails, could step to next node in this subtree ? */
47014 +
47015 +    spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47016 +
47017 +    if (--desc->NumChildren > 0)
47018 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47019 +    else
47020 +    {
47021 +       rxd->Rcvr->ForwardRxdCount--;
47022 +
47023 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47024 +
47025 +       KMEM_FREE (desc, sizeof (EP_FWD_DESC));
47026 +
47027 +       rxd->Handler (rxd);
47028 +    }
47029 +}
47030 +
47031 +long
47032 +ep_forward_rxds (EP_COMMS_SUBSYS *subsys, long nextRunTime)
47033 +{
47034 +    unsigned long flags;
47035 +    int i, res;
47036 +
47037 +    spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47038 +    while (! list_empty (&subsys->ForwardDescList)) 
47039 +    {
47040 +       EP_RXD      *rxd     = (EP_RXD *) list_entry (subsys->ForwardDescList.next, EP_RXD, Link);
47041 +       EP_RXD_MAIN *rxdMain = rxd->RxdMain;
47042 +       EP_ENVELOPE *env     = &rxdMain->Envelope;
47043 +       EP_FWD_DESC *desc;
47044 +
47045 +       EPRINTF2 (DBG_FORWARD, "ep: forwarding rxd %p to range %x\n", rxd, env->Range);
47046 +
47047 +       list_del (&rxd->Link);
47048 +
47049 +       rxd->Rcvr->ForwardRxdCount++;
47050 +
47051 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47052 +
47053 +       KMEM_ALLOC (desc, EP_FWD_DESC *, sizeof (EP_FWD_DESC), 1);
47054 +
47055 +       if (desc == NULL)
47056 +       {
47057 +           spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47058 +           rxd->Rcvr->ForwardRxdCount--;
47059 +           spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47060 +
47061 +           rxd->Handler (rxd);
47062 +       }
47063 +       else
47064 +       {
47065 +           /* compute the spanning tree for this message */
47066 +           unsigned int destLo = EP_RANGE_LOW (env->Range);
47067 +           unsigned int destHi = EP_RANGE_HIGH (env->Range);
47068 +           unsigned int parent;
47069 +
47070 +           GenerateTree (subsys->Subsys.Sys->Position.pos_nodeid, destLo, destHi, rxdMain->Bitmap, &parent, desc->Children, &desc->NumChildren);
47071 +           
47072 +           if (desc->NumChildren == 0 || (epcomms_forward_limit && (rxd->Rcvr->ForwardRxdCount >= epcomms_forward_limit)))
47073 +           {
47074 +               EPRINTF5 (DBG_FORWARD, "ep; don't forward rxd %p to /%d (%d children/ %d forwarding (%d))\n",
47075 +                         rxd, rxd->Rcvr->Service, desc->NumChildren, rxd->Rcvr->ForwardRxdCount, epcomms_forward_limit);
47076 +
47077 +               spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47078 +               rxd->Rcvr->ForwardRxdCount--;
47079 +               spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47080 +
47081 +               KMEM_FREE (desc, sizeof (EP_FWD_DESC));
47082 +               
47083 +               rxd->Handler (rxd);
47084 +           }
47085 +           else
47086 +           {
47087 +               ep_nmd_subset (&desc->Data, &rxd->Data, 0, ep_rxd_len (rxd));
47088 +               desc->Rxd = rxd;
47089 +
47090 +               /* NOTE - cannot access 'desc' after last call to multicast, since it could complete
47091 +                *        and free the desc before we access it again.  Hence the reverse loop. */
47092 +               for (i = desc->NumChildren-1; i >= 0; i--)
47093 +               {
47094 +                   ASSERT (desc->Children[i] < subsys->Subsys.Sys->Position.pos_nodes);
47095 +
47096 +                   EPRINTF3 (DBG_FORWARD, "ep: forwarding rxd %p to node %d/%d\n", rxd, desc->Children[i], rxd->Rcvr->Service);
47097 +
47098 +                   if ((res = ep_multicast_forward (subsys->ForwardXmtr, desc->Children[i], rxd->Rcvr->Service, 0, 
47099 +                                                    ForwardTxDone, desc, env, EP_HAS_PAYLOAD(env->Attr) ? &rxdMain->Payload : NULL,  
47100 +                                                    rxdMain->Bitmap, &desc->Data, 1)) != EP_SUCCESS)
47101 +                   {
47102 +                       ep_debugf (DBG_FORWARD, "ep: ep_multicast_forward failed\n");
47103 +                       ForwardTxDone (NULL, desc, res);
47104 +                   }
47105 +               }
47106 +               
47107 +           }
47108 +       }
47109 +
47110 +       spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47111 +    }
47112 +    spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47113 +
47114 +    return (nextRunTime);
47115 +}
47116 +
47117 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47118 +void
47119 +ep_csum_rxds (EP_COMMS_SUBSYS *subsys)
47120 +{
47121 +    unsigned long flags;
47122
47123 +    spin_lock_irqsave (&subsys->CheckSumDescLock, flags);
47124 +    while (! list_empty (&subsys->CheckSumDescList)) 
47125 +    {
47126 +       EP_RXD      *rxd = (EP_RXD *) list_entry (subsys->CheckSumDescList.next, EP_RXD, CheckSumLink);
47127 +       EP_ENVELOPE *env = &rxd->RxdMain->Envelope;
47128 +
47129 +       list_del_init (&rxd->CheckSumLink);
47130 +       spin_unlock_irqrestore (&subsys->CheckSumDescLock, flags);
47131 +
47132 +       if (env->CheckSum) {
47133 +           EP_NMD nmd;
47134 +           uint32_t csum; 
47135 +
47136 +           ep_nmd_subset ( &nmd, &rxd->Data, 0, ep_rxd_len (rxd));
47137 +
47138 +           csum = ep_calc_check_sum(subsys->Subsys.Sys, env, &nmd, 1);
47139 +           if ( env->CheckSum  != csum ) {
47140 +               int f;
47141 +       
47142 +
47143 +               printk("Check Sum Error: env(0x%x,0x%x) data(0x%x,0x%x)\n", ((csum >> 16) & 0x7FFF), ((env->CheckSum >> 16) & 0x7FFF), 
47144 +                      (csum & 0xFFFF),  (env->CheckSum & 0xFFFF));
47145 +               printk("Check Sum Error: Sent : NodeId %u Range 0x%x Service %u Version 0x%x Attr 0x%x\n", env->NodeId, env->Range, rxd->Rcvr->Service, env->Version, env->Attr);
47146 +               printk("Check Sum Error: Sent : Xid Generation 0x%x Handle 0x%x Unique 0x%llx\n", env->Xid.Generation, env->Xid.Handle, env->Xid.Unique);
47147 +               printk("Check Sum Error: Sent : TxdRail 0x%x TxdMain nmd_addr 0x%x  nmd_len %u  nmd_attr 0x%x\n",  env->TxdRail, env->TxdMain.nmd_addr, env->TxdMain.nmd_len, env->TxdMain.nmd_attr ); 
47148 +               printk("Check Sum Error: Sent : nFrags %d \n", env->nFrags);
47149 +               for(f=0;f<env->nFrags;f++)
47150 +                   printk("Check Sum Error: Sent (%d): nmd_addr 0x%x   nmd_len %u   nmd_attr 0x%x\n", f,
47151 +                          env->Frags[f].nmd_addr, env->Frags[f].nmd_len, env->Frags[f].nmd_attr);
47152 +               printk("Check Sum Error: Recv : nmd_addr 0x%x   nmd_len %u   nmd_attr 0x%x\n",
47153 +                      nmd.nmd_addr, nmd.nmd_len, nmd.nmd_attr);
47154 +
47155 +           }
47156 +       }
47157 +       ep_rxd_received_now(rxd);
47158 +
47159 +       spin_lock_irqsave (&subsys->CheckSumDescLock, flags);
47160 +    }
47161 +    spin_unlock_irqrestore (&subsys->CheckSumDescLock, flags);
47162 +}
47163 +#endif
47164 +
47165 +/*
47166 + * Local variables:
47167 + * c-file-style: "stroustrup"
47168 + * End:
47169 + */
47170 Index: linux-2.4.21/drivers/net/qsnet/ep/epcommsRx.c
47171 ===================================================================
47172 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcommsRx.c  2004-02-23 16:02:56.000000000 -0500
47173 +++ linux-2.4.21/drivers/net/qsnet/ep/epcommsRx.c       2005-06-01 23:12:54.645432112 -0400
47174 @@ -0,0 +1,1205 @@
47175 +/*
47176 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
47177 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
47178 + *
47179 + *    For licensing information please see the supplied COPYING file
47180 + *
47181 + */
47182 +
47183 +#ident "@(#)$Id: epcommsRx.c,v 1.27.2.5 2004/11/30 12:02:16 mike Exp $ $Name: QSNETMODULES-4-30_20050128 $"
47184 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx.c,v $*/
47185 +
47186 +#include <qsnet/kernel.h>
47187 +#include <elan/kcomm.h>
47188 +#include <elan/epsvc.h>
47189 +#include <elan/epcomms.h>
47190 +
47191 +#include "debug.h"
47192 +
47193 +unsigned int ep_rxd_lowat = 5;
47194 +
47195 +static int
47196 +AllocateRxdBlock (EP_RCVR *rcvr, EP_ATTRIBUTE attr, EP_RXD **rxdp)
47197 +{
47198 +    EP_RXD_BLOCK *blk;
47199 +    EP_RXD       *rxd;
47200 +    EP_RXD_MAIN  *pRxdMain;
47201 +    int                  i;
47202 +    unsigned long flags;
47203 +
47204 +    KMEM_ZALLOC (blk, EP_RXD_BLOCK *, sizeof (EP_RXD_BLOCK), ! (attr & EP_NO_SLEEP));
47205 +
47206 +    if (blk == NULL)
47207 +       return (ENOMEM);
47208 +
47209 +    if ((pRxdMain = ep_shared_alloc_main (rcvr->Subsys->Subsys.Sys, EP_RXD_MAIN_SIZE * EP_NUM_RXD_PER_BLOCK, attr, &blk->NmdMain)) == (sdramaddr_t) 0)
47210 +    {
47211 +       KMEM_FREE (blk, sizeof (EP_RXD_BLOCK));
47212 +       return (ENOMEM);
47213 +    }
47214 +    
47215 +    for (rxd = &blk->Rxd[0], i = 0; i < EP_NUM_RXD_PER_BLOCK; i++, rxd++)
47216 +    {
47217 +       rxd->Rcvr        = rcvr;
47218 +       rxd->RxdMain     = pRxdMain;
47219 +
47220 +       ep_nmd_subset (&rxd->NmdMain, &blk->NmdMain, (i * EP_RXD_MAIN_SIZE), EP_RXD_MAIN_SIZE);
47221 +
47222 +       /* move onto next descriptor */
47223 +       pRxdMain = (EP_RXD_MAIN *) ((unsigned long) pRxdMain + EP_RXD_MAIN_SIZE);
47224 +    }
47225 +
47226 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
47227 +
47228 +    list_add  (&blk->Link, &rcvr->DescBlockList);
47229 +
47230 +    rcvr->TotalDescCount += EP_NUM_RXD_PER_BLOCK;
47231 +
47232 +    for (i = rxdp ? 1 : 0; i < EP_NUM_RXD_PER_BLOCK; i++)
47233 +    {
47234 +       
47235 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47236 +       INIT_LIST_HEAD (&blk->Rxd[i].CheckSumLink);
47237 +#endif
47238 +
47239 +       list_add (&blk->Rxd[i].Link, &rcvr->FreeDescList);
47240 +       
47241 +       rcvr->FreeDescCount++;
47242 +
47243 +       if (rcvr->FreeDescWanted)
47244 +       {
47245 +           rcvr->FreeDescWanted--;
47246 +           kcondvar_wakeupone (&rcvr->FreeDescSleep, &rcvr->FreeDescLock);
47247 +       }
47248 +    }
47249 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
47250 +    
47251 +    if (rxdp)
47252 +    {
47253 +
47254 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47255 +       INIT_LIST_HEAD (&blk->Rxd[0].CheckSumLink);
47256 +#endif
47257 +              
47258 +       *rxdp = &blk->Rxd[0];
47259 +    }
47260 +    return (ESUCCESS);
47261 +}
47262 +
47263 +static void
47264 +FreeRxdBlock (EP_RCVR *rcvr, EP_RXD_BLOCK *blk)
47265 +{
47266 +    unsigned long flags;
47267 +
47268 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
47269 +
47270 +    list_del (&blk->Link);
47271 +
47272 +    rcvr->TotalDescCount -= EP_NUM_RXD_PER_BLOCK;
47273 +    rcvr->FreeDescCount -= EP_NUM_RXD_PER_BLOCK;
47274 +
47275 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
47276 +
47277 +    ep_shared_free_main (rcvr->Subsys->Subsys.Sys, &blk->NmdMain);
47278 +    KMEM_FREE (blk, sizeof (EP_RXD_BLOCK));
47279 +}
47280 +
47281 +static EP_RXD *
47282 +GetRxd (EP_RCVR *rcvr, EP_ATTRIBUTE attr)
47283 +{
47284 +    EP_RXD *rxd;
47285 +    unsigned long flags;
47286 +    int low_on_rxds;
47287 +
47288 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
47289 +
47290 +    while (list_empty (&rcvr->FreeDescList))
47291 +    {
47292 +       if (! (attr & EP_NO_ALLOC))
47293 +       {
47294 +           spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
47295 +
47296 +           if (AllocateRxdBlock (rcvr, attr, &rxd) == ESUCCESS)
47297 +               return (rxd);
47298 +
47299 +           spin_lock_irqsave (&rcvr->FreeDescLock, flags);
47300 +       }
47301 +
47302 +       if (attr & EP_NO_SLEEP)
47303 +       {
47304 +           IncrStat (rcvr->Subsys, NoFreeRxds);
47305 +           spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
47306 +
47307 +           ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
47308 +           return (NULL);
47309 +       }
47310 +
47311 +       rcvr->FreeDescWanted++;
47312 +       kcondvar_wait (&rcvr->FreeDescSleep, &rcvr->FreeDescLock, &flags);
47313 +    }
47314 +
47315 +    rxd = list_entry (rcvr->FreeDescList.next, EP_RXD, Link);
47316 +
47317 +    list_del (&rxd->Link);
47318 +
47319 +    /* Wakeup the descriptor primer thread if there's not many left */
47320 +    low_on_rxds = (--rcvr->FreeDescCount < ep_rxd_lowat);
47321 +
47322 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
47323 +
47324 +    if (low_on_rxds)
47325 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
47326 +
47327 +    return (rxd);
47328 +}
47329 +
47330 +static void
47331 +FreeRxd (EP_RCVR *rcvr, EP_RXD *rxd)
47332 +{
47333 +    unsigned long flags;
47334 +
47335 +    ASSERT (EP_XID_INVALID(rxd->MsgXid));
47336 +
47337 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
47338 +
47339 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47340 +    ASSERT(list_empty(&rxd->CheckSumLink));
47341 +#endif
47342 +   
47343 +    list_add (&rxd->Link, &rcvr->FreeDescList);
47344 +
47345 +    rcvr->FreeDescCount++;
47346 +
47347 +    if (rcvr->FreeDescWanted)                                  /* someone waiting for a receive */
47348 +    {                                                          /* descriptor, so wake them up */
47349 +       rcvr->FreeDescWanted--;
47350 +       kcondvar_wakeupone (&rcvr->FreeDescSleep, &rcvr->FreeDescLock);
47351 +    }
47352 +    
47353 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
47354 +}
47355 +
47356 +int
47357 +ep_queue_receive (EP_RCVR *rcvr, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr)
47358 +{
47359 +    EP_RCVR_RAIL *rcvrRail;
47360 +    EP_RXD       *rxd;
47361 +    int           rnum;
47362 +    unsigned long flags;
47363 +
47364 +    if ((rxd = GetRxd (rcvr, attr)) == NULL)
47365 +       return (ENOMEM);
47366 +
47367 +    rxd->Handler      = handler;
47368 +    rxd->Arg          = arg;
47369 +    rxd->Data         = *nmd;
47370 +    rxd->RxdMain->Len = EP_RXD_PENDING;
47371 +    
47372 +    spin_lock_irqsave (&rcvr->Lock, flags);
47373 +
47374 +    list_add_tail (&rxd->Link, &rcvr->ActiveDescList);
47375 +    
47376 +    if (EP_IS_PREFRAIL_SET(attr))
47377 +       rnum = EP_ATTR2PREFRAIL(attr);
47378 +    else 
47379 +       rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(nmd));
47380 +
47381 +    if (rnum < 0 || !(EP_NMD_RAILMASK(nmd) & EP_RAIL2RAILMASK(rnum) & rcvr->RailMask))
47382 +       rcvrRail = NULL;
47383 +    else
47384 +       rcvrRail = rcvr->Rails[rnum];
47385 +
47386 +    EPRINTF7 (DBG_RCVR,"ep_queue_receive: rxd=%p svc %d nmd=%08x,%d,%x rnum=%d rcvrRail=%p\n",
47387 +             rxd, rcvr->Service, nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, rnum, rcvrRail);
47388 +
47389 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
47390 +
47391 +    if (rcvrRail == NULL || !EP_RCVR_OP (rcvrRail, QueueRxd) (rxd, rcvrRail))
47392 +    {
47393 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
47394 +
47395 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
47396 +    }
47397 +
47398 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
47399 +
47400 +    return (ESUCCESS);
47401 +}
47402 +
47403 +void
47404 +ep_requeue_receive (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr)
47405 +{
47406 +    EP_RCVR      *rcvr = rxd->Rcvr;
47407 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
47408 +    int           rnum = ep_pickRail(EP_NMD_RAILMASK(&rxd->Data));
47409 +    EP_RCVR_RAIL *rcvrRail;
47410 +    unsigned long flags;
47411 +
47412 +    ASSERT (rxd->RxdRail == NULL);
47413 +
47414 +    EPRINTF5 (DBG_RCVR,"ep_requeue_receive: rxd=%p svc %d nmd=%08x,%d,%x\n", 
47415 +             rxd, rcvr->Service, nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
47416 +
47417 +    rxd->Handler      = handler;
47418 +    rxd->Arg          = arg;
47419 +    rxd->Data         = *nmd;
47420 +    rxd->RxdMain->Len = EP_RXD_PENDING;
47421 +    
47422 +    spin_lock_irqsave (&rcvr->Lock, flags);
47423 +    
47424 +    list_add_tail (&rxd->Link, &rcvr->ActiveDescList);
47425 +
47426 +    /*
47427 +     * Rail selection: if they've asked for a particular rail, then use it, otherwise if
47428 +     *                 the rail it was last received on is mapped for the nmd and is available
47429 +     *                 then use that one, otherwise pick one that is mapped by the nmd.
47430 +     */
47431 +    if (EP_IS_PREFRAIL_SET(attr))
47432 +       rnum = EP_ATTR2PREFRAIL(attr);
47433 +    
47434 +    if (rnum < 0 || ! (EP_RAIL2RAILMASK (rnum) & EP_NMD_RAILMASK(nmd) & ep_rcvr_availrails (rcvr)))
47435 +       rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(nmd));
47436 +
47437 +    if (rnum < 0)
47438 +       rcvrRail = NULL;
47439 +    else
47440 +    {
47441 +       rcvrRail = rcvr->Rails[rnum];
47442 +
47443 +       if (! (EP_NMD_RAILMASK(&rxd->Data) & EP_RAIL2RAILMASK(rnum)) && ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rnum)) < 0)
47444 +           rcvrRail = NULL;
47445 +    }
47446 +
47447 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
47448 +
47449 +    if (rcvrRail == NULL || !EP_RCVR_OP(rcvrRail, QueueRxd) (rxd, rcvrRail))
47450 +    {
47451 +       EPRINTF1 (DBG_RCVR, "ep_requeue_receive: rcvrRail=%p - setting unbound\n", rcvrRail);
47452 +
47453 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
47454 +
47455 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
47456 +    }
47457 +
47458 +    if (rcvr->CleanupWaiting)
47459 +       kcondvar_wakeupall (&rcvr->CleanupSleep, &rcvr->Lock);
47460 +    rcvr->CleanupWaiting = 0;
47461 +
47462 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
47463 +}
47464 +
47465 +void
47466 +ep_complete_receive (EP_RXD *rxd)
47467 +{
47468 +    EP_RCVR *rcvr = rxd->Rcvr;
47469 +    unsigned long flags;
47470 +
47471 +    ASSERT (rxd->RxdRail == NULL && rxd->State == EP_RXD_COMPLETED);
47472 +
47473 +    /* release the completed rxd - must already be unbound from any rail */
47474 +    FreeRxd (rcvr, rxd);
47475 +
47476 +    /* if we're waiting for cleanup, then wake them up */
47477 +    spin_lock_irqsave (&rcvr->Lock, flags);
47478 +    if (rcvr->CleanupWaiting)
47479 +       kcondvar_wakeupall (&rcvr->CleanupSleep, &rcvr->Lock);
47480 +    rcvr->CleanupWaiting = 0;
47481 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
47482 +}
47483 +
47484 +int
47485 +ep_rpc_put (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *local, EP_NMD *remote, int nFrags)
47486 +{
47487 +    EP_RCVR      *rcvr = rxd->Rcvr;
47488 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
47489 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
47490 +    unsigned long flags;
47491 +
47492 +    spin_lock_irqsave (&rcvr->Lock, flags);
47493 +    
47494 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
47495 +    {
47496 +       EPRINTF2 (DBG_RCVR, "ep_rpc_put: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
47497 +       
47498 +       /* rxd no longer on the active list - nothing to undo, */
47499 +       /* just drop the lock and return an error */
47500 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
47501 +       
47502 +       return EP_CONN_RESET;
47503 +    }
47504 +    else
47505 +    {
47506 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
47507 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
47508 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
47509 +       EP_RAIL          *rail      = commsRail->Rail;
47510 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
47511 +       int               i;
47512 +       
47513 +       /* Attempt to ensure that the local nmds are mapped */
47514 +       for (i = 0; i < nFrags; i++)
47515 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
47516 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
47517 +    
47518 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
47519 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
47520 +       {
47521 +           rxd->State = EP_RXD_PUT_ACTIVE;
47522 +
47523 +           EP_RCVR_OP(rcvrRail, RpcPut) (rxd, local, remote, nFrags);
47524 +       }
47525 +       else
47526 +       {
47527 +           /* RPC completion cannot progress - either node is no longer connected on this 
47528 +            * rail or some of the source/destination NMDs are not mapped on this rail.
47529 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
47530 +           EPRINTF4 (DBG_RCVR, "%s: ep_rpc_put: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd,
47531 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
47532 +
47533 +           rxd->State = EP_RXD_PUT_STALLED;
47534 +
47535 +           if (nodeRail->State == EP_NODE_CONNECTED)
47536 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
47537 +       }
47538 +
47539 +       /* install the handler */
47540 +       rxd->Handler = handler;
47541 +       rxd->Arg     = arg;
47542 +       
47543 +       /* store the arguments */
47544 +       rxd->nFrags = nFrags;
47545 +       for (i = 0; i < nFrags; i++)
47546 +       {
47547 +           rxd->Local[i]  = local[i];
47548 +           rxd->Remote[i] = remote[i];
47549 +       }
47550 +    }
47551 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
47552 +
47553 +    return EP_SUCCESS;
47554 +}
47555 +
47556 +int
47557 +ep_rpc_get (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *remote, EP_NMD *local, int nFrags)
47558 +{
47559 +    EP_RCVR      *rcvr = rxd->Rcvr;
47560 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
47561 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
47562 +    unsigned long flags;
47563 +
47564 +    spin_lock_irqsave (&rcvr->Lock, flags);
47565 +    
47566 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
47567 +    {
47568 +       EPRINTF2 (DBG_RCVR, "ep_rpc_get: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
47569 +       
47570 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
47571 +
47572 +       return EP_CONN_RESET;
47573 +    }
47574 +    else
47575 +    {
47576 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
47577 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
47578 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
47579 +       EP_RAIL          *rail      = commsRail->Rail;
47580 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
47581 +       int               i;
47582 +       
47583 +       /* Attempt to ensure that the local nmds are mapped */
47584 +       for (i = 0; i < nFrags; i++)
47585 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
47586 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
47587 +
47588 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
47589 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
47590 +       {
47591 +           rxd->State = EP_RXD_GET_ACTIVE;
47592 +
47593 +           EP_RCVR_OP (rcvrRail, RpcGet) (rxd, local, remote, nFrags);
47594 +       }
47595 +       else
47596 +       {
47597 +           /* RPC completion cannot progress - either node is no longer connected on this 
47598 +            * rail or some of the source/destination NMDs are not mapped on this rail.
47599 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
47600 +           EPRINTF4 (DBG_RCVR, "%s: ep_rpc_get: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd, 
47601 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
47602 +           
47603 +           rxd->State = EP_RXD_GET_STALLED;
47604 +
47605 +           if (nodeRail->State == EP_NODE_CONNECTED)
47606 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
47607 +       }
47608 +
47609 +       /* install the handler */
47610 +       rxd->Handler = handler;
47611 +       rxd->Arg     = arg;
47612 +       
47613 +       /* store the arguments */
47614 +       rxd->nFrags = nFrags;
47615 +       for (i = 0; i < nFrags; i++)
47616 +       {
47617 +           rxd->Local[i]  = local[i];
47618 +           rxd->Remote[i] = remote[i];
47619 +       }
47620 +    }
47621 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
47622 +    
47623 +    return EP_SUCCESS;
47624 +}
47625 +
47626 +int
47627 +ep_complete_rpc (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_STATUSBLK *blk, EP_NMD *local, EP_NMD *remote, int nFrags)
47628 +{
47629 +    EP_RCVR      *rcvr = rxd->Rcvr;
47630 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
47631 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
47632 +    unsigned long flags;
47633 +
47634 +    spin_lock_irqsave (&rcvr->Lock, flags);
47635 +
47636 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
47637 +    {
47638 +       EPRINTF2 (DBG_RCVR, "ep_complete_rpc: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
47639 +       
47640 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
47641 +       return EP_CONN_RESET;
47642 +    }
47643 +    else
47644 +    {
47645 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
47646 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
47647 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
47648 +       EP_RAIL          *rail      = commsRail->Rail;
47649 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
47650 +       int               i;
47651 +
47652 +       /* no caller status block supplied - send back zeros */
47653 +       if (blk == NULL)
47654 +           bzero (&rxd->RxdMain->StatusBlk, sizeof (EP_STATUSBLK));
47655 +       else
47656 +           bcopy (blk, &rxd->RxdMain->StatusBlk, sizeof (EP_STATUSBLK));
47657 +
47658 +       /* Attempt to ensure that the local nmds are mapped */
47659 +       for (i = 0; i < nFrags; i++)
47660 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
47661 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
47662 +
47663 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
47664 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
47665 +       {
47666 +           rxd->State = EP_RXD_COMPLETE_ACTIVE;
47667 +
47668 +           EP_RCVR_OP (rcvrRail, RpcComplete) (rxd, local, remote, nFrags);
47669 +       }
47670 +       else
47671 +       {
47672 +           /* RPC completion cannot progress - either node is no longer connected on this 
47673 +            * rail or some of the source/destination NMDs are not mapped on this rail.
47674 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
47675 +           EPRINTF4 (DBG_RCVR, "%s: ep_complete_rpc: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd, 
47676 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
47677 +
47678 +           rxd->State = EP_RXD_COMPLETE_STALLED;
47679 +
47680 +           if (nodeRail->State == EP_NODE_CONNECTED)
47681 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
47682 +       }
47683 +
47684 +       /* install the handler */
47685 +       rxd->Handler = handler;
47686 +       rxd->Arg     = arg;
47687 +       
47688 +       /* store the arguments */
47689 +       rxd->nFrags = nFrags;
47690 +       for (i = 0; i < nFrags; i++)
47691 +       {
47692 +           rxd->Local[i]  = local[i];
47693 +           rxd->Remote[i] = remote[i];
47694 +       }
47695 +    }
47696 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
47697 +       
47698 +    return (ESUCCESS); /* NOTE(review): siblings return EP_SUCCESS - confirm these are equivalent */
47699 +
47700 +/* functions for accessing fields of rxds */
47701 +void            *ep_rxd_arg(EP_RXD *rxd)               { return (rxd->Arg); }
47702 +int              ep_rxd_len(EP_RXD *rxd)               { return (rxd->RxdMain->Len); }
47703 +EP_STATUS       ep_rxd_status(EP_RXD *rxd)             { return (rxd->RxdMain->Len < 0 ? rxd->RxdMain->Len : EP_SUCCESS); }
47704 +int              ep_rxd_isrpc(EP_RXD *rxd)             { return (EP_IS_RPC(rxd->RxdMain->Envelope.Attr) != 0); }
47705 +EP_ENVELOPE     *ep_rxd_envelope(EP_RXD *rxd)          { return (&rxd->RxdMain->Envelope); }
47706 +EP_PAYLOAD      *ep_rxd_payload(EP_RXD *rxd)           { return (EP_HAS_PAYLOAD(rxd->RxdMain->Envelope.Attr) ? &rxd->RxdMain->Payload : NULL); }
47707 +int              ep_rxd_node(EP_RXD *rxd)              { return (rxd->RxdMain->Envelope.NodeId); }
47708 +EP_STATUSBLK    *ep_rxd_statusblk(EP_RXD *rxd)                 { return (&rxd->RxdMain->StatusBlk); }
47709 +EP_RAILMASK      ep_rxd_railmask(EP_RXD *rxd)          { return (rxd->Data.nmd_attr); }
47710 +
47711 +static void
47712 +ProcessNmdMapResponse (EP_RCVR *rcvr, EP_RXD *rxd, EP_MANAGER_MSG *msg)
47713 +{
47714 +    EP_RXD_RAIL  *rxdRail  = rxd->RxdRail;
47715 +    EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
47716 +    EP_RAIL      *rail     = rcvrRail->CommsRail->Rail;
47717 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[rxd->RxdMain->Envelope.NodeId];
47718 +    int           i;
47719 +
47720 +    ASSERT (msg->Body.MapNmd.nFrags == rxd->nFrags);
47721 +    
47722 +    for (i = 0; i < rxd->nFrags; i++)
47723 +       rxd->Remote[i] = msg->Body.MapNmd.Nmd[i];
47724 +    
47725 +    if (nodeRail->State == EP_NODE_CONNECTED &&        /* node is still connected on this rail */
47726 +       (ep_nmd2railmask (rxd->Local, rxd->nFrags) & ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* NMDs are now valid for this rail */
47727 +    {
47728 +       switch (rxd->State)
47729 +       {
47730 +       case EP_RXD_PUT_STALLED:
47731 +           rxd->State = EP_RXD_PUT_ACTIVE;
47732 +
47733 +           EP_RCVR_OP(rcvrRail, RpcPut) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
47734 +           break;
47735 +
47736 +       case EP_RXD_GET_STALLED:
47737 +           rxd->State = EP_RXD_GET_ACTIVE;
47738 +
47739 +           EP_RCVR_OP(rcvrRail, RpcGet) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
47740 +           break;
47741 +           
47742 +       case EP_RXD_COMPLETE_STALLED:
47743 +           rxd->State = EP_RXD_COMPLETE_ACTIVE;
47744 +
47745 +           EP_RCVR_OP(rcvrRail, RpcComplete) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
47746 +           break;
47747 +
47748 +       default:
47749 +           panic ("ProcessNmdMapResponse: XID match but rxd in invalid state\n");
47750 +           break;
47751 +       }
47752 +
47753 +       rxd->NextRunTime = 0;
47754 +    }
47755 +    else
47756 +       ep_debugf (DBG_MANAGER, "%s: ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p - still cannot proceed\n", rail->Name, rcvr, rxd);
47757 +}
47758 +
47759 +static void
47760 +ProcessFailoverResponse (EP_RCVR *rcvr, EP_RXD *rxd, EP_MANAGER_MSG *msg)
47761 +{
47762 +    /* XXXX - TBD: rebind an in-progress RPC rxd onto the rail the txd failed over to */
47763 +#ifdef NOTYET
47764 +    EP_COMMS_SUBSYS *subsys   = rcvr->Subsys;
47765 +    EP_RXD_RAIL     *rxdRail  = rxd->RxdRail;
47766 +    EP_RCVR_RAIL    *rcvrRail = rxdRail->RcvrRail;
47767 +    EP_RAIL         *rail     = rcvrRail->CommsRail->Rail;
47768 +    EP_RCVR_RAIL    *nRcvrRail;
47769 +    EP_RXD_RAIL     *nRxdRail;
47770 +
47771 +    ASSERT (rxd->RxdMain->Envelope.Attr & EP_RPC);
47772 +
47773 +    EPRINTF6 (DBG_RCVR, "ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p Xid=%016llx state %x.%x - txd on rail %d\n", rcvr, rxd, 
47774 +             rxd->MsgXid.Unique, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, msg->Body.FailoverTxd.Rail);
47775 +
47776 +    if ((nRcvrRail = rcvr->Rails[msg->Body.FailoverTxd.Rail]) == NULL ||
47777 +       (nRcvrRail->Rcvr->RailMask & EP_RAIL2RAILMASK (rail->Number)) == 0)
47778 +    {
47779 +       ep_debugf (DBG_MANAGER, "%s: ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p - still cannot proceed\n", rail->Name, rcvr,rxd);
47780 +       return;
47781 +    }
47782 +
47783 +
47784 +    nRxdRail = EP_RCVR_OP (nRcvrRail, GetRxd) (rcvr, nRcvrRail);
47785 +
47786 +
47787 +    /* If the RPC was in progress, then rollback and mark it as flagged, 
47788 +     * this will then get treated as though the NMDs were not mapped
47789 +     * for the rail when the user initiated the operation.
47790 +     */
47791 +    switch (rxdRail->RxdMain->DataEvent)
47792 +    {
47793 +    case EP_EVENT_ACTIVE|EP_RXD_PHASE_PUT:
47794 +    case EP_EVENT_FLAGGED|EP_RXD_PHASE_PUT:
47795 +       ASSERT (rxdRail->RxdMain->DoneEvent == EP_EVENT_PRIVATE ||
47796 +               rxdRail->RxdMain->DoneEvent == EP_EVENT_PENDING);
47797 +       
47798 +       nRxdRail->RxdMain->DataEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_PUT;
47799 +       nRxdRail->RxdMain->DoneEvent = EP_EVENT_PENDING;
47800 +       break;
47801 +
47802 +    case EP_EVENT_ACTIVE|EP_RXD_PHASE_GET:
47803 +    case EP_EVENT_FLAGGED|EP_RXD_PHASE_GET:
47804 +       ASSERT (rxdRail->RxdMain->DoneEvent == EP_EVENT_PRIVATE ||
47805 +               rxdRail->RxdMain->DoneEvent == EP_EVENT_PENDING);
47806 +       
47807 +       nRxdRail->RxdMain->DataEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_GET;
47808 +       nRxdRail->RxdMain->DoneEvent = EP_EVENT_PENDING;
47809 +       break;
47810 +
47811 +    case EP_EVENT_PRIVATE:
47812 +       switch (rxdRail->RxdMain->DoneEvent)
47813 +       {
47814 +       case EP_EVENT_ACTIVE|EP_RXD_PHASE_COMPLETE:
47815 +       case EP_EVENT_FLAGGED|EP_RXD_PHASE_COMPLETE:
47816 +           nRxdRail->RxdMain->DataEvent = EP_EVENT_PRIVATE;
47817 +           nRxdRail->RxdMain->DoneEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_COMPLETE;
47818 +           break;
47819 +
47820 +       case EP_EVENT_PENDING:
47821 +           break;
47822 +
47823 +       default:
47824 +           panic ("ep_rcvr_xid_msg_handler: rxd in invalid state\n");
47825 +       }
47826 +       break;
47827 +
47828 +    default:
47829 +       panic ("ep_rcvr_xid_msg_handler: rxd in invalid state\n");
47830 +    }
47831 +    
47832 +    UnbindRxdFromRail (rxd, rxdRail);
47833 +
47834 +    /* Mark rxdRail as no longer active */
47835 +    rxdRail->RxdMain->DataEvent = EP_EVENT_PRIVATE;
47836 +    rxdRail->RxdMain->DoneEvent = EP_EVENT_PRIVATE;
47837 +
47838 +    sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);
47839 +    sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);
47840 +    
47841 +    FreeRxdRail (rcvrRail, rxdRail);
47842 +
47843 +    BindRxdToRail (rxd, nRxdRail);
47844 +
47845 +    ep_kthread_schedule (&subsys->Thread, lbolt);
47846 +#endif
47847 +}
47848 +
47849 +void
47850 +ep_rcvr_xid_msg_handler (void *arg, EP_MANAGER_MSG *msg)
47851 +{
47852 +    EP_RCVR          *rcvr = (EP_RCVR *) arg;
47853 +    struct list_head *el;
47854 +    unsigned long     flags;
47855 +
47856 +    spin_lock_irqsave (&rcvr->Lock, flags);
47857 +    list_for_each (el, &rcvr->ActiveDescList) {
47858 +       EP_RXD *rxd = list_entry (el,EP_RXD, Link);
47859 +
47860 +       if (EP_XIDS_MATCH (msg->Hdr.Xid, rxd->MsgXid))
47861 +       {
47862 +           EP_INVALIDATE_XID (rxd->MsgXid);
47863 +
47864 +           switch (msg->Hdr.Type)
47865 +           {
47866 +           case EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE:
47867 +               ProcessNmdMapResponse (rcvr, rxd, msg);
47868 +               break;
47869 +
47870 +           case EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE:
47871 +               ProcessFailoverResponse (rcvr, rxd, msg);
47872 +               break;
47873 +
47874 +           default:
47875 +               panic ("ep_rcvr_xid_msg_handler: XID match but invalid message type\n");
47876 +           }
47877 +       }
47878 +    }
47879 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
47880 +}
47881 +
47882 +
47883 +EP_RCVR *
47884 +ep_alloc_rcvr (EP_SYS *sys, EP_SERVICE svc, unsigned int nenvs)
47885 +{
47886 +    EP_COMMS_SUBSYS  *subsys;
47887 +    EP_RCVR          *rcvr;
47888 +    struct list_head *el;
47889 +    extern int portals_envelopes;
47890 +
47891 +    if (portals_envelopes && (svc == EP_MSG_SVC_PORTALS_SMALL || svc == EP_MSG_SVC_PORTALS_LARGE))
47892 +    {
47893 +       printk ("ep: use %d envelopes rather than %d for portals %s message service\n", sys->Position.pos_nodes * 16, nenvs,
47894 +               svc == EP_MSG_SVC_PORTALS_SMALL ? "small" : "large");
47895 +
47896 +       nenvs = portals_envelopes;
47897 +    }
47898 +
47899 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME)) == NULL)
47900 +       return (NULL);
47901 +
47902 +    KMEM_ZALLOC (rcvr, EP_RCVR *, sizeof (EP_RCVR), 1);
47903 +
47904 +    if (rcvr == NULL)
47905 +       return (NULL);
47906 +
47907 +    rcvr->Subsys            = subsys;
47908 +    rcvr->Service           = svc;
47909 +    rcvr->InputQueueEntries = nenvs;
47910 +    rcvr->FreeDescCount     = 0;
47911 +    rcvr->TotalDescCount    = 0;
47912 +    rcvr->ForwardRxdCount   = 0;
47913 +
47914 +    spin_lock_init (&rcvr->Lock);
47915 +    INIT_LIST_HEAD (&rcvr->ActiveDescList);
47916 +
47917 +    kcondvar_init (&rcvr->CleanupSleep);
47918 +    kcondvar_init (&rcvr->FreeDescSleep);
47919 +    spin_lock_init (&rcvr->FreeDescLock);
47920 +    INIT_LIST_HEAD (&rcvr->FreeDescList);
47921 +    INIT_LIST_HEAD (&rcvr->DescBlockList);
47922 +
47923 +    ep_xid_cache_init (sys, &rcvr->XidCache);
47924 +
47925 +    rcvr->XidCache.MessageHandler = ep_rcvr_xid_msg_handler;
47926 +    rcvr->XidCache.Arg            = rcvr;
47927 +
47928 +    kmutex_lock (&subsys->Lock);
47929 +    /* See if this service is already in use */
47930 +    list_for_each (el, &subsys->Receivers) {
47931 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
47932 +
47933 +       if (rcvr->Service == svc)
47934 +       {
47935 +           KMEM_FREE (rcvr, sizeof (EP_RCVR));
47936 +           kmutex_unlock (&subsys->Lock);   
47937 +           return NULL;
47938 +       }
47939 +    }
47940 +    
47941 +    
47942 +    list_add_tail (&rcvr->Link, &subsys->Receivers);
47943 +
47944 +    ep_procfs_rcvr_add(rcvr);
47945 +
47946 +    /* Now add all rails which are already started */
47947 +    list_for_each (el, &subsys->Rails) { 
47948 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
47949 +
47950 +       EP_RAIL_OP (commsRail, Rcvr.AddRail) (rcvr, commsRail);
47951 +    }
47952 +    kmutex_unlock (&subsys->Lock);   
47953 +
47954 +    ep_mod_inc_usecount();
47955 +
47956 +    return (rcvr);
47957 +}
47958 +
47959 +void
47960 +ep_free_rcvr (EP_RCVR *rcvr)
47961 +{
47962 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
47963 +    EP_SYS           *sys    = subsys->Subsys.Sys;
47964 +    struct list_head  list;
47965 +    struct list_head *el,*nel;
47966 +    unsigned long flags;
47967 +    
47968 +    kmutex_lock (&subsys->Lock);
47969 +    list_for_each (el, &subsys->Rails) { 
47970 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
47971 +       
47972 +       EP_RAIL_OP (commsRail, Rcvr.DelRail) (rcvr, commsRail);
47973 +    }
47974 +
47975 +    ep_procfs_rcvr_del(rcvr);
47976 +
47977 +    list_del (&rcvr->Link);
47978 +    kmutex_unlock (&subsys->Lock);
47979 +
47980 +    INIT_LIST_HEAD (&list);
47981 +
47982 +    /* abort all rxds - should not be bound to a rail */
47983 +    spin_lock_irqsave (&rcvr->Lock, flags);   
47984 +    for (;;)
47985 +    {
47986 +       if (! list_empty (&rcvr->ActiveDescList))
47987 +       {
47988 +           list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
47989 +               EP_RXD *rxd = list_entry (el, EP_RXD, Link);
47990 +               
47991 +               ASSERT (rxd->RxdRail == NULL);
47992 +               ASSERT (rxd->RxdMain->Len == EP_RXD_PENDING);
47993 +               
47994 +               rxd->State = EP_RXD_COMPLETED;
47995 +               rxd->RxdMain->Len = EP_SHUTDOWN;
47996 +               
47997 +               list_del (&rxd->Link);
47998 +               list_add_tail (&rxd->Link, &list);
47999 +           }
48000 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
48001 +           
48002 +           while (! list_empty (&list))
48003 +           {
48004 +               EP_RXD *rxd = list_entry (list.next, EP_RXD, Link);
48005 +               
48006 +               list_del (&rxd->Link);
48007 +               
48008 +               if (rxd->Handler) 
48009 +                   rxd->Handler (rxd);
48010 +           }
48011 +           spin_lock_irqsave (&rcvr->Lock, flags);   
48012 +           continue;
48013 +       }
48014 +
48015 +       if (rcvr->FreeDescCount == rcvr->TotalDescCount)
48016 +           break;
48017 +
48018 +       rcvr->CleanupWaiting++;
48019 +       kcondvar_wait (&rcvr->CleanupSleep, &rcvr->Lock, &flags);
48020 +    }
48021 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48022 +
48023 +    /* must all be in free list */
48024 +    ASSERT( rcvr->FreeDescCount ==  rcvr->TotalDescCount);
48025 +
48026 +    while (! list_empty(& rcvr->DescBlockList) )
48027 +       FreeRxdBlock (rcvr, list_entry (rcvr->DescBlockList.next, EP_RXD_BLOCK, Link));
48028 +
48029 +    /* had better be all gone now */
48030 +    ASSERT((rcvr->FreeDescCount == 0) && (rcvr->TotalDescCount == 0));
48031 +
48032 +    ep_xid_cache_destroy (sys, &rcvr->XidCache);
48033
48034 +    spin_lock_destroy (&rcvr->Lock);
48035 +    KMEM_FREE (rcvr, sizeof (EP_RCVR));
48036 +
48037 +    ep_mod_dec_usecount();
48038 +}
48039 +
48040 +EP_RXD *
48041 +StealRxdFromOtherRail (EP_RCVR *rcvr)
48042 +{
48043 +    EP_RXD          *rxd;
48044 +    int               i;
48045 +       
48046 +    /* scan the rcvr railmask to find a rail to try to steal an rxd from */
48047 +    for (i = 0; i < EP_MAX_RAILS; i++) 
48048 +       if (rcvr->RailMask & (1 << i) ) 
48049 +           if ((rxd = EP_RCVR_OP (rcvr->Rails[i], StealRxd) (rcvr->Rails[i])) != NULL)
48050 +               return rxd;
48051 +
48052 +    return NULL;
48053 +}
48054 +
48055 +long
48056 +CheckUnboundRxd (EP_RCVR *rcvr, EP_RXD *rxd, long nextRunTime)
48057 +{
48058 +    EP_SYS       *sys = rcvr->Subsys->Subsys.Sys;
48059 +    EP_RCVR_RAIL *rcvrRail;
48060 +    int           rnum;
48061 +    
48062 +    if ((rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(&rxd->Data))) < 0)
48063 +       rnum = ep_rcvr_prefrail (rcvr, ep_rcvr_availrails (rcvr));
48064 +    
48065 +    if ( rnum < 0 )    {
48066 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
48067 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
48068 +       
48069 +       return (nextRunTime);
48070 +    }
48071 +
48072 +    ASSERT ( rnum >= 0 );
48073 +
48074 +    rcvrRail = rcvr->Rails[rnum];
48075 +
48076 +    ASSERT ( rcvrRail != NULL);
48077 +
48078 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
48079 +
48080 +    if ((!(EP_NMD_RAILMASK (&rxd->Data) & EP_RAIL2RAILMASK(rnum)) &&                   /* not mapped already and */
48081 +        ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rnum)) == 0) ||            /* failed mapping, or */
48082 +       !EP_RCVR_OP (rcvrRail, QueueRxd) (rxd, rcvrRail))                               /* failed to queue */
48083 +    {
48084 +       ASSERT (rxd->RxdRail == NULL);
48085 +
48086 +       EPRINTF4 (DBG_RCVR,"CheckUnboundRxd: rcvr=%p rxd=%p -> rnum=%d rcvrRail=%p (failed)\n", rcvr, rxd, rnum, rcvrRail);
48087 +
48088 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
48089 +       
48090 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
48091 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
48092 +    }
48093 +
48094 +    return (nextRunTime);
48095 +}
48096 +
48097 +int
48098 +CheckRxdNmdsMapped (EP_RCVR *rcvr, EP_RXD *rxd)
48099 +{
48100 +    EP_RXD_RAIL *rxdRail = rxd->RxdRail;
48101 +    EP_RXD_MAIN *rxdMain = rxd->RxdMain;
48102 +    EP_ENVELOPE *env     = &rxdMain->Envelope;
48103 +    EP_SYS      *sys     = rcvr->Subsys->Subsys.Sys;
48104 +    EP_RAIL     *rail    = rxdRail->RcvrRail->CommsRail->Rail;
48105 +    int                 i;
48106 +
48107 +    /* Try and map the local NMDs before checking to see if we can proceed */
48108 +    if (! (ep_nmd2railmask (rxd->Local, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))
48109 +    {
48110 +       EPRINTF3 (DBG_MAPNMD, "%s: rcvr=%p rxd=%p RPC Local NMDs not mapped\n", rail->Name, rcvr, rxd);
48111 +       
48112 +       for (i = 0; i < rxd->nFrags; i++)
48113 +           if (! (EP_NMD_RAILMASK(&rxd->Local[i]) & EP_RAIL2RAILMASK(rail->Number)))
48114 +               if (ep_nmd_map_rails (sys, &rxd->Local[i], EP_RAIL2RAILMASK(rail->Number)))
48115 +                   rxd->NextRunTime = lbolt + RESOURCE_RETRY_TIME;
48116 +    }
48117 +    
48118 +    /* Try and map remote NMDs if they are not valid for this rail */
48119 +    if (! (ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))
48120 +    {
48121 +       EP_MANAGER_MSG_BODY msgBody;
48122 +
48123 +       EPRINTF3 (DBG_MAPNMD, "%s: rcvr=%p rxd=%p RPC Remote NMDs not mapped\n", rail->Name, rcvr, rxd);
48124 +
48125 +       if (EP_XID_INVALID(rxd->MsgXid))
48126 +           rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
48127 +
48128 +       msgBody.MapNmd.nFrags   = rxd->nFrags;
48129 +       msgBody.MapNmd.Railmask = EP_RAIL2RAILMASK (rail->Number);
48130 +       for (i = 0; i < rxd->nFrags; i++)
48131 +           msgBody.MapNmd.Nmd[i] = rxd->Remote[i];
48132 +
48133 +       if (ep_send_message (rail, env->NodeId, EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST, rxd->MsgXid, &msgBody) == 0)
48134 +           rxd->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
48135 +       else
48136 +           rxd->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
48137 +
48138 +       return 0;
48139 +    }
48140 +
48141 +    if ((ep_nmd2railmask (rxd->Local, rxd->nFrags) & ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)) != 0)
48142 +    {
48143 +       rxd->NextRunTime = 0;
48144 +       return 1;
48145 +    }
48146 +
48147 +    return 0;
48148 +}
48149 +
48150 +long
48151 +ep_check_rcvr (EP_RCVR *rcvr, long nextRunTime)
48152 +{
48153 +    struct list_head *el, *nel;
48154 +    unsigned long     flags;
48155 +    int               i;
48156 +
48157 +    /* Check to see if we're low on rxds */
48158 +    if (rcvr->FreeDescCount < ep_rxd_lowat)
48159 +       AllocateRxdBlock (rcvr, 0, NULL);
48160 +
48161 +    for (i = 0; i < EP_MAX_RAILS; i++) 
48162 +       if (rcvr->RailMask & (1 << i) )
48163 +           nextRunTime = EP_RCVR_OP (rcvr->Rails[i], Check) (rcvr->Rails[i], nextRunTime);
48164 +
48165 +    /* See if we have any rxd's which need to be handled */
48166 +    spin_lock_irqsave (&rcvr->Lock, flags);
48167 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
48168 +       EP_RXD      *rxd     = list_entry (el, EP_RXD, Link);
48169 +       EP_RXD_MAIN *rxdMain = rxd->RxdMain;
48170 +       EP_ENVELOPE *env     = &rxdMain->Envelope;
48171 +       EP_RXD_RAIL *rxdRail = rxd->RxdRail;
48172 +
48173 +       if (rxdRail == NULL)
48174 +           nextRunTime = CheckUnboundRxd (rcvr, rxd, nextRunTime);
48175 +       else
48176 +       {
48177 +           EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
48178 +           EP_RAIL      *rail     = rcvrRail->CommsRail->Rail;
48179 +
48180 +           if (rxd->RxdMain->Len == EP_RXD_PENDING ||                          /* envelope not received yet */
48181 +               rail->Nodes[env->NodeId].State != EP_NODE_CONNECTED)            /* will be failing over */
48182 +               continue;
48183 +
48184 +           switch (rxd->State)
48185 +           {
48186 +           case EP_RXD_PUT_STALLED:
48187 +               if (CheckRxdNmdsMapped (rcvr, rxd))
48188 +               {
48189 +                   rxd->State = EP_RXD_PUT_ACTIVE;
48190 +
48191 +                   EP_RCVR_OP (rcvrRail, RpcPut) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48192 +               }
48193 +               break;
48194 +
48195 +           case EP_RXD_GET_STALLED:
48196 +               if (CheckRxdNmdsMapped (rcvr, rxd))
48197 +               {
48198 +                   rxd->State = EP_RXD_GET_ACTIVE;
48199 +
48200 +                   EP_RCVR_OP (rcvrRail, RpcGet) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48201 +               }
48202 +               break;
48203 +           
48204 +           case EP_RXD_COMPLETE_STALLED:
48205 +               if (CheckRxdNmdsMapped (rcvr, rxd))
48206 +               {
48207 +                   rxd->State = EP_RXD_COMPLETE_ACTIVE;
48208 +
48209 +                   EP_RCVR_OP (rcvrRail, RpcComplete)(rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48210 +               }
48211 +               break;
48212 +           }
48213 +               
48214 +           if (rxd->NextRunTime && (nextRunTime == 0 || AFTER (nextRunTime, rxd->NextRunTime)))
48215 +               nextRunTime = rxd->NextRunTime;
48216 +       }
48217 +    }
48218 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48219 +    
48220 +    return (nextRunTime);
48221 +}
48222 +
48223 +void
48224 +ep_display_rxd (DisplayInfo *di, EP_RXD *rxd)
48225 +{
48226 +    EP_RXD_MAIN *rxdMain  = rxd->RxdMain;
48227 +    EP_ENVELOPE *env      = &rxdMain->Envelope;
48228 +    EP_RXD_RAIL *rxdRail  = rxd->RxdRail;
48229 +    
48230 +    (di->func)(di->arg, "  RXD: %p State=%x RxdMain=%p(%x.%x.%x) Data=%x.%x.%x %s\n", rxd,
48231 +              rxd->State, rxd->RxdMain, rxd->NmdMain.nmd_addr, rxd->NmdMain.nmd_len,
48232 +              rxd->NmdMain.nmd_attr, rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr,
48233 +              rxd->RxdMain->Len == EP_RXD_PENDING ? "Pending" : "Active");
48234 +    (di->func)(di->arg, "      NodeId=%d Range=%d.%d TxdRail=%x TxdMain=%x.%x.%x nFrags=%d XID=%08x.%08x.%016llx\n",
48235 +              env->NodeId,  EP_RANGE_LOW(env->Range), EP_RANGE_HIGH(env->Range), env->TxdRail, env->TxdMain.nmd_addr,
48236 +              env->TxdMain.nmd_len, env->TxdMain.nmd_attr, env->nFrags, env->Xid.Generation, env->Xid.Handle, env->Xid.Unique);
48237 +    (di->func)(di->arg, "      Frag[0] %08x.%08x.%08x\n", env->Frags[0].nmd_addr, env->Frags[0].nmd_len, env->Frags[0].nmd_attr);
48238 +    (di->func)(di->arg, "      Frag[1] %08x.%08x.%08x\n", env->Frags[1].nmd_addr, env->Frags[1].nmd_len, env->Frags[1].nmd_attr);
48239 +    (di->func)(di->arg, "      Frag[2] %08x.%08x.%08x\n", env->Frags[2].nmd_addr, env->Frags[2].nmd_len, env->Frags[2].nmd_attr);
48240 +    (di->func)(di->arg, "      Frag[3] %08x.%08x.%08x\n", env->Frags[3].nmd_addr, env->Frags[3].nmd_len, env->Frags[3].nmd_attr);
48241 +
48242 +    if (rxdRail) EP_RCVR_OP (rxdRail->RcvrRail, DisplayRxd) (di, rxdRail);
48243 +}
48244 +
48245 +void
48246 +ep_display_rcvr (DisplayInfo *di, EP_RCVR *rcvr, int full)
48247 +{
48248 +    int               freeCount    = 0;
48249 +    int                      activeCount  = 0;
48250 +    int                      pendingCount = 0;
48251 +    int                      railCounts[EP_MAX_RAILS];
48252 +    struct list_head *el;
48253 +    int               i;
48254 +    unsigned long     flags;
48255 +
48256 +    for (i = 0; i <EP_MAX_RAILS; i++)
48257 +       railCounts[i] = 0;
48258 +
48259 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
48260 +    list_for_each (el, &rcvr->FreeDescList)
48261 +       freeCount++;
48262 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
48263 +
48264 +    spin_lock_irqsave (&rcvr->Lock, flags);
48265 +    list_for_each (el, &rcvr->ActiveDescList) {
48266 +       EP_RXD      *rxd     = list_entry (el, EP_RXD, Link);
48267 +       EP_RXD_RAIL *rxdRail = rxd->RxdRail;
48268 +
48269 +       if (rxd->RxdMain->Len == EP_RXD_PENDING)
48270 +           pendingCount++;
48271 +       else
48272 +           activeCount++;
48273 +
48274 +       if (rxdRail)
48275 +           railCounts[rxdRail->RcvrRail->CommsRail->Rail->Number]++;
48276 +    }
48277 +
48278 +    (di->func)(di->arg, "RCVR: rcvr=%p number=%d\n", rcvr, rcvr->Service);
48279 +    (di->func)(di->arg, "      RXDS Free=%d (%d) Pending=%d Active=%d Rails=%d.%d.%d.%d\n",
48280 +              freeCount, rcvr->FreeDescCount, pendingCount, activeCount, railCounts[0], railCounts[1],
48281 +              railCounts[2], railCounts[3]);
48282 +
48283 +    for (i = 0; i < EP_MAX_RAILS; i++)
48284 +       if (rcvr->Rails[i] != NULL)
48285 +           EP_RCVR_OP (rcvr->Rails[i], DisplayRcvr) (di, rcvr->Rails[i]);
48286 +
48287 +    list_for_each (el, &rcvr->ActiveDescList) {
48288 +       EP_RXD *rxd = list_entry (el, EP_RXD, Link);
48289 +
48290 +       if (rxd->RxdMain->Len != EP_RXD_PENDING || full)
48291 +           ep_display_rxd (di, rxd);
48292 +    }
48293 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48294 +}
48295 +
48296 +void
48297 +ep_rxd_received_now(EP_RXD *rxd)
48298 +{
48299 +    EP_ENVELOPE   *env  = &rxd->RxdMain->Envelope;
48300 +    EP_RCVR       *rcvr = rxd->Rcvr;
48301 +    unsigned long  flags;
48302 +
48303 +    INC_STAT(rcvr->stats,rx);
48304 +    ADD_STAT(rcvr->stats,rx_len, rxd->RxdMain->Len);
48305 +
48306 +    if (rxd->RxdMain->Len < 0 || !EP_IS_MULTICAST(env->Attr))
48307 +    {
48308 +       rxd->Handler (rxd);
48309 +    }
48310 +    else
48311 +    {
48312 +       EPRINTF5 (DBG_RCVR, "ep_rxd_received: forward rxd=%p Data=%08x.%08x.%08x len=%d\n", rxd, 
48313 +                 rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, ep_rxd_len(rxd));
48314 +
48315 +       spin_lock_irqsave (&rcvr->Subsys->ForwardDescLock, flags);
48316 +       list_add_tail (&rxd->Link, &rcvr->Subsys->ForwardDescList);
48317 +       spin_unlock_irqrestore (&rcvr->Subsys->ForwardDescLock, flags);
48318 +       
48319 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48320 +    } 
48321 +}
48322 +
48323 +#if defined(CONFIG_EP_NO_CHECK_SUM)
48324 +void
48325 +ep_rxd_received(EP_RXD *rxd) 
48326 +{
48327 +   ep_rxd_received_now(rxd);
48328 +}
48329 +
48330 +#else
48331 +
48332 +void
48333 +ep_rxd_received(EP_RXD *rxd) 
48334 +{
48335 +  EP_ENVELOPE   *env  = &rxd->RxdMain->Envelope;
48336 +
48337 +  if (env->CheckSum) 
48338 +      ep_rxd_queue_csum(rxd);
48339 +  else 
48340 +      ep_rxd_received_now(rxd);
48341 +}
48342 +
48343 +void
48344 +ep_rxd_queue_csum(EP_RXD *rxd)
48345 +{
48346 +    EP_RCVR       *rcvr = rxd->Rcvr;
48347 +    unsigned long flags;
48348 +
48349 +    EPRINTF5 (DBG_RCVR, "ep_rxd_queue_csum: rxd=%p Data=%08x.%08x.%08x len=%d\n", rxd, 
48350 +             rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, ep_rxd_len(rxd));
48351 +    
48352 +    spin_lock_irqsave (&rcvr->Subsys->CheckSumDescLock, flags);
48353 +    list_add_tail (&rxd->CheckSumLink, &rcvr->Subsys->CheckSumDescList);
48354 +    spin_unlock_irqrestore (&rcvr->Subsys->CheckSumDescLock, flags);
48355 +    
48356 +    ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48357 +}
48358 +#endif
48359 +
48360 +void
48361 +ep_rcvr_fillout_stats(EP_RCVR *rcvr, char *str)
48362 +{
48363 +    sprintf(str+strlen(str),"Rx     %lu  %lu /sec\n",       GET_STAT_TOTAL(rcvr->stats,rx),      GET_STAT_PER_SEC(rcvr->stats,rx) );
48364 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(rcvr->stats,rx_len) / (1024*1024),  GET_STAT_PER_SEC(rcvr->stats,rx_len) / (1024*1024));
48365 +}
48366 +
48367 +void
48368 +ep_rcvr_rail_fillout_stats(EP_RCVR_RAIL *rcvr_rail, char *str)
48369 +{
48370 +    sprintf(str+strlen(str),"Rx     %lu  %lu /sec\n",       GET_STAT_TOTAL(rcvr_rail->stats,rx),      GET_STAT_PER_SEC(rcvr_rail->stats,rx) );
48371 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(rcvr_rail->stats,rx_len) / (1024*1024),  GET_STAT_PER_SEC(rcvr_rail->stats,rx_len) / (1024*1024));
48372 +}
48373 +
48374 +
48375 +/*
48376 + * Local variables:
48377 + * c-file-style: "stroustrup"
48378 + * End:
48379 + */
48380 Index: linux-2.4.21/drivers/net/qsnet/ep/epcommsRx_elan3.c
48381 ===================================================================
48382 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcommsRx_elan3.c    2004-02-23 16:02:56.000000000 -0500
48383 +++ linux-2.4.21/drivers/net/qsnet/ep/epcommsRx_elan3.c 2005-06-01 23:12:54.649431504 -0400
48384 @@ -0,0 +1,1776 @@
48385 +/*
48386 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
48387 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
48388 + *
48389 + *    For licensing information please see the supplied COPYING file
48390 + *
48391 + */
48392 +
48393 +#ident "@(#)$Id: epcommsRx_elan3.c,v 1.19.2.3 2004/11/15 11:05:49 mike Exp $"
48394 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx_elan3.c,v $ */
48395 +
48396 +#include <qsnet/kernel.h>
48397 +
48398 +#include <elan/kcomm.h>
48399 +#include <elan/epsvc.h>
48400 +#include <elan/epcomms.h>
48401 +
48402 +#include "kcomm_vp.h"
48403 +#include "kcomm_elan3.h"
48404 +#include "epcomms_elan3.h"
48405 +#include "debug.h"
48406 +
48407 +#define RCVR_TO_RAIL(rcvrRail)         ((EP3_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail->Rail)
48408 +#define RCVR_TO_DEV(rcvrRail)          (RCVR_TO_RAIL(rcvrRail)->Device)
48409 +#define RCVR_TO_SUBSYS(rcvrRail)       (((EP_RCVR_RAIL *) rcvrRail)->Rcvr->Subsys)
48410 +
48411 +static void RxDataEvent (EP3_RAIL *rail, void *arg);
48412 +static void RxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
48413 +static void RxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
48414 +
48415 +static EP3_COOKIE_OPS RxDataCookieOps = 
48416 +{
48417 +    RxDataEvent,
48418 +    RxDataRetry,
48419 +    NULL, /* DmaCancelled */
48420 +    RxDataVerify,
48421 +};
48422 +
48423 +static void RxDoneEvent (EP3_RAIL *rail, void *arg);
48424 +static void RxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
48425 +static void RxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
48426 +
48427 +static EP3_COOKIE_OPS RxDoneCookieOps = 
48428 +{
48429 +    RxDoneEvent,
48430 +    RxDoneRetry,
48431 +    NULL, /* DmaCancelled */
48432 +    RxDoneVerify,
48433 +};
48434 +
48435 +static int
48436 +AllocateRxdRailBlock (EP3_RCVR_RAIL *rcvrRail)
48437 +{
48438 +    EP3_RAIL         *rail = RCVR_TO_RAIL(rcvrRail);
48439 +    ELAN3_DEV         *dev  = rail->Device;
48440 +    EP3_RXD_RAIL_BLOCK *blk;
48441 +    EP3_RXD_RAIL       *rxdRail;
48442 +    sdramaddr_t        pRxdElan;
48443 +    EP3_RXD_RAIL_MAIN  *pRxdMain;
48444 +    E3_Addr           pRxdElanAddr;
48445 +    E3_Addr           pRxdMainAddr;
48446 +    E3_BlockCopyEvent  event;
48447 +    int                       i, j;
48448 +    unsigned long      flags;
48449 +
48450 +    KMEM_ZALLOC (blk, EP3_RXD_RAIL_BLOCK *, sizeof (EP3_RXD_RAIL_BLOCK), 1);
48451 +    if (blk == NULL)
48452 +       return 0;
48453 +
48454 +    if ((pRxdElan = ep_alloc_elan (&rail->Generic, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK, 0, &pRxdElanAddr)) == (sdramaddr_t) 0)
48455 +    {
48456 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
48457 +       return 0;
48458 +    }
48459 +
48460 +    if ((pRxdMain = ep_alloc_main (&rail->Generic, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK, 0, &pRxdMainAddr)) == (sdramaddr_t) 0)
48461 +    {
48462 +       ep_free_elan (&rail->Generic, pRxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
48463 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
48464 +       return 0;
48465 +    }
48466 +    
48467 +    if (ReserveDmaRetries (rail, EP3_NUM_RXD_PER_BLOCK, 0) != ESUCCESS)
48468 +    {
48469 +       ep_free_main (&rail->Generic, pRxdMainAddr, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK);
48470 +       ep_free_elan (&rail->Generic, pRxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
48471 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
48472 +       return 0;
48473 +    }
48474 +
48475 +    for (rxdRail = &blk->Rxd[0], i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++, rxdRail++)
48476 +    {
48477 +       rxdRail->Generic.RcvrRail = (EP_RCVR_RAIL *) rcvrRail;
48478 +       rxdRail->RxdElan          = pRxdElan;
48479 +       rxdRail->RxdElanAddr      = pRxdElanAddr;
48480 +       rxdRail->RxdMain          = pRxdMain;
48481 +       rxdRail->RxdMainAddr      = pRxdMainAddr;
48482 +
48483 +       elan3_sdram_writel (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, RxdMain),  0);
48484 +       elan3_sdram_writel (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next),     0);
48485 +       elan3_sdram_writeq (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr), (long) rxdRail);
48486 +
48487 +       for (j = 0; j < EP_MAXFRAG; j++)
48488 +       {
48489 +           RegisterCookie (&rail->CookieTable, &rxdRail->ChainCookie[j], pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[j]), &RxDataCookieOps, (void *) rxdRail);
48490 +
48491 +           event.ev_Type  = EV_TYPE_DMA | (pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[j+1]));
48492 +           event.ev_Count = 0;
48493 +
48494 +           elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[j]), sizeof (E3_BlockCopyEvent));
48495 +       }
48496 +       
48497 +       RegisterCookie (&rail->CookieTable, &rxdRail->DataCookie, pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), &RxDataCookieOps, (void *) rxdRail);
48498 +       RegisterCookie (&rail->CookieTable, &rxdRail->DoneCookie, pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), &RxDoneCookieOps, (void *) rxdRail);
48499 +
48500 +       EP3_INIT_COPY_EVENT (event, rxdRail->DataCookie, pRxdMainAddr + offsetof (EP3_RXD_RAIL_MAIN, DataEvent), 1);
48501 +       elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), sizeof (E3_BlockCopyEvent));
48502 +
48503 +       EP3_INIT_COPY_EVENT (event, rxdRail->DoneCookie, pRxdMainAddr + offsetof (EP3_RXD_RAIL_MAIN, DoneEvent), 1);
48504 +       elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), sizeof (E3_BlockCopyEvent));
48505 +       
48506 +       pRxdMain->DataEvent = EP3_EVENT_FREE;
48507 +       pRxdMain->DoneEvent = EP3_EVENT_FREE;
48508 +
48509 +       /* move onto next descriptor */
48510 +       pRxdElan     += EP3_RXD_RAIL_ELAN_SIZE;
48511 +       pRxdElanAddr += EP3_RXD_RAIL_ELAN_SIZE;
48512 +       pRxdMain      = (EP3_RXD_RAIL_MAIN *) ((unsigned long) pRxdMain + EP3_RXD_RAIL_MAIN_SIZE);
48513 +       pRxdMainAddr += EP3_RXD_RAIL_MAIN_SIZE;
48514 +    }
48515 +
48516 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
48517 +
48518 +    list_add  (&blk->Link, &rcvrRail->DescBlockList);
48519 +    rcvrRail->TotalDescCount += EP3_NUM_RXD_PER_BLOCK;
48520 +    rcvrRail->FreeDescCount  += EP3_NUM_RXD_PER_BLOCK;
48521 +
48522 +    for (i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++)
48523 +       list_add (&blk->Rxd[i].Generic.Link, &rcvrRail->FreeDescList);
48524 +
48525 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
48526 +    
48527 +    return 1;
48528 +}
48529 +
48530 +static void
48531 +FreeRxdRailBlock (EP3_RCVR_RAIL *rcvrRail, EP3_RXD_RAIL_BLOCK *blk)
48532 +{
48533 +    EP3_RAIL     *rail = RCVR_TO_RAIL(rcvrRail);
48534 +    EP3_RXD_RAIL *rxdRail;
48535 +    unsigned long flags;
48536 +    int i, j;
48537 +
48538 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
48539 +
48540 +    list_del (&blk->Link);
48541 +
48542 +    rcvrRail->TotalDescCount -= EP3_NUM_RXD_PER_BLOCK;
48543 +
48544 +    for (rxdRail = &blk->Rxd[0], i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++, rxdRail++)
48545 +    {
48546 +
48547 +       rcvrRail->FreeDescCount--;
48548 +
48549 +       list_del (&rxdRail->Generic.Link);
48550 +       
48551 +       for (j = 0; j < EP_MAXFRAG; j++)
48552 +           DeregisterCookie (&rail->CookieTable, &rxdRail->ChainCookie[j]);
48553 +       
48554 +       DeregisterCookie (&rail->CookieTable, &rxdRail->DataCookie);
48555 +       DeregisterCookie (&rail->CookieTable, &rxdRail->DoneCookie);
48556 +    }
48557 +
48558 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
48559 +
48560 +    ReleaseDmaRetries (rail, EP3_NUM_RXD_PER_BLOCK);
48561 +
48562 +    ep_free_main (&rail->Generic, blk->Rxd[0].RxdMainAddr, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK);
48563 +    ep_free_elan (&rail->Generic, blk->Rxd[0].RxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
48564 +
48565 +    KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
48566 +}
48567 +
48568 +static EP3_RXD_RAIL *
48569 +GetRxdRail (EP3_RCVR_RAIL *rcvrRail)
48570 +{
48571 +    EP3_RXD_RAIL *rxdRail;
48572 +    unsigned long flags;
48573 +    int low_on_rxds;
48574 +
48575 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
48576 +
48577 +    if (list_empty (&rcvrRail->FreeDescList))
48578 +       rxdRail = NULL;
48579 +    else
48580 +    {
48581 +       rxdRail = list_entry (rcvrRail->FreeDescList.next, EP3_RXD_RAIL, Generic.Link);
48582 +
48583 +       list_del (&rxdRail->Generic.Link);
48584 +
48585 +       rcvrRail->FreeDescCount--;
48586 +    }
48587 +
48588 +    /* Wakeup the descriptor primer thread if there's not many left */
48589 +    low_on_rxds = (rcvrRail->FreeDescCount < ep_rxd_lowat);
48590 +
48591 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
48592 +
48593 +    if (low_on_rxds)
48594 +       ep_kthread_schedule (&RCVR_TO_SUBSYS(rcvrRail)->Thread, lbolt);
48595 +
48596 +    return (rxdRail);
48597 +}
48598 +
48599 +static void
48600 +FreeRxdRail (EP3_RCVR_RAIL *rcvrRail, EP3_RXD_RAIL *rxdRail)
48601 +{
48602 +    unsigned long flags;
48603 +
48604 +#if defined(DEBUG_ASSERT)
48605 +    {
48606 +       EP_RAIL  *rail = (EP_RAIL *) RCVR_TO_RAIL(rcvrRail);
48607 +       ELAN3_DEV *dev = RCVR_TO_DEV (rcvrRail);
48608 +
48609 +       EP_ASSERT (rail, rxdRail->Generic.RcvrRail == &rcvrRail->Generic);
48610 +       
48611 +       EP_ASSERT (rail, rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE);
48612 +       EP_ASSERT (rail, rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE);
48613 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
48614 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
48615 +
48616 +       rxdRail->RxdMain->DataEvent = EP3_EVENT_FREE;
48617 +       rxdRail->RxdMain->DoneEvent = EP3_EVENT_FREE;
48618 +    }
48619 +#endif
48620 +
48621 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
48622 +    
48623 +    list_add (&rxdRail->Generic.Link, &rcvrRail->FreeDescList);
48624 +
48625 +    rcvrRail->FreeDescCount++;
48626 +
48627 +    if (rcvrRail->FreeDescWaiting)
48628 +    {
48629 +       rcvrRail->FreeDescWaiting--;
48630 +       kcondvar_wakeupall (&rcvrRail->FreeDescSleep, &rcvrRail->FreeDescLock);
48631 +    }
48632 +
48633 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
48634 +}
48635 +
48636 +static void
48637 +BindRxdToRail (EP_RXD *rxd, EP3_RXD_RAIL *rxdRail)
48638 +{
48639 +    EP3_RAIL *rail = RCVR_TO_RAIL (rxdRail->Generic.RcvrRail);
48640 +
48641 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
48642 +
48643 +    EPRINTF3 (DBG_RCVR, "%s: BindRxdToRail: rxd=%p rxdRail=%p\n",  rail->Generic.Name, rxd, rxdRail);
48644 +
48645 +    elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, RxdMain), rxd->NmdMain.nmd_addr);                        /* PCI write */
48646 +
48647 +    rxd->RxdRail         = &rxdRail->Generic;
48648 +    rxdRail->Generic.Rxd = rxd;
48649 +}
48650 +
48651 +static void
48652 +UnbindRxdFromRail (EP_RXD *rxd, EP3_RXD_RAIL *rxdRail)
48653 +{
48654 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
48655 +
48656 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
48657 +    ASSERT (rxd->RxdRail == &rxdRail->Generic && rxdRail->Generic.Rxd == rxd);
48658 +    
48659 +    EPRINTF3 (DBG_RCVR, "%s: UnbindRxdFromRail: rxd=%p rxdRail=%p\n",  RCVR_TO_RAIL(rxdRail->Generic.RcvrRail)->Generic.Name, rxd, rxdRail);
48660 +
48661 +    rxd->RxdRail         = NULL;
48662 +    rxdRail->Generic.Rxd = NULL;
48663 +
48664 +    if (rcvrRail->CleanupWaiting)
48665 +       kcondvar_wakeupall (&rcvrRail->CleanupSleep, &rxd->Rcvr->Lock);
48666 +    rcvrRail->CleanupWaiting = 0;
48667 +}
48668 +
48669 +static void
48670 +LockRcvrThread (EP3_RCVR_RAIL *rcvrRail)
48671 +{
48672 +    EP_COMMS_RAIL     *commsRail   = rcvrRail->Generic.CommsRail;
48673 +    EP3_RAIL          *rail        = RCVR_TO_RAIL(rcvrRail);
48674 +    ELAN3_DEV        *dev         = rail->Device;
48675 +    sdramaddr_t        sle         = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
48676 +    EP3_SPINLOCK_MAIN *sl          = &rcvrRail->RcvrMain->ThreadLock;
48677 +    E3_uint32          RestartBits = 0;
48678 +    int                delay       = 1;
48679 +    E3_uint32          seq;
48680 +    E3_uint32          reg;
48681 +
48682 +    ASSERT (SPINLOCK_HELD (&rcvrRail->Generic.Rcvr->Lock));
48683 +
48684 +    mb();
48685 +    elan3_sdram_writel (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 1);
48686 +    mb();
48687 +    seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
48688 +    while (seq != sl->sl_seq)
48689 +    {
48690 +       while (sl->sl_seq == (seq - 1))
48691 +       {
48692 +           mb();
48693 +
48694 +           if ((read_reg32 (dev, Exts.InterruptReg) & (INT_TProc | INT_TProcHalted)) != 0 && spin_trylock (&dev->IntrLock))
48695 +           {
48696 +               reg=read_reg32 (dev, Exts.InterruptReg);
48697 +               ELAN_REG_REC(reg);
48698 +
48699 +               if ((reg & (INT_TProc | INT_TProcHalted)) != 0&& 
48700 +                   elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq)) != sl->sl_seq)
48701 +               {
48702 +                   EPRINTF1 (DBG_RCVR, "%s: LockRcvrThread - thread trapped\n", rail->Generic.Name);
48703 +
48704 +                   /* The thread processor has *really* trapped, and the spinlock is still held.
48705 +                    * thus is must have trapped due to a network error - we need to complete the
48706 +                    * actions required for this envelope, since we may be spin-locking the receiver
48707 +                    * to search the dma retry lists for a particular dma.  So must ensure that
48708 +                    * if the thread had trapped then the dma has been queued onto the retry list
48709 +                    * *before* we inspect them.
48710 +                    */
48711 +                   IncrStat (commsRail, LockRcvrTrapped);
48712 +
48713 +                   /* We're going to generate a spurious interrupt here - since we will
48714 +                    * handle the thread processor trap directly */
48715 +                   ELAN_REG_REC(reg);
48716 +                   if (HandleTProcTrap (dev, &RestartBits))
48717 +                   {
48718 +                       /* NOTE - this is not an assert, since the "store" to unlock the lock could
48719 +                        *        be held up on the PCI interface, whilst the thread processor has
48720 +                        *        gone on and switched to a new thread, which has then trapped, and
48721 +                        *        our read of the InterruptReg can overtake the unlock write.
48722 +                        *
48723 +                        * ASSERT (dev->ThreadTrap->Registers[REG_GLOBALS + (1^WordEndianFlip)] == 
48724 +                        *         elan3_sdram_readl (dev, rcvr->RcvrElan + offsetof (EP_RCVR_ELAN, PendingRxDescsElan)));
48725 +                        */
48726 +
48727 +                       PULSE_SCHED_STATUS (dev, RestartBits);
48728 +
48729 +                       DeliverTProcTrap (dev, dev->ThreadTrap, INT_TProc);
48730 +                   }
48731 +               }
48732 +               spin_unlock (&dev->IntrLock);
48733 +           }
48734 +           
48735 +           DELAY (delay); delay++;
48736 +       }
48737 +       seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
48738 +    }
48739 +}
48740 +
48741 +static void
48742 +UnlockRcvrThread (EP3_RCVR_RAIL *rcvrRail)
48743 +{
48744 +    EP3_RAIL   *rail = RCVR_TO_RAIL(rcvrRail);
48745 +    sdramaddr_t sle  = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
48746 +
48747 +    mb();
48748 +    elan3_sdram_writel (rail->Device, sle + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 0);
48749 +    mmiob(); 
48750 +}
48751 +
48752 +void
48753 +CompleteEnvelope (EP3_RAIL *rail, E3_Addr rxdElanAddr, E3_uint32 PAckVal)
48754 +{
48755 +    ELAN3_DEV         *dev       = rail->Device;
48756 +    sdramaddr_t        rxdElan   = ep_elan2sdram (&rail->Generic, rxdElanAddr);
48757 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) (unsigned long) elan3_sdram_readq (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr));
48758 +    EP_RXD_MAIN       *rxdMain   = rxdRail->Generic.Rxd->RxdMain;
48759 +    EP_ENVELOPE       *env       = &rxdMain->Envelope;
48760 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
48761 +    EP_COMMS_RAIL     *commsRail = rcvrRail->Generic.CommsRail;
48762 +    EP_RCVR           *rcvr      = rcvrRail->Generic.Rcvr;
48763 +    sdramaddr_t        queue     = ((EP3_COMMS_RAIL *) commsRail)->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue);
48764 +    sdramaddr_t        sle       = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
48765 +    EP3_SPINLOCK_MAIN *sl        = &rcvrRail->RcvrMain->ThreadLock;
48766 +    int               nodeId;
48767 +    EP_NODE_RAIL     *nodeRail;
48768 +    E3_DMA_BE         dma;
48769 +    E3_Addr           nfptr;
48770 +    E3_Addr          next;
48771 +
48772 +    ASSERT (commsRail->Rail == &rail->Generic);
48773 +    ASSERT (rxdElanAddr == elan3_sdram_readl (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs)));
48774 +
48775 +    IncrStat (commsRail, CompleteEnvelope);
48776 +
48777 +    /* We don't need to aquire the NodeLock here (however we might be holding it),
48778 +     * since this can only get called while the node is connected, or disconnecting.
48779 +     * If the node is disconnecting, then we can get called from FlushDisconnecting()
48780 +     * while holding the NodeLock - after we cannot get called again until the node 
48781 +     * has reconnected from scratch.
48782 +     */
48783 +    /* Copy the envelope information */
48784 +    nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr));
48785 +
48786 +    if (nfptr == elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top)))
48787 +       nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base));
48788 +    else
48789 +       nfptr += elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size));
48790 +
48791 +    /* Copy the envelope and payload (unconditionally) */
48792 +    elan3_sdram_copyl_from_sdram (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr), env, EP_ENVELOPE_SIZE + EP_PAYLOAD_SIZE);
48793 +
48794 +    ASSERT (env->Version == EP_ENVELOPE_VERSION);
48795 +
48796 +    /* Copy the received message length */
48797 +    rxdMain->Len = elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len));
48798 +       
48799 +    /* Remove the RXD from the pending desc list */
48800 +    if ((next = elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next))) == 0)
48801 +       rcvrRail->RcvrMain->PendingDescsTailp = 0;
48802 +    elan3_sdram_writel (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), next);
48803 +
48804 +    /* Copy the DMA descriptor to queue on the approriate retry list */
48805 +    elan3_sdram_copyq_from_sdram (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]), &dma, sizeof (E3_DMA));        /* PCI read block */
48806 +    
48807 +    EP_ASSERT (&rail->Generic, dma.s.dma_direction == DMA_READ);;
48808 +
48809 +#if defined(DEBUG_ASSERT) && defined(DEBUG_SDRAM_ASSERT)
48810 +    /* NOTE: not an assertion, since the thread packet could have successfully
48811 +     *       transferred the "put" dma to the far side - which could then have
48812 +     *       completed - but the far side will see a network error which will
48813 +     *       cause the virtual circuit to be dropped by the far side and this 
48814 +     *       DMA will be removed */
48815 +    if (rxdRail->RxdMain->DataEvent != EP3_EVENT_ACTIVE ||
48816 +       elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) != 1)
48817 +    {
48818 +       printk ("CompleteEnvelope: suspicious dma : Node=%d DataBlock=%d Event=%d\n", 
48819 +               env->NodeId, rxdRail->RxdMain->DataEvent, 
48820 +               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)));
48821 +    }
48822 +#endif
48823 +    
48824 +    EPRINTF6 (DBG_RCVR, "%s: CompleteEnvelope: rxd=%p NodeId=%d Xid=%llx Cookies=%08x,%08x\n", commsRail->Rail->Name,
48825 +             rxdRail, env->NodeId, (long long) env->Xid.Unique, dma.s.dma_srcCookieVProc, dma.s.dma_destCookieVProc);
48826 +
48827 +    /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the DMA descriptor will
48828 +     * be read from the EP_RETRY_DMA rather than the original DMA - this can then get reused 
48829 +     * and an incorrect DMA descriptor sent */
48830 +    dma.s.dma_source    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]);
48831 +    dma.s.dma_direction = (dma.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
48832 +    
48833 +    nodeId   = EP_VP_TO_NODE(dma.s.dma_srcVProc);
48834 +    nodeRail = &rail->Generic.Nodes[nodeId];
48835 +
48836 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
48837 +
48838 +    if (PAckVal != E3_PAckOk)
48839 +    {
48840 +       if (nodeRail->State == EP_NODE_CONNECTED)
48841 +           QueueDmaForRetry (rail, &dma, EP_RETRY_LOW_PRI_RETRY);
48842 +       else
48843 +           QueueDmaOnStalledList (rail, &dma);
48844 +    }
48845 +
48846 +    /* Finaly forcefully drop the spinlock for the thread */
48847 +    sl->sl_seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
48848 +
48849 +    wmb();
48850 +}
48851 +
48852 +void
48853 +StallThreadForNoDescs (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp)
48854 +{
48855 +    ELAN3_DEV      *dev       = rail->Device;
48856 +    sdramaddr_t    rcvrElan   = ep_elan2sdram (&rail->Generic, rcvrElanAddr);
48857 +    EP3_RCVR_RAIL  *rcvrRail  = (EP3_RCVR_RAIL *) (unsigned long) elan3_sdram_readq (dev, rcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr));
48858 +    EP_RCVR        *rcvr      = rcvrRail->Generic.Rcvr;
48859 +    EP_COMMS_RAIL  *commsRail = rcvrRail->Generic.CommsRail;
48860 +
48861 +    EPRINTF3 (DBG_RCVR, "%s: StallThreadForNoDescs - rcvrRail=%p sp=%x\n", commsRail->Rail->Name, rcvrRail, sp);
48862 +    
48863 +    IncrStat (commsRail, StallThread);
48864 +
48865 +    /* NOTE: spin lock not required as thread is trapped */
48866 +    
48867 +    if (rcvrRail->RcvrMain->PendingDescsTailp != 0)
48868 +    {
48869 +       EPRINTF1 (DBG_RCVR, "%s: StallThreadForNoDescs - pending descriptors, wakeup thread\n", commsRail->Rail->Name);
48870 +       
48871 +       /*
48872 +        * A receive buffer was queued after the thread had decided to go to 
48873 +        * sleep, but before the event interrupt occured.  Just restart the
48874 +        * thread to consume the envelope.
48875 +        */
48876 +       IssueRunThread (rail, sp);
48877 +    }
48878 +    else
48879 +    {
48880 +       EPRINTF1 (DBG_RCVR, "%s: StallThreadForNoDescs - set ThreadWaiting\n", commsRail->Rail->Name);
48881 +       
48882 +       IncrStat (commsRail, ThrdWaiting);
48883 +
48884 +       /* Mark the rcvr as waiting for a rxd, and schedule a call of ep_check_rcvr
48885 +        * to attempt to "steal" a descriptor from a different rail */
48886 +       rcvrRail->ThreadWaiting = sp;
48887 +
48888 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48889 +    }
48890 +}
48891 +
48892 +void
48893 +StallThreadForHalted (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp)
48894 +{
48895 +    ELAN3_DEV     *dev       = rail->Device;
48896 +    sdramaddr_t    rcvrElan  = ep_elan2sdram (&rail->Generic, rcvrElanAddr);
48897 +    EP3_RCVR_RAIL *rcvrRail  = (EP3_RCVR_RAIL *) (unsigned long) elan3_sdram_readq (dev, rcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr));
48898 +    EP_RCVR       *rcvr      = rcvrRail->Generic.Rcvr;
48899 +    unsigned long  flags     = 0;
48900 +
48901 +    spin_lock_irqsave (&rcvr->Lock, flags);
48902 +
48903 +    rcvrRail->ThreadHalted = sp;
48904 +
48905 +    EPRINTF2 (DBG_EPTRAP, "%s: StallThreadForHalted: sp=%08x\n", rail->Generic.Name, sp);
48906 +
48907 +    if (rcvrRail->CleanupWaiting)
48908 +       kcondvar_wakeupone (&rcvrRail->CleanupSleep, &rcvr->Lock);
48909 +    rcvrRail->CleanupWaiting = 0;
48910 +
48911 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48912 +}
48913 +/*
48914 + * RxDataEvent: arg == EP3_RXD_RAIL
48915 + *   Called on completion of receiving data.
48916 + */
48917 +static void
48918 +RxDataEvent (EP3_RAIL *rail, void *arg)
48919 +{
48920 +    EP3_RXD_RAIL  *rxdRail  = (EP3_RXD_RAIL *) arg;
48921 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
48922 +    EP_RXD        *rxd      = rxdRail->Generic.Rxd;
48923 +    EP_ENVELOPE   *env      = &rxd->RxdMain->Envelope;
48924 +    EP_RCVR       *rcvr     = rxd->Rcvr;
48925 +    ELAN3_DEV    *dev      = rail->Device;
48926 +    unsigned long flags;
48927 +    int delay = 1;
48928 +
48929 +    spin_lock_irqsave (&rcvr->Lock, flags);
48930 +    for (;;)
48931 +    {
48932 +       if (EP3_EVENT_FIRED (rxdRail->DataCookie, rxdRail->RxdMain->DataEvent))
48933 +           break;
48934 +
48935 +       if (EP3_EVENT_FIRING (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), rxdRail->DataCookie, rxdRail->RxdMain->DataEvent))
48936 +       {
48937 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
48938 +               panic ("RxDataEvent: events set but block copy not completed\n");
48939 +           DELAY(delay);
48940 +           delay <<= 1;
48941 +       }
48942 +       else
48943 +       {
48944 +           printk ("%s: RxDataEvent: rxd %p not complete [%x,%x,%x]\n", rail->Generic.Name, rxd, rxdRail->RxdMain->DataEvent,
48945 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)),
48946 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Type)));
48947 +           
48948 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
48949 +           return;
48950 +       }
48951 +       mb();
48952 +    }
48953 +
48954 +    /*
48955 +     * Note, since the thread will have sent the "get" dma before copying the
48956 +     * envelope, we must check that it has completed doing this,  if not then
48957 +     * it might be that the thread trapped due to a network error, so we must
48958 +     * spinlock against the thread 
48959 +     */
48960 +    if (rxd->RxdMain->Len == EP_RXD_PENDING)
48961 +    {
48962 +       LockRcvrThread (rcvrRail);
48963 +       UnlockRcvrThread (rcvrRail);
48964 +
48965 +       ASSERT (env->Version == EP_ENVELOPE_VERSION && rxd->RxdMain->Len != EP_RXD_PENDING);
48966 +    }
48967 +
48968 +    EPRINTF7 (DBG_RCVR, "%s: RxDataEvent: rxd=%p rxdRail=%p completed from elan node %d [XID=%llx] Length %d State %x\n", 
48969 +             rail->Generic.Name, rxd, rxdRail, env->NodeId, (long long) env->Xid.Unique, rxd->RxdMain->Len, rxd->State);
48970 +
48971 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_RECEIVE_ACTIVE || rxd->State == EP_RXD_PUT_ACTIVE || rxd->State == EP_RXD_GET_ACTIVE);
48972 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));      /* PCI read */
48973 +    EP_ASSERT (&rail->Generic, rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE);
48974 +
48975 +    rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
48976 +    rxd->Data.nmd_attr          = EP_RAIL2RAILMASK (rail->Generic.Number);
48977 +
48978 +    if (rxd->RxdMain->Len >= 0 && EP_IS_RPC(env->Attr))
48979 +       rxd->State = EP_RXD_RPC_IN_PROGRESS;
48980 +    else
48981 +    {
48982 +       rxd->State = EP_RXD_COMPLETED;
48983 +
48984 +       /* remove from active list */
48985 +       list_del (&rxd->Link);
48986 +
48987 +       UnbindRxdFromRail (rxd, rxdRail);
48988 +       FreeRxdRail (rcvrRail, rxdRail);
48989 +    }
48990 +
48991 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48992 +    ep_rxd_received (rxd);
48993 +
48994 +}
48995 +
48996 +/* 
48997 + * RxDataRetry: arg == EP3_RXD_RAIL
48998 + *   Called on retry of "get" dma of large transmit data
48999 + *   and rpc_get/rpc_put and "put" of datavec of rpc completion.
49000 + */
49001 +static void
49002 +RxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
49003 +{
49004 +    EP3_RXD_RAIL  *rxdRail  = (EP3_RXD_RAIL *) arg;
49005 +    EP_COMMS_RAIL *commsRail = rxdRail->Generic.RcvrRail->CommsRail;
49006 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
49007 +
49008 +#if defined(DEBUG_ASSERT)
49009 +    RxDataVerify (rail, arg, dma);
49010 +#endif
49011 +
49012 +    IncrStat (commsRail, RxDataRetry);
49013 +
49014 +    EPRINTF4 (DBG_RCVR, "%s: RxDataRetry: rcvr %p rxd %p [XID=%llx]\n", rail->Generic.Name, rxd->Rcvr, rxd, (long long) rxd->RxdMain->Envelope.Xid.Unique);
49015 +
49016 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&rxdRail->Backoff, EP_BACKOFF_DATA));
49017 +}
49018 +
49019 +static void
49020 +RxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
49021 +{
49022 +#if defined(DEBUG_ASSERT)
49023 +    EP3_RXD_RAIL   *rxdRail  = (EP3_RXD_RAIL *) arg;
49024 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
49025 +
49026 +    if (dma->s.dma_direction == DMA_WRITE)
49027 +    {
49028 +       EP_ASSERT (&rail->Generic, 
49029 +                  (rxd->State == EP_RXD_RECEIVE_ACTIVE  && rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE) ||
49030 +                  (rxd->State == EP_RXD_PUT_ACTIVE      && rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE) ||
49031 +                  (rxd->State == EP_RXD_COMPLETE_ACTIVE && rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_ACTIVE));
49032 +       EP_ASSERT (&rail->Generic, SDRAM_ASSERT (rxd->State == EP_RXD_COMPLETE_ACTIVE ?
49033 +                                                elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 1:            /* PCI read */
49034 +                                                elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 1));          /* PCI read */
49035 +    }
49036 +    else
49037 +    {
49038 +       EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_READ_REQUEUE);
49039 +       
49040 +#if defined(DEBUG_SDRAM_ASSERT)
49041 +       /* NOTE: not an assertion, since the "get" DMA can still be running if
49042 +        *       it's packet got a network error - and then the "put" from the
49043 +        *       far side has completed - however the virtual circuit should
49044 +        *       then be dropped by the far side and this DMA will be removed */
49045 +       if (EP_VP_TO_NODE(dma->s.dma_srcVProc) != ep_rxd_node(rxd) || 
49046 +           (rxd->State != EP_RXD_RECEIVE_ACTIVE && rxd->State != EP_RXD_GET_ACTIVE) ||
49047 +           rxdRail->RxdMain->DataEvent != EP3_EVENT_ACTIVE ||
49048 +           elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) != 1)
49049 +       {
49050 +           EPRINTF6 (DBG_RCVR, "%s: RxDataRetry: suspicious dma : VProc=%d NodeId=%d State=%d DataBlock=%x Event=%d\n",  
49051 +                     rail->Generic.Name, EP_VP_TO_NODE(dma->s.dma_srcVProc), ep_rxd_node(rxd), rxd->State, rxdRail->RxdMain->DataEvent, 
49052 +                     elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)));
49053 +       }
49054 +#endif /* defined(DEBUG_SDRAM_ASSERT) */
49055 +    }
49056 +#endif /* DEBUG_ASSERT */
49057 +}
49058 +
49059 +/*
49060 + * RxDoneEvent: arg == EP_RXD
49061 + *   Called on completion of large receive.
49062 + */
49063 +static void
49064 +RxDoneEvent (EP3_RAIL *rail, void *arg)
49065 +{
49066 +    EP3_RXD_RAIL  *rxdRail   = (EP3_RXD_RAIL *) arg;
49067 +    EP3_RCVR_RAIL *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49068 +    EP_COMMS_RAIL *commsRail = rcvrRail->Generic.CommsRail;
49069 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
49070 +    EP_RCVR       *rcvr      = rxd->Rcvr;
49071 +    ELAN3_DEV    *dev       = rail->Device;
49072 +    int            delay     = 1;
49073 +    unsigned long  flags;
49074 +
49075 +    spin_lock_irqsave (&rcvr->Lock, flags);
49076 +    for (;;)
49077 +    {
49078 +       if (EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))
49079 +           break;
49080 +       
49081 +       if (EP3_EVENT_FIRING (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))
49082 +       {
49083 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
49084 +               panic ("RxDoneEvent: events set but block copy not completed\n");
49085 +           DELAY(delay);
49086 +           delay <<= 1;
49087 +       }
49088 +       else
49089 +       {
49090 +           printk ("RxDoneEvent: rxd %p not complete [%x,%x.%x]\n", rxd, rxdRail->RxdMain->DoneEvent,
49091 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)),
49092 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Type)));
49093 +           
49094 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
49095 +           return;
49096 +       }
49097 +       mb();
49098 +    }
49099 +
49100 +    EPRINTF4 (DBG_RCVR, "%s: RxDoneEvent: rxd %p completed from elan node %d [XID=%llx]\n", 
49101 +             commsRail->Rail->Name, rxd, rxd->RxdMain->Envelope.NodeId, (long long) rxd->RxdMain->Envelope.Xid.Unique);
49102 +    
49103 +    IncrStat (commsRail, RxDoneEvent);
49104 +
49105 +    EP_ASSERT (&rail->Generic, rxdRail->RxdMain->DataEvent  == EP3_EVENT_PRIVATE);
49106 +    EP_ASSERT (&rail->Generic, EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent));
49107 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));      /* PCI read */
49108 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));      /* PCI read */
49109 +
49110 +    /* mark rxd as private  */
49111 +    rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
49112 +
49113 +    /* remove from active list */
49114 +    list_del (&rxd->Link);
49115 +
49116 +    UnbindRxdFromRail (rxd, rxdRail);
49117 +    FreeRxdRail (rcvrRail, rxdRail);
49118 +       
49119 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49120 +
49121 +    rxd->Handler (rxd);
49122 +}
49123 +
49124 +/* 
49125 + * RxDoneRetry: arg == EP_RXD
49126 + *   Called on retry of "put" of RPC completion status block
49127 + */
49128 +static void
49129 +RxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
49130 +{
49131 +    EP3_RXD_RAIL  *rxdRail   = (EP3_RXD_RAIL *) arg;
49132 +    EP_COMMS_RAIL *commsRail = rxdRail->Generic.RcvrRail->CommsRail;
49133 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
49134 +
49135 +#if defined(DEBUG_ASSERT)
49136 +    RxDoneVerify (rail, arg, dma);
49137 +#endif
49138 +
49139 +    IncrStat (commsRail, RxDoneRetry);
49140 +
49141 +    EPRINTF4 (DBG_RCVR, "%s: RxDoneRetry: rcvr %p rxd %p [XID=%llx]\n", commsRail->Rail->Name, rxd->Rcvr, rxd, (long long) rxd->RxdMain->Envelope.Xid.Unique);
49142 +
49143 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&rxdRail->Backoff, EP_BACKOFF_DONE));
49144 +}
49145 +
49146 +static void
49147 +RxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
49148 +{
49149 +#if defined(DEBUG_ASSERT)
49150 +    EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) arg;
49151 +    EP_RXD       *rxd     = rxdRail->Generic.Rxd;
49152 +
49153 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == ep_rxd_node(rxd));
49154 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_COMPLETE_ACTIVE && rxdRail->RxdMain->DoneEvent  == EP3_EVENT_ACTIVE);
49155 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 1));     /* PCI read */
49156 +#endif /* defined(DEBUG_ASSERT) */
49157 +}
49158 +
49159 +int
49160 +ep3rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *r)
49161 +{
49162 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) r;
49163 +    EP3_RAIL      *rail     = RCVR_TO_RAIL(rcvrRail);
49164 +    ELAN3_DEV     *dev      = rail->Device;
49165 +    EP3_RXD_RAIL  *rxdRail;
49166 +
49167 +    ASSERT ( SPINLOCK_HELD(&rxd->Rcvr->Lock));
49168 +
49169 +    if ((rxdRail = GetRxdRail (rcvrRail)) == NULL)
49170 +       return 0;
49171 +
49172 +    /* Flush the Elan TLB if mappings have changed */
49173 +    ep_perrail_dvma_sync (&rail->Generic);
49174 +
49175 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_addr), rxd->Data.nmd_addr);              /* PCI write */
49176 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len),  rxd->Data.nmd_len);               /* PCI write */
49177 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_attr), rxd->Data.nmd_attr);              /* PCI write */
49178 +
49179 +    /* Bind the rxdRail and rxd together */
49180 +    BindRxdToRail (rxd, rxdRail);
49181 +    
49182 +    /* Mark as active */
49183 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);
49184 +
49185 +    rxdRail->RxdMain->DataEvent  = EP3_EVENT_ACTIVE;
49186 +    rxdRail->RxdMain->DoneEvent  = EP3_EVENT_PRIVATE;
49187 +
49188 +    /* Interlock with StallThreadForNoDescs */
49189 +    spin_lock (&dev->IntrLock);
49190 +
49191 +    EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_queue_rxd: rcvr %p rxd %p rxdRail %p\n", rail->Generic.Name, rxd->Rcvr, rxd, rxdRail);
49192 +
49193 +    EP3_SPINENTER (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock), &rcvrRail->RcvrMain->PendingLock);
49194 +
49195 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next), 0);                                        /* PCI write */
49196 +    if (rcvrRail->RcvrMain->PendingDescsTailp == 0)
49197 +       elan3_sdram_writel (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), rxdRail->RxdElanAddr);       /* PCI write */
49198 +    else
49199 +       elan3_sdram_writel (dev, rcvrRail->RcvrMain->PendingDescsTailp, rxdRail->RxdElanAddr);                          /* PCI write */
49200 +    rcvrRail->RcvrMain->PendingDescsTailp = rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next);
49201 +    
49202 +    EP3_SPINEXIT (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock), &rcvrRail->RcvrMain->PendingLock);
49203 +
49204 +    /* If the thread has paused because it was woken up with no receive buffer */
49205 +    /* ready, then wake it up to process the one we've just added */
49206 +    if (rcvrRail->ThreadWaiting)
49207 +    {
49208 +       EPRINTF1 (DBG_RCVR, "%s: DoReceive: ThreadWaiting - restart thread\n", rail->Generic.Name);
49209 +
49210 +       IssueRunThread (rail, rcvrRail->ThreadWaiting);
49211 +
49212 +       rcvrRail->ThreadWaiting = (E3_Addr) 0;
49213 +    }
49214 +
49215 +    spin_unlock (&dev->IntrLock);
49216 +
49217 +    return 1;
49218 +}
49219 +
49220 +void
49221 +ep3rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
49222 +{
49223 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
49224 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49225 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
49226 +    ELAN3_DEV         *dev       = rail->Device;
49227 +
49228 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
49229 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
49230 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
49231 +    E3_DMA_BE         dmabe;
49232 +    int                       i, len;
49233 +
49234 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_PUT_ACTIVE);
49235 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
49236 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
49237 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
49238 +
49239 +    /* Flush the Elan TLB if mappings have changed */
49240 +    ep_perrail_dvma_sync (&rail->Generic);
49241 +    
49242 +    /* Generate the DMA chain to put the data in two loops to burst
49243 +     * the data across the PCI bus */
49244 +    for (len = 0, i = (nFrags-1), local += (nFrags-1), remote += (nFrags-1); i >= 0;   len += local->nmd_len, i--, local--, remote--)
49245 +    {
49246 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
49247 +       dmabe.s.dma_size            = local->nmd_len;
49248 +       dmabe.s.dma_source          = local->nmd_addr;
49249 +       dmabe.s.dma_dest            = remote->nmd_addr;
49250 +       dmabe.s.dma_destEvent       = (E3_Addr) 0;
49251 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (env->NodeId);
49252 +       if (i == (nFrags-1))
49253 +           dmabe.s.dma_srcEvent    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent);
49254 +       else
49255 +           dmabe.s.dma_srcEvent    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i]);
49256 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
49257 +       
49258 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_put: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
49259 +                 (long long) env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len, dmabe.s.dma_destCookieVProc, dmabe.s.dma_srcCookieVProc);
49260 +       
49261 +       if (i != 0)
49262 +           elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */
49263 +    }
49264 +    
49265 +    for (i = 0; i < nFrags; i++)
49266 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
49267 +    
49268 +    /* Initialise the data event */
49269 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);                   /* PCI write */
49270 +    rxdMain->DataEvent = EP3_EVENT_ACTIVE;
49271 +   
49272 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
49273 +
49274 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
49275 +    {
49276 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
49277 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_put: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
49278 +       
49279 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
49280 +    }
49281 +    
49282 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
49283 +}
49284 +
49285 +void
49286 +ep3rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
49287 +{
49288 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
49289 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49290 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
49291 +    ELAN3_DEV         *dev       = rail->Device;
49292 +
49293 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
49294 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
49295 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
49296 +    E3_DMA_BE         dmabe;
49297 +    int                       i, len;
49298 +
49299 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_GET_ACTIVE);
49300 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
49301 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
49302 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
49303 +       
49304 +    /* Flush the Elan TLB if mappings have changed */
49305 +    ep_perrail_dvma_sync (&rail->Generic);
49306 +    
49307 +    /* Generate the DMA chain to get the data in two loops to burst
49308 +     * the data across the PCI bus */
49309 +    for (len = 0, i = (nFrags-1), remote += (nFrags-1), local += (nFrags-1); i >= 0;   len += remote->nmd_len, i--, remote--, local--)
49310 +    {
49311 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
49312 +       dmabe.s.dma_size            = remote->nmd_len;
49313 +       dmabe.s.dma_source          = remote->nmd_addr;
49314 +       dmabe.s.dma_dest            = local->nmd_addr;
49315 +       if (i == (nFrags-1))
49316 +           dmabe.s.dma_destEvent   = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent);
49317 +       else
49318 +           dmabe.s.dma_destEvent   = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i]);
49319 +       dmabe.s.dma_destCookieVProc = LocalCookie (rail, env->NodeId);
49320 +       dmabe.s.dma_srcEvent        = (E3_Addr) 0;
49321 +       dmabe.s.dma_srcCookieVProc  = RemoteCookie (rail, env->NodeId);
49322 +       
49323 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_get rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
49324 +                 (long long) env->Xid.Unique, i, remote->nmd_addr, local->nmd_addr, remote->nmd_len, dmabe.s.dma_destCookieVProc, 
49325 +                 dmabe.s.dma_srcCookieVProc);
49326 +       
49327 +       /* 
49328 +        * Always copy down the dma descriptor, since we issue it as a READ_REQUEUE
49329 +        * dma, and the elan will fetch the descriptor to send out of the link from
49330 +        * the rxdElan->Dmas[i] location,  before issueing the DMA chain we modify
49331 +        * the dma_source.
49332 +        */
49333 +       elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */
49334 +    }
49335 +    
49336 +    for (i = 0; i < nFrags; i++)
49337 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
49338 +    
49339 +    /* Initialise the data event */
49340 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);                   /* PCI write */
49341 +    rxdMain->DataEvent  = EP3_EVENT_ACTIVE;
49342 +    
49343 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
49344 +
49345 +    /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the DMA descriptor will
49346 +     * be read from the EP_RETRY_DMA rather than the original DMA - this can then get reused 
49347 +     * and an incorrect DMA descriptor sent */
49348 +    dmabe.s.dma_source    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]);
49349 +    dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
49350 +    
49351 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
49352 +    {
49353 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
49354 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_get: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
49355 +       
49356 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
49357 +    }
49358 +
49359 +    BucketStat (rxd->Rcvr->Subsys, RPCGet, len);
49360 +}
49361 +       
49362 +void
49363 +ep3rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
49364 +{
49365 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
49366 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49367 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
49368 +    ELAN3_DEV         *dev       = rail->Device;
49369 +
49370 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
49371 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
49372 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
49373 +    E3_DMA_BE         dmabe;
49374 +    int                       i, len;
49375 +    
49376 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_COMPLETE_ACTIVE);
49377 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
49378 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
49379 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
49380 +
49381 +    /* Flush the Elan TLB if mappings have changed */
49382 +    ep_perrail_dvma_sync (&rail->Generic);
49383 +    
49384 +    /* Initialise the status block dma */
49385 +    dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
49386 +    dmabe.s.dma_size            = sizeof (EP_STATUSBLK);
49387 +    dmabe.s.dma_source          = rxd->NmdMain.nmd_addr + offsetof (EP_RXD_MAIN, StatusBlk);
49388 +    dmabe.s.dma_dest            = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, StatusBlk);
49389 +    dmabe.s.dma_destEvent       = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent);
49390 +    dmabe.s.dma_destCookieVProc = EP_VP_DATA(env->NodeId);
49391 +    dmabe.s.dma_srcEvent        = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent);
49392 +    dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
49393 +    
49394 +    EPRINTF8 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: rxd %p [XID=%llx] statusblk source=%08x dest=%08x len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
49395 +             (long long) env->Xid.Unique, dmabe.s.dma_source, dmabe.s.dma_dest, dmabe.s.dma_size, dmabe.s.dma_destCookieVProc, 
49396 +             dmabe.s.dma_srcCookieVProc);
49397 +
49398 +    for (len = 0, i = EP_MAXFRAG, remote += (nFrags-1), local += (nFrags-1); i > EP_MAXFRAG-nFrags; len += local->nmd_len, i--, local--, remote--)
49399 +    {
49400 +       /* copy down previous dma */
49401 +       elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]),  sizeof (E3_DMA));    /* PCI write block */
49402 +       
49403 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
49404 +       dmabe.s.dma_size            = local->nmd_len;
49405 +       dmabe.s.dma_source          = local->nmd_addr;
49406 +       dmabe.s.dma_dest            = remote->nmd_addr;
49407 +       dmabe.s.dma_destEvent       = (E3_Addr) 0;
49408 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (env->NodeId);
49409 +       dmabe.s.dma_srcEvent        = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i-1]);
49410 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
49411 +       
49412 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
49413 +                 (long long) env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len, dmabe.s.dma_destCookieVProc, 
49414 +                 dmabe.s.dma_srcCookieVProc);
49415 +    }
49416 +    
49417 +    for (i = EP_MAXFRAG-nFrags; i < EP_MAXFRAG; i++)
49418 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
49419 +    
49420 +    /* Initialise the done event */
49421 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 1);                                   /* PCI write */
49422 +    rxdMain->DoneEvent  = EP3_EVENT_ACTIVE;
49423 +
49424 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
49425 +
49426 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
49427 +    {
49428 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
49429 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
49430 +       
49431 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
49432 +    }
49433 +
49434 +    BucketStat (rxd->Rcvr->Subsys, CompleteRPC, len);
49435 +}
49436 +       
49437 +void
49438 +ep3rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
49439 +{
49440 +    EP3_RAIL          *rail   = (EP3_RAIL *) commsRail->Rail;
49441 +    sdramaddr_t        qdescs = ((EP3_COMMS_RAIL *) commsRail)->QueueDescs;
49442 +    EP3_RCVR_RAIL     *rcvrRail;
49443 +    EP3_InputQueue     qdesc;
49444 +    sdramaddr_t        stack;
49445 +    unsigned long      flags;
49446 +
49447 +    KMEM_ZALLOC (rcvrRail, EP3_RCVR_RAIL *, sizeof (EP3_RCVR_RAIL), TRUE);
49448 +
49449 +    kcondvar_init (&rcvrRail->CleanupSleep);
49450 +    spin_lock_init (&rcvrRail->FreeDescLock);
49451 +    INIT_LIST_HEAD (&rcvrRail->FreeDescList);
49452 +    INIT_LIST_HEAD (&rcvrRail->DescBlockList);
49453 +
49454 +    rcvrRail->Generic.CommsRail = commsRail;
49455 +    rcvrRail->Generic.Rcvr      = rcvr;
49456 +
49457 +    rcvrRail->RcvrMain       = ep_alloc_main (&rail->Generic, sizeof (EP3_RCVR_RAIL_MAIN), 0, &rcvrRail->RcvrMainAddr);
49458 +    rcvrRail->RcvrElan       = ep_alloc_elan (&rail->Generic, sizeof (EP3_RCVR_RAIL_ELAN), 0, &rcvrRail->RcvrElanAddr);
49459 +    rcvrRail->InputQueueBase = ep_alloc_elan (&rail->Generic, EP_INPUTQ_SIZE * rcvr->InputQueueEntries, 0, &rcvrRail->InputQueueAddr);
49460 +    stack                    = ep_alloc_elan (&rail->Generic, EP3_STACK_SIZE, 0, &rcvrRail->ThreadStack);
49461 +
49462 +    rcvrRail->TotalDescCount = 0;
49463 +    rcvrRail->FreeDescCount  = 0;
49464 +
49465 +    /* Initialise the main/elan spin lock */
49466 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock.sl_lock), 0);
49467 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock.sl_seq),  0);
49468 +
49469 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock.sl_lock), 0);
49470 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock.sl_seq), 0);
49471 +    
49472 +    /* Initialise the receive lists */
49473 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), 0);
49474 +    
49475 +    /* Initialise the ThreadShould Halt */
49476 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadShouldHalt), 0);
49477 +
49478 +    /* Initialise pointer to the ep_rcvr_rail */
49479 +    elan3_sdram_writeq (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr), (unsigned long) rcvrRail);
49480 +
49481 +    /* Initialise elan visible main memory */
49482 +    rcvrRail->RcvrMain->ThreadLock.sl_seq  = 0;
49483 +    rcvrRail->RcvrMain->PendingLock.sl_seq = 0;
49484 +    rcvrRail->RcvrMain->PendingDescsTailp  = 0;
49485 +
49486 +    /* initialise and copy down the input queue descriptor */
49487 +    qdesc.q_state          = E3_QUEUE_FULL;
49488 +    qdesc.q_base           = rcvrRail->InputQueueAddr;
49489 +    qdesc.q_top            = rcvrRail->InputQueueAddr + (rcvr->InputQueueEntries-1) * EP_INPUTQ_SIZE;
49490 +    qdesc.q_fptr           = rcvrRail->InputQueueAddr;
49491 +    qdesc.q_bptr           = rcvrRail->InputQueueAddr + EP_INPUTQ_SIZE;
49492 +    qdesc.q_size           = EP_INPUTQ_SIZE;
49493 +    qdesc.q_event.ev_Count = 0;
49494 +    qdesc.q_event.ev_Type  = 0;
49495 +
49496 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, qdescs + rcvr->Service * sizeof (EP3_InputQueue), sizeof (EP3_InputQueue));
49497 +
49498 +    spin_lock_irqsave (&rcvr->Lock, flags);
49499 +    rcvr->Rails[rail->Generic.Number] = &rcvrRail->Generic;
49500 +    rcvr->RailMask |= EP_RAIL2RAILMASK (rail->Generic.Number);
49501 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49502 +
49503 +    /* initialise and run the Elan thread to process the queue */
49504 +    IssueRunThread (rail, ep3_init_thread (rail->Device, ep_symbol (&rail->ThreadCode, "ep3comms_rcvr"),
49505 +                                          rcvrRail->ThreadStack, stack, EP3_STACK_SIZE, 5,
49506 +                                          rail->RailElanAddr, rcvrRail->RcvrElanAddr, rcvrRail->RcvrMainAddr,
49507 +                                          EP_MSGQ_ADDR(rcvr->Service),
49508 +                                          rail->ElanCookies));
49509 +}
49510 +
49511 +void
49512 +ep3rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
49513 +{
49514 +    EP3_RAIL         *rail     = (EP3_RAIL *) commsRail->Rail;
49515 +    EP3_RCVR_RAIL    *rcvrRail = (EP3_RCVR_RAIL *) rcvr->Rails[rail->Generic.Number];  
49516 +    unsigned long     flags;
49517 +    struct list_head *el, *nel;
49518 +
49519 +    EPRINTF1 (DBG_RCVR, "%s: ep3rcvr_del_rail: removing rail\n", rail->Generic.Name);
49520 +
49521 +    /* flag the rail as no longer available */
49522 +    spin_lock_irqsave (&rcvr->Lock, flags);
49523 +    rcvr->RailMask &= ~EP_RAIL2RAILMASK (rail->Generic.Number);
49524 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49525 +    
49526 +    /* mark the input queue descriptor as full */
49527 +    SetQueueLocked(rail, ((EP3_COMMS_RAIL *)commsRail)->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue));
49528 +
49529 +    /* need to halt the thread first         */
49530 +    /*   set ThreadShouldHalt in elan memory */
49531 +    /*   then trigger the event              */
49532 +    /*   and wait on haltWait                */
49533 +    elan3_sdram_writel  (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadShouldHalt), TRUE);
49534 +
49535 +    IssueSetevent (rail,  EP_MSGQ_ADDR(rcvr->Service) + offsetof(EP3_InputQueue, q_event));
49536 +
49537 +    spin_lock_irqsave (&rcvr->Lock, flags);
49538 +
49539 +    while (rcvrRail->ThreadHalted == 0)
49540 +    {
49541 +       rcvrRail->CleanupWaiting++;
49542 +       kcondvar_wait (&rcvrRail->CleanupSleep, &rcvr->Lock, &flags);
49543 +    }
49544 +
49545 +    /* at this point the thread is halted and it has no envelopes */
49546
49547 +    /* we need to wait until all the rxd's in the list that are 
49548 +     * bound to the rail we are removing are not pending 
49549 +     */
49550 +    for (;;)
49551 +    {
49552 +       int mustWait = 0;
49553 +       
49554 +       list_for_each (el, &rcvr->ActiveDescList) {
49555 +           EP_RXD       *rxd     = list_entry (el,EP_RXD, Link);
49556 +           EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail;
49557 +
49558 +           if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail) && rxd->RxdMain->Len != EP_RXD_PENDING)
49559 +           {
49560 +               mustWait++;
49561 +               break;
49562 +           }
49563 +       }
49564 +       
49565 +       if (! mustWait)
49566 +           break;
49567 +
49568 +       EPRINTF1 (DBG_RCVR, "%s: ep3rcvr_del_rail: waiting for active rxd's to be returned\n", rail->Generic.Name);
49569 +
49570 +       rcvrRail->CleanupWaiting++;
49571 +       kcondvar_wait (&rcvrRail->CleanupSleep, &rcvr->Lock, &flags);
49572 +    }
49573 +
49574 +    /* at this point all rxd's in the list that are bound to the deleting rail are not pending */
49575 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
49576 +       EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
49577 +       EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail;
49578 +       
49579 +       if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail))
49580 +       {
49581 +           /* here we need to unbind the remaining rxd's */
49582 +           rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
49583 +           rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
49584
49585 +           elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);  /* PCI write */
49586 +           elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);  /* PCI write */
49587 +
49588 +           UnbindRxdFromRail (rxd, rxdRail);
49589 +           FreeRxdRail(rcvrRail,  rxdRail );
49590 +       }
49591 +    }
49592 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49593 +    
49594 +    /* wait for all rxd's for this rail to become free */
49595 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
49596 +    while (rcvrRail->FreeDescCount != rcvrRail->TotalDescCount)
49597 +    {
49598 +       rcvrRail->FreeDescWaiting++;
49599 +       kcondvar_wait (&rcvrRail->FreeDescSleep, &rcvrRail->FreeDescLock, &flags);
49600 +    }
49601 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
49602 +
49603 +    /* can now remove the rail as it can no longer be used */
49604 +    spin_lock_irqsave (&rcvr->Lock, flags);
49605 +    rcvr->Rails[rail->Generic.Number] = NULL;
49606 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49607 +
49608 +    /* all the rxd's associated with DescBlocks must be in the FreeDescList */
49609 +    ASSERT (rcvrRail->TotalDescCount == rcvrRail->FreeDescCount);
49610 +
49611 +    /* run through the DescBlockList deleting them */
49612 +    while (!list_empty (&rcvrRail->DescBlockList))
49613 +       FreeRxdRailBlock (rcvrRail, list_entry(rcvrRail->DescBlockList.next, EP3_RXD_RAIL_BLOCK , Link));
49614 +
49615 +    /* it had better be empty after that */
49616 +    ASSERT ((rcvrRail->TotalDescCount == 0) && (rcvrRail->TotalDescCount == rcvrRail->FreeDescCount));
49617 +    
49618 +    ep_free_elan (&rail->Generic, rcvrRail->ThreadStack, EP3_STACK_SIZE);
49619 +    ep_free_elan (&rail->Generic, rcvrRail->InputQueueAddr, EP_INPUTQ_SIZE * rcvr->InputQueueEntries);
49620 +    ep_free_elan (&rail->Generic, rcvrRail->RcvrElanAddr, sizeof (EP3_RCVR_RAIL_ELAN));
49621 +    ep_free_main (&rail->Generic, rcvrRail->RcvrMainAddr, sizeof (EP3_RCVR_RAIL_MAIN));
49622 +
49623 +    KMEM_FREE (rcvrRail, sizeof (EP3_RCVR_RAIL));
49624 +}
49625 +
49626 +EP_RXD *
49627 +ep3rcvr_steal_rxd (EP_RCVR_RAIL *r)
49628 +{
49629 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) r;
49630 +    EP3_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
49631 +    EP_RCVR       *rcvr     = rcvrRail->Generic.Rcvr;
49632 +    E3_Addr        rxdElanAddr;
49633 +    unsigned long flags;
49634 +
49635 +    spin_lock_irqsave (&rcvr->Lock, flags);
49636 +
49637 +    LockRcvrThread (rcvrRail);
49638 +    if ((rxdElanAddr = elan3_sdram_readl (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs))) != 0)
49639 +    {
49640 +       sdramaddr_t  rxdElan  = ep_elan2sdram (&rail->Generic, rxdElanAddr);
49641 +       EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) (unsigned long) elan3_sdram_readq (rail->Device, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr));
49642 +       EP_RXD      *rxd      = rxdRail->Generic.Rxd;
49643 +       sdramaddr_t  next;
49644 +       
49645 +       EPRINTF2 (DBG_RCVR, "%s: StealRxdFromOtherRail stealing rxd %p\n", rail->Generic.Name, rail);
49646 +       
49647 +       /* Remove the RXD from the pending desc list */
49648 +       if ((next = elan3_sdram_readl (rail->Device, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next))) == 0)
49649 +           rcvrRail->RcvrMain->PendingDescsTailp = 0;
49650 +       elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), next);
49651 +       UnlockRcvrThread (rcvrRail);
49652 +       
49653 +       UnbindRxdFromRail (rxd, rxdRail);
49654 +       
49655 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
49656 +       
49657 +       /* Mark rxdRail as no longer active */
49658 +       rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
49659 +       rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
49660 +       elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);
49661 +       elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);
49662 +       
49663 +       FreeRxdRail (rcvrRail, rxdRail);
49664 +
49665 +       return rxd;
49666 +    }
49667 +
49668 +    UnlockRcvrThread (rcvrRail);
49669 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49670 +
49671 +    return NULL;
49672 +}
49673 +
49674 +long
49675 +ep3rcvr_check (EP_RCVR_RAIL *r, long nextRunTime)
49676 +{
49677 +    EP3_RCVR_RAIL    *rcvrRail = (EP3_RCVR_RAIL *) r;
49678 +    EP3_RAIL         *rail     = RCVR_TO_RAIL (rcvrRail);
49679 +    EP_RCVR          *rcvr     = rcvrRail->Generic.Rcvr;
49680 +    EP_COMMS_SUBSYS *subsys    = rcvr->Subsys;
49681 +    EP_SYS           *sys       = subsys->Subsys.Sys;
49682 +    EP_RXD           *rxd;
49683 +    unsigned long     flags;
49684 +
49685 +    if (rcvrRail->FreeDescCount < ep_rxd_lowat && !AllocateRxdRailBlock (rcvrRail))
49686 +    {
49687 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow rxd rail pool\n", rail->Generic.Name);
49688 +               
49689 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
49690 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
49691 +    }
49692 +    
49693 +    if (rcvrRail->ThreadWaiting && (rxd = StealRxdFromOtherRail (rcvr)) != NULL)
49694 +    {
49695 +       /* Map the receive buffer into this rail as well */
49696 +       EPRINTF4 (DBG_RCVR, "%s: mapping rxd->Data (%08x.%08x.%08x) into this rails\n",
49697 +                 rail->Generic.Name, rxd->Data.nmd_addr,rxd->Data.nmd_len, rxd->Data.nmd_attr);
49698 +
49699 +       spin_lock_irqsave (&rcvr->Lock, flags);
49700 +       if ((!(EP_NMD_RAILMASK (&rxd->Data) & EP_RAIL2RAILMASK(rail->Generic.Number)) &&                /* not already mapped and */
49701 +            ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rail->Generic.Number)) == 0) ||        /* failed to map it */
49702 +           ep3rcvr_queue_rxd (rxd, &rcvrRail->Generic))                                                /* or failed to queue it */
49703 +       {
49704 +           EPRINTF5 (DBG_RCVR,"%s: stolen rcvr=%p rxd=%p -> rnum=%d rcvrRail=%p (failed)\n", 
49705 +                     rail->Generic.Name, rcvr, rxd, rail->Generic.Number, rcvrRail);
49706 +               
49707 +           if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
49708 +               nextRunTime = lbolt + RESOURCE_RETRY_TIME;
49709 +       }
49710 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
49711 +    }
49712 +    
49713 +    return nextRunTime;
49714 +}
49715 +
49716 +static void
49717 +ep3rcvr_flush_filtering (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
49718 +{
49719 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) rcvrRail->Generic.CommsRail;
49720 +    EP3_RAIL       *rail      = (EP3_RAIL *) commsRail->Generic.Rail;
49721 +    ELAN3_DEV      *dev       = rail->Device;
49722 +    sdramaddr_t    qdesc      = commsRail->QueueDescs + rcvr->Service*sizeof (EP3_InputQueue);
49723 +    E3_Addr        qTop       = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_top));
49724 +    E3_Addr        qBase      = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_base));
49725 +    E3_Addr        qSize      = elan3_sdram_readl (dev,qdesc + offsetof (EP3_InputQueue, q_size));
49726 +    E3_uint32      nfptr, qbptr;
49727 +    unsigned long  flags;
49728 +    
49729 +    spin_lock_irqsave (&rcvr->Lock, flags);
49730 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
49731 +
49732 +    nfptr = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_fptr));
49733 +    qbptr = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_bptr));
49734 +    
49735 +    if (nfptr == qTop)
49736 +       nfptr = qBase;
49737 +    else
49738 +       nfptr += qSize;
49739 +    
49740 +    while (nfptr != qbptr)
49741 +    {
49742 +       unsigned nodeId = elan3_sdram_readl (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr) + 
49743 +                                      offsetof (EP_ENVELOPE, NodeId));
49744 +       
49745 +       EPRINTF3 (DBG_DISCON, "%s: ep3rcvr_flush_filtering: nodeId=%d State=%d\n", rail->Generic.Name, nodeId, rail->Generic.Nodes[nodeId].State);
49746 +       
49747 +       if (rail->Generic.Nodes[nodeId].State == EP_NODE_LOCAL_PASSIVATE)
49748 +           elan3_sdram_writel (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr) + 
49749 +                         offsetof (EP_ENVELOPE, Version), 0);
49750 +       
49751 +       if (nfptr == qTop)
49752 +           nfptr = qBase;
49753 +       else
49754 +           nfptr += qSize;
49755 +    }
49756 +    
49757 +    UnlockRcvrThread (rcvrRail);                                                                               /* PCI unlock */
49758 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49759 +}
49760 +
49761 +static void
49762 +ep3rcvr_flush_flushing (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
49763 +{
49764 +    EP3_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
49765 +    struct list_head *el, *nel;
49766 +    unsigned long     flags;
49767 +
49768 +    spin_lock_irqsave (&rcvr->Lock, flags);
49769 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
49770 +    
49771 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
49772 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
49773 +       EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
49774 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
49775 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
49776 +
49777 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
49778 +           continue;
49779 +       
49780 +       EPRINTF6 (DBG_DISCON, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p state %x.%x elan node %d\n", rail->Generic.Name,
49781 +                 rcvr, rxd, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, env->NodeId);
49782 +       
49783 +       switch (rxd->State)
49784 +       {
49785 +       case EP_RXD_FREE:
49786 +           printk ("ep3rcvr_flush_flushing: rxd state is free but bound to a fail\n");
49787 +           break;
49788 +
49789 +       case EP_RXD_RECEIVE_ACTIVE:
49790 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                /* incomplete message receive */
49791 +           {
49792 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - passive\n", 
49793 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
49794 +               
49795 +               nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
49796 +               continue;
49797 +           }
49798 +           break;
49799 +           
49800 +       default:
49801 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
49802 +
49803 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))    /* incomplete RPC */
49804 +           {
49805 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - active\n", 
49806 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
49807 +               
49808 +               EP_INVALIDATE_XID (rxd->MsgXid);                        /* Ignore any previous NMD map responses */
49809 +               
49810 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
49811 +               continue;
49812 +           }
49813 +           break;
49814 +
49815 +       case EP_RXD_BEEN_ABORTED:
49816 +           printk ("ep3rcvr_flush_flushing: rxd state is aborted but bound to a fail\n");
49817 +           break;
49818 +       }
49819 +
49820 +       EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - finished\n", 
49821 +                 rail->Generic.Name, rcvr, rxd, env->NodeId);
49822 +    }    
49823 +
49824 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
49825 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49826 +}
49827 +
49828 +void
49829 +ep3rcvr_flush_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
49830 +{
49831 +    EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
49832 +
49833 +    switch (rail->Generic.CallbackStep)
49834 +    {
49835 +    case EP_CB_FLUSH_FILTERING:
49836 +       ep3rcvr_flush_filtering (rcvr, rcvrRail);
49837 +       break;
49838 +
49839 +    case EP_CB_FLUSH_FLUSHING:
49840 +       ep3rcvr_flush_flushing (rcvr, rcvrRail);
49841 +       break;
49842 +    }
49843 +}
49844 +
49845 +void
49846 +ep3rcvr_failover_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
49847 +{
49848 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
49849 +    EP3_RAIL         *rail   = RCVR_TO_RAIL (rcvrRail);
49850 +    ELAN3_DEV        *dev    = rail->Device;
49851 +    struct list_head *el, *nel;
49852 +    unsigned long     flags;
49853 +#ifdef SUPPORT_RAIL_FAILOVER
49854 +    EP_SYS           *sys    = subsys->Subsys.Sys;
49855 +#endif
49856 +   
49857 +    spin_lock_irqsave (&rcvr->Lock, flags);
49858 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
49859 +    
49860 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
49861 +       EP_RXD             *rxd      = list_entry (el, EP_RXD, Link);
49862 +       EP3_RXD_RAIL       *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
49863 +       EP_ENVELOPE        *env      = &rxd->RxdMain->Envelope;
49864 +       EP_NODE_RAIL       *nodeRail = &rail->Generic.Nodes[env->NodeId];
49865 +#ifdef SUPPORT_RAIL_FAILOVER
49866 +       EP_MANAGER_MSG_BODY msgBody;
49867 +       EP_NODE            *node     = &sys->Nodes[env->NodeId];
49868 +#endif
49869 +       
49870 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_PASSIVATED)
49871 +           continue;
49872 +
49873 +       EPRINTF6 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rcvr %p rxd %p elan node %d state %x.%x\n", rail->Generic.Name, rcvr, rxd, env->NodeId,
49874 +                 rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent);
49875 +
49876 +       switch (rxd->State)
49877 +       {
49878 +       case EP_RXD_FREE:
49879 +           printk ("ep4rcvr_failover_callback: rxd state is free but bound to a fail\n");
49880 +           break;
49881 +
49882 +       case EP_RXD_RECEIVE_ACTIVE:
49883 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                /* incomplete message receive */
49884 +           {
49885 +               EPRINTF4 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
49886 +               
49887 +               UnbindRxdFromRail (rxd, rxdRail);
49888 +               
49889 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
49890 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
49891 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
49892 +               
49893 +               /* clear the data event - the done event should already be zero */
49894 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
49895 +               
49896 +               FreeRxdRail (rcvrRail, rxdRail);
49897 +               
49898 +               /* epcomms thread will requeue on different rail */
49899 +               ep_kthread_schedule (&subsys->Thread, lbolt);
49900 +               continue;
49901 +           }
49902 +           break;
49903 +
49904 +       default:
49905 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
49906 +
49907 +#ifdef SUPPORT_RAIL_FAILOVER
49908 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent) && !(EP_IS_NO_FAILOVER(env->Attr)))         /* incomplete RPC, which can be failed over  */
49909 +           {
49910 +               EPRINTF7 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rxd %p State %x.%x Xid %llxx MsgXid %llxx nodeId %d - failover\n", 
49911 +                         rail->Generic.Name, rxd, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, 
49912 +                         (long long) env->Xid.Unique, (long long) rxd->MsgXid.Unique, env->NodeId);
49913 +               
49914 +               if (EP_XID_INVALID(rxd->MsgXid))
49915 +                   rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
49916 +               
49917 +               /* XXXX maybe only send the message if the node failover retry is now ? */
49918 +               msgBody.Failover.Xid      = env->Xid;
49919 +               msgBody.Failover.Railmask = node->ConnectedRails;
49920 +               
49921 +               ep_send_message (&rail->Generic, env->NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST, rxd->MsgXid, &msgBody);
49922 +               
49923 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
49924 +               continue;
49925 +           }
49926 +#endif
49927 +           break;
49928 +
49929 +       case EP_RXD_BEEN_ABORTED:
49930 +           printk ("ep4rcvr_failover_callback: rxd state is aborted but bound to a fail\n");
49931 +           break;
49932 +       }
49933 +
49934 +       EPRINTF3 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rxd %p nodeId %d - finished\n", rail->Generic.Name, rxd, env->NodeId);
49935 +    }
49936 +    
49937 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
49938 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49939 +}
49940 +
49941 +void
49942 +ep3rcvr_disconnect_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
49943 +{
49944 +    EP3_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
49945 +    ELAN3_DEV        *dev = rail->Device;
49946 +    struct list_head *el, *nel;
49947 +    struct list_head  rxdList;
49948 +    unsigned long     flags;
49949 +
49950 +    INIT_LIST_HEAD (&rxdList);
49951 +    
49952 +    spin_lock_irqsave (&rcvr->Lock, flags);
49953 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
49954 +    
49955 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
49956 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
49957 +       EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
49958 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
49959 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
49960 +       
49961 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
49962 +           continue;
49963 +
49964 +       EPRINTF4 (DBG_DISCON, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p elan node %d\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
49965 +
49966 +       switch (rxd->State)
49967 +       {
49968 +       case EP_RXD_FREE:
49969 +           printk ("ep3rcvr_disconnect_callback: rxd state is free but bound to a fail\n");
49970 +           break;
49971 +
49972 +       case EP_RXD_RECEIVE_ACTIVE:
49973 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                        /* incomplete message receive */
49974 +           {
49975 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
49976 +               
49977 +               UnbindRxdFromRail (rxd, rxdRail);
49978 +               
49979 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
49980 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
49981 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
49982 +               
49983 +               /* clear the data event - the done event should already be zero */
49984 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
49985 +               
49986 +               FreeRxdRail (rcvrRail, rxdRail);
49987 +
49988 +               /* remark it as pending if it was partially received */
49989 +               rxd->RxdMain->Len = EP_RXD_PENDING;
49990 +               
49991 +               /* epcomms thread will requeue on different rail */
49992 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
49993 +               continue;
49994 +           }
49995 +           break;
49996 +
49997 +       default:
49998 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
49999 +
50000 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))    /* incomplete RPC */
50001 +           {
50002 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - not able to failover\n",
50003 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
50004 +           
50005 +               /* Mark as no longer active */
50006 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
50007 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
50008 +               
50009 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
50010 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);       /* PCI write */
50011 +               
50012 +               UnbindRxdFromRail (rxd, rxdRail);
50013 +               FreeRxdRail (rcvrRail, rxdRail);
50014 +
50015 +               /* Ignore any previous NMD/failover responses */
50016 +               EP_INVALIDATE_XID (rxd->MsgXid);
50017 +               
50018 +               /* Remove from active list */
50019 +               list_del (&rxd->Link);
50020 +               
50021 +               if (rxd->State == EP_RXD_RPC_IN_PROGRESS)                               /* owned by user .... */
50022 +                   rxd->State = EP_RXD_BEEN_ABORTED;
50023 +               else                                                                    /* queue for completion */
50024 +               {
50025 +                   rxd->RxdMain->Len = EP_CONN_RESET;                                  /* ensure ep_rxd_status() fails */
50026 +                   list_add_tail (&rxd->Link, &rxdList);
50027 +               }
50028 +               continue;
50029 +           }
50030 +           break;
50031 +
50032 +       case EP_RXD_BEEN_ABORTED:
50033 +           printk ("ep4rcvr_failover_callback: rxd state is aborted but bound to a fail\n");
50034 +           break;
50035 +       }
50036 +           
50037 +       EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
50038 +                 rail->Generic.Name, rcvr, rxd, env->NodeId);
50039 +    }
50040 +    
50041 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
50042 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50043 +
50044 +    while (! list_empty (&rxdList)) 
50045 +    {
50046 +       EP_RXD *rxd = list_entry (rxdList.next, EP_RXD, Link);
50047 +
50048 +       list_del (&rxd->Link);
50049 +
50050 +       rxd->Handler (rxd);
50051 +    }
50052 +}
50053 +
50054 +void
50055 +ep3rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *r)
50056 +{
50057 +    EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) r;
50058 +    sdramaddr_t   rxdElan = rxdRail->RxdElan;
50059 +    EP3_RAIL     *rail    = RCVR_TO_RAIL (rxdRail->Generic.RcvrRail);
50060 +    ELAN3_DEV    *dev     = rail->Device;
50061 +
50062 +    (di->func)(di->arg, "      ChainEvent=%x.%x %x.%x\n",
50063 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[0].ev_Count)),
50064 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[0].ev_Type)),
50065 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[1].ev_Count)),
50066 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[1].ev_Type)));
50067 +    (di->func)(di->arg, "      ChainEvent=%x.%x %x.%x\n",
50068 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[2].ev_Count)),
50069 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[2].ev_Type)),
50070 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[3].ev_Count)),
50071 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[3].ev_Type)));
50072 +    (di->func)(di->arg, "      DataEvent=%x.%x DoneEvent=%x.%x\n",
50073 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)),
50074 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Type)),
50075 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)),
50076 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Type)));
50077 +    (di->func)(di->arg, "      Data=%x Len=%x\n",
50078 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_addr)),
50079 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len)));
50080 +}
50081 +
50082 +void
50083 +ep3rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *r)
50084 +{
50085 +    EP3_RCVR_RAIL  *rcvrRail  = (EP3_RCVR_RAIL *) r;
50086 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) rcvrRail->Generic.CommsRail;
50087 +    EP3_RAIL       *rail      = RCVR_TO_RAIL (rcvrRail);
50088 +    ELAN3_DEV      *dev       = rail->Device;
50089 +    sdramaddr_t     queue     = commsRail->QueueDescs + rcvrRail->Generic.Rcvr->Service * sizeof (EP3_InputQueue);
50090 +    E3_Addr         qbase      = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base));
50091 +    E3_Addr         qtop       = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top));
50092 +    E3_uint32       qsize      = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size));
50093 +    int             freeCount  = 0;
50094 +    int             blockCount = 0;
50095 +    unsigned long   flags;
50096 +    struct list_head *el;
50097 +
50098 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
50099 +    list_for_each (el, &rcvrRail->FreeDescList)
50100 +       freeCount++;
50101 +    list_for_each (el, &rcvrRail->DescBlockList)
50102 +       blockCount++;
50103 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
50104 +
50105 +    (di->func)(di->arg, "                 Rail %d FreeDesc %d (%d) Total %d Blocks %d %s\n",
50106 +              rail->Generic.Number, rcvrRail->FreeDescCount, freeCount, rcvrRail->TotalDescCount, blockCount, 
50107 +              rcvrRail->ThreadWaiting ? "ThreadWaiting" : "");
50108 +    
50109 +    (di->func)(di->arg, "                 InputQueue state=%x bptr=%x size=%x top=%x base=%x fptr=%x\n",
50110 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_state)),
50111 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_bptr)),
50112 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size)),
50113 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top)),
50114 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base)),
50115 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr)));
50116 +    (di->func)(di->arg, "                            event=%x.%x [%x.%x] wevent=%x.%x\n",
50117 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Type)),
50118 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Count)),
50119 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Source)),
50120 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Dest)),
50121 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_wevent)),
50122 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_wcount)));
50123 +    
50124 +    LockRcvrThread (rcvrRail);
50125 +    {
50126 +       E3_Addr     nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr));
50127 +       EP_ENVELOPE env;
50128 +       
50129 +       if (nfptr == qtop)
50130 +           nfptr = qbase;
50131 +       else
50132 +           nfptr += qsize;
50133 +
50134 +       while (nfptr != elan3_sdram_readl (dev, queue + offsetof (E3_Queue, q_bptr)))
50135 +       {
50136 +           elan3_sdram_copyl_from_sdram (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr),
50137 +                                         &env, sizeof (EP_ENVELOPE));
50138 +           
50139 +           (di->func)(di->arg, "                 ENVELOPE Version=%x Attr=%x Xid=%08x.%08x.%016llx\n",
50140 +                      env.Version, env.Attr, env.Xid.Generation, env.Xid.Handle, (long long) env.Xid.Unique);
50141 +           (di->func)(di->arg, "                          NodeId=%x Range=%x TxdRail=%x TxdMain=%x.%x.%x\n",
50142 +                      env.NodeId, env.Range, env.TxdRail, env.TxdMain.nmd_addr,
50143 +                      env.TxdMain.nmd_len, env.TxdMain.nmd_attr);
50144 +           
50145 +           
50146 +           if (nfptr == qtop)
50147 +               nfptr = qbase;
50148 +           else
50149 +               nfptr += qsize;
50150 +       }
50151 +    }
50152 +    UnlockRcvrThread (rcvrRail);
50153 +}
50154 +
50155 +void
50156 +ep3rcvr_fillout_rail_stats(EP_RCVR_RAIL *rcvr_rail, char *str) {
50157 +    /* no stats here yet */
50158 +    /* EP3_RCVR_RAIL * ep4rcvr_rail = (EP3_RCVR_RAIL *) rcvr_rail; */
50159 +}
50160 +
50161 Index: linux-2.4.21/drivers/net/qsnet/ep/epcommsRx_elan4.c
50162 ===================================================================
50163 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcommsRx_elan4.c    2004-02-23 16:02:56.000000000 -0500
50164 +++ linux-2.4.21/drivers/net/qsnet/ep/epcommsRx_elan4.c 2005-06-01 23:12:54.653430896 -0400
50165 @@ -0,0 +1,1758 @@
50166 +/*
50167 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
50168 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
50169 + *
50170 + *    For licensing information please see the supplied COPYING file
50171 + *
50172 + */
50173 +
50174 +#ident "@(#)$Id: epcommsRx_elan4.c,v 1.30.2.2 2004/11/12 10:54:51 mike Exp $"
50175 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx_elan4.c,v $ */
50176 +
50177 +#include <qsnet/kernel.h>
50178 +
50179 +#include <elan/kcomm.h>
50180 +#include <elan/epsvc.h>
50181 +#include <elan/epcomms.h>
50182 +
50183 +#include "debug.h"
50184 +#include "kcomm_vp.h"
50185 +#include "kcomm_elan4.h"
50186 +#include "epcomms_elan4.h"
50187 +
50188 +#include <elan4/trtype.h>
50189 +
50190 +#define RCVR_TO_COMMS(rcvrRail)                ((EP4_COMMS_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail)
50191 +#define RCVR_TO_RAIL(rcvrRail)         ((EP4_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail->Rail)
50192 +#define RCVR_TO_DEV(rcvrRail)          (RCVR_TO_RAIL(rcvrRail)->r_ctxt.ctxt_dev)
50193 +#define RCVR_TO_SUBSYS(rcvrRail)       (((EP_RCVR_RAIL *) rcvrRail)->Rcvr->Subsys)
50194 +
50195 +#define RXD_TO_RCVR(txdRail)           ((EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail)
50196 +#define RXD_TO_RAIL(txdRail)           RCVR_TO_RAIL(RXD_TO_RCVR(rxdRail))
50197 +
50198 +static void rxd_interrupt (EP4_RAIL *rail, void *arg);
50199 +
50200 +static __inline__ void 
50201 +__ep4_rxd_assert_free (EP4_RXD_RAIL *rxdRail, const char *file, const int line)
50202 +{
50203 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
50204 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
50205 +    register int i, failed = 0;
50206 +    
50207 +    for (i = 0; i <= EP_MAXFRAG; i++)
50208 +       if (((rxdRail)->rxd_main->rxd_sent[i] != EP4_STATE_FREE)) 
50209 +           failed |= (1 << i);
50210 +    
50211 +    if (((rxdRail)->rxd_main->rxd_failed != EP4_STATE_FREE))
50212 +       failed |= (1 << 5);
50213 +    if (((rxdRail)->rxd_main->rxd_done   != EP4_STATE_FREE)) 
50214 +       failed |= (1 << 6);
50215 +    
50216 +    if (sdram_assert)
50217 +    {
50218 +       if (((elan4_sdram_readq (RXD_TO_RAIL(rxdRail)->r_ctxt.ctxt_dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType)) >> 32) != 0)) 
50219 +           failed |= (1 << 7);
50220 +       for (i = 0; i < EP_MAXFRAG; i++)
50221 +           if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType)) >> 32) != 0)) 
50222 +               failed |= (1 << (8 + i));
50223 +       if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)) >> 32) != 0)) 
50224 +           failed |= (1 << 12);
50225 +       if (((int)(elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)) >> 32) != -32)) 
50226 +           failed |= (1 << 13);
50227 +    }
50228 +
50229 +    if (failed)
50230 +    {
50231 +       printk ("__ep4_rxd_assert_free: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
50232 +
50233 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_free: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
50234 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
50235 +
50236 +       for (i = 0; i <= EP_MAXFRAG; i++)
50237 +           (rxdRail)->rxd_main->rxd_sent[i] = EP4_STATE_FREE;
50238 +
50239 +       (rxdRail)->rxd_main->rxd_failed = EP4_STATE_FREE;
50240 +       (rxdRail)->rxd_main->rxd_done   = EP4_STATE_FREE;
50241 +
50242 +       if (sdram_assert)
50243 +       {
50244 +           elan4_sdram_writew (RXD_TO_RAIL(rxdRail)->r_ctxt.ctxt_dev,
50245 +                               (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType) + 4, 0);
50246 +
50247 +           for (i = 0; i < EP_MAXFRAG; i++)
50248 +               elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType) + 4, 0);
50249 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType) + 4, 0);
50250 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType) + 4, -32);
50251 +       }
50252 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_free");
50253 +    }
50254 +}
50255 +
50256 +static __inline__ void
50257 +__ep4_rxd_assert_pending(EP4_RXD_RAIL *rxdRail, const char *file, const int line)
50258 +{ 
50259 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
50260 +    register int failed = 0;
50261 +
50262 +    failed |= ((rxdRail)->rxd_main->rxd_done != EP4_STATE_ACTIVE);
50263 +
50264 +    if (failed)
50265 +    {
50266 +       printk ("__ep4_rxd_assert_pending: %s - %d\n", file, line);
50267 +
50268 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_pending: %s - %d\n", file, line);
50269 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
50270 +
50271 +       (rxdRail)->rxd_main->rxd_done = EP4_STATE_ACTIVE;
50272 +
50273 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_pending");
50274 +    }
50275 +}
50276 +
50277 +static __inline__ void
50278 +__ep4_rxd_assert_private(EP4_RXD_RAIL *rxdRail, const char *file, const int line)
50279 +{
50280 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
50281 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
50282 +    register int failed = 0;
50283 +
50284 +    if (((rxdRail)->rxd_main->rxd_failed != EP4_STATE_ACTIVE)) failed |= (1 << 0);
50285 +    if (((rxdRail)->rxd_main->rxd_done != EP4_STATE_PRIVATE))  failed |= (1 << 1);
50286 +    
50287 +    if (sdram_assert)
50288 +    {
50289 +       if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)) >> 32) != 0))           failed |= (1 << 2);
50290 +       if (((int) (elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)) >> 32) != -32)) failed |= (1 << 3);
50291 +    }
50292 +
50293 +    if (failed)
50294 +    {
50295 +       printk ("__ep4_rxd_assert_private: %s - %d\n", file, line);
50296 +
50297 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_private: %s - %d\n", file, line);
50298 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
50299 +
50300 +       (rxdRail)->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
50301 +       (rxdRail)->rxd_main->rxd_done   = EP4_STATE_PRIVATE;
50302 +
50303 +       if (sdram_assert)
50304 +       {
50305 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType) + 4, 0);
50306 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType) + 4, -32);
50307 +       }
50308 +
50309 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_private");
50310 +    }
50311 +}
50312 +
50313 +static __inline__ void
50314 +__ep4_rxd_private_to_free (EP4_RXD_RAIL *rxdRail)
50315 +{
50316 +    register int i;
50317 +
50318 +    for (i = 0; i <= EP_MAXFRAG; i++)
50319 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_FREE;
50320 +
50321 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_FREE;
50322 +    rxdRail->rxd_main->rxd_done   = EP4_STATE_FREE;
50323 +}
50324 +
50325 +static __inline__ void
50326 +__ep4_rxd_force_private (EP4_RXD_RAIL *rxdRail)
50327 +{
50328 +    EP4_RAIL  *rail = RXD_TO_RAIL(rxdRail);
50329 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
50330 +
50331 +    (rxdRail)->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
50332 +    (rxdRail)->rxd_main->rxd_done = EP4_STATE_PRIVATE;
50333 +
50334 +    if (sdram_assert) 
50335 +       elan4_sdram_writeq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
50336 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
50337 +}
50338 +
50339 +#define EP4_RXD_ASSERT_FREE(rxdRail)           __ep4_rxd_assert_free(rxdRail, __FILE__, __LINE__)
50340 +#define EP4_RXD_ASSERT_PENDING(rxdRail)                __ep4_rxd_assert_pending(rxdRail, __FILE__, __LINE__)
50341 +#define EP4_RXD_ASSERT_PRIVATE(rxdRail)                __ep4_rxd_assert_private(rxdRail, __FILE__, __LINE__)
50342 +#define EP4_RXD_PRIVATE_TO_FREE(rxdRail)       __ep4_rxd_private_to_free(rxdRail)
50343 +#define EP4_RXD_FORCE_PRIVATE(rxdRail)         __ep4_rxd_force_private(rxdRail)
50344 +
50345 +static int
50346 +alloc_rxd_block (EP4_RCVR_RAIL *rcvrRail)
50347 +{
50348 +    EP4_RAIL           *rail = RCVR_TO_RAIL (rcvrRail);
50349 +    ELAN4_DEV          *dev  = rail->r_ctxt.ctxt_dev;
50350 +    EP4_RXD_RAIL_BLOCK *blk;
50351 +    EP4_RXD_RAIL_MAIN  *rxdMain;
50352 +    EP_ADDR            rxdMainAddr;
50353 +    sdramaddr_t                rxdElan;
50354 +    EP_ADDR            rxdElanAddr;
50355 +    EP4_RXD_RAIL       *rxdRail;
50356 +    unsigned long       flags;
50357 +    int                 i, j;
50358 +
50359 +    KMEM_ZALLOC (blk, EP4_RXD_RAIL_BLOCK *, sizeof (EP4_RXD_RAIL_BLOCK), 1);
50360 +
50361 +    if (blk == NULL)
50362 +       return 0;
50363 +
50364 +    if ((rxdElan = ep_alloc_elan (&rail->r_generic, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK, 0, &rxdElanAddr)) == (sdramaddr_t) 0)
50365 +    {
50366 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
50367 +       return 0;
50368 +    }
50369 +
50370 +    if ((rxdMain = ep_alloc_main (&rail->r_generic, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK, 0, &rxdMainAddr)) == (EP4_RXD_RAIL_MAIN *) NULL)
50371 +    {
50372 +       ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
50373 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
50374 +       return 0;
50375 +    }
50376 +
50377 +    if (ep4_reserve_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK, 0) != 0)
50378 +    {
50379 +       ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
50380 +       ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
50381 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
50382 +
50383 +       return 0;
50384 +    }
50385 +
50386 +    for (rxdRail = &blk->blk_rxds[0], i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++, rxdRail++)
50387 +    {
50388 +       rxdRail->rxd_generic.RcvrRail = &rcvrRail->rcvr_generic;
50389 +       rxdRail->rxd_elan             = rxdElan;
50390 +       rxdRail->rxd_elan_addr        = rxdElanAddr;
50391 +       rxdRail->rxd_main             = rxdMain;
50392 +       rxdRail->rxd_main_addr        = rxdMainAddr;
50393 +
50394 +       /* reserve 128 bytes of "event" cq space for the chained STEN packets */
50395 +       if ((rxdRail->rxd_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, EP4_RXD_STEN_CMD_NDWORDS)) == NULL)
50396 +           goto failed;
50397 +
50398 +       /* allocate a single word of "setevent" command space */
50399 +       if ((rxdRail->rxd_scq = ep4_get_ecq (rail, EP4_ECQ_SINGLE, 1)) == NULL)
50400 +       {
50401 +           ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
50402 +           goto failed;
50403 +       }
50404 +
50405 +       /* initialise the completion events */
50406 +       for (j = 0; j <= EP_MAXFRAG; j++)
50407 +           rxdMain->rxd_sent[j] = EP4_STATE_FREE;
50408 +
50409 +       rxdMain->rxd_done   = EP4_STATE_FREE;
50410 +       rxdMain->rxd_failed = EP4_STATE_FREE;
50411 +
50412 +       /* initialise the scq for the thread */
50413 +       rxdMain->rxd_scq = rxdRail->rxd_scq->ecq_addr;
50414 +
50415 +       /* initialise the "start" event to copy the first STEN packet into the command queue */
50416 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType),
50417 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_START_CMD_NDWORDS));
50418 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CopySource),
50419 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]));
50420 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CopyDest),
50421 +                           rxdRail->rxd_ecq->ecq_addr);
50422 +
50423 +       /* initialise the "chain" events to copy the next STEN packet into the command queue */
50424 +       for (j = 0; j < EP_MAXFRAG; j++)
50425 +       {
50426 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CountAndType),
50427 +                               E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
50428 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CopySource),
50429 +                               rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j+1]));
50430 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CopyDest),
50431 +                               rxdRail->rxd_ecq->ecq_addr);
50432 +       }
50433 +
50434 +       /* initialise the portions of the sten packets which don't change */
50435 +       for (j = 0; j < EP_MAXFRAG+1; j++)
50436 +       {
50437 +           if (j < EP_MAXFRAG)
50438 +               elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_dma_dstEvent),
50439 +                                   rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j]));
50440 +           else
50441 +               elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_dma_dstEvent),
50442 +                                   rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done));
50443 +
50444 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_guard),
50445 +                               GUARD_CMD | GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (EP4_STEN_RETRYCOUNT));
50446 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_write_cmd),
50447 +                               WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_sent[j])));
50448 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_write_value),
50449 +                               EP4_STATE_FINISHED);
50450 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_fail_guard),
50451 +                               GUARD_CMD | GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (EP4_STEN_RETRYCOUNT));
50452 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_fail_setevent),
50453 +                               SET_EVENT_CMD | (rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed)));
50454 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_nop_cmd),
50455 +                               NOP_CMD);
50456 +       }
50457 +
50458 +       /* register a main interrupt cookie */
50459 +       ep4_register_intcookie (rail, &rxdRail->rxd_intcookie, rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
50460 +                               rxd_interrupt, rxdRail);
50461 +
50462 +       /* initialise the command stream for the done event */
50463 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_write_cmd),
50464 +                           WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_done)));
50465 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_write_value),
50466 +                           EP4_STATE_FINISHED);
50467 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_intr_cmd),
50468 +                           INTERRUPT_CMD | (rxdRail->rxd_intcookie.int_val << E4_MAIN_INT_SHIFT));
50469 +
50470 +       /* initialise the command stream for the fail event */
50471 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_write_cmd),
50472 +                           WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_failed)));
50473 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_write_value),
50474 +                           EP4_STATE_FAILED);
50475 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_intr_cmd),
50476 +                           INTERRUPT_CMD | (rxdRail->rxd_intcookie.int_val << E4_MAIN_INT_SHIFT));
50477 +
50478 +       /* initialise the done and fail events */
50479 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
50480 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
50481 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CopySource),
50482 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd));
50483 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CopyDest),
50484 +                           rxdRail->rxd_ecq->ecq_addr);
50485 +
50486 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType),
50487 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
50488 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CopySource),
50489 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd));
50490 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CopyDest),
50491 +                           rxdRail->rxd_ecq->ecq_addr);
50492 +       
50493 +       /* initialise the pointer to the main memory portion */
50494 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_main), 
50495 +                           rxdMainAddr);
50496 +
50497 +       /* move onto next descriptor */
50498 +       rxdElan     += EP4_RXD_RAIL_ELAN_SIZE;
50499 +       rxdElanAddr += EP4_RXD_RAIL_ELAN_SIZE;
50500 +       rxdMain      = (EP4_RXD_RAIL_MAIN *) ((unsigned long) rxdMain + EP4_RXD_RAIL_MAIN_SIZE);
50501 +       rxdMainAddr += EP4_RXD_RAIL_MAIN_SIZE;
50502 +    }
50503 +
50504 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
50505 +
50506 +    list_add  (&blk->blk_link, &rcvrRail->rcvr_blocklist);
50507 +
50508 +    rcvrRail->rcvr_totalcount += EP4_NUM_RXD_PER_BLOCK;
50509 +    rcvrRail->rcvr_freecount  += EP4_NUM_RXD_PER_BLOCK;
50510 +
50511 +    for (i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++)
50512 +       list_add (&blk->blk_rxds[i].rxd_generic.Link, &rcvrRail->rcvr_freelist);
50513 +
50514 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
50515 +
50516 +    return 1;
50517 +
50518 + failed:
50519 +    while (--i >= 0)
50520 +    {
50521 +       rxdRail--;
50522 +
50523 +       ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
50524 +       ep4_put_ecq (rail, rxdRail->rxd_scq, 1);
50525 +
50526 +       ep4_deregister_intcookie (rail, &rxdRail->rxd_intcookie);
50527 +    }
50528 +
50529 +    ep4_release_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK);
50530 +    
50531 +    ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
50532 +    ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
50533 +    KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
50534 +
50535 +    return 0;
50536 +}
50537 +
50538 +
50539 +static void
50540 +free_rxd_block (EP4_RCVR_RAIL *rcvrRail, EP4_RXD_RAIL_BLOCK *blk)
50541 +{
50542 +    EP4_RAIL     *rail = RCVR_TO_RAIL (rcvrRail);
50543 +    EP4_RXD_RAIL *rxdRail;
50544 +    unsigned long flags;
50545 +    int           i;
50546 +
50547 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
50548 +
50549 +    list_del (&blk->blk_link);
50550 +
50551 +    rcvrRail->rcvr_totalcount -= EP4_NUM_RXD_PER_BLOCK;
50552 +
50553 +    for (rxdRail = &blk->blk_rxds[0], i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++, rxdRail++)
50554 +    {
50555 +       rcvrRail->rcvr_freecount--;
50556 +
50557 +       ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
50558 +       ep4_put_ecq (rail, rxdRail->rxd_scq, 1);
50559 +
50560 +       ep4_deregister_intcookie (rail, &rxdRail->rxd_intcookie);
50561 +
50562 +       list_del (&rxdRail->rxd_generic.Link);
50563 +    }
50564 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
50565 +
50566 +    ep4_release_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK);
50567 +
50568 +    ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
50569 +    ep_free_elan (&rail->r_generic, blk->blk_rxds[0].rxd_elan_addr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
50570 +
50571 +    KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
50572 +}
50573 +
50574 +static EP4_RXD_RAIL *
50575 +get_rxd_rail (EP4_RCVR_RAIL *rcvrRail)
50576 +{
50577 +    EP_COMMS_SUBSYS  *subsys = RCVR_TO_SUBSYS(rcvrRail);
50578 +    EP4_RXD_RAIL     *rxdRail;
50579 +    unsigned long flags;
50580 +    int low_on_rxds;
50581 +
50582 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
50583 +
50584 +    if (list_empty (&rcvrRail->rcvr_freelist))
50585 +       rxdRail = NULL;
50586 +    else
50587 +    {
50588 +       rxdRail = list_entry (rcvrRail->rcvr_freelist.next, EP4_RXD_RAIL, rxd_generic.Link);
50589 +
50590 +       EP4_RXD_ASSERT_FREE(rxdRail);
50591 +
50592 +       list_del (&rxdRail->rxd_generic.Link);
50593 +
50594 +       rcvrRail->rcvr_freecount--;
50595 +    }
50596 +    /* Wakeup the descriptor primer thread if there's not many left */
50597 +    low_on_rxds = (rcvrRail->rcvr_freecount < ep_rxd_lowat);
50598 +
50599 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
50600 +
50601 +    if (low_on_rxds)
50602 +       ep_kthread_schedule (&subsys->Thread, lbolt);
50603 +
50604 +    return (rxdRail);
50605 +}
50606 +
50607 +static void
50608 +free_rxd_rail (EP4_RCVR_RAIL *rcvrRail, EP4_RXD_RAIL *rxdRail)
50609 +{
50610 +    unsigned long flags;
50611 +
50612 +    EP4_RXD_ASSERT_FREE(rxdRail);
50613 +
50614 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
50615 +    
50616 +    list_add (&rxdRail->rxd_generic.Link, &rcvrRail->rcvr_freelist);
50617 +
50618 +    rcvrRail->rcvr_freecount++;
50619 +
50620 +    if (rcvrRail->rcvr_freewaiting)
50621 +    {
50622 +       rcvrRail->rcvr_freewaiting--;
50623 +       kcondvar_wakeupall (&rcvrRail->rcvr_freesleep, &rcvrRail->rcvr_freelock);
50624 +    }
50625 +
50626 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
50627 +}
50628 +
50629 +static void
50630 +bind_rxd_rail (EP_RXD *rxd, EP4_RXD_RAIL *rxdRail)
50631 +{
50632 +    EP4_RAIL *rail = RCVR_TO_RAIL (rxdRail->rxd_generic.RcvrRail);
50633 +
50634 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
50635 +
50636 +    EPRINTF3 (DBG_RCVR, "%s: bind_rxd_rail: rxd=%p rxdRail=%p\n",  rail->r_generic.Name, rxd, rxdRail);
50637 +
50638 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_rxd), rxd->NmdMain.nmd_addr);                      /* PCI write */
50639 +
50640 +    rxd->RxdRail             = &rxdRail->rxd_generic;
50641 +    rxdRail->rxd_generic.Rxd = rxd;
50642 +}
50643 +
50644 +static void
50645 +unbind_rxd_rail (EP_RXD *rxd, EP4_RXD_RAIL *rxdRail)
50646 +{
50647 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
50648 +    
50649 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
50650 +    ASSERT (rxd->RxdRail == &rxdRail->rxd_generic && rxdRail->rxd_generic.Rxd == rxd);
50651 +
50652 +    EP4_RXD_ASSERT_PRIVATE (rxdRail);
50653 +
50654 +    EPRINTF3 (DBG_RCVR, "%s: unbind_rxd_rail: rxd=%p rxdRail=%p\n",  RCVR_TO_RAIL(rcvrRail)->r_generic.Name, rxd, rxdRail);
50655 +
50656 +    rxd->RxdRail             = NULL;
50657 +    rxdRail->rxd_generic.Rxd = NULL;
50658 +
50659 +    if (rcvrRail->rcvr_cleanup_waiting)
50660 +       kcondvar_wakeupall (&rcvrRail->rcvr_cleanup_sleep, &rxd->Rcvr->Lock);
50661 +    rcvrRail->rcvr_cleanup_waiting = 0;
50662 +
50663 +    EP4_RXD_PRIVATE_TO_FREE (rxdRail);
50664 +}
50665 +
50666 +
50667 +static void
50668 +rcvr_stall_interrupt (EP4_RAIL *rail, void *arg)
50669 +{
50670 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg;
50671 +    EP_RCVR       *rcvr     = rcvrRail->rcvr_generic.Rcvr;
50672 +    unsigned long  flags;
50673 +
50674 +    spin_lock_irqsave (&rcvr->Lock, flags);
50675 +    
50676 +    EPRINTF1 (DBG_RCVR, "rcvr_stall_interrupt: rcvrRail %p thread halted\n", rcvrRail);
50677 +
50678 +    rcvrRail->rcvr_thread_halted = 1;
50679 +
50680 +    kcondvar_wakeupall (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock);
50681 +
50682 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50683 +}
50684 +
50685 +static void
50686 +rcvr_stall_haltop (ELAN4_DEV *dev, void *arg)
50687 +{
50688 +    EP4_RCVR_RAIL  *rcvrRail  = (EP4_RCVR_RAIL *) arg;
50689 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
50690 +    EP_RCVR        *rcvr      = rcvrRail->rcvr_generic.Rcvr;
50691 +    sdramaddr_t     qdesc     = ((EP4_COMMS_RAIL *) commsRail)->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
50692 +    E4_uint64       qbptr     = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
50693 +
50694 +    /* Mark the queue as full by writing the fptr */
50695 +    if (qbptr == (rcvrRail->rcvr_slots_addr + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1)))
50696 +       elan4_sdram_writeq (dev, qdesc + offsetof (E4_InputQueue, q_fptr), rcvrRail->rcvr_slots_addr);
50697 +    else
50698 +       elan4_sdram_writeq (dev, qdesc + offsetof (E4_InputQueue, q_fptr), qbptr + EP_INPUTQ_SIZE);
50699 +
50700 +    /* Notify the thread that it should stall after processing any outstanding envelopes */
50701 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie),
50702 +                       rcvrRail->rcvr_stall_intcookie.int_val);
50703 +
50704 +    /* Issue a swtevent to the queue event to wake the thread up */
50705 +    ep4_set_event_cmd (rcvrRail->rcvr_resched, rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent));
50706 +}
50707 +
50708 +static void
50709 +rxd_interrupt (EP4_RAIL *rail, void *arg)
50710 +{
50711 +    EP4_RXD_RAIL      *rxdRail  = (EP4_RXD_RAIL *) arg;
50712 +    EP4_RCVR_RAIL     *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
50713 +    EP_RCVR           *rcvr     = rcvrRail->rcvr_generic.Rcvr;
50714 +    EP4_RXD_RAIL_MAIN *rxdMain  = rxdRail->rxd_main;
50715 +    unsigned long      delay    = 1;
50716 +    EP_RXD            *rxd;
50717 +    EP_ENVELOPE       *env;
50718 +    unsigned long      flags;
50719 +
50720 +    spin_lock_irqsave (&rcvr->Lock, flags);
50721 +
50722 +    for (;;)
50723 +    {
50724 +       if (rxdMain->rxd_done == EP4_STATE_FINISHED || rxdMain->rxd_failed == EP4_STATE_FAILED)
50725 +           break;
50726 +
50727 +       /* The write to rxd_done could be held up in the PCI bridge even though
50728 +        * we've seen the interrupt cookie.  Unlike elan3, there is no possibility
50729 +        * of spurious interrupts since we flush the command queues on node 
50730 +        * disconnection and the txcallback mechanism */
50731 +       mb();
50732 +
50733 +       if (delay > EP4_EVENT_FIRING_TLIMIT)
50734 +       {
50735 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
50736 +
50737 +           EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "rxd_interrupt - not finished\n");
50738 +           return;
50739 +       }
50740 +       DELAY(delay);
50741 +       delay <<= 1;
50742 +    }
50743 +
50744 +    if (rxdMain->rxd_done != EP4_STATE_FINISHED)
50745 +    {
50746 +       EPRINTF8 (DBG_RETRY, "%s: rxd_interrupt: rxdRail %p retry: done=%d failed=%d NodeId=%d XID=%08x.%08x.%016llx\n",
50747 +                 rail->r_generic.Name, rxdRail, (int)rxdMain->rxd_done, (int)rxdMain->rxd_failed, rxdRail->rxd_generic.Rxd->RxdMain->Envelope.NodeId,
50748 +                 rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Generation, rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Handle, 
50749 +                 rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Unique);
50750 +    
50751 +       spin_lock (&rcvrRail->rcvr_retrylock);
50752 +
50753 +       rxdRail->rxd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;                        /* XXXX backoff ? */
50754 +
50755 +       list_add_tail (&rxdRail->rxd_retry_link, &rcvrRail->rcvr_retrylist);
50756 +
50757 +       ep_kthread_schedule (&rail->r_retry_thread, rxdRail->rxd_retry_time);
50758 +       spin_unlock (&rcvrRail->rcvr_retrylock);
50759 +
50760 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50761 +       return;
50762 +    }
50763 +    
50764 +    rxd = rxdRail->rxd_generic.Rxd;
50765 +    env = &rxd->RxdMain->Envelope;
50766 +
50767 +    /*
50768 +     * Note, since the thread will have sent the remote dma packet before copying 
50769 +     * the envelope, we must check that it has completed doing this,  we do this
50770 +     * by acquiring the spinlock against the thread which it only drops once it's
50771 +     * completed.
50772 +     */
50773 +    if (rxd->RxdMain->Len == EP_RXD_PENDING)
50774 +    {
50775 +       EP4_SPINENTER (rail->r_ctxt.ctxt_dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock),
50776 +                      &rcvrRail->rcvr_main->rcvr_thread_lock);
50777 +       
50778 +       EP4_SPINEXIT (rail->r_ctxt.ctxt_dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock),
50779 +                     &rcvrRail->rcvr_main->rcvr_thread_lock);
50780 +       
50781 +       ASSERT (env->Version == EP_ENVELOPE_VERSION && rxd->RxdMain->Len != EP_RXD_PENDING);
50782 +    }
50783 +
50784 +    EPRINTF8 (DBG_RCVR, "%s: rxd_interrupt: rxd %p finished from %d XID %08x.%08x.%016llx len %d attr %x\n", rail->r_generic.Name, 
50785 +             rxd, rxd->RxdMain->Envelope.NodeId, rxd->RxdMain->Envelope.Xid.Generation, rxd->RxdMain->Envelope.Xid.Handle, 
50786 +             rxd->RxdMain->Envelope.Xid.Unique,  rxd->RxdMain->Len, rxd->RxdMain->Envelope.Attr);
50787 +
50788 +    rxdMain->rxd_done  = EP4_STATE_PRIVATE;
50789 +    rxd->Data.nmd_attr = EP_RAIL2RAILMASK (rail->r_generic.Number);
50790 +
50791 +    switch (rxd->State)
50792 +    {
50793 +    case EP_RXD_RECEIVE_ACTIVE:
50794 +       if (rxd->RxdMain->Len >= 0 && EP_IS_RPC(env->Attr))
50795 +           rxd->State = EP_RXD_RPC_IN_PROGRESS;
50796 +       else
50797 +       {
50798 +           rxd->State = EP_RXD_COMPLETED;
50799 +
50800 +           /* remove from active list */
50801 +           list_del (&rxd->Link);
50802 +
50803 +           unbind_rxd_rail (rxd, rxdRail);
50804 +           free_rxd_rail (rcvrRail, rxdRail);
50805 +       }
50806 +
50807 +       if (rxd->RxdMain->Len >= 0) {
50808 +           INC_STAT(rcvrRail->rcvr_generic.stats,rx);
50809 +           ADD_STAT(rcvrRail->rcvr_generic.stats,rx_len,rxd->RxdMain->Len);
50810 +           INC_STAT(rail->r_generic.Stats,rx);
50811 +           ADD_STAT(rail->r_generic.Stats,rx_len,rxd->RxdMain->Len);
50812 +       }
50813 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50814 +       ep_rxd_received (rxd);
50815 +
50816 +       break;
50817 +
50818 +    case EP_RXD_PUT_ACTIVE:
50819 +    case EP_RXD_GET_ACTIVE:
50820 +       rxd->State = EP_RXD_RPC_IN_PROGRESS;
50821 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50822 +       
50823 +       rxd->Handler (rxd);
50824 +       break;
50825 +
50826 +    case EP_RXD_COMPLETE_ACTIVE:
50827 +       rxd->State = EP_RXD_COMPLETED;
50828 +
50829 +       /* remove from active list */
50830 +       list_del (&rxd->Link);
50831 +
50832 +       unbind_rxd_rail (rxd, rxdRail);
50833 +       free_rxd_rail (rcvrRail, rxdRail);
50834 +
50835 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50836 +
50837 +       rxd->Handler(rxd);
50838 +       break;
50839 +
50840 +    default:
50841 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50842 +
50843 +       printk ("%s: rxd_interrupt: rxd %p in invalid state %d\n", rail->r_generic.Name, rxd, rxd->State);
50844 +       /* NOTREACHED */
50845 +    }
50846 +}
50847 +
50848 +static void
50849 +ep4rcvr_flush_filtering (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
50850 +{
50851 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
50852 +    EP4_RAIL       *rail      = RCVR_TO_RAIL(rcvrRail);
50853 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
50854 +    sdramaddr_t    qdesc      = commsRail->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
50855 +    E4_Addr        qbase      = rcvrRail->rcvr_slots_addr;
50856 +    E4_Addr        qlast      = qbase + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1);
50857 +    E4_uint64      qfptr, qbptr;
50858 +    unsigned long  flags;
50859 +    
50860 +    spin_lock_irqsave (&rcvr->Lock, flags);
50861 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
50862 +    
50863 +    /* zip down the input queue and invalidate any envelope we find to a node which is locally passivated */
50864 +    qfptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_fptr));
50865 +    qbptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
50866 +
50867 +    while (qfptr != qbptr)
50868 +    {
50869 +       unsigned int nodeId = elan4_sdram_readl (dev, rcvrRail->rcvr_slots + (qfptr - qbase) + offsetof (EP_ENVELOPE, NodeId));
50870 +
50871 +       EPRINTF3 (DBG_DISCON, "%s: ep4rcvr_flush_filtering: nodeId=%d State=%d\n", rail->r_generic.Name, nodeId, rail->r_generic.Nodes[nodeId].State);
50872 +       
50873 +       if (rail->r_generic.Nodes[nodeId].State == EP_NODE_LOCAL_PASSIVATE)
50874 +           elan4_sdram_writel (dev,  rcvrRail->rcvr_slots + (qfptr - qbase) + offsetof (EP_ENVELOPE, Version), 0);
50875 +       
50876 +       if (qfptr != qlast)
50877 +           qfptr += EP_INPUTQ_SIZE;
50878 +       else
50879 +           qfptr = qbase;
50880 +    }
50881 +
50882 +    /* Insert a setevent command into the thread's command queue
50883 +     * to ensure that all sten packets have completed */
50884 +    elan4_guard (rcvrRail->rcvr_ecq->ecq_cq, GUARD_ALL_CHANNELS);
50885 +    ep4comms_flush_setevent (commsRail, rcvrRail->rcvr_ecq->ecq_cq);
50886 +    
50887 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
50888 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50889 +}
50890 +
50891 +static void
50892 +ep4rcvr_flush_flushing (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
50893 +{
50894 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
50895 +    ELAN4_DEV       *dev  = rail->r_ctxt.ctxt_dev;
50896 +    struct list_head *el, *nel;
50897 +    struct list_head  rxdList;
50898 +    unsigned long     flags;
50899 +
50900 +    INIT_LIST_HEAD (&rxdList);
50901 +    
50902 +    /* remove any sten packets which are retrying to nodes which are being passivated */
50903 +    spin_lock_irqsave (&rcvrRail->rcvr_retrylock, flags);
50904 +    list_for_each_safe (el, nel, &rcvrRail->rcvr_retrylist) {
50905 +       EP4_RXD_RAIL *rxdRail  = list_entry (el, EP4_RXD_RAIL, rxd_retry_link);
50906 +       EP_ENVELOPE  *env      = &rxdRail->rxd_generic.Rxd->RxdMain->Envelope;
50907 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
50908 +
50909 +       if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
50910 +       {
50911 +           EPRINTF2 (DBG_XMTR, "%s; ep4rcvr_flush_flushing: removing rxdRail %p from retry list\n", rail->r_generic.Name, rxdRail);
50912 +           
50913 +           list_del (&rxdRail->rxd_retry_link);
50914 +       }
50915 +    }
50916 +    spin_unlock_irqrestore (&rcvrRail->rcvr_retrylock, flags);
50917 +
50918 +    spin_lock_irqsave (&rcvr->Lock, flags);
50919 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
50920 +    
50921 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
50922 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
50923 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
50924 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
50925 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
50926 +
50927 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL (rxdRail, rcvrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
50928 +           continue;
50929 +       
50930 +       EPRINTF5 (DBG_DISCON, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p state %d elan node %d\n", 
50931 +                 rail->r_generic.Name, rcvr, rxd, (int)rxdRail->rxd_main->rxd_done, env->NodeId);
50932 +       
50933 +       switch (rxd->State)
50934 +       {
50935 +       case EP_RXD_FREE:
50936 +           printk ("ep4rcvr_flush_flushing: rxd state is free but bound to a fail\n");
50937 +           break;
50938 +
50939 +       case EP_RXD_RECEIVE_ACTIVE:
50940 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete message receive */
50941 +           {
50942 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - passive\n", 
50943 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId);
50944 +               
50945 +               nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
50946 +               continue;
50947 +           }
50948 +           break;
50949 +           
50950 +       default:
50951 +           EP4_ASSERT (rail, EP_IS_RPC(env->Attr));
50952 +
50953 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete RPC */
50954 +           {
50955 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - active\n", 
50956 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId);
50957 +               
50958 +               EP_INVALIDATE_XID (rxd->MsgXid);                        /* Ignore any previous NMD map responses */
50959 +               
50960 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
50961 +               continue;
50962 +           }
50963 +           break;
50964 +
50965 +       case EP_RXD_BEEN_ABORTED:
50966 +           printk ("ep4rcvr_flush_flushing: rxd state is aborted but bound to a fail\n");
50967 +           break;
50968 +       }
50969 +
50970 +       EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - finished\n", 
50971 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
50972 +    }    
50973 +
50974 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
50975 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50976 +}
50977 +
50978 +void
50979 +ep4rcvr_flush_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
50980 +{
50981 +    EP4_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
50982 +
50983 +    switch (rail->r_generic.CallbackStep)
50984 +    {
50985 +    case EP_CB_FLUSH_FILTERING:
50986 +       ep4rcvr_flush_filtering (rcvr, rcvrRail);
50987 +       break;
50988 +
50989 +    case EP_CB_FLUSH_FLUSHING:
50990 +       ep4rcvr_flush_flushing (rcvr, rcvrRail);
50991 +       break;
50992 +    }
50993 +}
50994 +
50995 +void
50996 +ep4rcvr_failover_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
50997 +{
50998 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
50999 +    EP4_RAIL         *rail   = RCVR_TO_RAIL (rcvrRail);
51000 +    ELAN4_DEV       *dev    = rail->r_ctxt.ctxt_dev;
51001 +    struct list_head *el, *nel;
51002 +    unsigned long     flags;
51003 +#if SUPPORT_RAIL_FAILOVER
51004 +    EP_SYS           *sys    = subsys->Subsys.Sys;
51005 +#endif
51006 +    
51007 +    spin_lock_irqsave (&rcvr->Lock, flags);
51008 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51009 +    
51010 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
51011 +       EP_RXD             *rxd      = list_entry (el, EP_RXD, Link);
51012 +       EP4_RXD_RAIL       *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51013 +       EP_ENVELOPE        *env      = &rxd->RxdMain->Envelope;
51014 +       EP_NODE_RAIL       *nodeRail = &rail->r_generic.Nodes[env->NodeId];
51015 +#if SUPPORT_RAIL_FAILOVER
51016 +       EP_NODE            *node     = &sys->Nodes[env->NodeId];
51017 +       EP_MANAGER_MSG_BODY msgBody;
51018 +#endif
51019 +       
51020 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_PASSIVATED)
51021 +           continue;
51022 +
51023 +       EPRINTF5 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rcvr %p rxd %p elan node %d state %d\n", 
51024 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId, (int)rxdRail->rxd_main->rxd_done);
51025 +
51026 +       switch (rxd->State)
51027 +       {
51028 +       case EP_RXD_FREE:
51029 +           printk ("ep4rcvr_failover_callback: rxd state is free but bound to a fail\n");
51030 +           break;
51031 +
51032 +       case EP_RXD_RECEIVE_ACTIVE:
51033 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                        /* incomplete message receive */
51034 +           {
51035 +               EPRINTF4 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->r_generic.Name, rcvr, rxd, env->NodeId);
51036 +
51037 +               EP4_RXD_FORCE_PRIVATE(rxdRail);
51038 +               
51039 +               unbind_rxd_rail (rxd, rxdRail);
51040 +
51041 +               free_rxd_rail (rcvrRail, rxdRail);
51042 +           
51043 +               /* epcomms thread will requeue on different rail */
51044 +               ep_kthread_schedule (&subsys->Thread, lbolt);
51045 +               continue;
51046 +           }
51047 +           break;
51048 +
51049 +       default:
51050 +           EP4_ASSERT (rail, EP_IS_RPC(env->Attr));
51051 +
51052 +#if SUPPORT_RAIL_FAILOVER
51053 +           /* XXXX - no rail failover for now .... */
51054 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE && !EP_IS_NO_FAILOVER(env->Attr))       /* incomplete RPC, which can be failed over */
51055 +           {
51056 +               EPRINTF6 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rxd %p State %d Xid %llxx MsgXid %llxx nodeId %d - failover\n", 
51057 +                         rail->r_generic.Name, rxd, rxd->State, env->Xid.Unique, rxd->MsgXid.Unique, env->NodeId);
51058 +               
51059 +               if (EP_XID_INVALID(rxd->MsgXid))
51060 +                   rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
51061 +               
51062 +               /* XXXX maybe only send the message if the node failover retry is now ? */
51063 +               msgBody.Failover.Xid      = env->Xid;
51064 +               msgBody.Failover.Railmask = node->ConnectedRails;
51065 +               
51066 +               ep_send_message (&rail->r_generic, env->NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST, rxd->MsgXid, &msgBody);
51067 +               
51068 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
51069 +               continue;
51070 +           }
51071 +#endif
51072 +           break;
51073 +
51074 +       case EP_RXD_BEEN_ABORTED:
51075 +           printk ("ep4rcvr_failover_callback: rxd state is aborted but bound to a fail\n");
51076 +           break;
51077 +       }
51078 +       EPRINTF3 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rxd %p nodeId %d - finished\n", rail->r_generic.Name, rxd, env->NodeId);
51079 +    }
51080 +    
51081 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51082 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51083 +}
51084 +
51085 +void
51086 +ep4rcvr_disconnect_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
51087 +{
51088 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
51089 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
51090 +    struct list_head *el, *nel;
51091 +    struct list_head  rxdList;
51092 +    unsigned long     flags;
51093 +
51094 +    INIT_LIST_HEAD (&rxdList);
51095 +    
51096 +    spin_lock_irqsave (&rcvr->Lock, flags);
51097 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51098 +    
51099 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
51100 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
51101 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51102 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
51103 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
51104 +       
51105 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
51106 +           continue;
51107 +
51108 +       EPRINTF5 (DBG_DISCON, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p elan node %d state %x\n", rail->r_generic.Name, rcvr, rxd, env->NodeId, rxd->State);
51109 +
51110 +       switch (rxd->State)
51111 +       {
51112 +       case EP_RXD_FREE:
51113 +           printk ("ep4rcvr_disconnect_callback: rxd state is free but bound to a rail\n");
51114 +           break;
51115 +
51116 +       case EP_RXD_RECEIVE_ACTIVE:
51117 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete message receive */
51118 +           {
51119 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->r_generic.Name, rcvr, rxd, env->NodeId);
51120 +
51121 +               EP4_RXD_FORCE_PRIVATE (rxdRail);
51122 +               
51123 +               unbind_rxd_rail (rxd, rxdRail);
51124 +               free_rxd_rail (rcvrRail, rxdRail);
51125 +               
51126 +               /* remark it as pending if it was partially received */
51127 +               rxd->RxdMain->Len = EP_RXD_PENDING;
51128 +               
51129 +               /* epcomms thread will requeue on different rail */
51130 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
51131 +               continue;
51132 +           }
51133 +           break;
51134 +
51135 +       default:
51136 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE || rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE)            /* incomplete RPC */
51137 +           {
51138 +               EPRINTF5 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d state %x - not able to failover\n",
51139 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId, rxd->State);
51140 +           
51141 +               EP4_RXD_FORCE_PRIVATE (rxdRail);
51142 +
51143 +               unbind_rxd_rail (rxd, rxdRail);
51144 +               free_rxd_rail (rcvrRail, rxdRail);
51145 +
51146 +               /* Ignore any previous NMD/failover responses */
51147 +               EP_INVALIDATE_XID (rxd->MsgXid);
51148 +               
51149 +               /* Remove from active list */
51150 +               list_del (&rxd->Link);
51151 +               
51152 +               if (rxd->State == EP_RXD_RPC_IN_PROGRESS)                               /* owned by user .... */
51153 +                   rxd->State = EP_RXD_BEEN_ABORTED;
51154 +               else                                                                    /* queue for completion */
51155 +               {
51156 +                   rxd->RxdMain->Len = EP_CONN_RESET;                                  /* ensure ep_rxd_status() fails */
51157 +                   list_add_tail (&rxd->Link, &rxdList);
51158 +               }
51159 +               continue;
51160 +           }
51161 +           break;
51162 +
51163 +       case EP_RXD_BEEN_ABORTED:
51164 +           printk ("ep4rcvr_disconnect_callback: rxd state is aborted but bound to a rail\n");
51165 +           break;
51166 +       }
51167 +
51168 +       printk ("%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
51169 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
51170 +       EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
51171 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
51172 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
51173 +    }
51174 +    
51175 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51176 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51177 +
51178 +    while (! list_empty (&rxdList)) 
51179 +    {
51180 +       EP_RXD *rxd = list_entry (rxdList.next, EP_RXD, Link);
51181 +
51182 +       list_del (&rxd->Link);
51183 +
51184 +       rxd->Handler (rxd);
51185 +    }
51186 +}
51187 +
51188 +void
51189 +ep4rcvr_neterr_flush (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
51190 +{
51191 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
51192 +    EP4_RAIL       *rail      = RCVR_TO_RAIL (rcvrRail);
51193 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
51194 +    unsigned long   flags;
51195 +
51196 +    spin_lock_irqsave (&rcvr->Lock, flags);
51197 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51198 +
51199 +    /* Insert a setevent command into the thread's command queue
51200 +     * to ensure that all sten packets have completed */
51201 +    elan4_guard (rcvrRail->rcvr_ecq->ecq_cq, GUARD_ALL_CHANNELS);
51202 +    ep4comms_flush_setevent (commsRail, rcvrRail->rcvr_ecq->ecq_cq);
51203 +    
51204 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51205 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51206 +}
51207 +
51208 +void
51209 +ep4rcvr_neterr_check (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
51210 +{
51211 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
51212 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
51213 +    struct list_head *el;
51214 +    unsigned long     flags;
51215 +
51216 +    spin_lock_irqsave (&rcvr->Lock, flags);
51217 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51218 +    
51219 +    list_for_each (el, &rcvr->ActiveDescList) {
51220 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
51221 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51222 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
51223 +
51224 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || env->NodeId != nodeId)
51225 +           continue;
51226 +
51227 +       if (rxd->State == EP_RXD_RECEIVE_ACTIVE || rxd->State == EP_RXD_GET_ACTIVE)
51228 +       {
51229 +           EP_NETERR_COOKIE cookie;
51230 +           unsigned int     first, this;
51231 +
51232 +           if (rxd->State == EP_RXD_RECEIVE_ACTIVE)
51233 +               first = (EP_MAXFRAG+1) - (( EP_IS_MULTICAST(env->Attr) ? 1 : 0) + (env->nFrags == 0 ? 1 : env->nFrags));
51234 +           else
51235 +               first = (EP_MAXFRAG+1) - rxd->nFrags;
51236 +
51237 +           for (this = first; this < (EP_MAXFRAG+1); this++)
51238 +               if (rxdRail->rxd_main->rxd_sent[this] == EP4_STATE_ACTIVE)
51239 +                   break;
51240 +           
51241 +           if (this > first)
51242 +           {
51243 +               /* Look at the last completed STEN packet and if its neterr cookie matches, then change
51244 +                * the rxd to look the same as if the sten packet had failed and then schedule it for retry */
51245 +               cookie = elan4_sdram_readq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[--this].c_cookie));
51246 +               
51247 +               if (cookie == cookies[0] || cookie == cookies[1])
51248 +               {
51249 +                   EPRINTF5 (DBG_NETWORK_ERROR, "%s: ep4rcvr_neterr_check: cookie <%lld%s%s%s%s> matches rxd %p rxdRail %p this %d\n",
51250 +                             rail->r_generic.Name, EP4_COOKIE_STRING(cookie), rxd, rxdRail, this);
51251 +                   
51252 +                   printk ("%s: ep4rcvr_neterr_check: cookie <%lld%s%s%s%s> matches rxd %p rxdRail %p this %d : time %ld\n",
51253 +                           rail->r_generic.Name, EP4_COOKIE_STRING(cookie), rxd, rxdRail, this, rxdRail->rxd_retry_time);
51254 +                   
51255 +                   rxdRail->rxd_main->rxd_sent[this] = EP4_STATE_ACTIVE;
51256 +                   rxdRail->rxd_main->rxd_failed     = EP4_STATE_FAILED;
51257 +                   
51258 +                   spin_lock (&rcvrRail->rcvr_retrylock);
51259 +                   
51260 +                   ASSERT (rxdRail->rxd_retry_time == 0);
51261 +
51262 +                   rxdRail->rxd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;
51263 +                       
51264 +                   list_add_tail (&rxdRail->rxd_retry_link, &rcvrRail->rcvr_retrylist);
51265 +                       
51266 +                   ep_kthread_schedule (&rail->r_retry_thread, rxdRail->rxd_retry_time);
51267 +                   
51268 +                   spin_unlock (&rcvrRail->rcvr_retrylock);
51269 +               }
51270 +           }
51271 +       }
51272 +    }
51273 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51274 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51275 +}
51276 +
51277 +int
51278 +ep4rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *r)
51279 +{
51280 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r;
51281 +    EP4_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
51282 +    ELAN4_DEV     *dev      = rail->r_ctxt.ctxt_dev;
51283 +    EP4_RXD_RAIL  *rxdRail;
51284 +    register int   i;
51285 +
51286 +    ASSERT (SPINLOCK_HELD(&rxd->Rcvr->Lock));
51287 +
51288 +    if ((rxdRail = get_rxd_rail (rcvrRail)) == NULL)
51289 +       return 0;
51290 +    
51291 +    /* Flush the Elan TLB if mappings have changed */
51292 +    ep_perrail_dvma_sync (&rail->r_generic);
51293 +
51294 +    EPRINTF6 (DBG_RCVR, "%s: ep4rcvr_queue_rxd: rcvr %p rxd %p rxdRail %p buffer %x len %x\n", 
51295 +             rail->r_generic.Name, rxd->Rcvr, rxd, rxdRail, rxd->Data.nmd_addr, rxd->Data.nmd_len);
51296 +
51297 +    /* bind the rxdRail and rxd together */
51298 +    bind_rxd_rail (rxd, rxdRail);
51299 +
51300 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_addr), rxd->Data.nmd_addr);       /* PCI write */
51301 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_len),  rxd->Data.nmd_len);                /* PCI write */
51302 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_attr), rxd->Data.nmd_attr);       /* PCI write */
51303 +
51304 +    /* Mark as active */
51305 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType), 
51306 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51307 +    
51308 +    for (i = 0; i <= EP_MAXFRAG; i++)
51309 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
51310 +
51311 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
51312 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
51313 +
51314 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x00, /* %r0 */
51315 +                       ep_symbol (&rail->r_threadcode, "c_queue_rxd"));
51316 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x10, /* %r2 */
51317 +                       rcvrRail->rcvr_elan_addr);
51318 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x18, /* %r3 */
51319 +                       rxdRail->rxd_elan_addr);
51320 +
51321 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType),
51322 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_START_CMD_NDWORDS));
51323 +
51324 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_start));
51325 +
51326 +    return 1;
51327 +}
51328 +
51329 +void
51330 +ep4rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
51331 +{
51332 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51333 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
51334 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
51335 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
51336 +    sdramaddr_t     rxdElan   = rxdRail->rxd_elan;
51337 +    EP_ENVELOPE    *env       = &rxd->RxdMain->Envelope;
51338 +    unsigned long   first     = (EP_MAXFRAG+1) - nFrags;
51339 +    EP4_RXD_DMA_CMD cmd;
51340 +    register int    i, len;
51341 +
51342 +    EP4_ASSERT (rail, rxd->State == EP_RXD_PUT_ACTIVE);
51343 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
51344 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
51345 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51346 +
51347 +    /* Flush the Elan TLB if mappings have changed */
51348 +    ep_perrail_dvma_sync (&rail->r_generic);
51349 +
51350 +    /* Generate the DMA chain to put the data */
51351 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
51352 +    {
51353 +       cmd.c_dma_typeSize     = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
51354 +       cmd.c_dma_cookie       = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
51355 +       cmd.c_dma_vproc        = EP_VP_DATA(env->NodeId);
51356 +       cmd.c_dma_srcAddr      = local->nmd_addr;
51357 +       cmd.c_dma_dstAddr      = remote->nmd_addr;
51358 +       if (i == (nFrags-1))
51359 +           cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done);
51360 +       else
51361 +           cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]);
51362 +       cmd.c_dma_dstEvent     = 0;
51363 +       cmd.c_nop_cmd          = NOP_CMD;
51364 +
51365 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_put: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
51366 +                 rail->r_generic.Name, rxd, env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len);
51367 +       
51368 +       elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i]), sizeof (EP4_RXD_DMA_CMD));
51369 +    }
51370 +
51371 +    /* Initialise the event chain */
51372 +    for (i = 0; i < nFrags-1; i++)
51373 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
51374 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
51375 +
51376 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
51377 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51378 +
51379 +    for (i = 0; i <= EP_MAXFRAG; i++)
51380 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
51381 +
51382 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
51383 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
51384 +
51385 +    /* Initialise the previous event to start the whole chain off */
51386 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
51387 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
51388 +
51389 +    ASSERT (rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
51390 +
51391 +    /* finally issue the setevent to start the whole chain */
51392 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
51393 +
51394 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
51395 +}    
51396 +
51397 +void
51398 +ep4rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
51399 +{
51400 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51401 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
51402 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
51403 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
51404 +    sdramaddr_t      rxdElan  = rxdRail->rxd_elan;
51405 +    EP_ENVELOPE     *env      = &rxd->RxdMain->Envelope;
51406 +    unsigned long    first    = (EP_MAXFRAG+1) - nFrags;
51407 +    register int    i, len;
51408 +
51409 +    EP4_ASSERT (rail, rxd->State == EP_RXD_GET_ACTIVE);
51410 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
51411 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
51412 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51413 +
51414 +    /* Flush the Elan TLB if mappings have changed */
51415 +    ep_perrail_dvma_sync (&rail->r_generic);
51416 +
51417 +    /* Generate the DMA chain to put the data */
51418 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
51419 +    {
51420 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_get rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
51421 +                 rail->r_generic.Name, rxd, env->Xid.Unique, i, remote->nmd_addr, local->nmd_addr, remote->nmd_len);
51422 +       
51423 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_open),
51424 +                           OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(env->NodeId)));
51425 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_trans),
51426 +                           SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16));
51427 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_cookie),
51428 +                           ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_STEN);
51429 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_typeSize),
51430 +                           E4_DMA_TYPE_SIZE (local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT));
51431 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_cookie),
51432 +                           ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA);
51433 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_vproc),
51434 +                           EP_VP_DATA (rail->r_generic.Position.pos_nodeid));
51435 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_srcAddr),
51436 +                           remote->nmd_addr);
51437 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_dstAddr),
51438 +                           local->nmd_addr);
51439 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_srcEvent),
51440 +                           0);
51441 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_dstEvent),
51442 +                           i == (nFrags-1) ? rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done) : 
51443 +                                             rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]));
51444 +    }
51445 +
51446 +    /* Initialise the event chain */
51447 +    for (i = 0; i < nFrags-1; i++)
51448 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
51449 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
51450 +
51451 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
51452 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51453 +
51454 +    for (i = 0; i <= EP_MAXFRAG; i++)
51455 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
51456 +
51457 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
51458 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
51459 +
51460 +    /* Initialise the previous event to start the whole chain off */
51461 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
51462 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
51463 +
51464 +    ASSERT (rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
51465 +
51466 +    /* finally issue the setevent to start the whole chain */
51467 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
51468 +
51469 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
51470 +}
51471 +
51472 +void
51473 +ep4rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
51474 +{
51475 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51476 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
51477 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
51478 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
51479 +    sdramaddr_t     rxdElan   = rxdRail->rxd_elan;
51480 +    EP_ENVELOPE    *env       = &rxd->RxdMain->Envelope;
51481 +    unsigned long   first     = (EP_MAXFRAG+1) - nFrags - 1;
51482 +    EP4_RXD_DMA_CMD cmd;
51483 +    register int    i, len;
51484 +
51485 +    EP4_ASSERT (rail, rxd->State == EP_RXD_COMPLETE_ACTIVE);
51486 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
51487 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
51488 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51489 +
51490 +    /* Flush the Elan TLB if mappings have changed */
51491 +    ep_perrail_dvma_sync (&rail->r_generic);
51492 +
51493 +    /* Generate the DMA chain to put the data */
51494 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
51495 +    {
51496 +       cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
51497 +       cmd.c_dma_cookie   = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
51498 +       cmd.c_dma_vproc    = EP_VP_DATA(env->NodeId);
51499 +       cmd.c_dma_srcAddr  = local->nmd_addr;
51500 +       cmd.c_dma_dstAddr  = remote->nmd_addr;
51501 +       cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]);
51502 +       cmd.c_dma_dstEvent = 0;
51503 +       cmd.c_nop_cmd      = NOP_CMD;
51504 +
51505 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_complete: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
51506 +                 rail->r_generic.Name, rxd, env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len);
51507 +
51508 +       elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i]), sizeof (EP4_RXD_DMA_CMD));
51509 +    }
51510 +    
51511 +    /* Initialise the status block dma */
51512 +    cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(EP_STATUSBLK_SIZE, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
51513 +    cmd.c_dma_cookie   = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
51514 +    cmd.c_dma_vproc    = EP_VP_DATA(env->NodeId);
51515 +    cmd.c_dma_srcAddr  = rxd->NmdMain.nmd_addr + offsetof (EP_RXD_MAIN, StatusBlk);
51516 +    cmd.c_dma_dstAddr  = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, StatusBlk);
51517 +    cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done);
51518 +    cmd.c_dma_dstEvent = env->TxdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_done);;
51519 +    cmd.c_nop_cmd      = NOP_CMD;
51520 +
51521 +    EPRINTF6 (DBG_RCVR, "%s: ep4rcvr_rpc_complete: rxd %p [XID=%llx] statusblk source=%08x dest=%08x len=%x\n", 
51522 +             rail->r_generic.Name, rxd, env->Xid.Unique, (int) cmd.c_dma_srcAddr, (int) cmd.c_dma_dstAddr, EP_STATUSBLK_SIZE);
51523 +
51524 +    elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[EP_MAXFRAG]), sizeof (EP4_RXD_DMA_CMD));
51525 +
51526 +    /* Initialise the event chain */
51527 +    for (i = 0; i < nFrags; i++)
51528 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
51529 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
51530 +
51531 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
51532 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51533 +
51534 +    for (i = 0; i <= EP_MAXFRAG; i++)
51535 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
51536 +
51537 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
51538 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
51539 +
51540 +    /* Initialise the previous event to start the whole chain off */
51541 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
51542 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
51543 +
51544 +    ASSERT (rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
51545 +
51546 +    /* finally issue the setevent to start the whole chain */
51547 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
51548 +
51549 +    BucketStat (rxd->Rcvr->Subsys, CompleteRPC, len);
51550 +}
51551 +
51552 +EP_RXD *
51553 +ep4rcvr_steal_rxd (EP_RCVR_RAIL *r)
51554 +{
51555 +    /* XXXX - TBD */
51556 +    return NULL;
51557 +}
51558 +
51559 +long
51560 +ep4rcvr_check (EP_RCVR_RAIL *r, long nextRunTime)
51561 +{
51562 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r;
51563 +    EP4_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
51564 +
51565 +    if (rcvrRail->rcvr_freecount < ep_rxd_lowat && !alloc_rxd_block (rcvrRail))
51566 +    {
51567 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow rxd rail pool\n", rail->r_generic.Name);
51568 +               
51569 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
51570 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
51571 +    }
51572 +    
51573 +    return nextRunTime;
51574 +}
51575 +
51576 +unsigned long
51577 +ep4rcvr_retry (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
51578 +{
51579 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg;
51580 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
51581 +    unsigned long  flags;
51582 +
51583 +    spin_lock_irqsave (&rcvrRail->rcvr_retrylock, flags);
51584 +    while (! list_empty (&rcvrRail->rcvr_retrylist))
51585 +    {
51586 +       EP4_RXD_RAIL *rxdRail = list_entry (rcvrRail->rcvr_retrylist.next, EP4_RXD_RAIL, rxd_retry_link);
51587 +       EP_ENVELOPE  *env     = &rxdRail->rxd_generic.Rxd->RxdMain->Envelope;
51588 +       unsigned int  first   = (EP_MAXFRAG+1) - ((env->Attr & EP_MULTICAST ? 1 : 0) + (env->nFrags == 0 ? 1 : env->nFrags));
51589 +       
51590 +       if (BEFORE (lbolt, rxdRail->rxd_retry_time))
51591 +       {
51592 +           if (nextRunTime == 0 || AFTER (nextRunTime, rxdRail->rxd_retry_time))
51593 +               nextRunTime = rxdRail->rxd_retry_time;
51594 +
51595 +           break;
51596 +       }
51597 +
51598 +       list_del (&rxdRail->rxd_retry_link);
51599 +       rxdRail->rxd_retry_time = 0;
51600 +
51601 +       /* determine which sten packet to resubmit */
51602 +       for (; first < (EP_MAXFRAG+1); first++)
51603 +           if (rxdRail->rxd_main->rxd_sent[first] == EP4_STATE_ACTIVE)
51604 +               break;
51605 +
51606 +       EPRINTF3 (DBG_RETRY, "%s: ep4rcvr_retry: rxdRail %p, reissuing sten[%d]\n", rail->r_generic.Name, rxdRail, first);
51607 +
51608 +       /* re-initialise the fail event */
51609 +       elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType),
51610 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51611 +
51612 +       rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
51613 +
51614 +       /* re-initialise the chain event to resubmit this sten packet */
51615 +       elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first-1].ev_CountAndType),
51616 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
51617 +       
51618 +       /* finally issue the setevent to start the chain again */
51619 +       ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
51620 +    }
51621 +    spin_unlock_irqrestore (&rcvrRail->rcvr_retrylock, flags);
51622 +    
51623 +    return nextRunTime;
51624 +}
51625 +
51626 +void
51627 +ep4rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
51628 +{
51629 +    EP4_RAIL          *rail   = (EP4_RAIL *) commsRail->Rail;
51630 +    ELAN4_DEV         *dev    = rail->r_ctxt.ctxt_dev;
51631 +    sdramaddr_t        qdescs = ((EP4_COMMS_RAIL *) commsRail)->r_descs;
51632 +    EP4_RCVR_RAIL     *rcvrRail;
51633 +    E4_InputQueue      qdesc;
51634 +    E4_ThreadRegs      tregs;
51635 +    sdramaddr_t        stack;
51636 +    unsigned long      flags;
51637 +
51638 +    KMEM_ZALLOC (rcvrRail, EP4_RCVR_RAIL *, sizeof (EP4_RCVR_RAIL), 1);
51639 +
51640 +    spin_lock_init (&rcvrRail->rcvr_freelock);
51641 +    INIT_LIST_HEAD (&rcvrRail->rcvr_freelist);
51642 +    INIT_LIST_HEAD (&rcvrRail->rcvr_blocklist);
51643 +
51644 +    kcondvar_init (&rcvrRail->rcvr_cleanup_sleep);
51645 +    kcondvar_init (&rcvrRail->rcvr_freesleep);
51646 +
51647 +    INIT_LIST_HEAD (&rcvrRail->rcvr_retrylist);
51648 +    spin_lock_init (&rcvrRail->rcvr_retrylock);
51649 +
51650 +    rcvrRail->rcvr_generic.CommsRail = commsRail;
51651 +    rcvrRail->rcvr_generic.Rcvr      = rcvr;
51652 +
51653 +    rcvrRail->rcvr_main  = ep_alloc_main (&rail->r_generic, sizeof (EP4_RCVR_RAIL_MAIN), 0, &rcvrRail->rcvr_main_addr);
51654 +    rcvrRail->rcvr_elan  = ep_alloc_elan (&rail->r_generic, sizeof (EP4_RCVR_RAIL_ELAN), 0, &rcvrRail->rcvr_elan_addr);
51655 +    rcvrRail->rcvr_slots = ep_alloc_elan (&rail->r_generic, EP_INPUTQ_SIZE * rcvr->InputQueueEntries, 0, &rcvrRail->rcvr_slots_addr);
51656 +    stack                = ep_alloc_elan (&rail->r_generic, EP4_STACK_SIZE, 0, &rcvrRail->rcvr_stack);
51657 +
51658 +    /* allocate a command queue for the thread to use, plus space for it to wait/reschedule */
51659 +    rcvrRail->rcvr_ecq     = ep4_alloc_ecq (rail, CQ_Size64K);
51660 +    rcvrRail->rcvr_resched = ep4_get_ecq (rail, EP4_ECQ_ATOMIC, 8);
51661 +
51662 +    ep4_register_intcookie (rail, &rcvrRail->rcvr_stall_intcookie, rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie),
51663 +                           rcvr_stall_interrupt, rcvrRail);
51664 +
51665 +    /* Initialise the elan portion */
51666 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent.ev_CountAndType), 0);
51667 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_halt.ev_CountAndType), 0);
51668 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), 0);
51669 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp),
51670 +                       rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head));
51671 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head), 0);
51672 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie), 0);
51673 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qbase), rcvrRail->rcvr_slots_addr);
51674 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qlast), 
51675 +                       rcvrRail->rcvr_slots_addr + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1));
51676 +
51677 +    /* Initialise the main memory portion */
51678 +    rcvrRail->rcvr_main->rcvr_thread_lock = 0;
51679 +
51680 +    /* Install our retry handler */
51681 +    rcvrRail->rcvr_retryops.op_func = ep4rcvr_retry;
51682 +    rcvrRail->rcvr_retryops.op_arg  = rcvrRail;
51683 +
51684 +    ep4_add_retry_ops (rail, &rcvrRail->rcvr_retryops);
51685 +
51686 +    /* Update the queue desriptor */
51687 +    qdesc.q_bptr    = rcvrRail->rcvr_slots_addr;
51688 +    qdesc.q_fptr    = rcvrRail->rcvr_slots_addr;
51689 +    qdesc.q_control = E4_InputQueueControl (rcvrRail->rcvr_slots_addr, rcvrRail->rcvr_slots_addr + (EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1)), EP_INPUTQ_SIZE);
51690 +    qdesc.q_event   = rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent);
51691 +
51692 +    ep4_write_qdesc (rail, qdescs + (rcvr->Service * EP_QUEUE_DESC_SIZE), &qdesc);
51693 +
51694 +    spin_lock_irqsave (&rcvr->Lock, flags);
51695 +    rcvr->Rails[rail->r_generic.Number] = &rcvrRail->rcvr_generic;
51696 +    rcvr->RailMask |= EP_RAIL2RAILMASK (rail->r_generic.Number);
51697 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51698 +
51699 +    {
51700 +       sdramaddr_t stackTop     = stack + EP4_STACK_SIZE;
51701 +       E4_Addr     stackTopAddr = rcvrRail->rcvr_stack + EP4_STACK_SIZE;
51702 +
51703 +       ep4_init_thread (rail, &tregs, stackTop, stackTopAddr, ep_symbol (&rail->r_threadcode, "ep4comms_rcvr"), 6, 
51704 +                        (E4_uint64) rail->r_elan_addr, (E4_uint64) rcvrRail->rcvr_elan_addr, (E4_uint64) rcvrRail->rcvr_main_addr,
51705 +                        (E4_uint64) EP_MSGQ_ADDR(rcvr->Service), (E4_uint64) rcvrRail->rcvr_ecq->ecq_addr, (E4_uint64) rcvrRail->rcvr_resched->ecq_addr);
51706 +    }
51707 +    
51708 +    /* Issue the command to the threads private command queue */
51709 +    elan4_run_thread_cmd (rcvrRail->rcvr_ecq->ecq_cq, &tregs);
51710 +
51711 +    ep_procfs_rcvr_add_rail(&(rcvrRail->rcvr_generic));
51712 +}
51713 +
51714 +void
51715 +ep4rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
51716 +{
51717 +    EP4_RAIL         *rail     = (EP4_RAIL *) commsRail->Rail;
51718 +    EP4_RCVR_RAIL    *rcvrRail = (EP4_RCVR_RAIL *) rcvr->Rails[rail->r_generic.Number];  
51719 +    ELAN4_HALTOP      haltop;
51720 +    struct list_head *el, *nel;
51721 +    unsigned long     flags;
51722 +
51723 +    ep_procfs_rcvr_del_rail(&(rcvrRail->rcvr_generic));
51724 +
51725 +    /* Run a halt operation to mark the input queue as full and
51726 +     * request the thread to halt */
51727 +    haltop.op_mask     = INT_DiscardingHighPri | INT_TProcHalted;
51728 +    haltop.op_function = rcvr_stall_haltop;
51729 +    haltop.op_arg      = rcvrRail;
51730 +
51731 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &haltop);
51732 +
51733 +    /* Wait for the thread to tell us it's processed the input queue */
51734 +    spin_lock_irqsave (&rcvr->Lock, flags);
51735 +    while (! rcvrRail->rcvr_thread_halted)
51736 +       kcondvar_wait (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock, &flags);
51737 +    rcvrRail->rcvr_thread_halted = 0;
51738 +
51739 +    /* flag the rail as no longer available */
51740 +    rcvr->RailMask &= ~EP_RAIL2RAILMASK (rail->r_generic.Number);
51741 +
51742 +    /* wait for all active communications to terminate */
51743 +    for (;;)
51744 +    {
51745 +       int mustWait = 0;
51746 +
51747 +       list_for_each (el, &rcvr->ActiveDescList) {
51748 +           EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
51749 +           EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail;
51750 +           
51751 +           if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail) && rxd->RxdMain->Len != EP_RXD_PENDING)
51752 +           {
51753 +               mustWait++;
51754 +               break;
51755 +           }
51756 +       }
51757 +
51758 +       if (! mustWait)
51759 +           break;
51760 +
51761 +       rcvrRail->rcvr_cleanup_waiting++;
51762 +       kcondvar_wait (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock, &flags);
51763 +    }
51764 +
51765 +    /* at this point all rxd's in the list that are bound to the deleting rail are pending */
51766 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
51767 +       EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
51768 +       EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail;
51769 +
51770 +       if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail))
51771 +       {
51772 +           EP4_RXD_ASSERT_PENDING (rxdRail);
51773 +           EP4_RXD_FORCE_PRIVATE (rxdRail);
51774 +
51775 +           unbind_rxd_rail (rxd, rxdRail);
51776 +           free_rxd_rail (rcvrRail, rxdRail);
51777 +       }
51778 +    }
51779 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51780 +
51781 +    /* wait for all rxd's for this rail to become free */
51782 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
51783 +    while (rcvrRail->rcvr_freecount != rcvrRail->rcvr_totalcount)
51784 +    {
51785 +       rcvrRail->rcvr_freewaiting++;
51786 +       kcondvar_wait (&rcvrRail->rcvr_freesleep, &rcvrRail->rcvr_freelock, &flags);
51787 +    }
51788 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
51789 +
51790 +    /* can now remove the rail as it can no longer be used */
51791 +    spin_lock_irqsave (&rcvr->Lock, flags);
51792 +    rcvr->Rails[rail->r_generic.Number] = NULL;
51793 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51794 +
51795 +    /* all the rxd's accociated with DescBlocks must be in the FreeDescList */
51796 +    ASSERT (rcvrRail->rcvr_totalcount == rcvrRail->rcvr_freecount);
51797 +
51798 +    /* run through the DescBlockList deleting them */
51799 +    while (!list_empty (&rcvrRail->rcvr_blocklist))
51800 +       free_rxd_block (rcvrRail, list_entry(rcvrRail->rcvr_blocklist.next, EP4_RXD_RAIL_BLOCK , blk_link));
51801 +
51802 +    /* it had better be empty after that */
51803 +    ASSERT ((rcvrRail->rcvr_totalcount == 0) && (rcvrRail->rcvr_totalcount == rcvrRail->rcvr_freecount));
51804 +
51805 +    ep4_remove_retry_ops (rail, &rcvrRail->rcvr_retryops);
51806 +
51807 +    ep4_deregister_intcookie (rail, &rcvrRail->rcvr_stall_intcookie);
51808 +
51809 +    ep4_put_ecq (rail, rcvrRail->rcvr_resched, 8);
51810 +    ep4_free_ecq (rail, rcvrRail->rcvr_ecq);
51811 +
51812 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_stack, EP4_STACK_SIZE);
51813 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_slots_addr, EP_INPUTQ_SIZE * rcvr->InputQueueEntries);
51814 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_elan_addr, sizeof (EP4_RCVR_RAIL_ELAN));
51815 +    ep_free_main (&rail->r_generic, rcvrRail->rcvr_main_addr, sizeof (EP4_RCVR_RAIL_MAIN));
51816 +
51817 +    KMEM_FREE (rcvrRail, sizeof (EP4_RCVR_RAIL));
51818 +}
51819 +
51820 +void
51821 +ep4rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *r)
51822 +{
51823 +    EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) r;
51824 +    sdramaddr_t   rxdElan = rxdRail->rxd_elan;
51825 +    EP4_RAIL     *rail    = RCVR_TO_RAIL (rxdRail->rxd_generic.RcvrRail);
51826 +    ELAN4_DEV    *dev     = rail->r_ctxt.ctxt_dev;
51827 +    int i;
51828 +
51829 +    (di->func)(di->arg, "    Rail %d rxd %p elan %lx(%x) main %p(%x) ecq %d scq %d debug %llx\n", rail->r_generic.Number,
51830 +              rxdRail, rxdRail->rxd_elan, rxdRail->rxd_elan_addr, rxdRail->rxd_main, rxdRail->rxd_main_addr,
51831 +              elan4_cq2num(rxdRail->rxd_ecq->ecq_cq), elan4_cq2num(rxdRail->rxd_scq->ecq_cq),
51832 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_debug)));
51833 +    (di->func)(di->arg, "          start    %016llx %016llx %016llx [%016llx %016llx]\n",
51834 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType)),
51835 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_Params[0])),
51836 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_Params[1])),
51837 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0].c_cookie)),
51838 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0].c_dma_cookie)));
51839 +              
51840 +    for (i = 0; i < EP_MAXFRAG; i++)
51841 +       (di->func)(di->arg, "          chain[%d] %016llx %016llx %016llx [%016llx %016llx]\n", i,
51842 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType)),
51843 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_Params[0])),
51844 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_Params[1])),
51845 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[i+1].c_cookie)),
51846 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[i+1].c_dma_cookie)));
51847 +    (di->func)(di->arg, "          done    %016llx %016llx %016llx -> %016llx\n",
51848 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)),
51849 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_Params[0])),
51850 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_Params[1])),
51851 +              rxdRail->rxd_main->rxd_done);
51852 +    (di->func)(di->arg, "          fail    %016llx %016llx %016llx -> %016llx\n",
51853 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)),
51854 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_Params[0])),
51855 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_Params[1])),
51856 +              rxdRail->rxd_main->rxd_failed);
51857 +    (di->func)(di->arg, "          next %016llx queued %016llx main %016llx\n",
51858 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_next)),
51859 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_queued)),
51860 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_main)));
51861 +    (di->func)(di->arg, "          sent %016llx %016llx %016llx %016llx %016llx\n",
51862 +              rxdRail->rxd_main->rxd_sent[0], rxdRail->rxd_main->rxd_sent[1], rxdRail->rxd_main->rxd_sent[2],
51863 +              rxdRail->rxd_main->rxd_sent[3], rxdRail->rxd_main->rxd_sent[4]);
51864 +}
51865 +
51866 +void
51867 +ep4rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *r)
51868 +{
51869 +    EP_RCVR          *rcvr       = r->Rcvr;
51870 +    EP4_RCVR_RAIL    *rcvrRail   = (EP4_RCVR_RAIL *) r;
51871 +    EP4_COMMS_RAIL   *commsRail  = RCVR_TO_COMMS(rcvrRail);
51872 +    EP4_RAIL         *rail       = RCVR_TO_RAIL (rcvrRail);
51873 +    ELAN4_DEV        *dev        = rail->r_ctxt.ctxt_dev;
51874 +    sdramaddr_t       rcvrElan   = rcvrRail->rcvr_elan;
51875 +    sdramaddr_t       qdesc      = commsRail->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
51876 +    sdramaddr_t       event      = rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent);
51877 +    unsigned int      freeCount  = 0;
51878 +    unsigned int      blockCount = 0;
51879 +    struct list_head *el;
51880 +    unsigned long     flags;
51881 +    
51882 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
51883 +    list_for_each (el, &rcvrRail->rcvr_freelist)
51884 +       freeCount++;
51885 +    list_for_each (el, &rcvrRail->rcvr_blocklist)
51886 +       blockCount++;
51887 +    spin_unlock_irqrestore(&rcvrRail->rcvr_freelock, flags);
51888 +
51889 +    (di->func)(di->arg, "      Rail %d elan %lx(%x) main %p(%x) ecq %d resched %d debug %llx\n",
51890 +              rail->r_generic.Number, rcvrRail->rcvr_elan, rcvrRail->rcvr_elan_addr,
51891 +              rcvrRail->rcvr_main, rcvrRail->rcvr_main_addr, elan4_cq2num(rcvrRail->rcvr_ecq->ecq_cq),
51892 +              elan4_cq2num (rcvrRail->rcvr_resched->ecq_cq),
51893 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_debug)));
51894 +    (di->func)(di->arg, "        free %d (%d) total %d blocks %d\n",
51895 +              rcvrRail->rcvr_freecount, freeCount, rcvrRail->rcvr_totalcount, blockCount);
51896 +    (di->func)(di->arg, "        spinlock %016llx %016llx\n", rcvrRail->rcvr_main->rcvr_thread_lock,
51897 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock)));
51898 +    (di->func)(di->arg, "        queue: bptr %016llx fptr %016llx control %016llx (base %lx %x)\n",
51899 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr)),
51900 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_fptr)),
51901 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_control)),
51902 +              rcvrRail->rcvr_slots, rcvrRail->rcvr_slots_addr);
51903 +    (di->func)(di->arg, "        event %016llx %016llx %016llx\n",
51904 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_CountAndType)),
51905 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_Params[0])),
51906 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_Params[1])));
51907 +    (di->func)(di->arg, "        pending_tailp %016llx pending_head %016llx\n", 
51908 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp)),
51909 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head)));
51910 +}
51911 +
51912 +void
51913 +ep4rcvr_fillout_rail_stats(EP_RCVR_RAIL *rcvr_rail, char *str) {
51914 +    /* no stats here yet */
51915 +    /* EP4_RCVR_RAIL * ep4rcvr_rail = (EP4_RCVR_RAIL *) rcvr_rail; */
51916 +}
51917 +
51918 +
51919 +/*
51920 + * Local variables:
51921 + * c-file-style: "stroustrup"
51922 + * End:
51923 + */
51924 Index: linux-2.4.21/drivers/net/qsnet/ep/epcommsTx.c
51925 ===================================================================
51926 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcommsTx.c  2004-02-23 16:02:56.000000000 -0500
51927 +++ linux-2.4.21/drivers/net/qsnet/ep/epcommsTx.c       2005-06-01 23:12:54.654430744 -0400
51928 @@ -0,0 +1,919 @@
51929 +/*
51930 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
51931 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
51932 + *
51933 + *    For licensing information please see the supplied COPYING file
51934 + *
51935 + */
51936 +
51937 +#ident "@(#)$Id: epcommsTx.c,v 1.25.2.5 2004/12/09 10:02:42 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
51938 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx.c,v $*/
51939 +
51940 +#include <qsnet/kernel.h>
51941 +
51942 +#include <elan/kcomm.h>
51943 +#include <elan/epsvc.h>
51944 +#include <elan/epcomms.h>
51945 +
51946 +#include "cm.h"
51947 +#include "debug.h"
51948 +
51949 +unsigned int ep_txd_lowat = 5;
51950 +
51951 +static int
51952 +AllocateTxdBlock (EP_XMTR *xmtr, EP_ATTRIBUTE attr, EP_TXD **txdp)
51953 +{
51954 +    EP_TXD_BLOCK *blk;
51955 +    EP_TXD       *txd;
51956 +    EP_TXD_MAIN  *pTxdMain;
51957 +    int                  i;
51958 +    unsigned long flags;
51959 +
51960 +    EPRINTF1 (DBG_XMTR, "AllocateTxdBlock: xmtr=%p\n", xmtr);
51961 +
51962 +    KMEM_ZALLOC (blk, EP_TXD_BLOCK *, sizeof (EP_TXD_BLOCK), ! (attr & EP_NO_SLEEP));
51963 +
51964 +    if (blk == NULL)
51965 +       return -ENOMEM;
51966 +
51967 +    if ((pTxdMain = ep_shared_alloc_main (xmtr->Subsys->Subsys.Sys, EP_TXD_MAIN_SIZE * EP_NUM_TXD_PER_BLOCK, attr, &blk->NmdMain)) == (sdramaddr_t) 0)
51968 +    {
51969 +       KMEM_FREE (blk, sizeof (EP_TXD_BLOCK));
51970 +       return -ENOMEM;
51971 +    }
51972 +
51973 +    for (txd = &blk->Txd[0], i = 0; i < EP_NUM_TXD_PER_BLOCK; i++, txd++)
51974 +    {
51975 +       txd->Xmtr     = xmtr;
51976 +       txd->TxdMain = pTxdMain;
51977 +
51978 +       ep_nmd_subset (&txd->NmdMain, &blk->NmdMain, (i * EP_TXD_MAIN_SIZE), EP_TXD_MAIN_SIZE);
51979 +
51980 +       /* move onto next descriptor */
51981 +       pTxdMain = (EP_TXD_MAIN *) ((unsigned long) pTxdMain + EP_TXD_MAIN_SIZE);
51982 +    }
51983 +
51984 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
51985 +
51986 +    list_add  (&blk->Link, &xmtr->DescBlockList);
51987 +    xmtr->TotalDescCount += EP_NUM_TXD_PER_BLOCK;
51988 +
51989 +    for (i = txdp ? 1 : 0; i < EP_NUM_TXD_PER_BLOCK; i++)
51990 +    {
51991 +       list_add (&blk->Txd[i].Link, &xmtr->FreeDescList);
51992 +
51993 +       xmtr->FreeDescCount++;
51994 +
51995 +       if (xmtr->FreeDescWanted)
51996 +       {
51997 +           xmtr->FreeDescWanted--;
51998 +           kcondvar_wakeupone (&xmtr->FreeDescSleep, &xmtr->FreeDescLock);
51999 +       }
52000 +    }
52001 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52002 +    
52003 +    if (txdp)
52004 +       *txdp = &blk->Txd[0];
52005 +
52006 +    return 0;
52007 +}
52008 +
52009 +static void
52010 +FreeTxdBlock (EP_XMTR *xmtr, EP_TXD_BLOCK *blk)
52011 +{
52012 +    unsigned long flags;
52013 +
52014 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52015 +    list_del (&blk->Link);
52016 +
52017 +    xmtr->TotalDescCount -= EP_NUM_TXD_PER_BLOCK;
52018 +    xmtr->FreeDescCount -= EP_NUM_TXD_PER_BLOCK;
52019 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52020 +
52021 +    ep_shared_free_main (xmtr->Subsys->Subsys.Sys, &blk->NmdMain);
52022 +    KMEM_FREE (blk, sizeof (EP_TXD_BLOCK));
52023 +}
52024 +
52025 +static EP_TXD *
52026 +GetTxd (EP_XMTR *xmtr, EP_ATTRIBUTE attr)
52027 +{
52028 +    EP_COMMS_SUBSYS *subsys = xmtr->Subsys;
52029 +    EP_TXD          *txd;
52030 +    int low_on_txds;
52031 +    unsigned long flags;
52032 +
52033 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52034 +
52035 +    while (list_empty (&xmtr->FreeDescList))
52036 +    {
52037 +       if (! (attr & EP_NO_ALLOC))
52038 +       {
52039 +           spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52040 +
52041 +           if (AllocateTxdBlock (xmtr, attr, &txd) == ESUCCESS)
52042 +               return (txd);
52043 +
52044 +           spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52045 +       }
52046 +
52047 +       if (attr & EP_NO_SLEEP)
52048 +       {
52049 +           spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52050 +
52051 +           return (NULL);
52052 +       }
52053 +
52054 +       xmtr->FreeDescWanted++;
52055 +       kcondvar_wait (&xmtr->FreeDescSleep, &xmtr->FreeDescLock, &flags);
52056 +    }
52057 +
52058 +    txd = list_entry (xmtr->FreeDescList.next, EP_TXD, Link);
52059 +
52060 +    list_del (&txd->Link);
52061 +
52062 +    /* Wakeup the descriptor primer thread if there's not many left */
52063 +    low_on_txds = (--xmtr->FreeDescCount < ep_txd_lowat);
52064 +
52065 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52066 +
52067 +    if (low_on_txds)
52068 +       ep_kthread_schedule (&subsys->Thread, lbolt);
52069 +
52070 +    return (txd);
52071 +}
52072 +
52073 +void
52074 +FreeTxd (EP_XMTR *xmtr, EP_TXD *txd)
52075 +{
52076 +    unsigned long flags;
52077 +
52078 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52079 +    
52080 +    list_add (&txd->Link, &xmtr->FreeDescList);
52081 +
52082 +    xmtr->FreeDescCount++;
52083 +
52084 +    if (xmtr->FreeDescWanted)                                  /* someone waiting for a receive */
52085 +    {                                                          /* descriptor, so wake them up */
52086 +       xmtr->FreeDescWanted--;
52087 +       kcondvar_wakeupone (&xmtr->FreeDescSleep, &xmtr->FreeDescLock);
52088 +    }
52089 +    
52090 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52091 +}
52092 +
52093 +int
52094 +TxdShouldStabalise (EP_TXD_RAIL *txdRail, EP_RAIL *rail)
52095 +{
52096 +    EP_TXD      *txd  = txdRail->Txd;
52097 +    EP_XMTR     *xmtr = txd->Xmtr;
52098 +    EP_ATTRIBUTE attr = txd->Envelope.Attr;
52099 +    int                 stabilise;
52100 +    extern int   txd_stabilise;
52101 +
52102 +    switch (EP_ATTR2TYPE (attr)) 
52103 +    {
52104 +    case EP_TYPE_SVC_INDICATOR:                                /* is the rail in the current service indicator rail mask */
52105 +       if ((txd_stabilise & 4) == 0)
52106 +           return 0;
52107 +
52108 +       stabilise = (ep_xmtr_svc_indicator_railmask (xmtr, EP_ATTR2DATA (attr), txd->NodeId) & EP_RAIL2RAILMASK (rail->Number)) == 0;
52109 +       break;
52110 +
52111 +    case EP_TYPE_TIMEOUT:
52112 +       if ((txd_stabilise & 2) == 0)
52113 +           return 0;
52114 +
52115 +       stabilise = AFTER(lbolt, txdRail->Txd->TimeStamp + EP_ATTR2DATA(attr));
52116 +       break;
52117 +
52118 +    default:
52119 +       if ((txd_stabilise & 1) == 0)
52120 +           return 0;
52121 +
52122 +       stabilise = AFTER(lbolt, txdRail->Txd->TimeStamp + EP_DEFAULT_TIMEOUT);
52123 +       break;
52124 +    }
52125 +
52126 +    if (stabilise)
52127 +    {
52128 +       txd->Envelope.Attr = EP_SET_TXD_STABALISING(txd->Envelope.Attr);
52129 +       txd->RetryTime     = lbolt;
52130 +
52131 +       ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);    
52132 +    }
52133 +
52134 +    return stabilise;
52135 +}
52136 +
52137 +void ep_xmtr_txd_stat(EP_XMTR *xmtr, EP_TXD *txd) 
52138 +{
52139 +    int f;
52140 +    unsigned long size;
52141 +    EP_TXD_RAIL *txdRail = txd->TxdRail;
52142 +
52143 +    size = 0;
52144 +    for (f=0; f < txd->Envelope.nFrags; f++)
52145 +       size += txd->Envelope.Frags[f].nmd_len;
52146 +
52147 +    INC_STAT(xmtr->stats,tx);
52148 +    ADD_STAT(xmtr->stats,tx_len, size);  
52149 +    
52150 +    if ((txdRail != NULL) && (txdRail->XmtrRail != NULL)){
52151 +       INC_STAT(txdRail->XmtrRail->stats,tx);
52152 +       ADD_STAT(txdRail->XmtrRail->stats,tx_len, size); 
52153 +       
52154 +       if ((txdRail->XmtrRail->CommsRail != NULL) && ( txdRail->XmtrRail->CommsRail->Rail != NULL)) {
52155 +           INC_STAT(txdRail->XmtrRail->CommsRail->Rail->Stats,tx);
52156 +           ADD_STAT(txdRail->XmtrRail->CommsRail->Rail->Stats,tx_len, size);
52157 +       }
52158 +    }
52159 +}
52160 +
52161 +static int
52162 +PollActiveTransmitList (EP_XMTR *xmtr, int flag)
52163 +{
52164 +    struct list_head *el, *nel;
52165 +    struct list_head list;
52166 +    unsigned long flags;
52167 +    int count;
52168 +
52169 +    INIT_LIST_HEAD (&list);
52170 +
52171 +    spin_lock_irqsave (&xmtr->Lock, flags);
52172 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
52173 +       EP_TXD      *txd     = list_entry (el, EP_TXD, Link);
52174 +       EP_TXD_RAIL *txdRail = txd->TxdRail;
52175 +       
52176 +       if (txdRail == NULL)
52177 +           continue;
52178 +
52179 +       ASSERT (txdRail->Txd == txd);
52180 +       
52181 +       if (EP_XMTR_OP (txdRail->XmtrRail,PollTxd) (txdRail->XmtrRail, txdRail, flag))
52182 +       {
52183 +           list_del (&txd->Link);                              /* remove from active transmit list */
52184 +           list_add_tail (&txd->Link, &list);                  /* and add to list to call handlers */
52185 +       }
52186 +    }
52187 +    
52188 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
52189 +
52190 +    for (count = 0; !list_empty (&list); count++)
52191 +    {
52192 +       EP_TXD *txd = list_entry (list.next, EP_TXD, Link);
52193 +
52194 +       list_del (&txd->Link);
52195 +
52196 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
52197 +
52198 +       FreeTxd (xmtr, txd);
52199 +    }
52200 +    return (count);
52201 +}
52202 +
52203 +static inline void
52204 +DoTransmit (EP_XMTR *xmtr, EP_TXD *txd)
52205 +{
52206 +    EP_RAILMASK   nmdRailMask = ep_nmd2railmask (txd->Envelope.Frags, txd->Envelope.nFrags);
52207 +    EP_XMTR_RAIL *xmtrRail;
52208 +    unsigned long flags;
52209 +    int rnum;
52210 +
52211 +    spin_lock_irqsave (&xmtr->Lock, flags);
52212 +
52213 +    if (EP_IS_SVC_INDICATOR(txd->Envelope.Attr))
52214 +       nmdRailMask = nmdRailMask & ep_xmtr_svc_indicator_railmask(xmtr, EP_ATTR2DATA(txd->Envelope.Attr), txd->NodeId);
52215 +
52216 +    if (EP_IS_PREFRAIL_SET(txd->Envelope.Attr))
52217 +       rnum = EP_ATTR2PREFRAIL(txd->Envelope.Attr);
52218 +    else 
52219 +       rnum = ep_xmtr_prefrail (xmtr, nmdRailMask, txd->NodeId);
52220 +    
52221 +    if (rnum < 0 || !(nmdRailMask & EP_RAIL2RAILMASK(rnum)))
52222 +       xmtrRail = NULL;
52223 +    else
52224 +       xmtrRail = xmtr->Rails[rnum];
52225 +    
52226 +    /* Allocate the XID while holding the xmtr->Lock from our XID cache */
52227 +    txd->Envelope.Xid = ep_xid_cache_alloc (xmtr->Subsys->Subsys.Sys, &xmtr->XidCache);
52228 +    
52229 +    EPRINTF7 (DBG_XMTR, "ep: transmit txd %p to %d/%d: Xid %llx nFrags %d [%08x.%d]\n",
52230 +             txd, txd->NodeId, txd->Service, (long long) txd->Envelope.Xid.Unique, 
52231 +             txd->Envelope.nFrags, txd->Envelope.Frags[0].nmd_addr, txd->Envelope.Frags[0].nmd_len);
52232 +
52233 +    /* Store time transmit started to timeout if not received */
52234 +    txd->TimeStamp = lbolt;
52235 +    
52236 +    /* Initialise the retry backoff */
52237 +    txd->Backoff.type = EP_BACKOFF_FREE;
52238 +
52239 +    list_add_tail (&txd->Link, &xmtr->ActiveDescList);
52240 +
52241 +    if (xmtrRail == NULL || !EP_XMTR_OP(xmtrRail,BindTxd) (txd, xmtrRail, EP_TXD_PHASE_ACTIVE))
52242 +       ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);
52243 +    
52244 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
52245 +
52246 +    if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
52247 +       PollActiveTransmitList (xmtr, POLL_TX_LIST);
52248 +}
52249 +
52250 +EP_STATUS
52251 +ep_transmit_message (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
52252 +                    EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
52253 +{
52254 +    EP_TXD       *txd;
52255 +    int           i, len;
52256 +
52257 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
52258 +       return (EP_EINVAL);
52259 +
52260 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
52261 +       return (EP_ENOMEM);
52262 +
52263 +    txd->Handler = handler;
52264 +    txd->Arg     = arg;
52265 +    txd->Service = service;
52266 +    txd->NodeId  = (unsigned short) dest;
52267 +
52268 +    /* Initialise the envelope */
52269 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
52270 +    txd->Envelope.Attr      = EP_CLEAR_LOCAL_ATTR(attr);
52271 +    txd->Envelope.Range     = EP_RANGE (dest, dest);
52272 +    txd->Envelope.TxdMain   = txd->NmdMain;
52273 +    txd->Envelope.nFrags    = nFrags;
52274 +
52275 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
52276 +       txd->Envelope.Frags[i] = nmd[i];
52277 +
52278 +    if (payload)
52279 +    {
52280 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
52281 +
52282 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
52283 +    }
52284 +
52285 +    DoTransmit (xmtr, txd);
52286 +
52287 +    BucketStat (xmtr->Subsys, DataXmit, len);
52288 +
52289 +    return (EP_SUCCESS);
52290 +}
52291 +
52292 +EP_STATUS
52293 +ep_multicast_message (EP_XMTR *xmtr, unsigned int destLo, unsigned int destHi, bitmap_t *bitmap, EP_SERVICE service, 
52294 +                    EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
52295 +{
52296 +    EP_SYS       *sys = xmtr->Subsys->Subsys.Sys;
52297 +    EP_TXD       *txd;
52298 +    int           nnodes;
52299 +    int           i, len;
52300 +    unsigned long flags;    
52301 +
52302 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
52303 +       return (EP_EINVAL);
52304 +
52305 +    if (destLo == -1) 
52306 +       destLo = sys->Position.pos_nodeid & ~(EP_MAX_NODES-1);
52307 +
52308 +    if (destHi == -1 && (destHi = ((sys->Position.pos_nodeid + EP_MAX_NODES) & ~(EP_MAX_NODES-1)) - 1) >= sys->Position.pos_nodes)
52309 +       destHi = sys->Position.pos_nodes-1;
52310 +
52311 +    nnodes = (destHi-destLo+1);
52312 +
52313 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
52314 +       return (EP_ENOMEM);
52315 +
52316 +    txd->Handler = handler;
52317 +    txd->Arg     = arg;
52318 +    txd->Service = service;
52319 +
52320 +    /* Initialise the envelope */
52321 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
52322 +    txd->Envelope.Attr      = EP_SET_MULTICAST(EP_CLEAR_LOCAL_ATTR(attr));
52323 +    txd->Envelope.Range     = EP_RANGE (destLo, destHi);
52324 +    txd->Envelope.TxdMain   = txd->NmdMain;
52325 +    txd->Envelope.nFrags    = nFrags;
52326 +
52327 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
52328 +       txd->Envelope.Frags[i] = nmd[i];
52329 +
52330 +    if (payload)
52331 +    {
52332 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
52333 +
52334 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
52335 +    }
52336 +
52337 +    spin_lock_irqsave (&sys->NodeLock, flags);
52338 +    if (EP_IS_SVC_INDICATOR(attr)) 
52339 +       ep_xmtr_svc_indicator_bitmap(xmtr, EP_ATTR2DATA(attr), txd->TxdMain->Bitmap, destLo, nnodes);
52340 +    else
52341 +       bt_subset (statemap_tobitmap(sys->NodeSet), txd->TxdMain->Bitmap, destLo, nnodes);
52342 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
52343 +
52344 +    if (bitmap != NULL)                                                                        /* bitmap supplied, so intersect it with */
52345 +       bt_intersect (txd->TxdMain->Bitmap, bitmap, nnodes);                            /* the current node set map */
52346 +    
52347 +    if ((attr & EP_NOT_MYSELF) && destLo <= sys->Position.pos_nodeid && sys->Position.pos_nodeid <= destHi)
52348 +       BT_CLEAR (txd->TxdMain->Bitmap, (sys->Position.pos_nodeid-destLo));                     /* clear myself if not wanted */
52349 +
52350 +    if ((i = bt_lowbit (txd->TxdMain->Bitmap, nnodes)) < 0)
52351 +    {
52352 +       FreeTxd (xmtr, txd);
52353 +       return (EP_NODE_DOWN);
52354 +    }
52355 +
52356 +    txd->NodeId = (unsigned short) i;
52357 +
52358 +    DoTransmit (xmtr, txd);
52359 +
52360 +    BucketStat (xmtr->Subsys, McastXmit, len);
52361 +
52362 +    return (EP_SUCCESS);
52363 +}
52364 +
52365 +EP_STATUS
52366 +ep_transmit_rpc (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr,
52367 +                EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
52368 +{
52369 +    EP_TXD       *txd;
52370 +    int           i, len;
52371 +
52372 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
52373 +       return (EP_EINVAL);
52374 +
52375 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
52376 +       return (EP_ENOMEM);
52377 +
52378 +    txd->Handler = handler;
52379 +    txd->Arg     = arg;
52380 +    txd->Service = service;
52381 +    txd->NodeId  = dest;
52382 +
52383 +    /* Initialise the envelope */
52384 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
52385 +    txd->Envelope.Attr      = EP_SET_RPC(EP_CLEAR_LOCAL_ATTR(attr));    
52386 +    txd->Envelope.Range     = EP_RANGE (dest, dest);
52387 +    txd->Envelope.TxdMain   = txd->NmdMain;
52388 +    txd->Envelope.nFrags    = nFrags;
52389 +     
52390 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
52391 +       txd->Envelope.Frags[i] = nmd[i];
52392 +
52393 +    if (payload)
52394 +    {
52395 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
52396 +
52397 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
52398 +    }
52399 +
52400 +    DoTransmit (xmtr, txd);
52401 +
52402 +    BucketStat (xmtr->Subsys, RPCXmit, len);
52403 +
52404 +    return (EP_SUCCESS);
52405 +}
52406 +
52407 +EP_STATUS
52408 +ep_multicast_forward (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, EP_TXH *handler, void *arg,
52409 +                     EP_ENVELOPE *env,  EP_PAYLOAD *payload, bitmap_t *bitmap, EP_NMD *nmd, int nFrags)
52410 +{
52411 +    EP_TXD       *txd;
52412 +    int           i, len;
52413 +
52414 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
52415 +       return (EP_EINVAL);
52416 +
52417 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
52418 +       return (EP_ENOMEM);
52419 +
52420 +    txd->Handler = handler;
52421 +    txd->Arg     = arg;
52422 +    txd->Service = service;
52423 +    txd->NodeId  = (unsigned short) dest;
52424 +
52425 +    /* Initialise the envelope */
52426 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
52427 +    txd->Envelope.Attr      = EP_SET_MULTICAST(EP_CLEAR_LOCAL_ATTR(attr));
52428 +    txd->Envelope.Range     = env->Range;
52429 +    txd->Envelope.TxdMain   = txd->NmdMain;
52430 +    txd->Envelope.nFrags    = nFrags;
52431 +
52432 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
52433 +       txd->Envelope.Frags[i] = nmd[i];
52434 +
52435 +    bt_copy (bitmap, txd->TxdMain->Bitmap, EP_RANGE_HIGH(env->Range) - EP_RANGE_LOW(env->Range) + 1);
52436 +
52437 +    if (payload)
52438 +    {
52439 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
52440 +
52441 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
52442 +    }
52443 +
52444 +    DoTransmit (xmtr, txd);
52445 +
52446 +    BucketStat (xmtr->Subsys, McastXmit, len);
52447 +
52448 +    return (EP_SUCCESS);
52449 +}
52450 +
52451 +int
52452 +ep_poll_transmits (EP_XMTR *xmtr)
52453 +{
52454 +    return (PollActiveTransmitList (xmtr, POLL_TX_LIST));
52455 +}
52456 +
52457 +int
52458 +ep_enable_txcallbacks (EP_XMTR *xmtr)
52459 +{
52460 +    return (PollActiveTransmitList (xmtr, ENABLE_TX_CALLBACK));
52461 +}
52462 +
52463 +int
52464 +ep_disable_txcallbacks (EP_XMTR *xmtr)
52465 +{
52466 +    return (PollActiveTransmitList (xmtr, DISABLE_TX_CALLBACK));
52467 +}
52468 +
52469 +/* functions for accessing fields of txds */
52470 +int              ep_txd_node(EP_TXD *txd)              { return (txd->NodeId); }
52471 +EP_STATUSBLK    *ep_txd_statusblk(EP_TXD *txd)                 { return (&txd->TxdMain->StatusBlk); }
52472 +
52473 +void
52474 +ep_xmtr_xid_msg_handler (void *arg, EP_MANAGER_MSG *msg)
52475 +{
52476 +    EP_XMTR          *xmtr = (EP_XMTR *) arg;
52477 +    EP_SYS           *sys  = xmtr->Subsys->Subsys.Sys;
52478 +    struct list_head *el,*nel;
52479 +    unsigned long     flags;
52480 +
52481 +    switch (msg->Hdr.Type)
52482 +    {
52483 +    case EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST:
52484 +       spin_lock_irqsave (&xmtr->Lock, flags);
52485 +       list_for_each (el, &xmtr->ActiveDescList) {
52486 +           EP_TXD      *txd     = list_entry (el, EP_TXD, Link);
52487 +           EP_TXD_RAIL *txdRail = txd->TxdRail;
52488 +
52489 +           if (txdRail != NULL && EP_XIDS_MATCH (msg->Body.Failover.Xid, txd->Envelope.Xid))
52490 +           {
52491 +               EP_XMTR_RAIL       *xmtrRail = txdRail->XmtrRail;
52492 +               EP_RAIL            *rail     = xmtrRail->CommsRail->Rail;
52493 +               EP_MANAGER_MSG_BODY msgBody;
52494 +               int                 rnum;
52495 +
52496 +               if (! (msg->Body.Failover.Railmask & EP_RAIL2RAILMASK (rail->Number)))
52497 +               {
52498 +                   /* Need to failover this txd to a different rail, select a rail from
52499 +                    * the set that she has asked us to use and which is connected to her
52500 +                    * on this transmitter.   If there are no such rails, then in all probability
52501 +                    * we're offline on all common rails and eventually she will see we have no
52502 +                    * rails in common and abort the receive. */
52503 +                   if ((rnum = ep_xmtr_prefrail (xmtr, msg->Body.Failover.Railmask, txd->NodeId)) < 0)
52504 +                       ep_debugf (DBG_XMTR, "%s: ep_xmtr_xid_msg_handler: FAILOVER_REQUEST but can't determine rail (%04x,%04x,%d,%04x)\n",
52505 +                                  rail->Name, msg->Body.Failover.Railmask, xmtr->RailMask, txd->NodeId, sys->Nodes[txd->NodeId].ConnectedRails);
52506 +                   else
52507 +                   {
52508 +                       EP_XMTR_RAIL *nXmtrRail = xmtr->Rails[rnum];
52509 +
52510 +                       EPRINTF4 (DBG_XMTR, "%s: ep_xmtr_xid_msg_handler: FAILOVER_REQUEST txd=%p XID=%llx-> rail %d\n", rail->Name, txd, (long long) txd->Envelope.Xid.Unique, rnum);
52511 +
52512 +                       /* Bind the txd rail onto the new rail - it doesn't matter if we fail
52513 +                        * as it will remain bound to the original rail */
52514 +                       (void) EP_XMTR_OP (nXmtrRail, BindTxd) (txd, nXmtrRail, EP_TXD_PHASE_PASSIVE);
52515 +                   }
52516 +               }
52517 +
52518 +               /* Send a failover response including an envelope update */
52519 +               msgBody.FailoverTxd.Rail     = rail->Number;
52520 +               msgBody.FailoverTxd.Xid      = txd->Envelope.Xid;
52521 +               msgBody.FailoverTxd.TxdRail  = txd->Envelope.TxdRail;
52522 +
52523 +               ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE, msg->Hdr.Xid, &msgBody);
52524 +           }
52525 +       }
52526 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
52527 +       break;
52528 +
52529 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE: {
52530 +       int         txd_has_not_sent_envelope = 0;
52531 +       EP_TXD      *txd            = NULL;
52532 +       EP_TXD_RAIL *txdRail        = NULL;
52533 +
52534 +       if (msg->Body.NodeState.NetworkErrorState != 0)
52535 +           ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt + MESSAGE_RETRY_TIME);
52536 +       else
52537 +       {
52538 +           spin_lock_irqsave (&xmtr->Lock, flags);
52539 +           list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
52540 +               
52541 +               txd     = list_entry (el, EP_TXD, Link);
52542 +               txdRail = txd->TxdRail;
52543 +               
52544 +               if (txdRail != NULL && EP_XIDS_MATCH (msg->Hdr.Xid, txd->Envelope.Xid)) {
52545 +                   txd_has_not_sent_envelope = EP_XMTR_OP(txdRail->XmtrRail,CheckTxdState)(txd);
52546 +                   break;
52547 +               }
52548 +           }
52549 +           
52550 +           if (txd_has_not_sent_envelope) {
52551 +               EPRINTF2 (DBG_STABILISE, "ep_xmtr_xid_msg_handler: GET_NODE_STATE_RESPONSE txd=%p XID=%llx not sent envelope\n",
52552 +                         txd, (long long) txd->Envelope.Xid.Unique);
52553 +
52554 +               /* at this point it has finished stabilising */
52555 +               txd->Envelope.Attr = EP_CLEAR_TXD_STABALISING(txd->Envelope.Attr);
52556 +
52557 +               /* store railmask into txd if not a service indicator or timeout */
52558 +               if (EP_IS_NO_TYPE(txd->Envelope.Attr))
52559 +                   txd->Envelope.Attr = EP_SET_DATA(txd->Envelope.Attr, EP_TYPE_RAILMASK, msg->Body.NodeState.Railmask);
52560 +
52561 +               spin_unlock_irqrestore (&xmtr->Lock, flags);
52562 +               
52563 +               /* TXD is now no longer bound to a rail , so let ep_check_xmtr() handle it */
52564 +               ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);
52565 +           }
52566 +           else
52567 +               spin_unlock_irqrestore (&xmtr->Lock, flags);    
52568 +       }
52569 +       break;
52570 +    }
52571 +    default:
52572 +       panic ("ep_xmtr_xid_msg_handler: XID match but invalid message type\n");
52573 +    }
52574 +}
52575 +
52576 +EP_XMTR *
52577 +ep_alloc_xmtr (EP_SYS *sys)
52578 +{
52579 +    EP_COMMS_SUBSYS   *subsys;
52580 +    EP_XMTR          *xmtr;
52581 +    struct list_head *el;
52582 +
52583 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME)) == NULL)
52584 +       return (NULL);
52585 +
52586 +    KMEM_ZALLOC (xmtr, EP_XMTR *, sizeof (EP_XMTR), 1);
52587 +
52588 +    if (xmtr == NULL)
52589 +       return (NULL);
52590 +    
52591 +    xmtr->Subsys = subsys;
52592 +
52593 +    spin_lock_init (&xmtr->Lock);
52594 +    INIT_LIST_HEAD (&xmtr->ActiveDescList);
52595 +    
52596 +    kcondvar_init (&xmtr->FreeDescSleep);
52597 +    spin_lock_init (&xmtr->FreeDescLock);
52598 +    INIT_LIST_HEAD (&xmtr->FreeDescList);
52599 +    INIT_LIST_HEAD (&xmtr->DescBlockList);
52600 +
52601 +    ep_xid_cache_init (sys, &xmtr->XidCache);
52602 +
52603 +    xmtr->XidCache.MessageHandler = ep_xmtr_xid_msg_handler;
52604 +    xmtr->XidCache.Arg            = xmtr;
52605 +
52606 +    kmutex_lock (&subsys->Lock);
52607 +    list_add_tail (&xmtr->Link, &subsys->Transmitters);
52608 +
52609 +    ep_procfs_xmtr_add(xmtr);
52610 +
52611 +    /* Now add all rails which are already started */
52612 +    list_for_each (el, &subsys->Rails) { 
52613 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
52614 +
52615 +       EP_RAIL_OP(commsRail, Xmtr.AddRail) (xmtr, commsRail);
52616 +    }
52617 +    kmutex_unlock (&subsys->Lock);
52618 +
52619 +    ep_mod_inc_usecount();
52620 +
52621 +    return (xmtr);
52622 +}
52623 +
52624 +void
52625 +ep_free_xmtr (EP_XMTR *xmtr)
52626 +{
52627 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
52628 +    EP_SYS           *sys    = subsys->Subsys.Sys;
52629 +    struct list_head *el;
52630 +    
52631 +    kmutex_lock (&subsys->Lock);
52632 +    list_for_each (el, &subsys->Rails) { 
52633 +       EP_COMMS_RAIL *rail = list_entry (el, EP_COMMS_RAIL, Link);
52634 +
52635 +       EP_RAIL_OP(rail,Xmtr.DelRail) (xmtr, rail);
52636 +    }
52637 +
52638 +    list_del (&xmtr->Link);
52639 +    kmutex_unlock (&subsys->Lock);
52640 +
52641 +    /* all the desc's must be free */
52642 +    ASSERT(xmtr->FreeDescCount == xmtr->TotalDescCount);
52643 +
52644 +    /* delete the descs */
52645 +    while (!list_empty (&xmtr->DescBlockList))
52646 +       FreeTxdBlock( xmtr, list_entry(xmtr->DescBlockList.next, EP_TXD_BLOCK , Link));
52647 +
52648 +    /* they had better all be gone now */
52649 +    ASSERT((xmtr->FreeDescCount == 0) && (xmtr->TotalDescCount == 0));
52650 +
52651 +    ep_procfs_xmtr_del(xmtr);
52652 +
52653 +    ep_xid_cache_destroy (sys, &xmtr->XidCache);
52654 +
52655 +    spin_lock_destroy (&xmtr->Lock);
52656 +    KMEM_FREE (xmtr, sizeof (EP_XMTR));
52657 +
52658 +    ep_mod_dec_usecount();
52659 +}
52660 +
52661 +long
52662 +ep_check_xmtr (EP_XMTR *xmtr, long nextRunTime)
52663 +{
52664 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
52665 +    EP_SYS           *sys    = subsys->Subsys.Sys;
52666 +    struct list_head *el, *nel;
52667 +    struct list_head  txdList;
52668 +    unsigned long       flags;
52669 +    int                 timed_out=0;
52670 +    int                 i;
52671 +    EP_MANAGER_MSG_BODY body;
52672 +
52673 +    INIT_LIST_HEAD (&txdList);
52674 +
52675 +    /* See if we have any txd's which need to be bound to a rail */
52676 +    spin_lock_irqsave (&xmtr->Lock, flags);
52677 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
52678 +       EP_TXD      *txd      = list_entry (el, EP_TXD, Link);
52679 +       EP_NODE     *node     = &sys->Nodes[txd->NodeId];
52680 +       EP_RAILMASK nodeRails = node->ConnectedRails & xmtr->RailMask;
52681 +       EP_ENVELOPE *env      = &txd->Envelope;
52682 +
52683 +       if (EP_IS_TXD_STABALISING(txd->Envelope.Attr)) 
52684 +       {
52685 +           ASSERT(txd->TxdRail != NULL);
52686 +
52687 +           if (AFTER (lbolt, txd->RetryTime))
52688 +           {
52689 +               EPRINTF6 (DBG_STABILISE, "ep_check_xmtr txd=%p txdRail=%p send get node state to %d Xid=%08x.%08x.%016llx\n",
52690 +                         txd, txd->TxdRail, txd->NodeId, env->Xid.Generation, env->Xid.Handle, env->Xid.Unique);
52691 +               
52692 +               body.Service = txd->Service;
52693 +               if (ep_send_message ( txd->TxdRail->XmtrRail->CommsRail->Rail, txd->NodeId, EP_MANAGER_MSG_TYPE_GET_NODE_STATE, env->Xid, &body) == 0)
52694 +                   txd->RetryTime = lbolt + (MESSAGE_RETRY_TIME << ep_backoff (&txd->Backoff, EP_BACKOFF_STABILISE));
52695 +               else
52696 +                   txd->RetryTime = lbolt + MSGBUSY_RETRY_TIME;
52697 +           }
52698 +
52699 +           ep_kthread_schedule (&subsys->Thread, txd->RetryTime);
52700 +           continue;
52701 +       }
52702 +
52703 +       if (txd->TxdRail != NULL)
52704 +           continue;
52705 +
52706 +       switch (EP_ATTR2TYPE(txd->Envelope.Attr)) 
52707 +       {
52708 +       case EP_TYPE_SVC_INDICATOR: 
52709 +       {
52710 +           EP_RAILMASK       rmask=0;
52711 +           struct list_head *tmp;
52712 +
52713 +           list_for_each (tmp, &subsys->Rails) { 
52714 +               EP_COMMS_RAIL *commsRail = list_entry (tmp, EP_COMMS_RAIL, Link);
52715 +               if ( cm_svc_indicator_is_set(commsRail->Rail, EP_ATTR2DATA(txd->Envelope.Attr), txd->NodeId))
52716 +                   rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
52717 +           } 
52718 +           nodeRails &= rmask;
52719 +           break;
52720 +       }
52721 +       case EP_TYPE_TIMEOUT:
52722 +           timed_out = AFTER(lbolt, txd->TimeStamp + EP_ATTR2DATA(txd->Envelope.Attr)) ? (1) : (0);
52723 +           break;
52724 +       case EP_TYPE_RAILMASK:
52725 +           nodeRails &= EP_ATTR2DATA(txd->Envelope.Attr);
52726 +           break;
52727 +       default:
52728 +           timed_out = AFTER(lbolt, txd->TimeStamp +  EP_DEFAULT_TIMEOUT) ? (1) : (0);
52729 +           break;
52730 +       }
52731 +
52732 +       if (nodeRails == 0 || timed_out || (EP_IS_NO_FAILOVER(env->Attr) && EP_IS_PREFRAIL_SET(env->Attr) && 
52733 +                                           (nodeRails & EP_RAIL2RAILMASK(EP_ATTR2PREFRAIL(env->Attr))) == 0))
52734 +       {
52735 +           EPRINTF5 (timed_out ? DBG_STABILISE : DBG_XMTR, "ep_check_xmtr: txd=%p XID=%llx to %d no rails connected or cannot failover (nodeRails=0x%x,timed_out=%d)\n", 
52736 +                     txd, (long long) env->Xid.Unique, txd->NodeId, nodeRails, timed_out);
52737 +
52738 +           list_del  (&txd->Link);
52739 +           list_add_tail (&txd->Link, &txdList);
52740 +       }
52741 +       else
52742 +       {
52743 +           EP_XMTR_RAIL *xmtrRail;
52744 +           int i, len, rnum;
52745 +
52746 +           if (EP_IS_PREFRAIL_SET(env->Attr) && (nodeRails & EP_RAIL2RAILMASK(EP_ATTR2PREFRAIL(env->Attr))))
52747 +               rnum = EP_ATTR2PREFRAIL(env->Attr);
52748 +           else
52749 +               rnum = ep_pickRail (nodeRails);
52750 +
52751 +           EPRINTF3 (DBG_XMTR, "ep_check_xmtr: txd=%p XID=%llx mapping NMDs onto rail %d \n", txd, (long long) env->Xid.Unique, rnum);
52752 +
52753 +           for (i = len = 0; i < env->nFrags; len += env->Frags[i].nmd_len, i++)
52754 +               ep_nmd_map_rails (sys, &env->Frags[i], nodeRails);
52755 +
52756 +           if ((xmtrRail = xmtr->Rails[rnum]) == NULL || 
52757 +               !EP_XMTR_OP(xmtrRail,BindTxd) (txd, xmtrRail, EP_TXD_PHASE_ACTIVE))
52758 +               ep_kthread_schedule (&subsys->Thread, lbolt + RESOURCE_RETRY_TIME);
52759 +       }
52760 +    }
52761 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
52762 +
52763 +    while (! list_empty (&txdList))
52764 +    {
52765 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
52766 +       list_del (&txd->Link);
52767 +
52768 +       txd->Handler (txd, txd->Arg, EP_NODE_DOWN);
52769 +       FreeTxd (xmtr, txd);
52770 +    }
52771 +
52772 +    /* Check to see if we're low on txds */
52773 +    if (xmtr->FreeDescCount < ep_txd_lowat)
52774 +       AllocateTxdBlock (xmtr, 0, NULL);
52775 +    
52776 +    /* Then check each rail */
52777 +    for (i = 0; i < EP_MAX_RAILS; i++) 
52778 +       if (xmtr->RailMask & (1 << i) ) 
52779 +           nextRunTime = EP_XMTR_OP (xmtr->Rails[i],Check) (xmtr->Rails[i], nextRunTime);
52780 +    return (nextRunTime);
52781 +}
52782 +
52783 +void
52784 +ep_display_txd (DisplayInfo *di, EP_TXD *txd)
52785 +{
52786 +    EP_ENVELOPE *env     = &txd->Envelope;
52787 +    EP_TXD_RAIL *txdRail = txd->TxdRail;
52788 +
52789 +    (di->func)(di->arg, "TXD: %p Version=%x Attr=%x Xid=%08x.%08x.%016llx\n", txd, 
52790 +              env->Version, env->Attr, env->Xid.Generation, env->Xid.Handle, (long long) env->Xid.Unique);
52791 +    (di->func)(di->arg,  "     NodeId=%d Range=%d.%d TxdRail=%x TxdMain=%x.%x.%x nFrags=%d\n",
52792 +              env->NodeId, EP_RANGE_LOW(env->Range), EP_RANGE_HIGH(env->Range), env->TxdRail,
52793 +              env->TxdMain.nmd_addr, env->TxdMain.nmd_len, env->TxdMain.nmd_attr, env->nFrags);
52794 +    (di->func)(di->arg,  "       Frag[0] %08x.%08x.%08x\n", env->Frags[0].nmd_addr, env->Frags[0].nmd_len, env->Frags[0].nmd_attr);
52795 +    (di->func)(di->arg,  "       Frag[1] %08x.%08x.%08x\n", env->Frags[1].nmd_addr, env->Frags[1].nmd_len, env->Frags[1].nmd_attr);
52796 +    (di->func)(di->arg,  "       Frag[2] %08x.%08x.%08x\n", env->Frags[2].nmd_addr, env->Frags[2].nmd_len, env->Frags[2].nmd_attr);
52797 +    (di->func)(di->arg,  "       Frag[3] %08x.%08x.%08x\n", env->Frags[3].nmd_addr, env->Frags[3].nmd_len, env->Frags[3].nmd_attr);
52798 +
52799 +    if (txdRail != NULL) EP_XMTR_OP (txdRail->XmtrRail, DisplayTxd) (di, txdRail);
52800 +}
52801 +
52802 +void
52803 +ep_display_xmtr (DisplayInfo *di, EP_XMTR *xmtr)
52804 +{
52805 +    int               freeCount   = 0;
52806 +    int               activeCount = 0;
52807 +    struct list_head *el;
52808 +    int               i;
52809 +    unsigned long     flags;
52810 +
52811 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52812 +    list_for_each (el, &xmtr->FreeDescList)
52813 +       freeCount++;
52814 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52815 +
52816 +    spin_lock_irqsave (&xmtr->Lock, flags);
52817 +    list_for_each (el, &xmtr->ActiveDescList)
52818 +       activeCount++;
52819 +    
52820 +    (di->func)(di->arg, "ep_display_xmtr: xmtr=%p Free=%d Active=%d\n", xmtr, freeCount, activeCount);
52821 +    for (i = 0; i < EP_MAX_RAILS; i++)
52822 +       if (xmtr->Rails[i]) EP_XMTR_OP (xmtr->Rails[i], DisplayXmtr) (di, xmtr->Rails[i]);
52823 +
52824 +    list_for_each (el,&xmtr->ActiveDescList)
52825 +       ep_display_txd (di, list_entry (el, EP_TXD, Link));
52826 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
52827 +}
52828 +
52829 +void
52830 +ep_xmtr_fillout_stats(EP_XMTR *xmtr, char *str)
52831 +{
52832 +    sprintf(str+strlen(str),"Tx     %lu  %lu /sec\n",       GET_STAT_TOTAL(xmtr->stats,tx),      GET_STAT_PER_SEC(xmtr->stats,tx) );
52833 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(xmtr->stats,tx_len) / (1024*1024),  GET_STAT_PER_SEC(xmtr->stats,tx_len) / (1024*1024));
52834 +}
52835 +
52836 +void
52837 +ep_xmtr_rail_fillout_stats(EP_XMTR_RAIL *xmtr_rail, char *str)
52838 +{
52839 +    sprintf(str+strlen(str),"Tx     %lu  %lu /sec\n",       GET_STAT_TOTAL(xmtr_rail->stats,tx),      GET_STAT_PER_SEC(xmtr_rail->stats,tx) );
52840 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(xmtr_rail->stats,tx_len) / (1024*1024),  GET_STAT_PER_SEC(xmtr_rail->stats,tx_len) / (1024*1024));
52841 +}
52842 +
52843 +/*
52844 + * Local variables:
52845 + * c-file-style: "stroustrup"
52846 + * End:
52847 + */
52848 Index: linux-2.4.21/drivers/net/qsnet/ep/epcommsTx_elan3.c
52849 ===================================================================
52850 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcommsTx_elan3.c    2004-02-23 16:02:56.000000000 -0500
52851 +++ linux-2.4.21/drivers/net/qsnet/ep/epcommsTx_elan3.c 2005-06-01 23:12:54.657430288 -0400
52852 @@ -0,0 +1,1173 @@
52853 +/*
52854 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
52855 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
52856 + *
52857 + *    For licensing information please see the supplied COPYING file
52858 + *
52859 + */
52860 +
52861 +#ident "@(#)$Id: epcommsTx_elan3.c,v 1.17.2.2 2004/11/12 10:54:51 mike Exp $"
52862 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx_elan3.c,v $ */
52863 +
52864 +#include <qsnet/kernel.h>
52865 +
52866 +#include <elan/kcomm.h>
52867 +#include <elan/epsvc.h>
52868 +#include <elan/epcomms.h>
52869 +
52870 +#include "kcomm_vp.h"
52871 +#include "kcomm_elan3.h"
52872 +#include "epcomms_elan3.h"
52873 +#include "debug.h"
52874 +
52875 +#define XMTR_TO_RAIL(xmtrRail)         ((EP3_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail->Rail)
52876 +#define XMTR_TO_DEV(xmtrRail)          (XMTR_TO_RAIL(xmtrRail)->Device)
52877 +#define XMTR_TO_SUBSYS(xmtrRail)       (((EP_XMTR_RAIL *) xmtrRail)->Xmtr->Subsys)
52878 +
52879 +static void TxEnveEvent (EP3_RAIL *rail, void *arg);
52880 +static void TxEnveRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
52881 +static void TxEnveVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
52882 +
52883 +static EP3_COOKIE_OPS EnveCookieOps =
52884 +{
52885 +    TxEnveEvent,
52886 +    TxEnveRetry,
52887 +    NULL, /* DmaCancelled */
52888 +    TxEnveVerify
52889 +};
52890 +
52891 +static void TxDataEvent (EP3_RAIL *rail, void *arg);
52892 +static void TxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
52893 +static void TxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
52894 +
52895 +static EP3_COOKIE_OPS DataCookieOps =
52896 +{
52897 +    TxDataEvent,
52898 +    TxDataRetry,
52899 +    NULL, /* DmaCancelled */
52900 +    TxDataVerify
52901 +};
52902 +
52903 +static void TxDoneEvent (EP3_RAIL *dev, void *arg);
52904 +static void TxDoneRetry (EP3_RAIL *dev, void *arg, E3_DMA_BE *dma, int status);
52905 +static void TxDoneVerify (EP3_RAIL  *dev, void *arg, E3_DMA_BE *dma);
52906 +
52907 +static EP3_COOKIE_OPS DoneCookieOps = 
52908 +{
52909 +    TxDoneEvent,
52910 +    TxDoneRetry,
52911 +    NULL, /* DmaCancelled */
52912 +    TxDoneVerify,
52913 +} ;
52914 +
52915 +static int
52916 +AllocateTxdRailBlock (EP3_XMTR_RAIL *xmtrRail)
52917 +{
52918 +    EP3_RAIL          *rail = XMTR_TO_RAIL (xmtrRail);
52919 +    ELAN3_DEV         *dev = rail->Device;
52920 +    EP3_TXD_RAIL_BLOCK *blk;
52921 +    EP3_TXD_RAIL       *txdRail;
52922 +    sdramaddr_t        pTxdElan;
52923 +    EP3_TXD_RAIL_MAIN  *pTxdMain;
52924 +    E3_Addr           pTxdElanAddr;
52925 +    E3_Addr           pTxdMainAddr;
52926 +    E3_BlockCopyEvent  event;
52927 +    int                       i;
52928 +    unsigned long      flags;
52929 +
52930 +    KMEM_ZALLOC (blk, EP3_TXD_RAIL_BLOCK *, sizeof (EP3_TXD_RAIL_BLOCK), 1);
52931 +
52932 +    if (blk == NULL)
52933 +       return 0;
52934 +
52935 +    if ((pTxdElan = ep_alloc_elan (&rail->Generic, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK, 0, &pTxdElanAddr)) == (sdramaddr_t) 0)
52936 +    {
52937 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
52938 +       return 0;
52939 +    }
52940 +
52941 +    if ((pTxdMain = ep_alloc_main (&rail->Generic, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK, 0, &pTxdMainAddr)) == (EP3_TXD_RAIL_MAIN *) NULL)
52942 +    {
52943 +       ep_free_elan (&rail->Generic, pTxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
52944 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
52945 +       return 0;
52946 +    }
52947 +    
52948 +    if (ReserveDmaRetries (rail, EP3_NUM_TXD_PER_BLOCK, 0) != ESUCCESS)
52949 +    {
52950 +       ep_free_main (&rail->Generic, pTxdMainAddr, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK);
52951 +       ep_free_elan (&rail->Generic, pTxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
52952 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
52953 +       return 0;
52954 +    }
52955 +
52956 +    for (txdRail = &blk->Txd[0], i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++, txdRail++)
52957 +    {
52958 +       txdRail->Generic.XmtrRail = &xmtrRail->Generic;
52959 +       txdRail->TxdElan          = pTxdElan;
52960 +       txdRail->TxdElanAddr      = pTxdElanAddr;
52961 +       txdRail->TxdMain          = pTxdMain;
52962 +       txdRail->TxdMainAddr      = pTxdMainAddr;
52963 +
52964 +       RegisterCookie (&rail->CookieTable, &txdRail->EnveCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent), &EnveCookieOps, (void *) txdRail);
52965 +       RegisterCookie (&rail->CookieTable, &txdRail->DataCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), &DataCookieOps, (void *) txdRail);
52966 +       RegisterCookie (&rail->CookieTable, &txdRail->DoneCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), &DoneCookieOps, (void *) txdRail);
52967 +
52968 +       EP3_INIT_COPY_EVENT (event, txdRail->EnveCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, EnveEvent), 0);
52969 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent), sizeof (E3_BlockCopyEvent));
52970 +
52971 +       EP3_INIT_COPY_EVENT (event, txdRail->DataCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, DataEvent), 0);
52972 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), sizeof (E3_BlockCopyEvent));
52973 +
52974 +       EP3_INIT_COPY_EVENT (event, txdRail->DoneCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, DoneEvent), 0);
52975 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), sizeof (E3_BlockCopyEvent));
52976 +       
52977 +       pTxdMain->EnveEvent = EP3_EVENT_FREE;
52978 +       pTxdMain->DataEvent = EP3_EVENT_FREE;
52979 +       pTxdMain->DoneEvent = EP3_EVENT_FREE;
52980 +
52981 +       /* move onto next descriptor */
52982 +       pTxdElan     += EP3_TXD_RAIL_ELAN_SIZE;
52983 +       pTxdElanAddr += EP3_TXD_RAIL_ELAN_SIZE;
52984 +       pTxdMain      = (EP3_TXD_RAIL_MAIN *) ((unsigned long) pTxdMain + EP3_TXD_RAIL_MAIN_SIZE);
52985 +       pTxdMainAddr += EP3_TXD_RAIL_MAIN_SIZE;
52986 +    }
52987 +
52988 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
52989 +
52990 +    list_add  (&blk->Link, &xmtrRail->DescBlockList);
52991 +    xmtrRail->TotalDescCount += EP3_NUM_TXD_PER_BLOCK;
52992 +    xmtrRail->FreeDescCount  += EP3_NUM_TXD_PER_BLOCK;
52993 +
52994 +    for (i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++)
52995 +       list_add (&blk->Txd[i].Generic.Link, &xmtrRail->FreeDescList);
52996 +
52997 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
52998 +    
52999 +    return 1;
53000 +}
53001 +
53002 +static void
53003 +FreeTxdRailBlock (EP3_XMTR_RAIL *xmtrRail, EP3_TXD_RAIL_BLOCK *blk)
53004 +{
53005 +    EP3_RAIL     *rail = XMTR_TO_RAIL(xmtrRail);
53006 +    EP3_TXD_RAIL *txdRail;
53007 +    unsigned long flags;
53008 +    int i;
53009 +
53010 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53011 +
53012 +    list_del (&blk->Link);
53013 +    
53014 +    xmtrRail->TotalDescCount -= EP3_NUM_TXD_PER_BLOCK;
53015 +    
53016 +    for (txdRail = &blk->Txd[0], i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++, txdRail++)
53017 +    {
53018 +       xmtrRail->FreeDescCount--;
53019 +       
53020 +       list_del (&txdRail->Generic.Link);
53021 +       
53022 +       DeregisterCookie (&rail->CookieTable, &txdRail->EnveCookie);
53023 +       DeregisterCookie (&rail->CookieTable, &txdRail->DataCookie);
53024 +       DeregisterCookie (&rail->CookieTable, &txdRail->DoneCookie);
53025 +    }
53026 +
53027 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53028 +
53029 +    ReleaseDmaRetries (rail, EP3_NUM_TXD_PER_BLOCK);
53030 +
53031 +    ep_free_main (&rail->Generic, blk->Txd[0].TxdMainAddr, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK);
53032 +    ep_free_elan (&rail->Generic, blk->Txd[0].TxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
53033 +    KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
53034 +}
53035 +
53036 +static EP3_TXD_RAIL *
53037 +GetTxdRail (EP3_XMTR_RAIL *xmtrRail)
53038 +{
53039 +    EP_COMMS_SUBSYS  *subsys = xmtrRail->Generic.Xmtr->Subsys;
53040 +    EP3_TXD_RAIL     *txdRail;
53041 +    int low_on_txds;
53042 +    unsigned long flags;
53043 +
53044 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53045 +
53046 +    if (list_empty (&xmtrRail->FreeDescList))
53047 +       txdRail = NULL;
53048 +    else
53049 +    {
53050 +       txdRail = list_entry (xmtrRail->FreeDescList.next, EP3_TXD_RAIL, Generic.Link);
53051 +
53052 +#if defined(DEBUG)
53053 +       {
53054 +           EP_RAIL   *rail = xmtrRail->Generic.CommsRail->Rail;
53055 +           ELAN3_DEV *dev  = ((EP3_RAIL *) rail)->Device;
53056 +           
53057 +           EP_ASSERT (rail, txdRail->TxdMain->EnveEvent == EP3_EVENT_FREE);
53058 +           EP_ASSERT (rail, txdRail->TxdMain->DataEvent == EP3_EVENT_FREE);
53059 +           EP_ASSERT (rail, txdRail->TxdMain->DoneEvent == EP3_EVENT_FREE);
53060 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
53061 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
53062 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
53063 +       }
53064 +#endif
53065 +       
53066 +       list_del (&txdRail->Generic.Link);
53067 +
53068 +       xmtrRail->FreeDescCount--;
53069 +    }
53070 +    /* Wake up the descriptor primer thread if there aren't many left */
53071 +    low_on_txds = (xmtrRail->FreeDescCount < ep_txd_lowat);
53072 +
53073 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53074 +
53075 +    if (low_on_txds)
53076 +       ep_kthread_schedule (&subsys->Thread, lbolt);
53077 +
53078 +    return (txdRail);
53079 +}
53080 +
53081 +static void
53082 +FreeTxdRail (EP3_XMTR_RAIL *xmtrRail, EP3_TXD_RAIL *txdRail)
53083 +{
53084 +    unsigned long flags;
53085 +
53086 +#if defined(DEBUG_ASSERT)
53087 +    {
53088 +       EP_RAIL   *rail = xmtrRail->Generic.CommsRail->Rail;
53089 +       ELAN3_DEV *dev  = ((EP3_RAIL *) rail)->Device;
53090 +
53091 +       EP_ASSERT (rail, txdRail->Generic.XmtrRail == &xmtrRail->Generic);
53092 +       
53093 +       EP_ASSERT (rail, txdRail->TxdMain->EnveEvent == EP3_EVENT_PRIVATE);
53094 +       EP_ASSERT (rail, txdRail->TxdMain->DataEvent == EP3_EVENT_PRIVATE);
53095 +       EP_ASSERT (rail, txdRail->TxdMain->DoneEvent == EP3_EVENT_PRIVATE);
53096 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
53097 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
53098 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
53099 +       
53100 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_FREE;
53101 +       txdRail->TxdMain->DataEvent = EP3_EVENT_FREE;
53102 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_FREE;
53103 +    }
53104 +#endif
53105 +
53106 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53107 +    
53108 +    list_add (&txdRail->Generic.Link, &xmtrRail->FreeDescList);
53109 +
53110 +    xmtrRail->FreeDescCount++;
53111 +
53112 +    if (xmtrRail->FreeDescWaiting)
53113 +    {
53114 +       xmtrRail->FreeDescWaiting--;
53115 +       kcondvar_wakeupall (&xmtrRail->FreeDescSleep, &xmtrRail->FreeDescLock);
53116 +    }
53117 +
53118 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53119 +}
53120 +
53121 +static void
53122 +BindTxdToRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
53123 +{
53124 +    ASSERT (SPINLOCK_HELD (&txd->Xmtr->Lock));
53125 +
53126 +    EPRINTF6 (DBG_XMTR, "%s: BindTxdToRail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
53127 +             XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail, 
53128 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long) txd->Envelope.Xid.Unique);
53129 +
53130 +    txd->TxdRail = &txdRail->Generic;
53131 +    txdRail->Generic.Txd = txd;
53132 +}
53133 +
53134 +static void
53135 +UnbindTxdFromRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
53136 +{
53137 +    ASSERT (SPINLOCK_HELD (&txd->Xmtr->Lock));
53138 +    ASSERT (txd->TxdRail == &txdRail->Generic && txdRail->Generic.Txd == txd);
53139 +
53140 +    EPRINTF6 (DBG_XMTR, "%s: UnbindTxdToRail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
53141 +             XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail, 
53142 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long) txd->Envelope.Xid.Unique);
53143 +    txd->TxdRail = NULL;
53144 +    txdRail->Generic.Txd = NULL; 
53145 +}
53146 +
53147 +/*
53148 + * TxEnveEvent: arg == EP3_TXD_RAIL
53149 + *    Called when envelope delivered
53150 + */
53151 +static void
53152 +TxEnveEvent (EP3_RAIL *rail, void *arg)
53153 +{
53154 +    panic ("TxEnveEvent");
53155 +}
53156 +
53157 +/*
53158 + * TxEnveRetry: arg == EP3_TXD_RAIL
53159 + *    Called on retry of dma of large message envelope.
53160 + */
53161 +static void
53162 +TxEnveRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
53163 +{
53164 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) arg;
53165 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53166 +    
53167 +    EPRINTF3 (DBG_XMTR, "%s: TxEnveRetry: xmtr %p txd %p\n",  rail->Generic.Name, xmtrRail, txdRail);
53168 +    
53169 +    EP_ASSERT (&rail->Generic, txdRail->TxdMain->EnveEvent == EP3_EVENT_ACTIVE);
53170 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 1)); /* PCI read */
53171 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txdRail->Generic.Txd->NodeId);
53172 +
53173 +    if (! TxdShouldStabalise (&txdRail->Generic, &rail->Generic))
53174 +       QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&txdRail->Backoff, EP_BACKOFF_ENVELOPE));
53175 +    else
53176 +       QueueDmaForRetry (rail, dma, EP_RETRY_STABALISING);     /* place dma on stabilising list for neterr fixup */
53177 +}
53178 +
53179 +static void
53180 +TxEnveVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
53181 +{
53182 +    EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg;
53183 +    
53184 +    EP_ASSERT (&rail->Generic, txdRail->TxdMain->EnveEvent == EP3_EVENT_ACTIVE);
53185 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 1)); /* PCI read */
53186 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txdRail->Generic.Txd->NodeId);
53187 +}
53188 +
53189 +/*
53190 + * TxDataEvent: arg == EP3_TXD_RAIL
53191 + *    Called on completion of a large transmit.
53192 + */
53193 +static void
53194 +TxDataEvent (EP3_RAIL *rail, void *arg)
53195 +{
53196 +    EP3_TXD_RAIL      *txdRail  = (EP3_TXD_RAIL *) arg;
53197 +    EP3_XMTR_RAIL     *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53198 +    EP_XMTR          *xmtr     = xmtrRail->Generic.Xmtr;
53199 +    EP3_TXD_RAIL_MAIN *txdMain  = txdRail->TxdMain;
53200 +    sdramaddr_t        txdElan  = txdRail->TxdElan;
53201 +    int                delay    = 1;
53202 +    EP_TXD            *txd;
53203 +    unsigned long      flags;
53204 +
53205 +    spin_lock_irqsave (&xmtr->Lock, flags);
53206 +    for (;;)
53207 +    {
53208 +       if (EP3_EVENT_FIRED (txdRail->DataCookie, txdMain->DataEvent))
53209 +           break;
53210 +
53211 +       if (EP3_EVENT_FIRING (rail->Device, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), txdRail->DataCookie, txdMain->DataEvent))                /* PCI read */
53212 +       {
53213 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
53214 +               panic ("TxDataEvent: events set but block copy not completed\n");
53215 +           DELAY(delay);
53216 +           delay <<= 1;
53217 +       }
53218 +       else
53219 +       {
53220 +           EPRINTF3 (DBG_XMTR, "%s: TxDataEvent: xmtr %p txd %p previously collecting by polling\n", 
53221 +                     rail->Generic.Name, xmtrRail, txdRail);
53222 +           spin_unlock_irqrestore (&xmtr->Lock, flags);
53223 +           return;
53224 +       }
53225 +       mb();
53226 +    }
53227 +
53228 +    if ((txd = txdRail->Generic.Txd) == NULL ||                        /* If there is no txd, or if the descriptor is marked */
53229 +       !(EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr)) ||       /* as no interrupt, or been reused as an RPC, */
53230 +       (EP_IS_RPC(txd->Envelope.Attr)))                        /* then we were either called as a result of a previous */
53231 +    {                                                          /* tx which was completed by polling or as a result */
53232 +       spin_unlock_irqrestore (&xmtr->Lock, flags);            /* of a EnableTxCallBack/DisableTxCallback */
53233 +
53234 +       EPRINTF4 (DBG_XMTR, "%s: TxDataEvent: xmtr %p txd %p recyled (%x)\n", 
53235 +                 rail->Generic.Name, xmtr, txd, txd ? txd->Envelope.Attr : 0);
53236 +       return;
53237 +    }
53238 +
53239 +    ASSERT (EP3_EVENT_FIRED (txdRail->EnveCookie, txdMain->EnveEvent));
53240 +
53241 +    EPRINTF5 (DBG_XMTR, "%s: TxDataEvent : xmtrRail=%p txdRail=%p tx=%p XID=%llx\n", 
53242 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
53243 +    
53244 +    ep_xmtr_txd_stat(xmtr,txd);
53245 +    
53246 +    /* remove from active transmit lists */
53247 +    list_del (&txd->Link);
53248 +
53249 +    UnbindTxdFromRail (txd, txdRail);
53250 +    
53251 +    /* clear the done flags for next time round */
53252 +    txdMain->EnveEvent = EP3_EVENT_PRIVATE;
53253 +    txdMain->DataEvent = EP3_EVENT_PRIVATE;
53254 +    txdMain->DoneEvent = EP3_EVENT_PRIVATE;
53255 +    
53256 +    FreeTxdRail (xmtrRail, txdRail);
53257 +
53258 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53259 +    
53260 +    txd->Handler (txd, txd->Arg, EP_SUCCESS);
53261 +    
53262 +    FreeTxd (xmtr, txd);
53263 +}
53264 +
53265 +/*
53266 + * TxDataRetry: arg == EP3_TXD_RAIL
53267 + *    Called on retry of remote "put" dma of large transmit data.
53268 + */
53269 +static void
53270 +TxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
53271 +{
53272 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) arg;
53273 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53274 +    EP_TXD        *txd      = txdRail->Generic.Txd;
53275 +
53276 +    EP_ASSERT (&rail->Generic, ((txdRail->TxdMain->DataEvent == EP3_EVENT_ACTIVE && 
53277 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) >= 1)) ||  /* PCI read */
53278 +                               (EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
53279 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0))));  /* PCI read */
53280 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txd->NodeId);
53281 +
53282 +    EPRINTF5 (DBG_XMTR, "%s: TxDataRetry: xmtrRail=%p txdRail=%p txd=%p XID=%llx\n", 
53283 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
53284 +    
53285 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&txdRail->Backoff, EP_BACKOFF_DATA));
53286 +}
53287 +
53288 +static void
53289 +TxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
53290 +{
53291 +    EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg;
53292 +    EP_TXD       *txd     = txdRail->Generic.Txd;
53293 +
53294 +    EP_ASSERT (&rail->Generic, ((txdRail->TxdMain->DataEvent == EP3_EVENT_ACTIVE && 
53295 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) >= 1)) ||  /* PCI read */
53296 +                               (EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
53297 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0))));  /* PCI read */
53298 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txd->NodeId);
53299 +}
53300 +
53301 +/*
53302 + * TxDoneEvent: arg == EP3_TXD_RAIL
53303 + *    Called on completion of a RPC.
53304 + */
53305 +static void
53306 +TxDoneEvent (EP3_RAIL *rail, void *arg)
53307 +{
53308 +    EP3_TXD_RAIL      *txdRail  = (EP3_TXD_RAIL *) arg;
53309 +    EP3_XMTR_RAIL     *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53310 +    EP_XMTR          *xmtr     = xmtrRail->Generic.Xmtr;
53311 +    int                delay   = 1;
53312 +    EP_TXD           *txd;
53313 +    unsigned long      flags;
53314 +
53315 +    spin_lock_irqsave (&xmtr->Lock, flags);
53316 +
53317 +    for (;;)
53318 +    {
53319 +       if (EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent) && 
53320 +           EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
53321 +           break;
53322 +       
53323 +       if (EP3_EVENT_FIRING (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), txdRail->DoneCookie, txdRail->TxdMain->DoneEvent) && 
53324 +           EP3_EVENT_FIRING (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), txdRail->DataCookie, txdRail->TxdMain->DataEvent))
53325 +       {
53326 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
53327 +               panic ("TxDoneEvent: events set but block copy not completed\n");
53328 +           DELAY(delay);
53329 +           delay <<= 1;
53330 +       }
53331 +       else
53332 +       {
53333 +           EPRINTF3 (DBG_XMTR, "%s: TxDoneEvent: xmtr %p txdRail %p previously collecting by polling\n", 
53334 +                     rail->Generic.Name, xmtr, txdRail);
53335 +           spin_unlock_irqrestore (&xmtr->Lock, flags);
53336 +           return;
53337 +       }
53338 +       mb();
53339 +    }
53340 +
53341 +    if ((txd = txdRail->Generic.Txd) == NULL ||                                                 /* If there is no txd, or if the descriptor is marked */
53342 +       !(EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr) || EP_IS_RPC(txd->Envelope.Attr))) /* marked as no interrupt, or been reused as a transmit, */
53343 +    {                                                                                   /* then we were either called as a result of a previous */
53344 +       spin_unlock_irqrestore (&xmtr->Lock, flags);                                     /* tx which was completed by polling or as a result */
53345 +                                                                                        /* of a EnableTxCallBack/DisableTxCallback */
53346 +
53347 +       EPRINTF4 (DBG_XMTR, "%s: TxDoneEvent: xmtr %p txd %p recyled (%x)\n", 
53348 +                 rail->Generic.Name, xmtr, txd, txd ? txd->Envelope.Attr : 0);
53349 +       return; 
53350 +    }
53351 +
53352 +    EPRINTF5 (DBG_XMTR, "%s: TxDoneEvent: xmtrRail=%p txdRail=%p txd=%p XID=%llx\n", 
53353 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
53354 +
53355 +    ep_xmtr_txd_stat(xmtr,txd);
53356 +
53357 +    /* remove from active transmit list */
53358 +    list_del (&txd->Link);
53359 +    
53360 +    UnbindTxdFromRail (txd, txdRail);
53361 +    
53362 +    /* clear the done flags for next time round */
53363 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
53364 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
53365 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
53366 +    
53367 +    FreeTxdRail (xmtrRail, txdRail);
53368 +
53369 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53370 +           
53371 +    if (txd->Handler)
53372 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
53373 +       
53374 +    FreeTxd (xmtr, txd);
53375 +}
53376 +
53377 +/*
53378 + * TxDoneRetry: arg == EP3_TXD_RAIL
53379 + */
53380 +static void
53381 +TxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
53382 +{
53383 +    panic ("TxDoneRetry");
53384 +}
53385 +
53386 +static void
53387 +TxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
53388 +{
53389 +    panic ("TxDoneVerify");
53390 +}
53391 +
53392 +static void
53393 +EnableTransmitCallback (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
53394 +{
53395 +    ELAN3_DEV *dev = XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Device;
53396 +
53397 +    EPRINTF3 (DBG_XMTR, "%s: EnableTransmitCallback: txd %p txdRail %p\n", XMTR_TO_RAIL (txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail);
53398 +
53399 +    txd->Envelope.Attr = EP_SET_INTERRUPT_ENABLED(txd->Envelope.Attr);
53400 +               
53401 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type), EV_TYPE_BCOPY);
53402 +       
53403 +    if (EP_IS_RPC(txd->Envelope.Attr))
53404 +    {
53405 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY);
53406 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type),  EV_TYPE_BCOPY | EV_TYPE_EVIRQ | txdRail->DoneCookie.Cookie);
53407 +    }
53408 +    else
53409 +    {
53410 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY | EV_TYPE_EVIRQ | txdRail->DataCookie.Cookie);
53411 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), EV_TYPE_BCOPY);
53412 +    }
53413 +}
53414 +
53415 +static void
53416 +DisableTransmitCallback (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
53417 +{
53418 +    ELAN3_DEV *dev = XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Device;
53419 +
53420 +    EPRINTF3 (DBG_XMTR, "%s: DisableTransmitCallback: txd %p txdRail %p\n", XMTR_TO_RAIL (txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail);
53421 +
53422 +    txd->Envelope.Attr = EP_CLEAR_INTERRUPT_ENABLED(txd->Envelope.Attr);
53423 +
53424 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type), EV_TYPE_BCOPY);
53425 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY);
53426 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), EV_TYPE_BCOPY);
53427 +}
53428 +
53429 +static void
53430 +InitialiseTxdRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail, int phase)
53431 +{
53432 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53433 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
53434 +
53435 +    /* Flush the Elan TLB if mappings have changed */
53436 +    ep_perrail_dvma_sync (&rail->Generic);
53437 +
53438 +    /* Initialise the per-rail fields in the envelope */
53439 +    txd->Envelope.TxdRail = txdRail->TxdElanAddr;
53440 +    txd->Envelope.NodeId  = rail->Generic.Position.pos_nodeid;
53441 +
53442 +    /* Initialise the dma backoff */
53443 +    txdRail->Backoff.type = EP_BACKOFF_FREE;
53444 +
53445 +    /* Initialise the per-rail events */
53446 +    switch (phase)
53447 +    {
53448 +    case EP_TXD_PHASE_ACTIVE:
53449 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 1);
53450 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 
53451 +                           (txd->Envelope.nFrags ? txd->Envelope.nFrags : 1) + (EP_IS_MULTICAST(txd->Envelope.Attr) ? 1 : 0));
53452 +       
53453 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_ACTIVE;
53454 +       txdRail->TxdMain->DataEvent = EP3_EVENT_ACTIVE;
53455 +       break;
53456 +       
53457 +    case EP_TXD_PHASE_PASSIVE:
53458 +       ASSERT (EP_IS_RPC(txd->Envelope.Attr));
53459 +
53460 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);
53461 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);
53462 +
53463 +       txdRail->TxdMain->EnveEvent = txdRail->EnveCookie.Cookie;
53464 +       txdRail->TxdMain->DataEvent = txdRail->DataCookie.Cookie;
53465 +       break;
53466 +    }
53467 +
53468 +    if (! EP_IS_RPC(txd->Envelope.Attr))
53469 +       txdRail->TxdMain->DoneEvent = txdRail->DoneCookie.Cookie;
53470 +    else
53471 +    {
53472 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 1);
53473 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_ACTIVE;
53474 +    }
53475 +
53476 +    if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
53477 +       DisableTransmitCallback (txd, txdRail);
53478 +    else
53479 +       EnableTransmitCallback (txd, txdRail);
53480 +
53481 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
53482 +    if ( epdebug_check_sum ) 
53483 +       txd->Envelope.CheckSum = ep_calc_check_sum( txd->Xmtr->Subsys->Subsys.Sys, &txd->Envelope, txd->Envelope.Frags, txd->Envelope.nFrags);
53484 +    else
53485 +#endif
53486 +       txd->Envelope.CheckSum = 0;  
53487 +
53488 +    /* copy the envelope and payload if present down to sdram */
53489 +    elan3_sdram_copyl_to_sdram (rail->Device, &txd->Envelope, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, Envelope), EP_ENVELOPE_SIZE);
53490 +    
53491 +    if (EP_HAS_PAYLOAD(txd->Envelope.Attr))
53492 +       elan3_sdram_copyl_to_sdram (rail->Device, &txd->Payload, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, Payload), EP_PAYLOAD_SIZE);
53493 +}
53494 +
53495 +void
53496 +ep3xmtr_flush_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
53497 +{
53498 +    EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail);
53499 +    struct list_head *el;
53500 +    unsigned long flags;
53501 +
53502 +    switch (rail->Generic.CallbackStep)
53503 +    {
53504 +    case EP_CB_FLUSH_FILTERING:
53505 +       /* only need to acquire/release the Lock to ensure that
53506 +        * the node state transition has been noticed. */
53507 +       spin_lock_irqsave (&xmtr->Lock, flags);
53508 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
53509 +       break;
53510 +
53511 +    case EP_CB_FLUSH_FLUSHING:
53512 +       spin_lock_irqsave (&xmtr->Lock, flags);
53513 +       
53514 +       list_for_each (el, &xmtr->ActiveDescList) {
53515 +           EP_TXD       *txd      = list_entry (el, EP_TXD, Link);
53516 +           EP3_TXD_RAIL *txdRail  = (EP3_TXD_RAIL *) txd->TxdRail;
53517 +           EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
53518 +           
53519 +           if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
53520 +               continue;
53521 +           
53522 +           if (EP_IS_RPC(txd->Envelope.Attr))
53523 +           {
53524 +               if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
53525 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
53526 +               else if (! EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
53527 +                   nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
53528 +           }
53529 +           else
53530 +           {
53531 +               if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
53532 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
53533 +           }
53534 +       }
53535 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
53536 +       break;
53537 +
53538 +    default:
53539 +       panic ("ep3xmtr_flush_callback: invalid callback step\n");
53540 +       break;
53541 +    }
53542 +}
53543 +
53544 +void
53545 +ep3xmtr_failover_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
53546 +{
53547 +    EP3_RAIL         *rail   = XMTR_TO_RAIL (xmtrRail);
53548 +    struct list_head  txdList;
53549 +    struct list_head *el, *nel;
53550 +    unsigned long flags;
53551 +#ifdef SUPPORT_RAIL_FAILOVER
53552 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
53553 +#endif
53554 +
53555 +    INIT_LIST_HEAD (&txdList);
53556 +
53557 +    spin_lock_irqsave (&xmtr->Lock, flags);
53558 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
53559 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
53560 +       EP3_TXD_RAIL *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
53561 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
53562 +           
53563 +       /* Only progress relocation of txd's bound to this rail */
53564 +       if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_PASSIVATED)
53565 +           continue;
53566 +       
53567 +#ifdef SUPPORT_RAIL_FAILOVER
53568 +       /* Transmit data has not been sent, so just restart on different rail */
53569 +       if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
53570 +       {
53571 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d unbind an retry\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
53572 +           
53573 +           UnbindTxdFromRail (txd, txdRail);
53574 +           
53575 +           /* clear the done flags - so that it will be ignored if an event interrupt is generated */
53576 +           txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
53577 +           txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
53578 +           txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
53579 +
53580 +           /* reset all events, since none of them could have been set */
53581 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                          /* PCI write */
53582 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                          /* PCI write */
53583 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                          /* PCI write */
53584 +           
53585 +           FreeTxdRail (xmtrRail, txdRail);
53586 +           
53587 +           /* epcomms thread will restart on different rail */
53588 +           ep_kthread_schedule (&subsys->Thread, lbolt);
53589 +           continue;
53590 +       }
53591 +
53592 +       if (EP_IS_RPC(txd->Envelope.Attr) && !EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
53593 +       {
53594 +           if (EP_IS_NO_FAILOVER(txd->Envelope.Attr))
53595 +           {
53596 +               EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d - not able to failover\n",
53597 +                         rail->Generic.Name, xmtr, txd, txd->NodeId);
53598 +
53599 +               list_del (&txd->Link);
53600 +               UnbindTxdFromRail (txd, txdRail);
53601 +               
53602 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
53603 +               txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
53604 +               txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
53605 +               txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
53606 +               
53607 +               /* envelope and data events must have been set, so only clear the done event */
53608 +               EP_ASSERT (&rail->Generic, SDRAM_ASSERT(elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
53609 +               EP_ASSERT (&rail->Generic, SDRAM_ASSERT(elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
53610 +
53611 +               elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                              /* PCI write */
53612 +               
53613 +               FreeTxdRail (xmtrRail, txdRail);
53614 +           
53615 +               list_add_tail (&txd->Link, &txdList);
53616 +               continue;
53617 +           }
53618 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d passive\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
53619 +           
53620 +           nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
53621 +           continue;
53622 +       }
53623 +
53624 +       EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d completed\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
53625 +#endif
53626 +
53627 +    }
53628 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53629 +
53630 +    while (! list_empty (&txdList)) 
53631 +    {
53632 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
53633 +
53634 +       list_del (&txd->Link);
53635 +
53636 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
53637 +       
53638 +       FreeTxd (xmtr, txd);
53639 +    }
53640 +}
53641 +
53642 +
53643 +void
53644 +ep3xmtr_disconnect_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
53645 +{
53646 +    EP3_RAIL         *rail = XMTR_TO_RAIL (xmtrRail);
53647 +    struct list_head *el, *nel;
53648 +    struct list_head  txdList;
53649 +    unsigned long flags;
53650 +    
53651 +    INIT_LIST_HEAD (&txdList);
53652 +
53653 +    spin_lock_irqsave (&xmtr->Lock, flags);
53654 +
53655 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
53656 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
53657 +       EP3_TXD_RAIL *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
53658 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
53659 +           
53660 +       if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
53661 +           continue;
53662 +       
53663 +       if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent) &&
53664 +           EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
53665 +           EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
53666 +       {
53667 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_disconnect_callback - xmtr %p txd %p completed to node %d\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
53668 +           continue;
53669 +       }
53670 +
53671 +       /* Remove from active list */
53672 +       list_del (&txd->Link);
53673 +       
53674 +       UnbindTxdFromRail (txd, txdRail);
53675 +       
53676 +       /* clear the done flags - so that it will be ignored if an event interrupt is generated */
53677 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
53678 +       txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
53679 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
53680 +       
53681 +       /* reset the envelope and data events, since only they could have been set */
53682 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                              /* PCI write */
53683 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                              /* PCI write */
53684 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                              /* PCI write */
53685 +       
53686 +       FreeTxdRail (xmtrRail, txdRail);
53687 +           
53688 +       EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_disconnect_callback - xmtr %p txd %p node %d not conected\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
53689 +
53690 +       /* add to the list of txd's which are to be completed */
53691 +       list_add_tail (&txd->Link, &txdList);
53692 +    }
53693 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53694 +    
53695 +    while (! list_empty (&txdList)) 
53696 +    {
53697 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
53698 +
53699 +       list_del (&txd->Link);
53700 +
53701 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
53702 +       
53703 +       FreeTxd (xmtr, txd);
53704 +    }
53705 +}
53706 +
53707 +int
53708 +ep3xmtr_poll_txd (EP_XMTR_RAIL *x, EP_TXD_RAIL *t, int how)
53709 +{
53710 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
53711 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) t;
53712 +    EP_TXD        *txd      = txdRail->Generic.Txd;
53713 +
53714 +    switch (how)
53715 +    {
53716 +    case ENABLE_TX_CALLBACK:
53717 +       if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
53718 +           EnableTransmitCallback (txd, txdRail);
53719 +       break;
53720 +
53721 +    case DISABLE_TX_CALLBACK:
53722 +       if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
53723 +           DisableTransmitCallback (txd, txdRail);
53724 +       break;
53725 +    }
53726 +
53727 +    if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent) &&
53728 +       EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
53729 +       EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
53730 +    {
53731 +       EPRINTF3 (DBG_XMTR, "%s: ep3xmtr_poll_txd: txd=%p XID=%llx completed\n", 
53732 +                 XMTR_TO_RAIL (xmtrRail)->Generic.Name, txd, (long long) txd->Envelope.Xid.Unique);
53733 +
53734 +       ep_xmtr_txd_stat(xmtrRail->Generic.Xmtr,txd);
53735 +
53736 +       UnbindTxdFromRail (txd, txdRail);
53737 +       
53738 +       /* clear the done flags - so that it will be ignored if an event interrupt is generated */
53739 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
53740 +       txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
53741 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
53742 +       
53743 +       FreeTxdRail (xmtrRail, txdRail);
53744 +
53745 +       return 1;
53746 +    }
53747 +
53748 +    return 0;
53749 +}
53750 +
53751 +int
53752 +ep3xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *x, unsigned int phase)
53753 +{
53754 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
53755 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
53756 +    EP3_TXD_RAIL  *txdRail;
53757 +    E3_DMA_BE      dmabe;
53758 +
53759 +    if ((txdRail = GetTxdRail (xmtrRail)) == NULL)
53760 +       return 0;
53761 +
53762 +    switch (phase)
53763 +    {
53764 +    case EP_TXD_PHASE_ACTIVE:
53765 +       if (rail->Generic.Nodes[txd->NodeId].State != EP_NODE_CONNECTED)
53766 +       {
53767 +           EPRINTF2 (DBG_XMTR, "%s: TransmitTxdOnRail: node %u not connected on this rail\n", rail->Generic.Name, txd->NodeId);
53768 +
53769 +           /* clear the done flags - so that it will be ignored if an event interrupt is generated */
53770 +           txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
53771 +           txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
53772 +           txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
53773 +
53774 +           /* reset all events, since none of them could have been set */
53775 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                          /* PCI write */
53776 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                          /* PCI write */
53777 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                          /* PCI write */
53778 +
53779 +           FreeTxdRail (xmtrRail, txdRail);
53780 +           return 0;
53781 +       }
53782 +
53783 +       InitialiseTxdRail (txd, txdRail, phase);
53784 +
53785 +       /* Initialise the dma descriptor */
53786 +       dmabe.s.dma_type            = E3_DMA_TYPE (DMA_BYTE, DMA_WRITE, DMA_QUEUED, EP3_DMAFAILCOUNT);
53787 +       dmabe.s.dma_size            = (EP_HAS_PAYLOAD(txd->Envelope.Attr) ? EP_INPUTQ_SIZE : EP_ENVELOPE_SIZE);
53788 +       dmabe.s.dma_source          = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, Envelope);
53789 +       dmabe.s.dma_dest            = (E3_Addr) 0;
53790 +       dmabe.s.dma_destEvent       = EP_MSGQ_ADDR(txd->Service);
53791 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (txd->NodeId);
53792 +       dmabe.s.dma_srcEvent        = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent);
53793 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, txd->NodeId);
53794 +
53795 +       EPRINTF8 (DBG_XMTR, "%s: TransmitTxdOnRail: txd=%p txdRail=%p @ %x XID=%llx dest=%u srcEvent=%x srcCookie=%x\n", rail->Generic.Name, 
53796 +                 txd, txdRail, txdRail->TxdElanAddr, (long long) txd->Envelope.Xid.Unique, txd->NodeId, dmabe.s.dma_srcEvent, dmabe.s.dma_srcCookieVProc);
53797 +       
53798 +       BindTxdToRail (txd, txdRail);
53799 +       
53800 +       if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
53801 +           QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
53802 +       break;
53803 +
53804 +    case EP_TXD_PHASE_PASSIVE:
53805 +       InitialiseTxdRail (txd, txdRail, EP_TXD_PHASE_PASSIVE);                         /* initialise as passive (updated envelope) */
53806 +       
53807 +       EP_XMTR_OP (txd->TxdRail->XmtrRail, UnbindTxd) (txd, EP_TXD_PHASE_PASSIVE);     /* unbind from existing rail */
53808 +
53809 +       BindTxdToRail (txd, txdRail);                                                   /* and bind it to our new rail */
53810 +       break;
53811 +    }
53812 +
53813 +    return 1;
53814 +}
53815 +
53816 +void
53817 +ep3xmtr_unbind_txd (EP_TXD *txd, unsigned int phase)
53818 +{
53819 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) txd->TxdRail;
53820 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53821 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
53822 +
53823 +    /* XXXX - TBD assertions on phase */
53824 +
53825 +    UnbindTxdFromRail (txd, txdRail);
53826 +    
53827 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
53828 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
53829 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
53830 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
53831 +    
53832 +    /* reset the envelope and data events, since only they could have been set */
53833 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                         /* PCI write */
53834 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                         /* PCI write */
53835 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                         /* PCI write */         
53836 +    
53837 +    FreeTxdRail (xmtrRail, txdRail);
53838 +}
53839 +
53840 +long
53841 +ep3xmtr_check (EP_XMTR_RAIL *x, long nextRunTime)
53842 +{
53843 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
53844 +
53845 +    if (xmtrRail->FreeDescCount < ep_txd_lowat && !AllocateTxdRailBlock(xmtrRail))
53846 +    {
53847 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow txd rail pool\n", XMTR_TO_RAIL(xmtrRail)->Generic.Name);
53848 +               
53849 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
53850 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
53851 +    }
53852 +    
53853 +    return nextRunTime;
53854 +}
53855 +
53856 +void
53857 +ep3xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
53858 +{
53859 +    EP3_XMTR_RAIL *xmtrRail;
53860 +    unsigned long  flags;
53861 +
53862 +    KMEM_ZALLOC (xmtrRail, EP3_XMTR_RAIL *, sizeof (EP3_XMTR_RAIL), 1);
53863 +
53864 +    spin_lock_init (&xmtrRail->FreeDescLock);
53865 +    kcondvar_init  (&xmtrRail->FreeDescSleep);
53866 +    INIT_LIST_HEAD (&xmtrRail->FreeDescList);
53867 +    INIT_LIST_HEAD (&xmtrRail->DescBlockList);
53868 +
53869 +    xmtrRail->Generic.CommsRail = commsRail;
53870 +    xmtrRail->Generic.Xmtr      = xmtr;
53871 +
53872 +    spin_lock_irqsave (&xmtr->Lock, flags);
53873 +
53874 +    xmtr->Rails[commsRail->Rail->Number] = &xmtrRail->Generic;
53875 +    xmtr->RailMask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
53876 +
53877 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53878 +}
53879 +
53880 +void
53881 +ep3xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
53882 +{
53883 +    EP3_RAIL         *rail     = (EP3_RAIL *) commsRail->Rail;
53884 +    EP3_XMTR_RAIL    *xmtrRail = (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number];
53885 +    unsigned long     flags;
53886 +
53887 +    /* rail mask set as not usable */
53888 +    spin_lock_irqsave (&xmtr->Lock, flags);
53889 +    xmtr->RailMask &= ~EP_RAIL2RAILMASK (rail->Generic.Number);
53890 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53891 +
53892 +    /* wait for all txd's for this rail to become free */
53893 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53894 +    while (xmtrRail->FreeDescCount != xmtrRail->TotalDescCount)
53895 +    {
53896 +       xmtrRail->FreeDescWaiting++;
53897 +       kcondvar_wait (&xmtrRail->FreeDescSleep, &xmtrRail->FreeDescLock, &flags);
53898 +    }
53899 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53900 +
53901 +    spin_lock_irqsave (&xmtr->Lock, flags);
53902 +    xmtr->Rails[commsRail->Rail->Number] = NULL;
53903 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53904 +
53905 +    /* need to free up the txd's and blocks */
53906 +    /* all the txd's associated with DescBlocks must be in the FreeDescList */
53907 +    ASSERT (xmtrRail->TotalDescCount == xmtrRail->FreeDescCount);
53908 +
53909 +    /* run through the DescBlockList deleting them */
53910 +    while (!list_empty (&xmtrRail->DescBlockList))
53911 +       FreeTxdRailBlock (xmtrRail, list_entry(xmtrRail->DescBlockList.next, EP3_TXD_RAIL_BLOCK , Link));
53912 +    
53913 +    /* it had better be empty after that */
53914 +    ASSERT ((xmtrRail->FreeDescCount == 0) && (xmtrRail->TotalDescCount == 0));
53915 +
53916 +    spin_lock_destroy (&xmtrRail->FreeDescLock);
53917 +    kcondvar_destroy (&xmtrRail->FreeDescSleep);
53918 +
53919 +    KMEM_FREE (xmtrRail, sizeof (EP3_XMTR_RAIL));
53920 +}
53921 +
53922 +void
53923 +ep3xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *x)
53924 +{
53925 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
53926 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
53927 +    struct list_head *el;
53928 +    unsigned long flags;
53929 +    int freeCount = 0;
53930 +
53931 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53932 +    list_for_each (el, &xmtrRail->FreeDescList)
53933 +       freeCount++;
53934 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53935 +
53936 +    (di->func)(di->arg, "                 Rail=%d Free=%d Total=%d (%d)\n",
53937 +               rail->Generic.Number, xmtrRail->FreeDescCount, xmtrRail->TotalDescCount, freeCount);
53938 +}
53939 +
53940 +void
53941 +ep3xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *t)
53942 +{
53943 +    EP3_TXD_RAIL      *txdRail   = (EP3_TXD_RAIL *) t;
53944 +    EP3_XMTR_RAIL     *xmtrRail  = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53945 +    EP3_TXD_RAIL_MAIN *txdMain   = txdRail->TxdMain;
53946 +    sdramaddr_t        txdElan   = txdRail->TxdElan;
53947 +    EP3_RAIL          *rail      = (EP3_RAIL *) xmtrRail->Generic.CommsRail->Rail;
53948 +    ELAN3_DEV         *dev       = rail->Device;
53949 +    
53950 +    (di->func)(di->arg, "     EnveEvent=%x DataEvent=%x DoneEvent=%x Rail=%s\n", 
53951 +              txdMain->EnveEvent, txdMain->DataEvent, txdMain->DoneEvent, rail->Generic.Name);
53952 +    (di->func)(di->arg, "     EnveEvent=%x.%x DataEvent=%x.%x DoneEvent=%x.%x\n",
53953 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)),
53954 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type)),
53955 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)),
53956 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type)),
53957 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)),
53958 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type)));
53959 +}
53960 +
53961 +int
53962 +ep3xmtr_check_txd_state (EP_TXD *txd)
53963 +{
53964 +    EP3_TXD_RAIL  *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
53965 +    EP3_XMTR_RAIL *xmtrRail  = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53966 +    EP3_RAIL      *rail      = XMTR_TO_RAIL (xmtrRail);
53967 +    E3_Addr        enveEvent = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent);
53968 +    EP3_RETRY_DMA *retry = NULL;
53969 +
53970 +    struct list_head *el;
53971 +    struct list_head *nel;
53972 +    unsigned long     flags;
53973 +
53974 +    /* check that the envelope event really is not set */
53975 +    if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent )) 
53976 +       return (0);
53977 +    
53978 +    /* remove matching dma from stalled list */            
53979 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
53980 +    
53981 +    list_for_each_safe(el, nel,  &rail->DmaRetries[EP_RETRY_STABALISING]) {
53982 +       retry = list_entry (el, EP3_RETRY_DMA, Link);
53983 +       
53984 +       if ( retry->Dma.s.dma_srcEvent == enveEvent ) {
53985 +           /* remove from retry list */
53986 +           list_del (&retry->Link);
53987 +           break; /* there can only be one */
53988 +       } 
53989 +    }
53990 +    ASSERT ( retry != NULL); /* must find one in list */
53991 +    ASSERT ( retry->Dma.s.dma_srcEvent == enveEvent ); /* better still be the right type then */    
53992 +
53993 +    /* add to free list */
53994 +    list_add (&retry->Link, &rail->DmaRetryFreeList);
53995 +
53996 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);    
53997 +                       
53998 +    UnbindTxdFromRail (txd, txdRail);
53999 +       
54000 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54001 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54002 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54003 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54004 +    
54005 +    /* reset the envelope and data events, since only they could have been set */
54006 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                         /* PCI write */
54007 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                         /* PCI write */
54008 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                         /* PCI write */         
54009 +    
54010 +    FreeTxdRail (xmtrRail, txdRail);
54011 +
54012 +    return (1);
54013 +}
54014 +
54015 +void
54016 +ep3xmtr_fillout_rail_stats(EP_XMTR_RAIL *xmtr_rail, char *str) {
54017 +    /* no stats here yet */
54018 +    /* EP3_XMTR_RAIL * ep3xmtr_rail = (EP3_XMTR_RAIL *) xmtr_rail; */
54019 +}
54020 +
54021 +/*
54022 + * Local variables:
54023 + * c-file-style: "stroustrup"
54024 + * End:
54025 + */
54026 Index: linux-2.4.21/drivers/net/qsnet/ep/epcommsTx_elan4.c
54027 ===================================================================
54028 --- linux-2.4.21.orig/drivers/net/qsnet/ep/epcommsTx_elan4.c    2004-02-23 16:02:56.000000000 -0500
54029 +++ linux-2.4.21/drivers/net/qsnet/ep/epcommsTx_elan4.c 2005-06-01 23:12:54.659429984 -0400
54030 @@ -0,0 +1,1389 @@
54031 +/*
54032 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
54033 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
54034 + *
54035 + *    For licensing information please see the supplied COPYING file
54036 + *
54037 + */
54038 +
54039 +#ident "@(#)$Id: epcommsTx_elan4.c,v 1.26.2.4 2004/11/12 10:54:51 mike Exp $"
54040 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx_elan4.c,v $ */
54041 +
54042 +#include <qsnet/kernel.h>
54043 +
54044 +#include <elan/kcomm.h>
54045 +#include <elan/epsvc.h>
54046 +#include <elan/epcomms.h>
54047 +
54048 +#include "debug.h"
54049 +#include "kcomm_vp.h"
54050 +#include "kcomm_elan4.h"
54051 +#include "epcomms_elan4.h"
54052 +
54053 +#include <elan4/trtype.h>
54054 +
54055 +#define XMTR_TO_COMMS(xmtrRail)                ((EP4_COMMS_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail)
54056 +#define XMTR_TO_RAIL(xmtrRail)         ((EP4_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail->Rail)
54057 +#define XMTR_TO_DEV(xmtrRail)          (XMTR_TO_RAIL(xmtrRail)->r_ctxt.ctxt_dev)
54058 +#define XMTR_TO_SUBSYS(xmtrRail)       (((EP_XMTR_RAIL *) xmtrRail)->Xmtr->Subsys)
54059 +
54060 +#define TXD_TO_XMTR(txdRail)           ((EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail)
54061 +#define TXD_TO_RAIL(txdRail)           XMTR_TO_RAIL(TXD_TO_XMTR(txdRail))
54062 +
54063 +static void txd_interrupt (EP4_RAIL *rail, void *arg);
54064 +static void poll_interrupt (EP4_RAIL *rail, void *arg);
54065 +
54066 +static __inline__ int
54067 +on_list (struct list_head *ent, struct list_head *list)
54068 +{
54069 +    struct list_head *el;
54070 +    unsigned int count = 0;
54071 +    list_for_each (el, list) {
54072 +       if (el == ent)
54073 +           count++;
54074 +    }
54075 +    return count;
54076 +}
54077 +
54078 +static __inline__ void
54079 +__ep4_txd_assert_free (EP4_TXD_RAIL *txdRail, const char *file, const int line)
54080 +{
54081 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
54082 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
54083 +    register int   failed   = 0;
54084 +    
54085 +    if ((txdRail)->txd_retry_time     != 0)              failed |= (1 << 0);
54086 +    if ((txdRail)->txd_main->txd_env  != EP4_STATE_FREE) failed |= (1 << 1);
54087 +    if ((txdRail)->txd_main->txd_data != EP4_STATE_FREE) failed |= (1 << 2);
54088 +    if ((txdRail)->txd_main->txd_done != EP4_STATE_FREE) failed |= (1 << 3);
54089 +
54090 +    if (sdram_assert)
54091 +    {
54092 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)) >> 32)  != -32) failed |= (1 << 4);
54093 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 5);
54094 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 6);
54095 +    }
54096 +
54097 +    if (failed)
54098 +    {
54099 +       printk ("__ep4_txd_assert_free: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
54100 +
54101 +       ep_debugf (DBG_DEBUG, "__ep4_txd_assert_free: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
54102 +       ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
54103 +
54104 +       (txdRail)->txd_retry_time     = 0;
54105 +       (txdRail)->txd_main->txd_env  = EP4_STATE_FREE;
54106 +       (txdRail)->txd_main->txd_data = EP4_STATE_FREE;
54107 +       (txdRail)->txd_main->txd_done = EP4_STATE_FREE;
54108 +
54109 +       if (sdram_assert)
54110 +       {
54111 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)  + 4, -32);
54112 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType) + 4, 0);
54113 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType) + 4, 0);
54114 +       }
54115 +       EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "__ep4_txd_assert_free");
54116 +    }
54117 +}
54118 +
54119 +static __inline__ void
54120 +__ep4_txd_assert_finished (EP4_TXD_RAIL *txdRail, const char *file, const int line)
54121 +{
54122 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
54123 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
54124 +    register int   failed   = 0;
54125 +    
54126 +    if ((txdRail)->txd_retry_time     != 0)                  failed |= (1 << 0);
54127 +    if ((txdRail)->txd_main->txd_env  != EP4_STATE_FINISHED) failed |= (1 << 1);
54128 +    if ((txdRail)->txd_main->txd_data != EP4_STATE_FINISHED) failed |= (1 << 2);
54129 +    if ((txdRail)->txd_main->txd_done != EP4_STATE_FINISHED) failed |= (1 << 3);
54130 +    
54131 +    if (sdram_assert)
54132 +    {
54133 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)) >> 32)  != -32) failed |= (1 << 4);
54134 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 5);
54135 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 6);
54136 +    }
54137 +
54138 +    if (failed)
54139 +    {
54140 +       printk ("__ep4_txd_assert_finished: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
54141 +
54142 +       ep_debugf (DBG_DEBUG, "__ep4_txd_assert_finished: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
54143 +       ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
54144 +
54145 +       (txdRail)->txd_retry_time     = 0;
54146 +       (txdRail)->txd_main->txd_env  = EP4_STATE_FINISHED;
54147 +       (txdRail)->txd_main->txd_data = EP4_STATE_FINISHED;
54148 +       (txdRail)->txd_main->txd_done = EP4_STATE_FINISHED;
54149 +
54150 +       if (sdram_assert)
54151 +       {
54152 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)  + 4, -32);
54153 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType) + 4, 0);
54154 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType) + 4, 0);
54155 +       }
54156 +       EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "__ep4_txd_assert_finished");
54157 +    }
54158 +}
54159 +
54160 +static __inline__ int
54161 +__ep4_txd_assfail (EP4_TXD_RAIL *txdRail, const char *expr, const char *file, const int line)
54162 +{
54163 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
54164 +
54165 +    printk ("__ep4_txd_assfail: %s:%d '%s'\n", file, line, expr);
54166 +
54167 +    ep_debugf (DBG_DEBUG, "__ep4_txd_assfail: %s:%d '%s'\n", file, line, expr);
54168 +    ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
54169 +
54170 +    EP_ASSFAIL (XMTR_TO_RAIL (xmtrRail), "__ep4_txd_assfail");
54171 +
54172 +    return 0;
54173 +}
54174 +
54175 +#define EP4_TXD_ASSERT(txdRail, EX)            ((void) ((EX) || (__ep4_txd_assfail(txdRail, #EX, __FILE__, __LINE__))))
54176 +#define EP4_TXD_ASSERT_FREE(txdRail)           __ep4_txd_assert_free(txdRail, __FILE__, __LINE__)
54177 +#define EP4_TXD_ASSERT_FINISHED(txdRail)       __ep4_txd_assert_finished(txdRail, __FILE__, __LINE__)
54178 +
54179 +static int
54180 +alloc_txd_block (EP4_XMTR_RAIL *xmtrRail)
54181 +{
54182 +    EP4_RAIL           *rail = XMTR_TO_RAIL(xmtrRail);
54183 +    ELAN4_DEV          *dev  = XMTR_TO_DEV(xmtrRail);
54184 +    EP4_TXD_RAIL_BLOCK *blk;
54185 +    EP4_TXD_RAIL_MAIN  *txdMain;
54186 +    EP_ADDR            txdMainAddr;
54187 +    sdramaddr_t                txdElan;
54188 +    EP_ADDR            txdElanAddr;
54189 +    EP4_TXD_RAIL       *txdRail;
54190 +    unsigned long       flags;
54191 +    int                 i;
54192 +
54193 +    KMEM_ZALLOC (blk, EP4_TXD_RAIL_BLOCK *, sizeof (EP4_TXD_RAIL_BLOCK), 1);
54194 +
54195 +    if (blk == NULL)
54196 +       return 0;
54197 +
54198 +    if ((txdElan = ep_alloc_elan (&rail->r_generic, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK, 0, &txdElanAddr)) == (sdramaddr_t) 0)
54199 +    {
54200 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
54201 +       return 0;
54202 +    }
54203 +
54204 +    if ((txdMain = ep_alloc_main (&rail->r_generic, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK, 0, &txdMainAddr)) == (EP4_TXD_RAIL_MAIN *) NULL)
54205 +    {
54206 +       ep_free_elan (&rail->r_generic, txdElanAddr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54207 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
54208 +       return 0;
54209 +    }
54210 +
54211 +    if (ep4_reserve_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK, 0) != 0)
54212 +    {
54213 +	ep_free_main (&rail->r_generic, txdMainAddr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54214 +       ep_free_elan (&rail->r_generic, txdElanAddr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54215 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
54216 +       return 0;
54217 +    }
54218 +
54219 +    for (txdRail = &blk->blk_txds[0], i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++, txdRail++)
54220 +    {
54221 +       txdRail->txd_generic.XmtrRail = &xmtrRail->xmtr_generic;
54222 +       txdRail->txd_elan             = txdElan;
54223 +       txdRail->txd_elan_addr        = txdElanAddr;
54224 +       txdRail->txd_main             = txdMain;
54225 +       txdRail->txd_main_addr        = txdMainAddr;
54226 +
54227 +       /* We only need to reserve space for one command stream, since the sten packet
54228 +        * can only be retrying *before* the dma source event is set.
54229 +        * reserve bytes of "event" cq space for the completion write + interrupt */
54230 +       if ((txdRail->txd_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, EP4_INTR_CMD_NDWORDS)) == NULL)
54231 +           goto failed;
54232 +
54233 +       /* register the main interrupt cookies */
54234 +       ep4_register_intcookie (rail, &txdRail->txd_intcookie, txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_done), txd_interrupt, txdRail);
54235 +
54236 +       /* initialise the events */
54237 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
54238 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54239 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CopySource),
54240 +                           txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd));
54241 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CopyDest),
54242 +                           txdRail->txd_ecq->ecq_addr);
54243 +
54244 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
54245 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
54246 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_WritePtr),
54247 +                           txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_data));
54248 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_WriteValue),
54249 +                           EP4_STATE_FINISHED);
54250 +
54251 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
54252 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54253 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CopySource),
54254 +                           txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd));
54255 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CopyDest),
54256 +                           txdRail->txd_ecq->ecq_addr);
54257 +
54258 +       /* Initialise the command streams */
54259 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_write_cmd),
54260 +                           WRITE_DWORD_CMD | (txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_env)));
54261 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_write_value),
54262 +                           EP4_STATE_FAILED);
54263 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_intr_cmd),
54264 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
54265 +
54266 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_write_cmd),
54267 +                           WRITE_DWORD_CMD | (txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_done)));
54268 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_write_value),
54269 +                           EP4_STATE_FINISHED);
54270 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
54271 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
54272 +
54273 +       txdMain->txd_env  = EP4_STATE_FREE;
54274 +       txdMain->txd_data = EP4_STATE_FREE;
54275 +       txdMain->txd_done = EP4_STATE_FREE;
54276 +
54277 +       /* move onto next descriptor */
54278 +       txdElan     += EP4_TXD_RAIL_ELAN_SIZE;
54279 +       txdElanAddr += EP4_TXD_RAIL_ELAN_SIZE;
54280 +       txdMain      = (EP4_TXD_RAIL_MAIN *) ((unsigned long) txdMain + EP4_TXD_RAIL_MAIN_SIZE);
54281 +       txdMainAddr += EP4_TXD_RAIL_MAIN_SIZE;
54282 +    }
54283 +
54284 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
54285 +
54286 +    list_add  (&blk->blk_link, &xmtrRail->xmtr_blocklist);
54287 +
54288 +    xmtrRail->xmtr_totalcount += EP4_NUM_TXD_PER_BLOCK;
54289 +    xmtrRail->xmtr_freecount  += EP4_NUM_TXD_PER_BLOCK;
54290 +
54291 +    for (i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++)
54292 +       list_add (&blk->blk_txds[i].txd_generic.Link, &xmtrRail->xmtr_freelist);
54293 +
54294 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
54295 +
54296 +    return 1;
54297 +
54298 + failed:
54299 +    while (--i >= 0)
54300 +    {
54301 +       ep4_put_ecq (rail, txdRail->txd_ecq, EP4_INTR_CMD_NDWORDS);
54302 +       ep4_deregister_intcookie (rail, &txdRail->txd_intcookie);
54303 +    }
54304 +    ep4_release_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK);
54305 +
54306 +    ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54307 +    ep_free_elan (&rail->r_generic, blk->blk_txds[0].txd_elan_addr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54308 +
54309 +    KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
54310 +
54311 +    return 0;
54312 +}
54313 +
54314 +static void
54315 +free_txd_block (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL_BLOCK *blk)
54316 +{
54317 +    EP4_RAIL     *rail = XMTR_TO_RAIL (xmtrRail);
54318 +    EP4_TXD_RAIL *txdRail;
54319 +    unsigned long flags;
54320 +    int           i;
54321 +
54322 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
54323 +
54324 +    list_del (&blk->blk_link);
54325 +
54326 +    xmtrRail->xmtr_totalcount -= EP4_NUM_TXD_PER_BLOCK;
54327 +
54328 +    for (txdRail = &blk->blk_txds[0], i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++, txdRail++)
54329 +    {
54330 +       xmtrRail->xmtr_freecount--;
54331 +
54332 +       ep4_put_ecq (rail, txdRail->txd_ecq, EP4_INTR_CMD_NDWORDS);
54333 +
54334 +       ep4_deregister_intcookie (rail, &txdRail->txd_intcookie);
54335 +
54336 +       list_del (&txdRail->txd_generic.Link);
54337 +    }
54338 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
54339 +
54340 +    ep4_release_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK);
54341 +
54342 +    ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54343 +    ep_free_elan (&rail->r_generic, blk->blk_txds[0].txd_elan_addr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54344 +
54345 +    KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
54346 +}
54347 +
54348 +static EP4_TXD_RAIL *
54349 +get_txd_rail (EP4_XMTR_RAIL *xmtrRail)
54350 +{
54351 +    EP_COMMS_SUBSYS  *subsys = XMTR_TO_SUBSYS(xmtrRail);
54352 +    EP4_TXD_RAIL     *txdRail;
54353 +    unsigned long flags;
54354 +    int low_on_txds;
54355 +
54356 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
54357 +
54358 +    if (list_empty (&xmtrRail->xmtr_freelist))
54359 +       txdRail = NULL;
54360 +    else
54361 +    {
54362 +       txdRail = list_entry (xmtrRail->xmtr_freelist.next, EP4_TXD_RAIL, txd_generic.Link);
54363 +
54364 +       EP4_TXD_ASSERT_FREE(txdRail);
54365 +
54366 +       list_del (&txdRail->txd_generic.Link);
54367 +
54368 +       xmtrRail->xmtr_freecount--;
54369 +    }
54370 +    /* Wakeup the descriptor primer thread if there's not many left */
54371 +    low_on_txds = (xmtrRail->xmtr_freecount < ep_txd_lowat);
54372 +
54373 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
54374 +
54375 +    if (low_on_txds)
54376 +       ep_kthread_schedule (&subsys->Thread, lbolt);
54377 +
54378 +
54379 +    return (txdRail);
54380 +}
54381 +
54382 +static void
54383 +free_txd_rail (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
54384 +{
54385 +    unsigned long flags;
54386 +
54387 +    EP4_TXD_ASSERT_FREE(txdRail);
54388 +
54389 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
54390 +    
54391 +    list_add (&txdRail->txd_generic.Link, &xmtrRail->xmtr_freelist);
54392 +
54393 +    xmtrRail->xmtr_freecount++;
54394 +
54395 +    if (xmtrRail->xmtr_freewaiting)
54396 +    {
54397 +       xmtrRail->xmtr_freewaiting--;
54398 +       kcondvar_wakeupall (&xmtrRail->xmtr_freesleep, &xmtrRail->xmtr_freelock);
54399 +    }
54400 +
54401 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
54402 +}
54403 +
54404 +static void
54405 +bind_txd_rail (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
54406 +{
54407 +    EPRINTF6 (DBG_XMTR, "%s: bind_txd_rail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
54408 +             XMTR_TO_RAIL(txdRail->txd_generic.XmtrRail)->r_generic.Name, txd, txdRail, 
54409 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, txd->Envelope.Xid.Unique);
54410 +
54411 +    txd->TxdRail = &txdRail->txd_generic;
54412 +    txdRail->txd_generic.Txd = txd;
54413 +}
54414 +
54415 +static void
54416 +unbind_txd_rail (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
54417 +{
54418 +    EP4_TXD_ASSERT (txdRail, txd->TxdRail == &txdRail->txd_generic && txdRail->txd_generic.Txd == txd);
54419 +
54420 +    EPRINTF6 (DBG_XMTR, "%s: unbind_txd_rail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
54421 +             XMTR_TO_RAIL(txdRail->txd_generic.XmtrRail)->r_generic.Name, txd, txdRail, 
54422 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, txd->Envelope.Xid.Unique);
54423 +
54424 +
54425 +    txdRail->txd_generic.Txd = NULL; 
54426 +    txd->TxdRail = NULL;
54427 +}
54428 +
54429 +static void
54430 +initialise_txd (EP_TXD *txd, EP4_TXD_RAIL *txdRail, unsigned int phase)
54431 +{
54432 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail;
54433 +    EP4_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
54434 +    ELAN4_DEV     *dev      = rail->r_ctxt.ctxt_dev;
54435 +    
54436 +    /* Flush the Elan TLB if mappings have changed */
54437 +    ep_perrail_dvma_sync (&rail->r_generic);
54438 +    
54439 +    /* Initialise the per-rail fields in the envelope */
54440 +    txd->Envelope.TxdRail = txdRail->txd_elan_addr;
54441 +    txd->Envelope.NodeId  = rail->r_generic.Position.pos_nodeid;
54442 +
54443 +    /* Allocate a network error fixup cookie */
54444 +    txdRail->txd_cookie = ep4_neterr_cookie (rail, txd->NodeId) | EP4_COOKIE_STEN;
54445 +
54446 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
54447 +    if ( epdebug_check_sum ) 
54448 +       txd->Envelope.CheckSum = ep_calc_check_sum( txd->Xmtr->Subsys->Subsys.Sys, &txd->Envelope, txd->Envelope.Frags, txd->Envelope.nFrags);
54449 +    else
54450 +#endif
54451 +       txd->Envelope.CheckSum = 0;  
54452 +
54453 +    /* Initialise the per-rail events */
54454 +    switch (phase)
54455 +    {
54456 +    case EP_TXD_PHASE_ACTIVE:
54457 +    {
54458 +       unsigned int nsets = (txd->Envelope.nFrags ? txd->Envelope.nFrags : 1) + ( EP_IS_MULTICAST(txd->Envelope.Attr) ? 1 : 0);
54459 +
54460 +       if (! EP_IS_RPC(txd->Envelope.Attr))
54461 +       {
54462 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
54463 +                               E4_EVENT_INIT_VALUE (-32 * nsets, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54464 +
54465 +           txdRail->txd_main->txd_data = EP4_STATE_FINISHED;
54466 +       }
54467 +       else
54468 +       {
54469 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
54470 +                               E4_EVENT_INIT_VALUE(-32 * nsets , E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
54471 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
54472 +                               E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54473 +
54474 +           txdRail->txd_main->txd_data = EP4_STATE_ACTIVE;
54475 +       }
54476 +                  
54477 +       txdRail->txd_main->txd_env  = EP4_STATE_ACTIVE;
54478 +       txdRail->txd_main->txd_done = EP4_STATE_ACTIVE;
54479 +       break;
54480 +    }
54481 +
54482 +    case EP_TXD_PHASE_PASSIVE:
54483 +       EP4_TXD_ASSERT (txdRail, EP_IS_RPC(txd->Envelope.Attr));
54484 +       
54485 +       txdRail->txd_main->txd_env  = EP4_STATE_FINISHED;
54486 +       txdRail->txd_main->txd_data = EP4_STATE_FINISHED;
54487 +       txdRail->txd_main->txd_done = EP4_STATE_ACTIVE;
54488 +
54489 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
54490 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54491 +       break;
54492 +    }
54493 +
54494 +   if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
54495 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), NOP_CMD);
54496 +}
54497 +
54498 +static void
54499 +terminate_txd_rail (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
54500 +{
54501 +    EP4_SDRAM_ASSERT (TXD_TO_RAIL(txdRail),\
54502 +                     (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),\
54503 +                     E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));\
54504 +
54505 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54506 +    txdRail->txd_main->txd_env  = EP4_STATE_FREE;
54507 +    txdRail->txd_main->txd_data = EP4_STATE_FREE;
54508 +    txdRail->txd_main->txd_done = EP4_STATE_FREE;
54509 +
54510 +#if defined(DEBUG_ASSERT)
54511 +    if (sdram_assert)
54512 +    {
54513 +       ELAN4_DEV *dev = XMTR_TO_RAIL (xmtrRail)->r_ctxt.ctxt_dev;
54514 +
54515 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
54516 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
54517 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
54518 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54519 +    }
54520 +#endif 
54521 +}
54522 +
54523 +static void
54524 +defer_txd_rail (EP4_TXD_RAIL *txdRail)
54525 +{
54526 +    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
54527 +    EP4_RAIL        *rail     = XMTR_TO_RAIL(xmtrRail);
54528 +    ELAN4_DEV       *dev      = rail->r_ctxt.ctxt_dev;
54529 +    EP_COMMS_SUBSYS *subsys   = XMTR_TO_SUBSYS(xmtrRail);
54530 +
54531 +    EPRINTF5 (DBG_XMTR, "%s: defer_txd_rail: xmtrRail=%p txdRail=%p env/data (%d,%d) not finished\n",
54532 +             rail->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
54533 +                   
54534 +    /* transmit has completed, but the data dma has not completed
54535 +     * (because of network error fixup), we queue the txdRail onto a list
54536 +     * to be polled for completion later.
54537 +     */
54538 +    if (txdRail->txd_retry_time)
54539 +    {
54540 +       EP4_TXD_ASSERT (txdRail, (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) == 1 ||
54541 +                                 on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1));
54542 +
54543 +       list_del (&txdRail->txd_retry_link);
54544 +
54545 +       txdRail->txd_main->txd_env = EP4_STATE_FINISHED;
54546 +
54547 +       /* re-initialise the envelope event */
54548 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
54549 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54550 +    }
54551 +    
54552 +    txdRail->txd_retry_time = lbolt;
54553 +       
54554 +    list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]);
54555 +       
54556 +    ep_kthread_schedule (&subsys->Thread, lbolt);
54557 +}
54558 +
54559 +static void
54560 +finalise_txd (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
54561 +{
54562 +    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
54563 +
54564 +    EP4_TXD_ASSERT_FINISHED (txdRail);
54565 +
54566 +    unbind_txd_rail (txd, txdRail);
54567 +    
54568 +    terminate_txd_rail (xmtrRail, txdRail);
54569 +    free_txd_rail (xmtrRail, txdRail);
54570 +}
54571 +
54572 +static void
54573 +txd_interrupt (EP4_RAIL *rail, void *arg)
54574 +{
54575 +    EP4_TXD_RAIL    *txdRail  = (EP4_TXD_RAIL *) arg;
54576 +    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
54577 +    EP_XMTR         *xmtr     = xmtrRail->xmtr_generic.Xmtr;
54578 +    int              delay    = 1;
54579 +    EP_TXD          *txd;
54580 +    unsigned long    flags;
54581 +
54582 +    spin_lock_irqsave (&xmtr->Lock, flags);
54583 +    for (;;)
54584 +    {
54585 +       if (txdRail->txd_main->txd_done == EP4_STATE_FINISHED || txdRail->txd_main->txd_env == EP4_STATE_FAILED)
54586 +           break;
54587 +       
54588 +       /* The write to txd_done could be held up in the PCI bridge even though
54589 +        * we've seen the interrupt cookie.  Unlike elan3, there is no possibility
54590 +        * of spurious interrupts since we flush the command queues on node 
54591 +        * disconnection and the txcallback mechanism */
54592 +       mb();
54593 +
54594 +       if (delay > EP4_EVENT_FIRING_TLIMIT)
54595 +       {
54596 +           spin_unlock_irqrestore (&xmtr->Lock, flags);
54597 +
54598 +           EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "txd_interrupt - not finished\n");
54599 +           return;
54600 +       }
54601 +       DELAY (delay);
54602 +       delay <<= 1;
54603 +    }
54604 +
54605 +    txd = txdRail->txd_generic.Txd;
54606 +
54607 +    if (txdRail->txd_main->txd_env == EP4_STATE_FAILED)
54608 +    {
54609 +       spin_lock (&xmtrRail->xmtr_retrylock);
54610 +
54611 +       EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time == 0);                         /* cannot be on retry/poll list */
54612 +       EP4_TXD_ASSERT (txdRail, txdRail->txd_main->txd_done != EP4_STATE_FINISHED);    /* data xfer cannot have finished */
54613 +
54614 +       if (TxdShouldStabalise (&txdRail->txd_generic, &rail->r_generic))
54615 +       {
54616 +           EPRINTF6 (DBG_STABILISE, "%s: txd_interrupt: stablise xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
54617 +                     xmtrRail, txdRail, txd, txd->Envelope.Xid.Unique, txd->NodeId);
54618 +
54619 +           txdRail->txd_retry_time = lbolt;                    /* indicate on retry list */
54620 +           
54621 +           list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]);
54622 +       }
54623 +       else
54624 +       {
54625 +           EPRINTF6 (DBG_RETRY, "%s: txd_interrupt: retry xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
54626 +                     xmtrRail, txdRail, txd, txd->Envelope.Xid.Unique, txd->NodeId);
54627 +
54628 +           txdRail->txd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;            /* XXXX: backoff ? */
54629 +           
54630 +           list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]);
54631 +           
54632 +           ep_kthread_schedule (&rail->r_retry_thread, txdRail->txd_retry_time);
54633 +       }
54634 +       spin_unlock (&xmtrRail->xmtr_retrylock);
54635 +
54636 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
54637 +       return;
54638 +    }
54639 +
54640 +    EP4_TXD_ASSERT (txdRail, txd != NULL && !(EP_IS_NO_INTERRUPT(txd->Envelope.Attr)));
54641 +
54642 +    EPRINTF6 (DBG_XMTR, "%s: txd_interrupt: xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
54643 +             xmtrRail, txdRail, txd, txd->Envelope.Xid.Unique, txd->NodeId);
54644 +            
54645 +    if (txdRail->txd_main->txd_env != EP4_STATE_FINISHED || txdRail->txd_main->txd_data != EP4_STATE_FINISHED)
54646 +    {
54647 +       defer_txd_rail (txdRail);
54648 +
54649 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
54650 +    }
54651 +    else
54652 +    {
54653 +       /* remove from active transmit list */
54654 +       list_del (&txd->Link);
54655 +
54656 +       ep_xmtr_txd_stat(xmtr,txd);
54657 +
54658 +       finalise_txd (txd, txdRail);
54659 +       
54660 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
54661 +       
54662 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
54663 +       
54664 +       FreeTxd (xmtr, txd);
54665 +    }
54666 +}
54667 +
54668 +static void
54669 +poll_interrupt (EP4_RAIL *rail, void *arg)
54670 +{
54671 +    EP4_XMTR_RAIL   *xmtrRail = (EP4_XMTR_RAIL *) arg;
54672 +
54673 +    ep_poll_transmits (xmtrRail->xmtr_generic.Xmtr);
54674 +}
54675 +
54676 +void
54677 +issue_envelope_packet (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
54678 +{
54679 +    EP_TXD    *txd    = txdRail->txd_generic.Txd;
54680 +    ELAN4_CQ  *cq     = xmtrRail->xmtr_cq;
54681 +    E4_uint64 *blk0   = (E4_uint64 *) &txd->Envelope;
54682 +    E4_uint64 *blk1   = EP_HAS_PAYLOAD(txd->Envelope.Attr) ? (E4_uint64 *) &txd->Payload : NULL;
54683 +    E4_Addr    qaddr  = EP_MSGQ_ADDR(txd->Service);
54684 +
54685 +    EP4_SDRAM_ASSERT (TXD_TO_RAIL(txdRail),\
54686 +                     (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),\
54687 +                     E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));\
54688 +
54689 +    elan4_open_packet (cq, OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(txd->NodeId)));
54690 +    elan4_sendtrans0 (cq, TR_INPUT_Q_GETINDEX, EP_MSGQ_ADDR(txd->Service));
54691 +           
54692 +	/* send the envelope, and the payload if present */
54693 +    if (blk0) elan4_sendtransp (cq, TR_WRITE(128 >> 3, 0, TR_DATATYPE_BYTE), 0,   blk0);
54694 +    if (blk1) elan4_sendtransp (cq, TR_WRITE(128 >> 3, 0, TR_DATATYPE_BYTE), 128, blk1);
54695 +
54696 +    elan4_sendtrans1 (cq, TR_INPUT_Q_COMMIT, qaddr, txdRail->txd_cookie);
54697 +
54698 +    elan4_guard (cq, GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (EP4_STEN_RETRYCOUNT));
54699 +    elan4_write_dword_cmd (cq, txdRail->txd_main_addr + offsetof (EP4_TXD_RAIL_MAIN, txd_env), EP4_STATE_FINISHED);
54700 +           
54701 +    elan4_guard (cq, GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (EP4_STEN_RETRYCOUNT));
54702 +    elan4_set_event_cmd (cq, txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_env));
54703 +    
54704 +    elan4_write_dword_cmd (cq, xmtrRail->xmtr_main_addr + offsetof (EP4_XMTR_RAIL_MAIN, xmtr_flowcnt), ++xmtrRail->xmtr_flowcnt);
54705 +}
54706 +
54707 +void
54708 +ep4xmtr_flush_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
54709 +{
54710 +    EP4_RAIL       *rail      = XMTR_TO_RAIL (xmtrRail);
54711 +    EP4_COMMS_RAIL *commsRail = XMTR_TO_COMMS (xmtrRail);
54712 +    struct list_head *el, *nel;
54713 +    unsigned long flags;
54714 +
54715 +    switch (rail->r_generic.CallbackStep)
54716 +    {
54717 +    case EP_CB_FLUSH_FILTERING:
54718 +       /* need to acquire/release the Lock to ensure that the node state
54719 +        * transition has been noticed and no new envelopes are queued to 
54720 +        * nodes which are passivating. */
54721 +       spin_lock_irqsave (&xmtr->Lock, flags);
54722 +
54723 +       /* Then we insert a "setevent" into the command queue to flush
54724 +        * through the envelopes which have already been submitted */
54725 +       ep4comms_flush_setevent (commsRail, xmtrRail->xmtr_cq);
54726 +
54727 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
54728 +
54729 +       break;
54730 +
54731 +    case EP_CB_FLUSH_FLUSHING:
54732 +       /* remove any envelopes which are retrying to nodes which are going down */
54733 +       spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
54734 +       list_for_each_safe (el, nel, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) {
54735 +           EP4_TXD_RAIL *txdRail  = list_entry (el, EP4_TXD_RAIL, txd_retry_link);
54736 +           EP_TXD       *txd      = txdRail->txd_generic.Txd;
54737 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
54738 +           
54739 +           EP4_TXD_ASSERT (txdRail, txdRail->txd_main->txd_env == EP4_STATE_FAILED);
54740 +           
54741 +           if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
54742 +           {
54743 +		EPRINTF2 (DBG_XMTR, "%s: ep4xmtr_flush_callback: removing txdRail %p from retry list\n", rail->r_generic.Name, txdRail);
54744 +               
54745 +               EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
54746 +
54747 +               list_del (&txdRail->txd_retry_link);
54748 +               list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]);
54749 +           }
54750 +       }
54751 +       spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
54752 +
54753 +       /* Determine whether we have active or passive messages to 
54754 +        * any node which is passivating */
54755 +       spin_lock_irqsave (&xmtr->Lock, flags);
54756 +       list_for_each (el, &xmtr->ActiveDescList) {
54757 +           EP_TXD       *txd      = list_entry (el, EP_TXD, Link);
54758 +           EP4_TXD_RAIL *txdRail  = (EP4_TXD_RAIL *) txd->TxdRail;
54759 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
54760 +           
54761 +           if (txdRail == NULL || txdRail->txd_generic.XmtrRail != &xmtrRail->xmtr_generic || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
54762 +               continue;
54763 +           
54764 +           EPRINTF5 (DBG_XMTR, "%s: flush txd=%p txdRail=%p data=%llx done=%llx\n", rail->r_generic.Name,
54765 +                     txd, txdRail, txdRail->txd_main->txd_data, txdRail->txd_main->txd_done);
54766 +
54767 +           if (EP_IS_RPC(txd->Envelope.Attr))
54768 +           {
54769 +               if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE)
54770 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
54771 +               else if (txdRail->txd_main->txd_done == EP4_STATE_ACTIVE)
54772 +                   nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
54773 +           }
54774 +           else
54775 +           {
54776 +               if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE)
54777 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
54778 +           }
54779 +       }
54780 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
54781 +       break;
54782 +
54783 +    default:
54784 +       panic ("ep4xmtr_flush_callback: invalid callback step\n");
54785 +       break;
54786 +    }
54787 +}
54788 +
54789 +void
54790 +ep4xmtr_failover_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
54791 +{
54792 +    EP4_RAIL         *rail   = XMTR_TO_RAIL (xmtrRail);
54793 +    struct list_head  txdList;
54794 +    struct list_head *el, *nel;
54795 +    unsigned long flags;
54796 +
54797 +    INIT_LIST_HEAD (&txdList);
54798 +
54799 +    spin_lock_irqsave (&xmtr->Lock, flags);
54800 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
54801 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
54802 +       EP4_TXD_RAIL *txdRail   = (EP4_TXD_RAIL *) txd->TxdRail;
54803 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
54804 +           
54805 +       /* Only progress relocation of txd's bound to this rail */
54806 +       if (! TXD_BOUND2RAIL (txdRail, xmtrRail) || nodeRail->State != EP_NODE_PASSIVATED)
54807 +           continue;
54808 +       
54809 +       /* XXXX - no rail failover for now ....*/
54810 +
54811 +       EPRINTF4 (DBG_XMTR, "%s: ep4xmtr_failover_callback - xmtr %p txd %p node %d completed\n", rail->r_generic.Name, xmtr, txd, txd->NodeId);
54812 +    }
54813 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54814 +
54815 +    while (! list_empty (&txdList)) 
54816 +    {
54817 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
54818 +
54819 +       list_del (&txd->Link);
54820 +
54821 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
54822 +       
54823 +       FreeTxd (xmtr, txd);
54824 +    }
54825 +}
54826 +
54827 +
54828 +void
54829 +ep4xmtr_disconnect_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
54830 +{
54831 +    EP4_RAIL         *rail = XMTR_TO_RAIL (xmtrRail);
54832 +    ELAN4_DEV        *dev  = rail->r_ctxt.ctxt_dev;
54833 +    struct list_head *el, *nel;
54834 +    struct list_head  txdList;
54835 +    unsigned long flags;
54836 +    
54837 +    INIT_LIST_HEAD (&txdList);
54838 +
54839 +    spin_lock_irqsave (&xmtr->Lock, flags);
54840 +
54841 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
54842 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
54843 +       EP4_TXD_RAIL *txdRail   = (EP4_TXD_RAIL *) txd->TxdRail;
54844 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
54845 +           
54846 +       if ( ! TXD_BOUND2RAIL (txdRail, xmtrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
54847 +           continue;
54848 +       
54849 +       if (txdRail->txd_main->txd_done == EP4_STATE_ACTIVE)
54850 +       {
54851 +
54852 +           EPRINTF8 (DBG_DISCON, "ep4xmtr_disconnect_callback: txdRail=%p : events %llx,%llx,%llx done %llx,%llx,%llx retry %lx\n",txdRail,
54853 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)),
54854 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)),
54855 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)),
54856 +                     txdRail->txd_main->txd_env, txdRail->txd_main->txd_data, txdRail->txd_main->txd_done,
54857 +                     txdRail->txd_retry_time);
54858 +                      
54859 +           if (txdRail->txd_retry_time)
54860 +           {
54861 +               /* re-initialise the envelope event */
54862 +               elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
54863 +                                   E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54864 +               
54865 +               EP4_TXD_ASSERT (txdRail, on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1);
54866 +
54867 +               txdRail->txd_retry_time  = 0;
54868 +
54869 +               list_del (&txdRail->txd_retry_link);
54870 +           }
54871 +
54872 +           /* Remove from active list */
54873 +           list_del (&txd->Link);
54874 +       
54875 +           unbind_txd_rail (txd, txdRail);
54876 +
54877 +           terminate_txd_rail (xmtrRail, txdRail);
54878 +           free_txd_rail (xmtrRail, txdRail);
54879 +           
54880 +           EPRINTF4 (DBG_XMTR, "%s: ep4xmtr_disconnect_callback - xmtr %p txd %p node %d not connected\n", rail->r_generic.Name, xmtr, txd, txd->NodeId);
54881 +
54882 +           /* add to the list of txd's which are to be completed */
54883 +           list_add_tail (&txd->Link, &txdList);
54884 +       }
54885 +    }
54886 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54887 +    
54888 +    while (! list_empty (&txdList)) 
54889 +    {
54890 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
54891 +
54892 +       list_del (&txd->Link);
54893 +
54894 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
54895 +       
54896 +       FreeTxd (xmtr, txd);
54897 +    }
54898 +}
54899 +
54900 +void
54901 +ep4xmtr_neterr_flush (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
54902 +{
54903 +    EP4_COMMS_RAIL   *commsRail = XMTR_TO_COMMS (xmtrRail);
54904 +    unsigned long flags;
54905 +    
54906 +    spin_lock_irqsave (&xmtr->Lock, flags);
54907 +
54908 +    /* insert a "setevent" into the command queue to flush
54909 +     * through the envelopes which have already been submitted */
54910 +    ep4comms_flush_setevent (commsRail, xmtrRail->xmtr_cq);
54911 +
54912 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54913 +}
54914 +
54915 +void
54916 +ep4xmtr_neterr_check (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
54917 +{
54918 +    EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail);
54919 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
54920 +    struct list_head *el;
54921 +    unsigned long flags;
54922 +    
54923 +    spin_lock_irqsave (&xmtr->Lock, flags);
54924 +    list_for_each (el, &xmtr->ActiveDescList) {
54925 +       EP_TXD       *txd     = list_entry (el, EP_TXD, Link);
54926 +       EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) txd->TxdRail;
54927 +           
54928 +       if ( ! TXD_BOUND2RAIL (txdRail, xmtrRail) || txd->NodeId != nodeId)
54929 +           continue;
54930 +       
54931 +       /* The only non-dma associated with a txd is the initial sten packet, if it has been acked 
54932 +        * and the neterr cookie matches, then change it to look like it's been acked since the
54933 +        * INPUT_Q_COMMIT transaction has already been executed */
54934 +       if (txdRail->txd_main->txd_env == EP4_STATE_FAILED && (txdRail->txd_cookie == cookies[0] || txdRail->txd_cookie == cookies[1]))
54935 +       {
54936 +           EPRINTF4 (DBG_NETWORK_ERROR, "%s: ep4xmtr_neterr_callback: cookie <%lld%s%s%s%s> matches txd %p txdRail %p\n", 
54937 +                    rail->r_generic.Name, EP4_COOKIE_STRING(txdRail->txd_cookie), txd, txdRail);
54938 +
54939 +           EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
54940 +
54941 +           txdRail->txd_main->txd_env = EP4_STATE_FINISHED;
54942 +
54943 +           /* re-initialise the envelope event */
54944 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
54945 +                               E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54946 +           
54947 +           spin_lock (&xmtrRail->xmtr_retrylock);
54948 +
54949 +           EP4_TXD_ASSERT (txdRail, (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) == 1 ||
54950 +                                     on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1));
54951 +
54952 +           txdRail->txd_retry_time = 0;
54953 +
54954 +           list_del (&txdRail->txd_retry_link);
54955 +
54956 +           spin_unlock (&xmtrRail->xmtr_retrylock);
54957 +       }
54958 +    }
54959 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54960 +}
54961 +
54962 +int
54963 +ep4xmtr_poll_txd (EP_XMTR_RAIL *x, EP_TXD_RAIL *t, int how)
54964 +{
54965 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
54966 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
54967 +    EP4_TXD_RAIL  *txdRail  = (EP4_TXD_RAIL *) t;
54968 +    EP_TXD        *txd      = txdRail->txd_generic.Txd;
54969 +
54970 +    if (! EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
54971 +       return 0;
54972 +
54973 +    switch (how)
54974 +    {
54975 +    case ENABLE_TX_CALLBACK:
54976 +       if (!EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr))
54977 +       {
54978 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
54979 +                               INTERRUPT_CMD | (xmtrRail->xmtr_intcookie.int_val << E4_MAIN_INT_SHIFT));
54980 +
54981 +           txd->Envelope.Attr |= EP_INTERRUPT_ENABLED;
54982 +       }
54983 +       break;
54984 +
54985 +    case DISABLE_TX_CALLBACK:
54986 +       if (EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr))
54987 +       {
54988 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), NOP_CMD);
54989 +
54990 +           txd->Envelope.Attr &= ~EP_INTERRUPT_ENABLED;
54991 +       }
54992 +    }
54993 +    
54994 +    if (txdRail->txd_main->txd_env == EP4_STATE_FINISHED && txdRail->txd_main->txd_data == EP4_STATE_FINISHED && txdRail->txd_main->txd_done == EP4_STATE_FINISHED)
54995 +    {
54996 +       EPRINTF3 (DBG_XMTR, "%s: ep4xmtr_poll_txd: txd=%p XID=%llx completed\n",
54997 +                 XMTR_TO_RAIL (xmtrRail)->r_generic.Name, txd, txd->Envelope.Xid.Unique);
54998 +       
54999 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
55000 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
55001 +
55002 +
55003 +       ep_xmtr_txd_stat(xmtrRail->xmtr_generic.Xmtr,txd);
55004 +
55005 +       finalise_txd (txd, txdRail);
55006 +
55007 +       return 1;
55008 +    }
55009 +
55010 +    return 0;
55011 +}
55012 +
55013 +int
55014 +ep4xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *x, unsigned int phase)
55015 +{
55016 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
55017 +    EP4_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
55018 +    EP4_TXD_RAIL  *txdRail;
55019 +    unsigned long  flags;
55020 +
55021 +    if ((txdRail = get_txd_rail (xmtrRail)) == NULL)
55022 +       return 0;
55023 +    
55024 +    switch (phase)
55025 +    {
55026 +    case EP_TXD_PHASE_ACTIVE:
55027 +       if (rail->r_generic.Nodes[txd->NodeId].State != EP_NODE_CONNECTED)
55028 +       {
55029 +           EPRINTF2 (DBG_XMTR, "%s: ep4xmtr_bind_txd: node %u not connected on this rail\n", rail->r_generic.Name, txd->NodeId);
55030 +
55031 +           free_txd_rail (xmtrRail, txdRail);
55032 +           return 0;
55033 +       }
55034 +
55035 +       initialise_txd (txd, txdRail, EP_TXD_PHASE_ACTIVE);
55036 +
55037 +       bind_txd_rail (txd, txdRail);
55038 +       
55039 +       /* generate the STEN packet to transfer the envelope */
55040 +       spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
55041 +       if (((int) (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)) < EP4_XMTR_FLOWCNT)
55042 +           issue_envelope_packet (xmtrRail, txdRail);
55043 +       else
55044 +       {
55045 +           txdRail->txd_retry_time = lbolt;
55046 +
55047 +           list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]);
55048 +
55049 +           ep_kthread_schedule (&rail->r_retry_thread, txdRail->txd_retry_time);
55050 +       }
55051 +       spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
55052 +       break;
55053 +
55054 +    case EP_TXD_PHASE_PASSIVE:
55055 +       initialise_txd (txd, txdRail, EP_TXD_PHASE_PASSIVE);
55056 +       
55057 +       EP_XMTR_OP (txd->TxdRail->XmtrRail, UnbindTxd) (txd, EP_TXD_PHASE_PASSIVE);     /* unbind from existing rail */
55058 +
55059 +       bind_txd_rail (txd, txdRail);                                                   /* and bind it to our new rail */
55060 +       break;
55061 +    }
55062 +
55063 +    return 1;
55064 +}
55065 +
55066 +void
55067 +ep4xmtr_unbind_txd (EP_TXD *txd, unsigned int phase)
55068 +{
55069 +    /* XXXX - TBD */
55070 +}
55071 +
55072 +long
55073 +ep4xmtr_check (EP_XMTR_RAIL *x, long nextRunTime)
55074 +{
55075 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
55076 +    EP_XMTR       *xmtr     = xmtrRail->xmtr_generic.Xmtr;
55077 +    struct list_head  txdList;
55078 +    struct list_head *el, *nel;
55079 +    unsigned long flags;
55080 +
55081 +    INIT_LIST_HEAD (&txdList);
55082 +
55083 +    if (xmtrRail->xmtr_freecount < ep_txd_lowat && !alloc_txd_block (xmtrRail))
55084 +    {
55085 +       EPRINTF1 (DBG_XMTR,"%s: failed to grow txd rail pool\n", XMTR_TO_RAIL(xmtrRail)->r_generic.Name);
55086 +               
55087 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
55088 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
55089 +    }
55090 +
55091 +    spin_lock_irqsave (&xmtr->Lock, flags);
55092 +    list_for_each_safe (el, nel, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]) {
55093 +       EP4_TXD_RAIL *txdRail = list_entry (el, EP4_TXD_RAIL, txd_retry_link);
55094 +
55095 +       if (txdRail->txd_main->txd_env != EP4_STATE_FINISHED || txdRail->txd_main->txd_data != EP4_STATE_FINISHED)
55096 +       {
55097 +           ep_debugf (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) not finished\n",
55098 +                      XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
55099 +                   
55100 +           nextRunTime = lbolt + HZ;
55101 +       }
55102 +       else
55103 +       {
55104 +           EP_TXD *txd = txdRail->txd_generic.Txd;
55105 +
55106 +           ep_debugf (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) finished\n",
55107 +                      XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
55108 +
55109 +           EPRINTF5 (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) finished\n",
55110 +                     XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
55111 +           EPRINTF3  (DBG_XMTR, "%s:    done %x data %x\n", XMTR_TO_RAIL(xmtrRail)->r_generic.Name,
55112 +                      txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_done),
55113 +                      txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_data));
55114 +
55115 +           EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
55116 +
55117 +           /* remove txd from active list and add to list to call handlers */
55118 +           list_del (&txd->Link);
55119 +           list_add_tail (&txd->Link, &txdList);
55120 +
55121 +           /* remove and free of txdRail */
55122 +           txdRail->txd_retry_time = 0;
55123 +           list_del (&txdRail->txd_retry_link);
55124 +
55125 +           finalise_txd (txd, txdRail);
55126 +
55127 +       }
55128 +    }
55129 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55130 +
55131 +    while (! list_empty (&txdList))
55132 +    {
55133 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
55134 +
55135 +       list_del (&txd->Link);
55136 +
55137 +       ep_xmtr_txd_stat (xmtr,txd);
55138 +
55139 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
55140 +
55141 +       FreeTxd (xmtr, txd);
55142 +    }
55143 +
55144 +    return nextRunTime;
55145 +}
55146 +
55147 +unsigned long
55148 +ep4xmtr_retry (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
55149 +{
55150 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) arg;
55151 +    ELAN4_DEV     *dev      = XMTR_TO_DEV(xmtrRail);
55152 +    unsigned long  flags;
55153 +
55154 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
55155 +    while (! list_empty (&xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]))
55156 +    {
55157 +       EP4_TXD_RAIL *txdRail = list_entry (xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY].next, EP4_TXD_RAIL, txd_retry_link);
55158 +
55159 +       if (BEFORE (lbolt, txdRail->txd_retry_time))
55160 +       {
55161 +           if (nextRunTime == 0 || AFTER (nextRunTime, txdRail->txd_retry_time))
55162 +               nextRunTime = txdRail->txd_retry_time;
55163 +
55164 +           break;
55165 +       }
55166 +
55167 +       if (((int) (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)) < EP4_XMTR_FLOWCNT)
55168 +       {
55169 +           txdRail->txd_retry_time = 0;
55170 +
55171 +           list_del (&txdRail->txd_retry_link);
55172 +           
55173 +           /* re-initialise the envelope event */
55174 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
55175 +                               E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55176 +           
55177 +           EPRINTF3 (DBG_RETRY, "%s: ep4xmtr_retry: re-issue envelope packet to %d for txdRail=%p\n", 
55178 +                     rail->r_generic.Name, txdRail->txd_generic.Txd->Envelope.NodeId, txdRail);
55179 +           
55180 +           txdRail->txd_main->txd_env = EP4_STATE_ACTIVE;
55181 +           
55182 +           issue_envelope_packet (xmtrRail, txdRail);
55183 +       }
55184 +       else
55185 +       {
55186 +           EPRINTF2 (DBG_RETRY, "%s: ep4xmtr_retry: cannot re-issue envelope packet to %d\n", rail->r_generic.Name, txdRail->txd_generic.Txd->Envelope.NodeId);
55187 +
55188 +           if (nextRunTime == 0 || AFTER (nextRunTime, txdRail->txd_retry_time))
55189 +               nextRunTime = txdRail->txd_retry_time;
55190 +
55191 +           break;
55192 +       }
55193 +    }
55194 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
55195 +    
55196 +    return nextRunTime;
55197 +}
55198 +
55199 +void
55200 +ep4xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
55201 +{
55202 +    EP4_RAIL         *rail   = (EP4_RAIL *) commsRail->Rail;
55203 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
55204 +    EP4_XMTR_RAIL    *xmtrRail;
55205 +    unsigned long     flags;
55206 +    int                      i;
55207 +
55208 +    KMEM_ZALLOC (xmtrRail, EP4_XMTR_RAIL *, sizeof (EP4_XMTR_RAIL), 1);
55209 +
55210 +    spin_lock_init (&xmtrRail->xmtr_freelock);
55211 +    kcondvar_init  (&xmtrRail->xmtr_freesleep);
55212 +    INIT_LIST_HEAD (&xmtrRail->xmtr_freelist);
55213 +    INIT_LIST_HEAD (&xmtrRail->xmtr_blocklist);
55214 +
55215 +    for (i = 0; i < EP4_TXD_NUM_LISTS; i++)
55216 +       INIT_LIST_HEAD (&xmtrRail->xmtr_retrylist[i]);
55217 +    spin_lock_init (&xmtrRail->xmtr_retrylock);
55218 +
55219 +    xmtrRail->xmtr_generic.CommsRail = commsRail;
55220 +    xmtrRail->xmtr_generic.Xmtr      = xmtr;
55221 +
55222 +    xmtrRail->xmtr_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_XMTR_RAIL_MAIN), 0, &xmtrRail->xmtr_main_addr);
55223 +    xmtrRail->xmtr_cq   = elan4_alloccq (&rail->r_ctxt, EP4_XMTR_CQSIZE, CQ_EnableAllBits, CQ_Priority);
55224 +
55225 +    xmtrRail->xmtr_retryops.op_func = ep4xmtr_retry;
55226 +    xmtrRail->xmtr_retryops.op_arg  = xmtrRail;
55227 +
55228 +    ep4_add_retry_ops (rail, &xmtrRail->xmtr_retryops);
55229 +
55230 +    ep4_register_intcookie (rail, &xmtrRail->xmtr_intcookie, xmtrRail->xmtr_main_addr,
55231 +                           poll_interrupt, xmtrRail);
55232 +
55233 +    spin_lock_irqsave (&xmtr->Lock, flags);
55234 +
55235 +    xmtr->Rails[commsRail->Rail->Number] = &xmtrRail->xmtr_generic;
55236 +    xmtr->RailMask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
55237 +
55238 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55239 +
55240 +    ep_kthread_schedule (&subsys->Thread, lbolt);
55241 +
55242 +    ep_procfs_xmtr_add_rail(&(xmtrRail->xmtr_generic));
55243 +}
55244 +
55245 +void
55246 +ep4xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
55247 +{
55248 +    EP4_RAIL         *rail     = (EP4_RAIL *) commsRail->Rail;
55249 +    EP4_XMTR_RAIL    *xmtrRail = (EP4_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number];
55250 +    unsigned long     flags;
55251 +
55252 +    /* rail mask set as not usable */
55253 +    spin_lock_irqsave (&xmtr->Lock, flags);
55254 +    xmtr->RailMask &= ~EP_RAIL2RAILMASK (rail->r_generic.Number);
55255 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55256 +
55257 +    ep_procfs_xmtr_del_rail(&(xmtrRail->xmtr_generic));
55258 +
55259 +    /* wait for all txd's for this rail to become free */
55260 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
55261 +    while (xmtrRail->xmtr_freecount != xmtrRail->xmtr_totalcount)
55262 +    {
55263 +       xmtrRail->xmtr_freewaiting++;
55264 +       kcondvar_wait (&xmtrRail->xmtr_freesleep, &xmtrRail->xmtr_freelock, &flags);
55265 +    }
55266 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
55267 +
55268 +    spin_lock_irqsave (&xmtr->Lock, flags);
55269 +    xmtr->Rails[commsRail->Rail->Number] = NULL;
55270 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55271 +
55272 +    /* all the txd's associated with DescBlocks must be in the freelist */
55273 +    ASSERT (xmtrRail->xmtr_totalcount == xmtrRail->xmtr_freecount);
55274 +
55275 +    /* run through the DescBlockList deleting them */
55276 +    while (!list_empty (&xmtrRail->xmtr_blocklist))
55277 +       free_txd_block (xmtrRail, list_entry(xmtrRail->xmtr_blocklist.next, EP4_TXD_RAIL_BLOCK , blk_link));
55278 +    
55279 +    /* it had better be empty after that */
55280 +    ASSERT ((xmtrRail->xmtr_freecount == 0) && (xmtrRail->xmtr_totalcount == 0));
55281 +
55282 +    ep4_deregister_intcookie (rail, &xmtrRail->xmtr_intcookie);
55283 +
55284 +    ep4_remove_retry_ops (rail, &xmtrRail->xmtr_retryops);
55285 +
55286 +    elan4_freecq (&rail->r_ctxt, xmtrRail->xmtr_cq);
55287 +    ep_free_main (&rail->r_generic, xmtrRail->xmtr_main_addr, sizeof (EP4_XMTR_RAIL_MAIN));
55288 +
55289 +    spin_lock_destroy (&xmtrRail->xmtr_retrylock);
55290 +
55291 +    spin_lock_destroy (&xmtrRail->xmtr_freelock);
55292 +    kcondvar_destroy (&xmtrRail->xmtr_freesleep);
55293 +
55294 +    KMEM_FREE (xmtrRail, sizeof (EP4_XMTR_RAIL));
55295 +}
55296 +
55297 +void
55298 +ep4xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *x)
55299 +{
55300 +    EP4_XMTR_RAIL    *xmtrRail     = (EP4_XMTR_RAIL *) x;
55301 +    EP4_RAIL         *rail         = XMTR_TO_RAIL (xmtrRail);
55302 +    unsigned int      freeCount    = 0;
55303 +    unsigned int      pollCount    = 0;
55304 +    unsigned int      stalledCount = 0;
55305 +    unsigned int      retryCount   = 0;
55306 +    struct list_head *el;
55307 +    unsigned long     flags;
55308 +
55309 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
55310 +    list_for_each (el, &xmtrRail->xmtr_freelist)
55311 +       freeCount++;
55312 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
55313 +
55314 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
55315 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL])
55316 +       pollCount++;
55317 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED])
55318 +       stalledCount++;
55319 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY])
55320 +       retryCount++;
55321 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
55322 +
55323 +    (di->func)(di->arg, "        rail=%d free=%d total=%d (%d) (retry %d,%d,%d)\n",
55324 +              rail->r_generic.Number, xmtrRail->xmtr_freecount, xmtrRail->xmtr_totalcount, 
55325 +              freeCount, pollCount, stalledCount, retryCount);
55326 +    (di->func)(di->arg, "        cq %d flowcnt %lld,%lld\n", elan4_cq2num (xmtrRail->xmtr_cq), xmtrRail->xmtr_flowcnt, xmtrRail->xmtr_main->xmtr_flowcnt);
55327 +}
55328 +
55329 +void
55330 +ep4xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *t)
55331 +{
55332 +    EP4_TXD_RAIL      *txdRail  = (EP4_TXD_RAIL *) t;
55333 +    EP4_XMTR_RAIL     *xmtrRail = TXD_TO_XMTR(txdRail);
55334 +    EP4_TXD_RAIL_MAIN *txdMain  = txdRail->txd_main;
55335 +    sdramaddr_t        txdElan  = txdRail->txd_elan;
55336 +    EP4_RAIL          *rail     = XMTR_TO_RAIL (xmtrRail);
55337 +    ELAN4_DEV         *dev      = XMTR_TO_DEV (xmtrRail);
55338 +    char             *list     = "";
55339 +    unsigned long      flags;
55340 +
55341 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
55342 +    if (txdRail->txd_retry_time)
55343 +    {
55344 +       if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]))
55345 +           list = " poll";
55346 +       else if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]))
55347 +           list = " stalled";
55348 +       else if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]))
55349 +           list = " retry";
55350 +       else
55351 +           list = " ERROR";
55352 +    }
55353 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
55354 +
55355 +    (di->func)(di->arg, "      Rail %d txd %p elan %lx (%x) main %p (%x) cookie <%lld%s%s%s%s> ecq %d %s\n", rail->r_generic.Number,
55356 +              txdRail, txdRail->txd_elan, txdRail->txd_elan_addr, txdRail->txd_main, txdRail->txd_main_addr, 
55357 +              EP4_COOKIE_STRING(txdRail->txd_cookie), elan4_cq2num (txdRail->txd_ecq->ecq_cq), list);
55358 +    
55359 +    (di->func)(di->arg, "        env  %016llx %016llx %016llx -> %016llx\n",
55360 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)),
55361 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_Params[0])),
55362 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_Params[1])),
55363 +              txdMain->txd_env);
55364 +    (di->func)(di->arg, "        data %016llx %016llx %016llx -> %016llx\n",
55365 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)),
55366 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_Params[0])),
55367 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_Params[1])),
55368 +              txdMain->txd_data);
55369 +    (di->func)(di->arg, "        done %016llx %016llx %016llx -> %016llx\n",
55370 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)),
55371 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_Params[0])),
55372 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_Params[1])),
55373 +              txdMain->txd_done);
55374 +}
55375 +
55376 +int
55377 +ep4xmtr_check_txd_state (EP_TXD *txd) 
55378 +{
55379 +    EP4_TXD_RAIL  *txdRail  = (EP4_TXD_RAIL *) txd->TxdRail;
55380 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail;
55381 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
55382 +    unsigned long  flags;
55383 +
55384 +    if (txdRail->txd_main->txd_env == EP4_STATE_FINISHED)
55385 +       return 0;
55386 +
55387 +    EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
55388 +
55389 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
55390 +    EP4_TXD_ASSERT (txdRail, on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1);
55391 +
55392 +    list_del (&txdRail->txd_retry_link);
55393 +    txdRail->txd_retry_time  = 0;
55394 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
55395 +    
55396 +    /* re-initialise the envelope event */
55397 +    elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
55398 +                       E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55399 +           
55400 +    unbind_txd_rail (txd, txdRail);
55401 +
55402 +    terminate_txd_rail (xmtrRail, txdRail);
55403 +    free_txd_rail (xmtrRail, txdRail);
55404 +
55405 +    return 1;
55406 +}
55407 +
55408 +void
55409 +ep4xmtr_fillout_rail_stats(EP_XMTR_RAIL *xmtr_rail, char *str) {
55410 +    /* no stats here yet */
55411 +    /* EP4_XMTR_RAIL * ep4xmtr_rail = (EP4_XMTR_RAIL *) xmtr_rail; */
55412 +}
55413 +
55414 +
55415 +/*
55416 + * Local variables:
55417 + * c-file-style: "stroustrup"
55418 + * End:
55419 + */
55420 Index: linux-2.4.21/drivers/net/qsnet/ep/ep_procfs.c
55421 ===================================================================
55422 --- linux-2.4.21.orig/drivers/net/qsnet/ep/ep_procfs.c  2004-02-23 16:02:56.000000000 -0500
55423 +++ linux-2.4.21/drivers/net/qsnet/ep/ep_procfs.c       2005-06-01 23:12:54.660429832 -0400
55424 @@ -0,0 +1,331 @@
55425 +
55426 +/*
55427 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
55428 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
55429 + *
55430 + *    For licensing information please see the supplied COPYING file
55431 + *
55432 + */
55433 +
55434 +#ident "@(#)$Id: ep_procfs.c,v 1.5.6.3 2004/11/30 10:10:57 mike Exp $"
55435 +/*      $Source: /cvs/master/quadrics/epmod/ep_procfs.c,v $*/
55436 +
55437 +#include <qsnet/kernel.h>
55438 +
55439 +#include <elan/kcomm.h>
55440 +#include <elan/epsvc.h>
55441 +#include <elan/epcomms.h>
55442 +
55443 +#include "cm.h"
55444 +#include "debug.h"
55445 +#include "conf_linux.h"
55446 +
55447 +#include "kcomm_vp.h"
55448 +#include "kcomm_elan4.h"
55449 +#include "epcomms_elan4.h"
55450 +
55451 +#include <qsnet/procfs_linux.h>
55452 +
55453 +struct proc_dir_entry *ep_procfs_xmtr_root;
55454 +struct proc_dir_entry *ep_procfs_rcvr_root;
55455 +
55456 +static int
55457 +ep_proc_open (struct inode *inode, struct file *file)
55458 +{
55459 +    PROC_PRIVATE *pr;
55460 +    int           pages = 4;
55461 +
55462 +    if ((pr = kmalloc (sizeof (PROC_PRIVATE), GFP_KERNEL)) == NULL)
55463 +       return (-ENOMEM);
55464 +    
55465 +    do {       
55466 +       pr->pr_data_len = PAGESIZE * pages;
55467 +
55468 +       KMEM_ZALLOC (pr->pr_data, char *, pr->pr_data_len, 1);
55469 +       if (pr->pr_data == NULL) 
55470 +       { 
55471 +           pr->pr_len  = sprintf (pr->pr_data, "Out of Memory\n");
55472 +           break;
55473 +       } 
55474 +       
55475 +       pr->pr_off     = 0;
55476 +       pr->pr_len     = 0;
55477 +       pr->pr_data[0] = 0;
55478 +       
55479 +       pr->pr_di.func  = proc_character_fill;
55480 +       pr->pr_di.arg   = (long)pr;
55481 +       
55482 +       if (!strcmp("debug_xmtr", file->f_dentry->d_iname)) 
55483 +       {   
55484 +           EP_XMTR *xmtr = (EP_XMTR *)(PDE(inode)->data);
55485 +           ep_display_xmtr (&pr->pr_di, xmtr);
55486 +       }
55487 +       
55488 +       if (!strcmp("debug_rcvr", file->f_dentry->d_iname)) 
55489 +       {
55490 +           EP_RCVR *rcvr = (EP_RCVR *)(PDE(inode)->data);
55491 +           ep_display_rcvr (&pr->pr_di, rcvr, 0);
55492 +       }
55493 +       
55494 +       if (!strcmp("debug_full", file->f_dentry->d_iname)) 
55495 +       {
55496 +           EP_RCVR *rcvr = (EP_RCVR *)(PDE(inode)->data);
55497 +           ep_display_rcvr (&pr->pr_di, rcvr, 1);
55498 +       }
55499 +
55500 +       if ( pr->pr_len < pr->pr_data_len) 
55501 +           break; /* we managed to get all the output into the buffer */
55502 +
55503 +       pages++;
55504 +       KMEM_FREE ( pr->pr_data,  pr->pr_data_len);
55505 +    } while (1);
55506 +       
55507 +
55508 +    file->private_data = (void *) pr;
55509 +
55510 +    MOD_INC_USE_COUNT;
55511 +    return (0);
55512 +}
55513 +
55514 +struct file_operations ep_proc_operations = 
55515 +{
55516 +    read:      proc_read,
55517 +    open:      ep_proc_open,
55518 +    release:   proc_release,
55519 +};
55520 +
55521 +static int
55522 +proc_read_rcvr_stats(char *page, char **start, off_t off,
55523 +                    int count, int *eof, void *data)
55524 +{
55525 +    EP_RCVR *rcvr = (EP_RCVR *)data;
55526 +    
55527 +    if (rcvr == NULL) 
55528 +       sprintf(page,"proc_read_rcvr_stats rcvr=NULL\n");
55529 +    else {
55530 +       page[0] = 0;
55531 +       ep_rcvr_fillout_stats(rcvr,page);
55532 +    }
55533 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
55534 +}
55535 +
55536 +static int
55537 +proc_read_rcvr_rail_stats(char *page, char **start, off_t off,
55538 +                    int count, int *eof, void *data)
55539 +{
55540 +    EP_RCVR_RAIL *rcvr_rail = (EP_RCVR_RAIL *)data;
55541 +
55542 +    if (rcvr_rail == NULL) {
55543 +       strcpy(page,"proc_read_rcvr_rail_stats rcvr_rail=NULL");
55544 +    } else {
55545 +       page[0] = 0;
55546 +       ep_rcvr_rail_fillout_stats(rcvr_rail, page);
55547 +       EP_RCVR_OP(rcvr_rail,FillOutRailStats)(rcvr_rail,page);
55548 +    }
55549 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
55550 +}
55551 +
55552 +void
55553 +ep_procfs_rcvr_add(EP_RCVR *rcvr)
55554 +{ 
55555 +    /* ep/rcvr/service_number/stats       */
55556 +    /* ep/rcvr/service_number/debug_rcvr  */
55557 +    /* ep/rcvr/service_number/debug_full  */
55558 +    struct proc_dir_entry *p;
55559 +    char str[32];
55560 +
55561 +    sprintf(str,"%d", rcvr->Service);
55562 +
55563 +    rcvr->procfs_root = proc_mkdir (str, ep_procfs_rcvr_root);
55564 +
55565 +    if ((p = create_proc_entry ("stats", 0,  rcvr->procfs_root)) != NULL)
55566 +    {
55567 +       p->write_proc = NULL;
55568 +       p->read_proc  = proc_read_rcvr_stats;
55569 +       p->data       = rcvr;
55570 +       p->owner      = THIS_MODULE;
55571 +    }
55572 +
55573 +    if ((p = create_proc_entry ("debug_rcrv", 0, rcvr->procfs_root)) != NULL)
55574 +    {
55575 +       p->proc_fops = &ep_proc_operations;
55576 +       p->owner     = THIS_MODULE;
55577 +       p->data      = rcvr;
55578 +    }
55579 +
55580 +    if ((p = create_proc_entry ("debug_full", 0, rcvr->procfs_root)) != NULL)
55581 +    {
55582 +       p->proc_fops = &ep_proc_operations;
55583 +       p->owner     = THIS_MODULE;
55584 +       p->data      = rcvr;
55585 +    }
55586 +}
55587 +
55588 +void
55589 +ep_procfs_rcvr_del(EP_RCVR *rcvr)
55590 +{  
55591 +    char str[32];
55592 +    sprintf(str,"%d", rcvr->Service);
55593 +
55594 +    remove_proc_entry ("stats",      rcvr->procfs_root);
55595 +    remove_proc_entry ("debug_rcvr", rcvr->procfs_root);
55596 +    remove_proc_entry ("debug_full", rcvr->procfs_root);
55597 +
55598 +    remove_proc_entry (str, ep_procfs_rcvr_root);
55599 +}
55600 +
55601 +void 
55602 +ep_procfs_rcvr_add_rail(EP_RCVR_RAIL *rcvrRail)
55603 +{
55604 +    /* ep/rcvr/service_number/railN/stats */
55605 +
55606 +    struct proc_dir_entry *p;
55607 +    char str[32];
55608 +    sprintf(str,"rail%d",rcvrRail->CommsRail->Rail->Number);
55609 +
55610 +    rcvrRail->procfs_root = proc_mkdir (str, rcvrRail->Rcvr->procfs_root);
55611 +    
55612 +    if ((p = create_proc_entry ("stats", 0,  rcvrRail->procfs_root)) != NULL)
55613 +    {
55614 +       p->write_proc = NULL;
55615 +       p->read_proc  = proc_read_rcvr_rail_stats;
55616 +       p->data       = rcvrRail;
55617 +       p->owner      = THIS_MODULE;
55618 +    } 
55619 +}
55620 +
55621 +void 
55622 +ep_procfs_rcvr_del_rail(EP_RCVR_RAIL *rcvrRail)
55623 +{
55624 +    char str[32];
55625 +    sprintf(str,"rail%d",rcvrRail->CommsRail->Rail->Number);
55626 +
55627 +    remove_proc_entry ("stats", rcvrRail->procfs_root);
55628 +
55629 +    remove_proc_entry (str, rcvrRail->Rcvr->procfs_root);
55630 +}
55631 +
55632 +
55633 +
55634 +
55635 +static int
55636 +proc_read_xmtr_stats(char *page, char **start, off_t off,
55637 +                    int count, int *eof, void *data)
55638 +{
55639 +    EP_XMTR *xmtr = (EP_XMTR *)data;
55640 +
55641 +    if (xmtr == NULL) 
55642 +       strcpy(page,"proc_read_xmtr_stats xmtr=NULL\n");
55643 +    else {
55644 +       page[0] = 0;
55645 +       ep_xmtr_fillout_stats(xmtr, page);
55646 +    }
55647 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
55648 +}
55649 +
55650 +static int
55651 +proc_read_xmtr_rail_stats(char *page, char **start, off_t off,
55652 +                    int count, int *eof, void *data)
55653 +{
55654 +    EP_XMTR_RAIL *xmtr_rail = (EP_XMTR_RAIL *)data;
55655 +
55656 +    if (xmtr_rail == NULL) 
55657 +       strcpy(page,"proc_read_xmtr_rail_stats xmtr_rail=NULL\n");
55658 +    else {
55659 +       page[0] = 0;
55660 +       ep_xmtr_rail_fillout_stats(xmtr_rail, page);
55661 +       EP_XMTR_OP(xmtr_rail,FillOutRailStats)(xmtr_rail,page);
55662 +    }
55663 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
55664 +}
55665 +
55666 +void
55667 +ep_procfs_xmtr_add(EP_XMTR *xmtr)
55668 +{ 
55669 +    /* ep/xmtr/service_number/stats       */
55670 +    /* ep/xmtr/service_number/debug_xmtr  */
55671 +    struct proc_dir_entry *p;
55672 +    char str[32];
55673 +
55674 +    sprintf(str,"%llx", (unsigned long long) (unsigned long)xmtr);
55675 +
55676 +    xmtr->procfs_root = proc_mkdir (str, ep_procfs_xmtr_root);
55677 +
55678 +    if ((p = create_proc_entry ("stats", 0,  xmtr->procfs_root)) != NULL)
55679 +    {
55680 +       p->write_proc = NULL;
55681 +       p->read_proc  = proc_read_xmtr_stats;
55682 +       p->data       = xmtr;
55683 +       p->owner      = THIS_MODULE;
55684 +    } 
55685 +
55686 +    if ((p = create_proc_entry ("debug_xmtr", 0, xmtr->procfs_root)) != NULL)
55687 +    {
55688 +       p->proc_fops = &ep_proc_operations;
55689 +       p->owner     = THIS_MODULE;
55690 +       p->data      = xmtr;
55691 +    }
55692 +}
55693 +
55694 +void
55695 +ep_procfs_xmtr_del(EP_XMTR *xmtr)
55696 +{  
55697 +    char str[32];
55698 +    sprintf(str,"%llx", (unsigned long long) (unsigned long)xmtr);
55699 +
55700 +    remove_proc_entry ("stats",      xmtr->procfs_root);
55701 +    remove_proc_entry ("debug_xmtr", xmtr->procfs_root);
55702 +
55703 +    remove_proc_entry (str, ep_procfs_xmtr_root);
55704 +}
55705 +
55706 +void 
55707 +ep_procfs_xmtr_add_rail(EP_XMTR_RAIL *xmtrRail)
55708 +{
55709 +    /* ep/xmtr/service_number/railN/stats */
55710 +    
55711 +    struct proc_dir_entry *p;
55712 +    char str[32];
55713 +    sprintf(str,"rail%d",xmtrRail->CommsRail->Rail->Number);
55714 +
55715 +    xmtrRail->procfs_root = proc_mkdir (str, xmtrRail->Xmtr->procfs_root);
55716 +
55717 +    if ((p = create_proc_entry ("stats", 0,  xmtrRail->procfs_root)) != NULL)
55718 +    {
55719 +       p->write_proc = NULL;
55720 +       p->read_proc  = proc_read_xmtr_rail_stats;
55721 +       p->data       = xmtrRail;
55722 +       p->owner      = THIS_MODULE;
55723 +    } 
55724 +}
55725 +
55726 +void 
55727 +ep_procfs_xmtr_del_rail(EP_XMTR_RAIL *xmtrRail)
55728 +{
55729 +    char str[32];
55730 +    sprintf(str,"rail%d",xmtrRail->CommsRail->Rail->Number);
55731 +
55732 +    remove_proc_entry ("stats", xmtrRail->procfs_root);
55733 +
55734 +    remove_proc_entry (str, xmtrRail->Xmtr->procfs_root);
55735 +}
55736 +
55737 +void
55738 +ep_procfs_rcvr_xmtr_init(void)
55739 +{
55740 +    ep_procfs_rcvr_root = proc_mkdir ("rcvr", ep_procfs_root);
55741 +    ep_procfs_xmtr_root = proc_mkdir ("xmtr", ep_procfs_root); 
55742 +}
55743 +
55744 +void
55745 +ep_procfs_rcvr_xmtr_fini(void)
55746 +{
55747 +    remove_proc_entry ("rcvr", ep_procfs_root);
55748 +    remove_proc_entry ("xmtr", ep_procfs_root);
55749 +}
55750 +
55751 +/*
55752 + * Local variables:
55753 + * c-file-style: "stroustrup"
55754 + * End:
55755 + */
55756 Index: linux-2.4.21/drivers/net/qsnet/ep/kalloc.c
55757 ===================================================================
55758 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kalloc.c     2004-02-23 16:02:56.000000000 -0500
55759 +++ linux-2.4.21/drivers/net/qsnet/ep/kalloc.c  2005-06-01 23:12:54.661429680 -0400
55760 @@ -0,0 +1,677 @@
55761 +/*
55762 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
55763 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
55764 + *
55765 + *    For licensing information please see the supplied COPYING file
55766 + *
55767 + */
55768 +
55769 +#ident "@(#)$Id: kalloc.c,v 1.17.8.2 2004/12/14 10:19:14 mike Exp $"
55770 +/*      $Source: /cvs/master/quadrics/epmod/kalloc.c,v $ */
55771 +
55772 +#include <qsnet/kernel.h>
55773 +
55774 +#include <elan/kcomm.h>
55775 +
55776 +#include "debug.h"
55777 +
55778 +static void
55779 +HashInPool (EP_ALLOC *alloc, EP_POOL *pool)
55780 +{
55781 +    int idx0 = HASH (pool->Handle.nmh_nmd.nmd_addr);
55782 +    int idx1 = HASH (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len);
55783 +
55784 +    list_add (&pool->HashBase, &alloc->HashBase[idx0]);
55785 +    list_add (&pool->HashTop, &alloc->HashTop[idx1]);
55786 +}
55787 +
55788 +static void
55789 +HashOutPool (EP_ALLOC *alloc, EP_POOL *pool)
55790 +{
55791 +    list_del (&pool->HashBase);
55792 +    list_del (&pool->HashTop);
55793 +}
55794 +
55795 +static EP_POOL *
55796 +LookupPool (EP_ALLOC *alloc, EP_ADDR addr)
55797 +{
55798 +    struct list_head *el;
55799 +    
55800 +    list_for_each (el, &alloc->HashBase[HASH(addr)]) {
55801 +       EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
55802 +       
55803 +       if (pool->Handle.nmh_nmd.nmd_addr <= addr && addr < (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len))
55804 +           return (pool);
55805 +    }
55806 +    
55807 +    list_for_each (el, &alloc->HashTop[HASH(addr)]) {
55808 +       EP_POOL *pool = list_entry (el, EP_POOL, HashTop);
55809 +       
55810 +       if (pool->Handle.nmh_nmd.nmd_addr <= addr && addr < (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len))
55811 +           return (pool);
55812 +    }
55813 +    
55814 +    return (NULL);
55815 +}
55816 +
55817 +static EP_POOL *
55818 +AllocatePool (EP_ALLOC *alloc, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr)
55819 +{
55820 +    EP_ADDR base = 0;
55821 +    EP_POOL *pool;
55822 +    EP_RAIL *rail;
55823 +    int i, railmask = 0;
55824 +    struct list_head *el;
55825 +
55826 +    KMEM_ZALLOC (pool, EP_POOL *, sizeof (EP_POOL), !(attr & EP_NO_SLEEP));
55827 +    
55828 +    if (pool == NULL)
55829 +       return (NULL);
55830 +    
55831 +    if (addr != 0)
55832 +       base = addr;
55833 +    else
55834 +    {
55835 +       for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
55836 +       {
55837 +           KMEM_ZALLOC (pool->Bitmaps[i - LN2_MIN_SIZE], bitmap_t *, BT_BITOUL(1 << (LN2_MAX_SIZE-i)) * sizeof (bitmap_t), !(attr & EP_NO_SLEEP));
55838 +           if (pool->Bitmaps[i - LN2_MIN_SIZE] == NULL)
55839 +               goto failed;
55840 +       }
55841 +    
55842 +       if ((base = ep_rmalloc (alloc->ResourceMap, size, !(attr & EP_NO_SLEEP))) == 0)
55843 +           goto failed;
55844 +    }
55845 +
55846 +    switch (alloc->Type)
55847 +    {
55848 +    case EP_ALLOC_TYPE_PRIVATE_SDRAM:
55849 +       rail = alloc->Data.Private.Rail;
55850 +
55851 +       if ((pool->Buffer.Sdram = rail->Operations.SdramAlloc (rail, base, size)) == 0)
55852 +           goto failed;
55853 +
55854 +       ep_perrail_sdram_map (rail, base, pool->Buffer.Sdram, size, perm, attr);
55855 +
55856 +       pool->Handle.nmh_nmd.nmd_addr = base;
55857 +       pool->Handle.nmh_nmd.nmd_len  = size;
55858 +       break;
55859 +       
55860 +    case EP_ALLOC_TYPE_PRIVATE_MAIN:
55861 +        KMEM_GETPAGES(pool->Buffer.Ptr, unsigned long, btop (size), !(attr & EP_NO_SLEEP));
55862 +       if (pool->Buffer.Ptr == 0)
55863 +           goto failed;
55864 +
55865 +       ep_perrail_kaddr_map (alloc->Data.Private.Rail, base, pool->Buffer.Ptr, size, perm, attr);
55866 +
55867 +       pool->Handle.nmh_nmd.nmd_addr = base;
55868 +       pool->Handle.nmh_nmd.nmd_len  = size;
55869 +       break;
55870 +
55871 +    case EP_ALLOC_TYPE_SHARED_MAIN:
55872 +        KMEM_GETPAGES(pool->Buffer.Ptr, unsigned long, btop (size), !(attr & EP_NO_SLEEP));
55873 +       if (pool->Buffer.Ptr == 0)
55874 +           goto failed;
55875 +
55876 +       list_for_each (el, &alloc->Data.Shared.Rails) {
55877 +           EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
55878 +
55879 +           ep_perrail_kaddr_map (rail, base, pool->Buffer.Ptr, size, perm, attr);
55880 +
55881 +           railmask |= (1 << rail->Number);
55882 +       }
55883 +       pool->Handle.nmh_nmd.nmd_addr = base;
55884 +       pool->Handle.nmh_nmd.nmd_len  = size;
55885 +       pool->Handle.nmh_nmd.nmd_attr = EP_NMD_ATTR (alloc->Data.Shared.System->Position.pos_nodeid, railmask);
55886 +
55887 +       ep_nmh_insert (&alloc->Data.Shared.System->MappingTable, &pool->Handle);
55888 +       break;
55889 +
55890 +    default:
55891 +       goto failed;
55892 +    }
55893 +
55894 +    return (pool);
55895 +    
55896 + failed:
55897 +    if (addr == 0 && base)
55898 +       ep_rmfree (alloc->ResourceMap, size, base);
55899 +
55900 +    for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
55901 +       if (pool->Bitmaps[i - LN2_MIN_SIZE] != NULL)
55902 +           KMEM_FREE (pool->Bitmaps[i - LN2_MIN_SIZE], BT_BITOUL(1 << (LN2_MAX_SIZE - i)) * sizeof (bitmap_t));
55903 +    
55904 +    KMEM_FREE (pool, sizeof (EP_POOL));
55905 +    return (NULL);
55906 +}
55907 +
55908 +static void
55909 +FreePool (EP_ALLOC *alloc, EP_POOL *pool)
55910 +{
55911 +    struct list_head *el;
55912 +    int i;
55913 +
55914 +    switch (alloc->Type)
55915 +    {
55916 +    case EP_ALLOC_TYPE_PRIVATE_SDRAM:
55917 +       ep_perrail_unmap (alloc->Data.Private.Rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
55918 +
55919 +       alloc->Data.Private.Rail->Operations.SdramFree (alloc->Data.Private.Rail, pool->Buffer.Sdram, pool->Handle.nmh_nmd.nmd_len);
55920 +       break;
55921 +       
55922 +    case EP_ALLOC_TYPE_PRIVATE_MAIN:
55923 +       ep_perrail_unmap (alloc->Data.Private.Rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
55924 +
55925 +       KMEM_FREEPAGES (pool->Buffer.Ptr, btop (pool->Handle.nmh_nmd.nmd_len));
55926 +       break;
55927 +
55928 +    case EP_ALLOC_TYPE_SHARED_MAIN:
55929 +       ep_nmh_remove (&alloc->Data.Shared.System->MappingTable, &pool->Handle);
55930 +
55931 +       list_for_each (el, &alloc->Data.Shared.Rails) {
55932 +           EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
55933 +
55934 +           ep_perrail_unmap (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
55935 +       }
55936 +
55937 +       KMEM_FREEPAGES (pool->Buffer.Ptr, btop (pool->Handle.nmh_nmd.nmd_len));
55938 +       break;
55939 +    }
55940 +    
55941 +    if (pool->Bitmaps[0])
55942 +    {
55943 +       ep_rmfree (alloc->ResourceMap, pool->Handle.nmh_nmd.nmd_len, pool->Handle.nmh_nmd.nmd_addr);
55944 +    
55945 +       for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
55946 +           KMEM_FREE (pool->Bitmaps[i - LN2_MIN_SIZE], BT_BITOUL(1 << (LN2_MAX_SIZE - i)) * sizeof (bitmap_t));
55947 +    }
55948 +    
55949 +    KMEM_FREE (pool, sizeof (EP_POOL));
55950 +}
55951 +
55952 +static int
55953 +AddRail (EP_ALLOC *alloc, EP_RAIL *rail)
55954 +{
55955 +    struct list_head *el;
55956 +    EP_RAIL_ENTRY *l;
55957 +    unsigned long flags;
55958 +    int i;
55959 +
55960 +    ASSERT (alloc->Type == EP_ALLOC_TYPE_SHARED_MAIN);
55961 +
55962 +    KMEM_ZALLOC (l, EP_RAIL_ENTRY *, sizeof (EP_RAIL_ENTRY), 1);
55963 +
55964 +    if (l == NULL)
55965 +       return (ENOMEM);
55966 +
55967 +    l->Rail = rail;
55968 +
55969 +    spin_lock_irqsave (&alloc->Lock, flags);
55970 +    for (i = 0; i < NHASH; i++)
55971 +    {
55972 +       list_for_each (el, &alloc->HashBase[i]) {
55973 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
55974 +
55975 +           ep_perrail_kaddr_map (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Buffer.Ptr, 
55976 +                                 pool->Handle.nmh_nmd.nmd_len, EP_PERM_WRITE, EP_NO_SLEEP);
55977 +
55978 +           pool->Handle.nmh_nmd.nmd_attr |= EP_NMD_ATTR (0, 1 << rail->Number);
55979 +       }
55980 +    }
55981 +
55982 +    list_add (&l->Link, &alloc->Data.Shared.Rails);
55983 +
55984 +    spin_unlock_irqrestore (&alloc->Lock, flags); 
55985 +    return (0);
55986 +}
55987 +
55988 +static void
55989 +RemoveRail (EP_ALLOC *alloc, EP_RAIL *rail)
55990 +{
55991 +    struct list_head *el;
55992 +    unsigned long flags;
55993 +    int i;
55994 +
55995 +    spin_lock_irqsave (&alloc->Lock, flags);
55996 +    for (i = 0; i < NHASH; i++)
55997 +    {
55998 +       list_for_each (el, &alloc->HashBase[i]) {
55999 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
56000 +
56001 +           ep_perrail_unmap (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
56002 +
56003 +           pool->Handle.nmh_nmd.nmd_attr &= ~EP_NMD_ATTR (0, 1 << rail->Number);
56004 +       }
56005 +    }
56006 +
56007 +    list_for_each (el, &alloc->Data.Shared.Rails) {
56008 +       EP_RAIL_ENTRY *tmp = list_entry (el, EP_RAIL_ENTRY, Link);
56009 +       if (tmp->Rail == rail)
56010 +       {
56011 +           list_del (el);
56012 +           KMEM_FREE(tmp, sizeof (EP_RAIL_ENTRY));
56013 +           break;
56014 +       }
56015 +    }
56016 +
56017 +    spin_unlock_irqrestore (&alloc->Lock, flags);
56018 +}
56019 +
56020 +static EP_POOL *
56021 +AllocateBlock (EP_ALLOC *alloc, unsigned size, EP_ATTRIBUTE attr, int *offset)
56022 +{
56023 +    int block, j, k;
56024 +    unsigned long flags;
56025 +    EP_POOL *pool;
56026 +
56027 +
56028 +    if (size > MAX_SIZE)
56029 +    {
56030 +       if ((attr & EP_NO_ALLOC) || (pool  = AllocatePool (alloc, 0, size, alloc->Perm, attr)) == NULL)
56031 +           return (NULL);
56032 +
56033 +       spin_lock_irqsave (&alloc->Lock, flags);
56034 +       HashInPool (alloc, pool);
56035 +       spin_unlock_irqrestore (&alloc->Lock, flags);
56036 +
56037 +       *offset = 0;
56038 +
56039 +       return pool;
56040 +    }
56041 +
56042 +    spin_lock_irqsave (&alloc->Lock, flags);
56043 +
56044 +    /* Round up size to next power of 2 */
56045 +    for (k = LN2_MIN_SIZE; (1 << k) < size; k++)
56046 +       ;
56047 +    
56048 +    /* k now has ln2 of the size to allocate. */
56049 +    /* find the free list with the smallest block we can use*/
56050 +    for (j = k; j <= LN2_MAX_SIZE && list_empty (&alloc->Freelists[j - LN2_MIN_SIZE]); j++)
56051 +       ;
56052 +    
56053 +    /* j has ln2 of the smallest size block we can use */
56054 +    if (j < LN2_MAX_SIZE)
56055 +    {
56056 +       int nbits = 1 << (LN2_MAX_SIZE-j);
56057 +       
56058 +       pool  = list_entry (alloc->Freelists[j - LN2_MIN_SIZE].next, EP_POOL, Link[j - LN2_MIN_SIZE]);
56059 +       block = (bt_lowbit (pool->Bitmaps[j - LN2_MIN_SIZE], nbits) << j);
56060 +       
56061 +       BT_CLEAR (pool->Bitmaps[j - LN2_MIN_SIZE], block >> j);
56062 +       
56063 +       if (bt_lowbit (pool->Bitmaps[j - LN2_MIN_SIZE], nbits) == -1)
56064 +           list_del (&pool->Link[j - LN2_MIN_SIZE]);
56065 +    }
56066 +    else
56067 +    {
56068 +       spin_unlock_irqrestore (&alloc->Lock, flags);
56069 +       
56070 +       if ((attr & EP_NO_ALLOC) || (pool  = AllocatePool (alloc, 0, MAX_SIZE, alloc->Perm, attr)) == NULL)
56071 +           return (NULL);
56072 +
56073 +       block = 0;
56074 +       j = LN2_MAX_SIZE;
56075 +       
56076 +       spin_lock_irqsave (&alloc->Lock, flags);
56077 +       
56078 +       HashInPool (alloc, pool);
56079 +    }
56080 +    
56081 +    /* Split it until the buddies are the correct size, putting one
56082 +     * buddy back on the free list and continuing to split the other */
56083 +    while (--j >= k)
56084 +    {
56085 +       list_add (&pool->Link[j - LN2_MIN_SIZE], &alloc->Freelists[j - LN2_MIN_SIZE]);
56086 +       
56087 +       BT_SET (pool->Bitmaps[j - LN2_MIN_SIZE], block >> j);
56088 +       
56089 +       block += (1 << j);
56090 +    }
56091 +    spin_unlock_irqrestore (&alloc->Lock, flags);
56092 +
56093 +    *offset = block;
56094 +
56095 +    return (pool);
56096 +}
56097 +
56098 +static void
56099 +FreeBlock (EP_ALLOC *alloc, EP_ADDR addr, unsigned size)
56100 +{
56101 +    EP_POOL *pool;
56102 +    int  k, block = 0;
56103 +    unsigned long flags;
56104 +    
56105 +    spin_lock_irqsave (&alloc->Lock, flags);
56106 +    /* Round up size to next power of 2 */
56107 +    for (k = LN2_MIN_SIZE; (1 << k) < size; k++)
56108 +       ;
56109 +
56110 +    /* Find the pool containing this block */
56111 +    pool = LookupPool (alloc, addr);
56112 +
56113 +    /* It must exist */
56114 +    ASSERT (pool != NULL);
56115 +
56116 +    /* If we're freeing a subset of it, then update the bitmaps */
56117 +    if (size <= MAX_SIZE)
56118 +    {
56119 +       ASSERT (BT_TEST (pool->Bitmaps[k - LN2_MIN_SIZE], (addr - pool->Handle.nmh_nmd.nmd_addr) >> k) == 0);
56120 +       
56121 +       block = addr - pool->Handle.nmh_nmd.nmd_addr;
56122 +       
56123 +       while (k < LN2_MAX_SIZE && BT_TEST (pool->Bitmaps[k - LN2_MIN_SIZE], (block >> k) ^ 1))
56124 +       {
56125 +           BT_CLEAR (pool->Bitmaps[k - LN2_MIN_SIZE], (block >> k) ^ 1);
56126 +           
56127 +           if (bt_lowbit (pool->Bitmaps[k - LN2_MIN_SIZE], (1 << (LN2_MAX_SIZE - k))) == -1)
56128 +               list_del (&pool->Link[k - LN2_MIN_SIZE]);
56129 +           
56130 +           k++;
56131 +       }
56132 +    }
56133 +
56134 +    if (k >= LN2_MAX_SIZE)
56135 +    {
56136 +       HashOutPool (alloc, pool);
56137 +       spin_unlock_irqrestore (&alloc->Lock, flags);
56138 +
56139 +       FreePool (alloc, pool);
56140 +    }
56141 +    else
56142 +    {
56143 +       if (bt_lowbit (pool->Bitmaps[k - LN2_MIN_SIZE], (1 << (LN2_MAX_SIZE - k))) == -1)
56144 +           list_add (&pool->Link[k - LN2_MIN_SIZE], &alloc->Freelists[k - LN2_MIN_SIZE]);
56145 +
56146 +       BT_SET (pool->Bitmaps[k - LN2_MIN_SIZE], block >> k);
56147 +
56148 +       spin_unlock_irqrestore (&alloc->Lock, flags);
56149 +    }
56150 +}
56151 +
56152 +static void
56153 +InitialiseAllocator (EP_ALLOC *alloc, EP_ALLOC_TYPE type, unsigned int perm, EP_RMAP *rmap)
56154 +{
56155 +    int i;
56156 +
56157 +    spin_lock_init (&alloc->Lock);
56158 +
56159 +    alloc->Type        = type;
56160 +    alloc->ResourceMap = rmap;
56161 +    alloc->Perm        = perm;
56162 +
56163 +    for (i = 0; i < NHASH; i++)
56164 +    {
56165 +       (&alloc->HashBase[i])->next = &alloc->HashBase[i];
56166 +
56167 +       INIT_LIST_HEAD (&alloc->HashBase[i]);
56168 +       INIT_LIST_HEAD (&alloc->HashTop[i]);
56169 +    }
56170 +    
56171 +    for (i = 0; i < NUM_FREELISTS; i++)
56172 +       INIT_LIST_HEAD (&alloc->Freelists[i]);
56173 +}
56174 +
56175 +static void
56176 +DestroyAllocator (EP_ALLOC *alloc)
56177 +{
56178 +    struct list_head *el, *next;
56179 +    int i;
56180 +
56181 +    for (i = 0; i < NHASH; i++)
56182 +    {
56183 +       list_for_each_safe (el, next, &alloc->HashBase[i]) { 
56184 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
56185 +
56186 +           printk ("!!DestroyAllocator: pool=%p type=%d addr=%x len=%x\n", pool, alloc->Type,
56187 +                   pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
56188 +
56189 +           list_del (&pool->HashBase);
56190 +           list_del (&pool->HashTop);
56191 +
56192 +           // XXXX: FreePool (alloc, pool);
56193 +       }
56194 +    }
56195 +
56196 +    spin_lock_destroy (&alloc->Lock);
56197 +}
56198 +
56199 +void
56200 +ep_display_alloc (EP_ALLOC *alloc)
56201 +{
56202 +    struct list_head *el;
56203 +    int i;
56204 +    int npools = 0;
56205 +    int nbytes = 0;
56206 +    int nfree = 0;
56207 +    unsigned long flags;
56208 +
56209 +    spin_lock_irqsave (&alloc->Lock, flags);
56210 +
56211 +    ep_debugf (DBG_DEBUG, "Kernel comms memory allocator %p type %d\n", alloc, alloc->Type);
56212 +    for (i = 0; i < NHASH; i++)
56213 +    {
56214 +       list_for_each (el, &alloc->HashBase[i]) {
56215 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
56216 +
56217 +           ep_debugf (DBG_DEBUG, "  POOL %4x: %p -> %x.%x\n", i, pool, pool->Handle.nmh_nmd.nmd_addr,
56218 +                      pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len);
56219 +
56220 +           npools++;
56221 +           nbytes += pool->Handle.nmh_nmd.nmd_len;
56222 +       }
56223 +    }
56224 +    
56225 +    for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i++)
56226 +    {
56227 +       int n = 0;
56228 +
56229 +       list_for_each (el, &alloc->Freelists[i - LN2_MIN_SIZE]) {
56230 +           EP_POOL *pool  = list_entry (el, EP_POOL, Link[i - LN2_MIN_SIZE]);
56231 +           int      nbits = bt_nbits (pool->Bitmaps[i - LN2_MIN_SIZE], 1 << (LN2_MAX_SIZE - i));
56232 +
56233 +           n += nbits;
56234 +           nfree += (nbits << i);
56235 +       }
56236 +       
56237 +       if (n != 0)
56238 +           ep_debugf (DBG_DEBUG, "  SIZE %5d : num %d\n", (1 << i), n);
56239 +    }
56240 +    ep_debugf (DBG_DEBUG, "%d pools with %d bytes and %d bytes free\n", npools, nbytes, nfree);
56241 +
56242 +    spin_unlock_irqrestore (&alloc->Lock, flags);
56243 +}
56244 +
56245 +/* per-rail allocators */
56246 +void
56247 +ep_alloc_init (EP_RAIL *rail)
56248 +{
56249 +    EP_RMAP *rmap = ep_rmallocmap (EP_PRIVATE_RMAP_SIZE, "PrivateMap", 1);
56250 +
56251 +    ep_rmfree (rmap, EP_PRIVATE_TOP-EP_PRIVATE_BASE, EP_PRIVATE_BASE);
56252 +
56253 +    InitialiseAllocator (&rail->ElanAllocator, EP_ALLOC_TYPE_PRIVATE_SDRAM, EP_PERM_ALL, rmap);
56254 +    InitialiseAllocator (&rail->MainAllocator, EP_ALLOC_TYPE_PRIVATE_MAIN, EP_PERM_WRITE, rmap);
56255 +
56256 +    rail->ElanAllocator.Data.Private.Rail = rail;
56257 +    rail->MainAllocator.Data.Private.Rail = rail;
56258 +}
56259 +
56260 +void
56261 +ep_alloc_fini (EP_RAIL *rail)
56262 +{
56263 +    EP_RMAP *rmap = rail->ElanAllocator.ResourceMap;
56264 +
56265 +    DestroyAllocator (&rail->ElanAllocator);
56266 +    DestroyAllocator (&rail->MainAllocator);
56267 +    
56268 +    ep_rmfreemap (rmap);
56269 +}
56270 +
56271 +sdramaddr_t
56272 +ep_alloc_memory_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr)
56273 +{
56274 +    EP_POOL *pool = AllocatePool (&rail->ElanAllocator, addr, size, perm, attr);
56275 +    unsigned long flags;
56276 +
56277 +    if (pool == NULL)
56278 +       return (0);
56279 +
56280 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
56281 +    HashInPool (&rail->ElanAllocator, pool);
56282 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
56283 +
56284 +    return (pool->Buffer.Sdram);
56285 +}
56286 +
56287 +void
56288 +ep_free_memory_elan (EP_RAIL *rail, EP_ADDR addr)
56289 +{
56290 +    EP_POOL *pool;
56291 +    unsigned long flags;
56292 +
56293 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
56294 +    pool = LookupPool (&rail->ElanAllocator, addr);
56295 +    
56296 +    HashOutPool (&rail->ElanAllocator, pool);
56297 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
56298 +    
56299 +    FreePool (&rail->ElanAllocator, pool);
56300 +}
56301 +
56302 +sdramaddr_t
56303 +ep_alloc_elan (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp)
56304 +{
56305 +    int             offset;
56306 +    EP_POOL *pool;
56307 +
56308 +    if ((pool = AllocateBlock (&rail->ElanAllocator, size, attr, &offset)) == NULL)
56309 +       return (0);
56310 +    
56311 +    *addrp  = pool->Handle.nmh_nmd.nmd_addr + offset;
56312 +
56313 +    return (pool->Buffer.Sdram + offset);
56314 +}
56315 +
56316 +void
56317 +ep_free_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size)
56318 +{
56319 +    FreeBlock (&rail->ElanAllocator, addr, size);
56320 +}
56321 +
56322 +void *
56323 +ep_alloc_main (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp)
56324 +{
56325 +    int             offset;
56326 +    EP_POOL *pool;
56327 +
56328 +    if ((pool = AllocateBlock (&rail->MainAllocator, size, attr, &offset)) == NULL)
56329 +       return (NULL);
56330 +    
56331 +    *addrp  = pool->Handle.nmh_nmd.nmd_addr + offset;
56332 +
56333 +    return ((void *) ((unsigned long) pool->Buffer.Ptr + offset));
56334 +}
56335 +
56336 +void
56337 +ep_free_main (EP_RAIL *rail, EP_ADDR addr, unsigned size)
56338 +{
56339 +    FreeBlock (&rail->MainAllocator, addr, size);
56340 +}
56341 +
56342 +sdramaddr_t
56343 +ep_elan2sdram (EP_RAIL *rail, EP_ADDR addr)
56344 +{
56345 +    EP_POOL    *pool;
56346 +    sdramaddr_t res;
56347 +    unsigned long flags;
56348 +
56349 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
56350 +    if ((pool = LookupPool (&rail->ElanAllocator, addr)) == NULL)
56351 +       res = 0;
56352 +    else
56353 +       res = pool->Buffer.Sdram + (addr - pool->Handle.nmh_nmd.nmd_addr);
56354 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
56355 +
56356 +    return (res);
56357 +}
56358 +
56359 +void *
56360 +ep_elan2main (EP_RAIL *rail, EP_ADDR addr)
56361 +{
56362 +    EP_POOL *pool;
56363 +    void *res;
56364 +    unsigned long flags;
56365 +
56366 +    spin_lock_irqsave (&rail->MainAllocator.Lock, flags);
56367 +    if ((pool = LookupPool (&rail->MainAllocator, addr)) == NULL)
56368 +       res = NULL;
56369 +    else
56370 +       res = (void *) ((unsigned long) pool->Buffer.Ptr + (addr - pool->Handle.nmh_nmd.nmd_addr));
56371 +    spin_unlock_irqrestore (&rail->MainAllocator.Lock, flags);
56372 +
56373 +    return (res);
56374 +}
56375 +
56376 +/* shared allocators */
56377 +int
56378 +ep_shared_alloc_add_rail (EP_SYS *sys, EP_RAIL *rail)
56379 +{
56380 +    return (AddRail (&sys->Allocator, rail));
56381 +}
56382 +
56383 +void
56384 +ep_shared_alloc_remove_rail (EP_SYS *sys, EP_RAIL *rail)
56385 +{
56386 +    RemoveRail (&sys->Allocator, rail);
56387 +}
56388 +
56389 +void
56390 +ep_shared_alloc_init (EP_SYS *sys)
56391 +{
56392 +    EP_RMAP *rmap = ep_rmallocmap (EP_SHARED_RMAP_SIZE, "shared_alloc_map", 1);
56393 +
56394 +    ep_rmfree (rmap, EP_SHARED_TOP - EP_SHARED_BASE, EP_SHARED_BASE);
56395 +
56396 +    InitialiseAllocator (&sys->Allocator, EP_ALLOC_TYPE_SHARED_MAIN, EP_PERM_WRITE, rmap);
56397 +
56398 +    INIT_LIST_HEAD (&sys->Allocator.Data.Shared.Rails);
56399 +
56400 +    sys->Allocator.Data.Shared.System = sys;
56401 +}
56402 +
56403 +void
56404 +ep_shared_alloc_fini (EP_SYS *sys)
56405 +{
56406 +    EP_RMAP *rmap = sys->Allocator.ResourceMap;
56407 +
56408 +    DestroyAllocator (&sys->Allocator);
56409 +
56410 +    ep_rmfreemap (rmap);
56411 +}
56412 +
56413 +void *
56414 +ep_shared_alloc_main (EP_SYS *sys, unsigned size, EP_ATTRIBUTE attr, EP_NMD *nmd)
56415 +{
56416 +    int offset;
56417 +    EP_POOL *pool;
56418 +
56419 +    if ((pool = AllocateBlock (&sys->Allocator, size, attr, &offset)) == NULL)
56420 +       return (NULL);
56421 +
56422 +    ep_nmd_subset (nmd, &pool->Handle.nmh_nmd, offset, size);
56423 +
56424 +    return ((void *) ((unsigned long) pool->Buffer.Ptr + offset));
56425 +}
56426 +
56427 +void
56428 +ep_shared_free_main (EP_SYS *sys, EP_NMD *nmd)
56429 +{
56430 +    FreeBlock (&sys->Allocator, nmd->nmd_addr, nmd->nmd_len);
56431 +}
56432 +
56433 +/*
56434 + * Local variables:
56435 + * c-file-style: "stroustrup"
56436 + * End:
56437 + */
56438 Index: linux-2.4.21/drivers/net/qsnet/ep/kcomm.c
56439 ===================================================================
56440 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kcomm.c      2004-02-23 16:02:56.000000000 -0500
56441 +++ linux-2.4.21/drivers/net/qsnet/ep/kcomm.c   2005-06-01 23:12:54.664429224 -0400
56442 @@ -0,0 +1,1448 @@
56443 +/*
56444 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
56445 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
56446 + *
56447 + *    For licensing information please see the supplied COPYING file
56448 + *
56449 + */
56450 +
56451 +#ident "@(#)$Id: kcomm.c,v 1.50.2.9 2004/12/09 10:02:42 david Exp $"
56452 +/*      $Source: /cvs/master/quadrics/epmod/kcomm.c,v $ */
56453 +
56454 +#include <qsnet/kernel.h>
56455 +#include <qsnet/kthread.h>
56456 +
56457 +#include <elan/kcomm.h>
56458 +#include <elan/epsvc.h>
56459 +#include <elan/epcomms.h>
56460 +
56461 +#include "cm.h"
56462 +#include "debug.h"
56463 +
56464 +int MaxSwitchLevels = 5;                               /* Max 1024 sized machine */
56465 +
56466 +static char *NodeStateNames[EP_NODE_NUM_STATES] = 
56467 +{
56468 +    "Disconnected",
56469 +    "Connecting",
56470 +    "Connected",
56471 +    "LeavingConnected",
56472 +    "LocalPassivate",
56473 +    "RemotePassivate",
56474 +    "Passivated",
56475 +    "Disconnecting",
56476 +};
56477 +
56478 +static void
56479 +ep_xid_cache_fill (EP_SYS *sys, EP_XID_CACHE *cache)
56480 +{
56481 +    unsigned long flags;
56482 +
56483 +    spin_lock_irqsave (&sys->XidLock, flags);
56484 +
56485 +    cache->Current = sys->XidNext;
56486 +    cache->Last    = cache->Current + EP_XID_CACHE_CHUNKS-1;
56487 +
56488 +    sys->XidNext += EP_XID_CACHE_CHUNKS;
56489 +
56490 +    spin_unlock_irqrestore (&sys->XidLock, flags);
56491 +}
56492 +
56493 +EP_XID
56494 +ep_xid_cache_alloc (EP_SYS *sys, EP_XID_CACHE *cache)
56495 +{
56496 +    EP_XID xid;
56497 +    
56498 +    if (cache->Current == cache->Last)
56499 +       ep_xid_cache_fill (sys, cache);
56500 +
56501 +    xid.Generation = sys->XidGeneration;
56502 +    xid.Handle     = cache->Handle;
56503 +    xid.Unique     = cache->Current++;
56504 +
56505 +    return (xid);
56506 +}
56507 +
56508 +void
56509 +ep_xid_cache_init (EP_SYS *sys, EP_XID_CACHE *cache)
56510 +{
56511 +    /* Stall manager thread - it doesn't lock the XidCacheList */
56512 +    ep_kthread_stall (&sys->ManagerThread);
56513 +
56514 +    cache->Handle = ++sys->XidHandle;
56515 +
56516 +    list_add_tail (&cache->Link, &sys->XidCacheList);
56517 +
56518 +    ep_kthread_resume (&sys->ManagerThread);
56519 +}
56520 +
56521 +void
56522 +ep_xid_cache_destroy (EP_SYS *sys, EP_XID_CACHE *cache)
56523 +{
56524 +    /* Stall manager thread - it doesn't lock the XidCacheList */
56525 +    ep_kthread_stall (&sys->ManagerThread);
56526 +
56527 +    list_del (&cache->Link);
56528 +
56529 +    ep_kthread_resume (&sys->ManagerThread);
56530 +}
56531 +
56532 +EP_XID_CACHE *
56533 +ep_xid_cache_find (EP_SYS *sys, EP_XID xid)
56534 +{
56535 +    struct list_head *el;
56536 +
56537 +    list_for_each (el, &sys->XidCacheList) {
56538 +       EP_XID_CACHE *cache = list_entry (el, EP_XID_CACHE, Link);
56539 +
56540 +       if (sys->XidGeneration == xid.Generation && cache->Handle == xid.Handle)
56541 +           return (cache);
56542 +    }
56543 +
56544 +    return (NULL);
56545 +}
56546 +
56547 +static int
56548 +MsgBusy (EP_RAIL *rail, EP_OUTPUTQ *outputq, int slotNum)
56549 +{
56550 +    switch (rail->Operations.OutputQState (rail, outputq, slotNum))
56551 +    {
56552 +    case EP_OUTPUTQ_BUSY:                      /* still busy */
56553 +       return 1;
56554 +       
56555 +    case EP_OUTPUTQ_FAILED:                    /* NACKed */
56556 +    {
56557 +#if defined(DEBUG_PRINTF)
56558 +       EP_MANAGER_MSG *msg = rail->Operations.OutputQMsg (rail, outputq, slotNum);
56559 +
56560 +       EPRINTF4 (DBG_MANAGER, "%s: kcomm msg %d type %d to %d failed\n", rail->Name, slotNum, msg->Hdr.Type, msg->Hdr.DestId);
56561 +#endif
56562 +       break;
56563 +    }
56564 +    
56565 +    case EP_OUTPUTQ_FINISHED:                  /* anything else is finished */
56566 +       break;
56567 +    }
56568 +
56569 +    return 0;
56570 +}
56571 +
56572 +int
56573 +ep_send_message (EP_RAIL *rail, int nodeId, int type, EP_XID xid, EP_MANAGER_MSG_BODY *body)
56574 +{
56575 +    EP_SYS         *sys  = rail->System;
56576 +    EP_NODE        *node = &sys->Nodes[nodeId];
56577 +    int             n    = EP_MANAGER_OUTPUTQ_SLOTS;
56578 +    int             slotNum;
56579 +    int             rnum;
56580 +    EP_RAIL        *msgRail;
56581 +    EP_MANAGER_MSG *msg;
56582 +    unsigned long   flags;
56583 +
56584 +    ASSERT (! EP_XID_INVALID (xid));
56585 +
56586 +    if ((rnum = ep_pickRail (node->ConnectedRails)) >= 0)
56587 +       msgRail = sys->Rails[rnum];
56588 +    else
56589 +    {
56590 +       if (EP_MANAGER_MSG_TYPE_CONNECTED(type))
56591 +       {
56592 +           ep_debugf (DBG_MANAGER, "%s: no rails available, trying to send type %d to %d\n", rail->Name, type, nodeId);
56593 +           return -EHOSTDOWN;
56594 +       }
56595 +
56596 +       ep_debugf (DBG_MANAGER, "%s: no rails connected to %d - using receiving rail\n", rail->Name, nodeId);
56597 +
56598 +       msgRail = rail;
56599 +    }
56600 +    
56601 +
56602 +    spin_lock_irqsave (&msgRail->ManagerOutputQLock, flags);
56603 +
56604 +    slotNum = msgRail->ManagerOutputQNextSlot;
56605 +
56606 +    while (n-- > 0 && MsgBusy (msgRail, msgRail->ManagerOutputQ, slotNum))             /* search for idle message buffer */
56607 +    {
56608 +       if (++(msgRail->ManagerOutputQNextSlot) == EP_MANAGER_OUTPUTQ_SLOTS)
56609 +           msgRail->ManagerOutputQNextSlot = 0;
56610 +      
56611 +       slotNum = msgRail->ManagerOutputQNextSlot;
56612 +    }
56613 +
56614 +    if (n < 0)                                                         /* all message buffers busy */
56615 +    {
56616 +       spin_unlock_irqrestore (&msgRail->ManagerOutputQLock, flags);
56617 +
56618 +       ep_debugf (DBG_MANAGER, "%s: all message buffers busy: trying to send type %d to %d\n", msgRail->Name, type, nodeId);
56619 +       return -EBUSY;
56620 +    }
56621 +
56622 +    msg = msgRail->Operations.OutputQMsg (msgRail, msgRail->ManagerOutputQ, slotNum);
56623 +    
56624 +    EPRINTF7 (DBG_MANAGER, "%s: ep_send_message: type=%d nodeId=%d rail=%d xid=%08x.%08x.%016llx\n", 
56625 +             msgRail->Name, type, nodeId, rail->Number, xid.Generation, xid.Handle, (long long) xid.Unique);
56626 +
56627 +    msg->Hdr.Version    = EP_MANAGER_MSG_VERSION;
56628 +    msg->Hdr.Type       = type;
56629 +    msg->Hdr.Rail       = rail->Number;
56630 +    msg->Hdr.NodeId     = msgRail->Position.pos_nodeid;
56631 +    msg->Hdr.DestId     = nodeId;
56632 +    msg->Hdr.Xid        = xid;
56633 +    msg->Hdr.Checksum   = 0;
56634 +
56635 +    if (body) bcopy (body, &msg->Body, sizeof (EP_MANAGER_MSG_BODY));
56636 +
56637 +    msg->Hdr.Checksum = CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE);
56638 +
56639 +    if (msgRail->Operations.OutputQSend (msgRail, msgRail->ManagerOutputQ, slotNum, EP_MANAGER_MSG_SIZE,
56640 +                                     nodeId, EP_SYSTEMQ_MANAGER, EP_MANAGER_OUTPUTQ_RETRIES) < 0)
56641 +       IncrStat (msgRail, SendMessageFailed);
56642 +    
56643 +    if (++(msgRail->ManagerOutputQNextSlot) == EP_MANAGER_OUTPUTQ_SLOTS) /* check this one last next time */
56644 +       msgRail->ManagerOutputQNextSlot = 0;
56645 +
56646 +    spin_unlock_irqrestore (&msgRail->ManagerOutputQLock, flags);
56647 +
56648 +    return 0;
56649 +}
56650 +
56651 +void
56652 +ep_panic_node (EP_SYS *sys, int nodeId, unsigned char *reason)
56653 +{
56654 +    EP_NODE            *node = &sys->Nodes[nodeId];
56655 +    EP_MANAGER_MSG_BODY body;
56656 +    EP_XID              xid;
56657 +    kcondvar_t          sleep;
56658 +    int                 rnum;
56659 +    unsigned long       flags;
56660 +
56661 +    if (nodeId >= sys->Position.pos_nodes)
56662 +       return;
56663 +
56664 +    strncpy (body.PanicReason, reason, sizeof (body.PanicReason));
56665 +
56666 +    kcondvar_init (&sleep);
56667 +    spin_lock_irqsave (&sys->NodeLock, flags);
56668 +    for (;;)
56669 +    {
56670 +       if (node->ConnectedRails == 0)
56671 +           break;
56672 +
56673 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
56674 +           if (node->ConnectedRails & (1 << rnum))
56675 +               break;
56676 +
56677 +       xid = ep_xid_cache_alloc(sys, &sys->Rails[rnum]->XidCache);
56678 +       
56679 +       if (ep_send_message (sys->Rails[rnum], nodeId, EP_MANAGER_MSG_TYPE_REMOTE_PANIC, xid, &body) == 0)
56680 +           break;
56681 +
56682 +       if (kcondvar_timedwaitsig (&sleep, &sys->NodeLock, &flags, lbolt + hz) == CV_RET_SIGPENDING)
56683 +           break;
56684 +    }
56685 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
56686 +    kcondvar_destroy (&sleep);
56687 +}
56688 +
56689 +static void
56690 +ProcessNeterrRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
56691 +{
56692 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: process neterr request - node %d cookies %llx %llx\n", rail->Name, msg->Hdr.NodeId, msg->Body.Cookies[0], msg->Body.Cookies[1]);
56693 +
56694 +    rail->Operations.NeterrFixup (rail, msg->Hdr.NodeId, msg->Body.Cookies);
56695 +    
56696 +    ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_NETERR_RESPONSE, msg->Hdr.Xid, &msg->Body);
56697 +}
56698 +
56699 +
56700 +static void
56701 +ProcessNeterrResponse (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
56702 +{
56703 +    EP_SYS       *sys      = rail->System;
56704 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
56705 +    unsigned long flags;
56706 +
56707 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: process neterr response - node %d cookies %llx %llx\n", rail->Name, msg->Hdr.NodeId, msg->Body.Cookies[0], msg->Body.Cookies[1]);
56708 +
56709 +    spin_lock_irqsave (&sys->NodeLock, flags);
56710 +    if (EP_XIDS_MATCH (nodeRail->MsgXid, msg->Hdr.Xid))
56711 +    {
56712 +       EP_INVALIDATE_XID (nodeRail->MsgXid);
56713 +
56714 +       if (nodeRail->NetworkErrorCookies[0] != 0 && nodeRail->NetworkErrorCookies[0] == msg->Body.Cookies[0])
56715 +           nodeRail->NetworkErrorCookies[0] = 0;
56716 +
56717 +       if (nodeRail->NetworkErrorCookies[1] != 0 && nodeRail->NetworkErrorCookies[1] == msg->Body.Cookies[1])
56718 +           nodeRail->NetworkErrorCookies[1] = 0;
56719 +       
56720 +       if (nodeRail->NetworkErrorCookies[0] == 0 && nodeRail->NetworkErrorCookies[1] == 0)
56721 +           nodeRail->NetworkErrorState &= ~EP_NODE_NETERR_ATOMIC_PACKET;
56722 +    }
56723 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
56724 +}
56725 +
56726 +
56727 +static void
56728 +ProcessGetNodeState (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
56729 +{
56730 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
56731 +    unsigned int service = msg->Body.Service;
56732 +
56733 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessGetNodeState: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
56734 +             NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState ? " (NetworkError)" : "");
56735 +
56736 +    msg->Body.NodeState.State             = nodeRail->State;
56737 +    msg->Body.NodeState.NetworkErrorState = nodeRail->NetworkErrorState;
56738 +    msg->Body.NodeState.Railmask          = ep_rcvr_railmask (rail->System, service);
56739 +
56740 +    if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE, msg->Hdr.Xid, &msg->Body) < 0)
56741 +       printk ("%s: get node state for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
56742 +}
56743 +
56744 +static void
56745 +ProcessFlushRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
56746 +{
56747 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
56748 +
56749 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessFlushRequest: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
56750 +             NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState ? " (NetworkError)" : "");
56751 +
56752 +    switch (nodeRail->State)
56753 +    {
56754 +    case EP_NODE_REMOTE_PASSIVATE:
56755 +       nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;     /* retransmit our flush request quickly */
56756 +       EPRINTF3 (DBG_MANAGER, "%s: ProcessFlushRequest: NextRunTime -> %lx (%lx)\n", rail->Name, nodeRail->NextRunTime, lbolt);
56757 +       /* DROPTHROUGH */
56758 +
56759 +    case EP_NODE_PASSIVATED:
56760 +    case EP_NODE_DISCONNECTED:
56761 +       if (nodeRail->NetworkErrorState != 0)
56762 +           break;
56763 +
56764 +       if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE, msg->Hdr.Xid, NULL) < 0)
56765 +           printk ("%s: flush request for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
56766 +       break;
56767 +       
56768 +    default:
56769 +       EPRINTF4 (DBG_MANAGER, "%s: flush request for %s[%d] - node not in appropriate state - %s\n", msgRail->Name, rail->Name, msg->Hdr.NodeId, NodeStateNames[nodeRail->State]);
56770 +       break;
56771 +    }
56772 +}
56773 +
56774 +static void
56775 +ProcessFlushResponse (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
56776 +{
56777 +    EP_NODE_RAIL *nodeRail= &rail->Nodes[msg->Hdr.NodeId];
56778 +
56779 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessFlushResponse: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
56780 +             NodeStateNames[nodeRail->State], EP_XIDS_MATCH (nodeRail->MsgXid, msg->Hdr.Xid) ? " (XIDS match)" : "");
56781 +
56782 +    if (nodeRail->State == EP_NODE_REMOTE_PASSIVATE && EP_XIDS_MATCH(nodeRail->MsgXid, msg->Hdr.Xid))
56783 +    {
56784 +       EP_INVALIDATE_XID (nodeRail->MsgXid);
56785 +
56786 +       printk ("%s: flush response from %d - move to passivated list\n", rail->Name, msg->Hdr.NodeId);
56787 +       list_del (&nodeRail->Link);
56788 +
56789 +       /* Node is now passivated - attempt to failover  messages */
56790 +       list_add_tail (&nodeRail->Link, &rail->PassivatedList);
56791 +       nodeRail->State = EP_NODE_PASSIVATED;
56792 +    }
56793 +    else
56794 +    {
56795 +       printk ("%s: flush response from %d - not passivating (%s) or XIDs mismatch (%llx %llx)\n", rail->Name, 
56796 +               msg->Hdr.NodeId, NodeStateNames[nodeRail->State], (long long) nodeRail->MsgXid.Unique, (long long) msg->Hdr.Xid.Unique);
56797 +    }
56798 +}
56799 +
56800 +static void
56801 +ProcessMapNmdRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
56802 +{
56803 +    EP_SYS          *sys     = rail->System;
56804 +    EP_MAP_NMD_BODY *msgBody = &msg->Body.MapNmd;
56805 +    int              i;
56806 +
56807 +    EPRINTF4 (DBG_MANAGER, "%s: Map NMD request from %d for %d NMDs to railmask %x\n", rail->Name, msg->Hdr.NodeId, msgBody->nFrags, msgBody->Railmask);
56808 +    
56809 +    for (i = 0; i < msgBody->nFrags; i++)
56810 +       ep_nmd_map_rails (sys, &msgBody->Nmd[i], msgBody->Railmask);
56811 +    
56812 +    /* Must flush TLBs before responding */
56813 +    for (i = 0; i < EP_MAX_RAILS; i++)
56814 +       if (sys->Rails[i] && sys->Rails[i]->TlbFlushRequired)
56815 +           ep_perrail_dvma_sync (sys->Rails[i]);
56816 +
56817 +    if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE, msg->Hdr.Xid, &msg->Body) < 0)
56818 +       printk ("%s: map nmd request for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
56819 +}
56820 +
56821 +static void
56822 +ProcessXidMessage (EP_RAIL *msgRail, EP_MANAGER_MSG *msg, EP_XID xid)
56823 +{
56824 +    EP_XID_CACHE *xidCache = ep_xid_cache_find (msgRail->System, xid);
56825 +
56826 +    EPRINTF6 (DBG_MANAGER, "%s: ProcessXidMessage: XID=%08x.%08x.%016llx -> %p(%p)\n",
56827 +             msgRail->Name, xid.Generation, xid.Handle, (long long) xid.Unique,
56828 +             xidCache  ? xidCache->MessageHandler : 0, xidCache  ? xidCache->Arg : 0);
56829 +    
56830 +    if (xidCache != NULL)
56831 +       xidCache->MessageHandler (xidCache->Arg, msg);
56832 +}
56833 +
56834 +static void
56835 +ProcessMessage (EP_RAIL *msgRail, void *arg, void *msgbuf)
56836 +{
56837 +    EP_SYS         *sys  = msgRail->System;
56838 +    EP_MANAGER_MSG *msg  = (EP_MANAGER_MSG *) msgbuf;
56839 +    uint16_t        csum = msg->Hdr.Checksum;
56840 +    EP_RAIL        *rail;
56841 +
56842 +    if (msg->Hdr.Version != EP_MANAGER_MSG_VERSION)
56843 +       return;
56844 +
56845 +    msg->Hdr.Checksum= 0;
56846 +    if (CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE) != csum)
56847 +    {
56848 +       printk ("%s: checksum failed on msg from %d (%d) (%x != %x) ?\n", msgRail->Name, msg->Hdr.NodeId, msg->Hdr.Type, csum, CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE));
56849 +       return;
56850 +    }
56851 +
56852 +    if ((rail = sys->Rails[msg->Hdr.Rail]) == NULL)
56853 +    {
56854 +       printk ("%s: rail no longer exists for msg from %d?\n", msgRail->Name, msg->Hdr.NodeId);
56855 +       return;
56856 +    }
56857 +
56858 +    EPRINTF7 (DBG_MANAGER, "%s: ProcessMessage (%s) type=%d node=%d XID=%08x.%08x.%016llx\n", 
56859 +             msgRail->Name, rail->Name, msg->Hdr.Type, msg->Hdr.NodeId,
56860 +             msg->Hdr.Xid.Generation, msg->Hdr.Xid.Handle, msg->Hdr.Xid.Unique);
56861 +
56862 +    switch (msg->Hdr.Type)
56863 +    {
56864 +    case EP_MANAGER_MSG_TYPE_REMOTE_PANIC:
56865 +       msg->Body.PanicReason[EP_PANIC_STRLEN] = '\0';          /* ensure string terminated */
56866 +
56867 +       printk ("%s: remote panic call from elan node %d - %s\n", msgRail->Name, msg->Hdr.NodeId, msg->Body.PanicReason);
56868 +       panic ("ep: remote panic request\n");
56869 +       break;
56870 +
56871 +    case EP_MANAGER_MSG_TYPE_NETERR_REQUEST:
56872 +       ProcessNeterrRequest (msgRail, rail, msg);
56873 +       break;
56874 +
56875 +    case EP_MANAGER_MSG_TYPE_NETERR_RESPONSE:
56876 +       ProcessNeterrResponse (msgRail, rail, msg);
56877 +       break;
56878 +
56879 +    case EP_MANAGER_MSG_TYPE_FLUSH_REQUEST:
56880 +       ProcessFlushRequest (msgRail, rail, msg);
56881 +       break;
56882 +
56883 +    case EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE:
56884 +       ProcessFlushResponse (msgRail, rail, msg);
56885 +       break;
56886 +
56887 +    case EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST:
56888 +       ProcessMapNmdRequest (msgRail, rail, msg);
56889 +       break;
56890 +
56891 +    case EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE:
56892 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
56893 +       break;
56894 +
56895 +    case EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST:
56896 +       ProcessXidMessage (msgRail, msg, msg->Body.Failover.Xid);
56897 +       break;
56898 +
56899 +    case EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE:
56900 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
56901 +       break;
56902 +       
56903 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE:
56904 +       ProcessGetNodeState (msgRail, rail, msg);
56905 +       break;
56906 +
56907 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE: 
56908 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
56909 +       break;
56910 +
56911 +    default:
56912 +       printk ("%s: Unknown message type %d from %d\n", msgRail->Name, msg->Hdr.Type, msg->Hdr.NodeId);
56913 +       break;
56914 +    }
56915 +}
56916 +
56917 +
56918 +static void
56919 +ManagerQueueEvent (EP_RAIL *rail, void *arg)
56920 +{
56921 +    ep_kthread_schedule ((EP_KTHREAD *) arg, lbolt);
56922 +}
56923 +
56924 +void
56925 +UpdateConnectionState (EP_RAIL *rail, statemap_t *map)
56926 +{
56927 +    EP_SYS *sys = rail->System;
56928 +    bitmap_t seg;
56929 +    int offset, nodeId;
56930 +    unsigned long flags;
56931 +    
56932 +    while ((offset = statemap_findchange (map, &seg, 1)) >= 0)
56933 +    {
56934 +       for (nodeId = offset; nodeId < (offset + BT_NBIPUL) && nodeId < rail->Position.pos_nodes; nodeId++)
56935 +       {
56936 +           EP_NODE      *node     = &sys->Nodes[nodeId];
56937 +           EP_NODE_RAIL *nodeRail = &rail->Nodes[nodeId];
56938 +
56939 +           if (statemap_getbits (map, nodeId, 1))
56940 +           {
56941 +               spin_lock_irqsave (&sys->NodeLock, flags);
56942 +
56943 +               switch (nodeRail->State)
56944 +               {
56945 +               case EP_NODE_DISCONNECTED:
56946 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Disconnected \n", rail->Name, nodeId);
56947 +                   break;
56948 +
56949 +               case EP_NODE_CONNECTING:
56950 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Connect\n", rail->Name, nodeId);
56951 +                   
56952 +                   /* load the route table entry *before*  setting the state
56953 +                    * to connected, since DMA's can be initiated as soon as
56954 +                    * the node is marked as connected */
56955 +                   rail->Operations.LoadNodeRoute (rail, nodeId);
56956 +                   
56957 +                   nodeRail->State = EP_NODE_CONNECTED;
56958 +                   
56959 +                   statemap_setbits (rail->NodeSet, nodeId, 1, 1);
56960 +                   if (statemap_getbits (sys->NodeSet, nodeId, 1) == 0)
56961 +                       statemap_setbits (sys->NodeSet, nodeId, 1, 1);
56962 +
56963 +                   /* Add to rails connected to this node */
56964 +                   node->ConnectedRails |= (1 << rail->Number);
56965 +
56966 +                   /* Finally lower the per-node context filter */
56967 +                   rail->Operations.LowerFilter (rail, nodeId);
56968 +                   break;
56969 +                   
56970 +               case EP_NODE_LEAVING_CONNECTED:
56971 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Local Passivate\n", rail->Name, nodeId);
56972 +                   
56973 +                   /* Raise the per-node context filter */
56974 +                   rail->Operations.RaiseFilter (rail, nodeId);
56975 +
56976 +                   /* If it's resolving network errors it will be on the NodeNeterrList,
56977 +                    * remove if from this list before placing it on the LocalPassivateList
56978 +                    * as we'll resolve the network error later in RemotePassivate */
56979 +                   if (nodeRail->NetworkErrorState)
56980 +                       list_del (&nodeRail->Link);
56981 +
56982 +                   list_add_tail (&nodeRail->Link, &rail->LocalPassivateList);
56983 +                   nodeRail->State = EP_NODE_LOCAL_PASSIVATE;
56984 +
56985 +                   /* Remove from rails connected to this node */
56986 +                   node->ConnectedRails &= ~(1 << rail->Number);
56987 +                   break;
56988 +
56989 +               default:
56990 +                   printk ("%s: Node %d - in NodeChangeMap with state %d\n", rail->Name, nodeId, nodeRail->State);
56991 +                   panic ("Node in NodeChangeMap with invalid state\n");
56992 +                   break;
56993 +               }
56994 +               spin_unlock_irqrestore (&sys->NodeLock, flags);
56995 +           }
56996 +       }
56997 +    }
56998 +}
56999 +
57000 +void
57001 +ProgressNetworkError (EP_RAIL *rail, EP_NODE_RAIL *nodeRail)
57002 +{
57003 +    EP_SYS             *sys    = rail->System;
57004 +    int                 nodeId = nodeRail - rail->Nodes;
57005 +    EP_MANAGER_MSG_BODY msg;
57006 +
57007 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_REMOTE_PASSIVATE);
57008 +
57009 +    if (BEFORE (lbolt, nodeRail->NextRunTime))
57010 +       return;
57011 +
57012 +    if (nodeRail->NetworkErrorState & EP_NODE_NETERR_DMA_PACKET)
57013 +       nodeRail->NetworkErrorState &= ~EP_NODE_NETERR_DMA_PACKET;
57014 +    
57015 +    if (nodeRail->NetworkErrorState & EP_NODE_NETERR_ATOMIC_PACKET)
57016 +    {
57017 +       if (EP_XID_INVALID (nodeRail->MsgXid))
57018 +           nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache);
57019 +
57020 +       msg.Cookies[0] = nodeRail->NetworkErrorCookies[0];
57021 +       msg.Cookies[1] = nodeRail->NetworkErrorCookies[1];
57022 +       
57023 +       EPRINTF4 (DBG_NETWORK_ERROR, "%s: progress neterr - node %d cookies %llx %llx\n", rail->Name, nodeId, msg.Cookies[0], msg.Cookies[1]);
57024 +
57025 +       if (ep_send_message (rail, nodeId, EP_MANAGER_MSG_TYPE_NETERR_REQUEST, nodeRail->MsgXid, &msg) == 0)
57026 +           nodeRail->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
57027 +       else
57028 +           nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
57029 +    }
57030 +}
57031 +
57032 +long
57033 +ProgressNodeLists (EP_RAIL *rail, long nextRunTime)
57034 +{
57035 +    EP_SYS           *sys = rail->System;
57036 +    struct list_head *el, *nel;
57037 +    unsigned long flags;
57038 +
57039 +    spin_lock_irqsave (&sys->NodeLock, flags);
57040 +    list_for_each_safe (el, nel, &rail->NetworkErrorList) {
57041 +       EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link);
57042 +       int           nodeId   = nodeRail - rail->Nodes;
57043 +
57044 +       ProgressNetworkError (rail, nodeRail);
57045 +
57046 +       if (nodeRail->NetworkErrorState == 0)
57047 +       {
57048 +           EPRINTF2 (DBG_NETWORK_ERROR, "%s: lower context filter for node %d due to network error\n", rail->Name, nodeId);
57049 +           printk ("%s: lower context filter for node %d due to network error\n", rail->Name, nodeId);
57050 +
57051 +           rail->Operations.LowerFilter (rail, nodeId);
57052 +
57053 +           list_del (&nodeRail->Link);
57054 +           continue;
57055 +       }
57056 +       
57057 +       if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
57058 +           nextRunTime = nodeRail->NextRunTime;
57059 +    }
57060 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
57061 +
57062 +    if (! list_empty (&rail->LocalPassivateList))
57063 +    {
57064 +       EPRINTF1 (DBG_MANAGER, "%s: Locally Passivating Nodes\n", rail->Name);
57065 +       
57066 +       /* We have disconnected from some nodes or have left ourselves
57067 +        * flush through all communications and determine whether we
57068 +        * need to perform rail failover */
57069 +       rail->Operations.FlushFilters (rail);
57070 +       
57071 +       ep_call_callbacks (rail, EP_CB_FLUSH_FILTERING, rail->NodeSet);
57072 +
57073 +       rail->Operations.FlushQueues (rail);
57074 +
57075 +       ep_call_callbacks (rail, EP_CB_FLUSH_FLUSHING, rail->NodeSet);
57076 +
57077 +       while (! list_empty (&rail->LocalPassivateList))
57078 +       {
57079 +           EP_NODE_RAIL *nodeRail = list_entry (rail->LocalPassivateList.next, EP_NODE_RAIL, Link);
57080 +           int           nodeId   = nodeRail - rail->Nodes;
57081 +
57082 +           list_del (&nodeRail->Link);
57083 +
57084 +           rail->Operations.UnloadNodeRoute (rail, nodeId);
57085 +           
57086 +           if (nodeRail->NetworkErrorState == 0 && nodeRail->MessageState == 0)
57087 +           {
57088 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Disconnecting\n", rail->Name, nodeId);
57089 +
57090 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
57091 +               nodeRail->State = EP_NODE_DISCONNECTING;
57092 +           }
57093 +           else
57094 +           {
57095 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Remote Passivate\n", rail->Name, nodeId);
57096 +
57097 +               list_add_tail (&nodeRail->Link, &rail->RemotePassivateList);
57098 +               nodeRail->State = EP_NODE_REMOTE_PASSIVATE;
57099 +
57100 +               if (nodeRail->NetworkErrorState == 0)
57101 +                   nodeRail->NextRunTime = lbolt;
57102 +           }
57103 +       }
57104 +
57105 +       ep_call_callbacks (rail, EP_CB_PASSIVATED, rail->NodeSet);
57106 +    }
57107 +
57108 +    list_for_each_safe (el, nel, &rail->RemotePassivateList) {
57109 +       EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link);
57110 +       int           nodeId   = nodeRail - rail->Nodes;
57111 +       EP_NODE      *node     = &sys->Nodes[nodeId];
57112 +
57113 +       if (node->ConnectedRails == 0)                          /* no rails connected to this node (anymore) */
57114 +       {
57115 +           /*  Remove from this list */
57116 +           list_del (&nodeRail->Link);
57117 +
57118 +           EPRINTF2 (DBG_MANAGER, "%s: Node %d, no rails, Remote Passivate -> Disconnecting\n", rail->Name, nodeId);
57119 +
57120 +           /* transition towards disconnected */
57121 +           list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
57122 +           nodeRail->State = EP_NODE_DISCONNECTING;
57123 +           continue;
57124 +       }
57125 +
57126 +       EPRINTF6 (DBG_MANAGER, "%s: Node %d - %s NetworkErrorState=%x NextRunTime=%lx (%lx)\n",
57127 +                 rail->Name, nodeId, NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState,
57128 +                 nodeRail->NextRunTime, nextRunTime);
57129 +
57130 +       if (nodeRail->NetworkErrorState)
57131 +       {
57132 +           ProgressNetworkError (rail, nodeRail);
57133 +       }
57134 +       else if (! BEFORE (lbolt, nodeRail->NextRunTime))
57135 +       {
57136 +           if (EP_XID_INVALID (nodeRail->MsgXid))
57137 +               nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache);
57138 +
57139 +           if (ep_send_message (rail, nodeId, EP_MANAGER_MSG_TYPE_FLUSH_REQUEST, nodeRail->MsgXid, NULL) == 0)
57140 +               nodeRail->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
57141 +           else
57142 +               nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
57143 +       }
57144 +
57145 +       if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
57146 +           nextRunTime = nodeRail->NextRunTime;
57147 +    }
57148 +    
57149 +    if (! list_empty (&rail->PassivatedList)) 
57150 +    {
57151 +       ep_call_callbacks (rail, EP_CB_FAILOVER, rail->NodeSet);
57152 +
57153 +       list_for_each_safe (el, nel, &rail->PassivatedList) {
57154 +           EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link);
57155 +           int           nodeId   = nodeRail - rail->Nodes;
57156 +           EP_NODE      *node     = &sys->Nodes[nodeId];
57157 +
57158 +           ASSERT (nodeRail->NetworkErrorState == 0);
57159 +
57160 +           if (node->ConnectedRails == 0)
57161 +           {
57162 +               /*  Remove from this list */
57163 +               list_del (&nodeRail->Link);
57164 +
57165 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d, no rails, Passivated -> Disconnecting\n", rail->Name, nodeId);
57166 +
57167 +               /* transition towards disconnected */
57168 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
57169 +               nodeRail->State = EP_NODE_DISCONNECTING;
57170 +               continue;
57171 +           }
57172 +           
57173 +           EPRINTF6 (DBG_MANAGER, "%s: Node %d - %s NetworkErrorState=%x NextRunTime=%lx (%lx)\n",
57174 +                     rail->Name, nodeId, NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState,
57175 +                     nodeRail->NextRunTime, nextRunTime);
57176 +
57177 +           if (nodeRail->MessageState == 0)
57178 +           {
57179 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d, no messages, Passivated -> Disconnecting\n", rail->Name,nodeId);
57180 +
57181 +               list_del (&nodeRail->Link);
57182 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
57183 +               nodeRail->State = EP_NODE_DISCONNECTING;
57184 +               continue;
57185 +           }
57186 +
57187 +           nodeRail->MessageState = 0;
57188 +           nodeRail->NextRunTime  = lbolt + FAILOVER_RETRY_TIME;
57189 +
57190 +           if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
57191 +               nextRunTime = nodeRail->NextRunTime;
57192 +       }
57193 +    }
57194 +
57195 +    if (! list_empty (&rail->DisconnectingList))
57196 +    {
57197 +       ep_call_callbacks (rail, EP_CB_DISCONNECTING, rail->NodeSet);
57198 +
57199 +       while (! list_empty (&rail->DisconnectingList))
57200 +       {
57201 +           EP_NODE_RAIL *nodeRail = list_entry (rail->DisconnectingList.next, EP_NODE_RAIL, Link);
57202 +           int           nodeId   = nodeRail - rail->Nodes;
57203 +           EP_NODE      *node     = &sys->Nodes[nodeId];
57204 +
57205 +           EPRINTF2 (DBG_MANAGER, "%s: Node %d, Disconnecting -> Disconnected\n", rail->Name, nodeId);
57206 +
57207 +           list_del (&nodeRail->Link);
57208 +
57209 +           rail->Operations.NodeDisconnected (rail, nodeId);
57210 +
57211 +           /* Clear the network error state */
57212 +           nodeRail->NextRunTime            = 0;
57213 +           nodeRail->NetworkErrorState      = 0;
57214 +           nodeRail->NetworkErrorCookies[0] = 0;
57215 +           nodeRail->NetworkErrorCookies[1] = 0;
57216 +
57217 +           /* Clear the message state */
57218 +           nodeRail->MessageState = 0;
57219 +
57220 +           cm_node_disconnected (rail, nodeId);
57221 +
57222 +           nodeRail->State = EP_NODE_DISCONNECTED;
57223 +           
57224 +           statemap_setbits (rail->NodeSet, nodeId, 0, 1);
57225 +
57226 +           if (node->ConnectedRails == 0)
57227 +               statemap_setbits (sys->NodeSet, nodeId, 0, 1);
57228 +       }
57229 +
57230 +       ep_call_callbacks (rail, EP_CB_DISCONNECTED, rail->NodeSet);
57231 +    }
57232 +
57233 +    return (nextRunTime);
57234 +}
57235 +
57236 +void
57237 +DisplayNodes (EP_RAIL *rail)
57238 +{
57239 +    EP_SYS *sys = rail->System;
57240 +    int i, state, count;
57241 +    unsigned long flags;
57242 +
57243 +    spin_lock_irqsave (&sys->NodeLock, flags);
57244 +
57245 +    for (state = 0; state < EP_NODE_NUM_STATES; state++)
57246 +    {
57247 +       for (count = i = 0; i < rail->Position.pos_nodes; i++)
57248 +       {
57249 +           ASSERT (rail->Nodes[i].State < EP_NODE_NUM_STATES);
57250 +
57251 +           if (rail->Nodes[i].State == state)
57252 +               if (state != EP_NODE_DISCONNECTED)
57253 +                   printk ("%s %d", !count++ ? NodeStateNames[state] : "", i);
57254 +       }
57255 +       if (count)
57256 +           printk ("%s (%d total)\n", state == EP_NODE_DISCONNECTED ? NodeStateNames[state] : "", count);
57257 +    }
57258 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
57259 +}
57260 +
57261 +static void
57262 +PositionFound (EP_RAIL *rail, ELAN_POSITION *pos)
57263 +{
57264 +    EP_SYS           *sys = rail->System;
57265 +    struct list_head *el;
57266 +    int i;
57267 +
57268 +    /* only called from the ep_manager whilst rail->State == EP_RAIL_STATE_STARTED */
57269 +    ASSERT ( rail->State == EP_RAIL_STATE_STARTED );
57270 +
57271 +#if defined(PER_CPU_TIMEOUT)
57272 +    /*
57273 +     * On Tru64 - if we're running in a "funnelled" thread, then we will be 
57274 +     * unable to start the per-cpu timeouts, so if we return then eventually
57275 +     * the ep_manager() thread will find the network position and we're
57276 +     * in control of our own destiny.
57277 +     */
57278 +    if (THREAD_IS_FUNNELED(current_thread()))
57279 +    {
57280 +       ep_kthread_schedule (&sys->ManagerThread, lbolt);
57281 +       return;
57282 +    }
57283 +#endif
57284 +
57285 +    sprintf (rail->Name, "ep%d[%d]", rail->Number, pos->pos_nodeid);
57286 +
57287 +    if (pos->pos_levels > MaxSwitchLevels)
57288 +    {
57289 +       for (i = 0; i < (pos->pos_levels - MaxSwitchLevels); i++)
57290 +           pos->pos_nodes /= pos->pos_arity[i];
57291 +
57292 +       for (i = 0; i < MaxSwitchLevels; i++)
57293 +           pos->pos_arity[i] = pos->pos_arity[i + (pos->pos_levels - MaxSwitchLevels)];
57294 +
57295 +       pos->pos_levels = MaxSwitchLevels;
57296 +       pos->pos_nodeid = pos->pos_nodeid % pos->pos_nodes;
57297 +                               
57298 +       printk ("%s: limiting switch levels to %d\n", rail->Name, MaxSwitchLevels);
57299 +       printk ("%s: nodeid=%d level=%d numnodes=%d\n", rail->Name, pos->pos_nodeid, pos->pos_levels, pos->pos_nodes);
57300 +
57301 +       sprintf (rail->Name, "ep%d[%d]", rail->Number, pos->pos_nodeid);
57302 +    }
57303 +
57304 +    if (rail->Position.pos_mode != ELAN_POS_UNKNOWN && rail->Position.pos_nodeid != pos->pos_nodeid)
57305 +    {
57306 +       printk ("%s: NodeId has changed from %d to %d\n", rail->Name, rail->Position.pos_nodeid, pos->pos_nodeid);
57307 +       panic ("ep: PositionFound: NodeId has changed\n");
57308 +    }
57309 +
57310 +    if (sys->Position.pos_mode != ELAN_POS_UNKNOWN && (sys->Position.pos_nodeid != pos->pos_nodeid || sys->Position.pos_nodes != pos->pos_nodes))
57311 +    {
57312 +       printk ("%s: position incompatible - disabling rail\n", rail->Name);
57313 +       rail->State = EP_RAIL_STATE_INCOMPATIBLE;
57314 +       return;
57315 +    }
57316 +
57317 +    if (sys->Position.pos_mode == ELAN_POS_UNKNOWN)
57318 +    {
57319 +       sys->Position = *pos;
57320 +       sys->NodeSet  = statemap_create (pos->pos_nodes);
57321 +       KMEM_ZALLOC (sys->Nodes, EP_NODE *, pos->pos_nodes * sizeof (EP_NODE), 1);
57322 +    }
57323 +
57324 +    rail->Position             = *pos;
57325 +    rail->SwitchBroadcastLevel = pos->pos_levels - 1;
57326 +    rail->State                = EP_RAIL_STATE_RUNNING;
57327 +
57328 +    for (i = 0; i < pos->pos_levels; i++)
57329 +    {
57330 +       rail->SwitchProbeTick[i]   = lbolt;
57331 +       rail->SwitchLast[i].uplink = 4;
57332 +    }
57333 +
57334 +    rail->Operations.PositionFound (rail, pos);
57335 +
57336 +    INIT_LIST_HEAD (&rail->NetworkErrorList);
57337 +    INIT_LIST_HEAD (&rail->LocalPassivateList);
57338 +    INIT_LIST_HEAD (&rail->RemotePassivateList);
57339 +    INIT_LIST_HEAD (&rail->PassivatedList);
57340 +    INIT_LIST_HEAD (&rail->DisconnectingList);
57341 +
57342 +    rail->NodeSet       = statemap_create (rail->Position.pos_nodes);
57343 +    rail->NodeChangeMap = statemap_create (rail->Position.pos_nodes);
57344 +    rail->NodeChangeTmp = statemap_create (rail->Position.pos_nodes);
57345 +
57346 +    KMEM_ZALLOC (rail->Nodes, EP_NODE_RAIL *, rail->Position.pos_nodes * sizeof (EP_NODE_RAIL), 1);
57347 +
57348 +    for (i = 0; i < rail->Position.pos_nodes; i++)
57349 +    {
57350 +       spin_lock_init (&rail->Nodes[i].CookieLock);
57351 +
57352 +       INIT_LIST_HEAD (&rail->Nodes[i].StalledDmas);
57353 +
57354 +       rail->Nodes[i].State = EP_NODE_DISCONNECTED;
57355 +    }
57356 +
57357 +    /* Notify all subsystems that a new rail has been enabled */
57358 +    kmutex_lock (&sys->SubsysLock);
57359 +    list_for_each (el, &sys->Subsystems) { 
57360 +       EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
57361 +
57362 +       if (subsys->AddRail)
57363 +           subsys->AddRail (subsys, sys, rail);
57364 +
57365 +       /* XXXX: what to do if the subsystem refused to add the rail ? */
57366 +    }
57367 +    kmutex_unlock (&sys->SubsysLock);
57368 +
57369 +    /* Now enable the manager input queue */
57370 +    ep_enable_inputq (rail, rail->ManagerInputQ);
57371 +}
57372 +
57373 +static void
57374 +ep_manager (void *arg)
57375 +{
57376 +    EP_SYS            *sys = (EP_SYS *) arg;
57377 +    struct list_head *el;
57378 +    ELAN_POSITION     pos;
57379 +    unsigned long     flags;
57380 +
57381 +    kernel_thread_init ("ep_manager");
57382 +    kernel_thread_become_highpri();
57383 +
57384 +    for (;;)
57385 +    {
57386 +       long nextRunTime = lbolt + MSEC2TICKS(CM_THREAD_SCHEDULE_TIMEOUT);
57387 +
57388 +       list_for_each (el, &sys->ManagedRails) {
57389 +           EP_RAIL *rail = list_entry (el, EP_RAIL, ManagerLink);
57390 +
57391 +           switch (rail->State)
57392 +           {
57393 +           case EP_RAIL_STATE_STARTED:
57394 +               if (ProbeNetwork (rail, &pos) == 0)
57395 +               {
57396 +                   PositionFound (rail, &pos);
57397 +                   break;
57398 +               }
57399 +
57400 +               if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + HZ))
57401 +                   nextRunTime = lbolt + HZ;
57402 +               break;
57403 +
57404 +           case EP_RAIL_STATE_RUNNING:
57405 +               if (ep_poll_inputq (rail, rail->ManagerInputQ, 100, ProcessMessage, rail) >= 100)
57406 +                   nextRunTime = lbolt;
57407 +               
57408 +               /* Handle any nodes which the cluster membership subsystem
57409 +                * has indicated are to begin connecting or disconnecting */
57410 +               spin_lock_irqsave (&sys->NodeLock, flags);
57411 +               if (! statemap_changed (rail->NodeChangeMap))
57412 +                   spin_unlock_irqrestore (&sys->NodeLock, flags);
57413 +               else
57414 +               {
57415 +                   /*
57416 +                    * Take a copy of the statemap, and zero all entries so
57417 +                    * we only see new requests next time
57418 +                    */
57419 +                   statemap_copy (rail->NodeChangeTmp, rail->NodeChangeMap);
57420 +                   statemap_zero (rail->NodeChangeMap);
57421 +                   spin_unlock_irqrestore (&sys->NodeLock, flags);
57422 +                   
57423 +                   UpdateConnectionState (rail, rail->NodeChangeTmp);
57424 +               }
57425 +
57426 +               nextRunTime = ProgressNodeLists (rail, nextRunTime);
57427 +
57428 +               if (statemap_changed (rail->NodeSet))
57429 +               {
57430 +                   ep_call_callbacks (rail, EP_CB_NODESET, rail->NodeSet);
57431 +
57432 +                   statemap_clearchanges (rail->NodeSet);
57433 +               }
57434 +               break;
57435 +
57436 +           case EP_RAIL_STATE_INCOMPATIBLE:
57437 +               break;
57438 +           }
57439 +       }
57440 +
57441 +
57442 +       EPRINTF5 (DBG_MANAGER, "ep_manager: sleep now=%lx nextRunTime=%lx (%ld) [%lx (%ld)]\n",
57443 +                 lbolt, nextRunTime, nextRunTime ? nextRunTime - lbolt : 0, sys->ManagerThread.next_run,
57444 +                 sys->ManagerThread.next_run ? sys->ManagerThread.next_run - lbolt : 0);
57445 +
57446 +       if (ep_kthread_sleep (&sys->ManagerThread, nextRunTime) < 0)
57447 +           break;
57448 +    }
57449 +
57450 +    ep_kthread_stopped (&sys->ManagerThread);
57451 +    kernel_thread_exit();
57452 +}
57453 +
57454 +void
57455 +ep_connect_node (EP_RAIL *rail, int nodeId)
57456 +{
57457 +    EP_SYS       *sys  = rail->System;
57458 +    EP_NODE_RAIL *node = &rail->Nodes[nodeId];
57459 +    unsigned long flags;
57460 +  
57461 +    spin_lock_irqsave (&sys->NodeLock, flags);
57462 +
57463 +    EPRINTF2 (DBG_MANAGER, "%s: ep_connect_node: nodeId %d\n", rail->Name, nodeId);
57464 +
57465 +    ASSERT (node->State == EP_NODE_DISCONNECTED && statemap_getbits (rail->NodeChangeMap, nodeId, 1) == 0);
57466 +    
57467 +    node->State = EP_NODE_CONNECTING;
57468 +
57469 +    statemap_setbits (rail->NodeChangeMap, nodeId, 1, 1);
57470 +
57471 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
57472 +
57473 +    ep_kthread_schedule (&sys->ManagerThread, lbolt);
57474 +}
57475 +
57476 +int
57477 +ep_disconnect_node (EP_RAIL *rail, int nodeId)
57478 +{
57479 +    EP_SYS       *sys  = rail->System;
57480 +    EP_NODE_RAIL *node = &rail->Nodes[nodeId];
57481 +    int                  state;
57482 +    unsigned long flags;
57483 +  
57484 +    spin_lock_irqsave (&sys->NodeLock, flags);
57485 +    
57486 +    EPRINTF3 (DBG_MANAGER, "%s: ep_disconnect_node: nodeId %d - %s\n", rail->Name, nodeId, NodeStateNames[node->State]);
57487 +
57488 +    switch (state = node->State)
57489 +    {
57490 +    case EP_NODE_CONNECTING:
57491 +       statemap_setbits (rail->NodeChangeMap, nodeId, 0, 1);
57492 +
57493 +       node->State = EP_NODE_DISCONNECTED;
57494 +       break;
57495 +       
57496 +    case EP_NODE_CONNECTED:
57497 +       statemap_setbits (rail->NodeChangeMap, nodeId, 1, 1);
57498 +
57499 +       node->State = EP_NODE_LEAVING_CONNECTED;
57500 +       break;
57501 +
57502 +    case EP_NODE_LEAVING_CONNECTED:
57503 +       /* no assert on NodeChangeMap as the map could have been taken but not acted on */
57504 +       break;
57505 +       
57506 +    default:
57507 +       ASSERT (statemap_getbits (rail->NodeChangeMap, nodeId, 1) == 0);
57508 +       break;
57509 +    }
57510 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
57511 +
57512 +    if (state == EP_NODE_CONNECTED)
57513 +       ep_kthread_schedule (&sys->ManagerThread, lbolt);
57514 +
57515 +    return state;
57516 +}
57517 +
57518 +int
57519 +ep_manager_add_rail (EP_SYS *sys, EP_RAIL *rail)
57520 +{
57521 +    if ((rail->ManagerOutputQ = ep_alloc_outputq (rail, EP_MANAGER_MSG_SIZE, EP_MANAGER_OUTPUTQ_SLOTS)) == NULL)
57522 +       return -ENOMEM;
57523 +
57524 +    if ((rail->ManagerInputQ = ep_alloc_inputq (rail, EP_SYSTEMQ_MANAGER, EP_MANAGER_MSG_SIZE, EP_MANAGER_INPUTQ_SLOTS,
57525 +                                                ManagerQueueEvent, &sys->ManagerThread)) == NULL)
57526 +    {
57527 +       ep_free_outputq (rail, rail->ManagerOutputQ);
57528 +       return -ENOMEM;
57529 +    }
57530 +
57531 +    spin_lock_init (&rail->ManagerOutputQLock);
57532 +
57533 +    ep_xid_cache_init (sys, &rail->XidCache);
57534 +
57535 +    ep_kthread_stall (&sys->ManagerThread);
57536 +    list_add_tail (&rail->ManagerLink, &sys->ManagedRails);
57537 +    ep_kthread_resume (&sys->ManagerThread);
57538 +
57539 +    return (0);
57540 +}
57541 +
57542 +void
57543 +ep_manager_remove_rail (EP_SYS *sys, EP_RAIL *rail)
57544 +{
57545 +    if (rail->ManagerInputQ != NULL)
57546 +    {
57547 +       ep_kthread_stall (&sys->ManagerThread);
57548 +       list_del (&rail->ManagerLink);
57549 +       ep_kthread_resume (&sys->ManagerThread);
57550 +
57551 +       ep_xid_cache_destroy (sys, &rail->XidCache);
57552 +
57553 +       spin_lock_destroy (&rail->ManagerOutputQLock);
57554 +
57555 +       ep_disable_inputq (rail, rail->ManagerInputQ);
57556 +       ep_free_inputq (rail, rail->ManagerInputQ);
57557 +       ep_free_outputq (rail, rail->ManagerOutputQ);
57558 +    }
57559 +}
57560 +
57561 +int
57562 +ep_manager_init (EP_SYS *sys)
57563 +{
57564 +    INIT_LIST_HEAD (&sys->ManagedRails);
57565 +
57566 +    ep_kthread_init (&sys->ManagerThread);
57567 +
57568 +    if (kernel_thread_create (ep_manager, (void *) sys) == 0)
57569 +       return (ENOMEM);
57570 +    
57571 +    ep_kthread_started (&sys->ManagerThread);
57572 +    
57573 +    return (0);
57574 +}
57575 +
57576 +void
57577 +ep_manager_fini (EP_SYS *sys)
57578 +{
57579 +    ep_kthread_stop (&sys->ManagerThread);
57580 +    ep_kthread_destroy (&sys->ManagerThread);
57581 +}
57582 +
57583 +int
57584 +ep_sys_init (EP_SYS *sys)
57585 +{
57586 +    kmutex_init (&sys->SubsysLock);   
57587 +    kmutex_init (&sys->StartStopLock);
57588 +    spin_lock_init (&sys->NodeLock);
57589 +
57590 +    INIT_LIST_HEAD (&sys->Subsystems);
57591 +
57592 +    /* initialise the xid allocators */
57593 +    spin_lock_init (&sys->XidLock);
57594 +    INIT_LIST_HEAD (&sys->XidCacheList);
57595 +
57596 +    /* initially don't know where we are in the network */
57597 +    sys->Position.pos_mode = ELAN_POS_UNKNOWN;
57598 +
57599 +    /* initialise the network mapping descriptor hash tables */
57600 +    ep_nmh_init (&sys->MappingTable);
57601 +
57602 +    /* initialise the shared allocators */
57603 +    ep_shared_alloc_init (sys);
57604 +
57605 +    /* initialise the dvma space */
57606 +    ep_dvma_init (sys);
57607 +
57608 +    /* initialise the rail manager */
57609 +    ep_manager_init (sys);
57610 +
57611 +    /* initialise all subsystems */
57612 +    cm_init (sys);
57613 +    ep_comms_init (sys);
57614 +    //ep_msgsys_init (sys);
57615 +
57616 +    return (0);
57617 +}
57618 +
57619 +void
57620 +ep_sys_fini (EP_SYS *sys)
57621 +{
57622 +    /* Destroy the subsystems in the reverse order to their creation */
57623 +    while (! list_empty (&sys->Subsystems))
57624 +    {
57625 +       EP_SUBSYS *subsys = list_entry (sys->Subsystems.prev, EP_SUBSYS, Link);
57626 +
57627 +       list_del (&subsys->Link);
57628 +       
57629 +       subsys->Destroy (subsys, sys);
57630 +    }
57631 +
57632 +    ep_manager_fini(sys);
57633 +    ep_dvma_fini (sys);
57634 +    ep_shared_alloc_fini (sys);
57635 +
57636 +    ep_nmh_fini (&sys->MappingTable);
57637 +
57638 +    if (sys->Position.pos_mode != ELAN_POS_UNKNOWN) {
57639 +       statemap_destroy (sys->NodeSet);
57640 +       KMEM_FREE(sys->Nodes, sys->Position.pos_nodes * sizeof (EP_NODE));
57641 +    }
57642 +
57643 +    spin_lock_destroy (&sys->XidLock);
57644 +
57645 +    spin_lock_destroy (&sys->NodeLock);
57646 +    kmutex_destroy (&sys->SubsysLock);
57647 +    kmutex_destroy (&sys->StartStopLock);
57648 +}
57649 +
57650 +void
57651 +ep_shutdown (EP_SYS *sys)
57652 +{
57653 +    sys->Shutdown = 1;
57654 +}
57655 +
57656 +int
57657 +ep_init_rail (EP_SYS *sys, EP_RAIL *rail)
57658 +{
57659 +    static int rnum;
57660 +
57661 +    rail->System              = sys;
57662 +    rail->State               = EP_RAIL_STATE_UNINITIALISED;
57663 +    rail->Number              = rnum++;
57664 +    rail->Position.pos_mode   = ELAN_POS_UNKNOWN;
57665 +    rail->Position.pos_nodeid = ELAN_INVALID_NODE;
57666 +
57667 +    rail->CallbackRegistered  = 0;
57668 +
57669 +    sprintf (rail->Name, "ep%d", rail->Number);
57670 +
57671 +    /* Initialise externally visible locks */
57672 +    kmutex_init (&rail->CallbackLock);
57673 +
57674 +    ep_alloc_init (rail);
57675 +
57676 +    sys->Rails[rail->Number] = rail;
57677 +
57678 +    return 0;
57679 +}
57680 +
57681 +void
57682 +ep_destroy_rail (EP_RAIL *rail)
57683 +{
57684 +    ASSERT (rail->State == EP_RAIL_STATE_UNINITIALISED);
57685 +
57686 +    ep_alloc_fini (rail);
57687 +
57688 +    kmutex_destroy (&rail->CallbackLock);
57689 +
57690 +    rail->System->Rails[rail->Number] = NULL;
57691 +
57692 +    rail->Operations.DestroyRail (rail);
57693 +}
57694 +
57695 +/* We need to traverse the Subsystems lists backwards
57696 + * but it's not defined in <linux/list.h> */
57697 +#define list_for_each_backwards(pos,list) \
57698 +       for (pos = (list)->prev; pos != (list); \
57699 +            pos = (pos)->prev)
57700 +
57701 +void
57702 +__ep_stop_rail (EP_RAIL *rail)
57703 +{
57704 +    /* called holding the sys->Lock */
57705 +    EP_SYS           *sys = rail->System;
57706 +    struct list_head *el;
57707 +
57708 +    rail->Operations.StallRail (rail);
57709 +
57710 +    /* Notify all subsystems that this rail is being stopped */
57711 +    if (rail->State == EP_RAIL_STATE_RUNNING)
57712 +    {
57713 +       kmutex_lock (&sys->SubsysLock);
57714 +       list_for_each_backwards (el, &sys->Subsystems) { 
57715 +           EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
57716 +           
57717 +           if (subsys->RemoveRail)
57718 +               subsys->RemoveRail (subsys, sys, rail);
57719 +       }
57720 +       kmutex_unlock (&sys->SubsysLock);
57721 +
57722 +       ep_manager_remove_rail (sys, rail);
57723 +
57724 +       KMEM_FREE (rail->Nodes, rail->Position.pos_nodes * sizeof (EP_NODE_RAIL));
57725 +
57726 +       statemap_destroy (rail->NodeChangeTmp);
57727 +       statemap_destroy (rail->NodeChangeMap);
57728 +       statemap_destroy (rail->NodeSet);
57729 +    }
57730 +
57731 +    ep_dvma_remove_rail (sys, rail);
57732 +    ep_shared_alloc_remove_rail (sys, rail);
57733 +
57734 +    rail->Operations.StopRail (rail);
57735 +
57736 +    rail->State = EP_RAIL_STATE_UNINITIALISED;
57737 +}
57738 +
57739 +void
57740 +ep_stop_rail (EP_RAIL *rail)
57741 +{
57742 +    EP_SYS *sys = rail->System;
57743 +
57744 +    /* stall ep_manager                      */
57745 +    /* and remove the rail from the manager */
57746 +
57747 +    ep_kthread_stall (&sys->ManagerThread);
57748 +    if ( rail->State == EP_RAIL_STATE_STARTED ) 
57749 +        ep_manager_remove_rail (sys, rail);
57750 +    ep_kthread_resume (&sys->ManagerThread);
57751 +
57752 +    __ep_stop_rail (rail);
57753 +}
57754 +
57755 +int
57756 +ep_start_rail (EP_RAIL *rail)
57757 +{
57758 +    EP_SYS *sys = rail->System;
57759 +
57760 +    ASSERT (rail->State == EP_RAIL_STATE_UNINITIALISED);
57761 +
57762 +    if (rail->Operations.StartRail (rail) < 0)
57763 +       return -ENXIO;
57764 +    
57765 +    kmutex_lock (&sys->StartStopLock);
57766 +    /* Add this rail to the shared allocator */
57767 +    if (ep_shared_alloc_add_rail (rail->System, rail))
57768 +       goto failed;
57769 +
57770 +    /* Add this rail to dvma kmap */
57771 +    if (ep_dvma_add_rail (rail->System, rail))
57772 +       goto failed;
57773 +
57774 +    /* rail is now started */
57775 +    rail->State = EP_RAIL_STATE_STARTED;
57776 +
57777 +    /* notify the rail manager of the new rail */
57778 +    if (ep_manager_add_rail (rail->System, rail))
57779 +       goto failed;
57780 +
57781 +    kmutex_unlock (&sys->StartStopLock);
57782 +    return (ESUCCESS);
57783 +
57784 + failed:
57785 +    printk ("%s: start failed\n", rail->Name);
57786 +    kmutex_unlock (&sys->StartStopLock);
57787 +    __ep_stop_rail (rail);
57788 +
57789 +    return (ENOMEM);   
57790 +}
57791 +
57792 +void
57793 +ep_subsys_add (EP_SYS *sys, EP_SUBSYS *subsys)
57794 +{
57795 +    kmutex_lock (&sys->SubsysLock);
57796 +    list_add_tail (&subsys->Link, &sys->Subsystems);
57797 +    kmutex_unlock (&sys->SubsysLock);
57798 +}
57799 +
57800 +void
57801 +ep_subsys_del (EP_SYS *sys, EP_SUBSYS *subsys)
57802 +{
57803 +    kmutex_lock (&sys->SubsysLock);
57804 +    list_del (&subsys->Link);
57805 +    kmutex_unlock (&sys->SubsysLock);
57806 +}
57807 +
57808 +EP_SUBSYS *
57809 +ep_subsys_find (EP_SYS *sys, char *name)
57810 +{
57811 +    struct list_head *el;
57812 +
57813 +    ASSERT ( !in_interrupt());
57814 +
57815 +    kmutex_lock (&sys->SubsysLock); 
57816 +    list_for_each (el, &sys->Subsystems) {
57817 +       EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
57818 +
57819 +       if (! strcmp (subsys->Name, name))
57820 +       {
57821 +           kmutex_unlock (&sys->SubsysLock);
57822 +           return (subsys);
57823 +       }
57824 +    }
57825 +
57826 +    kmutex_unlock (&sys->SubsysLock);
57827 +    return (NULL);
57828 +}
57829 +
57830 +int
57831 +ep_waitfor_nodeid (EP_SYS *sys)
57832 +{
57833 +    int i, printed = 0;
57834 +    kcondvar_t Sleep;
57835 +    spinlock_t Lock;
57836 +
57837 +    kcondvar_init (&Sleep);
57838 +    spin_lock_init (&Lock);
57839 +
57840 +#define TICKS_TO_WAIT  (10*hz)
57841 +#define TICKS_PER_LOOP (hz/10)
57842 +    for (i = 0; sys->Position.pos_mode == ELAN_POS_UNKNOWN && i < TICKS_TO_WAIT; i += TICKS_PER_LOOP)
57843 +    {
57844 +       if (! printed++)
57845 +           printk ("ep: waiting for network position to be found\n");
57846 +
57847 +       spin_lock (&Lock);
57848 +       kcondvar_timedwait (&Sleep, &Lock, NULL, lbolt + TICKS_PER_LOOP);
57849 +       spin_unlock (&Lock);
57850 +    }
57851 +
57852 +    if (sys->Position.pos_mode == ELAN_POS_UNKNOWN)
57853 +       printk ("ep: network position not found after waiting\n");
57854 +    else if (printed)
57855 +       printk ("ep: network position found at nodeid %d\n", sys->Position.pos_nodeid);
57856 +
57857 +    spin_lock_destroy (&Lock);
57858 +    kcondvar_destroy (&Sleep);
57859 +
57860 +    return (sys->Position.pos_mode == ELAN_POS_UNKNOWN ? ELAN_INVALID_NODE : sys->Position.pos_nodeid);
57861 +}
57862 +
57863 +int
57864 +ep_nodeid (EP_SYS *sys)
57865 +{
57866 +    return (sys->Position.pos_mode == ELAN_POS_UNKNOWN ? ELAN_INVALID_NODE : sys->Position.pos_nodeid);
57867 +}
57868 +
57869 +int
57870 +ep_numnodes (EP_SYS *sys)
57871 +{
57872 +    return (sys->Position.pos_nodes);
57873 +}
57874 +
57875 +void
57876 +ep_fillout_stats(EP_RAIL *r, char *str) 
57877 +{
57878 +    sprintf(str+strlen(str),"SendMessageFailed %lu NeterrAtomicPacket %lu NeterrDmaPacket %lu \n", r->Stats.SendMessageFailed, r->Stats.NeterrAtomicPacket, r->Stats.NeterrDmaPacket);
57879 +    sprintf(str+strlen(str),"Rx %lu  %lu /sec\n",   GET_STAT_TOTAL(r->Stats,rx), GET_STAT_PER_SEC(r->Stats,rx) ); 
57880 +    sprintf(str+strlen(str),"MBytes %lu  %lu MB/sec\n", GET_STAT_TOTAL(r->Stats,rx_len)/ (1024*1024), GET_STAT_PER_SEC(r->Stats,rx_len) / (1024*1024)); 
57881 +    sprintf(str+strlen(str),"Tx %lu  %lu /sec\n",   GET_STAT_TOTAL(r->Stats,tx), GET_STAT_PER_SEC(r->Stats,tx) ); 
57882 +    sprintf(str+strlen(str),"MBytes %lu  %lu MB/sec\n", GET_STAT_TOTAL(r->Stats,tx_len)/ (1024*1024), GET_STAT_PER_SEC(r->Stats,tx_len) / (1024*1024)); 
57883 +}
57884 +
57885 +
57886 +/*
57887 + * Local variables:
57888 + * c-file-style: "stroustrup"
57889 + * End:
57890 + */
57891 Index: linux-2.4.21/drivers/net/qsnet/ep/kcomm_elan3.c
57892 ===================================================================
57893 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kcomm_elan3.c        2004-02-23 16:02:56.000000000 -0500
57894 +++ linux-2.4.21/drivers/net/qsnet/ep/kcomm_elan3.c     2005-06-01 23:12:54.665429072 -0400
57895 @@ -0,0 +1,504 @@
57896 +
57897 +/*
57898 + *    Copyright (c) 2003 by Quadrics Ltd.
57899 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
57900 + *
57901 + *    For licensing information please see the supplied COPYING file
57902 + *
57903 + */
57904 +
57905 +#ident "@(#)$Id: kcomm_elan3.c,v 1.31.8.3 2004/11/30 12:02:17 mike Exp $"
57906 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan3.c,v $ */
57907 +
57908 +#include <qsnet/kernel.h>
57909 +
57910 +#include <elan/kcomm.h>
57911 +
57912 +#include "kcomm_vp.h"
57913 +#include "kcomm_elan3.h"
57914 +#include "conf_linux.h"
57915 +
57916 +extern EP_CODE threadcode_elan3;
57917 +
57918 +unsigned int
57919 +ep3_create_rails (EP_SYS *sys, unsigned int disabled)
57920 +{
57921 +    unsigned int rmask = 0;
57922 +    ELAN3_DEV   *dev;
57923 +    EP_RAIL     *rail;
57924 +    int          i;
57925 +
57926 +    for (i = 0; i < EP_MAX_RAILS; i++)
57927 +    {
57928 +       if ((dev = elan3_device (i)) != NULL)
57929 +       {
57930 +           if ((rail = ep3_create_rail (sys, dev)) != NULL)
57931 +           {
57932 +               if (disabled & (1 << rail->Number))
57933 +                   printk ("%s: auto-start of device disabled by configuration\n", rail->Name);
57934 +               else
57935 +                   ep_start_rail (rail);
57936 +               
57937 +               ep_procfs_rail_init(rail);
57938 +
57939 +               rmask |= (1 << rail->Number);
57940 +           }
57941 +       }
57942 +    }
57943 +
57944 +    return rmask;
57945 +}
57946 +
57947 +EP_RAIL *
57948 +ep3_create_rail (EP_SYS *sys, ELAN3_DEV *dev)
57949 +{
57950 +    EP3_RAIL *rail;
57951 +    int       res;
57952 +
57953 +    KMEM_ZALLOC (rail, EP3_RAIL *, sizeof (EP3_RAIL), TRUE);
57954 +
57955 +    if (rail == NULL)
57956 +       return (EP_RAIL *) NULL;
57957 +
57958 +    if ((res = ep_init_rail (sys, &rail->Generic)) != 0)
57959 +    {
57960 +       KMEM_FREE (rail, sizeof (EP3_RAIL));
57961 +       return (EP_RAIL *) NULL;
57962 +    }
57963 +
57964 +    rail->Device = dev;
57965 +
57966 +    /* Install our rail operations */
57967 +    rail->Generic.Operations.DestroyRail      = ep3_destroy_rail;
57968 +    rail->Generic.Operations.StartRail        = ep3_start_rail;
57969 +    rail->Generic.Operations.StallRail        = ep3_stall_rail;
57970 +    rail->Generic.Operations.StopRail         = ep3_stop_rail;
57971 +
57972 +    rail->Generic.Operations.SdramAlloc       = ep3_sdram_alloc;
57973 +    rail->Generic.Operations.SdramFree        = ep3_sdram_free;
57974 +    rail->Generic.Operations.SdramWriteb      = ep3_sdram_writeb;
57975 +
57976 +    rail->Generic.Operations.KaddrMap         = ep3_kaddr_map;
57977 +    rail->Generic.Operations.SdramMap         = ep3_sdram_map;
57978 +    rail->Generic.Operations.Unmap            = ep3_unmap;
57979 +
57980 +    rail->Generic.Operations.DvmaReserve      = ep3_dvma_reserve;
57981 +    rail->Generic.Operations.DvmaRelease      = ep3_dvma_release;
57982 +    rail->Generic.Operations.DvmaSetPte       = ep3_dvma_set_pte;
57983 +    rail->Generic.Operations.DvmaReadPte      = ep3_dvma_read_pte;
57984 +    rail->Generic.Operations.DvmaUnload       = ep3_dvma_unload;
57985 +    rail->Generic.Operations.FlushTlb         = ep3_flush_tlb;
57986 +
57987 +    rail->Generic.Operations.ProbeRoute       = ep3_probe_route;
57988 +    rail->Generic.Operations.PositionFound    = ep3_position_found;
57989 +    rail->Generic.Operations.CheckPosition    = ep3_check_position;
57990 +    rail->Generic.Operations.NeterrFixup      = ep3_neterr_fixup;
57991 +
57992 +    rail->Generic.Operations.LoadSystemRoute  = ep3_load_system_route;
57993 +
57994 +    rail->Generic.Operations.LoadNodeRoute    = ep3_load_node_route;
57995 +    rail->Generic.Operations.UnloadNodeRoute  = ep3_unload_node_route;
57996 +    rail->Generic.Operations.LowerFilter      = ep3_lower_filter;
57997 +    rail->Generic.Operations.RaiseFilter      = ep3_raise_filter;
57998 +    rail->Generic.Operations.NodeDisconnected = ep3_node_disconnected;
57999 +
58000 +    rail->Generic.Operations.FlushFilters     = ep3_flush_filters;
58001 +    rail->Generic.Operations.FlushQueues      = ep3_flush_queues;
58002 +
58003 +    rail->Generic.Operations.AllocInputQ      = ep3_alloc_inputq;
58004 +    rail->Generic.Operations.FreeInputQ       = ep3_free_inputq;
58005 +    rail->Generic.Operations.EnableInputQ     = ep3_enable_inputq;
58006 +    rail->Generic.Operations.DisableInputQ    = ep3_disable_inputq;
58007 +    rail->Generic.Operations.PollInputQ       = ep3_poll_inputq;
58008 +
58009 +    rail->Generic.Operations.AllocOutputQ     = ep3_alloc_outputq;
58010 +    rail->Generic.Operations.FreeOutputQ      = ep3_free_outputq;
58011 +    rail->Generic.Operations.OutputQMsg       = ep3_outputq_msg;
58012 +    rail->Generic.Operations.OutputQState     = ep3_outputq_state;
58013 +    rail->Generic.Operations.OutputQSend      = ep3_outputq_send;
58014 +
58015 +    rail->Generic.Operations.FillOutStats     = ep3_fillout_stats;
58016 +
58017 +    rail->Generic.Devinfo = dev->Devinfo;
58018 +
58019 +    printk ("%s: connected via elan3 rev%c device %d\n", rail->Generic.Name,
58020 +           'a' + dev->Devinfo.dev_revision_id, dev->Instance);
58021 +
58022 +    return (EP_RAIL *) rail;
58023 +}
58024 +
58025 +void
58026 +ep3_destroy_rail (EP_RAIL *r)
58027 +{
58028 +    EP3_RAIL *rail = (EP3_RAIL *) r;
58029 +    
58030 +    KMEM_FREE (rail, sizeof (EP3_RAIL));
58031 +}
58032 +
58033 +static int
58034 +ep3_attach_rail (EP3_RAIL *rail)
58035 +{
58036 +    ELAN3_DEV        *dev = rail->Device;
58037 +    ELAN3_CTXT       *ctxt;
58038 +    ELAN_CAPABILITY  *cap;
58039 +    int               ctx;
58040 +    unsigned long     flags;
58041 +
58042 +    if ((ctxt = elan3_alloc (dev, TRUE)) == (ELAN3_CTXT *) NULL)
58043 +    {
58044 +       printk ("%s: cannot allocate elan context\n", rail->Generic.Name);
58045 +       return -ENXIO;
58046 +    }
58047 +    
58048 +    ctxt->Operations = &ep3_elan3_ops;
58049 +    ctxt->Private    = (void *) rail;
58050 +    
58051 +    /* Initialise a capability and attach to the elan */
58052 +    KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), TRUE);
58053 +    
58054 +    elan_nullcap (cap);
58055 +    
58056 +    cap->cap_type        = ELAN_CAP_TYPE_KERNEL;
58057 +    cap->cap_version     = ELAN_CAP_VERSION_NUMBER;
58058 +    cap->cap_mycontext   = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
58059 +    cap->cap_lowcontext  = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
58060 +    cap->cap_highcontext = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
58061 +    cap->cap_railmask    = 1 << dev->Devinfo.dev_rail;
58062 +    
58063 +    /* Ensure the context filter is raised while we initialise */
58064 +    elan3_block_inputter (ctxt, TRUE);
58065 +
58066 +    if (elan3_doattach (ctxt, cap) != 0)
58067 +    {
58068 +       printk ("%s: cannot attach to kernel context\n", rail->Generic.Name);
58069 +
58070 +       KMEM_FREE (cap, sizeof (ELAN_CAPABILITY));
58071 +       elan3_free (ctxt);
58072 +       return -ENXIO;
58073 +    }
58074 +    KMEM_FREE (cap, sizeof (ELAN_CAPABILITY));
58075 +
58076 +    /* now attach to all the kernel comms input/dmaring/data contexts */
58077 +    spin_lock_irqsave (&dev->IntrLock, flags);
58078 +
58079 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
58080 +    {
58081 +       /* place it in the info table.  NOTE: don't call elan3mmu_set_info, as this */
58082 +       /* will queue the info again on the device's info list */
58083 +       dev->CtxtTable[ctx] = ctxt;
58084 +       
58085 +       elan3mmu_set_context_filter (dev, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58086 +       elan3mmu_attach (dev, ctx, ctxt->Elan3mmu, ctxt->RouteTable->Table, ctxt->RouteTable->Size-1);
58087 +    }
58088 +
58089 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
58090 +    {
58091 +       /* place it in the info table.  NOTE: don't call elan3mmu_set_info, as this */
58092 +       /* will queue the info again on the device's info list */
58093 +       dev->CtxtTable[ctx] = ctxt;
58094 +       
58095 +       elan3mmu_set_context_filter (dev, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58096 +       elan3mmu_attach (dev, ctx, ctxt->Elan3mmu, ctxt->RouteTable->Table, ctxt->RouteTable->Size-1);
58097 +    }
58098 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
58099 +
58100 +    /* Stash the ctxt,commandport, mmu and route table */
58101 +    rail->Ctxt        = ctxt;
58102 +    rail->CommandPort = ctxt->CommandPort;
58103 +    rail->Elan3mmu    = ctxt->Elan3mmu;
58104 +    rail->RouteTable  = ctxt->RouteTable;
58105 +
58106 +    return 0;
58107 +}
58108 +
58109 +static void
58110 +ep3_detach_rail (EP3_RAIL *rail)
58111 +{
58112 +    ELAN3_DEV *dev = rail->Device;
58113 +    unsigned long flags;
58114 +    int ctx;
58115 +
58116 +    /* detach from the elan */
58117 +    spin_lock_irqsave (&dev->IntrLock, flags);
58118 +
58119 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
58120 +    {
58121 +       dev->CtxtTable[ctx] = NULL;
58122 +       elan3mmu_detach (dev, ctx);
58123 +    }
58124 +
58125 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
58126 +    {
58127 +       dev->CtxtTable[ctx] = NULL;
58128 +       elan3mmu_detach (dev, ctx);
58129 +    }
58130 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
58131 +
58132 +    elan3_dodetach(rail->Ctxt);
58133 +    elan3_free (rail->Ctxt);
58134 +
58135 +    rail->Ctxt        = NULL;
58136 +    rail->CommandPort = 0;
58137 +    rail->Elan3mmu    = NULL;
58138 +    rail->RouteTable  = NULL;
58139 +}
58140 +
58141 +int
58142 +ep3_start_rail (EP_RAIL *r)
58143 +{
58144 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
58145 +    int           i, res;
58146 +    unsigned long flags;
58147 +
58148 +    if ((res = ep3_attach_rail (rail)) != 0)
58149 +       return res;
58150 +
58151 +    spin_lock_init (&rail->CookieLock);
58152 +    kmutex_init (&rail->HaltOpMutex);
58153 +    kcondvar_init (&rail->HaltOpSleep);
58154 +
58155 +    /* Initialise event interrupt cookie table */
58156 +    InitialiseCookieTable (&rail->CookieTable);
58157 +
58158 +    /* Load and map the thread code */
58159 +    rail->ThreadCode = threadcode_elan3;
58160 +    if (ep_loadcode (&rail->Generic, &rail->ThreadCode) != ESUCCESS)
58161 +       goto failed;
58162 +
58163 +    /* Map the command port to be visible to the Elan */
58164 +    ep3_ioaddr_map (&rail->Generic, EP3_COMMANDPORT_ADDR, rail->Ctxt->CommandPage, PAGESIZE, EP_PERM_WRITE);
58165 +    rail->CommandPortAddr = EP3_COMMANDPORT_ADDR + (rail->Ctxt->CommandPort - rail->Ctxt->CommandPage);
58166 +
58167 +    /* Allocate the elan visible sdram/main memory */
58168 +    if ((rail->RailElan = ep_alloc_elan (&rail->Generic, sizeof (EP3_RAIL_ELAN), 0, &rail->RailElanAddr)) == 0 ||
58169 +       (rail->RailMain = ep_alloc_main (&rail->Generic, sizeof (EP3_RAIL_MAIN), 0, &rail->RailMainAddr)) == 0)
58170 +    {
58171 +       goto failed;
58172 +    }
58173 +
58174 +    /* Allocate the system input queues at their fixed elan address */
58175 +    if (! (rail->QueueDescs = ep_alloc_memory_elan (&rail->Generic, EP_SYSTEM_QUEUE_BASE, PAGESIZE, EP_PERM_ALL, 0)))
58176 +       goto failed;
58177 +
58178 +    /* Initialise all queue entries to be full */
58179 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
58180 +       elan3_sdram_writel (rail->Device, EP_SYSTEMQ_DESC(rail->QueueDescs, i) + offsetof (EP3_InputQueue, q_state), E3_QUEUE_FULL);
58181 +
58182 +    /* initialise the dma rings */
58183 +    if (DmaRingsCreate (rail))
58184 +       goto failed;
58185 +    
58186 +    if (InitialiseDmaRetries (rail))
58187 +       goto failed;
58188 +
58189 +    if (ep3_init_probenetwork (rail))
58190 +       goto failed;
58191 +
58192 +    /* can now drop the context filter for the system context */
58193 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
58194 +    elan3mmu_set_context_filter (rail->Device, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, FALSE, 0, NULL);
58195 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
58196 +
58197 +    return 0;
58198 +
58199 + failed:
58200 +    printk ("ep3_start_rail: failed for rail %d\n", rail->Generic.Number);
58201 +    ep3_stop_rail (&rail->Generic);
58202 +
58203 +    return -ENOMEM;
58204 +}
58205 +
58206 +void
58207 +ep3_stall_rail (EP_RAIL *r)
58208 +{
58209 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
58210 +    int           ctx;
58211 +    unsigned long flags;
58212 +
58213 +    /* raise all the context filters */
58214 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
58215 +
58216 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
58217 +       elan3mmu_set_context_filter (rail->Device, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58218 +
58219 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
58220 +       elan3mmu_set_context_filter (rail->Device, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58221 +
58222 +    elan3mmu_set_context_filter (rail->Device, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58223 +
58224 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
58225 +}
58226 +
58227 +void
58228 +ep3_stop_rail (EP_RAIL *r)
58229 +{
58230 +    EP3_RAIL *rail = (EP3_RAIL *) r;
58231 +
58232 +    ep3_destroy_probenetwork (rail);
58233 +
58234 +    if (rail->DmaRetryInitialised)
58235 +       DestroyDmaRetries (rail);
58236 +
58237 +    DmaRingsRelease(rail);
58238 +
58239 +    if (rail->Generic.State == EP_RAIL_STATE_RUNNING)
58240 +    {
58241 +       KMEM_FREE (rail->MainCookies, rail->Generic.Position.pos_nodes * sizeof (E3_uint32));
58242 +
58243 +       ep_free_elan (&rail->Generic, rail->ElanCookies, rail->Generic.Position.pos_nodes * sizeof (E3_uint32));
58244 +    }
58245 +
58246 +    if (rail->QueueDescs)
58247 +       ep_free_memory_elan (&rail->Generic, EP_SYSTEM_QUEUE_BASE);
58248 +    rail->QueueDescs = 0;
58249 +
58250 +    if (rail->RailMain)
58251 +       ep_free_main (&rail->Generic, rail->RailMainAddr, sizeof (EP3_RAIL_MAIN));
58252 +    rail->RailMain = 0;
58253 +
58254 +    if (rail->RailElan)
58255 +       ep_free_elan (&rail->Generic, rail->RailElanAddr, sizeof (EP3_RAIL_ELAN));
58256 +    rail->RailElan = 0;
58257 +
58258 +    ep_unloadcode (&rail->Generic, &rail->ThreadCode);
58259 +
58260 +    DestroyCookieTable (&rail->CookieTable);
58261 +
58262 +    ep_perrail_unmap (&rail->Generic, rail->Ctxt->CommandPage, PAGESIZE);
58263 +
58264 +    kcondvar_destroy (&rail->HaltOpSleep);
58265 +    kmutex_destroy (&rail->HaltOpMutex);
58266 +    spin_lock_destroy (&rail->CookieLock);
58267 +
58268 +    ep3_detach_rail (rail);
58269 +}
58270 +
58271 +void
58272 +ep3_position_found (EP_RAIL *r, ELAN_POSITION *pos)
58273 +{
58274 +    EP3_RAIL   *rail = (EP3_RAIL *) r;
58275 +    sdramaddr_t addr;
58276 +
58277 +    rail->SwitchBroadcastLevelTick = lbolt;
58278 +
58279 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, NodeId), pos->pos_nodeid);
58280 +
58281 +    /* Allocate Network Identify cookie state */
58282 +    KMEM_ZALLOC (rail->MainCookies, E3_uint32 *, pos->pos_nodes * sizeof (E3_uint32), 1);
58283 +    
58284 +    if (! (addr = ep_alloc_elan (&rail->Generic, pos->pos_nodes * sizeof (E3_uint32), 0, &rail->ElanCookies)))
58285 +       panic ("ep: PositionFound: cannot allocate elan cookies array\n");
58286 +
58287 +    elan3_sdram_zeroq_sdram (rail->Device, addr, pos->pos_nodes * sizeof (E3_uint32));
58288 +
58289 +    ep3_probe_position_found (rail, pos);
58290 +}
58291 +
58292 +sdramaddr_t
58293 +ep3_sdram_alloc (EP_RAIL *r, EP_ADDR addr, unsigned size)
58294 +{
58295 +    return elan3_sdram_alloc (((EP3_RAIL *) r)->Device, size);
58296 +}
58297 +
58298 +void
58299 +ep3_sdram_free (EP_RAIL *r, sdramaddr_t addr, unsigned size)
58300 +{
58301 +    elan3_sdram_free (((EP3_RAIL *) r)->Device, addr, size);
58302 +}
58303 +
58304 +void
58305 +ep3_sdram_writeb (EP_RAIL *r, sdramaddr_t addr, unsigned char val)
58306 +{
58307 +    elan3_sdram_writeb (((EP3_RAIL *) r)->Device, addr, val);
58308 +}
58309 +
58310 +void
58311 +ep3_flush_tlb (EP_RAIL *r)
58312 +{
58313 +    EP3_RAIL *rail = (EP3_RAIL *) r;
58314 +    ELAN3_DEV *dev = rail->Device;
58315 +    unsigned long flags;
58316 +
58317 +    spin_lock_irqsave (&dev->TlbLock, flags);
58318 +    
58319 +    IncrStat (dev, TlbFlushes);
58320 +    
58321 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | MMU_FLUSH);
58322 +    mmiob ();
58323 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
58324 +
58325 +    while (! (read_reg32 (dev, Cache_Control_Reg.ContReg) & MMU_FLUSHED))
58326 +       mb();
58327 +}
58328 +
58329 +void
58330 +ep3_load_system_route (EP_RAIL *r, unsigned vp, unsigned lowNode, unsigned highNode)
58331 +{
58332 +    EP3_RAIL  *rail = (EP3_RAIL *) r;
58333 +    E3_uint16  flits[MAX_FLITS];
58334 +    int        nflits;
58335 +    
58336 +    nflits = GenerateRoute (&rail->Generic.Position, flits, lowNode, highNode, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY);
58337 +       
58338 +    if (LoadRoute (rail->Device, rail->RouteTable, vp, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
58339 +    {
58340 +       /* XXXX: whilst LoadRoute() can fail - it is not likely. */
58341 +       panic ("ep3_load_system_route: cannot load p2p route entry\n");
58342 +    }
58343 +}
58344 +
58345 +void
58346 +ep3_load_node_route (EP_RAIL *r, unsigned nodeId)
58347 +{
58348 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
58349 +    E3_uint16     flits[MAX_FLITS];
58350 +    int           nflits;
58351 +
58352 +    nflits = GenerateRoute (&rail->Generic.Position, flits, nodeId, nodeId, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
58353 +
58354 +    if (LoadRoute (rail->Device, rail->RouteTable, EP_VP_DATA(nodeId), EP3_CONTEXT_NUM(rail->Generic.Position.pos_nodeid), nflits, flits) != 0)
58355 +       panic ("ep3_load_node_route: cannot load p2p data route entry\n");
58356 +}
58357 +
58358 +void
58359 +ep3_unload_node_route (EP_RAIL *r, unsigned nodeId)
58360 +{
58361 +    EP3_RAIL *rail = (EP3_RAIL *) r;
58362 +
58363 +    ClearRoute (rail->Device, rail->RouteTable, EP_VP_DATA(nodeId));
58364 +}
58365 +
58366 +void
58367 +ep3_lower_filter (EP_RAIL *r, unsigned nodeId)
58368 +{
58369 +    EP3_RAIL *rail = (EP3_RAIL *) r;
58370 +    unsigned long flags;
58371 +
58372 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
58373 +    elan3mmu_set_context_filter (rail->Device, EP3_CONTEXT_NUM(nodeId), 0, 0, NULL);
58374 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
58375 +}
58376 +
58377 +void
58378 +ep3_raise_filter (EP_RAIL *r, unsigned nodeId)
58379 +{
58380 +    EP3_RAIL *rail = (EP3_RAIL *) r;
58381 +    unsigned long flags;
58382 +
58383 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
58384 +    elan3mmu_set_context_filter (rail->Device, EP3_CONTEXT_NUM(nodeId), 1, 0, NULL);
58385 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
58386 +}
58387 +
58388 +void
58389 +ep3_node_disconnected (EP_RAIL *r, unsigned nodeId)
58390 +{
58391 +    FreeStalledDmas ((EP3_RAIL *) r, nodeId);
58392 +}
58393 +
58394 +void
58395 +ep3_fillout_stats(EP_RAIL *r, char *str) 
58396 +{
58397 +    /* no stats here yet */
58398 +    /* EP3_RAIL *ep3rail = (EP3_RAIL *)r; */
58399 +}
58400 Index: linux-2.4.21/drivers/net/qsnet/ep/kcomm_elan3.h
58401 ===================================================================
58402 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kcomm_elan3.h        2004-02-23 16:02:56.000000000 -0500
58403 +++ linux-2.4.21/drivers/net/qsnet/ep/kcomm_elan3.h     2005-06-01 23:12:54.666428920 -0400
58404 @@ -0,0 +1,431 @@
58405 +/*
58406 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
58407 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
58408 + *
58409 + *    For licensing information please see the supplied COPYING file
58410 + *
58411 + */
58412 +
58413 +#ifndef __EP_KCOMM_ELAN3_H
58414 +#define __EP_KCOMM_ELAN3_H
58415 +
58416 +#ident "@(#)$Id: kcomm_elan3.h,v 1.50.8.3 2004/12/14 10:19:14 mike Exp $ $Name: QSNETMODULES-4-30_20050128 $"
58417 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan3.h,v $*/
58418 +
58419 +#if !defined(__ELAN3__)
58420 +#include <elan3/elanregs.h>
58421 +#include <elan3/elandev.h>
58422 +#include <elan3/elanvp.h>
58423 +#include <elan3/elan3mmu.h>
58424 +#include <elan3/elanctxt.h>
58425 +#include <elan3/elandebug.h>
58426 +#endif /* !defined(__ELAN3__) */
58427 +
58428 +#include <elan3/trtype.h>
58429 +
58430 +/* private address allocation */
58431 +#define EP3_TEXT_BASE                  0xFF000000              /* base address for thread code (defined in makerules.elan3) */
58432 +#define EP3_COMMANDPORT_ADDR           0xFFF00000              /* mapping address for elan command port */
58433 +
58434 +#define EP3_STACK_SIZE                 1024                    /* default thread code stack size */
58435 +
58436 +#define EP3_PACEMAKER_EVENTADDR                0xfeedbeef              /* mis-aligned address used by heartbeat pacemaker */
58437 +
58438 +/* context number allocation */
58439 +#define EP3_CONTEXT_NUM(nodeId)                ((ELAN3_KCOMM_BASE_CONTEXT_NUM + (nodeId)) | SYS_CONTEXT_BIT)
58440 +#define EP3_CONTEXT_ISDATA(ctx)                (((ctx) & MAX_ROOT_CONTEXT_MASK) >= ELAN3_KCOMM_BASE_CONTEXT_NUM && \
58441 +                                        ((ctx) & MAX_ROOT_CONTEXT_MASK) <= ELAN3_KCOMM_TOP_CONTEXT_NUM)
58442 +#define EP3_CONTEXT_TO_NODE(ctx)       (((ctx) & MAX_ROOT_CONTEXT_MASK) - ELAN3_KCOMM_BASE_CONTEXT_NUM)
58443 +
58444 +/* DMA issuing rings */
58445 +#define EP3_RING_CRITICAL              0
58446 +#define EP3_RING_CRITICAL_LEN          128
58447 +#define EP3_RING_HIGH_PRI              1
58448 +#define EP3_RING_HIGH_PRI_LEN          64
58449 +#define EP3_RING_LOW_PRI               2
58450 +#define EP3_RING_LOW_PRI_LEN           32
58451 +#define EP3_NUM_RINGS                  3
58452 +
58453 +/* Value to "return" from c_close() when envelope handled by the trap handler */
58454 +#define EP3_PAckStolen                 4
58455 +
58456 +/* unimplemented instruction trap types for thread code */
58457 +#define EP3_UNIMP_TRAP_NO_DESCS                0
58458 +#define EP3_UNIMP_TRAP_PACKET_NACKED   1
58459 +#define EP3_UNIMP_THREAD_HALTED                2
58460 +#define EP3_NUM_UNIMP_TRAPS            3
58461 +
58462 +/* forward declarations */
58463 +typedef struct ep3_rail        EP3_RAIL;
58464 +
58465 +/* block copy elan3 inputter queue - with waitvent0 */
58466 +typedef struct ep3_inputqueue
58467 +{
58468 +    volatile E3_uint32 q_state;        /* queue is full=bit0, queue is locked=bit8 */
58469 +    volatile E3_Addr   q_bptr;         /* block aligned ptr to current back item */
58470 +    E3_uint32          q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
58471 +    E3_Addr            q_top;          /* block aligned ptr to last queue item */
58472 +    E3_Addr            q_base;         /* block aligned ptr to first queue item */
58473 +    volatile E3_Addr   q_fptr;         /* block aligned ptr to current front item */
58474 +    E3_BlockCopyEvent  q_event;        /* queue block copy event */
58475 +    E3_uint32          q_pad[4];       /* pad to 64 bytes */
58476 +    E3_Addr            q_wevent;       /* WaitEvent0 struct */
58477 +    E3_int32           q_wcount;
58478 +} EP3_InputQueue;
58479 +
58480 +
58481 +#if !defined(__ELAN3__)
58482 +
58483 +/* dma retries types and retry times */
58484 +typedef struct ep3_retry_dma
58485 +{
58486 +    struct list_head    Link;                                  /* chained on free/retry list */
58487 +    long               RetryTime;                              /* "lbolt" to retry at */
58488 +    E3_DMA_BE          Dma;                                    /* DMA (in main memory) */
58489 +} EP3_RETRY_DMA;
58490 +
58491 +typedef struct ep3_dma_ring
58492 +{
58493 +    sdramaddr_t                pEvent;  
58494 +    E3_Addr            epEvent;
58495 +    
58496 +    sdramaddr_t                pDma;     
58497 +    E3_Addr            epDma; 
58498 +    
58499 +    E3_uint32         *pDoneBlk; 
58500 +    E3_Addr            epDoneBlk; 
58501 +    
58502 +    int                        Entries;                                /* number of slots in array  */
58503 +    int                        Position;                               /* current position in array */
58504 +
58505 +    ioaddr_t            CommandPort;
58506 +    ioaddr_t           CommandPage;
58507 +    DeviceMappingHandle CommandPageHandle;
58508 +} EP3_DMA_RING;
58509 +
58510 +#define DMA_RING_EVENT(ring,n)         ((ring)->pEvent + (n)*sizeof (E3_BlockCopyEvent))
58511 +#define DMA_RING_EVENT_ELAN(ring,n)    ((ring)->epEvent + (n)*sizeof (E3_BlockCopyEvent))
58512 +
58513 +#define DMA_RING_DMA(ring,n)           ((ring)->pDma   + (n)*sizeof (E3_DMA))
58514 +#define DMA_RING_DMA_ELAN(ring,n)      ((ring)->epDma   + (n)*sizeof (E3_DMA))
58515 +
58516 +#define DMA_RING_DONE_ELAN(ring,n)     ((ring)->epDoneBlk + (n)*sizeof (E3_uint32))
58517 +
58518 +/* Event interrupt cookie operations and lookup table */
58519 +typedef struct ep3_cookie_ops
58520 +{
58521 +    void       (*Event)       (EP3_RAIL *rail, void *arg);                             /* called from the interrupt handler when an event is "set" */
58522 +    void       (*DmaRetry)    (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int error);  /* called from the interrupt handler when a DMA is "nacked" */
58523 +    void       (*DmaCancelled)(EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);             /* called from the interrupt handler/flush disconnecting when cancelled. */
58524 +    void       (*DmaVerify)   (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);             /* called from multiple places, to check dma is consistent with state. */
58525 +} EP3_COOKIE_OPS;
58526 +
58527 +typedef struct ep3_cookie
58528 +{
58529 +    struct ep3_cookie  *Next;                                  /* Cookies are chained in hash table. */
58530 +    E3_uint32          Cookie;                                 /* Cookie stored in ev_Type */
58531 +    EP3_COOKIE_OPS     *Operations;                            /* Cookie operations */
58532 +    void              *Arg;                                    /* User's argument. */
58533 +} EP3_COOKIE;
58534 +
58535 +#define EP3_COOKIE_HASH_SIZE           (256)
58536 +#define EP3_HASH_COOKIE(a)             ((((a) >> 3) ^ ((a) >> 7) ^ ((a) >> 11)) & (EP3_COOKIE_HASH_SIZE-1))
58537 +
58538 +typedef struct ep3_cookie_table
58539 +{
58540 +    spinlock_t         Lock;
58541 +    EP3_COOKIE        *Entries[EP3_COOKIE_HASH_SIZE];
58542 +} EP3_COOKIE_TABLE;
58543 +
58544 +#endif /* !defined(__ELAN3__) */
58545 +
58546 +#define EP3_EVENT_FREE                 ((1 << 4) | EV_WCOPY)
58547 +#define EP3_EVENT_ACTIVE               ((2 << 4) | EV_WCOPY)
58548 +/* DONE == Cookie */
58549 +#define EP3_EVENT_FAILED               ((3 << 4) | EV_WCOPY)
58550 +#define EP3_EVENT_PRIVATE              ((4 << 4) | EV_WCOPY)
58551 +
58552 +/* The event cookie can get posted (and seen) before the write has */
58553 +/* hit main memory - in this case the event count is <= 0 and the block */
58554 +/* will be marked as ACTIVE - but could transition to DONE at any time */
58555 +/* Also for a word copy event, the value written into the "done" word */
58556 +/* can be the event interrupt cookie rather than the "source" value */
58557 +/* this happens since the uCode does not wait for the write to have */
58558 +/* occurred before overwriting TMP_0 with the cookie */
58559 +#define EP3_EVENT_FIRING(edev, event, cookie, done) \
58560 +       (((((done) & ~(EV_TYPE_BCOPY | EV_TYPE_MASK_EVIRQ)) == (cookie).Cookie) || (done) == EP3_EVENT_ACTIVE) && \
58561 +        (int) elan3_sdram_readl (edev, (event) + offsetof (E3_BlockCopyEvent, ev_Count)) <= 0)
58562 +#define EP3_EVENT_FIRED(cookie, done) \
58563 +       (((done) & ~(EV_TYPE_BCOPY | EV_TYPE_MASK_EVIRQ)) == (cookie).Cookie)
58564 +
58565 +
58566 +/* Time limit to wait while event is firing and block write has not occurred */
58567 +#define EP3_EVENT_FIRING_TLIMIT                16384                   /* 1023 uS */
58568 +
58569 +#define EP3_INIT_COPY_EVENT(event, cookie, dest, intr)                                                 \
58570 +{                                                                                                      \
58571 +       (event).ev_Count  = 0;                                                                          \
58572 +       (event).ev_Type   = (intr) ? EV_TYPE_BCOPY | EV_TYPE_EVIRQ | (cookie).Cookie : EV_TYPE_BCOPY;   \
58573 +       (event).ev_Source = (cookie).Cookie | EV_WCOPY;                                                 \
58574 +       (event).ev_Dest   = (dest) | EV_TYPE_BCOPY_WORD;                                                \
58575 +}
58576 +
58577 +#if !defined(__ELAN3__)
58578 +
58579 +/* Generic input queues which can be polled */
58580 +typedef struct ep3_inputq
58581 +{
58582 +    EP3_COOKIE         q_cookie;
58583 +    unsigned int       q_slotSize;
58584 +    unsigned int       q_slotCount;
58585 +
58586 +    void              *q_slots;
58587 +    EP_ADDR            q_slotsAddr;
58588 +
58589 +    EP_INPUTQ_CALLBACK *q_callback;
58590 +    void              *q_arg;
58591 +
58592 +    sdramaddr_t                q_desc;
58593 +    E3_Addr            q_descAddr;
58594 +
58595 +    E3_Addr            q_base;
58596 +    E3_Addr            q_top;
58597 +    E3_Addr            q_fptr;
58598 +
58599 +    E3_uint32          q_waitCount;
58600 +} EP3_INPUTQ;
58601 +
58602 +typedef struct ep3_outputq
58603 +{
58604 +    EP3_COOKIE         q_cookie;
58605 +
58606 +    unsigned int       q_slotCount;                            /* # slots allocated */
58607 +    unsigned int       q_slotSize;                             /* size of each slot (rounded up) */
58608 +
58609 +    sdramaddr_t                q_elan;
58610 +    E3_Addr            q_elanAddr;
58611 +
58612 +    void              *q_main;
58613 +    E3_Addr            q_mainAddr;
58614 +} EP3_OUTPUTQ;
58615 +
58616 +#endif /* !defined(__ELAN3__) */
58617 +
58618 +/* per-rail elan memory portion of device */
58619 +typedef struct ep3_rail_elan
58620 +{
58621 +    E3_uint16           ProbeSource0[TR_TRACEROUTE_ENTRIES];   /* 32 byte aligned */
58622 +    E3_uint16           ProbeSource1[TR_TRACEROUTE_ENTRIES];
58623 +
58624 +    E3_BlockCopyEvent   ProbeDone;                             /* 16 byte aligned */
58625 +    E3_Event            ProbeStart;                            /* 8 byte aligned */
58626 +    
58627 +    E3_uint32           ProbeType;                             /* 4 byte aligned */
58628 +    E3_uint32           ProbeLevel;
58629 +
58630 +    E3_uint32           NodeId;
58631 +} EP3_RAIL_ELAN;
58632 +
58633 +/* values for ProbeType */
58634 +#define PROBE_SINGLE                   0
58635 +#define PROBE_MULTIPLE                 1
58636 +/* number of attempts for each type */
58637 +#define PROBE_SINGLE_ATTEMPTS          10
58638 +#define PROBE_SINGLE_TIMEOUTS          5
58639 +#define PROBE_MULTIPLE_ATTEMPTS                20
58640 +#define PROBE_MULTIPLE_TIMEOUTS                10
58641 +
58642 +/* per-rail elan memory portion of device */
58643 +typedef struct ep3_rail_main
58644 +{
58645 +    E3_uint16          ProbeDest0[TR_TRACEROUTE_ENTRIES];      /* 32 byte aligned */
58646 +    E3_uint16          ProbeDest1[TR_TRACEROUTE_ENTRIES];
58647 +    
58648 +    E3_uint32          ProbeDone;                              /* 4 byte aligned */
58649 +    E3_uint32          ProbeResult;
58650 +    E3_uint32          ProbeLevel;
58651 +} EP3_RAIL_MAIN;
58652 +
58653 +#if !defined(__ELAN3__)
58654 +
58655 +struct ep3_rail
58656 +{
58657 +    EP_RAIL            Generic;                                /* Generic rail */
58658 +
58659 +    ELAN3_DEV          *Device;                                        /* Elan device we're using */
58660 +    ELAN3_CTXT        *Ctxt;                                   /* Elan context struct */
58661 +    ioaddr_t            CommandPort;                           /* commandport from context */
58662 +    E3_Addr            CommandPortAddr;                        /*  and address mapped into elan */
58663 +
58664 +    ELAN3_ROUTE_TABLE  *RouteTable;                            /* routetable from context */
58665 +    ELAN3MMU          *Elan3mmu;                               /* elanmmu from context */
58666 +
58667 +    EP3_COOKIE_TABLE    CookieTable;                           /* Event cookie table */
58668 +
58669 +    EP_CODE            ThreadCode;                             /* copy of thread code */
58670 +    unsigned int        CommandPortEventTrap;                  /* flag to indicate command port eventint queue overflow trap */
58671 +
58672 +    sdramaddr_t         RailElan;                              /* Elan visible main/sdram portions of */
58673 +    E3_Addr             RailElanAddr;                          /* device structure */
58674 +    EP3_RAIL_MAIN      *RailMain;
58675 +    E3_Addr            RailMainAddr;
58676 +
58677 +    /* small system message queues */
58678 +    sdramaddr_t                QueueDescs;                             /* Input Queue descriptors */
58679 +
58680 +    /* Network position prober */
58681 +    E3_Addr            ProbeStack;                             /* Network position thread command structure */
58682 +    EP3_COOKIE         ProbeCookie;                            /*   event cookie for Done event */
58683 +    kcondvar_t         ProbeWait;                              /*   place to wait on probe thread */
58684 +    spinlock_t         ProbeLock;                              /*     and lock */
58685 +    volatile int        ProbeDone;                             /*     and flag to indicate it's done */
58686 +
58687 +    E3_uint16          ProbeDest0[TR_TRACEROUTE_ENTRIES];      /* last result of CheckNetworkPosition */
58688 +    E3_uint16          ProbeDest1[TR_TRACEROUTE_ENTRIES];
58689 +    E3_uint32          ProbeResult;
58690 +
58691 +    long               ProbeLevelTick[ELAN_MAX_LEVELS];
58692 +    long               SwitchBroadcastLevelTick;
58693 +
58694 +    /* rings for issuing dmas */
58695 +    EP3_DMA_RING        DmaRings[EP3_NUM_RINGS];
58696 +
58697 +    /* retry lists for dmas */
58698 +    struct list_head    DmaRetries[EP_NUM_RETRIES];            /* Dma retry lists */
58699 +    struct list_head    DmaRetryFreeList;                      /*   and free list */
58700 +    u_int              DmaRetryCount;                          /*   and total retry count */
58701 +    u_int              DmaRetryReserved;                       /*   and number reserved */
58702 +    u_int              DmaRetryThreadShouldStall;              /*   count of reasons to stall retries */
58703 +    u_int              DmaRetryThreadStarted:1;                /*   dma retry thread running */
58704 +    u_int              DmaRetryThreadShouldStop:1;             /*     but should stop */
58705 +    u_int              DmaRetryThreadStopped:1;                /*     and now it's stopped */
58706 +    u_int              DmaRetryInitialised:1;                  /* have initialised dma retries */
58707 +
58708 +    spinlock_t         DmaRetryLock;                           /*   spinlock protecting lists */
58709 +    kcondvar_t         DmaRetryWait;                           /*   place retry thread sleeps */
58710 +    long               DmaRetryTime;                           /*   and when it will next wakeup */
58711 +    unsigned int        DmaRetrySleeping;                      /*   and it's sleeping there */
58712 +
58713 +    /* Network Identify Cookies */
58714 +    E3_uint32         *MainCookies;                            /* One cookie allocator per-node for main*/
58715 +    E3_Addr            ElanCookies;                            /*   and one for elan */
58716 +    spinlock_t         CookieLock;                             /* spinlock to protect main cookies */
58717 +
58718 +    /* Halt operation flags for flushing. */
58719 +    kmutex_t            HaltOpMutex;                           /* serialize access to halt operations */
58720 +    unsigned int       HaltOpCompleted;                        /* flag to indicate halt operation completed */
58721 +    kcondvar_t         HaltOpSleep;                            /*   place to wait for it to complete */
58722 +
58723 +    /* Network error state */
58724 +    kcondvar_t         NetworkErrorSleep;                      /* place to sleep for network error halt operation */
58725 +    u_int              NetworkErrorFlushed;                    /*   and flag to indicate flushed */
58726 +
58727 +
58728 +    EP3_RAIL_STATS     Stats;                                  /* statistics */
58729 +};
58730 +
58731 +/* support.c */
58732 +
58733 +extern ELAN3_OPS  ep3_elan3_ops;
58734 +
58735 +extern E3_uint32    LocalCookie (EP3_RAIL *rail, unsigned int remoteNode);
58736 +extern E3_uint32    RemoteCookie (EP3_RAIL *rail, unsigned int remoteNode);
58737 +
58738 +extern void         InitialiseCookieTable (EP3_COOKIE_TABLE *table);
58739 +extern void         DestroyCookieTable (EP3_COOKIE_TABLE *table);
58740 +extern void         RegisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cookie, 
58741 +                                   E3_Addr event, EP3_COOKIE_OPS *ops, void *arg);
58742 +extern void         DeregisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cookie);
58743 +extern EP3_COOKIE   *LookupCookie (EP3_COOKIE_TABLE *table, uint32_t cookie);
58744 +extern EP3_COOKIE   *LookupEventCookie (EP3_RAIL *rail, EP3_COOKIE_TABLE *table, E3_Addr);
58745 +
58746 +extern int          DmaRingsCreate (EP3_RAIL *rail);
58747 +extern void         DmaRingsRelease (EP3_RAIL *rail);
58748 +extern int          IssueDma (EP3_RAIL *rail, E3_DMA_BE *dma, int type, int retryThread);
58749 +
58750 +extern int          IssueWaitevent (EP3_RAIL *rail, E3_Addr value);
58751 +extern void         IssueSetevent (EP3_RAIL *rail, E3_Addr value);
58752 +extern void         IssueRunThread (EP3_RAIL *rail, E3_Addr value);
58753 +extern long         DmaRetryTime (int type);
58754 +extern int          InitialiseDmaRetries (EP3_RAIL *rail);
58755 +extern void         DestroyDmaRetries (EP3_RAIL *rail);
58756 +extern int          ReserveDmaRetries (EP3_RAIL *rail, int count, EP_ATTRIBUTE attr);
58757 +extern void         ReleaseDmaRetries (EP3_RAIL *rail, int count);
58758 +extern void         StallDmaRetryThread (EP3_RAIL *rail);
58759 +extern void         ResumeDmaRetryThread (EP3_RAIL *rail);
58760 +extern void         QueueDmaForRetry (EP3_RAIL *rail, E3_DMA_BE *dma, int interval);
58761 +extern void         QueueDmaOnStalledList (EP3_RAIL *rail, E3_DMA_BE *dma);
58762 +extern void         FreeStalledDmas (EP3_RAIL *rail, unsigned int nodeId);
58763 +
58764 +extern void         SetQueueLocked(EP3_RAIL *rail, sdramaddr_t qaddr);
58765 +
58766 +/* threadcode_elan3.c */
58767 +extern E3_Addr    ep3_init_thread (ELAN3_DEV *dev, E3_Addr fn, E3_Addr addr, sdramaddr_t stack,
58768 +                                  int stackSize, int nargs, ...);
58769 +
58770 +/* probenetwork.c */
58771 +extern int        ep3_init_probenetwork (EP3_RAIL *rail);
58772 +extern void       ep3_destroy_probenetwork (EP3_RAIL *rail);
58773 +extern void       ep3_probe_position_found (EP3_RAIL *rail, ELAN_POSITION *pos);
58774 +extern int        ep3_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw);
58775 +extern int        ep3_check_position (EP_RAIL *rail);
58776 +
58777 +/* neterr_elan3.c */
58778 +extern void       ep3_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
58779 +
58780 +/* kcomm_elan3.c */
58781 +extern EP_RAIL    *ep3_create_rail (EP_SYS *sys, ELAN3_DEV *dev);
58782 +extern void        ep3_destroy_rail (EP_RAIL *rail);
58783 +
58784 +extern int         ep3_start_rail (EP_RAIL *rail);
58785 +extern void        ep3_stall_rail (EP_RAIL *rail);
58786 +extern void        ep3_stop_rail (EP_RAIL *rail);
58787 +
58788 +extern void       ep3_position_found (EP_RAIL *rail, ELAN_POSITION *pos);
58789 +
58790 +extern sdramaddr_t ep3_sdram_alloc (EP_RAIL *rail, EP_ADDR addr, unsigned int size);
58791 +extern void        ep3_sdram_free (EP_RAIL *rail, sdramaddr_t addr, unsigned int size);
58792 +extern void        ep3_sdram_writeb (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
58793 +
58794 +extern void        ep3_flush_tlb (EP_RAIL *r);
58795 +extern void        ep3_load_system_route (EP_RAIL *r, unsigned int vp, unsigned int lowNode, unsigned int highNode);
58796 +extern void        ep3_load_node_route (EP_RAIL *r, unsigned int nodeId);
58797 +extern void        ep3_unload_node_route (EP_RAIL *r, unsigned int nodeId);
58798 +extern void        ep3_lower_filter (EP_RAIL *r, unsigned int nodeId);
58799 +extern void        ep3_raise_filter (EP_RAIL *rail, unsigned int nodeId);
58800 +extern void        ep3_node_disconnected (EP_RAIL *r, unsigned int nodeId);
58801 +
58802 +extern void        ep3_fillout_stats(EP_RAIL *rail, char *str);
58803 +
58804 +/* kmap_elan3.c */
58805 +extern void        ep3_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr);
58806 +extern void        ep3_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr);
58807 +extern void        ep3_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned int len, unsigned int perm);
58808 +extern void        ep3_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len);
58809 +extern void       *ep3_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages);
58810 +extern void        ep3_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private);
58811 +extern void        ep3_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm);
58812 +extern physaddr_t  ep3_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index);
58813 +extern void        ep3_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages);
58814 +
58815 +/* kmsg_elan3.c */
58816 +extern EP_INPUTQ  *ep3_alloc_inputq (EP_RAIL *r, unsigned int qnum, unsigned int slotSize, unsigned int slotCount,
58817 +                                    EP_INPUTQ_CALLBACK *callback, void *arg);
58818 +extern void        ep3_free_inputq (EP_RAIL *r, EP_INPUTQ *q);
58819 +extern void        ep3_enable_inputq (EP_RAIL *r, EP_INPUTQ *q);
58820 +extern void        ep3_disable_inputq (EP_RAIL *r, EP_INPUTQ *q);
58821 +extern int         ep3_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
58822 +extern EP_OUTPUTQ *ep3_alloc_outputq (EP_RAIL *r, unsigned int slotSize, unsigned int slotCount);
58823 +extern void        ep3_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q);
58824 +extern void       *ep3_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
58825 +extern int         ep3_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
58826 +extern int         ep3_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum, unsigned int size,
58827 +                                    unsigned int nodeId, unsigned int qnum, unsigned int retries);
58828 +
58829 +/* support_elan3.c */
58830 +extern void        ep3_flush_filters (EP_RAIL *r);
58831 +extern void        ep3_flush_queues (EP_RAIL *r);
58832 +
58833 +#endif /* !defined(__ELAN3__) */
58834 +
58835 +#endif /* __EP_KCOMM_ELAN3_H */
58836 Index: linux-2.4.21/drivers/net/qsnet/ep/kcomm_elan4.c
58837 ===================================================================
58838 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kcomm_elan4.c        2004-02-23 16:02:56.000000000 -0500
58839 +++ linux-2.4.21/drivers/net/qsnet/ep/kcomm_elan4.c     2005-06-01 23:12:54.667428768 -0400
58840 @@ -0,0 +1,526 @@
58841 +/*
58842 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
58843 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
58844 + *
58845 + *    For licensing information please see the supplied COPYING file
58846 + *
58847 + */
58848 +
58849 +#ident "@(#)$Id: kcomm_elan4.c,v 1.16.2.3 2004/11/30 12:02:17 mike Exp $ $Name: QSNETMODULES-4-30_20050128 $"
58850 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan4.c,v $*/
58851 +
58852 +#include <qsnet/kernel.h>
58853 +#include <qsnet/kthread.h>
58854 +
58855 +#include <elan/kcomm.h>
58856 +
58857 +#include "kcomm_vp.h"
58858 +#include "kcomm_elan4.h"
58859 +#include "conf_linux.h"
58860 +
58861 +extern EP_CODE threadcode_elan4;
58862 +
58863 +unsigned int
58864 +ep4_create_rails (EP_SYS *sys, unsigned int disabled)
58865 +{
58866 +    unsigned int rmask = 0;
58867 +    ELAN4_DEV   *dev;
58868 +    EP_RAIL     *rail;
58869 +    int          i;
58870 +
58871 +    for (i = 0; i < EP_MAX_RAILS; i++)
58872 +    {
58873 +       if ((dev = elan4_reference_device (i, ELAN4_STATE_STARTED)) != NULL)
58874 +       {
58875 +           if ((rail = ep4_create_rail (sys, dev)) == NULL)
58876 +               elan4_dereference_device (dev);
58877 +           else
58878 +           {
58879 +               if (disabled & (1 << rail->Number))
58880 +                   printk ("%s: auto-start of device disabled by configuration\n", rail->Name);
58881 +               else
58882 +                   ep_start_rail (rail);
58883 +               
58884 +               ep_procfs_rail_init(rail);
58885 +
58886 +               rmask |= (1 << rail->Number);
58887 +           }
58888 +       }
58889 +    }
58890 +
58891 +    if (rmask)
58892 +       qsnet_debug_alloc();
58893 +
58894 +    return rmask;
58895 +}
58896 +
58897 +EP_RAIL *
58898 +ep4_create_rail (EP_SYS *sys, ELAN4_DEV *dev)
58899 +{
58900 +    EP4_RAIL *rail;
58901 +    int res;
58902 +
58903 +    KMEM_ZALLOC (rail, EP4_RAIL *, sizeof (EP4_RAIL), 1);
58904 +
58905 +    if (rail == NULL)
58906 +       return (EP_RAIL *) NULL;
58907 +    
58908 +    if ((res = ep_init_rail (sys, &rail->r_generic)) != 0)
58909 +    {
58910 +       KMEM_FREE (rail, sizeof (EP4_RAIL));
58911 +       return (EP_RAIL *) NULL;
58912 +    }
58913 +
58914 +    rail->r_ctxt.ctxt_dev = dev;
58915 +
58916 +    /* install our rail operations */
58917 +    rail->r_generic.Operations.DestroyRail      = ep4_destroy_rail;
58918 +    rail->r_generic.Operations.StartRail        = ep4_start_rail;
58919 +    rail->r_generic.Operations.StallRail        = ep4_stall_rail;
58920 +    rail->r_generic.Operations.StopRail         = ep4_stop_rail;    
58921 +
58922 +    rail->r_generic.Operations.SdramAlloc       = ep4_sdram_alloc;
58923 +    rail->r_generic.Operations.SdramFree        = ep4_sdram_free;
58924 +    rail->r_generic.Operations.SdramWriteb      = ep4_sdram_writeb;
58925 +
58926 +    rail->r_generic.Operations.KaddrMap         = ep4_kaddr_map;
58927 +    rail->r_generic.Operations.SdramMap         = ep4_sdram_map;
58928 +    rail->r_generic.Operations.Unmap            = ep4_unmap;
58929 +
58930 +    rail->r_generic.Operations.DvmaReserve     = ep4_dvma_reserve;
58931 +    rail->r_generic.Operations.DvmaRelease     = ep4_dvma_release;
58932 +    rail->r_generic.Operations.DvmaSetPte      = ep4_dvma_set_pte;
58933 +    rail->r_generic.Operations.DvmaReadPte     = ep4_dvma_read_pte;
58934 +    rail->r_generic.Operations.DvmaUnload      = ep4_dvma_unload;
58935 +    rail->r_generic.Operations.FlushTlb                = ep4_flush_tlb;
58936 +
58937 +    rail->r_generic.Operations.ProbeRoute       = ep4_probe_route;
58938 +
58939 +    rail->r_generic.Operations.PositionFound    = ep4_position_found;
58940 +    rail->r_generic.Operations.CheckPosition    = ep4_check_position;
58941 +    rail->r_generic.Operations.NeterrFixup      = ep4_neterr_fixup;
58942 +
58943 +    rail->r_generic.Operations.LoadSystemRoute  = ep4_load_system_route;
58944 +
58945 +    rail->r_generic.Operations.LoadNodeRoute    = ep4_load_node_route;
58946 +    rail->r_generic.Operations.UnloadNodeRoute  = ep4_unload_node_route;
58947 +    rail->r_generic.Operations.LowerFilter     = ep4_lower_filter;
58948 +    rail->r_generic.Operations.RaiseFilter     = ep4_raise_filter;
58949 +    rail->r_generic.Operations.NodeDisconnected = ep4_node_disconnected;
58950 +
58951 +    rail->r_generic.Operations.FlushFilters     = ep4_flush_filters;
58952 +    rail->r_generic.Operations.FlushQueues     = ep4_flush_queues;
58953 +
58954 +    rail->r_generic.Operations.AllocInputQ     = ep4_alloc_inputq;
58955 +    rail->r_generic.Operations.FreeInputQ      = ep4_free_inputq;
58956 +    rail->r_generic.Operations.EnableInputQ     = ep4_enable_inputq;
58957 +    rail->r_generic.Operations.DisableInputQ    = ep4_disable_inputq;
58958 +    rail->r_generic.Operations.PollInputQ      = ep4_poll_inputq;
58959 +
58960 +    rail->r_generic.Operations.AllocOutputQ     = ep4_alloc_outputq;
58961 +    rail->r_generic.Operations.FreeOutputQ     = ep4_free_outputq;
58962 +    rail->r_generic.Operations.OutputQMsg      = ep4_outputq_msg;
58963 +    rail->r_generic.Operations.OutputQState     = ep4_outputq_state;
58964 +    rail->r_generic.Operations.OutputQSend     = ep4_outputq_send;
58965 +
58966 +    rail->r_generic.Operations.FillOutStats     = ep4_fillout_stats;
58967 +    rail->r_generic.Operations.Debug           = ep4_debug_rail;
58968 +
58969 +    rail->r_generic.Devinfo = dev->dev_devinfo;
58970 +
58971 +    printk ("%s: connected via elan4 rev%c device %d\n", rail->r_generic.Name,
58972 +           'a' + dev->dev_devinfo.dev_revision_id, dev->dev_instance);
58973 +
58974 +    return (EP_RAIL *) rail;
58975 +}
58976 +
58977 +void
58978 +ep4_destroy_rail (EP_RAIL *r)
58979 +{
58980 +    EP4_RAIL *rail = (EP4_RAIL *) r;
58981 +
58982 +    elan4_dereference_device (rail->r_ctxt.ctxt_dev);
58983 +
58984 +    KMEM_FREE (rail, sizeof (EP4_RAIL));
58985 +}
58986 +
58987 +static int
58988 +ep4_attach_rail (EP4_RAIL *r)
58989 +{
58990 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
58991 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
58992 +    unsigned   ctx;
58993 +
58994 +    if (elan4_insertctxt (dev, &rail->r_ctxt, &ep4_trap_ops) != 0)
58995 +       return -ENOMEM;
58996 +    
58997 +    if ((rail->r_routetable = elan4_alloc_routetable (dev, 4)) == NULL)        /* 512 << 4 == 8192 entries */
58998 +    {
58999 +       elan4_removectxt (dev, &rail->r_ctxt);
59000 +       return -ENOMEM;
59001 +    }
59002 +    elan4_set_routetable (&rail->r_ctxt, rail->r_routetable);
59003 +
59004 +    /* Attach to the kernel comms nextwork context */
59005 +    if (elan4_attach_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM) < 0)
59006 +    {
59007 +       elan4_free_routetable (dev, rail->r_routetable);
59008 +       elan4_removectxt (dev, &rail->r_ctxt);
59009 +
59010 +       return -EBUSY;
59011 +    }
59012 +
59013 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
59014 +       elan4_attach_filter (&rail->r_ctxt, ctx);
59015 +
59016 +    return 0;
59017 +}
59018 +
59019 +static void
59020 +ep4_detach_rail (EP4_RAIL *rail)
59021 +{
59022 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
59023 +    unsigned   ctx;
59024 +
59025 +    elan4_detach_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM);
59026 +
59027 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
59028 +       elan4_detach_filter (&rail->r_ctxt, ctx);
59029 +
59030 +    if (rail->r_routetable)
59031 +    {
59032 +       elan4_set_routetable (&rail->r_ctxt, NULL);
59033 +       elan4_free_routetable (dev, rail->r_routetable);
59034 +    }
59035 +
59036 +    elan4_removectxt (dev, &rail->r_ctxt);
59037 +}
59038 +
59039 +int
59040 +ep4_start_rail (EP_RAIL *r)
59041 +{
59042 +    EP4_RAIL     *rail = (EP4_RAIL *) r;
59043 +    ELAN4_DEV    *dev  = rail->r_ctxt.ctxt_dev;
59044 +    E4_InputQueue qdesc;
59045 +    int           i, res;
59046 +
59047 +    if ((res = ep4_attach_rail (rail)) < 0)
59048 +       return res;
59049 +
59050 +    /* Initialise main interrupt cookie table */
59051 +    spin_lock_init (&rail->r_intcookie_lock);
59052 +    for (i = 0; i < EP4_INTCOOKIE_HASH_SIZE; i++)
59053 +       INIT_LIST_HEAD (&rail->r_intcookie_hash[i]);
59054 +
59055 +    kmutex_init (&rail->r_haltop_mutex);
59056 +    kcondvar_init (&rail->r_haltop_sleep);
59057 +    spin_lock_init (&rail->r_haltop_lock);
59058 +
59059 +    spin_lock_init (&rail->r_cookie_lock);
59060 +
59061 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_EVENT]);
59062 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_ATOMIC]);
59063 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_SINGLE]);
59064 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_MAIN]);
59065 +    spin_lock_init (&rail->r_ecq_lock);
59066 +
59067 +    ep_kthread_init (&rail->r_retry_thread);
59068 +    INIT_LIST_HEAD (&rail->r_retry_ops);
59069 +
59070 +    INIT_LIST_HEAD (&rail->r_neterr_ops);
59071 +
59072 +    kmutex_init (&rail->r_flush_mutex);
59073 +    kcondvar_init (&rail->r_flush_sleep);
59074 +
59075 +    /* Allocate the elan visible sdram/main memory */
59076 +    if ((rail->r_elan = ep_alloc_elan (&rail->r_generic, sizeof (EP4_RAIL_ELAN), 0, &rail->r_elan_addr)) == 0 ||
59077 +       (rail->r_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_RAIL_MAIN), 0, &rail->r_main_addr)) == 0)
59078 +    {
59079 +       goto failed;
59080 +    }
59081 +
59082 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
59083 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_qevents[i].ev_CountAndType), 0);
59084 +
59085 +    elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_CountAndType), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
59086 +
59087 +    /* Allocate the system input queues at their fixed elan address */
59088 +    /*   avoid sdram address aliasing by allocating the min sdram pagesize */
59089 +    if (! (rail->r_queuedescs= ep_alloc_memory_elan (&rail->r_generic, EP_SYSTEM_QUEUE_BASE, SDRAM_PAGE_SIZE, EP_PERM_ALL, 0)))
59090 +       goto failed;
59091 +
59092 +    /* Initialise the input queue descriptor as "full" with no event */
59093 +    qdesc.q_bptr    = 0;
59094 +    qdesc.q_fptr    = 8;
59095 +    qdesc.q_control = E4_InputQueueControl(qdesc.q_bptr, qdesc.q_fptr, 8);
59096 +    qdesc.q_event   = 0;
59097 +
59098 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
59099 +       elan4_sdram_copyq_to_sdram (dev, &qdesc, EP_SYSTEMQ_DESC (rail->r_queuedescs, i), sizeof (E4_InputQueue));
59100 +
59101 +    /* Allocate the resource map for command queue mappings */
59102 +    if ((rail->r_ecq_rmap = ep_rmallocmap (EP4_ECQ_RMAPSIZE, "r_ecq_rmap", 1)) == NULL)
59103 +       goto failed;
59104 +    
59105 +    ep_rmfree (rail->r_ecq_rmap, EP4_ECQ_TOP - EP4_ECQ_BASE, EP4_ECQ_BASE);
59106 +
59107 +    /* register an interrupt cookie & allocate command queues for command queue flushing */
59108 +    rail->r_flush_mcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4);
59109 +    rail->r_flush_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1);
59110 +
59111 +    if (rail->r_flush_mcq == NULL || rail->r_flush_ecq == NULL)
59112 +       goto failed;
59113 +
59114 +    ep4_register_intcookie (rail, &rail->r_flush_intcookie, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event), ep4_flush_interrupt, rail);
59115 +
59116 +    /* startup the retry thread */
59117 +    if (kernel_thread_create (ep4_retry_thread, (void *) rail) == 0)
59118 +       goto failed;
59119 +    ep_kthread_started (&rail->r_retry_thread);
59120 +    
59121 +    ep4_initialise_dma_retries (rail);
59122 +
59123 +    if ((rail->r_event_ecq = ep4_alloc_ecq (rail, CQ_Size1K)) == NULL)
59124 +       goto failed;
59125 +    
59126 +    rail->r_threadcode = threadcode_elan4;
59127 +    if (ep_loadcode (&rail->r_generic, &rail->r_threadcode))
59128 +       goto failed;
59129 +
59130 +    elan4_flush_icache (&rail->r_ctxt);
59131 +
59132 +    if (ep4_probe_init (rail))
59133 +       goto failed;
59134 +
59135 +    /* can now drop the context filter for the system context */
59136 +    elan4_set_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM, E4_FILTER_HIGH_PRI);
59137 +
59138 +    return 0;
59139 +
59140 + failed:
59141 +    printk ("ep4_start_rail: failed for rail '%s'\n", rail->r_generic.Name);
59142 +    ep4_stop_rail (&rail->r_generic);
59143 +
59144 +    return -ENOMEM;
59145 +}
59146 +
59147 +void
59148 +ep4_stall_rail (EP_RAIL *r)
59149 +{
59150 +    EP4_RAIL *rail = (EP4_RAIL *) r;
59151 +    unsigned  ctx;
59152 +
59153 +    /* Raise all the context filters */
59154 +    elan4_set_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM, E4_FILTER_DISCARD_ALL);
59155 +
59156 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
59157 +       elan4_set_filter (&rail->r_ctxt, ctx, E4_FILTER_DISCARD_ALL);
59158 +}
59159 +
59160 +void
59161 +ep4_stop_rail (EP_RAIL *r)
59162 +{
59163 +    EP4_RAIL *rail = (EP4_RAIL *) r;
59164 +
59165 +    if (rail->r_generic.State == EP_RAIL_STATE_RUNNING) /* undo ep4_position_found() */
59166 +    {
59167 +       ELAN_POSITION *pos  = &rail->r_generic.Position;
59168 +       EP_ADDR        addr = elan4_sdram_readq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_cookies));
59169 +
59170 +       ep_free_elan (&rail->r_generic, addr, pos->pos_nodes * sizeof (E4_uint64));
59171 +
59172 +       KMEM_FREE (rail->r_cookies, pos->pos_nodes * sizeof (E4_uint64));
59173 +    }
59174 +
59175 +    ep4_probe_destroy (rail);
59176 +
59177 +    ep_unloadcode (&rail->r_generic, &rail->r_threadcode);
59178 +
59179 +    if (rail->r_event_ecq)
59180 +       ep4_free_ecq (rail, rail->r_event_ecq);
59181 +    rail->r_event_ecq = NULL;
59182 +
59183 +    ep4_finalise_dma_retries (rail);
59184 +
59185 +    ep_kthread_stop (&rail->r_retry_thread);
59186 +    ep_kthread_destroy (&rail->r_retry_thread);
59187 +
59188 +    if (rail->r_flush_intcookie.int_arg)
59189 +       ep4_deregister_intcookie (rail, &rail->r_flush_intcookie);
59190 +    rail->r_flush_intcookie.int_arg = NULL;
59191 +
59192 +    if (rail->r_flush_mcq)
59193 +       ep4_put_ecq (rail, rail->r_flush_mcq, 4);
59194 +    rail->r_flush_mcq = NULL;
59195 +
59196 +    if (rail->r_flush_ecq)
59197 +       ep4_put_ecq (rail, rail->r_flush_ecq, 1);
59198 +    rail->r_flush_ecq = NULL;
59199 +
59200 +    if (rail->r_ecq_rmap)
59201 +       ep_rmfreemap (rail->r_ecq_rmap);
59202 +    
59203 +    if (rail->r_queuedescs)
59204 +       ep_free_memory_elan (&rail->r_generic, EP_SYSTEM_QUEUE_BASE);
59205 +    rail->r_queuedescs = 0;
59206 +
59207 +    if (rail->r_elan)
59208 +       ep_free_elan (&rail->r_generic, rail->r_elan_addr, sizeof (EP4_RAIL_ELAN));
59209 +    rail->r_elan = 0;
59210 +
59211 +    if (rail->r_main)
59212 +       ep_free_main (&rail->r_generic, rail->r_main_addr, sizeof (EP4_RAIL_MAIN));
59213 +    rail->r_main = NULL;
59214 +
59215 +    kcondvar_destroy (&rail->r_flush_sleep);
59216 +    kmutex_destroy (&rail->r_flush_mutex);
59217 +
59218 +    spin_lock_destroy (&rail->r_ecq_lock);
59219 +    spin_lock_destroy (&rail->r_cookie_lock);
59220 +
59221 +    spin_lock_destroy (&rail->r_haltop_lock);
59222 +    kcondvar_destroy(&rail->r_haltop_sleep);
59223 +    kmutex_destroy (&rail->r_haltop_mutex);
59224 +    spin_lock_destroy (&rail->r_intcookie_lock);
59225 +
59226 +    ep4_detach_rail (rail);
59227 +}
59228 +
59229 +void
59230 +ep4_position_found (EP_RAIL *r, ELAN_POSITION *pos)
59231 +{
59232 +    EP4_RAIL   *rail = (EP4_RAIL *) r;
59233 +    sdramaddr_t cookies;
59234 +    EP_ADDR     addr;
59235 +    int         i;
59236 +
59237 +    KMEM_ZALLOC (rail->r_cookies, E4_uint64 *, pos->pos_nodes * sizeof (E4_uint64), 1);
59238 +
59239 +    if (! (cookies = ep_alloc_elan (&rail->r_generic, pos->pos_nodes * sizeof (E4_uint64), 0, &addr)))
59240 +       panic ("ep4_position_found: cannot allocate elan cookies array\n");
59241 +
59242 +    for (i = 0; i < pos->pos_nodes; i++)
59243 +       elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, cookies + (i * sizeof (E4_uint64)), 0);
59244 +    
59245 +    for (i = 0; i < pos->pos_nodes; i++)
59246 +       rail->r_cookies[i] = 0;
59247 +
59248 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_nodeid), pos->pos_nodeid);
59249 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_cookies), addr);
59250 +
59251 +    ep4_probe_position_found (rail, pos);
59252 +}
59253 +
59254 +sdramaddr_t
59255 +ep4_sdram_alloc (EP_RAIL *r, EP_ADDR addr, unsigned size)
59256 +{
59257 +    ELAN4_DEV *dev = ((EP4_RAIL *) r)->r_ctxt.ctxt_dev;
59258 +
59259 +    if (size >= SDRAM_PAGE_SIZE)
59260 +       return elan4_sdram_alloc (dev, size);
59261 +    else
59262 +    {
59263 +       sdramaddr_t block = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
59264 +       sdramaddr_t sdram = block + (addr & (SDRAM_PAGE_SIZE-1));
59265 +
59266 +       /* free of the portion before sdram */
59267 +       if (sdram > block)
59268 +           elan4_sdram_free (dev, block, sdram - block);
59269 +
59270 +       /* free of the portion after sdram */
59271 +       if ((block + SDRAM_PAGE_SIZE) > (sdram + size))
59272 +           elan4_sdram_free (dev, sdram + size, block + SDRAM_PAGE_SIZE - (sdram + size));
59273 +
59274 +       return sdram;
59275 +    }
59276 +}
59277 +
59278 +void
59279 +ep4_sdram_free (EP_RAIL *r, sdramaddr_t addr, unsigned size)
59280 +{
59281 +    elan4_sdram_free (((EP4_RAIL *) r)->r_ctxt.ctxt_dev, addr, size);
59282 +}
59283 +
59284 +void
59285 +ep4_sdram_writeb (EP_RAIL *r, sdramaddr_t addr, unsigned char val)
59286 +{
59287 +    elan4_sdram_writeb (((EP4_RAIL *) r)->r_ctxt.ctxt_dev, addr, val);
59288 +}
59289 +
59290 +void
59291 +ep4_flush_tlb (EP_RAIL *r)
59292 +{
59293 +    elan4mmu_flush_tlb (((EP4_RAIL *) r)->r_ctxt.ctxt_dev);
59294 +}
59295 +
59296 +void
59297 +ep4_load_system_route (EP_RAIL *r, unsigned vp, unsigned lowNode, unsigned highNode)
59298 +{
59299 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
59300 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
59301 +    E4_VirtualProcessEntry route;
59302 +
59303 +    if (elan4_generate_route (&rail->r_generic.Position, &route, ELAN4_KCOMM_CONTEXT_NUM, 
59304 +                             lowNode, highNode, FIRST_SYSTEM_PACKET | FIRST_HIGH_PRI | FIRST_TIMEOUT(3)) < 0)
59305 +    {
59306 +       panic ("ep4_load_system_route: generate route failed\n");
59307 +       /* NOTREACHED */
59308 +    }
59309 +
59310 +    elan4_write_route (dev, rail->r_routetable, vp, &route);
59311 +}
59312 +
59313 +void
59314 +ep4_load_node_route (EP_RAIL *r, unsigned nodeId)
59315 +{
59316 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
59317 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
59318 +    E4_VirtualProcessEntry route;
59319 +
59320 +    if (elan4_generate_route (&rail->r_generic.Position, &route, EP4_CONTEXT_NUM(rail->r_generic.Position.pos_nodeid),
59321 +                             nodeId, nodeId, FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3)) < 0)
59322 +    {
59323 +       panic ("ep4_load_node_route: generate route failed\n");
59324 +       /* NOTREACHED */
59325 +    }
59326 +
59327 +    elan4_write_route (dev, rail->r_routetable, EP_VP_DATA(nodeId), &route);
59328 +}
59329 +
59330 +void
59331 +ep4_unload_node_route (EP_RAIL *r, unsigned nodeId)
59332 +{
59333 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
59334 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
59335 +    
59336 +    elan4_invalidate_route (dev, rail->r_routetable, EP_VP_DATA(nodeId));
59337 +}
59338 +
59339 +void
59340 +ep4_lower_filter (EP_RAIL *r, unsigned nodeId)
59341 +{
59342 +    EP4_RAIL *rail = (EP4_RAIL *) r;
59343 +
59344 +    elan4_set_filter (&rail->r_ctxt, EP4_CONTEXT_NUM(nodeId), E4_FILTER_HIGH_PRI);
59345 +}
59346 +
59347 +void
59348 +ep4_raise_filter (EP_RAIL *r, unsigned nodeId)
59349 +{
59350 +    EP4_RAIL *rail = (EP4_RAIL *) r;
59351 +
59352 +    elan4_set_filter (&rail->r_ctxt, EP4_CONTEXT_NUM(nodeId), E4_FILTER_DISCARD_ALL);
59353 +}
59354 +
59355 +void
59356 +ep4_node_disconnected (EP_RAIL *r, unsigned nodeId)
59357 +{
59358 +    ep4_free_stalled_dmas ((EP4_RAIL *) r, nodeId);
59359 +}
59360 +
59361 +void
59362 +ep4_fillout_stats(EP_RAIL *r, char *str) 
59363 +{
59364 +    /* no stats here yet */
59365 +    /* EP4_RAIL *ep4rail = (EP4_RAIL *)r; */
59366 +}
59367 Index: linux-2.4.21/drivers/net/qsnet/ep/kcomm_elan4.h
59368 ===================================================================
59369 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kcomm_elan4.h        2004-02-23 16:02:56.000000000 -0500
59370 +++ linux-2.4.21/drivers/net/qsnet/ep/kcomm_elan4.h     2005-06-01 23:12:54.668428616 -0400
59371 @@ -0,0 +1,443 @@
59372 +/*
59373 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
59374 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
59375 + *
59376 + *    For licensing information please see the supplied COPYING file
59377 + *
59378 + */
59379 +
59380 +#ifndef __EP_KCOMM_ELAN4_H
59381 +#define __EP_KCOMM_ELAN4_H
59382 +
59383 +#ident "@(#)$Id: kcomm_elan4.h,v 1.16.2.2 2004/12/14 10:19:14 mike Exp $ $Name: QSNETMODULES-4-30_20050128 $"
59384 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan4.h,v $*/
59385 +
59386 +#include <elan4/types.h>
59387 +
59388 +#include <elan4/dma.h>
59389 +#include <elan4/events.h>
59390 +#include <elan4/commands.h>
59391 +
59392 +#if !defined(__elan4__)
59393 +#include <elan4/device.h>
59394 +#endif /* ! defined(__elan4__) */
59395 +
59396 +/* private address allocation */
59397 +#define EP4_TEXT_BASE                  0xF8000000              /* base address for thread code (defined in makerules.elan4) */
59398 +#define EP4_ECQ_BASE                   0xFF000000              /* address space for mapping command queues */
59399 +#define EP4_ECQ_TOP                    0xFF800000
59400 +
59401 +#define EP4_ECQ_RMAPSIZE               128
59402 +#define EP4_STACK_SIZE                 1024                    /* default thread code stack size */
59403 +#define EP4_MAX_LEVELS                 8                       /* same as ELAN_MAX_LEVELS */
59404 +
59405 +/* context number allocation */
59406 +#define EP4_CONTEXT_NUM(nodeId)                (ELAN4_KCOMM_BASE_CONTEXT_NUM + (nodeId))
59407 +#define EP4_CONTEXT_ISDATA(ctx)                ((ctx) >= ELAN4_KCOMM_BASE_CONTEXT_NUM && \
59408 +                                        (ctx) <= ELAN4_KCOMM_TOP_CONTEXT_NUM)
59409 +#define EP4_CONTEXT_TO_NODE(ctx)       ((ctx) - ELAN4_KCOMM_BASE_CONTEXT_NUM)
59410 +
59411 +/*
59412 + * network error cookie format:
59413 + *  -------------------------------------------------
59414 + *  | unique cookie value | Remote | DMA | Location |
59415 + *  -------------------------------------------------
59416 + * [63:4] Cookie   - unique cookie number
59417 + * [3]    Thread   - cookie generated by thread code
59418 + * [2]   Remote   - cookie generated by remote end
59419 + * [1]    STEN     - cookie is for a STEN packet
59420 + * [0]    DMA      - cookie is for a DMA
59421 + */
59422 +#define EP4_COOKIE_DMA         (1    << 0)
59423 +#define EP4_COOKIE_STEN                (1    << 1)
59424 +#define EP4_COOKIE_REMOTE      (1    << 2)
59425 +#define EP4_COOKIE_THREAD      (1    << 3)
59426 +#define EP4_COOKIE_INC         (1ull << 4)
59427 +
59428 +#define EP4_COOKIE_STRING(val) ((val) & ~(EP4_COOKIE_INC-1)) >> 4, \
59429 +                               ((val) & EP4_COOKIE_DMA)    ? ",dma" : "", \
59430 +                               ((val) & EP4_COOKIE_REMOTE) ? ",remote" : "", \
59431 +                               ((val) & EP4_COOKIE_THREAD) ? ",thread" : "", \
59432 +                               ((val) & EP4_COOKIE_STEN)   ? ",sten" : ""
59433 +/*
59434 + * Done "word" values 
59435 + */
59436 +#define EP4_STATE_FREE         0
59437 +#define EP4_STATE_ACTIVE       1
59438 +#define EP4_STATE_FINISHED     2
59439 +#define EP4_STATE_FAILED       3
59440 +#define EP4_STATE_PRIVATE      4
59441 +
59442 +#define EP4_EVENT_FIRING_TLIMIT        16384                   /* 1023 uS */
59443 +
59444 +/* forward declarations */
59445 +typedef struct ep4_rail        EP4_RAIL;
59446 +
59447 +#if !defined(__elan4__)
59448 +
59449 +typedef struct ep4_intcookie
59450 +{
59451 +    struct list_head            int_link;
59452 +    E4_uint64                   int_val;
59453 +    void                      (*int_callback)(EP4_RAIL *rail, void *arg);
59454 +    void                       *int_arg;
59455 +} EP4_INTCOOKIE;
59456 +
59457 +#define EP4_INTCOOKIE_HASH_SIZE        256
59458 +#define EP4_INTCOOKIE_HASH(a)          ((((a) >> 3) ^ ((a) >> 7) ^ ((a) >> 11)) & (EP4_INTCOOKIE_HASH_SIZE-1))
59459 +
59460 +typedef struct ep4_ecq
59461 +{
59462 +    struct list_head   ecq_link;                               /* linked on r_ecq_list */
59463 +    ELAN4_INTOP                ecq_intop;                              /* main interrupt op space */
59464 +    ELAN4_CQ          *ecq_cq;                                 /* command queue */
59465 +    E4_Addr            ecq_addr;                               /* address mapped into elan */
59466 +    unsigned int       ecq_avail;                              /* # dwords still available */
59467 +
59468 +    spinlock_t         ecq_lock;                               /* spinlock for main accesses */
59469 +    sdramaddr_t                ecq_event;                              /* event for flushing "event" queues */
59470 +    EP_ADDR            ecq_event_addr;
59471 +    struct ep4_ecq     *ecq_flushcq;                           /*  and command port to issue setevent to */
59472 +} EP4_ECQ;
59473 +
59474 +#define EP4_ECQ_EVENT          0                               /* command queues targetted by multi-blocks events */
59475 +#define EP4_ECQ_ATOMIC         1                               /* command queues targetted by atomic store operations */
59476 +#define EP4_ECQ_SINGLE         2                               /* command queues targetted by single word commands from main */
59477 +#define EP4_ECQ_MAIN           3                               /* command queues targetted by multi word commands from main */
59478 +#define EP4_NUM_ECQ            4
59479 +
59480 +#define EP4_ECQ_Size(which)            ((which) == EP4_ECQ_EVENT  ? CQ_Size64K : \
59481 +                                        (which) == EP4_ECQ_ATOMIC ? CQ_Size8K  : \
59482 +                                        (which) == EP4_ECQ_SINGLE ? CQ_Size1K  : \
59483 +                                        (which) == EP4_ECQ_MAIN   ? CQ_Size8K  : \
59484 +                                        CQ_Size1K)
59485 +
59486 +typedef struct ep4_dma_retry
59487 +{
59488 +    struct list_head    retry_link;                            /* chained on free/retry list */
59489 +    unsigned long      retry_time;                             /* "lbolt" to retry at */
59490 +    E4_DMA             retry_dma;                              /* DMA (in main memory) */
59491 +} EP4_DMA_RETRY;
59492 +
59493 +#define EP4_DMA_RETRY_CQSIZE           CQ_Size8K                               /* size of command queue for dma retry */
59494 +#define EP4_DMA_RETRY_FLOWCNT          (CQ_Size(EP4_DMA_RETRY_CQSIZE)/72)      /* # of reissued DMA's which can fit in */
59495 +
59496 +typedef struct ep4_inputq
59497 +{
59498 +    EP4_INTCOOKIE      q_intcookie;
59499 +    unsigned int       q_slotSize;
59500 +    unsigned int       q_slotCount;
59501 +
59502 +    void              *q_slots;
59503 +    EP_ADDR            q_slotsAddr;
59504 +    
59505 +    EP_INPUTQ_CALLBACK *q_callback;
59506 +    void              *q_arg;
59507 +
59508 +    sdramaddr_t                q_desc;
59509 +    EP_ADDR            q_descAddr;
59510 +    EP_ADDR            q_eventAddr;
59511 +    EP4_ECQ           *q_wcq;                                  /* command queue to issue waitevent to */
59512 +    EP4_ECQ           *q_ecq;                                  /* command queue targetted by event to generate interrupt */
59513 +
59514 +    EP_ADDR            q_fptr;                                 /* cached current front pointer */
59515 +    EP_ADDR            q_last;                                 /* elan addr for last queue slot  */
59516 +
59517 +    atomic_t           q_fired;                                /* atomic flag that interrupt received */
59518 +    unsigned int       q_count;                                /* count of slots consumed */
59519 +} EP4_INPUTQ;
59520 +
59521 +typedef struct ep4_outputq
59522 +{
59523 +    spinlock_t         q_lock;
59524 +    unsigned int       q_slotCount;
59525 +    unsigned int       q_slotSize;
59526 +    unsigned int        q_dwords;
59527 +    ELAN4_CQ          *q_cq;
59528 +    void              *q_main;
59529 +    EP_ADDR            q_mainAddr;
59530 +    unsigned int       q_retries;
59531 +} EP4_OUTPUTQ;
59532 +
59533 +#endif /* ! defined(__elan4__) */
59534 +
59535 +typedef struct ep4_check_sten
59536 +{
59537 +    E4_uint64          c_reset_event_cmd;                      /* WRITEDWORD to reset start event */
59538 +    E4_uint64          c_reset_event_value;
59539 +
59540 +    E4_uint64          c_open;                                 /* OPEN VP_PROBE(lvl) */
59541 +    E4_uint64          c_trans_traceroute0;                    /* SENDTRANS TR_TRACEROUTE 0s */
59542 +    E4_uint64          c_addr_traceroute0;
59543 +    E4_uint64          c_data_traceroute0[8];
59544 +    E4_uint64          c_trans_traceroute1;                    /* SENDTRANS TR_TRACEROUTE 1s */
59545 +    E4_uint64          c_addr_traceroute1;
59546 +    E4_uint64          c_data_traceroute1[8];
59547 +    E4_uint64          c_trans_sendack;                        /* SENDTRANS SENDACK */
59548 +    E4_uint64          c_addr_sendack;
59549 +    
59550 +    E4_uint64          c_guard_ok;                             /* GUARD OK - write level */
59551 +    E4_uint64          c_writedword_ok;
59552 +    E4_uint64          c_value_ok;
59553 +    
59554 +    E4_uint64          c_guard_fail;                           /* GUARD FAIL - chain setevent/write fail */
59555 +    E4_uint64          c_setevent_fail;
59556 +    E4_uint64          c_setevent_nop;
59557 +    E4_uint64          c_nop_pad;
59558 +} EP4_CHECK_STEN;
59559 +
59560 +#define EP4_CHECK_STEN_NDWORDS (sizeof (EP4_CHECK_STEN) >> 3)
59561 +
59562 +typedef struct ep4_rail_elan
59563 +{
59564 +    EP4_CHECK_STEN     r_check_sten[EP4_MAX_LEVELS];
59565 +    E4_Event32         r_check_fail;                                   /* Check failed (== r_check_start[-1]) */
59566 +    E4_Event32          r_check_start[EP4_MAX_LEVELS];
59567 +
59568 +    E4_Event32         r_qevents[EP_NUM_SYSTEMQ];
59569 +    E4_Event32         r_flush_event;
59570 +
59571 +    E4_uint64          r_nodeid;
59572 +#ifdef __elan4__
59573 +    E4_uint64         *r_cookies;
59574 +#else
59575 +    E4_Addr            r_cookies;
59576 +#endif
59577 +} EP4_RAIL_ELAN;
59578 +
59579 +#define TRACEROUTE_ENTRIES     16                      /* 2 * ELAN_MAX_LEVELS */
59580 +#define TRACEROUTE_NDWORDS     (TRACEROUTE_ENTRIES/2)
59581 +
59582 +typedef struct ep4_rail_main
59583 +{
59584 +    E4_uint32          r_probe_dest0[TRACEROUTE_ENTRIES];
59585 +    E4_uint32          r_probe_dest1[TRACEROUTE_ENTRIES];
59586 +    E4_uint64          r_probe_result;
59587 +    E4_uint64          r_probe_level;
59588 +
59589 +    E4_uint64           r_dma_flowcnt;                         /*  count of dma's queued */
59590 +} EP4_RAIL_MAIN;
59591 +
59592 +#define EP4_PROBE_ACTIVE       (0xffff)
59593 +#define EP4_PROBE_FAILED       (0xfffe)
59594 +
59595 +#if !defined(__elan4__)
59596 +
59597 +typedef struct ep4_retry_ops
59598 +{
59599 +    struct list_head   op_link;
59600 +    unsigned long     (*op_func)(EP4_RAIL *rail, void *arg, unsigned long nextRunTime);
59601 +    void              *op_arg;
59602 +} EP4_RETRY_OPS;
59603 +
59604 +typedef struct ep4_neterr_ops
59605 +{
59606 +    struct list_head   op_link;
59607 +    void             (*op_func) (EP4_RAIL *rail, void *arg, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
59608 +    void              *op_arg;
59609 +} EP4_NETERR_OPS;
59610 +
59611 +struct ep4_rail
59612 +{
59613 +    EP_RAIL            r_generic;
59614 +    ELAN4_CTXT         r_ctxt;
59615 +    ELAN4_ROUTE_TABLE  *r_routetable;
59616 +    
59617 +    spinlock_t         r_intcookie_lock;
59618 +    struct list_head    r_intcookie_hash[EP4_INTCOOKIE_HASH_SIZE];
59619 +
59620 +    sdramaddr_t                r_elan;
59621 +    EP_ADDR            r_elan_addr;
59622 +    EP4_RAIL_MAIN      *r_main;
59623 +    EP_ADDR            r_main_addr;
59624 +    
59625 +    EP_CODE            r_threadcode;                           /* copy of thread code */
59626 +
59627 +    sdramaddr_t                r_queuedescs;                           /* systemq queue descriptors */
59628 +
59629 +    E4_uint64         *r_cookies;                              /* network error cookies */
59630 +    spinlock_t          r_cookie_lock;                         /*    and spin lock */
59631 +
59632 +    kcondvar_t         r_probe_wait;                           /* network position probing */
59633 +    spinlock_t         r_probe_lock;
59634 +    volatile int       r_probe_done;
59635 +    EP4_INTCOOKIE      r_probe_intcookie;
59636 +    EP4_ECQ           *r_probe_cq;
59637 +    E4_uint32          r_probe_source0[TRACEROUTE_ENTRIES];
59638 +    E4_uint32          r_probe_source1[TRACEROUTE_ENTRIES];
59639 +
59640 +    kmutex_t           r_haltop_mutex;                         /* halt/flush operations */
59641 +    ELAN4_HALTOP       r_haltop;
59642 +    ELAN4_DMA_FLUSHOP   r_flushop;
59643 +    kcondvar_t         r_haltop_sleep;
59644 +    spinlock_t         r_haltop_lock;
59645 +
59646 +    struct list_head    r_ecq_list[EP4_NUM_ECQ];               /* list of statically allocated command queues */
59647 +    EP_RMAP           *r_ecq_rmap;                             /* resource map for command queue mappings */
59648 +    spinlock_t          r_ecq_lock;                            /* spinlock for list/space management */
59649 +
59650 +    kmutex_t           r_flush_mutex;                          /* serialize command queue flushing */
59651 +    unsigned long      r_flush_count;                          /* # setevents issued for flushing */
59652 +    EP4_ECQ           *r_flush_mcq;                            /*   and command queue for waitevent */
59653 +    EP4_ECQ            *r_flush_ecq;                           /*   and command queue for interrupt */
59654 +    EP4_INTCOOKIE       r_flush_intcookie;                     /*   and interrupt cookie */
59655 +    kcondvar_t          r_flush_sleep;                         /*   and place to sleep ... */
59656 +
59657 +    EP_KTHREAD         r_retry_thread;                         /* retry thread */
59658 +    struct list_head    r_retry_ops;                           /*  list of retry operations */
59659 +
59660 +    EP4_RETRY_OPS       r_dma_ops;                             /* dma retry operations */
59661 +    EP4_ECQ           *r_dma_ecq;                              /*   command queue to reissue DMAs */
59662 +    E4_uint64           r_dma_flowcnt;                         /*   count of dma's reissued */
59663 +    struct list_head    r_dma_retrylist[EP_NUM_RETRIES];       /*   retry lists  */
59664 +    struct list_head    r_dma_freelist;                                /*   and free list */
59665 +    spinlock_t          r_dma_lock;                            /*   and spinlock to protect lists */
59666 +    unsigned long       r_dma_allocated;                       /*   # retries allocated*/
59667 +    unsigned long       r_dma_reserved;                                /*   # retries reserved */
59668 +
59669 +    EP4_ECQ           *r_event_ecq;                            /* command queue for occasional setevents */
59670 +
59671 +    struct list_head    r_neterr_ops;                          /* list of neterr fixup operations */
59672 +
59673 +    ELAN4_IPROC_TRAP    r_iproc_trap;
59674 +    ELAN4_TPROC_TRAP    r_tproc_trap;
59675 +} ;
59676 +
59677 +#define EP4_CTXT_TO_RAIL(ctxt) ((EP4_RAIL *) (((unsigned long) (ctxt)) - offsetof (EP4_RAIL, r_ctxt)))
59678 +
59679 +#if defined(DEBUG_ASSERT)
59680 +#define EP4_ASSERT(rail,EXPR)                  EP_ASSERT(&((rail)->r_generic), EXPR)
59681 +#define EP4_SDRAM_ASSERT(rail,off,value)       EP4_ASSERT(rail, (sdram_assert ? elan4_sdram_readq ((rail)->r_ctxt.ctxt_dev, (off)) == (value) : 1))
59682 +#else
59683 +#define EP4_ASSERT(rail,EXPR)
59684 +#define EP4_SDRAM_ASSERT(rail,off,value)
59685 +#endif
59686 +
59687 +/* kcomm_elan4.c */
59688 +extern EP_RAIL    *ep4_create_rail (EP_SYS *sys, ELAN4_DEV *dev);
59689 +extern void        ep4_destroy_rail (EP_RAIL *rail);
59690 +
59691 +extern int         ep4_start_rail (EP_RAIL *rail);
59692 +extern void        ep4_stall_rail (EP_RAIL *rail);
59693 +extern void        ep4_stop_rail (EP_RAIL *rail);
59694 +
59695 +extern void       ep4_debug_rail (EP_RAIL *rail);
59696 +
59697 +extern void        ep4_position_found (EP_RAIL *rail, ELAN_POSITION *pos);
59698 +
59699 +extern sdramaddr_t ep4_sdram_alloc (EP_RAIL *rail, EP_ADDR addr, unsigned int size);
59700 +extern void        ep4_sdram_free (EP_RAIL *rail, sdramaddr_t addr, unsigned int size);
59701 +extern void        ep4_sdram_writeb (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
59702 +
59703 +extern void        ep4_flush_tlb (EP_RAIL *r);
59704 +extern void        ep4_load_system_route (EP_RAIL *r, unsigned int vp, unsigned int lowNode, unsigned int highNode);
59705 +extern void        ep4_load_node_route (EP_RAIL *r, unsigned int nodeId);
59706 +extern void        ep4_unload_node_route (EP_RAIL *r, unsigned int nodeId);
59707 +extern void        ep4_lower_filter (EP_RAIL *r, unsigned int nodeId);
59708 +extern void        ep4_raise_filter (EP_RAIL *rail, unsigned int nodeId);
59709 +extern void        ep4_node_disconnected (EP_RAIL *r, unsigned int nodeId);
59710 +
59711 +/* kmap_elan4.c */
59712 +extern void        ep4_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr);
59713 +extern void        ep4_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr);
59714 +extern void        ep4_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned int len, unsigned int perm);
59715 +extern void        ep4_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len);
59716 +extern void       *ep4_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages);
59717 +extern void        ep4_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private);
59718 +extern void        ep4_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm);
59719 +extern physaddr_t  ep4_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index);
59720 +extern void        ep4_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages);
59721 +
59722 +/* kmsg_elan4.c */
59723 +extern EP_INPUTQ  *ep4_alloc_inputq (EP_RAIL *r, unsigned int qnum, unsigned int slotSize, unsigned int slotCount,
59724 +                                    EP_INPUTQ_CALLBACK *callback, void *arg);
59725 +extern void        ep4_free_inputq (EP_RAIL *r, EP_INPUTQ *q);
59726 +extern void        ep4_enable_inputq (EP_RAIL *r, EP_INPUTQ *q);
59727 +extern void        ep4_disable_inputq (EP_RAIL *r, EP_INPUTQ *q);
59728 +extern int         ep4_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
59729 +extern EP_OUTPUTQ *ep4_alloc_outputq (EP_RAIL *r, unsigned int slotSize, unsigned int slotCount);
59730 +extern void        ep4_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q);
59731 +extern void       *ep4_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
59732 +extern int         ep4_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
59733 +extern int         ep4_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum, unsigned int size,
59734 +                                    unsigned int nodeId, unsigned int qnum, unsigned int retries);
59735 +
59736 +/* probenetwork_elan4.c */
59737 +extern int         ep4_probe_init (EP4_RAIL *r);
59738 +extern void        ep4_probe_destroy (EP4_RAIL *r);
59739 +extern void        ep4_probe_position_found (EP4_RAIL *rail, ELAN_POSITION *pos);
59740 +extern int         ep4_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw);
59741 +extern int         ep4_check_position (EP_RAIL *rail);
59742 +
59743 +/* support_elan4.c */
59744 +extern ELAN4_TRAP_OPS ep4_trap_ops;
59745 +extern void           ep4_register_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp, E4_uint64 cookie, void (*callback)(EP4_RAIL *r, void *arg), void *arg);
59746 +extern void           ep4_deregister_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp);
59747 +extern EP4_INTCOOKIE *ep4_lookup_intcookie (EP4_RAIL *rail, E4_uint64 cookie);
59748 +extern E4_uint64      ep4_neterr_cookie (EP4_RAIL *rail, unsigned int node);
59749 +
59750 +extern void           ep4_flush_filters (EP_RAIL *r);
59751 +extern void           ep4_flush_queues (EP_RAIL *r);
59752 +extern void          ep4_write_qdesc (EP4_RAIL *rail, sdramaddr_t qaddr, E4_InputQueue *qdesc);
59753 +
59754 +extern EP4_ECQ       *ep4_alloc_ecq (EP4_RAIL *rail, unsigned int cqsize);
59755 +extern void           ep4_free_ecq (EP4_RAIL *rail, EP4_ECQ *ecq);
59756 +extern EP4_ECQ      *ep4_get_ecq (EP4_RAIL *rail, unsigned int which, unsigned int ndwords);
59757 +extern void           ep4_put_ecq (EP4_RAIL *rail, EP4_ECQ *ecq, unsigned int ndwords);
59758 +
59759 +extern void           ep4_nop_cmd (EP4_ECQ *ecq, E4_uint64 tag);
59760 +extern void           ep4_set_event_cmd (EP4_ECQ *ecq, E4_Addr event);
59761 +extern void           ep4_wait_event_cmd (EP4_ECQ *ecq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1);
59762 +
59763 +extern void           ep4_flush_interrupt (EP4_RAIL *rail, void *arg);
59764 +extern void           ep4_flush_ecqs (EP4_RAIL *rail);
59765 +
59766 +extern void           ep4_init_thread (EP4_RAIL *rail, E4_ThreadRegs *regs, sdramaddr_t stackTop, 
59767 +                                      EP_ADDR stackAddr, E4_Addr startpc, int nargs,...);
59768 +
59769 +extern void           ep4_initialise_dma_retries (EP4_RAIL *rail);
59770 +extern void           ep4_finalise_dma_retries (EP4_RAIL *rail);
59771 +extern int            ep4_reserve_dma_retries (EP4_RAIL *rail, unsigned int count, unsigned int attr);
59772 +extern void          ep4_release_dma_retries(EP4_RAIL *rail, unsigned int count);
59773 +extern void           ep4_queue_dma_retry (EP4_RAIL *rail, E4_DMA *dma, int interval);
59774 +extern void           ep4_queue_dma_stalled (EP4_RAIL *rail, E4_DMA *dma);
59775 +extern void           ep4_free_stalled_dmas (EP4_RAIL *rail, unsigned int nodeId);
59776 +extern void           ep4_display_rail (EP4_RAIL *rail);
59777 +
59778 +extern void           ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
59779 +extern void           ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
59780 +extern void           ep4_retry_thread (EP4_RAIL *rail);
59781 +
59782 +/* neterr_elan4.c */
59783 +extern void           ep4_add_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops);
59784 +extern void           ep4_remove_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops);
59785 +extern void           ep4_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
59786 +
59787 +/* commands_elan4.c */
59788 +extern void           elan4_nop_cmd (ELAN4_CQ *cq, E4_uint64 tag);
59789 +extern void           elan4_write_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data);
59790 +extern void           elan4_add_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data);
59791 +extern void           elan4_copy64_cmd (ELAN4_CQ *cq, E4_Addr from, E4_Addr to, E4_uint32 datatype);
59792 +extern void           elan4_interrupt_cmd (ELAN4_CQ *cq, E4_uint64 cookie);
59793 +extern void           elan4_run_thread_cmd (ELAN4_CQ *cq, E4_ThreadRegs *regs);
59794 +extern void           elan4_run_dma_cmd (ELAN4_CQ *cq, E4_DMA *dma);
59795 +extern void           elan4_set_event_cmd (ELAN4_CQ *cq, E4_Addr event);
59796 +extern void           elan4_set_eventn_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint32 count);
59797 +extern void           elan4_wait_event_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1);
59798 +extern void           elan4_open_packet (ELAN4_CQ *cq, E4_uint64 command);
59799 +extern void           elan4_guard (ELAN4_CQ *cq, E4_uint64 command);
59800 +extern void           elan4_sendtrans0 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr);
59801 +extern void           elan4_sendtrans1 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0);
59802 +extern void           elan4_sendtrans2 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0, E4_uint64 p1);
59803 +extern void           elan4_sendtransn (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, ...);
59804 +extern void           elan4_sendtransp (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 *ptr);
59805 +
59806 +extern void           ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
59807 +extern void          ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
59808 +extern void           ep4_retry_thread (EP4_RAIL *rail);
59809 +
59810 +extern void           ep4_fillout_stats(EP_RAIL *rail, char *str);
59811 +
59812 +#endif /* ! defined(__elan4__) */
59813 +
59814 +#endif /* __EP_KCOMM_ELAN4_H */
59815 Index: linux-2.4.21/drivers/net/qsnet/ep/kcomm_vp.h
59816 ===================================================================
59817 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kcomm_vp.h   2004-02-23 16:02:56.000000000 -0500
59818 +++ linux-2.4.21/drivers/net/qsnet/ep/kcomm_vp.h        2005-06-01 23:12:54.668428616 -0400
59819 @@ -0,0 +1,36 @@
59820 +/*
59821 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
59822 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
59823 + *
59824 + *    For licensing information please see the supplied COPYING file
59825 + *
59826 + */
59827 +
59828 +#ifndef __EP_KCOMM_VP_H
59829 +#define __EP_KCOMM_VP_H
59830 +
59831 +#ident "@(#)$Id: kcomm_vp.h,v 1.2 2004/03/24 11:32:56 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
59832 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_vp.h,v $*/
59833 +
59834 +#define EP_MAX_NODES                   2048                    /* Max nodes we support */
59835 +
59836 +/* virtual process allocation */
59837 +#define EP_VP_NODE_BASE                        (0)
59838 +#define EP_VP_DATA_BASE                        (EP_VP_NODE_BASE + EP_MAX_NODES)
59839 +#define EP_VP_PROBE_BASE               (EP_VP_DATA_BASE + EP_MAX_NODES)
59840 +#define EP_VP_PROBE_COUNT              ELAN_MAX_LEVELS
59841 +
59842 +#define EP_VP_BCAST_BASE               (EP_VP_PROBE_BASE + EP_VP_PROBE_COUNT)
59843 +#define EP_VP_BCAST_COUNT              (CM_SGMTS_PER_LEVEL * (CM_MAX_LEVELS - 1) + 1)
59844 +
59845 +#define EP_VP_NODE(nodeId)             (EP_VP_NODE_BASE + (nodeId))
59846 +#define EP_VP_DATA(nodeId)             (EP_VP_DATA_BASE + (nodeId))
59847 +#define EP_VP_PROBE(lvl)               (EP_VP_PROBE_BASE + (lvl))
59848 +#define EP_VP_BCAST(lvl,sgmt)          (EP_VP_BCAST_BASE + ((lvl) - 1)*CM_SGMTS_PER_LEVEL + (sgmt))
59849 +
59850 +#define EP_VP_TO_NODE(vp)              ((vp) & (EP_MAX_NODES-1))
59851 +#define EP_VP_ISDATA(vp)               ((vp) >= EP_VP_DATA_BASE && (vp) < (EP_VP_DATA_BASE + EP_MAX_NODES))
59852 +
59853 +#endif /* __EP_KCOMM_VP_H */
59854 +
59855 +
59856 Index: linux-2.4.21/drivers/net/qsnet/ep/kmap.c
59857 ===================================================================
59858 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kmap.c       2004-02-23 16:02:56.000000000 -0500
59859 +++ linux-2.4.21/drivers/net/qsnet/ep/kmap.c    2005-06-01 23:12:54.669428464 -0400
59860 @@ -0,0 +1,561 @@
59861 +/*
59862 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
59863 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
59864 + *
59865 + *    For licensing information please see the supplied COPYING file
59866 + *
59867 + */
59868 +
59869 +#ident "@(#)$Id: kmap.c,v 1.10.6.2 2004/12/14 10:19:14 mike Exp $"
59870 +/*      $Source: /cvs/master/quadrics/epmod/kmap.c,v $*/
59871 +
59872 +#include <qsnet/kernel.h>
59873 +#include <qsnet/kpte.h>
59874 +
59875 +#include <elan/kcomm.h>
59876 +
59877 +#include "debug.h"
59878 +
59879 +#if defined(DIGITAL_UNIX)
59880 +#  define kernel_map                   (first_task->map)
59881 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
59882 +#elif defined(LINUX)
59883 +#  define kernel_map                   get_kern_mm()
59884 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
59885 +#elif defined(SOLARIS)
59886 +#  define kernel_map                   &kas
59887 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
59888 +#endif
59889 +
59890 +void
59891 +ep_perrail_kaddr_map (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t kaddr, unsigned long len,  unsigned int perm, int ep_attr)
59892 +{
59893 +    rail->Operations.KaddrMap (rail, eaddr, kaddr, len, perm, ep_attr);
59894 +}
59895 +
59896 +void
59897 +ep_perrail_sdram_map (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned long len, unsigned int perm, int ep_attr)
59898 +{
59899 +    rail->Operations.SdramMap (rail, eaddr, saddr, len, perm, ep_attr);
59900 +}
59901 +
59902 +void
59903 +ep_perrail_unmap (EP_RAIL *rail, EP_ADDR eaddr, unsigned long len)
59904 +{
59905 +    rail->Operations.Unmap (rail, eaddr, len);
59906 +}
59907 +
59908 +void
59909 +ep_perrail_dvma_sync (EP_RAIL *rail)
59910 +{
59911 +    if (rail->TlbFlushRequired)
59912 +    {
59913 +       rail->TlbFlushRequired = 0;
59914 +
59915 +       rail->Operations.FlushTlb (rail);
59916 +    }
59917 +}
59918 +
59919 +
59920 +static int ep_dvma_map_rails (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask);
59921 +
59922 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
59923 +static uint16_t ep_dvma_calc_check_sum (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum);
59924 +#endif
59925 +
59926 +EP_NMH_OPS ep_dvma_nmh_ops = 
59927 +{
59928 +    ep_dvma_map_rails,
59929 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
59930 +    ep_dvma_calc_check_sum
59931 +#endif
59932 +};
59933 +
59934 +extern void
59935 +ep_dvma_init (EP_SYS *sys)
59936 +{
59937 +    EP_DVMA_STATE *d = &sys->DvmaState;
59938 +
59939 +    kmutex_init (&d->dvma_lock);
59940 +    
59941 +    INIT_LIST_HEAD (&d->dvma_handles);
59942 +    INIT_LIST_HEAD (&d->dvma_rails);
59943 +
59944 +    d->dvma_rmap = ep_rmallocmap (EP_DVMA_RMAP_SIZE, "dvma_rmap", 1);
59945 +
59946 +    ep_rmfree (d->dvma_rmap, EP_DVMA_TOP - EP_DVMA_BASE, EP_DVMA_BASE);
59947 +}
59948 +
59949 +extern void
59950 +ep_dvma_fini (EP_SYS *sys)
59951 +{
59952 +    EP_DVMA_STATE *d = &sys->DvmaState;
59953 +
59954 +    ep_rmfreemap (d->dvma_rmap);
59955 +
59956 +    kmutex_destroy (&d->dvma_lock);
59957 +}
59958 +    
59959 +extern int
59960 +ep_dvma_add_rail (EP_SYS *sys, EP_RAIL *rail)
59961 +{
59962 +    EP_DVMA_STATE    *d = &sys->DvmaState;
59963 +    EP_RAIL_ENTRY    *l;
59964 +    struct list_head *el;
59965 +
59966 +    KMEM_ZALLOC (l, EP_RAIL_ENTRY *, sizeof (EP_RAIL_ENTRY), 1);
59967 +
59968 +    if (l == NULL)
59969 +       return (ENOMEM);
59970 +
59971 +    kmutex_lock (&d->dvma_lock);
59972 +
59973 +    l->Rail = rail;
59974 +
59975 +    list_add_tail (&l->Link, &d->dvma_rails);
59976 +
59977 +    list_for_each (el, &d->dvma_handles) {
59978 +       EP_DVMA_NMH *desc   = list_entry (el, EP_DVMA_NMH, dvma_link);
59979 +       int          npages = desc->dvma_nmh.nmh_nmd.nmd_len >> PAGESHIFT;
59980 +
59981 +       desc->dvma_rails[rail->Number] = rail;
59982 +       desc->dvma_railmask |= ( 1 << rail->Number);
59983 +
59984 +       desc->dvma_private[rail->Number] = rail->Operations.DvmaReserve (rail, desc->dvma_nmh.nmh_nmd.nmd_addr, npages);
59985 +    }
59986 +
59987 +    kmutex_unlock (&d->dvma_lock);
59988 +    return (0);
59989 +}
59990 +
59991 +extern void
59992 +ep_dvma_remove_rail (EP_SYS *sys, EP_RAIL *rail)
59993 +{
59994 +    EP_DVMA_STATE    *d = &sys->DvmaState;
59995 +    struct list_head *el;
59996 +
59997 +    kmutex_lock (&d->dvma_lock);
59998 +    
59999 +    list_for_each (el, &d->dvma_handles) {
60000 +       EP_DVMA_NMH *desc   = list_entry (el, EP_DVMA_NMH, dvma_link);
60001 +       int          npages = desc->dvma_nmh.nmh_nmd.nmd_len >> PAGESHIFT;
60002 +
60003 +       desc->dvma_rails[rail->Number] = NULL;
60004 +       desc->dvma_railmask &= ~(1 << rail->Number);
60005 +
60006 +       rail->Operations.DvmaRelease (rail, desc->dvma_nmh.nmh_nmd.nmd_addr, npages, desc->dvma_private[rail->Number]);
60007 +    }
60008 +
60009 +    list_for_each (el, &d->dvma_rails) {
60010 +       EP_RAIL_ENTRY *tmp = list_entry (el, EP_RAIL_ENTRY, Link);
60011 +
60012 +       if (tmp->Rail == rail)
60013 +       {
60014 +           list_del (el);
60015 +
60016 +           KMEM_FREE (tmp, sizeof (EP_RAIL_ENTRY));
60017 +           break;
60018 +       }
60019 +    }
60020 +    kmutex_unlock (&d->dvma_lock);
60021 +}
60022 +
60023 +EP_NMH *
60024 +ep_dvma_reserve (EP_SYS *sys, unsigned npages, unsigned perm)
60025 +{
60026 +    EP_DVMA_STATE    *d = &sys->DvmaState;
60027 +    EP_DVMA_NMH      *desc;
60028 +    EP_ADDR          addr;
60029 +    struct list_head *el;
60030 +    int               i;
60031 +
60032 +    KMEM_ZALLOC (desc, EP_DVMA_NMH *, offsetof (EP_DVMA_NMH, dvma_attrs[npages]), 1);
60033 +    
60034 +    if (desc == NULL)
60035 +       return (NULL);
60036 +
60037 +    if ((addr = ep_rmalloc (d->dvma_rmap, npages << PAGESHIFT, 0)) == 0)
60038 +    {
60039 +
60040 +       KMEM_FREE (desc, sizeof (EP_DVMA_NMH));
60041 +       return (NULL);
60042 +    }
60043 +
60044 +    spin_lock_init (&desc->dvma_lock);
60045 +
60046 +    desc->dvma_perm = perm;
60047 +
60048 +    kmutex_lock (&d->dvma_lock);
60049 +    /* reserve the mapping resource */
60050 +    list_for_each (el, &d->dvma_rails) {
60051 +       EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
60052 +
60053 +       EPRINTF4 (DBG_KMAP, "%s: ep_dvma_reserve desc=%p npages=%d rail=%p\n", rail->Name, desc, npages, rail);
60054 +
60055 +       if ((desc->dvma_private[rail->Number] = rail->Operations.DvmaReserve (rail, addr, npages)) == NULL)
60056 +       {
60057 +           printk ("%s: !!ep_dvma_reserve - rail->DvmaReserve failed\n", rail->Name);
60058 +           goto failed;
60059 +       }
60060 +
60061 +       desc->dvma_rails[rail->Number] = rail;
60062 +       desc->dvma_railmask |= (1 << rail->Number);
60063 +    }
60064 +
60065 +    /* insert into the network mapping handle table */
60066 +    desc->dvma_nmh.nmh_nmd.nmd_addr = addr;
60067 +    desc->dvma_nmh.nmh_nmd.nmd_len  = npages << PAGESHIFT;
60068 +    desc->dvma_nmh.nmh_nmd.nmd_attr = EP_NMD_ATTR (sys->Position.pos_nodeid, 0);
60069 +    desc->dvma_nmh.nmh_ops          = &ep_dvma_nmh_ops;
60070 +
60071 +    ep_nmh_insert (&sys->MappingTable, &desc->dvma_nmh);
60072 +
60073 +    list_add (&desc->dvma_link, &d->dvma_handles);
60074 +
60075 +    kmutex_unlock (&d->dvma_lock);
60076 +
60077 +    return (&desc->dvma_nmh);
60078 +
60079 + failed:
60080 +
60081 +    kmutex_unlock (&d->dvma_lock);
60082 +
60083 +    for (i = 0; i < EP_MAX_RAILS; i++)
60084 +       if (desc->dvma_rails[i] != NULL)
60085 +           desc->dvma_rails[i]->Operations.DvmaRelease (desc->dvma_rails[i], addr, npages, desc->dvma_private[i]);
60086 +
60087 +    ep_rmfree (d->dvma_rmap, npages << PAGESHIFT, addr);
60088 +
60089 +    KMEM_FREE (desc, sizeof (EP_DVMA_NMH));
60090 +    return (NULL);
60091 +}
60092 +
60093 +void
60094 +ep_dvma_release (EP_SYS *sys, EP_NMH *nmh)
60095 +{
60096 +    EP_DVMA_STATE *d      = &sys->DvmaState;
60097 +    EP_DVMA_NMH   *desc   = (EP_DVMA_NMH *) nmh;
60098 +    EP_ADDR        addr   = nmh->nmh_nmd.nmd_addr;
60099 +    int            npages = nmh->nmh_nmd.nmd_len >> PAGESHIFT;
60100 +    EP_RAIL       *rail;
60101 +    int            i;
60102 +
60103 +    kmutex_lock (&d->dvma_lock);
60104 +
60105 +    list_del (&desc->dvma_link);
60106 +    
60107 +    ep_nmh_remove (&sys->MappingTable, nmh);
60108 +
60109 +    for (i = 0; i < EP_MAX_RAILS; i++)
60110 +       if ((rail = desc->dvma_rails[i]) != NULL)
60111 +           rail->Operations.DvmaRelease (rail, addr, npages, desc->dvma_private[i]);
60112 +
60113 +    ep_rmfree (d->dvma_rmap, npages << PAGESHIFT, addr);
60114 +
60115 +    KMEM_FREE (desc, offsetof (EP_DVMA_NMH, dvma_attrs[npages]));
60116 +
60117 +    kmutex_unlock (&d->dvma_lock);
60118 +}
60119 +
60120 +void
60121 +ep_dvma_load (EP_SYS *sys, void *map, caddr_t vaddr, unsigned len, EP_NMH *nmh, unsigned index, EP_RAILMASK *hints, EP_NMD *subset)
60122 +{
60123 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
60124 +    unsigned     offset = (unsigned long) vaddr & PAGEOFFSET;
60125 +    unsigned     npages = btopr (len + offset);
60126 +    EP_ADDR      addr   = nmh->nmh_nmd.nmd_addr + (index << PAGESHIFT);
60127 +    int                 rmask  = *hints;
60128 +    EP_RAIL     *rail;
60129 +    register int i, rnum;
60130 +    unsigned long flags;
60131 +
60132 +    EPRINTF7 (DBG_KMAP, "ep_dvma_load: map=%p vaddr=%p len=%x nmh=%p(%x,%x) index=%d\n",
60133 +             map, vaddr, len, nmh, nmh->nmh_nmd.nmd_addr, nmh->nmh_nmd.nmd_len, index);
60134 +
60135 +    /* If no rail specified, then map into all rails */
60136 +    if (rmask == 0)
60137 +       rmask = desc->dvma_railmask;
60138 +
60139 +    ASSERT ((index + npages) <= (nmh->nmh_nmd.nmd_len >> PAGESHIFT));
60140 +
60142 +    /* If no map is specified then use the kernel map */
60142 +    if (map == NULL)
60143 +       map = kernel_map;
60144 +
60145 +    spin_lock_irqsave (&desc->dvma_lock, flags);
60146 +    /* Now map each of the specified pages (backwards) */
60147 +
60148 +    vaddr = (vaddr - offset) + (npages-1)*PAGESIZE;
60149 +    for (i = npages-1; i >= 0; i--, vaddr -= PAGESIZE)
60150 +    {
60151 +       physaddr_t paddr = vaddr_to_phys (map, vaddr);
60152 +       
60153 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60154 +       {
60155 +           if (! (rmask & (1 << rnum)) || (rail = desc->dvma_rails[rnum]) == NULL)
60156 +               rmask &= ~(1 << rnum);
60157 +           else
60158 +           {
60159 +               rail->Operations.DvmaSetPte (rail, desc->dvma_private[rnum], index + i, paddr, desc->dvma_perm);
60160 +
60161 +               desc->dvma_attrs[index + i] |= (1 << rnum);
60162 +           }
60163 +       }
60164 +    }
60165 +
60166 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60167 +       if ((rmask & (1 << rnum)) && (rail = desc->dvma_rails[rnum]) != NULL)
60168 +           rail->TlbFlushRequired = 1;
60169 +
60170 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
60171 +
60172 +    /* Construct the network mapping handle to be returned. */
60173 +    subset->nmd_addr = addr + offset;
60174 +    subset->nmd_len  = len;
60175 +    subset->nmd_attr = EP_NMD_ATTR(sys->Position.pos_nodeid, rmask);
60176 +}
60177 +
60178 +void
60179 +ep_dvma_unload (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd)
60180 +{
60181 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
60182 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
60183 +    unsigned     npages = btopr (nmd->nmd_len + offset);
60184 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
60185 +    EP_RAIL     *rail;
60186 +    int          rnum;
60187 +    int          rmask;
60188 +    register int i;
60189 +    unsigned long flags;
60190 +    
60191 +    spin_lock_irqsave (&desc->dvma_lock, flags);
60192 +
60193 +    /* compute which rails we need to unload on */
60194 +    for (rmask = 0, i = 0; i < npages; i++)
60195 +    {
60196 +       rmask |= desc->dvma_attrs[index + i];
60197 +       
60198 +       desc->dvma_attrs[index + i] = 0;
60199 +    }
60200 +    
60201 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60202 +       if ((rmask & (1 << rnum)) && (rail = desc->dvma_rails[rnum]) != NULL)
60203 +           rail->Operations.DvmaUnload (rail, desc->dvma_private[rnum], index, npages);
60204 +
60205 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
60206 +}
60207 +
60208 +int
60209 +ep_dvma_map_rails (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask)
60210 +{
60211 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
60212 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
60213 +    unsigned     npages = btopr (nmd->nmd_len + offset);
60214 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
60215 +    int          r, rnum;
60216 +    register int i;
60217 +    unsigned long flags;
60218 +
60219 +    spin_lock_irqsave (&desc->dvma_lock, flags);
60220 +
60221 +    EPRINTF4 (DBG_KMAP, "ep_dvma_map_rails: nmd=%08x.%08x.%08x mask=%04x\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, mask);
60222 +
60223 +    if ((mask &= desc->dvma_railmask) == 0)
60224 +    {
60225 +       printk ("ep_dvma_map_rails: no intersecting rails %04x.%04x\n", mask, desc->dvma_railmask);
60226 +       spin_unlock_irqrestore (&desc->dvma_lock, flags);
60227 +       return (-1);
60228 +    }
60229 +
60230 +    for (i = npages-1; i >= 0; i--)
60231 +    {
60232 +       int pgidx = (index + i);
60233 +
60234 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60235 +           if (desc->dvma_attrs[pgidx] & (1 << rnum))
60236 +               break;
60237 +       
60238 +       if (rnum == EP_MAX_RAILS)
60239 +       {
60240 +           EPRINTF3 (DBG_KMAP, "ep_dvma_map_rails: nmh=%p idx=%x [%08x] not ptes valid\n", nmh, pgidx, 
60241 +                     nmh->nmh_nmd.nmd_addr + ((pgidx) << PAGESHIFT));
60242 +           mask = 0;
60243 +       }
60244 +       else
60245 +       {
60246 +           EP_RAIL   *rail  = desc->dvma_rails[rnum];
60247 +           physaddr_t paddr = rail->Operations.DvmaReadPte (rail, desc->dvma_private[rnum], pgidx);
60248 +           
60249 +           EPRINTF5 (DBG_KMAP, "%s: ep_dvma_map_rails: nmh=%p idx=%x [%08x] paddr %llx\n", rail->Name, nmh, pgidx,
60250 +                     nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), (long long) paddr);
60251 +           
60252 +           for (r = 0; r < EP_MAX_RAILS; r++)
60253 +           {
60254 +               if ((mask & (1 << r)) == 0)
60255 +                   continue;
60256 +               
60257 +               if ((desc->dvma_attrs[pgidx] & (1 << r)) == 0)
60258 +               {
60259 +                   EPRINTF5 (DBG_KMAP, "%s: ep_dvma_map_rails: nmh=%p idx=%x [%08x] paddr=%llx\n",
60260 +                             desc->dvma_rails[rnum]->Name, nmh, pgidx, nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), 
60261 +                             (long long) paddr);
60262 +                   
60263 +                   rail->Operations.DvmaSetPte (rail, desc->dvma_private[rnum], pgidx, paddr, desc->dvma_perm);
60264 +                   
60265 +                   desc->dvma_attrs[pgidx] |= (1 << r);
60266 +               }
60267 +           }
60268 +       }
60269 +    }
60270 +
60271 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60272 +       if ((mask & (1 << rnum)) != 0)
60273 +           desc->dvma_rails[rnum]->TlbFlushRequired = 1;
60274 +
60275 +    EPRINTF4 (DBG_KMAP, "ep_dvma_map_rails: nmd=%08x.%08x.%08x|%04x\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, mask);
60276 +
60277 +    /* Finally update the network memory descriptor */
60278 +    nmd->nmd_attr |= mask;
60279 +
60280 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
60281 +
60282 +    return (0);
60283 +}
60284 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
60285 +#include <linux/highmem.h>
60286 +
60287 +/* Generic rolling checksum algorithm */
60288 +uint16_t
60289 +rolling_check_sum (char *msg, int nob, uint16_t sum)
60290 +{
60291 +    while (nob-- > 0)
60292 +       sum = sum * 13 + *msg++;
60293 +
60294 +    return (sum);
60295 +}
60296 +
60297 +#if ! defined(NO_RMAP)
60298 +void  
60299 +unmap_phys_address(unsigned long phys_addr)
60300 +{
60301 +    unsigned long pfn = (phys_addr >> PAGE_SHIFT);
60302 +    
60303 +    if (pfn_valid(pfn)) 
60304 +       kunmap(pfn_to_page(pfn));
60305 +}
60306 +
60307 +void * 
60308 +map_phys_address(unsigned long phys_addr)
60309 +{
60310 +    unsigned long pfn = (phys_addr >> PAGE_SHIFT);
60311 +    
60312 +    if (pfn_valid(pfn)) 
60313 +       return  kmap(pfn_to_page(pfn));
60314 +    
60315 +    return NULL;
60316 +}
60317 +#else
60318 +void  
60319 +unmap_phys_address(unsigned long phys_addr)
60320 +{
60321 +    struct page *p = virt_to_page(__va(phys_addr));
60322 +    
60323 +    if (VALID_PAGE(p)) 
60324 +       kunmap(p);
60325 +}
60326 +
60327 +void * 
60328 +map_phys_address(unsigned long phys_addr)
60329 +{
60330 +    struct page *p = virt_to_page(__va(phys_addr));
60331 +                               
60332 +    if (VALID_PAGE(p)) 
60333 +       return  kmap(p);
60334 +    
60335 +    return NULL;
60336 +}
60337 +#endif
60338 +
60339 +uint16_t
60340 +ep_dvma_calc_check_sum (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum)
60341 +{
60343 +    /* can't be called from an interrupt */
60343 +
60344 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
60345 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
60346 +    unsigned     npages = btopr (nmd->nmd_len + offset);
60347 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
60348 +    unsigned     start, len;
60349 +    int          rnum;
60350 +    register int i;
60351 +    unsigned long flags;
60352 +    EP_RAIL      *rail;
60353 +
60354 +
60355 +    spin_lock_irqsave (&desc->dvma_lock, flags);
60356 +
60357 +    EPRINTF3 (DBG_KMAP, "ep_dvma_calc_check_sum: nmd=%08x.%08x.%08x \n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
60358
60359 +    /* find a rail */
60360 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60361 +       if (desc->dvma_attrs[index] & (1 << rnum))
60362 +           break;
60363 +       
60364 +    ASSERT (rnum != EP_MAX_RAILS);
60365
60366 +    rail = desc->dvma_rails[rnum];
60367 +
60368 +    for (i = 0; i <= (npages-1); i++)
60369 +    {
60370 +       int        pgidx = (index + i);
60371 +       physaddr_t paddr = rail->Operations.DvmaReadPte (rail, desc->dvma_private[rnum], pgidx);
60372 +       void *     virt;
60373 +
60374 +       spin_unlock_irqrestore (&desc->dvma_lock, flags); /* unlock for check sum calc */
60375 +
60376 +       virt = map_phys_address(paddr);
60377 +
60378 +       if (!virt)
60379 +           printk("ep_dvma_calc_check_sum: virt = NULL ! \n");
60380 +       else {
60381 +           if ( i == 0 ) {
60382 +               /* last bit of the first page */
60383 +               start =  (nmd->nmd_addr & (PAGESIZE - 1)) ;
60384 +               len   =  PAGESIZE - start;
60385 +               if ( len > nmd->nmd_len) /* less than the remaining page */ 
60386 +                   len =  nmd->nmd_len;
60387 +           } else {
60388 +               if ( i != (npages-1)) {
60389 +                   /* all of the middle pages    */
60390 +                   start = 0;
60391 +                   len   = PAGESIZE;
60392 +               } else {
60393 +                   /* first bit of the last page */
60394 +                   start = 0;
60395 +                   len   = ((nmd->nmd_addr + nmd->nmd_len -1) & (PAGESIZE -1)) +1;
60396 +               }
60397 +           }
60398 +
60399 +           check_sum = rolling_check_sum (((char *)virt)+start, len, check_sum);
60400 +           unmap_phys_address(paddr);
60401 +   
60403 +           /* re-acquire the lock */
60403 +           spin_lock_irqsave (&desc->dvma_lock, flags);
60404 +       }
60405 +       
60406 +       EPRINTF5 (DBG_KMAP, "%s: ep_dvma_calc_check_sum: nmh=%p idx=%x [%08x] paddr %llx\n", rail->Name, nmh, pgidx,
60407 +                 nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), (long long) paddr);     
60408 +    }
60409 +
60410 +    EPRINTF4 (DBG_KMAP, "ep_dvma_calc_check_sum: nmd=%08x.%08x.%08x = %d\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, check_sum);
60411 +
60412 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
60413 +
60414 +    return (check_sum);
60415 +}
60416 +#endif
60417 +/*
60418 + * Local variables:
60419 + * c-file-style: "stroustrup"
60420 + * End:
60421 + */
60422 Index: linux-2.4.21/drivers/net/qsnet/ep/kmap_elan3.c
60423 ===================================================================
60424 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kmap_elan3.c 2004-02-23 16:02:56.000000000 -0500
60425 +++ linux-2.4.21/drivers/net/qsnet/ep/kmap_elan3.c      2005-06-01 23:12:54.670428312 -0400
60426 @@ -0,0 +1,209 @@
60427 +/*
60428 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
60429 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
60430 + *
60431 + *    For licensing information please see the supplied COPYING file
60432 + *
60433 + */
60434 +
60435 +#ident "@(#)$Id: kmap_elan3.c,v 1.3.8.1 2004/12/14 10:19:14 mike Exp $"
60436 +/*      $Source: /cvs/master/quadrics/epmod/kmap_elan3.c,v $ */
60437 +
60438 +#include <qsnet/kernel.h>
60439 +
60440 +#include <elan3/elanregs.h>
60441 +#include <elan3/elandev.h>
60442 +#include <elan3/elanvp.h>
60443 +#include <elan3/elan3mmu.h>
60444 +#include <elan3/elanctxt.h>
60445 +#include <elan3/elandebug.h>
60446 +
60447 +#include <elan/kcomm.h>
60448 +
60449 +#include "kcomm_elan3.h"
60450 +
60451 +#if defined(DIGITAL_UNIX)
60452 +#  define kernel_map                   (first_task->map)
60453 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
60454 +#elif defined(LINUX)
60455 +#  define kernel_map                   get_kern_mm()
60456 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
60457 +#elif defined(SOLARIS)
60458 +#  define kernel_map                   &kas
60459 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
60460 +#endif
60461 +
60462 +#define ELAN3_PTES_PER_PAGE            (PAGESIZE/ELAN3_PAGE_SIZE)
60463 +
60464 +#if defined(__LITTLE_ENDIAN__)
60465 +#define PERM_ENDIAN    0
60466 +#else
60467 +#define PERM_ENDIAN    ELAN3_PTE_BIG_ENDIAN
60468 +#endif
60469 +
60470 +static unsigned int main_permtable[] = 
60471 +{
60472 +    ELAN3_PERM_REMOTEALL,              /* EP_PERM_EXECUTE */
60473 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
60474 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
60475 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_ALL */
60476 +};
60477 +
60478 +static unsigned int sdram_permtable[] = 
60479 +{
60480 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_EXECUTE */
60481 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
60482 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
60483 +    ELAN3_PERM_REMOTEALL,              /* EP_PERM_ALL */
60484 +};
60485 +
60486 +static unsigned int io_permtable[] = 
60487 +{
60488 +    ELAN3_PERM_LOCAL_READ,             /* EP_PERM_EXECUTE */
60489 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
60490 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
60491 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_ALL */
60492 +};
60493 +
60494 +void
60495 +ep3_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned len, unsigned int perm, int ep_attr)
60496 +{
60497 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
60498 +    unsigned     npages = len >> PAGESHIFT;
60499 +    int          i;
60500 +    unsigned int off;
60501 +
60502 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (kaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
60503 +
60504 +    for (i = 0; i < npages; i++)
60505 +    {
60506 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) kaddr);
60507 +
60508 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
60509 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr + off, paddr + off, 
60510 +                             main_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC | ((ep_attr & EP_NO_SLEEP) ? PTE_NO_SLEEP : 0));
60511 +
60512 +       eaddr += PAGESIZE;
60513 +       kaddr += PAGESIZE;
60514 +    }
60515 +}
60516 +
60517 +void
60518 +ep3_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned len, unsigned int perm, int ep_attr)
60519 +{
60520 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
60521 +    unsigned     npages = len >> PAGESHIFT;
60522 +    int          i;
60523 +    unsigned int off;
60524 +
60525 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (saddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
60526 +
60527 +    for (i = 0; i < npages; i++)
60528 +    {
60529 +       physaddr_t paddr = elan3_sdram_to_phys (rail->Device, saddr);
60530 +
60531 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
60532 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr+off, paddr+off, 
60533 +                             sdram_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC | ((ep_attr & EP_NO_SLEEP) ? PTE_NO_SLEEP : 0) );
60534 +
60535 +       eaddr += PAGESIZE;
60536 +       saddr += PAGESIZE;
60537 +    }
60538 +}
60539 +
60540 +void
60541 +ep3_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned len, unsigned int perm)
60542 +{
60543 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
60544 +    unsigned     npages = len >> PAGESHIFT;
60545 +    int          i;
60546 +    unsigned int off;
60547 +
60548 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (ioaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
60549 +
60550 +    for (i = 0; i < npages; i++)
60551 +    {
60552 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) ioaddr);
60553 +
60554 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
60555 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr + off, paddr + off, 
60556 +                             io_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC);
60557 +
60558 +       eaddr += PAGESIZE;
60559 +       ioaddr += PAGESIZE;
60560 +    }
60561 +}
60562 +void
60563 +ep3_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned len)
60564 +{
60565 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60566 +
60567 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
60568 +
60569 +    elan3mmu_unload (rail->Elan3mmu, eaddr, len, PTE_UNLOAD_UNLOCK | PTE_UNLOAD_NOSYNC);
60570 +}
60571 +
60572 +void *
60573 +ep3_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned npages)
60574 +{
60575 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60576 +    void     *private;
60577 +
60578 +    KMEM_ALLOC (private, void *, npages * ELAN3_PTES_PER_PAGE * sizeof (sdramaddr_t), 1);
60579 +    
60580 +    if (private == NULL)
60581 +       return NULL;
60582 +    
60583 +    elan3mmu_reserve (rail->Elan3mmu, eaddr, npages * ELAN3_PTES_PER_PAGE, (sdramaddr_t *) private);
60584 +
60585 +    return private;
60586 +}
60587 +
60588 +void
60589 +ep3_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned npages, void *private)
60590 +{
60591 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60592 +
60593 +    elan3mmu_release (rail->Elan3mmu, eaddr, npages * ELAN3_PTES_PER_PAGE, (sdramaddr_t *) private);
60594 +
60595 +    KMEM_FREE (private, npages * ELAN3_PTES_PER_PAGE * sizeof (sdramaddr_t));
60596 +}
60597 +
60598 +void
60599 +ep3_dvma_set_pte (EP_RAIL *r, void *private, unsigned index, physaddr_t paddr, unsigned int perm)
60600 +{
60601 +    ELAN3_DEV    *dev  = ((EP3_RAIL *) r)->Device;
60602 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
60603 +    int          off;
60604 +
60605 +    for (off =0 ; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
60606 +    {  
60607 +       ELAN3_PTE newpte = elan3mmu_phys_to_pte (dev, paddr + off, main_permtable[perm]) | ELAN3_PTE_REF | ELAN3_PTE_MOD;
60608 +
60609 +       elan3_writepte (dev, *ptep, newpte);
60610 +
60611 +       ptep++;
60612 +    }
60613 +}
60614 +
60615 +physaddr_t
60616 +ep3_dvma_read_pte (EP_RAIL *r, void *private, unsigned index)
60617 +{
60618 +    EP3_RAIL    *rail = (EP3_RAIL *) r;
60619 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
60620 +    ELAN3_PTE     pte  = elan3_readpte (rail->Device, *ptep);
60621 +
60622 +    return pte & ELAN3_PTE_PFN_MASK;
60623 +}
60624 +
60625 +void
60626 +ep3_dvma_unload (EP_RAIL *r, void *private, unsigned index, unsigned npages)
60627 +{
60628 +    EP3_RAIL    *rail = (EP3_RAIL *) r;
60629 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
60630 +    ELAN3_PTE     tpte = elan3mmu_kernel_invalid_pte (rail->Elan3mmu);
60631 +    int i;
60632 +
60633 +    for (i = (npages * ELAN3_PTES_PER_PAGE) - 1; i >= 0; i--)
60634 +       elan3_writepte (rail->Device, ptep[i], tpte);
60635 +}
60636 Index: linux-2.4.21/drivers/net/qsnet/ep/kmap_elan4.c
60637 ===================================================================
60638 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kmap_elan4.c 2004-02-23 16:02:56.000000000 -0500
60639 +++ linux-2.4.21/drivers/net/qsnet/ep/kmap_elan4.c      2005-06-01 23:12:54.670428312 -0400
60640 @@ -0,0 +1,226 @@
60641 +/*
60642 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
60643 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
60644 + *
60645 + *    For licensing information please see the supplied COPYING file
60646 + *
60647 + */
60648 +
60649 +#ident "@(#)$Id: kmap_elan4.c,v 1.7.8.2 2004/12/14 10:19:14 mike Exp $"
60650 +/*      $Source: /cvs/master/quadrics/epmod/kmap_elan4.c,v $ */
60651 +
60652 +#include <qsnet/kernel.h>
60653 +
60654 +#include <elan/kcomm.h>
60655 +
60656 +#include "debug.h"
60657 +#include "kcomm_elan4.h"
60658 +
60659 +#if defined(DIGITAL_UNIX)
60660 +#  define kernel_map                   (first_task->map)
60661 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
60662 +#elif defined(LINUX)
60663 +#  define kernel_map                   get_kern_mm()
60664 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
60665 +#elif defined(SOLARIS)
60666 +#  define kernel_map                   &kas
60667 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
60668 +#endif
60669 +
60670 +static unsigned int main_permtable[] = 
60671 +{
60672 +    PERM_Unused,                       /* EP_PERM_EXECUTE */
60673 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
60674 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
60675 +    PERM_DataReadWrite,                        /* EP_PERM_ALL */
60676 +};
60677 +
60678 +static unsigned int sdram_permtable[] = 
60679 +{
60680 +    PERM_LocExecute,                   /* EP_PERM_EXECUTE */
60681 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
60682 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
60683 +    PERM_RemoteAll,                    /* EP_PERM_ALL */
60684 +};
60685 +
60686 +static unsigned int io_permtable[] = 
60687 +{
60688 +    PERM_Unused,                       /* EP_PERM_EXECUTE */
60689 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
60690 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
60691 +    PERM_Unused,                       /* EP_PERM_ALL */
60692 +};
60693 +
60694 +void
60695 +ep4_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr)
60696 +{
60697 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
60698 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
60699 +    unsigned int npages = len >> PAGESHIFT;
60700 +    int          i;
60701 +    unsigned int off;
60702 +
60703 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (kaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
60704 +
60705 +    for (i = 0; i < npages; i++)
60706 +    {
60707 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) kaddr);
60708 +
60709 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
60710 +       {
60711 +           E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, main_permtable[perm]);
60712 +
60713 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, newpte);
60714 +       }
60715 +
60716 +       eaddr += PAGESIZE;
60717 +       kaddr += PAGESIZE;
60718 +    }
60719 +}
60720 +
60721 +void
60722 +ep4_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr)
60723 +{
60724 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
60725 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
60726 +    unsigned int npages = len >> PAGESHIFT;
60727 +    int          i;
60728 +    unsigned int off;
60729 +
60730 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (saddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
60731 +
60732 +    if ((eaddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (saddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)))
60733 +       printk ("ep4_sdram_map: eaddr=%x saddr=%lx - incorrectly alised\n", eaddr, saddr);
60734 +
60735 +    for (i = 0; i < npages; i++)
60736 +    {
60737 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
60738 +       {
60739 +           E4_uint64 newpte = ((saddr + off) >> PTE_PADDR_SHIFT) | PTE_SetPerm (sdram_permtable[perm]);
60740 +
60741 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, newpte);
60742 +       }
60743 +
60744 +       eaddr += PAGESIZE;
60745 +       saddr += PAGESIZE;
60746 +    }
60747 +}
60748 +
60749 +void
60750 +ep4_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned int len, unsigned int perm)
60751 +{
60752 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
60753 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
60754 +    unsigned int npages = len >> PAGESHIFT;
60755 +    int          i;
60756 +    unsigned int off;
60757 +
60758 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (ioaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
60759 +
60760 +    for (i = 0; i < npages; i++)
60761 +    {
60762 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) ioaddr);
60763 +
60764 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
60765 +       {
60766 +           E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, io_permtable[perm]);
60767 +
60768 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, newpte);
60769 +       }
60770 +
60771 +       eaddr += PAGESIZE;
60772 +       ioaddr += PAGESIZE;
60773 +    }
60774 +}
60775 +void
60776 +ep4_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len)
60777 +{
60778 +    EP4_RAIL *rail = (EP4_RAIL *) r;
60779 +
60780 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
60781 +
60782 +    elan4mmu_unload_range (&rail->r_ctxt, 0, eaddr, len);
60783 +}
60784 +
60785 +void *
60786 +ep4_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages)
60787 +{
60788 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
60789 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
60790 +
60791 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_reserve: eaddr=%x npages=%d (=> %d)\n", eaddr, npages, (npages << (PAGE_SHIFT - dev->dev_pageshift[0])));
60792 +
60793 +    return elan4mmu_reserve (&rail->r_ctxt, 0, (E4_Addr) eaddr, (npages << (PAGE_SHIFT - dev->dev_pageshift[0])), 0);
60794 +}
60795 +
60796 +void
60797 +ep4_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private)
60798 +{
60799 +    EP4_RAIL *rail = (EP4_RAIL *) r;
60800 +
60801 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_release: eaddr=%x npages=%d private=%p\n", eaddr, npages, private);
60802 +
60803 +    elan4mmu_release (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private);
60804 +}
60805 +
60806 +void
60807 +ep4_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm)
60808 +{
60809 +    EP4_RAIL     *rail  = (EP4_RAIL *) r;
60810 +    ELAN4_DEV    *dev   = rail->r_ctxt.ctxt_dev;
60811 +    unsigned int  off;
60812 +    unsigned long flags;
60813 +
60814 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_set_pte: index %x -> eaddr %llx paddr %llx\n", 
60815 +             index, ((ELAN4_HASH_CACHE *) private)->hc_start + (index * PAGE_SIZE), (long long) paddr);
60816 +
60817 +    local_irq_save (flags);
60818 +    for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
60819 +    {
60820 +       E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, main_permtable[perm]);
60821 +
60822 +       elan4mmu_set_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, (index << (PAGE_SHIFT - dev->dev_pageshift[0])) +
60823 +                         (off >> dev->dev_pageshift[0]), newpte);
60824 +    }
60825 +    local_irq_restore (flags);
60826 +}
60827 +
60828 +physaddr_t
60829 +ep4_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index)
60830 +{
60831 +    EP4_RAIL     *rail  = (EP4_RAIL *) r;
60832 +    ELAN4_DEV    *dev   = rail->r_ctxt.ctxt_dev;
60833 +    E4_uint64     pte;
60834 +    unsigned long flags;
60835 +
60836 +    local_irq_save (flags);
60837 +    pte = elan4mmu_get_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, index << (PAGE_SHIFT - dev->dev_pageshift[0]));
60838 +    local_irq_restore (flags);
60839 +
60840 +    return elan4mmu_pte2phys (dev, pte);
60841 +}
60842 +
60843 +void
60844 +ep4_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages)
60845 +{
60846 +    EP4_RAIL  *rail  = (EP4_RAIL *) r;
60847 +    ELAN4_DEV *dev   = rail->r_ctxt.ctxt_dev;
60848 +    EP_ADDR    eaddr = ((ELAN4_HASH_CACHE *) private)->hc_start + (index * PAGE_SIZE);
60849 +    unsigned long idx = (index << (PAGE_SHIFT - dev->dev_pageshift[0]));
60850 +    unsigned long lim = idx + (npages << (PAGE_SHIFT - dev->dev_pageshift[0]));
60851 +    unsigned long flags;
60852 +
60853 +    EPRINTF5 (DBG_KMAP, "ep4_dvma_unload: eaddr %x -> %lx : index=%d idx=%ld lim=%ld\n", 
60854 +             eaddr, (unsigned long)(eaddr + (npages * PAGE_SIZE)), index, idx, lim);
60855 +
60856 +    local_irq_save (flags);
60857 +    for (; idx < lim; idx++)
60858 +       elan4mmu_clear_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, idx);
60859 +    local_irq_restore (flags);
60860 +}
60861 +
60862 +/*
60863 + * Local variables:
60864 + * c-file-style: "stroustrup"
60865 + * End:
60866 + */
60867 Index: linux-2.4.21/drivers/net/qsnet/ep/kmsg_elan3.c
60868 ===================================================================
60869 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kmsg_elan3.c 2004-02-23 16:02:56.000000000 -0500
60870 +++ linux-2.4.21/drivers/net/qsnet/ep/kmsg_elan3.c      2005-06-01 23:12:54.671428160 -0400
60871 @@ -0,0 +1,345 @@
60872 +/*
60873 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
60874 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
60875 + *
60876 + *    For licensing information please see the supplied COPYING file
60877 + *
60878 + */
60879 +
60880 +#ident "@(#)$Id: kmsg_elan3.c,v 1.3.8.1 2004/09/30 09:52:37 david Exp $"
60881 +/*      $Source: /cvs/master/quadrics/epmod/kmsg_elan3.c,v $ */
60882 +
60883 +#include <qsnet/kernel.h>
60884 +
60885 +#include <elan/kcomm.h>
60886 +
60887 +#include "kcomm_vp.h"
60888 +#include "kcomm_elan3.h"
60889 +#include "debug.h"
60890 +
60891 +static void
60892 +ep3_inputq_event (EP3_RAIL *rail, void *arg)
60893 +{
60894 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) arg;
60895 +    
60896 +    (*inputq->q_callback)((EP_RAIL *)rail, inputq->q_arg);
60897 +}
60898 +
60899 +static EP3_COOKIE_OPS ep3_inputq_cookie_ops = 
60900 +{
60901 +    ep3_inputq_event,
60902 +};
60903 +
60904 +EP_INPUTQ *
60905 +ep3_alloc_inputq (EP_RAIL *r, unsigned qnum, unsigned slotSize, unsigned slotCount,
60906 +                 EP_INPUTQ_CALLBACK *callback, void *arg)
60907 +{
60908 +    EP3_RAIL      *rail = (EP3_RAIL *) r;
60909 +    EP3_INPUTQ    *inputq;
60910 +    EP3_InputQueue qdesc;
60911 +    void          *slots;
60912 +    int            i;
60913 +
60914 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
60915 +
60916 +    KMEM_ALLOC (inputq, EP3_INPUTQ *, sizeof (EP3_INPUTQ), TRUE);
60917 +
60918 +    if (inputq == NULL)
60919 +       return (EP_INPUTQ *) NULL;
60920 +    
60921 +    if ((slots = ep_alloc_main (&rail->Generic, slotSize * slotCount, 0, &inputq->q_slotsAddr)) == NULL)
60922 +    {
60923 +       KMEM_FREE (inputq, sizeof (EP3_INPUTQ));
60924 +       return (EP_INPUTQ *) NULL;
60925 +    }
60926 +
60927 +    inputq->q_slotSize  = slotSize;
60928 +    inputq->q_slotCount = slotCount;
60929 +    inputq->q_callback  = callback;
60930 +    inputq->q_arg       = arg;
60931 +    inputq->q_slots     = slots;
60932 +
60933 +    /* Initialise all the slots to be "unreceived" */
60934 +    for (i = 0; i < slotCount; i++)
60935 +       ((uint32_t *) ((unsigned long) slots + (i+1) * slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
60936 +    
60937 +    inputq->q_base     = inputq->q_slotsAddr;
60938 +    inputq->q_top      = inputq->q_base + (slotCount-1) * slotSize;
60939 +    inputq->q_fptr     = inputq->q_base;
60940 +    inputq->q_desc     = EP_SYSTEMQ_DESC(rail->QueueDescs, qnum);
60941 +    inputq->q_descAddr = EP_SYSTEMQ_ADDR (qnum);
60942 +
60943 +    if (callback)
60944 +       RegisterCookie (&rail->CookieTable, &inputq->q_cookie, inputq->q_descAddr, &ep3_inputq_cookie_ops, inputq);
60945 +
60946 +    /* Initialise the input queue descriptor */
60947 +    qdesc.q_state          = E3_QUEUE_FULL;
60948 +    qdesc.q_bptr           = inputq->q_base + slotSize;
60949 +    qdesc.q_fptr           = inputq->q_fptr;
60950 +    qdesc.q_base           = inputq->q_base;
60951 +    qdesc.q_top            = inputq->q_top;
60952 +    qdesc.q_size           = slotSize;
60953 +    qdesc.q_event.ev_Count = 1;
60954 +    qdesc.q_event.ev_Type  = callback ? EV_TYPE_EVIRQ | inputq->q_cookie.Cookie : 0;
60955 +    qdesc.q_wevent         = inputq->q_descAddr + offsetof (EP3_InputQueue, q_event);
60956 +    qdesc.q_wcount         = 0;
60957 +
60958 +    /* copy the queue descriptor down to sdram */
60959 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, inputq->q_desc, sizeof (EP3_InputQueue));
60960 +
60961 +    return (EP_INPUTQ *) inputq;
60962 +}
60963 +
60964 +void
60965 +ep3_free_inputq (EP_RAIL *r, EP_INPUTQ *q)
60966 +{
60967 +    EP3_RAIL   *rail   = (EP3_RAIL *) r;
60968 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) q;
60969 +
60970 +    ep_free_main (&rail->Generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
60971 +
60972 +    if (inputq->q_callback)
60973 +       DeregisterCookie (&rail->CookieTable, &inputq->q_cookie);
60974 +
60975 +    KMEM_FREE (inputq, sizeof (EP3_INPUTQ));
60976 +}
60977 +
60978 +void
60979 +ep3_enable_inputq (EP_RAIL *r, EP_INPUTQ *q)
60980 +{
60981 +    EP3_RAIL   *rail   = (EP3_RAIL *) r;
60982 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) q;
60983 +
60984 +    elan3_sdram_writel (rail->Device, inputq->q_desc + offsetof (EP3_InputQueue, q_state), 0);
60985 +}
60986 +
60987 +void
60988 +ep3_disable_inputq (EP_RAIL *r, EP_INPUTQ *q)
60989 +{
60990 +    EP3_RAIL      *rail   = (EP3_RAIL *) r;
60991 +    EP3_INPUTQ    *inputq = (EP3_INPUTQ *) q;
60992 +    EP3_InputQueue qdesc;
60993 +
60994 +    /* mark the queue as locked */
60995 +    SetQueueLocked (rail, inputq->q_desc);
60996 +
60997 +    /* re-initialise the queue as empty */
60998 +    qdesc.q_state          = E3_QUEUE_FULL;
60999 +    qdesc.q_bptr           = (E3_Addr) inputq->q_base + inputq->q_slotSize;
61000 +    qdesc.q_fptr           = inputq->q_fptr;
61001 +    qdesc.q_base           = inputq->q_base;
61002 +    qdesc.q_top            = inputq->q_top;
61003 +    qdesc.q_size           = inputq->q_slotSize;
61004 +    qdesc.q_event.ev_Count = 1;
61005 +    qdesc.q_event.ev_Type  = inputq->q_callback ? EV_TYPE_EVIRQ | inputq->q_cookie.Cookie : 0;
61006 +    qdesc.q_wevent         = inputq->q_descAddr + offsetof (EP3_InputQueue, q_event);
61007 +    qdesc.q_wcount         = 0;
61008 +
61009 +    /* copy the queue descriptor down to sdram */
61010 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, inputq->q_desc, sizeof (EP3_InputQueue));
61011 +}
61012 +
61013 +int
61014 +ep3_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg)
61015 +{
61016 +    EP3_RAIL          *rail   = (EP3_RAIL *) r;
61017 +    EP3_INPUTQ        *inputq = (EP3_INPUTQ *) q;
61018 +    sdramaddr_t        qdesc  = inputq->q_desc;
61019 +    E3_Addr            nfptr;
61020 +    int                count = 0;
61021 +    E3_uint32          state;
61022 +    int                       delay;
61023 +
61024 + run_again_because_of_eventqueue_overflow:
61025 +    nfptr = inputq->q_fptr + inputq->q_slotSize;
61026 +    if (nfptr > inputq->q_top)                                 
61027 +       nfptr = inputq->q_base;
61028 +
61029 +    while (nfptr != elan3_sdram_readl (rail->Device, qdesc + offsetof (EP3_InputQueue, q_bptr)))                       /* PCI read */
61030 +    {
61031 +       unsigned long slot = (unsigned long) inputq->q_slots + (nfptr - inputq->q_base);
61032 +
61033 +       /* Poll the final word of the message until the message has completely
61034 +        * arrived in main memory. */
61035 +       for (delay = 1; ((uint32_t *) (slot + inputq->q_slotSize))[-1] == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
61036 +           DELAY (delay);
61037 +
61038 +       /* Call the message handler */
61039 +       (*handler) (r, arg, (void *) slot);
61040 +       
61041 +       state = elan3_sdram_readl (rail->Device, qdesc + offsetof (EP3_InputQueue, q_state));                           /* PCI read */
61042 +       if ((state & E3_QUEUE_FULL) == 0)
61043 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_fptr), nfptr);                        /* PCI write */
61044 +       else
61045 +       {
61046 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_fptr), nfptr);                        /* PCI write */
61047 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_state), (state & ~E3_QUEUE_FULL));    /* PCI write */
61048 +       }
61049 +       inputq->q_fptr = nfptr;
61050 +       
61051 +       nfptr += roundup (inputq->q_slotSize, E3_BLK_ALIGN);
61052 +       if (nfptr > inputq->q_top)
61053 +           nfptr = inputq->q_base;
61054 +
61055 +       if (++count >= maxCount && maxCount)
61056 +           break;
61057 +    }
61058 +    
61059 +    if (inputq->q_callback && count != 0)
61060 +    {
61061 +       if (count != inputq->q_waitCount)
61062 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_wcount), inputq->q_waitCount = count);
61063 +
61064 +       if (IssueWaitevent (rail, inputq->q_descAddr + offsetof (EP3_InputQueue, q_wevent)) == ISSUE_COMMAND_TRAPPED)
61065 +           goto run_again_because_of_eventqueue_overflow;
61066 +    }
61067 +
61068 +    return count;
61069 +}
61070 +
61071 +#define Q_EVENT(q,slotNum)             ((q)->q_elan      + (slotNum) * sizeof (E3_BlockCopyEvent))
61072 +#define Q_EVENT_ADDR(q,slotNum)                ((q)->q_elanAddr  + (slotNum) * sizeof (E3_BlockCopyEvent))
61073 +#define Q_MSG(q,slotNum)       (void *)((q)->q_main      + (slotNum) * (q)->q_slotSize)
61074 +#define Q_MSG_ADDR(q,slotNum)          ((q)->q_mainAddr  + (slotNum) * (q)->q_slotSize)
61075 +#define Q_DONE(q,slotNum)     (*((int *)((q)->q_main      + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E3_uint32))))
61076 +#define Q_DONE_ADDR(q,slotNum)         ((q)->q_mainAddr  + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E3_uint32))
61077 +
61078 +#define Q_ELAN_SIZE(q)                 ((q)->q_slotCount * sizeof (E3_BlockCopyEvent))
61079 +#define Q_MAIN_SIZE(q)                 ((q)->q_slotCount * ((q)->q_slotSize + sizeof (E3_uint32)))
61080 +
61081 +static void
61082 +ep3_outputq_retry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int error)
61083 +{
61084 +    E3_DMA_BE    *dmabe = (E3_DMA_BE *) dma;
61085 +    sdramaddr_t   event = ep_elan2sdram (&rail->Generic, dmabe->s.dma_srcEvent);
61086 +    E3_Addr       done  = elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Dest));
61087 +    E3_uint32    *donep = ep_elan2main (&rail->Generic, done & ~EV_BCOPY_DTYPE_MASK);
61088 +
61089 +    EPRINTF1 (DBG_KMSG, "ep3_ouputq_retry: donep at %p -> FAILED\n", donep);
61090 +    
61091 +    *donep = EP3_EVENT_FAILED;
61092 +}
61093 +
61094 +static EP3_COOKIE_OPS ep3_outputq_cookie_ops =
61095 +{
61096 +    NULL, /* Event */
61097 +    ep3_outputq_retry,
61098 +    NULL, /* DmaCancelled */
61099 +    NULL, /* DmaVerify */
61100 +};
61101 +
61102 +EP_OUTPUTQ *
61103 +ep3_alloc_outputq (EP_RAIL *r, unsigned slotSize, unsigned slotCount)
61104 +{
61105 +    EP3_RAIL         *rail = (EP3_RAIL *) r;
61106 +    EP3_OUTPUTQ      *outputq;
61107 +    int               i;
61108 +    E3_BlockCopyEvent event;
61109 +
61110 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
61111 +
61112 +    KMEM_ALLOC (outputq, EP3_OUTPUTQ *, sizeof (EP3_OUTPUTQ), 1);
61113 +
61114 +    if (outputq == NULL)
61115 +       return NULL;
61116 +
61117 +    outputq->q_slotCount = slotCount;
61118 +    outputq->q_slotSize  = slotSize;
61119 +
61120 +    outputq->q_elan = ep_alloc_elan (r, Q_ELAN_SIZE(outputq), 0, &outputq->q_elanAddr);
61121 +
61122 +    if (outputq->q_elan == (sdramaddr_t) 0)
61123 +    {
61124 +       KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
61125 +       return NULL;
61126 +    }
61127 +
61128 +    outputq->q_main = ep_alloc_main (r, Q_MAIN_SIZE(outputq), 0, &outputq->q_mainAddr);
61129 +
61130 +    if (outputq->q_main == (void *) NULL)
61131 +    {
61132 +       ep_free_elan (r, outputq->q_elanAddr, Q_ELAN_SIZE(outputq));
61133 +       KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
61134 +       return NULL;
61135 +    }
61136 +
61137 +    RegisterCookie (&rail->CookieTable, &outputq->q_cookie, outputq->q_elanAddr, &ep3_outputq_cookie_ops, outputq);
61138 +
61139 +    for (i = 0; i < slotCount; i++)
61140 +    {
61141 +       EP3_INIT_COPY_EVENT (event, outputq->q_cookie, Q_DONE_ADDR(outputq, i), 0);
61142 +
61143 +       Q_DONE(outputq, i) = outputq->q_cookie.Cookie;
61144 +       
61145 +       elan3_sdram_copyl_to_sdram (rail->Device, &event, Q_EVENT(outputq, i), sizeof (E3_BlockCopyEvent));
61146 +    }
61147 +
61148 +    return (EP_OUTPUTQ *) outputq;
61149 +}
61150 +
61151 +void
61152 +ep3_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q)
61153 +{
61154 +    EP3_RAIL    *rail    = (EP3_RAIL *) r;
61155 +    EP3_OUTPUTQ *outputq = (EP3_OUTPUTQ *) q;
61156 +
61157 +    DeregisterCookie (&rail->CookieTable, &outputq->q_cookie);
61158 +    
61159 +    ep_free_main (r, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
61160 +    ep_free_elan (r, outputq->q_elanAddr, Q_ELAN_SIZE(outputq));
61161
61162 +    KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
61163 +}
61164 +
61165 +void *
61166 +ep3_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
61167 +{
61168 +    return Q_MSG ((EP3_OUTPUTQ *) q, slotNum);
61169 +}
61170 +
61171 +int
61172 +ep3_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
61173 +{
61174 +    switch (Q_DONE((EP3_OUTPUTQ *) q, slotNum))
61175 +    {
61176 +    case EP3_EVENT_ACTIVE:
61177 +       return EP_OUTPUTQ_BUSY;
61178 +       
61179 +    case EP3_EVENT_FAILED:
61180 +       return EP_OUTPUTQ_FAILED;
61181 +       
61182 +    default:
61183 +       return EP_OUTPUTQ_FINISHED;
61184 +    }
61185 +}
61186 +
61187 +int
61188 +ep3_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum, unsigned size, 
61189 +                 unsigned vp, unsigned qnum, unsigned retries)
61190 +{
61191 +    EP3_RAIL    *rail    = (EP3_RAIL *) r;
61192 +    EP3_OUTPUTQ *outputq = (EP3_OUTPUTQ *) q;
61193 +    unsigned     base    = outputq->q_slotSize - roundup (size, E3_BLK_ALIGN);
61194 +    E3_DMA_BE    dmabe;
61195 +
61196 +    dmabe.s.dma_type           = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_QUEUED, retries);
61197 +    dmabe.s.dma_size            = roundup (size, E3_BLK_ALIGN);
61198 +    dmabe.s.dma_source          = Q_MSG_ADDR(outputq, slotNum) + base;
61199 +    dmabe.s.dma_dest            = base;
61200 +    dmabe.s.dma_destEvent       = EP_SYSTEMQ_ADDR(qnum);
61201 +    dmabe.s.dma_destCookieVProc = vp;
61202 +    dmabe.s.dma_srcEvent        = Q_EVENT_ADDR(outputq, slotNum);
61203 +    dmabe.s.dma_srcCookieVProc  = 0;
61204 +
61205 +    Q_DONE(outputq, slotNum) = EP3_EVENT_ACTIVE;
61206 +    
61207 +    elan3_sdram_writel (rail->Device, Q_EVENT(outputq, slotNum), 1);
61208 +
61209 +    if (IssueDma (rail, &dmabe, EP_RETRY_CRITICAL, FALSE) != ISSUE_COMMAND_OK)
61210 +    {
61211 +       Q_DONE(outputq, slotNum) = EP3_EVENT_FAILED;
61212 +       return FALSE;
61213 +    }
61214 +
61215 +    return TRUE;
61216 +}
61217 Index: linux-2.4.21/drivers/net/qsnet/ep/kmsg_elan4.c
61218 ===================================================================
61219 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kmsg_elan4.c 2004-02-23 16:02:56.000000000 -0500
61220 +++ linux-2.4.21/drivers/net/qsnet/ep/kmsg_elan4.c      2005-06-01 23:12:54.672428008 -0400
61221 @@ -0,0 +1,416 @@
61222 +/*
61223 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61224 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61225 + *
61226 + *    For licensing information please see the supplied COPYING file
61227 + *
61228 + */
61229 +
61230 +#ident "@(#)$Id: kmsg_elan4.c,v 1.8.6.1 2004/09/30 09:52:37 david Exp $"
61231 +/*      $Source: /cvs/master/quadrics/epmod/kmsg_elan4.c,v $ */
61232 +
61233 +#include <qsnet/kernel.h>
61234 +
61235 +#include <elan/kcomm.h>
61236 +
61237 +#include "debug.h"
61238 +#include "kcomm_vp.h"
61239 +#include "kcomm_elan4.h"
61240 +
61241 +#include <elan4/trtype.h>
61242 +
61243 +static void
61244 +ep4_inputq_interrupt (EP4_RAIL *rail, void *arg)
61245 +{
61246 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) arg;
61247 +
61248 +    /* mark the queue as "fired" to cause a single waitevent
61249 +     * to be issued next time the queue is polled */
61250 +    atomic_inc (&inputq->q_fired);
61251 +    
61252 +    (*inputq->q_callback)(&rail->r_generic, inputq->q_arg);
61253 +}
61254 +
61255 +EP_INPUTQ *
61256 +ep4_alloc_inputq (EP_RAIL *r, unsigned qnum, unsigned slotSize, unsigned slotCount,
61257 +                 EP_INPUTQ_CALLBACK *callback, void *arg)
61258 +{
61259 +    EP4_RAIL     *rail = (EP4_RAIL *) r;
61260 +    EP4_INPUTQ   *inputq;
61261 +    E4_Event32    qevent;
61262 +    void         *slots;
61263 +    int           i;
61264 +
61265 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
61266 +
61267 +    KMEM_ALLOC (inputq, EP4_INPUTQ *, sizeof (EP4_INPUTQ), 1);
61268 +
61269 +    if (inputq == NULL)
61270 +       return (EP_INPUTQ *) NULL;
61271 +    
61272 +    if ((slots = ep_alloc_main (&rail->r_generic, slotSize * slotCount, 0, &inputq->q_slotsAddr)) == NULL)
61273 +    {
61274 +       KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
61275 +       return (EP_INPUTQ *) NULL;
61276 +    }
61277 +
61278 +    inputq->q_slotSize  = slotSize;
61279 +    inputq->q_slotCount = slotCount;
61280 +    inputq->q_callback  = callback;
61281 +    inputq->q_arg       = arg;
61282 +    inputq->q_slots     = slots;
61283 +
61284 +    /* Initialise all the slots to be "unreceived" */
61285 +    for (i = 0; i < slotCount; i++)
61286 +       ((uint32_t *) ((unsigned long) slots + (i+1) * slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
61287 +    
61288 +    inputq->q_last      = inputq->q_slotsAddr + (slotCount-1) * slotSize;
61289 +    inputq->q_fptr      = inputq->q_slotsAddr;
61290 +    inputq->q_desc      = EP_SYSTEMQ_DESC (rail->r_queuedescs, qnum);
61291 +    inputq->q_descAddr  = EP_SYSTEMQ_ADDR (qnum);
61292 +    inputq->q_eventAddr = rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_qevents[qnum]);
61293 +
61294 +    if (callback)
61295 +    {
61296 +       if ((inputq->q_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1)) == 0)
61297 +       {
61298 +           ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
61299 +
61300 +           KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
61301 +           return (EP_INPUTQ *) NULL;
61302 +       }
61303 +
61304 +       if ((inputq->q_wcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4)) == 0)
61305 +       {
61306 +           ep4_put_ecq (rail, inputq->q_ecq, 1);
61307 +           ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
61308 +
61309 +           KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
61310 +           return (EP_INPUTQ *) NULL;
61311 +       }
61312 +
61313 +       ep4_register_intcookie (rail, &inputq->q_intcookie, inputq->q_descAddr, ep4_inputq_interrupt, inputq);
61314 +
61315 +       inputq->q_count = 0;
61316 +
61317 +       atomic_set (&inputq->q_fired, 0);
61318 +
61319 +       /* Initialise the queue event */
61320 +       qevent.ev_CountAndType = E4_EVENT_INIT_VALUE (callback ? -32 : 0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0);
61321 +       qevent.ev_WritePtr     = inputq->q_ecq->ecq_addr;
61322 +       qevent.ev_WriteValue   = (inputq->q_intcookie.int_val << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD;
61323 +    }
61324 +
61325 +    /* copy the event down to sdram */
61326 +    elan4_sdram_copyq_to_sdram (rail->r_ctxt.ctxt_dev, &qevent, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_qevents[qnum]), sizeof (E4_Event32));
61327 +
61328 +    return (EP_INPUTQ *) inputq;
61329 +}
61330 +
61331 +void
61332 +ep4_free_inputq (EP_RAIL *r, EP_INPUTQ *q)
61333 +{
61334 +    EP4_RAIL   *rail   = (EP4_RAIL *) r;
61335 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) q;
61336 +
61337 +    ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
61338 +
61339 +    if (inputq->q_callback)
61340 +    {
61341 +       ep4_deregister_intcookie (rail, &inputq->q_intcookie);
61342 +       ep4_put_ecq (rail, inputq->q_ecq, 1);
61343 +       ep4_put_ecq (rail, inputq->q_wcq, 4);
61344 +    }
61345 +
61346 +    KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
61347 +}
61348 +
61349 +void
61350 +ep4_enable_inputq (EP_RAIL *r, EP_INPUTQ *q)
61351 +{
61352 +    EP4_RAIL     *rail     = (EP4_RAIL *) r;
61353 +    EP4_INPUTQ   *inputq   = (EP4_INPUTQ *) q;
61354 +    EP_ADDR       lastSlot = inputq->q_slotsAddr + (inputq->q_slotCount-1) * inputq->q_slotSize;
61355 +    E4_InputQueue qdesc;
61356 +
61357 +    qdesc.q_bptr    = inputq->q_slotsAddr;
61358 +    qdesc.q_fptr    = inputq->q_slotsAddr;
61359 +    qdesc.q_control = E4_InputQueueControl (inputq->q_slotsAddr, lastSlot, inputq->q_slotSize);
61360 +    qdesc.q_event   = inputq->q_callback ? inputq->q_eventAddr : 0;
61361 +
61362 +    /* copy the queue descriptor down to sdram */
61363 +    ep4_write_qdesc (rail, inputq->q_desc, &qdesc);
61364 +
61365 +    EPRINTF5 (DBG_KMSG,  "ep_enable_inputq: %x - %016llx %016llx %016llx %016llx\n", (int) inputq->q_descAddr,
61366 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 0),
61367 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 8),
61368 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 16),
61369 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 24));
61370 +}
61371 +
61372 +void
61373 +ep4_disable_inputq (EP_RAIL *r, EP_INPUTQ *q)
61374 +{
61375 +    EP4_RAIL     *rail   = (EP4_RAIL *) r;
61376 +    EP4_INPUTQ   *inputq = (EP4_INPUTQ *) q;
61377 +    E4_InputQueue qdesc;
61378 +
61379 +    /* Initialise the input queue descriptor as "full" with no event */
61380 +    qdesc.q_bptr    = 0;
61381 +    qdesc.q_fptr    = 8;
61382 +    qdesc.q_control = E4_InputQueueControl(qdesc.q_bptr, qdesc.q_fptr, 8);
61383 +    qdesc.q_event   = 0;
61384 +
61385 +    /* copy the queue descriptor down to sdram */
61386 +    ep4_write_qdesc (rail, inputq->q_desc, &qdesc);
61387 +}
61388 +
61389 +int
61390 +ep4_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg)
61391 +{
61392 +    EP4_RAIL   *rail   = (EP4_RAIL *) r;
61393 +    ELAN4_DEV  *dev    = rail->r_ctxt.ctxt_dev; 
61394 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) q;
61395 +    sdramaddr_t qdesc = inputq->q_desc;
61396 +    E4_Addr     fptr  = inputq->q_fptr;
61397 +    E4_Addr     bptr  = elan4_sdram_readl (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
61398 +    int                count = 0;
61399 +    int         delay;
61400 +
61401 +    while (bptr != 0 && fptr != bptr)
61402 +    {
61403 +       while (fptr != bptr)
61404 +       {
61405 +           unsigned long slot = (unsigned long) inputq->q_slots + (fptr - inputq->q_slotsAddr);
61406 +           
61407 +           /* Poll the final word of the message until the message has completely
61408 +            * arrived in main memory. */
61409 +           for (delay = 1; ((uint32_t *) (slot + inputq->q_slotSize))[-1] == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
61410 +               DELAY (delay);
61411 +           
61412 +           EPRINTF4(DBG_KMSG, "ep4_poll_inputq: %x slot %d of %d [%08x]\n", (int)inputq->q_descAddr,
61413 +                    ((int)(fptr - inputq->q_slotsAddr))/inputq->q_slotSize, 
61414 +                    inputq->q_slotCount, ((uint32_t *) (slot + inputq->q_slotSize))[-1]);
61415 +           
61416 +           /* Call the message handler */
61417 +           (*handler) (r, arg, (void *) slot);
61418 +           
61419 +           /* reset the last word of the slot to "unreceived" */
61420 +           ((uint32_t *) (slot + inputq->q_slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
61421 +           
61422 +           /* move on the front pointer */
61423 +           fptr = (fptr == inputq->q_last) ? inputq->q_slotsAddr : fptr + inputq->q_slotSize;
61424 +           
61425 +           elan4_sdram_writel (dev, qdesc + offsetof (E4_InputQueue, q_fptr), fptr);
61426 +           
61427 +           inputq->q_count++;
61428 +           
61429 +           if (++count >= maxCount && maxCount)
61430 +           {
61431 +               inputq->q_fptr = fptr;
61432 +
61433 +               return count;
61434 +           }
61435 +       }
61436 +
61437 +       bptr = elan4_sdram_readl (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
61438 +    }
61439 +
61440 +    inputq->q_fptr = fptr;
61441 +
61442 +    /* Only insert a single wait event command if the callback has
61443 +     * occured, otherwise just acrue the count as we've just periodically
61444 +     * polled it.
61445 +     */
61446 +    if (inputq->q_callback && atomic_read (&inputq->q_fired))
61447 +    {
61448 +       atomic_dec (&inputq->q_fired);
61449 +
61450 +       ep4_wait_event_cmd (inputq->q_wcq, inputq->q_eventAddr,
61451 +                           E4_EVENT_INIT_VALUE (-inputq->q_count << 5, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),
61452 +                           inputq->q_ecq->ecq_addr,
61453 +                           (inputq->q_intcookie.int_val << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD);
61454 +
61455 +       inputq->q_count = 0;
61456 +    }
61457 +
61458 +    return count;
61459 +}
61460 +
61461 +#define Q_MSG(q,slotNum)         (unsigned long)((q)->q_main      + (slotNum) * (q)->q_slotSize)
61462 +#define Q_MSG_ADDR(q,slotNum)                  ((q)->q_mainAddr  + (slotNum) * (q)->q_slotSize)
61463 +#define Q_DONE(q,slotNum)        *((E4_uint64 *)((q)->q_main      + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E4_uint64)))
61464 +#define Q_DONE_ADDR(q,slotNum)                 ((q)->q_mainAddr  + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E4_uint64))
61465 +
61466 +#define Q_MAIN_SIZE(q)                 ((q)->q_slotCount * ((q)->q_slotSize + sizeof (E4_uint64)))
61467 +
61468 +#define Q_DONE_VAL(val,cnt)            ((cnt) << 16 | (val))
61469 +#define Q_DONE_RET(done)               ((int) ((done) & 0xffff))
61470 +#define Q_DONE_CNT(done)               ((int) ((done) >> 16))
61471 +
61472 +EP_OUTPUTQ *
61473 +ep4_alloc_outputq (EP_RAIL *r, unsigned slotSize, unsigned slotCount)
61474 +{
61475 +    EP4_RAIL    *rail = (EP4_RAIL *) r;
61476 +    EP4_OUTPUTQ *outputq;
61477 +    int          i;
61478 +
61479 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
61480 +
61481 +    KMEM_ALLOC (outputq, EP4_OUTPUTQ *, sizeof (EP4_OUTPUTQ), 1);
61482 +
61483 +    if (outputq == NULL)
61484 +       return NULL;
61485 +
61486 +    spin_lock_init (&outputq->q_lock);
61487 +
61488 +    outputq->q_slotCount = slotCount;
61489 +    outputq->q_slotSize  = slotSize;
61490 +    outputq->q_main      = ep_alloc_main (r, Q_MAIN_SIZE(outputq), 0, &outputq->q_mainAddr);
61491 +
61492 +    if (outputq->q_main == (E4_uint64 *) NULL)
61493 +    {
61494 +       KMEM_FREE (outputq, sizeof (EP_OUTPUTQ));
61495 +       return NULL;
61496 +    }
61497 +
61498 +    outputq->q_cq = elan4_alloccq (&rail->r_ctxt, CQ_Size64K, CQ_STENEnableBit | CQ_WriteEnableBit, CQ_Priority);
61499 +
61500 +    if (outputq->q_cq == (ELAN4_CQ *) NULL)
61501 +    {
61502 +       ep_free_main (&rail->r_generic, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
61503 +
61504 +       KMEM_FREE (outputq, sizeof (EP_OUTPUTQ));
61505 +    }
61506 +
61507 +    outputq->q_dwords = CQ_Size (outputq->q_cq->cq_size) >> 3;
61508 +
61509 +    /* mark all the queue slots as finished */
61510 +    for (i = 0; i < slotCount; i++)
61511 +       Q_DONE(outputq, i) = Q_DONE_VAL (EP_OUTPUTQ_FINISHED, 0);
61512 +
61513 +    return (EP_OUTPUTQ *) outputq;
61514 +}
61515 +
61516 +void
61517 +ep4_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q)
61518 +{
61519 +    EP4_RAIL    *rail    = (EP4_RAIL *) r;
61520 +    EP4_OUTPUTQ *outputq = (EP4_OUTPUTQ *) q;
61521 +
61522 +    elan4_freecq (&rail->r_ctxt, outputq->q_cq);
61523 +
61524 +    ep_free_main (&rail->r_generic, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
61525 +
61526 +    spin_lock_destroy (&outputq->q_lock);
61527 +
61528 +    KMEM_FREE (outputq, sizeof (EP4_OUTPUTQ));
61529 +}
61530 +
61531 +void *
61532 +ep4_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
61533 +{
61534 +    return (void *) Q_MSG ((EP4_OUTPUTQ *) q, slotNum);
61535 +}
61536 +
61537 +int
61538 +ep4_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
61539 +{
61540 +    EPRINTF2 (DBG_KMSG, "ep4_outputq_state: slotNum %d state %x\n", slotNum, (int)Q_DONE((EP4_OUTPUTQ *) q, slotNum));
61541 +
61542 +    return Q_DONE_RET(Q_DONE((EP4_OUTPUTQ *)q, slotNum));
61543 +}
61544 +
61545 +int
61546 +ep4_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum, unsigned size, 
61547 +                 unsigned vp, unsigned qnum, unsigned retries)
61548 +{
61549 +    EP4_OUTPUTQ *outputq = (EP4_OUTPUTQ *) q;
61550 +    unsigned int nbytes  = roundup (size, 32);
61551 +    unsigned int base    = outputq->q_slotSize - nbytes;
61552 +    unsigned int i, dwords;
61553 +    unsigned long flags;
61554 +    E4_uint64 val;
61555 +
61556 +    spin_lock_irqsave (&outputq->q_lock, flags);
61557 +
61558 +    EPRINTF4 (DBG_KMSG, "ep4_outputq_send: slotNum=%d size=%d vp=%d qnum=%d\n", slotNum, size, vp, qnum);
61559 +
61560 +    /* compute command queue size as follows - each slot uses
61561 +     *     overhead:   14 dwords +
61562 +     *    data > 128 ? 36 dwords
61563 +     *     data > 64  ? 18 dwords
61564 +     *     data > 32  ? 10 dwords
61565 +     *     else         6  dwords
61566 +     */
61567 +    dwords = 14 + (size > 128 ? 36 :
61568 +                  size > 64  ? 18 :
61569 +                  size  ? 10 : 6);
61570 +
61571 +    outputq->q_dwords += Q_DONE_CNT (Q_DONE(outputq, slotNum));
61572 +
61573 +    if (dwords > outputq->q_dwords)
61574 +    {
61575 +       /* attempt to reclaim command queue space from other slots */
61576 +       i = slotNum;
61577 +       do {
61578 +           if (++i == outputq->q_slotCount)
61579 +               i = 0;
61580 +           
61581 +           val = Q_DONE(outputq, i);
61582 +
61583 +           if ((Q_DONE_RET (val) == EP_OUTPUTQ_FINISHED || Q_DONE_RET (val) == EP_OUTPUTQ_FAILED) && Q_DONE_CNT(val) > 0)
61584 +           {
61585 +               outputq->q_dwords += Q_DONE_CNT (val);
61586 +
61587 +               Q_DONE(outputq, i) = Q_DONE_VAL(Q_DONE_RET(val), 0);
61588 +           }
61589 +       } while (i != slotNum && dwords > outputq->q_dwords);
61590 +    }
61591 +
61592 +    if (dwords > outputq->q_dwords)
61593 +    {
61594 +       spin_unlock_irqrestore (&outputq->q_lock, flags);
61595 +       
61596 +       EPRINTF0 (DBG_KMSG, "ep4_outputq_state: no command queue space\n");
61597 +       return 0;
61598 +    }
61599 +
61600 +    outputq->q_dwords -= dwords;
61601 +
61602 +    Q_DONE(outputq, slotNum) = Q_DONE_VAL (EP_OUTPUTQ_BUSY, dwords);
61603 +
61604 +    if (outputq->q_retries != retries)
61605 +    {
61606 +       elan4_guard (outputq->q_cq, GUARD_CHANNEL(1) | GUARD_RESET(outputq->q_retries = retries));
61607 +       elan4_nop_cmd (outputq->q_cq, 0);
61608 +    }
61609 +
61610 +    /* transfer the top "size" bytes from message buffer to top of input queue */
61611 +    elan4_open_packet (outputq->q_cq, OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, vp));
61612 +    elan4_sendtrans0 (outputq->q_cq, TR_INPUT_Q_GETINDEX, EP_SYSTEMQ_ADDR(qnum));
61613 +
61614 +    /* send upto EP_SYSTEMQ_MSG_MAX (256) bytes of message to the top of the slot */
61615 +    if (size > 128)
61616 +    {
61617 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base + 0,   (void *) (Q_MSG(outputq, slotNum) + base + 0));
61618 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base + 128, (void *) (Q_MSG(outputq, slotNum) + base + 128));
61619 +    }
61620 +    else if (size > 64)
61621 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base, (void *) (Q_MSG(outputq, slotNum) + base));
61622 +    else if (size > 32)
61623 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (64 >> 3, 0, TR_DATATYPE_DWORD),  base, (void *) (Q_MSG(outputq, slotNum) + base));
61624 +    else
61625 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (32 >> 3, 0, TR_DATATYPE_DWORD),  base, (void *) (Q_MSG(outputq, slotNum) + base));
61626 +    elan4_sendtrans1 (outputq->q_cq, TR_INPUT_Q_COMMIT, EP_SYSTEMQ_ADDR(qnum), 0 /* no cookie */);
61627 +
61628 +    elan4_guard (outputq->q_cq, GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (outputq->q_retries));
61629 +    elan4_write_dword_cmd (outputq->q_cq, Q_DONE_ADDR(outputq, slotNum), Q_DONE_VAL (EP_OUTPUTQ_FINISHED, dwords));
61630 +
61631 +    elan4_guard (outputq->q_cq, GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (outputq->q_retries));
61632 +    elan4_write_dword_cmd (outputq->q_cq, Q_DONE_ADDR(outputq, slotNum), Q_DONE_VAL (EP_OUTPUTQ_FAILED, dwords));
61633 +
61634 +    spin_unlock_irqrestore (&outputq->q_lock, flags);
61635 +
61636 +    return 1;
61637 +}
61638 Index: linux-2.4.21/drivers/net/qsnet/ep/kthread.c
61639 ===================================================================
61640 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kthread.c    2004-02-23 16:02:56.000000000 -0500
61641 +++ linux-2.4.21/drivers/net/qsnet/ep/kthread.c 2005-06-01 23:12:54.672428008 -0400
61642 @@ -0,0 +1,186 @@
61643 +/*
61644 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61645 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61646 + *
61647 + *    For licensing information please see the supplied COPYING file
61648 + *
61649 + */
61650 +
61651 +#ident "@(#)$Id: kthread.c,v 1.5 2004/05/19 08:54:57 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
61652 +/*      $Source: /cvs/master/quadrics/epmod/kthread.c,v $*/
61653 +
61654 +#include <qsnet/kernel.h>
61655 +
61656 +#include <elan/kthread.h>
61657 +
61658 +void
61659 +ep_kthread_init (EP_KTHREAD *kt)
61660 +{
61661 +       spin_lock_init (&kt->lock);
61662 +       kcondvar_init (&kt->wait);
61663 +       
61664 +       kt->next_run     = 0;
61665 +       kt->should_stall = 0;
61666 +       kt->started      = 0;
61667 +       kt->should_stop  = 0;
61668 +       kt->stopped      = 0;
61669 +       kt->state        = KT_STATE_RUNNING;
61670 +}
61671 +
61672 +void
61673 +ep_kthread_destroy (EP_KTHREAD *kt)
61674 +{
61675 +       spin_lock_destroy (&kt->lock);
61676 +       kcondvar_destroy (&kt->wait);
61677 +}
61678 +
61679 +void
61680 +ep_kthread_started (EP_KTHREAD *kt)
61681 +{
61682 +       unsigned long flags;
61683 +       
61684 +       spin_lock_irqsave (&kt->lock, flags);
61685 +       kt->started = 1;
61686 +       spin_unlock_irqrestore(&kt->lock, flags);
61687 +}
61688 +
61689 +void
61690 +ep_kthread_stopped (EP_KTHREAD *kt)
61691 +{
61692 +       unsigned long flags;
61693 +       
61694 +       spin_lock_irqsave (&kt->lock, flags);
61695 +       kt->stopped = 1;
61696 +       kcondvar_wakeupall (&kt->wait, &kt->lock);
61697 +       spin_unlock_irqrestore(&kt->lock, flags);
61698 +}
61699 +
61700 +int
61701 +ep_kthread_should_stall (EP_KTHREAD *kth)
61702 +{
61703 +       return (kth->should_stall);
61704 +}
61705 +
61706 +int
61707 +ep_kthread_sleep (EP_KTHREAD *kt, long next_run)
61708 +{
61709 +       unsigned long flags;
61710 +
61711 +       spin_lock_irqsave (&kt->lock, flags);
61712 +       if (next_run && (kt->next_run == 0 || BEFORE (next_run, kt->next_run)))
61713 +               kt->next_run = next_run;
61714 +
61715 +       if (kt->should_stop)
61716 +       {
61717 +               spin_unlock_irqrestore (&kt->lock, flags);
61718 +               return (-1);
61719 +       }
61720 +       
61721 +       do {
61722 +               if (kt->should_stall)
61723 +                       kcondvar_wakeupall (&kt->wait, &kt->lock);
61724 +
61725 +               kt->state = KT_STATE_SLEEPING;
61726 +               kt->running = 0;
61727 +               if (kt->should_stall || kt->next_run == 0)
61728 +                       kcondvar_wait (&kt->wait, &kt->lock, &flags);
61729 +               else
61730 +                       kcondvar_timedwait (&kt->wait,&kt->lock, &flags, kt->next_run);
61731 +               kt->state = KT_STATE_RUNNING;
61732 +               kt->running = lbolt;
61733 +       } while (kt->should_stall);
61734 +       kt->next_run = 0;
61735 +       spin_unlock_irqrestore (&kt->lock, flags);
61736 +       
61737 +       return (0);
61738 +}
61739 +
61740 +void
61741 +ep_kthread_schedule (EP_KTHREAD *kt, long tick)
61742 +{
61743 +       unsigned long flags;
61744 +       
61745 +       spin_lock_irqsave (&kt->lock, flags);
61746 +       if (kt->next_run == 0 || BEFORE (tick, kt->next_run))
61747 +       {
61748 +               kt->next_run = tick;
61749 +               if (!kt->should_stall && kt->state == KT_STATE_SLEEPING)
61750 +               {
61751 +                       kt->state = KT_STATE_SCHEDULED;
61752 +                       kcondvar_wakeupone (&kt->wait, &kt->lock);
61753 +               }
61754 +       }
61755 +       spin_unlock_irqrestore (&kt->lock, flags);
61756 +}
61757 +
61758 +void
61759 +ep_kthread_stall (EP_KTHREAD *kt)
61760 +{
61761 +       unsigned long flags;
61762 +       
61763 +       spin_lock_irqsave (&kt->lock, flags);
61764 +       if (kt->should_stall++ == 0)
61765 +               kcondvar_wakeupall (&kt->wait, &kt->lock);
61766 +
61767 +       while (kt->state != KT_STATE_SLEEPING)
61768 +               kcondvar_wait (&kt->wait, &kt->lock, &flags);
61769 +       spin_unlock_irqrestore (&kt->lock, flags);
61770 +}
61771 +
61772 +void
61773 +ep_kthread_resume (EP_KTHREAD *kt)
61774 +{
61775 +       unsigned long flags;
61776 +
61777 +       spin_lock_irqsave (&kt->lock, flags);
61778 +       if (--kt->should_stall == 0)
61779 +       {
61780 +               kt->state = KT_STATE_SCHEDULED;
61781 +               kcondvar_wakeupone (&kt->wait, &kt->lock);
61782 +       }
61783 +       spin_unlock_irqrestore (&kt->lock, flags);
61784 +}
61785 +
61786 +void
61787 +ep_kthread_stop (EP_KTHREAD *kt)
61788 +{
61789 +       unsigned long flags;
61790 +       
61791 +       spin_lock_irqsave (&kt->lock, flags);
61792 +       kt->should_stop = 1;
61793 +       while (kt->started && !kt->stopped)
61794 +       {
61795 +               kcondvar_wakeupall (&kt->wait, &kt->lock);
61796 +               kcondvar_wait (&kt->wait, &kt->lock, &flags);
61797 +       }
61798 +       spin_unlock_irqrestore (&kt->lock, flags);
61799 +}
61800 +
61801 +int
61802 +ep_kthread_state (EP_KTHREAD *kt, long *time)
61803 +{
61804 +       unsigned long flags;
61805 +       int res = KT_STATE_SLEEPING;
61806 +
61807 +       spin_lock_irqsave (&kt->lock, flags);
61808 +
61809 +       if (kt->next_run) {
61810 +               *time = kt->next_run;
61811 +               res   = kt->should_stall ? KT_STATE_STALLED : KT_STATE_SCHEDULED;
61812 +       }
61813 +
61814 +       if (kt->running) {
61815 +               *time = kt->running;
61816 +               res   = KT_STATE_RUNNING;
61817 +       }
61818 +
61819 +       spin_unlock_irqrestore (&kt->lock, flags);
61820 +       
61821 +       return res;
61822 +}
61823 +
61824 +/*
61825 + * Local variables:
61826 + * c-file-style: "linux"
61827 + * End:
61828 + */
61829 Index: linux-2.4.21/drivers/net/qsnet/ep/kthread.h
61830 ===================================================================
61831 --- linux-2.4.21.orig/drivers/net/qsnet/ep/kthread.h    2004-02-23 16:02:56.000000000 -0500
61832 +++ linux-2.4.21/drivers/net/qsnet/ep/kthread.h 2005-06-01 23:12:54.673427856 -0400
61833 @@ -0,0 +1,53 @@
61834 +/*
61835 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61836 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61837 + *
61838 + *    For licensing information please see the supplied COPYING file
61839 + *
61840 + */
61841 +
61842 +#ifndef __ELAN3_KTHREAD_H
61843 +#define __ELAN3_KTHREAD_H
61844 +
61845 +#ident "@(#)$Id: kthread.h,v 1.4 2004/05/06 14:24:08 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
61846 +/*      $Source: /cvs/master/quadrics/epmod/kthread.h,v $*/
61847 +
61848 +typedef struct ep_kthread
61849 +{
61850 +       kcondvar_t      wait;                                   /* place to sleep */
61851 +       spinlock_t      lock;                                   /* and lock */
61852 +       long            next_run;                               /* tick when thread should next run */
61853 +       long            running;                                /* tick when thread started to run */
61854 +       unsigned short  should_stall;
61855 +       unsigned char   state;
61856 +       unsigned int    started:1;
61857 +       unsigned int    should_stop:1;
61858 +       unsigned int    stopped:1;
61859 +} EP_KTHREAD;
61860 +
61861 +#define KT_STATE_SLEEPING              0
61862 +#define KT_STATE_SCHEDULED             1
61863 +#define KT_STATE_RUNNING               2
61864 +#define KT_STATE_STALLED               3
61865 +
61866 +#define AFTER(a, b)                    ((((long)(a)) - ((long)(b))) > 0)
61867 +#define BEFORE(a,b)                    ((((long)(a)) - ((long)(b))) < 0)
61868 +
61869 +extern void ep_kthread_init (EP_KTHREAD *kt);
61870 +extern void ep_kthread_destroy (EP_KTHREAD *kt);
61871 +extern void ep_kthread_started (EP_KTHREAD *kt);
61872 +extern void ep_kthread_stopped (EP_KTHREAD *kt);
61873 +extern int  ep_kthread_should_stall (EP_KTHREAD *kth);
61874 +extern int  ep_kthread_sleep (EP_KTHREAD *kth, long next_run);
61875 +extern void ep_kthread_schedule (EP_KTHREAD *kt, long when);
61876 +extern void ep_kthread_stall (EP_KTHREAD *kth);
61877 +extern void ep_kthread_resume (EP_KTHREAD *kt);
61878 +extern void ep_kthread_stop (EP_KTHREAD *kt);
61879 +extern int  ep_kthread_state (EP_KTHREAD *kt, long *time);
61880 +#endif /* __ELAN3_KTHREAD_H */
61881 +
61882 +/*
61883 + * Local variables:
61884 + * c-file-style: "linux"
61885 + * End:
61886 + */
61887 Index: linux-2.4.21/drivers/net/qsnet/ep/Makefile
61888 ===================================================================
61889 --- linux-2.4.21.orig/drivers/net/qsnet/ep/Makefile     2004-02-23 16:02:56.000000000 -0500
61890 +++ linux-2.4.21/drivers/net/qsnet/ep/Makefile  2005-06-01 23:12:54.673427856 -0400
61891 @@ -0,0 +1,33 @@
61892 +#
61893 +# Makefile for Quadrics QsNet
61894 +#
61895 +# Copyright (c) 2002-2004 Quadrics Ltd
61896 +#
61897 +# File: drivers/net/qsnet/ep/Makefile
61898 +#
61899 +
61900 +
61901 +ep3-$(CONFIG_ELAN3)    := kcomm_elan3.o kmsg_elan3.o kmap_elan3.o neterr_elan3.o probenetwork_elan3.o support_elan3.o threadcode_elan3.o threadcode_elan3_Linux.o epcomms_elan3.o epcommsTx_elan3.o epcommsRx_elan3.o
61902 +ep4-$(CONFIG_ELAN4)    := kcomm_elan4.o kmsg_elan4.o kmap_elan4.o neterr_elan4.o probenetwork_elan4.o commands_elan4.o debug_elan4.o support_elan4.o threadcode_elan4_Linux.o epcomms_elan4.o epcommsTx_elan4.o epcommsRx_elan4.o
61903 +#
61904 +
61905 +#
61906 +# Makefile for Quadrics QsNet
61907 +#
61908 +# Copyright (c) 2004 Quadrics Ltd.
61909 +#
61910 +# File: driver/net/qsnet/ep/Makefile
61911 +#
61912 +
61913 +list-multi             := ep.o
61914 +ep-objs        := cm.o debug.o kalloc.o kcomm.o kmap.o kthread.o neterr.o nmh.o probenetwork.o railhints.o rmap.o statemap.o support.o threadcode.o epcomms.o epcommsRx.o epcommsTx.o epcommsFwd.o conf_linux.o procfs_linux.o ep_procfs.o cm_procfs.o $(ep3-$(CONFIG_EP)) $(ep4-$(CONFIG_EP))
61915 +export-objs            := conf_linux.o
61916 +obj-$(CONFIG_EP)       := ep.o
61917 +
61918 +ep.o : $(ep-objs)
61919 +       $(LD) -r -o $@ $(ep-objs)
61920 +
61921 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
61922 +
61923 +include $(TOPDIR)/Rules.make
61924 +
61925 Index: linux-2.4.21/drivers/net/qsnet/ep/Makefile.conf
61926 ===================================================================
61927 --- linux-2.4.21.orig/drivers/net/qsnet/ep/Makefile.conf        2004-02-23 16:02:56.000000000 -0500
61928 +++ linux-2.4.21/drivers/net/qsnet/ep/Makefile.conf     2005-06-01 23:12:54.673427856 -0400
61929 @@ -0,0 +1,12 @@
61930 +# Flags for generating QsNet Linux Kernel Makefiles
61931 +MODNAME                =       ep.o
61932 +MODULENAME     =       ep
61933 +KOBJFILES      =       cm.o debug.o kalloc.o kcomm.o kmap.o kthread.o neterr.o nmh.o probenetwork.o railhints.o rmap.o statemap.o support.o threadcode.o epcomms.o epcommsRx.o epcommsTx.o epcommsFwd.o conf_linux.o procfs_linux.o ep_procfs.o cm_procfs.o \$\(ep3-\$\(CONFIG_EP\)\) \$\(ep4-\$\(CONFIG_EP\)\)
61934 +EXPORT_KOBJS   =       conf_linux.o
61935 +CONFIG_NAME    =       CONFIG_EP
61936 +SGALFC         =       
61937 +# EXTRALINES START
61938 +
61939 +ep3-$(CONFIG_ELAN3)    := kcomm_elan3.o kmsg_elan3.o kmap_elan3.o neterr_elan3.o probenetwork_elan3.o support_elan3.o threadcode_elan3.o threadcode_elan3_Linux.o epcomms_elan3.o epcommsTx_elan3.o epcommsRx_elan3.o
61940 +ep4-$(CONFIG_ELAN4)    := kcomm_elan4.o kmsg_elan4.o kmap_elan4.o neterr_elan4.o probenetwork_elan4.o commands_elan4.o debug_elan4.o support_elan4.o threadcode_elan4_Linux.o epcomms_elan4.o epcommsTx_elan4.o epcommsRx_elan4.o
61941 +# EXTRALINES END
61942 Index: linux-2.4.21/drivers/net/qsnet/ep/neterr.c
61943 ===================================================================
61944 --- linux-2.4.21.orig/drivers/net/qsnet/ep/neterr.c     2004-02-23 16:02:56.000000000 -0500
61945 +++ linux-2.4.21/drivers/net/qsnet/ep/neterr.c  2005-06-01 23:12:54.674427704 -0400
61946 @@ -0,0 +1,82 @@
61947 +/*
61948 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61949 + *
61950 + *    For licensing information please see the supplied COPYING file
61951 + *
61952 + */
61953 +
61954 +#ident "@(#)$Id: neterr.c,v 1.25.8.1 2004/11/12 10:54:51 mike Exp $"
61955 +/*      $Source: /cvs/master/quadrics/epmod/neterr.c,v $ */
61956 +
61957 +#include <qsnet/kernel.h>
61958 +#include <elan/kcomm.h>
61959 +
61960 +#include "debug.h"
61961 +
61962 +void
61963 +ep_queue_network_error (EP_RAIL *rail, int nodeId, int what, int channel, EP_NETERR_COOKIE cookie)
61964 +{
61965 +    EP_SYS       *sys      = rail->System;
61966 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[nodeId];
61967 +    unsigned long flags;
61968 +
61969 +    spin_lock_irqsave (&sys->NodeLock, flags);
61970 +
61971 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
61972 +    
61973 +    if (nodeRail->NetworkErrorState == 0)
61974 +    {
61975 +       EPRINTF2 (DBG_NETWORK_ERROR, "%s: raise context filter for node %d due to network error\n", rail->Name, nodeId);
61976 +       printk ("%s: raise context filter for node %d due to network error\n", rail->Name, nodeId);
61977 +       
61978 +       rail->Operations.RaiseFilter (rail, nodeId);
61979 +       
61980 +       if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
61981 +           printk ("%s: node %d is flushing - deferring network error fixup\n", rail->Name, nodeId);
61982 +       else
61983 +           list_add_tail (&nodeRail->Link, &rail->NetworkErrorList);
61984 +    }
61985 +    
61986 +    switch (what)
61987 +    {
61988 +    case EP_NODE_NETERR_ATOMIC_PACKET:
61989 +       ASSERT (nodeRail->NetworkErrorCookies[channel] == 0);
61990 +       
61991 +       /* Need to raise the approriate context filter for this node,
61992 +        * and periodically send a neterr fixup message to it until 
61993 +        * we receive an ack from it
61994 +        */
61995 +       IncrStat (rail, NeterrAtomicPacket);
61996 +       
61997 +       nodeRail->NetworkErrorCookies[channel] = cookie;
61998 +       
61999 +       nodeRail->NetworkErrorState |= EP_NODE_NETERR_ATOMIC_PACKET;
62000 +       nodeRail->MsgXid             = ep_xid_cache_alloc (sys, &rail->XidCache);
62001 +       
62002 +       EPRINTF3 (DBG_NETWORK_ERROR, "%s: atomic packet destroyed - node %d cookie %llx\n", rail->Name, nodeId, cookie);
62003 +
62004 +       printk ("%s: atomic packet destroyed - node %d cookie %llx\n", rail->Name, nodeId, cookie);
62005 +       break;
62006 +
62007 +    case EP_NODE_NETERR_DMA_PACKET:
62008 +       /* Must be an overlapped dma packet, raise the context filter,
62009 +        * and hold it up for a NETWORK_ERROR_TIMEOUT */
62010 +       IncrStat (rail, NeterrDmaPacket);
62011 +       
62012 +       nodeRail->NetworkErrorState |= EP_NODE_NETERR_DMA_PACKET;
62013 +       break;
62014 +    }
62015 +
62016 +    nodeRail->NextRunTime = lbolt + NETWORK_ERROR_TIMEOUT;
62017 +    
62018 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
62019 +
62020 +    ep_kthread_schedule (&sys->ManagerThread, nodeRail->NextRunTime);
62021 +}
62022 +
62023 +/*
62024 + * Local variables:
62025 + * c-file-style: "stroustrup"
62026 + * End:
62027 + */
62028 +
62029 Index: linux-2.4.21/drivers/net/qsnet/ep/neterr_elan3.c
62030 ===================================================================
62031 --- linux-2.4.21.orig/drivers/net/qsnet/ep/neterr_elan3.c       2004-02-23 16:02:56.000000000 -0500
62032 +++ linux-2.4.21/drivers/net/qsnet/ep/neterr_elan3.c    2005-06-01 23:12:54.674427704 -0400
62033 @@ -0,0 +1,326 @@
62034 +/*
62035 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62036 + *
62037 + *    For licensing information please see the supplied COPYING file
62038 + *
62039 + */
62040 +
62041 +#ident "@(#)$Id: neterr_elan3.c,v 1.24 2003/11/17 13:26:45 david Exp $"
62042 +/*      $Source: /cvs/master/quadrics/epmod/neterr_elan3.c,v $ */
62043 +
62044 +#include <qsnet/kernel.h>
62045 +
62046 +#include <elan/kcomm.h>
62047 +
62048 +#include "kcomm_vp.h"
62049 +#include "kcomm_elan3.h"
62050 +#include "debug.h"
62051 +
62052 +typedef struct neterr_halt_args
62053 +{
62054 +    EP3_RAIL        *Rail;
62055 +    unsigned int      NodeId;
62056 +    EP_NETERR_COOKIE *Cookies;
62057 +} NETERR_HALT_ARGS;
62058 +
62059 +static int
62060 +DmaMatchesCookie (EP3_RAIL *rail, E3_DMA_BE *dma, int nodeId, EP_NETERR_COOKIE *cookies, char *where)
62061 +{
62062 +    E3_uint32     cvproc;
62063 +    E3_uint32     cookie;
62064 +    
62065 +    if (dma->s.dma_direction == DMA_WRITE)
62066 +    {
62067 +       cvproc = dma->s.dma_destCookieVProc;
62068 +       cookie = dma->s.dma_srcCookieVProc;
62069 +    }
62070 +    else
62071 +    {
62072 +       cvproc = dma->s.dma_srcCookieVProc;
62073 +       cookie = dma->s.dma_destCookieVProc;
62074 +    }
62075 +    
62076 +    EPRINTF6 (DBG_NETWORK_ERROR, "%s: Neterr - %s: DMA %08x %08x %08x %08x\n", rail->Generic.Name, where,
62077 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
62078 +    EPRINTF5 (DBG_NETWORK_ERROR, "%s:                     %08x %08x %08x %08x\n", rail->Generic.Name,
62079 +             dma->s.dma_destEvent, dma->s.dma_destCookieVProc, dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
62080 +
62081 +    if (EP_VP_ISDATA((cvproc & DMA_PROCESS_MASK)) && EP_VP_TO_NODE(cvproc & DMA_PROCESS_MASK) == nodeId)
62082 +    {
62083 +       /*
62084 +        * This is a DMA going to the node which has a network fixup
62085 +        * request pending, so check if the cookie matches.
62086 +        */
62087 +       if ((cookie == cookies[0] || cookie == cookies[1]) /* && !WaitForEop */)
62088 +       {
62089 +           EPRINTF3 (DBG_NETWORK_ERROR, "%s: match cookie %08x on %s\n", rail->Generic.Name, cookie, where);
62090 +           
62091 +           return (TRUE);
62092 +       }
62093 +    }
62094 +
62095 +    return (FALSE);
62096 +}
62097 +
62098 +
62099 +static void
62100 +NetworkErrorHaltOperation (ELAN3_DEV *dev, void *arg)
62101 +{
62102 +    NETERR_HALT_ARGS *args = (NETERR_HALT_ARGS *) arg;
62103 +    EP3_RAIL         *rail = args->Rail;
62104 +    EP_SYS           *sys  = rail->Generic.System;
62105 +    sdramaddr_t       FPtr, BPtr;
62106 +    sdramaddr_t       Base, Top;
62107 +    E3_DMA_BE         dma;
62108 +    unsigned long     flags;
62109 +
62110 +    spin_lock_irqsave (&sys->NodeLock, flags);
62111 +
62112 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
62113 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
62114 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
62115 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
62116 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
62117 +    
62118 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
62119 +    BPtr =  read_reg32 (dev, DProc_SysCntx_BPtr);
62120 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
62121 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
62122 +    
62123 +    while (FPtr != BPtr)
62124 +    {
62125 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
62126 +       
62127 +       if (DmaMatchesCookie (rail, &dma, args->NodeId, args->Cookies, "runq "))
62128 +       {
62129 +           /*
62130 +            * Transfer the DMA to the node, it's source event will 
62131 +            * get executed later.
62132 +            */
62133 +           QueueDmaOnStalledList (rail, &dma);
62134 +           
62135 +           /*
62136 +            * Remove the DMA from the queue by replacing it with one with
62137 +            * zero size and no events.
62138 +            *
62139 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
62140 +            * to mark the approriate run queue as empty.
62141 +            */
62142 +           dma.s.dma_type            = (SYS_CONTEXT_BIT << 16);
62143 +           dma.s.dma_size            = 0;
62144 +           dma.s.dma_source          = (E3_Addr) 0;
62145 +           dma.s.dma_dest            = (E3_Addr) 0;
62146 +           dma.s.dma_destEvent       = (E3_Addr) 0;
62147 +           dma.s.dma_destCookieVProc = 0;
62148 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
62149 +           dma.s.dma_srcCookieVProc  = 0;
62150 +           
62151 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
62152 +       }
62153 +
62154 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
62155 +    }
62156 +
62157 +    rail->NetworkErrorFlushed = TRUE;
62158 +    kcondvar_wakeupall (&rail->NetworkErrorSleep, &sys->NodeLock);
62159 +
62160 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
62161 +}
62162 +
62163 +void
62164 +ep3_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
62165 +{
62166 +    EP3_RAIL        *rail        = (EP3_RAIL *) r;
62167 +    EP_SYS          *sys         = rail->Generic.System;
62168 +    ELAN3_DEV       *dev         = rail->Device;
62169 +    EP_NODE_RAIL    *nodeRail    = &rail->Generic.Nodes[nodeId];
62170 +    E3_DMA_BE        dmabe;
62171 +    EP3_COOKIE      *cp;
62172 +    E3_uint32        vp;
62173 +    NETERR_HALT_ARGS args;
62174 +    struct list_head *el, *nel, matchedList;
62175 +    int              i;
62176 +    unsigned long    flags;
62177 +
62178 +    INIT_LIST_HEAD (&matchedList);
62179 +
62180 +    StallDmaRetryThread (rail);
62181 +
62182 +    args.Rail       = rail;
62183 +    args.NodeId     = nodeId;
62184 +    args.Cookies    = cookies;
62185 +
62186 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
62187 +    QueueHaltOperation (rail->Device, 0, NULL, INT_TProcHalted | INT_DProcHalted, NetworkErrorHaltOperation, &args);
62188 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
62189 +    
62190 +    spin_lock_irqsave (&sys->NodeLock, flags);
62191 +    while (! rail->NetworkErrorFlushed)
62192 +       kcondvar_wait (&rail->NetworkErrorSleep, &sys->NodeLock, &flags);
62193 +    rail->NetworkErrorFlushed = FALSE;
62194 +    
62195 +    spin_lock (&rail->DmaRetryLock);
62196 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
62197 +    {
62198 +       list_for_each_safe (el, nel, &rail->DmaRetries[i]) {
62199 +           EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
62200 +
62201 +           if (DmaMatchesCookie (rail, &retry->Dma, nodeId, cookies, "retry"))
62202 +           {
62203 +               /* remove from retry list */
62204 +               list_del (&retry->Link);
62205 +
62206 +               /* add to list of dmas which matched */
62207 +               list_add_tail (&retry->Link, &matchedList);
62208 +           }
62209 +       }
62210 +    }
62211 +    
62212 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
62213 +       EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
62214 +       
62215 +       if (DmaMatchesCookie (rail, &retry->Dma, nodeId, cookies, "stalled"))
62216 +       {
62217 +           /* remove from retry list */
62218 +           list_del (&retry->Link);
62219 +           
62220 +           /* add to list of dmas which matched */
62221 +           list_add_tail (&retry->Link, &matchedList);
62222 +       }
62223 +    }
62224 +    
62225 +    spin_unlock (&rail->DmaRetryLock);
62226 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
62227 +    
62228 +    ResumeDmaRetryThread (rail);
62229 +
62230 +    /* Now "set" the source event of any write DMA's */
62231 +    while (! list_empty (&matchedList))
62232 +    {
62233 +       EP3_RETRY_DMA *retry = list_entry (matchedList.next, EP3_RETRY_DMA, Link);
62234 +       
62235 +       list_del (&retry->Link);
62236 +
62237 +       if (retry->Dma.s.dma_direction == DMA_WRITE && retry->Dma.s.dma_srcEvent)
62238 +       {
62239 +           sdramaddr_t event = ep_elan2sdram (&rail->Generic, retry->Dma.s.dma_srcEvent);
62240 +
62241 +           /* Block local interrupts, since we need to atomically
62242 +            * decrement the event count and perform the word write
62243 +            */
62244 +           local_irq_save (flags);
62245 +           {
62246 +               E3_uint32 type  = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type));
62247 +               E3_uint32 count = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Count));
62248 +
62249 +               elan3_sdram_writel (dev, event + offsetof (E3_Event, ev_Count), count - 1);
62250 +
62251 +               if (count == 1)
62252 +               {
62253 +                   if (type & EV_TYPE_MASK_BCOPY)
62254 +                   {
62255 +                       E3_Addr srcVal  = elan3_sdram_readl (dev, event + offsetof (E3_BlockCopyEvent, ev_Source));
62256 +                       E3_Addr dstAddr = elan3_sdram_readl (dev, event + offsetof (E3_BlockCopyEvent, ev_Dest)) & ~EV_BCOPY_DTYPE_MASK;
62257 +
62258 +                       ASSERT ((srcVal & EV_WCOPY) != 0);
62259 +                       
62260 +                       EPRINTF3 (DBG_NETWORK_ERROR, "%s: neterr perform event word write at %08x with %08x\n", rail->Generic.Name, dstAddr, srcVal);
62261 +
62262 +                       ELAN3_OP_STORE32 (rail->Ctxt, dstAddr, srcVal);
62263 +                   }
62264 +
62265 +                   if ((type & ~EV_TYPE_MASK_BCOPY) != 0)
62266 +                   {
62267 +                       if ((type & EV_TYPE_MASK_CHAIN) == EV_TYPE_CHAIN)
62268 +                       {
62269 +                           printk ("%s: event at %08x - chained event %x is invalid\n", rail->Generic.Name, retry->Dma.s.dma_srcEvent, type);
62270 +                           panic ("ep: neterr invalid event type\n");
62271 +                       }
62272 +                       else if ((type & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
62273 +                       {
62274 +                           EPRINTF2 (DBG_NETWORK_ERROR, "%s: neterr event interrupt - cookie %08x\n", rail->Generic.Name, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)));
62275 +                           
62276 +                           cp = LookupCookie (&rail->CookieTable, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)));
62277 +                           
62278 +                           if (cp->Operations->Event)
62279 +                               cp->Operations->Event(rail, cp->Arg);
62280 +                       }
62281 +                       else if ((type & EV_TYPE_MASK_DMA) == EV_TYPE_DMA)
62282 +                       {
62283 +                           sdramaddr_t dma = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2));
62284 +                           
62285 +                           EPRINTF2 (DBG_NETWORK_ERROR, "%s: neterr chained dma - %08x\n", rail->Generic.Name, (type & ~EV_TYPE_MASK2));
62286 +                           
62287 +                           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
62288 +                           
62289 +                           if (dmabe.s.dma_direction == DMA_WRITE)
62290 +                           {
62291 +                               vp = dmabe.s.dma_destVProc;
62292 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
62293 +                           }
62294 +                           else
62295 +                           {
62296 +                               vp = dmabe.s.dma_srcVProc;
62297 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
62298 +                               
62299 +                               /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the 
62300 +                                * DMA descriptor will be read from the EP_RETRY_DMA rather than the 
62301 +                                * original DMA - this can then get reused and an incorrect DMA 
62302 +                                * descriptor sent 
62303 +                                * eventp->ev_Type contains the dma address with type in the lower bits 
62304 +                                */ 
62305 +                           
62306 +                               dmabe.s.dma_source    = (type & ~EV_TYPE_MASK2);
62307 +                               dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
62308 +                           }
62309 +                       
62310 +                           ASSERT (EP_VP_ISDATA(vp));
62311 +                       
62312 +                           nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
62313 +
62314 +                           switch (nodeRail->State)
62315 +                           {
62316 +                           case EP_NODE_CONNECTED:
62317 +                           case EP_NODE_LEAVING_CONNECTED:
62318 +                               if (cp != NULL)
62319 +                                   cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
62320 +                               else
62321 +                               {
62322 +                                   ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
62323 +                               
62324 +                                   QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
62325 +                               }
62326 +                               break;
62327 +
62328 +                           case EP_NODE_LOCAL_PASSIVATE:
62329 +                               QueueDmaOnStalledList (rail, &dmabe);
62330 +                               break;
62331 +
62332 +                           default:
62333 +                               panic ("ep: neterr incorrect state for node\n");
62334 +                           }
62335 +                       }
62336 +                       else if ((type & EV_TYPE_MASK_THREAD) == EV_TYPE_THREAD)
62337 +                       {
62338 +                           printk ("%s: event at %08x - thread waiting %x is invalid\n", rail->Generic.Name, retry->Dma.s.dma_srcEvent, type);
62339 +                           panic ("ep: neterr invalid event type\n");
62340 +                       }
62341 +                   }
62342 +               }
62343 +           }
62344 +           local_irq_restore(flags);
62345 +       }
62346 +       
62347 +       /* add to free list */
62348 +       spin_lock_irqsave (&rail->DmaRetryLock, flags);
62349 +       list_add (&retry->Link, &rail->DmaRetryFreeList);
62350 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
62351 +    }
62352 +}
62353 +
62354 +/*
62355 + * Local variables:
62356 + * c-file-style: "stroustrup"
62357 + * End:
62358 + */
62359 +
62360 Index: linux-2.4.21/drivers/net/qsnet/ep/neterr_elan4.c
62361 ===================================================================
62362 --- linux-2.4.21.orig/drivers/net/qsnet/ep/neterr_elan4.c       2004-02-23 16:02:56.000000000 -0500
62363 +++ linux-2.4.21/drivers/net/qsnet/ep/neterr_elan4.c    2005-06-01 23:12:54.675427552 -0400
62364 @@ -0,0 +1,251 @@
62365 +/*
62366 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62367 + *
62368 + *    For licensing information please see the supplied COPYING file
62369 + *
62370 + */
62371 +
62372 +#ident "@(#)$Id: neterr_elan4.c,v 1.2 2003/11/24 17:57:24 david Exp $"
62373 +/*      $Source: /cvs/master/quadrics/epmod/neterr_elan4.c,v $ */
62374 +
62375 +#include <qsnet/kernel.h>
62376 +
62377 +#include <elan/kcomm.h>
62378 +
62379 +#include "kcomm_vp.h"
62380 +#include "kcomm_elan4.h"
62381 +#include "debug.h"
62382 +
62383 +struct neterr_desc
62384 +{
62385 +    EP4_RAIL         *rail;
62386 +    unsigned int      nodeid;
62387 +    EP_NETERR_COOKIE *cookies;
62388 +    int                      done;
62389 +} ;
62390 +
62391 +static int
62392 +dma_matches_cookie (EP4_RAIL *rail, E4_uint64 vproc, E4_uint64 cookie, unsigned int nodeId, EP_NETERR_COOKIE *cookies, const char *where)
62393 +{
62394 +    if ((EP_VP_ISDATA (vproc) && EP_VP_TO_NODE (vproc) == nodeId) && (cookie == cookies[0] || cookie == cookies[1]))
62395 +    {
62396 +       EPRINTF3 (DBG_NETWORK_ERROR, "%s: match cookie %016llx on %s\n", rail->r_generic.Name, cookie, where);
62397 +
62398 +       return 1;
62399 +    }
62400 +    return 0;
62401 +}
62402 +
62403 +static void
62404 +ep4_neterr_dma_flushop (ELAN4_DEV *dev, void *arg, int qfull)
62405 +{
62406 +    struct neterr_desc *desc  = (struct neterr_desc *) arg;
62407 +    EP4_RAIL           *rail  = desc->rail;
62408 +    E4_uint64           qptrs = read_reg64 (dev, DProcHighPriPtrs);
62409 +    E4_uint32           qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
62410 +    E4_uint32           qfptr = E4_QueueFrontPointer (qptrs);
62411 +    E4_uint32           qbptr = E4_QueueBackPointer (qptrs);
62412 +    E4_DProcQueueEntry  qentry;
62413 +    unsigned long       flags;
62414 +
62415 +    while ((qfptr != qbptr) || qfull)
62416 +    {
62417 +       E4_uint64 cookie = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_cookie));
62418 +       E4_uint64 vproc  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_vproc));
62419 +
62420 +       if (dma_matches_cookie (rail, vproc, cookie, desc->nodeid, desc->cookies, "runq "))
62421 +       {
62422 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_DProcQueueEntry));
62423 +
62424 +           ep4_queue_dma_stalled (rail, &qentry.Desc);
62425 +
62426 +           /* Replace the dma with one which will "disappear" */
62427 +           qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
62428 +           qentry.Desc.dma_cookie   = 0;
62429 +           qentry.Desc.dma_vproc    = 0;
62430 +           qentry.Desc.dma_srcAddr  = 0;
62431 +           qentry.Desc.dma_dstAddr  = 0;
62432 +           qentry.Desc.dma_srcEvent = 0;
62433 +           qentry.Desc.dma_dstEvent = 0;
62434 +
62435 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
62436 +       }
62437 +       
62438 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
62439 +       qfull = 0;
62440 +    }
62441 +
62442 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
62443 +    desc->done = 1;
62444 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
62445 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
62446 +}
62447 +
62448 +static void
62449 +ep4_neterr_dma_haltop (ELAN4_DEV *dev, void *arg)
62450 +{
62451 +    struct neterr_desc *desc = (struct neterr_desc *) arg;
62452 +
62453 +    elan4_queue_dma_flushop (dev, &desc->rail->r_flushop, 1);
62454 +}
62455 +
62456 +void
62457 +ep4_neterr_fixup_dmas (EP4_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
62458 +{
62459 +    EP_NODE_RAIL      *nodeRail = &rail->r_generic.Nodes[nodeId];
62460 +    struct neterr_desc desc;
62461 +    struct list_head   matchedList;
62462 +    struct list_head  *el, *nel;
62463 +    unsigned long      flags;
62464 +    register int       i;
62465 +
62466 +    desc.rail    = rail;
62467 +    desc.nodeid  = nodeId;
62468 +    desc.cookies = cookies;
62469 +    desc.done    = 0;
62470 +
62471 +    INIT_LIST_HEAD (&matchedList);
62472 +
62473 +    /* First -  stall the retry thread, so that it will no longer restart
62474 +     *          any dma's from the retry list */
62475 +    ep_kthread_stall (&rail->r_retry_thread);
62476 +    
62477 +    /* Second - flush through all command queues targeted by events, threads etc */
62478 +    ep4_flush_ecqs (rail);
62479 +    
62480 +    /* Third - queue a halt operation to flush through all DMA's which are executing
62481 +     *         or on the run queues */
62482 +    kmutex_lock (&rail->r_haltop_mutex);
62483 +    
62484 +    rail->r_haltop.op_mask      = INT_DProcHalted;
62485 +    rail->r_haltop.op_function  = ep4_neterr_dma_haltop;
62486 +    rail->r_haltop.op_arg       = &desc;
62487 +
62488 +    rail->r_flushop.op_function = ep4_neterr_dma_flushop;
62489 +    rail->r_flushop.op_arg      = &desc;
62490 +    
62491 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
62492 +
62493 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
62494 +    while (! desc.done)
62495 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
62496 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
62497 +    kmutex_unlock (&rail->r_haltop_mutex);
62498 +
62499 +    /* Fourth - run down the dma retry lists and move all entries to the cancelled
62500 +     *          list.  Any dma's which were on the run queues have already been
62501 +     *          moved there */
62502 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
62503 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
62504 +    {
62505 +       list_for_each_safe (el,nel, &rail->r_dma_retrylist[i]) {
62506 +           EP4_DMA_RETRY *retry    = list_entry (el, EP4_DMA_RETRY, retry_link);
62507 +           
62508 +           if (dma_matches_cookie (rail, retry->retry_dma.dma_vproc, retry->retry_dma.dma_cookie, nodeId, cookies, "retry"))
62509 +           {
62510 +               /* remove from retry list */
62511 +               list_del (&retry->retry_link);
62512 +               
62513 +               /* add to list of dmas which matched */
62514 +               list_add_tail (&retry->retry_link, &matchedList);
62515 +           }
62516 +       }
62517 +    }
62518 +    
62519 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
62520 +       EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link);
62521 +       
62522 +       if (dma_matches_cookie (rail, retry->retry_dma.dma_vproc, retry->retry_dma.dma_cookie, nodeId, cookies, "stalled"))
62523 +       {
62524 +           /* remove from retry list */
62525 +           list_del (&retry->retry_link);
62526 +           
62527 +           /* add to list of dmas which matched */
62528 +           list_add_tail (&retry->retry_link, &matchedList);
62529 +       }
62530 +    }
62531 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
62532 +    
62533 +    /* Now "set" the source event of any put DMA's; we can use the dma 
62534 +     * retry command queue as the retry thread is stalled */
62535 +    while (! list_empty (&matchedList))
62536 +    {
62537 +       EP4_DMA_RETRY *retry = list_entry (matchedList.next, EP4_DMA_RETRY, retry_link);
62538 +       
62539 +       list_del (&retry->retry_link);
62540 +
62541 +       elan4_set_event_cmd (rail->r_dma_ecq->ecq_cq, retry->retry_dma.dma_srcEvent);
62542 +
62543 +       spin_lock_irqsave (&rail->r_dma_lock, flags);
62544 +       list_add (&retry->retry_link, &rail->r_dma_freelist);
62545 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
62546 +    }
62547 +
62548 +    /* Flush through the command queues to ensure that all the setevents have executed */
62549 +    ep4_flush_ecqs (rail);
62550 +
62551 +    /* Finally - allow the retry thread to run again */
62552 +    ep_kthread_resume (&rail->r_retry_thread);
62553 +}
62554 +
62555 +void
62556 +ep4_add_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops)
62557 +{
62558 +    /* we're called from the ManagerThread, so no need to stall it */
62559 +    list_add_tail (&ops->op_link, &rail->r_neterr_ops);
62560 +}
62561 +void
62562 +ep4_remove_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops)
62563 +{
62564 +    EP_SYS *sys = rail->r_generic.System;
62565 +
62566 +    ep_kthread_stall (&sys->ManagerThread);
62567 +    list_del (&ops->op_link);
62568 +    ep_kthread_resume (&sys->ManagerThread);
62569 +}
62570 +
62571 +void
62572 +ep4_neterr_fixup_sten (EP4_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
62573 +{
62574 +    struct list_head *el;
62575 +
62576 +    list_for_each (el, &rail->r_neterr_ops) {
62577 +       EP4_NETERR_OPS *op = list_entry (el, EP4_NETERR_OPS, op_link);
62578 +
62579 +       (op->op_func) (rail, op->op_arg, nodeId, cookies);
62580 +    }
62581 +}
62582 +
62583 +void
62584 +ep4_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
62585 +{
62586 +    EP4_RAIL *rail = (EP4_RAIL *) r;
62587 +
62588 +    /* network error cookies can come from the following :
62589 +     *
62590 +     *   DMA  engine
62591 +     *     if a DMA matches a network error cookie, then we just need to 
62592 +     *     execute the local setevent *before* returning.
62593 +     *
62594 +     *   STEN packet
62595 +     *     if the STEN packet was generated as a WAIT_FOR_EOP
62596 +     *     and it's not present on the retry lists, then re-create
62597 +     *     it.
62598 +     *
62599 +     */
62600 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: ep4_neterr_fixup: node %d cookies <%lld%s%s%s%s> <%lld%s%s%s%s>\n",
62601 +             rail->r_generic.Name, nodeId, EP4_COOKIE_STRING(cookies[0]), EP4_COOKIE_STRING(cookies[1]));
62602 +
62603 +    if ((cookies[0] & EP4_COOKIE_DMA) || (cookies[1] & EP4_COOKIE_DMA))
62604 +       ep4_neterr_fixup_dmas (rail, nodeId, cookies);
62605 +
62606 +    if ((cookies[0] & EP4_COOKIE_STEN) || (cookies[1] & EP4_COOKIE_STEN))
62607 +       ep4_neterr_fixup_sten (rail, nodeId, cookies);
62608 +}
62609 +
62610 +/*
62611 + * Local variables:
62612 + * c-file-style: "stroustrup"
62613 + * End:
62614 + */
62615 +
62616 Index: linux-2.4.21/drivers/net/qsnet/ep/nmh.c
62617 ===================================================================
62618 --- linux-2.4.21.orig/drivers/net/qsnet/ep/nmh.c        2004-02-23 16:02:56.000000000 -0500
62619 +++ linux-2.4.21/drivers/net/qsnet/ep/nmh.c     2005-06-01 23:12:54.676427400 -0400
62620 @@ -0,0 +1,181 @@
62621 +/*
62622 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62623 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
62624 + *
62625 + *    For licensing information please see the supplied COPYING file
62626 + *
62627 + */
62628 +#ident "@(#)$Id: nmh.c,v 1.6 2004/01/05 13:48:08 david Exp $"
62629 +/*      $Source: /cvs/master/quadrics/epmod/nmh.c,v $*/
62630 +
62631 +#include <qsnet/kernel.h>
62632 +
62633 +#include <elan/kcomm.h>
62634 +
62635 +#define EP_NMD_SPANS(nmd, base, top)   ((nmd)->nmd_addr <= (base) &&  \
62636 +                                        ((nmd)->nmd_addr + (nmd)->nmd_len - 1) >= (top))
62637 +
62638 +#define EP_NMD_OVERLAPS(nmd, addr, len)        ((nmd)->nmd_addr <= ((addr) + (len)) && \
62639 +                                        ((nmd)->nmd_addr + (nmd)->nmd_len - 1) >= (addr))
62640 +
62641 +#define EP_NMH_HASH(tbl,idx,addr)      ((addr) % (tbl)->tbl_size[idx])
62642 +
62643 +int
62644 +ep_nmh_init (EP_NMH_TABLE *tbl)
62645 +{
62646 +    int i, idx, hsize = 1;
62647 +
62648 +    for (idx = EP_NMH_NUMHASH-1; idx >= 0; idx--, hsize <<= 1)
62649 +    {
62650 +       tbl->tbl_size[idx] = (hsize < EP_NMH_HASHSIZE) ? hsize : EP_NMH_HASHSIZE;
62651 +
62652 +       KMEM_ZALLOC (tbl->tbl_hash[idx], struct list_head *, sizeof (struct list_head) * tbl->tbl_size[idx], 1);
62653 +       
62654 +       if (tbl->tbl_hash[idx] == NULL)
62655 +       {
62656 +           while (++idx < EP_NMH_NUMHASH)
62657 +               KMEM_FREE (tbl->tbl_hash[idx], sizeof (struct list_head) * tbl->tbl_size[idx]);
62658 +           return (ENOMEM);
62659 +       }
62660 +
62661 +       for (i = 0; i < tbl->tbl_size[idx]; i++)
62662 +           INIT_LIST_HEAD (&tbl->tbl_hash[idx][i]);
62663 +    }
62664 +
62665 +    return (0);
62666 +}
62667 +
62668 +void
62669 +ep_nmh_fini (EP_NMH_TABLE *tbl)
62670 +{
62671 +    int idx;
62672 +
62673 +    for (idx = 0; idx < EP_NMH_NUMHASH; idx++)
62674 +       if (tbl->tbl_hash[idx])
62675 +           KMEM_FREE (tbl->tbl_hash[idx], sizeof (struct list_head) * tbl->tbl_size[idx]);
62676 +    
62677 +    bzero (tbl, sizeof (EP_NMH_TABLE));
62678 +}
62679 +
62680 +void
62681 +ep_nmh_insert (EP_NMH_TABLE *tbl, EP_NMH *nmh)
62682 +{
62683 +    EP_ADDR base = nmh->nmh_nmd.nmd_addr;
62684 +    EP_ADDR top  = base + nmh->nmh_nmd.nmd_len - 1;
62685 +    int     idx;
62686 +
62687 +    for (idx = 0, base >>= 12, top >>= 12; base != top && idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1)
62688 +       ;
62689 +
62690 +    list_add_tail (&nmh->nmh_link, &tbl->tbl_hash[idx][EP_NMH_HASH(tbl, idx, base)]);
62691 +}
62692 +
62693 +void
62694 +ep_nmh_remove (EP_NMH_TABLE *tbl, EP_NMH *nmh)
62695 +{
62696 +    list_del (&nmh->nmh_link);
62697 +}
62698 +
62699 +EP_NMH *
62700 +ep_nmh_find (EP_NMH_TABLE *tbl, EP_NMD *nmd)
62701 +{
62702 +    EP_ADDR           base = nmd->nmd_addr;
62703 +    EP_ADDR           top  = base + nmd->nmd_len - 1;
62704 +    int               idx;
62705 +    struct list_head *le;
62706 +    
62707 +    for (idx = 0, base >>= 12, top >>= 12; base != top && idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1)
62708 +       ;
62709 +    
62710 +    for (; idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1) {
62711 +
62712 +       list_for_each (le, &tbl->tbl_hash[idx][EP_NMH_HASH(tbl, idx, base)]) {
62713 +           EP_NMH *nmh = list_entry (le, EP_NMH, nmh_link);
62714 +
62715 +           if (EP_NMD_SPANS (&nmh->nmh_nmd, nmd->nmd_addr, nmd->nmd_addr + nmd->nmd_len - 1))
62716 +               return (nmh);
62717 +       }
62718 +    }
62719 +
62720 +    return (0);
62721 +}
62722 +
62723 +void
62724 +ep_nmd_subset (EP_NMD *subset, EP_NMD *nmd, unsigned off, unsigned len)
62725 +{
62726 +    ASSERT ((off + len - 1) <= nmd->nmd_len);
62727 +
62728 +    subset->nmd_addr = nmd->nmd_addr + off;
62729 +    subset->nmd_len  = len;
62730 +    subset->nmd_attr = nmd->nmd_attr;
62731 +}
62732 +
62733 +int
62734 +ep_nmd_merge (EP_NMD *merged, EP_NMD *a, EP_NMD *b)
62735 +{
62736 +    if (EP_NMD_NODEID (a) != EP_NMD_NODEID (b))                        /* not generated on the same node */
62737 +       return 0;
62738 +    
62739 +    if ((EP_NMD_RAILMASK (a) & EP_NMD_RAILMASK (b)) == 0)      /* no common rails */
62740 +       return 0;
62741 +    
62742 +    if (b->nmd_addr == (a->nmd_addr + a->nmd_len))
62743 +    {
62744 +       if (merged != NULL)
62745 +       {
62746 +           merged->nmd_addr = a->nmd_addr;
62747 +           merged->nmd_len  = a->nmd_len + b->nmd_len;
62748 +           merged->nmd_attr = EP_NMD_ATTR(EP_NMD_NODEID(a), EP_NMD_RAILMASK(a) & EP_NMD_RAILMASK(b));
62749 +       }
62750 +       return 1;
62751 +    }
62752 +    
62753 +    if (a->nmd_addr == (b->nmd_addr + b->nmd_len))
62754 +    {
62755 +       if (merged != NULL)
62756 +       {
62757 +           merged->nmd_addr = b->nmd_addr;
62758 +           merged->nmd_len  = b->nmd_len + a->nmd_len;
62759 +           merged->nmd_attr = EP_NMD_ATTR(EP_NMD_NODEID(b), EP_NMD_RAILMASK(a) & EP_NMD_RAILMASK(b));
62760 +       }
62761 +       
62762 +       return 1;
62763 +    }
62764 +
62765 +    return 0;
62766 +}
62767 +
62768 +int
62769 +ep_nmd_map_rails (EP_SYS *sys, EP_NMD *nmd, unsigned railmask)
62770 +{
62771 +    EP_NMH *nmh = ep_nmh_find (&sys->MappingTable, nmd);
62772 +
62773 +    if (nmh == NULL)
62774 +    {
62775 +       printk ("ep_nmd_map_rails: nmd=%08x.%08x.%08x cannot be found\n",
62776 +               nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
62777 +       return (-1);
62778 +    }
62779 +
62780 +    return (nmh->nmh_ops->op_map_rails (sys, nmh, nmd, railmask));
62781 +}
62782 +
62783 +EP_RAILMASK
62784 +ep_nmd2railmask (EP_NMD *frags, int nFrags)
62785 +{
62786 +    EP_RAILMASK mask;
62787 +
62788 +    if (nFrags == 0)
62789 +       return ((EP_RAILMASK)-1);
62790 +    
62791 +    for (mask = EP_NMD_RAILMASK(frags); --nFrags; )
62792 +       mask &= EP_NMD_RAILMASK(++frags);
62793 +
62794 +    return (mask);
62795 +}
62796 +
62797 +/*
62798 + * Local variables:
62799 + * c-file-style: "stroustrup"
62800 + * End:
62801 + */
62802 Index: linux-2.4.21/drivers/net/qsnet/ep/probenetwork.c
62803 ===================================================================
62804 --- linux-2.4.21.orig/drivers/net/qsnet/ep/probenetwork.c       2004-02-23 16:02:56.000000000 -0500
62805 +++ linux-2.4.21/drivers/net/qsnet/ep/probenetwork.c    2005-06-01 23:12:54.677427248 -0400
62806 @@ -0,0 +1,446 @@
62807 +/*
62808 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62809 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
62810 + *
62811 + *    For licensing information please see the supplied COPYING file
62812 + *
62813 + */
62814 +
62815 +#ident "@(#)$Id: probenetwork.c,v 1.43 2004/04/19 15:43:15 david Exp $"
62816 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork.c,v $ */
62817 +
62818 +#include <qsnet/kernel.h>
62819 +
62820 +#include <elan/kcomm.h>
62821 +#include "debug.h"
62822 +
62823 +int PositionCheck = 1;
62824 +
62825 +#define NUM_DOWN_FROM_VAL(NumDownLinksVal, level)      (((NumDownLinksVal) >> ((level) << 2)) & 0xF)
62826 +
62827 +int
62828 +ProbeNetwork (EP_RAIL *rail, ELAN_POSITION *pos)
62829 +{
62830 +    int               lvl, i;
62831 +    int               level;
62832 +    int               nodeid;
62833 +    int               numnodes;
62834 +    int                      randomRoutingDisabled;
62835 +    int               sw;
62836 +    int               nacks;
62837 +    int               nowayup;
62838 +    int                      nalias;
62839 +    int                      upmask;
62840 +    int                      partial;
62841 +    int                      link;
62842 +    int                      invalid;
62843 +    int                      linkdown[ELAN_MAX_LEVELS];
62844 +    int                      linkup[ELAN_MAX_LEVELS];
62845 +    EP_SWITCH        *switches[ELAN_MAX_LEVELS];
62846 +    int               switchCount[ELAN_MAX_LEVELS+1];
62847 +    int               lowestBcast;
62848 +    int               numUpLinks[ELAN_MAX_LEVELS];
62849 +    int               routedown [ELAN_MAX_LEVELS];
62850 +
62851 +    EPRINTF1 (DBG_PROBE, "%s: ProbeNetwork started\n", rail->Name);
62852 +
62853 +    switchCount[0] = 1;
62854 +    numUpLinks [0] = 4;
62855 +
62856 +    for (level = 0; level < ELAN_MAX_LEVELS; level++)
62857 +    {
62858 +       int ndown  = NUM_DOWN_FROM_VAL (rail->Devinfo.dev_num_down_links_value, level);
62859 +
62860 +       KMEM_ZALLOC (switches[level], EP_SWITCH *, sizeof (EP_SWITCH) * switchCount[level], 1);
62861 +
62862 +       for (sw = 0, nacks = 0, nowayup = 0, lowestBcast=7; sw < switchCount[level]; sw++)
62863 +       {
62864 +           EP_SWITCH *lsw  = &switches[level][sw];
62865 +           int        good = 1;
62866 +           int        tsw;
62867 +
62868 +           for (nodeid = 0,tsw = sw, lvl = level-1 ; lvl >= 0 ; lvl--)
62869 +           {
62870 +               EP_SWITCH *lsw;
62871 +               int        link = (8-numUpLinks[lvl]) + (tsw % numUpLinks[lvl]);
62872 +
62873 +               tsw  = tsw / numUpLinks[lvl];
62874 +               lsw  = &switches[lvl][tsw];
62875 +
62876 +               if (lsw->present == 0 || (lsw->lnr & (1 << link)))
62877 +               {
62878 +                   EPRINTF4 (DBG_PROBE, "lvl %d sw %d present=%d lnr=%x\n", lvl, sw, lsw->present, lsw->lnr);
62879 +                   good = 0;
62880 +               }
62881 +               
62882 +               linkup[lvl]   = link;
62883 +               linkdown[lvl] = lsw->link;
62884 +
62885 +               if ( lvl ) nodeid = ((nodeid + linkdown[lvl]) * (8-numUpLinks[lvl-1]));
62886 +               else       nodeid += linkdown[0];
62887 +
62888 +           }
62889 +           
62890 +           /* 
62891 +            * don't bother probing routes which we've already seen are unreachable 
62892 +            * because a link upwards was in reset or the switch previously nacked us.
62893 +            */
62894 +           if (! good)
62895 +           {
62896 +               lsw->present = 0;
62897 +
62898 +               nacks++;
62899 +               nowayup++;
62900 +
62901 +               continue;
62902 +           }
62903 +
62904 +           lsw->present = rail->Operations.ProbeRoute (rail, level, sw, nodeid, linkup, linkdown, 5, lsw);
62905 +
62906 +           if (! lsw->present)
62907 +           {
62908 +               EPRINTF3 (DBG_PROBE, "%s: level %d switch %d - unexpected nack\n", rail->Name, level, sw);
62909 +
62910 +               nacks++;
62911 +               nowayup++;
62912 +           }
62913 +           else
62914 +           {
62915 +               EPRINTF5 (DBG_PROBE, "%s: level %d switch %d - link %d bcast %d\n", rail->Name, level, sw, lsw->link, lsw->bcast);
62916 +
62917 +               if (level == 2 && rail->Devinfo.dev_device_id == PCI_DEVICE_ID_ELAN3)
62918 +               {
62919 +                   /* If we see broadcast top as 7, and we came in on a low link, then we can't
62920 +                    * determine whether we're in a 128 way or a un-configured 64u64d switch, so
62921 +                    * we treat it as a 64u64d and detect the 128 way case by "going over the top" 
62922 +                    * below. Unless we've been told what it really is by NumDownLinksVal.
62923 +                    */
62924 +                   if (lsw->bcast == 7 && lsw->link < 4)
62925 +                       lsw->bcast = ndown ? (ndown - 1) : 3;
62926 +               }
62927 +
62928 +               if ( lowestBcast > lsw->bcast ) 
62929 +                   lowestBcast = lsw->bcast;
62930 +
62931 +               if (lsw->link > (ndown ? (ndown-1) : (lowestBcast == 7 ? 3 : lowestBcast)))
62932 +               {
62933 +                   /* We've arrived on a "up-link" - this could be either
62934 +                    * we're in the top half of a x8 top-switch - or we're
62935 +                    * in the bottom half and have gone "over the top". We
62936 +                    * differentiate these cases since the switches below
62937 +                    * a x8 top-switch will have broadcast top set to 3, 
62938 +                    * and the x8 topswitch have broadcast top set to 7.
62939 +                    */
62940 +                   if (lsw->bcast == 7)
62941 +                       nowayup++;
62942 +                   else
62943 +                   {
62944 +                       EPRINTF2 (DBG_PROBE, "%s: level %d - gone over the top\n",
62945 +                                 rail->Name, level);
62946 +
62947 +                       if (level > 0)
62948 +                       {
62949 +                           KMEM_FREE (switches[level], sizeof (EP_SWITCH) * switchCount[level] );
62950 +                           level--;
62951 +                       }
62952 +                       
62953 +                       numUpLinks[level] = 0;
62954 +                       goto finished;
62955 +                   }
62956 +               }
62957 +
62958 +           }
62959 +       }
62960 +
62961 +       numUpLinks[level]    = ndown ? (8 - ndown) : (7 - lowestBcast);
62962 +       switchCount[level+1] = switchCount[level] *  numUpLinks[level];
62963 +       
62964 +       /* Now we know which links are uplinks, we can see whether there is
62965 +        * any possible ways up */
62966 +       upmask = (ndown ? (0xFF << ndown) & 0xFF : (0xFF << (8 - numUpLinks[level])) & 0xFF);
62967 +
62968 +       for (sw = 0; sw < switchCount[level]; sw++)
62969 +       {
62970 +           EP_SWITCH *lsw  = &switches[level][sw];
62971 +
62972 +           if (lsw->present && lsw->link <= (ndown ? (ndown-1) : (lowestBcast == 7 ? 3 : lowestBcast)) && (switches[level][sw].lnr & upmask) == upmask)
62973 +               nowayup++;
62974 +       }
62975 +
62976 +       EPRINTF7 (DBG_PROBE, "%s: level %d - sw=%d nacks=%d nowayup=%d bcast=%d numup=%d\n", 
62977 +                 rail->Name, level, sw, nacks, nowayup, lowestBcast, numUpLinks[level]);
62978 +
62979 +       if (nacks == sw)
62980 +       {
62981 +           static bitmap_t printed[BT_BITOUL(EP_MAX_RAILS)];
62982 +
62983 +           if (! BT_TEST (printed, rail->Number))
62984 +               printk ("%s: cannot determine network position\n", rail->Name);
62985 +           BT_SET (printed, rail->Number);
62986 +           goto failed;
62987 +       }
62988 +
62989 +       if (nowayup == sw)
62990 +           goto finished;
62991 +    }
62992 +    
62993 +    printk ("%s: exceeded number of levels\n", rail->Name);
62994 +    level = ELAN_MAX_LEVELS - 1;
62995 +
62996 + failed:
62997 +    
62998 +    for (lvl = 0; lvl <= level; lvl++)
62999 +       KMEM_FREE (switches[lvl], sizeof (EP_SWITCH) * switchCount[lvl] );
63000 +
63001 +    return -EAGAIN;
63002 +
63003 + finished:
63004 +    /* we've successfully probed the network - now calculate our node 
63005 +     * position and what level of random routing is possible */
63006 +    nalias = 1;
63007 +    for (lvl = 0, invalid = 0, partial = 0, randomRoutingDisabled = 0; lvl <= level; lvl++)
63008 +    {
63009 +       int ndown  = NUM_DOWN_FROM_VAL (rail->Devinfo.dev_num_down_links_value, lvl);
63010 +       int upmask = ndown ? (0xFF << ndown) & 0xFF : 0xF0;
63011 +
63012 +       for (sw = 0, nalias = 0; sw < switchCount[lvl]; sw++)
63013 +       {
63014 +           EP_SWITCH *lsw = &switches[lvl][sw];
63015 +           
63016 +           /* You can only use adaptive routing if links 4-7 are uplinks, and at least one of them is
63017 +            * not in reset.   Otherwise you can randomly select an "uplink" if all the uplinks are not
63018 +            * in reset. */
63019 +           if (lsw->present && ((upmask == 0xF0) ? (lsw->lnr & upmask) == upmask : (lsw->lnr & upmask) != 0))
63020 +               randomRoutingDisabled |= (1 << lvl);
63021 +           
63022 +           if (!lsw->present)
63023 +               partial++;
63024 +           else
63025 +           {
63026 +               if (lsw->invalid)
63027 +               {
63028 +                   printk ("%s: invalid switch detected (level %d switch %d)\n", rail->Name, lvl, sw);
63029 +                   invalid++;
63030 +               }
63031 +               
63032 +               for (i = 0; i < nalias; i++)
63033 +                   if (linkdown[i] == lsw->link)
63034 +                       break;
63035 +               if (i == nalias)
63036 +                   linkdown[nalias++] = lsw->link;
63037 +           }
63038 +       }
63039 +       
63040 +       link = linkdown[0];
63041 +       for (i = 1; i < nalias; i++)
63042 +           if (linkdown[i] < link)
63043 +               link = linkdown[i];
63044 +
63045 +       if (nalias > 1 && lvl != level)
63046 +       {
63047 +           printk ("%s: switch aliased below top level (level %d)\n", rail->Name, lvl);
63048 +           invalid++;
63049 +       }
63050 +       
63051 +       routedown[lvl] = link;
63052 +   }
63053 +
63054 +    for (lvl = 0; lvl <= level; lvl++) 
63055 +       KMEM_FREE (switches[lvl], sizeof (EP_SWITCH) * switchCount[lvl] );
63056 +
63057 +    if (invalid)
63058 +    {
63059 +       printk ("%s: invalid switch configuration\n", rail->Name);
63060 +       return (EINVAL);
63061 +    }
63062 +
63063 +    /* Handle the aliasing case where a 16 way is used as multiple smaller switches */
63064 +    if (nalias == 1)
63065 +       level++;
63066 +    else if (nalias == 2)                                      /* a 16 way as 2x8 ways */
63067 +       numUpLinks[level++] = 6;                                /*   only 2 down links */
63068 +    else if (nalias > 4)                                       /* a 16 way as 8x2 ways */
63069 +       numUpLinks[level-1] = 6;
63070 +    
63071 +    /* 
63072 +     * Compute my nodeid and number of nodes in the machine
63073 +     * from the routedown and the number of downlinks at each level.
63074 +     */
63075 +    for(nodeid=0, lvl = level - 1; lvl >= 0; lvl--)
63076 +    {
63077 +       if (lvl) nodeid = ((nodeid + routedown[lvl]) * (8-numUpLinks[lvl-1]));  
63078 +       else     nodeid += routedown[0];
63079 +    }
63080 +
63081 +    for (numnodes = 1, lvl = 0; lvl < level; lvl++)
63082 +       numnodes *= (8 - numUpLinks[lvl]);
63083 +
63084 +    sprintf (rail->Name, "ep%d[%d]", rail->Number, nodeid);
63085 +
63086 +    if (randomRoutingDisabled & ((1 << (level-1))-1))
63087 +       printk ("%s: nodeid=%d level=%d numnodes=%d (random routing disabled 0x%x)\n", 
63088 +               rail->Name, nodeid, level, numnodes, randomRoutingDisabled);
63089 +    else if (partial)
63090 +       printk ("%s: nodeid=%d level=%d numnodes=%d (random routing ok)\n",
63091 +               rail->Name, nodeid, level, numnodes);
63092 +    else
63093 +       printk ("%s: nodeid=%d level=%d numnodes=%d\n",
63094 +               rail->Name, nodeid, level, numnodes);
63095 +
63096 +    pos->pos_mode               = ELAN_POS_MODE_SWITCHED;
63097 +    pos->pos_nodeid              = nodeid;
63098 +    pos->pos_levels              = level;
63099 +    pos->pos_nodes               = numnodes;
63100 +    pos->pos_random_disabled     = randomRoutingDisabled;
63101 +
63102 +    for(lvl = 0; lvl < level; lvl++)
63103 +       pos->pos_arity[level -lvl - 1] = (8-numUpLinks[lvl]);
63104 +    pos->pos_arity[level] = 1;                         /* XXXX why does this need to be 1 ? */
63105 +    
63106 +    return 0;
63107 +}
63108 +
63109 +/*
63110 + * broadcast top is invalid if it is not set to the number of downlinks-1,
63111 + * or at the topmost level it is less than ndown-1.
63112 + */
63113 +#define BCAST_TOP_INVALID(lvl, bcast, ndown)   ((lvl) == 0 ? (bcast) < ((ndown)-1) : (bcast) != ((ndown) - 1))
63114 +
63115 +void
63116 +CheckPosition (EP_RAIL *rail)
63117 +{
63118 +    ELAN_POSITION *pos     = &rail->Position;
63119 +    unsigned int   nodeid  = pos->pos_nodeid;
63120 +    unsigned int   invalid = 0;
63121 +    unsigned int   changed = 0;
63122 +    int lvl, slvl;
63123 +
63124 +    if (! PositionCheck)
63125 +       return;
63126 +
63127 +    if (rail->Operations.CheckPosition(rail))          /* is update ready for this rail */
63128 +    {
63129 +       EPRINTF2 (DBG_ROUTETABLE, "%s: check position: SwitchProbeLevel=%d\n", rail->Name, rail->SwitchProbeLevel);
63130 +
63131 +       for (lvl = 0, slvl = pos->pos_levels-1; lvl <= rail->SwitchProbeLevel; lvl++, slvl--)
63132 +       {
63133 +           EP_SWITCHSTATE *state  = &rail->SwitchState[lvl];
63134 +           EP_SWITCHSTATE *lstate = &rail->SwitchLast[lvl];
63135 +           unsigned int    ndown  = pos->pos_arity[slvl];
63136 +           unsigned int    upmask = (0xFF << ndown) & 0xFF;
63137 +           unsigned int    mylink = nodeid % ndown;
63138 +           unsigned int    error  = 0;
63139 +           unsigned int    binval = 0;
63140 +
63141 +           nodeid /= ndown;
63142 +
63143 +           /*
63144 +            * broadcast top is invalid if it is not set to the number of downlinks-1,
63145 +            * or at the topmost level it is less than ndown-1.
63146 +            */
63147 +           if (BCAST_TOP_INVALID(lvl, state->bcast, ndown) || (state->LNR & upmask) == upmask)
63148 +           {
63149 +               /* no way up from here - we'd better be at the top */
63150 +               if (lvl != (pos->pos_levels-1))
63151 +               {
63152 +                   if (state->bcast != (ndown-1))
63153 +                       printk ("%s: invalid broadcast top %d at level %d\n", rail->Name, state->bcast, lvl);
63154 +                   else if ((state->LNR & upmask) == upmask && (lstate->LNR & upmask) == upmask)
63155 +                       printk ("%s: no way up to switch at level %d (turned off ?)\n", rail->Name, lvl+1);
63156 +               }
63157 +               else
63158 +               {
63159 +                   if (state->linkid != mylink)
63160 +                       printk ("%s: moved at top level was connected to link %d now connected to %d\n", rail->Name, mylink, state->linkid);
63161 +               }
63162 +
63163 +               if (state->linkid != mylink)
63164 +                   error++;
63165 +               
63166 +               if (BCAST_TOP_INVALID (lvl, state->bcast, ndown))
63167 +                   binval++;
63168 +           }
63169 +           else
63170 +           {
63171 +               if (state->linkid != mylink)
63172 +               {
63173 +                   if (state->linkid != rail->SwitchLast[lvl].linkid)
63174 +                       printk ("%s: moved at lvl %d was connected to link %d now connected to %d\n", rail->Name, lvl, mylink, state->linkid);
63175 +                       
63176 +                   error++;
63177 +               }
63178 +           }
63179 +
63180 +           if (error == 0 && invalid == 0)
63181 +               rail->SwitchProbeTick[lvl] = lbolt;
63182 +           
63183 +           EPRINTF10 (DBG_ROUTETABLE, "%s:   lvl=%d (slvl=%d) linkid=%d bcast=%d lnr=%02x uplink=%d : error=%d binval=%d invalid=%d\n", 
63184 +                      rail->Name, lvl, slvl, state->linkid, state->bcast, state->LNR, state->uplink, error, binval, invalid);
63185 +
63186 +           invalid |= (error | binval);
63187 +       }
63188 +       
63189 +       for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
63190 +           if (rail->SwitchState[lvl].uplink != rail->SwitchLast[lvl].uplink)
63191 +               changed++;
63192 +
63193 +       if (changed)
63194 +       {
63195 +           printk ("%s: broadcast tree has changed from", rail->Name);
63196 +           for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
63197 +               printk ("%c%d", lvl == 0 ? ' ' : ',', rail->SwitchLast[lvl].uplink);
63198 +
63199 +           for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
63200 +               printk ("%s%d", lvl == 0 ? " to " : ",", rail->SwitchState[lvl].uplink);
63201 +           printk ("\n");
63202 +       }
63203 +
63204 +       if (rail->SwitchProbeLevel > 0)
63205 +           bcopy (rail->SwitchState, rail->SwitchLast, rail->SwitchProbeLevel * sizeof (EP_SWITCHSTATE));
63206 +    }
63207 +
63208 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
63209 +    {
63210 +       EPRINTF4 (DBG_ROUTETABLE, "%s: level %d lbolt=%lx ProbeLevelTick=%lx\n",
63211 +                 rail->Name, lvl, lbolt, rail->SwitchProbeTick[lvl]);
63212 +       
63213 +       if (AFTER (lbolt, rail->SwitchProbeTick[lvl] + EP_POSITION_TIMEOUT))
63214 +       {
63215 +           if (lvl < rail->SwitchBroadcastLevel+1)
63216 +           {
63217 +               if (lvl == 0)
63218 +                   printk ("%s: cable disconnected\n", rail->Name);
63219 +               else
63220 +                   printk ("%s: broadcast level has dropped to %d (should be %d)\n",
63221 +                           rail->Name, lvl, rail->Position.pos_levels);
63222 +           }
63223 +           break;
63224 +       }
63225 +    }
63226 +    
63227 +    if (lvl > rail->SwitchBroadcastLevel+1)
63228 +    {
63229 +       if (rail->SwitchBroadcastLevel < 0)
63230 +           printk ("%s: cable reconnected\n", rail->Name);
63231 +       if (lvl == rail->Position.pos_levels)
63232 +           printk ("%s: broadcast level has recovered\n", rail->Name);
63233 +       else
63234 +           printk ("%s: broadcast level has recovered to %d (should be %d)\n", 
63235 +                   rail->Name, lvl, rail->Position.pos_levels);
63236 +    }
63237 +    
63238 +    if (rail->SwitchBroadcastLevel != (lvl - 1))
63239 +    {
63240 +       EPRINTF2 (DBG_ROUTETABLE, "%s: setting SwitchBroadcastLevel to %d\n", rail->Name, lvl-1);
63241 +       
63242 +       rail->SwitchBroadcastLevel     = lvl - 1;
63243 +       rail->SwitchBroadcastLevelTick = lbolt;
63244 +    }
63245 +}
63246 +
63247 +
63248 +/*
63249 + * Local variables:
63250 + * c-file-style: "stroustrup"
63251 + * End:
63252 + */
63253 Index: linux-2.4.21/drivers/net/qsnet/ep/probenetwork_elan3.c
63254 ===================================================================
63255 --- linux-2.4.21.orig/drivers/net/qsnet/ep/probenetwork_elan3.c 2004-02-23 16:02:56.000000000 -0500
63256 +++ linux-2.4.21/drivers/net/qsnet/ep/probenetwork_elan3.c      2005-06-01 23:12:54.677427248 -0400
63257 @@ -0,0 +1,298 @@
63258 +/*
63259 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63260 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63261 + *
63262 + *    For licensing information please see the supplied COPYING file
63263 + *
63264 + */
63265 +
63266 +#ident "@(#)$Id: probenetwork_elan3.c,v 1.40 2004/04/15 12:30:08 david Exp $"
63267 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan3.c,v $ */
63268 +
63269 +#include <qsnet/kernel.h>
63270 +
63271 +#include <elan/kcomm.h>
63272 +
63273 +#include "kcomm_vp.h"
63274 +#include "kcomm_elan3.h"
63275 +#include "debug.h"
63276 +
63277 +#include <elan3/intrinsics.h>
63278 +
63279 +static void ep3_probe_event (EP3_RAIL *rail, void *arg);
63280 +static EP3_COOKIE_OPS ep3_probe_ops = 
63281 +{
63282 +    ep3_probe_event
63283 +} ;
63284 +
63285 +int
63286 +ep3_init_probenetwork (EP3_RAIL *rail)
63287 +{
63288 +    sdramaddr_t              stack;
63289 +    E3_Addr           sp;
63290 +    E3_BlockCopyEvent event;
63291 +    int               i;
63292 +
63293 +    if (! (stack = ep_alloc_elan (&rail->Generic, EP3_STACK_SIZE, 0, &rail->ProbeStack)))
63294 +       return -ENOMEM;
63295 +
63296 +    spin_lock_init (&rail->ProbeLock);
63297 +    kcondvar_init (&rail->ProbeWait);
63298 +
63299 +    /* Initialise the probe command structure */
63300 +    for (i = 0; i < TR_TRACEROUTE_ENTRIES; i++)
63301 +       elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[i]), 0);
63302 +    for (i = 0; i < TR_TRACEROUTE_ENTRIES; i++)
63303 +       elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[i]), 1);
63304 +    
63305 +    RegisterCookie (&rail->CookieTable, &rail->ProbeCookie, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeDone), &ep3_probe_ops, rail);
63306 +    
63307 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeStart.ev_Type), 0);
63308 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeStart.ev_Count), 0);
63309 +
63310 +    EP3_INIT_COPY_EVENT (event, rail->ProbeCookie, rail->RailMainAddr + offsetof (EP3_RAIL_MAIN, ProbeDone), 1);
63311 +    elan3_sdram_copyl_to_sdram (rail->Device, &event, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeDone), sizeof (E3_BlockCopyEvent));
63312 +
63313 +    rail->RailMain->ProbeDone = EP3_EVENT_FREE;
63314 +
63315 +    sp = ep3_init_thread (rail->Device, ep_symbol (&rail->ThreadCode, "kcomm_probe"),
63316 +                         rail->ProbeStack, stack, EP3_STACK_SIZE,
63317 +                         3, rail->CommandPortAddr, rail->RailElanAddr, rail->RailMainAddr);
63318 +    
63319 +    IssueRunThread (rail, sp);
63320 +
63321 +    return 0;
63322 +}
63323 +
63324 +void
63325 +ep3_destroy_probenetwork (EP3_RAIL *rail)
63326 +{
63327 +    if (rail->ProbeStack == (sdramaddr_t) 0)
63328 +       return;
63329 +
63330 +    /* XXXX: ensure that the network probe thread is stopped */
63331 +
63332 +    DeregisterCookie (&rail->CookieTable, &rail->ProbeCookie);
63333 +
63334 +    kcondvar_destroy (&rail->ProbeWait);
63335 +    spin_lock_destroy (&rail->ProbeLock);
63336 +    
63337 +    ep_free_elan (&rail->Generic, rail->ProbeStack, EP3_STACK_SIZE);
63338 +}
63339 +
63340 +static void
63341 +ep3_probe_event (EP3_RAIL *rail, void *arg)
63342 +{
63343 +    unsigned long flags;
63344 +
63345 +    spin_lock_irqsave (&rail->ProbeLock, flags);
63346 +    rail->ProbeDone = 1;
63347 +    kcondvar_wakeupone (&rail->ProbeWait, &rail->ProbeLock);
63348 +    spin_unlock_irqrestore (&rail->ProbeLock, flags);
63349 +}
63350 +
63351 +int
63352 +ep3_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw)
63353 +{
63354 +    EP3_RAIL      *rail     = (EP3_RAIL *) r;
63355 +    EP3_RAIL_MAIN *railMain = rail->RailMain;
63356 +    sdramaddr_t    railElan = rail->RailElan;
63357 +    E3_uint16      flits[MAX_FLITS];
63358 +    E3_uint32      result;
63359 +    int                   nflits;
63360 +    unsigned long  flags;
63361 +
63362 +    spin_lock_irqsave (&rail->ProbeLock, flags);
63363 +
63364 +    nflits = GenerateProbeRoute ( flits, nodeid, level, linkup, linkdown, 0);
63365 +           
63366 +    if (LoadRoute (rail->Device, rail->RouteTable, EP_VP_PROBE(level), ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
63367 +    {
63368 +       EPRINTF0 (DBG_ROUTETABLE, "ProbeRoute: cannot load route entry\n");
63369 +       spin_unlock_irqrestore (&rail->ProbeLock, flags);
63370 +       return (EINVAL);
63371 +    }
63372 +
63373 +    do {
63374 +       /* Initialise the probe source to include our partially computed nodeid */
63375 +       elan3_sdram_writew (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[TR_TRACEROUTE_ENTRIES-1]), nodeid);
63376 +       elan3_sdram_writew (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[TR_TRACEROUTE_ENTRIES-1]), nodeid);
63377 +
63378 +       /* Initialise the count result etc */
63379 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeType), PROBE_SINGLE);
63380 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeLevel), level);
63381 +
63382 +       railMain->ProbeResult  = -1;
63383 +           
63384 +       /* Clear the receive area */
63385 +       bzero (railMain->ProbeDest0, sizeof (railMain->ProbeDest0));
63386 +       bzero (railMain->ProbeDest1, sizeof (railMain->ProbeDest1));
63387 +    
63388 +       /* Re-arm the completion event */
63389 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Count), 1);
63390 +       railMain->ProbeDone = EP3_EVENT_ACTIVE;
63391 +       rail->ProbeDone = 0;
63392 +
63393 +       /* And wakeup the thread to do the probe */
63394 +       IssueSetevent (rail, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeStart));
63395 +
63396 +       /* Now wait for it to complete */
63397 +       while (! rail->ProbeDone)
63398 +           kcondvar_wait (&rail->ProbeWait, &rail->ProbeLock, &flags);
63399 +
63400 +       /* wait for block copy event to flush write buffers */
63401 +       while (! EP3_EVENT_FIRED (rail->ProbeCookie, railMain->ProbeDone))
63402 +           if (! EP3_EVENT_FIRING(rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone), rail->ProbeCookie, railMain->ProbeDone))
63403 +               panic ("ProbeRoute: network probe event failure\n");
63404 +
63405 +       result = railMain->ProbeResult;
63406 +
63407 +       if (result == C_ACK_ERROR)
63408 +           kcondvar_timedwait (&rail->ProbeWait, &rail->ProbeLock, &flags, lbolt + (hz/8));
63409 +       
63410 +       railMain->ProbeDone = EP3_EVENT_FREE;
63411 +
63412 +    } while (result != C_ACK_OK && --attempts);
63413 +
63414 +    if (result == C_ACK_OK)
63415 +    {
63416 +       if (railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != nodeid ||
63417 +           railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != nodeid)
63418 +       {
63419 +           printk ("%s: lost nodeid at level %d switch %d - %d != %d\n", rail->Generic.Name, level, sw,
63420 +                   railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1], nodeid);
63421 +
63422 +           result = C_ACK_ERROR;
63423 +       }
63424 +       else
63425 +       {
63426 +           E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - level - 1];
63427 +           E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - level - 1];
63428 +               
63429 +           EPRINTF7 (DBG_PROBE, "%s: level %d switch %d - linkid=%d bcast=%d LNR=%02x%s\n", 
63430 +                     rail->Generic.Name, level, sw, TR_TRACEROUTE0_LINKID(val0),
63431 +                     TR_TRACEROUTE1_BCAST_TOP(val1), TR_TRACEROUTE0_LNR(val0),
63432 +                     TR_TRACEROUTE0_REVID(val0) ? "" : " RevA Part");
63433 +           
63434 +           lsw->lnr     = TR_TRACEROUTE0_LNR(val0);
63435 +           lsw->link    = TR_TRACEROUTE0_LINKID(val0);
63436 +           lsw->bcast   = TR_TRACEROUTE1_BCAST_TOP(val1);
63437 +           lsw->invalid = (TR_TRACEROUTE0_REVID(val0) == 0);
63438 +       }
63439 +    }
63440 +    spin_unlock_irqrestore (&rail->ProbeLock, flags);
63441 +    
63442 +    return (result == C_ACK_OK);
63443 +}
63444 +
63445 +void
63446 +ep3_probe_position_found (EP3_RAIL *rail, ELAN_POSITION *pos)
63447 +{
63448 +    E3_uint16  flits[MAX_FLITS];
63449 +    int        lvl, nflits;
63450 +    
63451 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
63452 +    {
63453 +       nflits = GenerateCheckRoute (pos, flits, pos->pos_levels - lvl - 1, 0);
63454 +
63455 +       if (LoadRoute (rail->Device, rail->Ctxt->RouteTable, EP_VP_PROBE(lvl), ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
63456 +           panic ("ep3_probe_position_found: cannot load probe route entry\n");
63457 +    }
63458 +    
63459 +    /* Initialise the traceroute source data with our nodeid */
63460 +    elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[TR_TRACEROUTE_ENTRIES-1]), pos->pos_nodeid);
63461 +    elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[TR_TRACEROUTE_ENTRIES-1]), pos->pos_nodeid);
63462 +}
63463 +
63464 +int
63465 +ep3_check_position (EP_RAIL *r)
63466 +{
63467 +    EP3_RAIL      *rail     = (EP3_RAIL *) r;
63468 +    EP3_RAIL_MAIN *railMain = rail->RailMain;
63469 +    sdramaddr_t    railElan = rail->RailElan;
63470 +    ELAN_POSITION *pos      = &rail->Generic.Position;
63471 +    unsigned int   level    = rail->RailMain->ProbeLevel;
63472 +    unsigned int   updated  = EP3_EVENT_FIRED (rail->ProbeCookie, railMain->ProbeDone);
63473 +    unsigned int   lvl;
63474 +
63475 +    if (updated)
63476 +    {
63477 +       if (railMain->ProbeResult != C_ACK_OK)
63478 +       {
63479 +           EPRINTF2 (DBG_PROBE, "%s: CheckNetworkPosition: packet nacked result=%d\n", rail->Generic.Name, railMain->ProbeResult); 
63480 +           
63481 +           rail->Generic.SwitchProbeLevel = -1;
63482 +       }
63483 +       else
63484 +       {
63485 +           E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - 2*(level+1)];
63486 +           E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - 2*(level+1)];
63487 +
63488 +           if (val0 != pos->pos_nodeid || val1 != pos->pos_nodeid)
63489 +           {
63490 +               static unsigned long printed = 0;
63491 +
63492 +               /* We've received a packet from another node - this probably means
63493 +                * that we've moved */
63494 +               if ((lbolt - printed) > (HZ*10))
63495 +               {
63496 +                   printk ("%s: ep3_check_position - level %d lost nodeid\n", rail->Generic.Name, level);
63497 +                   printed = lbolt;
63498 +               }
63499 +
63500 +               rail->Generic.SwitchProbeLevel = -1;
63501 +           }
63502 +           else
63503 +           {
63504 +               for (lvl = 0; lvl <= level; lvl++)
63505 +               {
63506 +                   E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
63507 +                   E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
63508 +
63509 +                   rail->Generic.SwitchState[lvl].linkid = TR_TRACEROUTE0_LINKID(val0);
63510 +                   rail->Generic.SwitchState[lvl].LNR    = TR_TRACEROUTE0_LNR(val0);
63511 +                   rail->Generic.SwitchState[lvl].bcast  = TR_TRACEROUTE1_BCAST_TOP(val1);
63512 +                   rail->Generic.SwitchState[lvl].uplink = 4;
63513 +
63514 +                   EPRINTF5 (DBG_PROBE, " --- lvl %d: linkid=%d LNR=%x bcast=%d uplink=%d\n", lvl, rail->Generic.SwitchState[lvl].linkid,
63515 +                             rail->Generic.SwitchState[lvl].LNR, rail->Generic.SwitchState[lvl].bcast ,rail->Generic.SwitchState[lvl].uplink);
63516 +               }
63517 +               rail->Generic.SwitchProbeLevel = level;
63518 +           }
63519 +       }
63520 +
63521 +       railMain->ProbeDone = EP3_EVENT_FREE;
63522 +    }
63523 +
63524 +    if (railMain->ProbeDone == EP3_EVENT_FREE)
63525 +    {
63526 +       if (rail->Generic.SwitchBroadcastLevel == rail->Generic.Position.pos_levels-1)
63527 +           level = rail->Generic.Position.pos_levels - 1;
63528 +       else
63529 +           level = rail->Generic.SwitchBroadcastLevel + 1;
63530 +
63531 +       EPRINTF2 (DBG_PROBE, "%s: ep3_check_postiion: level %d\n", rail->Generic.Name, level);
63532 +
63533 +       /* Initialise the count result etc */
63534 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeType), PROBE_MULTIPLE);
63535 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeLevel), level);
63536 +
63537 +       railMain->ProbeResult = -1;
63538 +       railMain->ProbeLevel  = -1;
63539 +       
63540 +       /* Clear the receive area */
63541 +       bzero (railMain->ProbeDest0, sizeof (railMain->ProbeDest0));
63542 +       bzero (railMain->ProbeDest1, sizeof (railMain->ProbeDest1));
63543 +       
63544 +       /* Re-arm the completion event */
63545 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Type), EV_TYPE_BCOPY);
63546 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Count), 1);
63547 +
63548 +       railMain->ProbeDone = EP3_EVENT_ACTIVE;
63549 +       
63550 +       IssueSetevent (rail, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeStart));
63551 +    }
63552 +
63553 +    return updated;
63554 +}
63555 +
63556 Index: linux-2.4.21/drivers/net/qsnet/ep/probenetwork_elan3_thread.c
63557 ===================================================================
63558 --- linux-2.4.21.orig/drivers/net/qsnet/ep/probenetwork_elan3_thread.c  2004-02-23 16:02:56.000000000 -0500
63559 +++ linux-2.4.21/drivers/net/qsnet/ep/probenetwork_elan3_thread.c       2005-06-01 23:12:54.678427096 -0400
63560 @@ -0,0 +1,98 @@
63561 +/*
63562 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63563 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63564 + *
63565 + *    For licensing information please see the supplied COPYING file
63566 + *
63567 + */
63568 +
63569 +#ident "@(#)$Id: probenetwork_elan3_thread.c,v 1.19 2004/03/24 11:32:56 david Exp $"
63570 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan3_thread.c,v $*/
63571 +
63572 +#include <elan3/e3types.h>
63573 +#include <elan3/events.h>
63574 +#include <elan3/elanregs.h>
63575 +#include <elan3/intrinsics.h>
63576 +
63577 +#include "kcomm_vp.h"
63578 +#include "kcomm_elan3.h"
63579 +
63580 +static int
63581 +kcomm_probe_vp (EP3_RAIL_ELAN *railElan, EP3_RAIL_MAIN *railMain, int vp, int attempts, int timeouts)
63582 +{
63583 +    int rc;
63584 +
63585 +    /* Since we use %g1 to hold the "rxd" so the trap handler can
63586 +     * complete the envelope processing - we pass zero to indicate we're
63587 +     * not a receiver thread */
63588 +    asm volatile ("mov %g0, %g1");
63589 +
63590 +    while (attempts && timeouts)
63591 +    {
63592 +       c_open (vp);
63593 +       c_sendmem (TR_TRACEROUTE, &railMain->ProbeDest0, &railElan->ProbeSource0);
63594 +       c_sendmem (TR_TRACEROUTE, &railMain->ProbeDest1, &railElan->ProbeSource1);
63595 +       c_sendtrans0 (TR_SENDACK | TR_SETEVENT, (E3_Addr) 0);
63596 +       
63597 +       switch (rc = c_close())
63598 +       {
63599 +       case C_ACK_OK:
63600 +           return (C_ACK_OK);
63601 +           
63602 +       case C_ACK_DISCARD:
63603 +           attempts--;
63604 +           break;
63605 +
63606 +       default:                                        /* output timeout */
63607 +           timeouts--;
63608 +       }
63609 +
63610 +       c_break_busywait();
63611 +    }
63612 +
63613 +    return (timeouts == 0 ? C_ACK_ERROR : C_ACK_DISCARD);
63614 +}
63615 +
63616 +void
63617 +kcomm_probe (E3_CommandPort *cport, EP3_RAIL_ELAN *railElan, EP3_RAIL_MAIN *railMain)
63618 +{
63619 +    int level;
63620 +
63621 +    for (;;)
63622 +    {
63623 +       c_waitevent (&railElan->ProbeStart, 1);
63624 +
63625 +       switch (railElan->ProbeType)
63626 +       {
63627 +       case PROBE_SINGLE:
63628 +           railMain->ProbeResult = kcomm_probe_vp (railElan, railMain, EP_VP_PROBE(railElan->ProbeLevel),
63629 +                                                   PROBE_SINGLE_ATTEMPTS, PROBE_SINGLE_TIMEOUTS);
63630 +
63631 +           cport->SetEvent = (E3_Addr) &railElan->ProbeDone;
63632 +           break;
63633 +
63634 +       case PROBE_MULTIPLE:
63635 +           for (level = railElan->ProbeLevel; level >= 0; level--)
63636 +           {
63637 +               if (kcomm_probe_vp (railElan, railMain, EP_VP_PROBE(level),
63638 +                                   PROBE_MULTIPLE_ATTEMPTS, PROBE_MULTIPLE_TIMEOUTS) == C_ACK_OK)
63639 +               {
63640 +                   railMain->ProbeLevel  = level;
63641 +                   railMain->ProbeResult = C_ACK_OK;
63642 +                   break;
63643 +               }
63644 +
63645 +               c_break_busywait();
63646 +           }
63647 +           cport->SetEvent = (E3_Addr) &railElan->ProbeDone;
63648 +           break;
63649 +       }
63650 +
63651 +    }
63652 +}
63653 +
63654 +/*
63655 + * Local variables:
63656 + * c-file-style: "stroustrup"
63657 + * End:
63658 + */
63659 Index: linux-2.4.21/drivers/net/qsnet/ep/probenetwork_elan4.c
63660 ===================================================================
63661 --- linux-2.4.21.orig/drivers/net/qsnet/ep/probenetwork_elan4.c 2004-02-23 16:02:56.000000000 -0500
63662 +++ linux-2.4.21/drivers/net/qsnet/ep/probenetwork_elan4.c      2005-06-01 23:12:54.679426944 -0400
63663 @@ -0,0 +1,396 @@
63664 +/*
63665 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63666 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63667 + *
63668 + *    For licensing information please see the supplied COPYING file
63669 + *
63670 + */
63671 +
63672 +#ident "@(#)$Id: probenetwork_elan4.c,v 1.9 2004/08/19 11:05:03 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
63673 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan4.c,v $*/
63674 +
63675 +#include <qsnet/kernel.h>
63676 +
63677 +#include <elan/kcomm.h>
63678 +
63679 +#include "kcomm_vp.h"
63680 +#include "kcomm_elan4.h"
63681 +#include "debug.h"
63682 +
63683 +#include <elan4/trtype.h>
63684 +#include <elan4/commands.h>
63685 +
63686 +static void
63687 +probe_interrupt (EP4_RAIL *rail, void *arg)
63688 +{
63689 +    unsigned long flags;
63690 +
63691 +    spin_lock_irqsave (&rail->r_probe_lock, flags);
63692 +    rail->r_probe_done = 1;
63693 +    kcondvar_wakeupone (&rail->r_probe_wait, &rail->r_probe_lock);
63694 +    spin_unlock_irqrestore (&rail->r_probe_lock, flags);
63695 +}
63696 +
63697 +int
63698 +ep4_probe_init (EP4_RAIL *rail)
63699 +{
63700 +    spin_lock_init (&rail->r_probe_lock);
63701 +    kcondvar_init (&rail->r_probe_wait);
63702 +
63703 +    rail->r_probe_cq = ep4_alloc_ecq (rail, CQ_Size1K);
63704 +
63705 +    if (rail->r_probe_cq == NULL)
63706 +       return -ENOMEM;
63707 +
63708 +    ep4_register_intcookie (rail, &rail->r_probe_intcookie, rail->r_elan_addr, probe_interrupt, rail);
63709 +
63710 +    return 0;
63711 +}
63712 +
63713 +void
63714 +ep4_probe_destroy (EP4_RAIL *rail)
63715 +{
63716 +    if (rail->r_probe_cq)
63717 +       ep4_free_ecq (rail, rail->r_probe_cq);
63718 +
63719 +    if (rail->r_probe_intcookie.int_arg == NULL)
63720 +       return;
63721 +    ep4_deregister_intcookie (rail, &rail->r_probe_intcookie);
63722 +
63723 +    kcondvar_destroy (&rail->r_probe_wait);
63724 +    spin_lock_destroy (&rail->r_probe_lock);
63725 +}
63726 +
63727 +#define LINKDOWN(nodeid, level)        ((nodeid >> (level << 1)) & 3)
63728 +#define PROBE_PATTERN0(nodeid) (0xaddebabe ^ nodeid)
63729 +#define PROBE_PATTERN1(nodeid)  (0xfeedbeef ^ nodeid)
63730 +
63731 +#define EP4_PROBE_RETRIES      4
63732 +
63733 +int
63734 +ep4_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw)
63735 +{
63736 +    EP4_RAIL      *rail  = (EP4_RAIL *) r;
63737 +    EP4_RAIL_MAIN *rmain = rail->r_main;
63738 +    E4_uint16      first = 0;
63739 +    int                   rb    = 0;
63740 +
63741 +    E4_uint8  packed[ROUTE_NUM_PACKED];
63742 +    E4_VirtualProcessEntry route;
63743 +    unsigned long flags;
63744 +    int i;
63745 +
63746 +    for (i = 0; i < ROUTE_NUM_PACKED; i++)
63747 +       packed[i] = 0;
63748 +
63749 +    /* Generate "up" routes */
63750 +    for (i = 0; i < level; i++)
63751 +       if (first == 0)
63752 +           first = linkup ? FIRST_ROUTE(linkup[i]) : FIRST_ADAPTIVE;
63753 +       else
63754 +           packed[rb++] = linkup ? PACKED_ROUTE(linkup[i]) : PACKED_ADAPTIVE;
63755 +    
63756 +    /* Generate a "to-me" route down */
63757 +    if (first == 0)
63758 +       first = FIRST_MYLINK;
63759 +    else
63760 +       packed[rb++] = PACKED_MYLINK;
63761 +    
63762 +    /* Generate the "down" routes */
63763 +    for (i = level-1; i >= 0; i--)
63764 +       packed[rb++] = linkdown ? PACKED_ROUTE(linkdown[i]) : PACKED_ROUTE(LINKDOWN(nodeid, i));
63765 +    
63766 +    /* Pack up the routes into the virtual process entry */
63767 +    route.Values[0] = first | FIRST_HIGH_PRI | FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3);
63768 +    route.Values[1] = ROUTE_CTXT_VALUE(ELAN4_KCOMM_CONTEXT_NUM);
63769 +
63770 +    for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
63771 +    {
63772 +       route.Values[0] |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
63773 +       route.Values[1] |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
63774 +    }
63775 +
63776 +    elan4_write_route (rail->r_ctxt.ctxt_dev, rail->r_routetable, EP_VP_PROBE(level), &route);
63777 +    
63778 +    while (attempts--)
63779 +    {
63780 +       rail->r_probe_done = 0;
63781 +
63782 +       /* generate the STEN packet - note we use a datatype of dword as we're copying to elan in dwords
63783 +        *   NB - no flow control is required, since the max packet size is less than the command queue
63784 +        *        size and it's dedicated for network probing.
63785 +        */
63786 +       
63787 +       elan4_guard   (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_RESET(EP4_PROBE_RETRIES));
63788 +       elan4_nop_cmd (rail->r_probe_cq->ecq_cq, 0);
63789 +       
63790 +       elan4_open_packet (rail->r_probe_cq->ecq_cq, OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_PROBE(level)));
63791 +       elan4_sendtransn  (rail->r_probe_cq->ecq_cq, TR_TRACEROUTE(TRACEROUTE_NDWORDS),
63792 +                          rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest0),
63793 +                          0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 
63794 +                          0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull | ((E4_uint64)PROBE_PATTERN0(nodeid) << 32));
63795 +       elan4_sendtransn  (rail->r_probe_cq->ecq_cq, TR_TRACEROUTE(TRACEROUTE_NDWORDS),
63796 +                          rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest1),
63797 +                          0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 
63798 +                          0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000000000001ull | ((E4_uint64)PROBE_PATTERN1(nodeid) << 32));
63799 +       elan4_sendtrans0  (rail->r_probe_cq->ecq_cq, TR_NOP_TRANS | TR_LAST_AND_SEND_ACK, 0);
63800 +
63801 +       elan4_guard           (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET(EP4_PROBE_RETRIES));
63802 +       elan4_write_dword_cmd (rail->r_probe_cq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_result), EP4_STATE_FINISHED);
63803 +
63804 +       elan4_guard            (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET(EP4_PROBE_RETRIES));
63805 +       elan4_write_dword_cmd  (rail->r_probe_cq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_result), EP4_STATE_FAILED);
63806 +
63807 +       elan4_interrupt_cmd   (rail->r_probe_cq->ecq_cq,  rail->r_probe_intcookie.int_val);
63808 +
63809 +       spin_lock_irqsave (&rail->r_probe_lock, flags);
63810 +       while (! rail->r_probe_done)
63811 +           kcondvar_wait (&rail->r_probe_wait, &rail->r_probe_lock, &flags);
63812 +       spin_unlock_irqrestore (&rail->r_probe_lock, flags);
63813 +
63814 +       if (rmain->r_probe_result == EP4_STATE_FINISHED)
63815 +       {
63816 +           if (rmain->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != PROBE_PATTERN0(nodeid) ||
63817 +               rmain->r_probe_dest1[TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != PROBE_PATTERN1(nodeid))
63818 +           {
63819 +               printk ("%s: lost nodeid at level %d switch %d - %d != %d\n", rail->r_generic.Name, level, sw,
63820 +                       rmain->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level)+1) - 1], PROBE_PATTERN0(nodeid));
63821 +           }
63822 +           else
63823 +           {
63824 +               E4_uint32 val0 = rmain->r_probe_dest0[TRACEROUTE_ENTRIES - level - 1];
63825 +               E4_uint32 val1 = rmain->r_probe_dest1[TRACEROUTE_ENTRIES - level - 1];
63826 +               
63827 +               lsw->lnr     = TR_TRACEROUTE0_LNR(val0);
63828 +               lsw->link    = TR_TRACEROUTE0_LINKID(val0);
63829 +               lsw->bcast   = TR_TRACEROUTE1_BCAST_TOP(val1);
63830 +               lsw->invalid = 0;
63831 +
63832 +               return 1;
63833 +           }
63834 +       }
63835 +
63836 +       rmain->r_probe_result = EP4_STATE_FREE;
63837 +    }
63838 +
63839 +    return 0;
63840 +}
63841 +
63842 +
63843 +void
63844 +ep4_probe_position_found (EP4_RAIL *rail, ELAN_POSITION *pos)
63845 +{
63846 +    ELAN4_DEV  *dev  = rail->r_ctxt.ctxt_dev;
63847 +    int         lvl;
63848 +
63849 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
63850 +    {
63851 +       /* Initialise the "probe" route to use the broadcast tree */
63852 +       ELAN_POSITION *pos     = &rail->r_generic.Position;
63853 +       unsigned char *arityp  = &pos->pos_arity[pos->pos_levels - 1];
63854 +       unsigned int   spanned = *arityp;
63855 +       E4_uint16      first   = 0;
63856 +       int            rb      = 0;
63857 +       
63858 +       E4_uint8  packed[ROUTE_NUM_PACKED];
63859 +       E4_VirtualProcessEntry route;
63860 +       int i;
63861 +       
63862 +       for (i = 0; i < ROUTE_NUM_PACKED; i++)
63863 +           packed[i] = 0;
63864 +
63865 +       /* Generate "up" routes */
63866 +       for (i = 0; i < lvl; i++, spanned *= *(--arityp))
63867 +       {
63868 +           if (first == 0)
63869 +               first = FIRST_BCAST_TREE;
63870 +           else
63871 +               packed[rb++] = PACKED_BCAST_TREE;
63872 +       }
63873 +
63874 +       /* Generate a "to-me" route down */
63875 +       if (first == 0)
63876 +           first = FIRST_MYLINK;
63877 +       else
63878 +           packed[rb++] = PACKED_MYLINK;
63879 +
63880 +       spanned /= *arityp++;
63881 +
63882 +       /* Generate the "down" routes */
63883 +       for (i = lvl-1; i >= 0; i--)
63884 +       {
63885 +           spanned /= *arityp;
63886 +           packed[rb++] = PACKED_ROUTE((pos->pos_nodeid / spanned) % *arityp);
63887 +           arityp++;
63888 +       }
63889 +
63890 +    
63891 +       /* Pack up the routes into the virtual process entry */
63892 +       route.Values[0] = first | FIRST_HIGH_PRI | FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3);
63893 +       route.Values[1] = ROUTE_CTXT_VALUE(ELAN4_KCOMM_CONTEXT_NUM);
63894 +       
63895 +       for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
63896 +       {
63897 +           route.Values[0] |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
63898 +           route.Values[1] |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
63899 +       }
63900 +       
63901 +       elan4_write_route (rail->r_ctxt.ctxt_dev, rail->r_routetable, EP_VP_PROBE(lvl), &route);
63902 +       
63903 +       /* Initialise "start" event for this level */
63904 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CountAndType),
63905 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_CHECK_STEN_NDWORDS));
63906 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CopySource),
63907 +                           rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl]));
63908 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CopyDest),
63909 +                           rail->r_probe_cq->ecq_addr);
63910 +
63911 +       /* Initialise command stream - reset the start event */
63912 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_reset_event_cmd),
63913 +                           WRITE_DWORD_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[lvl])));
63914 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_reset_event_value),
63915 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_CHECK_STEN_NDWORDS));
63916 +
63917 +       /* Initialise command stream - sten traceroute packet */
63918 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_open),
63919 +                           OPEN_STEN_PKT_CMD | OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_PROBE(lvl)));
63920 +
63921 +       /* Initialise command stream - traceroute 0 */
63922 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_traceroute0),
63923 +                           SEND_TRANS_CMD | (TR_TRACEROUTE(TRACEROUTE_NDWORDS) << 16));
63924 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_traceroute0),
63925 +                           rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest0));
63926 +       for (i = 0; i < (TRACEROUTE_NDWORDS-1); i++)
63927 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute0[i]),
63928 +                               0x0000000000000000ull);
63929 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute0[i]),
63930 +                           0x0000000000000000ull | ((E4_uint64) PROBE_PATTERN0(pos->pos_nodeid) << 32));
63931 +
63932 +       /* Initialise command stream - traceroute 1 */
63933 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_traceroute1),
63934 +                           SEND_TRANS_CMD | (TR_TRACEROUTE(TRACEROUTE_NDWORDS) << 16));
63935 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_traceroute1),
63936 +                           rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest1));
63937 +       for (i = 0; i < (TRACEROUTE_NDWORDS-1); i++)
63938 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute1[i]),
63939 +                               0x0000000100000001ull);
63940 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute1[i]),
63941 +                           0x0000000000000001ull | ((E4_uint64) PROBE_PATTERN1(pos->pos_nodeid) << 32));
63942 +
63943 +       /* Initialise command stream - null sendack */
63944 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_sendack),
63945 +                           SEND_TRANS_CMD | ((TR_NOP_TRANS | TR_LAST_AND_SEND_ACK) << 16));
63946 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_sendack),
63947 +                           0);
63948 +       
63949 +       /* Initialise command stream - guard ok, write done */
63950 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_guard_ok),
63951 +                           GUARD_CMD | GUARD_CHANNEL(1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET(EP4_PROBE_RETRIES));
63952 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_writedword_ok),
63953 +                           WRITE_DWORD_CMD | (rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_level)));
63954 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_value_ok),
63955 +                           lvl);
63956 +
63957 +       /* Initialise command stream - guard fail, chain to next or write done */
63958 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_guard_fail),
63959 +                           GUARD_CMD | GUARD_CHANNEL(1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET(EP4_PROBE_RETRIES));
63960 +
63961 +       if (lvl > 0)
63962 +       {
63963 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_fail),
63964 +                               SET_EVENT_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[lvl-1])));
63965 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_nop),
63966 +                               NOP_CMD);
63967 +       }
63968 +       else
63969 +       {
63970 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_fail),
63971 +                               WRITE_DWORD_CMD | (rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_level)));
63972 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_nop),
63973 +                               EP4_PROBE_FAILED);
63974 +       }
63975 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_nop_pad),
63976 +                           NOP_CMD);
63977 +    }
63978 +
63979 +    
63980 +    rail->r_main->r_probe_level = EP4_PROBE_ACTIVE;
63981 +
63982 +    mb();
63983 +    ep4_set_event_cmd (rail->r_probe_cq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[pos->pos_levels-1]));
63984 +}
63985 +
63986 +int
63987 +ep4_check_position (EP_RAIL *r)
63988 +{
63989 +    EP4_RAIL      *rail = (EP4_RAIL *) r;
63990 +    ELAN_POSITION *pos  = &rail->r_generic.Position;
63991 +    unsigned int level  = rail->r_main->r_probe_level;
63992 +    unsigned int lvl;
63993 +
63994 +    EPRINTF2 (DBG_PROBE, "%s: ep4_check_position: level=%lld\n", rail->r_generic.Name, rail->r_main->r_probe_level);
63995 +
63996 +    if (rail->r_main->r_probe_level != EP4_PROBE_ACTIVE)
63997 +    {
63998 +       if (rail->r_main->r_probe_level == EP4_PROBE_FAILED)
63999 +       {
64000 +           EPRINTF1 (DBG_PROBE, "%s: ep4_check_position: packets all nacked\n", rail->r_generic.Name);
64001 +
64002 +           rail->r_generic.SwitchProbeLevel = -1;
64003 +       }
64004 +       else
64005 +       {
64006 +           E4_uint32 val0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - 2*(level+1)];
64007 +           E4_uint32 val1  = rail->r_main->r_probe_dest1[TRACEROUTE_ENTRIES - 2*(level+1)];
64008 +
64009 +           if (val0 != PROBE_PATTERN0 (pos->pos_nodeid) || val1 != PROBE_PATTERN1 (pos->pos_nodeid))
64010 +           {
64011 +               static unsigned long printed = 0;
64012 +
64013 +               /* We've received a packet from another node - this probably means
64014 +                * that we've moved */
64015 +               if ((lbolt - printed) > (HZ*10))
64016 +               {
64017 +                   printk ("%s: ep4_check_position - level %d lost nodeid\n", rail->r_generic.Name, level);
64018 +                   printed = lbolt;
64019 +               }
64020 +
64021 +               rail->r_generic.SwitchProbeLevel = -1;
64022 +           }
64023 +           else
64024 +           {
64025 +               for (lvl = 0 ; lvl <= level; lvl++)
64026 +               {
64027 +                   E4_uint32 uval0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - lvl - 1];
64028 +                   E4_uint32 dval0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
64029 +                   E4_uint32 dval1  = rail->r_main->r_probe_dest1[TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
64030 +
64031 +                   rail->r_generic.SwitchState[lvl].linkid = TR_TRACEROUTE0_LINKID (dval0);
64032 +                   rail->r_generic.SwitchState[lvl].LNR    = TR_TRACEROUTE0_LNR(dval0);
64033 +                   rail->r_generic.SwitchState[lvl].bcast  = TR_TRACEROUTE1_BCAST_TOP (dval1);
64034 +                   rail->r_generic.SwitchState[lvl].uplink = TR_TRACEROUTE0_LINKID (uval0);
64035 +
64036 +                   EPRINTF5 (DBG_PROBE, " --- lvl %d: linkid=%d LNR=%x bcast=%d uplink=%d\n", lvl, rail->r_generic.SwitchState[lvl].linkid,
64037 +                             rail->r_generic.SwitchState[lvl].LNR, rail->r_generic.SwitchState[lvl].bcast ,rail->r_generic.SwitchState[lvl].uplink);
64038 +
64039 +               }
64040 +
64041 +               rail->r_generic.SwitchProbeLevel = level;
64042 +           }
64043 +       }
64044 +
64045 +       rail->r_main->r_probe_level = EP4_PROBE_ACTIVE;
64046 +       mb();
64047 +
64048 +       if (rail->r_generic.SwitchBroadcastLevel == rail->r_generic.Position.pos_levels-1)
64049 +           level = rail->r_generic.Position.pos_levels - 1;
64050 +       else
64051 +           level = rail->r_generic.SwitchBroadcastLevel + 1;
64052 +
64053 +       ep4_set_event_cmd (rail->r_probe_cq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[level]));
64054 +
64055 +       return 1;
64056 +    }
64057 +
64058 +    return 0;
64059 +}
64060 Index: linux-2.4.21/drivers/net/qsnet/ep/procfs_linux.c
64061 ===================================================================
64062 --- linux-2.4.21.orig/drivers/net/qsnet/ep/procfs_linux.c       2004-02-23 16:02:56.000000000 -0500
64063 +++ linux-2.4.21/drivers/net/qsnet/ep/procfs_linux.c    2005-06-01 23:12:54.680426792 -0400
64064 @@ -0,0 +1,693 @@
64065 +/*
64066 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64067 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
64068 + *
64069 + *    For licensing information please see the supplied COPYING file
64070 + *
64071 + */
64072 +
64073 +#ident "@(#)$Id: procfs_linux.c,v 1.53.2.4 2005/01/18 14:18:42 david Exp $"
64074 +/*      $Source: /cvs/master/quadrics/epmod/procfs_linux.c,v $*/
64075 +
64076 +#include <qsnet/kernel.h>
64077 +
64078 +#include <elan/kcomm.h>
64079 +#include <elan/epsvc.h>
64080 +#include <elan/epcomms.h>
64081 +
64082 +#include "cm.h"
64083 +#include "debug.h"
64084 +#include "conf_linux.h"
64085 +#include <linux/module.h>
64086 +#include <linux/wait.h>
64087 +#include <linux/poll.h>
64088 +
64089 +#include <qsnet/procfs_linux.h>
64090 +
64091 +struct proc_dir_entry *ep_procfs_root;
64092 +struct proc_dir_entry *ep_config_root;
64093 +
64094 +/*
64095 + * We provide a slightly "special" interface for /proc/elan/device%d/nodeset,
64096 + * so that it can be included in a "poll" system call.  On each "read" on the
64097 + * file, we generate a new nodeset if a) the previous one has been completely
64098 + * read and b) if it has changed since it was generated.
64099 + *
64100 + * Unfortunately ... this doesn't allow "tail -f" to work, since this uses
64101 + * fstat() on the fd, as we only hold the last nodeset string, we could not
64102 + * handle the case where two processes were reading at different rates.
64103 + * We could maybe have implemented this as a "sliding window", so that we 
64104 + * add a new nodeset string, when it has changed and someone reads past 
64105 + * end of the last one.   Then if someone read from before our "window"
64106 + * we would produce "padding" data.  The problem with this, is that a 
64107 + * simple "cat" on /proc/elan/device%d/nodeset will read the whole "file"
64108 + * which will be mostly padding !
64109 + *
64110 + * Just to note that the purpose of this interface is:
64111 + *    1) to allow cat /proc/elan/device%d/nodeset to show the current
64112 + *       nodeset.
64113 + *    2) to allow rms (or similar) to poll() on the file, and when the
64114 + *       nodeset changes read a new one.
64115 + *
64116 + * so ... we don't bother solving the troublesome "tail -f" problem.
64117 + */
64118 +
64119 +typedef struct nodeset_private
64120 +{
64121 +    struct nodeset_private *pr_next;
64122 +    EP_RAIL                *pr_rail;
64123 +    unsigned               pr_changed;
64124 +    char                  *pr_page;
64125 +    unsigned               pr_off;
64126 +    unsigned               pr_len;
64127 +} NODESET_PRIVATE;
64128 +
64129 +NODESET_PRIVATE   *ep_nodeset_list;
64130 +wait_queue_head_t  ep_nodeset_wait;
64131 +spinlock_t         ep_nodeset_lock;
64132 +
64133 +static int
64134 +proc_write_state(struct file *file, const char *buffer,
64135 +                unsigned long count, void *data)
64136 +{
64137 +    EP_RAIL *rail = (EP_RAIL *) data;
64138 +    char    tmpbuf[128];
64139 +    int     res;
64140 +
64141 +    if (count > sizeof (tmpbuf)-1)
64142 +       return (-EINVAL);
64143 +    
64144 +    MOD_INC_USE_COUNT;
64145 +    
64146 +    if (copy_from_user (tmpbuf, buffer, count))
64147 +       res = -EFAULT;
64148 +    else 
64149 +    {
64150 +       tmpbuf[count] = '\0';   
64151 +
64152 +       if (tmpbuf[count-1] == '\n')
64153 +           tmpbuf[count-1] = '\0';
64154 +
64155 +       if (! strcmp (tmpbuf, "start") && rail->State == EP_RAIL_STATE_UNINITIALISED)
64156 +           ep_start_rail (rail);
64157 +       
64158 +       if (! strcmp (tmpbuf, "stop") && rail->State > EP_RAIL_STATE_UNINITIALISED)
64159 +           ep_stop_rail (rail);
64160 +       
64161 +       if (! strcmp (tmpbuf, "offline") && rail->State > EP_RAIL_STATE_UNINITIALISED)
64162 +           cm_force_offline (rail, 1, CM_OFFLINE_PROCFS);
64163 +
64164 +       if (! strcmp (tmpbuf, "online") && rail->State > EP_RAIL_STATE_UNINITIALISED)
64165 +           cm_force_offline (rail, 0, CM_OFFLINE_PROCFS);
64166 +
64167 +       if (! strncmp (tmpbuf, "restart=", 8) && rail->State == EP_RAIL_STATE_RUNNING)
64168 +           cm_restart_node (rail, simple_strtol (tmpbuf + 8, NULL, 0));
64169 +
64170 +       if (! strncmp (tmpbuf, "panic=", 6))
64171 +           ep_panic_node (rail->System, simple_strtol(tmpbuf + 6, NULL, 0),
64172 +                          strchr (tmpbuf, ',') ? strchr(tmpbuf, ',') + 1 : "remote panic request");
64173 +
64174 +       if (! strncmp (tmpbuf, "raise=", 6) && rail->State > EP_RAIL_STATE_UNINITIALISED)
64175 +           rail->Operations.RaiseFilter (rail, simple_strtol (tmpbuf + 6, NULL, 0));
64176 +
64177 +       if (! strncmp (tmpbuf, "lower=", 6) && rail->State > EP_RAIL_STATE_UNINITIALISED)
64178 +           rail->Operations.LowerFilter (rail, simple_strtol (tmpbuf + 6, NULL, 0));
64179 +       
64180 +       res = count;
64181 +    }
64182 +
64183 +    MOD_DEC_USE_COUNT;
64184 +
64185 +    return (res);
64186 +}
64187 +
64188 +static int
64189 +proc_read_state(char *page, char **start, off_t off,
64190 +               int count, int *eof, void *data)
64191 +{
64192 +    EP_RAIL *rail = (EP_RAIL *) data;
64193 +    int     len;
64194 +
64195 +    switch (rail->State)
64196 +    {
64197 +    case EP_RAIL_STATE_UNINITIALISED:
64198 +       len = sprintf (page, "uninitialised\n");
64199 +       break;
64200 +    case EP_RAIL_STATE_STARTED:
64201 +       len = sprintf (page, "started\n");
64202 +       break;
64203 +    case EP_RAIL_STATE_RUNNING:
64204 +       len = sprintf (page, "running NodeId=%d NumNodes=%d\n", rail->Position.pos_nodeid, rail->Position.pos_nodes);
64205 +       break;
64206 +    case EP_RAIL_STATE_INCOMPATIBLE:
64207 +       len = sprintf (page, "incompatible NodeId=%d NumNodes=%d\n", rail->Position.pos_nodeid, rail->Position.pos_nodes);
64208 +       break;
64209 +    default:
64210 +       len = sprintf (page, "<unknown>\n");
64211 +       break;
64212 +    }
64213 +
64214 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
64215 +}
64216 +
64217 +static int
64218 +proc_write_display(struct file *file, const char *buffer,
64219 +                  unsigned long count, void *data)
64220 +{
64221 +    EP_RAIL *rail = (EP_RAIL *) data;
64222 +    char    tmpbuf[128];
64223 +    int     res;
64224 +
64225 +    if (count > sizeof (tmpbuf)-1)
64226 +       return (-EINVAL);
64227 +    
64228 +    MOD_INC_USE_COUNT;
64229 +    
64230 +    if (copy_from_user (tmpbuf, buffer, count))
64231 +       res = -EFAULT;
64232 +    else 
64233 +    {
64234 +       tmpbuf[count] = '\0';   
64235 +
64236 +       if (tmpbuf[count-1] == '\n')
64237 +           tmpbuf[count-1] = '\0';
64238 +
64239 +       if (! strcmp (tmpbuf, "rail"))
64240 +           DisplayRail (rail);
64241 +       if (! strcmp (tmpbuf, "segs"))
64242 +           DisplaySegs (rail);
64243 +       if (! strcmp (tmpbuf, "nodes"))
64244 +           DisplayNodes (rail);
64245 +       if (! strcmp (tmpbuf, "status"))
64246 +           DisplayStatus (rail);
64247 +       if (! strcmp (tmpbuf, "debug") && rail->Operations.Debug)
64248 +           rail->Operations.Debug (rail);
64249 +       if (! strncmp (tmpbuf, "epcomms", 7))
64250 +           ep_comms_display (rail->System, tmpbuf[7] == '=' ? tmpbuf + 8 : NULL);
64251 +       res = count;
64252 +    }
64253 +
64254 +    MOD_DEC_USE_COUNT;
64255 +
64256 +    return (res);
64257 +}
64258 +
64259 +static int
64260 +proc_read_display(char *page, char **start, off_t off,
64261 +                 int count, int *eof, void *data)
64262 +{
64263 +    int len = sprintf (page, "<unreadable>\n");
64264 +    
64265 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
64266 +}
64267 +
64268 +
64269 +static int
64270 +proc_read_stats(char *page, char **start, off_t off,
64271 +               int count, int *eof, void *data)
64272 +{
64273 +    EP_RAIL *rail = (EP_RAIL *) data;
64274 +
64275 +    if ( rail == NULL ) {
64276 +       strcpy(page,"proc_read_stats rail=NULL\n");
64277 +    } else {
64278 +       page[0] = 0;
64279 +       ep_fillout_stats(rail, page);
64280 +       rail->Operations.FillOutStats (rail, page);
64281 +    }
64282 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
64283 +}
64284 +
64285 +static int
64286 +proc_read_devinfo(char *page, char **start, off_t off,
64287 +                 int count, int *eof, void *data)
64288 +{
64289 +    EP_RAIL       *rail    = (EP_RAIL *) data;
64290 +    ELAN_DEVINFO  *devinfo = &rail->Devinfo;
64291 +    ELAN_POSITION *pos     = &rail->Position;
64292 +    char          *p       = page;
64293 +    
64294 +    switch (devinfo->dev_device_id)
64295 +    {
64296 +    case PCI_DEVICE_ID_ELAN3:
64297 +       p += sprintf (p, "ep%d is elan3 %d rev %c\n", rail->Number, 
64298 +                     devinfo->dev_instance, 'a' + devinfo->dev_revision_id);
64299 +       break;
64300 +       
64301 +    case PCI_DEVICE_ID_ELAN4:
64302 +       p += sprintf (p, "ep%d is elan4 %d rev %c\n", rail->Number, 
64303 +                     devinfo->dev_instance, 'a' + devinfo->dev_revision_id);
64304 +       break;
64305 +    default:
64306 +       p += sprintf (p, "ep%d is unkown %x/%x\n", rail->Number, devinfo->dev_vendor_id, devinfo->dev_device_id);
64307 +       break;
64308 +    }
64309 +
64310 +    if (rail->State == EP_RAIL_STATE_RUNNING)
64311 +       p += sprintf (p, "ep%d nodeid %d numnodes %d\n", rail->Number, pos->pos_nodeid, pos->pos_nodes);
64312 +
64313 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, p - page));
64314 +}
64315 +
64316 +static struct rail_info
64317 +{
64318 +    char *name;
64319 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
64320 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
64321 +} rail_info[] = {
64322 +    {"state",   proc_read_state,   proc_write_state},
64323 +    {"display", proc_read_display, proc_write_display},
64324 +    {"stats",   proc_read_stats,   NULL},
64325 +    {"devinfo", proc_read_devinfo, NULL},
64326 +};
64327 +
64328 +static int
64329 +nodeset_open (struct inode *inode, struct file *file)
64330 +{
64331 +    NODESET_PRIVATE *pr;
64332 +
64333 +    if ((pr = kmalloc (sizeof (NODESET_PRIVATE), GFP_KERNEL)) == NULL)
64334 +       return (-ENOMEM);
64335 +    
64336 +    pr->pr_changed = 1;
64337 +    pr->pr_off     = 0;
64338 +    pr->pr_len     = 0;
64339 +    pr->pr_page    = NULL;
64340 +    pr->pr_rail    = (EP_RAIL *)( PDE(inode)->data );
64341 +
64342 +    spin_lock (&ep_nodeset_lock);
64343 +    pr->pr_next = ep_nodeset_list;
64344 +    ep_nodeset_list = pr;
64345 +    spin_unlock (&ep_nodeset_lock);
64346 +
64347 +    file->private_data = (void *) pr;
64348 +
64349 +    MOD_INC_USE_COUNT;
64350 +    return (0);
64351 +}
64352 +
64353 +static int
64354 +nodeset_release (struct inode *inode, struct file *file)
64355 +{
64356 +    NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data;
64357 +    NODESET_PRIVATE **ppr;
64358 +
64359 +    spin_lock (&ep_nodeset_lock);
64360 +    for (ppr = &ep_nodeset_list; (*ppr) != pr; ppr = &(*ppr)->pr_next)
64361 +       ;
64362 +    (*ppr) = pr->pr_next;
64363 +    spin_unlock (&ep_nodeset_lock);
64364 +
64365 +    if (pr->pr_page)
64366 +       free_page ((unsigned long) pr->pr_page);
64367 +    kfree (pr);
64368 +    
64369 +    MOD_DEC_USE_COUNT;
64370 +    return (0);
64371 +}
64372 +
64373 +static ssize_t
64374 +nodeset_read (struct file *file, char *buf, size_t count, loff_t *ppos)
64375 +{
64376 +    NODESET_PRIVATE *pr  = (NODESET_PRIVATE *) file->private_data;
64377 +    EP_RAIL          *rail = pr->pr_rail;
64378 +    int              error;
64379 +    unsigned long    flags;
64380 +
64381 +    if (!pr->pr_changed && pr->pr_off >= pr->pr_len)
64382 +       return (0);
64383 +
64384 +    if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
64385 +       return (error);
64386 +
64387 +    if (pr->pr_page == NULL && (pr->pr_page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
64388 +       return (-ENOMEM);
64389 +
64390 +    if (pr->pr_off >= pr->pr_len)
64391 +    {
64392 +       kmutex_lock (&rail->CallbackLock);
64393 +       if (rail->State == EP_RAIL_STATE_RUNNING)
64394 +       {
64395 +           spin_lock_irqsave (&rail->System->NodeLock, flags);
64396 +           ep_sprintf_bitmap (pr->pr_page, PAGESIZE, statemap_tobitmap(rail->NodeSet), 0, 0, rail->Position.pos_nodes);
64397 +           spin_unlock_irqrestore (&rail->System->NodeLock, flags);
64398 +
64399 +           if (rail->SwitchBroadcastLevel == -1)
64400 +               strcat (pr->pr_page, "<disconnected>");
64401 +           else if (rail->SwitchBroadcastLevel < (rail->Position.pos_levels-1))
64402 +               sprintf (pr->pr_page + strlen (pr->pr_page), "<%d>", rail->SwitchBroadcastLevel);
64403 +           strcat (pr->pr_page, "\n");
64404 +       }
64405 +       else
64406 +           strcpy (pr->pr_page, "<not running>\n");
64407 +       kmutex_unlock (&rail->CallbackLock);
64408 +
64409 +       pr->pr_len     = strlen (pr->pr_page);
64410 +       pr->pr_off     = 0;
64411 +       pr->pr_changed = 0;
64412 +    }
64413 +
64414 +    if (count >= (pr->pr_len - pr->pr_off))
64415 +       count = pr->pr_len - pr->pr_off;
64416 +
64417 +    copy_to_user (buf, pr->pr_page + pr->pr_off, count);
64418 +
64419 +    pr->pr_off += count;
64420 +    *ppos      += count;
64421 +
64422 +    if (pr->pr_off >= pr->pr_len)
64423 +    {
64424 +       free_page ((unsigned long) pr->pr_page);
64425 +       pr->pr_page = NULL;
64426 +    }
64427 +
64428 +    return (count);
64429 +}
64430 +
64431 +static unsigned int
64432 +nodeset_poll (struct file *file, poll_table *wait)
64433 +{
64434 +    NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data;
64435 +
64436 +    poll_wait (file, &ep_nodeset_wait, wait);
64437 +    if (pr->pr_changed || pr->pr_off < pr->pr_len)
64438 +       return (POLLIN | POLLRDNORM);
64439 +    return (0);
64440 +}
64441 +
64442 +static void 
64443 +nodeset_callback (void *arg, statemap_t *map)
64444 +{
64445 +    EP_RAIL         *rail = (EP_RAIL *) arg;
64446 +    NODESET_PRIVATE *pr;
64447 +
64448 +    ep_display_bitmap (rail->Name, "Nodeset", statemap_tobitmap(map), 0, ep_numnodes(rail->System));
64449 +
64450 +    spin_lock (&ep_nodeset_lock);
64451 +    for (pr = ep_nodeset_list; pr; pr = pr->pr_next)
64452 +       if (pr->pr_rail == rail)
64453 +           pr->pr_changed = 1;
64454 +    spin_unlock (&ep_nodeset_lock);
64455 +
64456 +    wake_up_interruptible (&ep_nodeset_wait);
64457 +}
64458 +
64459 +void
64460 +proc_character_fill (long mode, char *fmt, ...)
64461 +{
64462 +    int len;
64463 +    va_list ap;
64464 +    PROC_PRIVATE *private = (PROC_PRIVATE *)mode;
64465 +    
64466 +    /* is the buffer already full */
64467 +    if (private->pr_len >= private->pr_data_len) 
64468 +       return;
64469 +    
64470 +    /* attempt to fill up to the remaining space */
64471 +    va_start (ap, fmt);
64472 +    len = vsnprintf ( & private->pr_data[private->pr_len], (private->pr_data_len - private->pr_len), fmt, ap);
64473 +    va_end (ap);
64474 +    
64475 +    if (len < 0 ) 
64476 +    {
64477 +       /* we have reached the end of buffer and need to fail all future writes
64478 +        * the caller can check (pr_len >= pr_data_len) and recall with more space 
64479 +        */
64480 +       private->pr_len = private->pr_data_len;
64481 +       return;
64482 +    }
64483 +    
64484 +    /* move the length along */
64485 +    private->pr_len += len;   
64486 +}
64487 +
64488 +int
64489 +proc_release (struct inode *inode, struct file *file)
64490 +{
64491 +    PROC_PRIVATE *pr = (PROC_PRIVATE *) file->private_data;
64492 +    
64493 +    if (pr->pr_data)
64494 +       KMEM_FREE (pr->pr_data, pr->pr_data_len);
64495 +    kfree (pr);
64496 +    
64497 +    MOD_DEC_USE_COUNT;
64498 +    return (0);
64499 +}
64500 +
64501 +ssize_t
64502 +proc_read (struct file *file, char *buf, size_t count, loff_t *ppos)
64503 +{
64504 +    PROC_PRIVATE *pr  = (PROC_PRIVATE *) file->private_data;
64505 +    int           error;
64506 +
64507 +    if (pr->pr_off >= pr->pr_len)
64508 +       return (0);
64509 +
64510 +    if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
64511 +       return (error);
64512 +
64513 +    if (count >= (pr->pr_len - pr->pr_off))
64514 +       count = pr->pr_len - pr->pr_off;
64515 +
64516 +    copy_to_user (buf, pr->pr_data + pr->pr_off, count);
64517 +
64518 +    pr->pr_off += count;
64519 +    *ppos      += count;
64520 +
64521 +    return (count);
64522 +}
64523 +
64524 +static int
64525 +proc_open (struct inode *inode, struct file *file)
64526 +{
64527 +    PROC_PRIVATE *pr;
64528 +    CM_RAIL      *cmRail;
64529 +    int           pages = 4;
64530 +    unsigned long flags;
64531 +
64532 +    if ((pr = kmalloc (sizeof (PROC_PRIVATE), GFP_KERNEL)) == NULL)
64533 +       return (-ENOMEM);
64534 +    
64535 +    pr->pr_rail = (EP_RAIL *)(PDE(inode)->data);
64536 +       
64537 +    do {       
64538 +       pr->pr_data_len = PAGESIZE * pages;
64539 +
64540 +       KMEM_ZALLOC (pr->pr_data, char *, pr->pr_data_len, 1);
64541 +       if (pr->pr_data == NULL) 
64542 +       { 
64543 +           pr->pr_len  = sprintf (pr->pr_data, "Out of Memory\n");
64544 +           break;
64545 +       } 
64546 +       
64547 +       pr->pr_off     = 0;
64548 +       pr->pr_len     = 0;
64549 +       pr->pr_data[0] = 0;
64550 +       
64551 +       if (pr->pr_rail->State != EP_RAIL_STATE_RUNNING) 
64552 +       { 
64553 +           pr->pr_len  = sprintf (pr->pr_data, "Rail not Running\n");
64554 +           break;
64555 +       } 
64556 +       else 
64557 +       {
64558 +           pr->pr_di.func  = proc_character_fill;
64559 +           pr->pr_di.arg   = (long)pr;
64560 +
64561 +           if (!strcmp("maps", file->f_dentry->d_iname)) 
64562 +           {
64563 +               cmRail = pr->pr_rail->ClusterRail;
64564 +
64565 +               spin_lock_irqsave (&cmRail->Lock, flags);
64566 +               DisplayNodeMaps (&pr->pr_di, cmRail);   
64567 +               spin_unlock_irqrestore (&cmRail->Lock, flags);  
64568 +           }
64569 +
64570 +           if (!strcmp("segs", file->f_dentry->d_iname)) 
64571 +           {
64572 +               cmRail = pr->pr_rail->ClusterRail;
64573 +               
64574 +               spin_lock_irqsave (&cmRail->Lock, flags);       
64575 +               DisplayNodeSgmts (&pr->pr_di, cmRail);
64576 +               spin_unlock_irqrestore (&cmRail->Lock, flags);
64577 +           }
64578 +
64579 +           if (!strcmp("tree", file->f_dentry->d_iname)) 
64580 +               DisplayRailDo (&pr->pr_di, pr->pr_rail);
64581 +       }
64582 +
64583 +       if ( pr->pr_len < pr->pr_data_len) 
64584 +           break; /* we managed to get all the output into the buffer */
64585 +
64586 +       pages++;
64587 +       KMEM_FREE ( pr->pr_data,  pr->pr_data_len);
64588 +    } while (1);
64589 +       
64590 +
64591 +    file->private_data = (void *) pr;
64592 +
64593 +    MOD_INC_USE_COUNT;
64594 +    return (0);
64595 +}
64596 +
64597 +struct file_operations proc_nodeset_operations = 
64598 +{
64599 +    read:      nodeset_read,
64600 +    poll:      nodeset_poll,
64601 +    open:      nodeset_open,
64602 +    release:   nodeset_release,
64603 +};
64604 +
64605 +struct file_operations proc_operations = 
64606 +{
64607 +    read:      proc_read,
64608 +    open:      proc_open,
64609 +    release:   proc_release,
64610 +};
64611 +
64612 +void
64613 +ep_procfs_rail_init (EP_RAIL *rail)
64614 +{
64615 +    struct proc_dir_entry *dir;
64616 +    struct proc_dir_entry *p;
64617 +    char                   name[10];
64618 +    int                    i;
64619 +
64620 +    sprintf (name, "rail%d", rail->Number);
64621 +
64622 +    if ((dir = rail->ProcDir = proc_mkdir (name, ep_procfs_root)) == NULL)
64623 +       return;
64624 +
64625 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
64626 +    {
64627 +       if ((p = create_proc_entry (rail_info[i].name, 0, dir)) != NULL)
64628 +       {
64629 +           p->read_proc  = rail_info[i].read_func;
64630 +           p->write_proc = rail_info[i].write_func;
64631 +           p->data       = rail;
64632 +           p->owner      = THIS_MODULE;
64633 +       }
64634 +    }
64635 +
64636 +    if ((p = create_proc_entry ("nodeset", 0, dir)) != NULL)
64637 +    {
64638 +       p->proc_fops = &proc_nodeset_operations;
64639 +       p->owner     = THIS_MODULE;
64640 +       p->data      = rail;
64641 +
64642 +       rail->CallbackRegistered = 1;
64643 +       ep_register_callback (rail, EP_CB_NODESET, nodeset_callback, rail);
64644 +    }
64645 +     
64646 +    if ((p = create_proc_entry ("maps", 0, dir)) != NULL)
64647 +    {
64648 +       p->proc_fops = &proc_operations;
64649 +       p->owner     = THIS_MODULE;
64650 +       p->data      = rail;    
64651 +    }
64652 +    
64653 +    if ((p = create_proc_entry ("segs", 0, dir)) != NULL)
64654 +    {
64655 +       p->proc_fops = &proc_operations;
64656 +       p->owner     = THIS_MODULE;
64657 +       p->data      = rail;
64658 +    }
64659 +    
64660 +    if ((p = create_proc_entry ("tree", 0, dir)) != NULL)
64661 +    {
64662 +       p->proc_fops = &proc_operations;
64663 +       p->owner     = THIS_MODULE;
64664 +       p->data      = rail;
64665 +    }
64666 +
64667 +}
64668 +
64669 +void
64670 +ep_procfs_rail_fini (EP_RAIL *rail)
64671 +{
64672 +    struct proc_dir_entry *dir = rail->ProcDir;
64673 +    char name[10];
64674 +    int  i;
64675 +
64676 +    if (dir == NULL)
64677 +       return;
64678 +
64679 +    if (rail->CallbackRegistered)
64680 +    {
64681 +       ep_remove_callback (rail, EP_CB_NODESET, nodeset_callback, rail);
64682 +
64683 +       remove_proc_entry ("nodeset", dir);
64684 +    }
64685 +
64686 +    remove_proc_entry ("maps",    dir);
64687 +    remove_proc_entry ("segs",    dir);
64688 +    remove_proc_entry ("tree",    dir);
64689 +
64690 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
64691 +       remove_proc_entry (rail_info[i].name, dir);
64692 +
64693 +    sprintf (name, "rail%d", rail->Number);
64694 +    remove_proc_entry (name, ep_procfs_root);
64695 +}
64696 +
64697 +#include "quadrics_version.h"
64698 +static char     quadrics_version[] = QUADRICS_VERSION;
64699 +
64700 +void
64701 +ep_procfs_init()
64702 +{
64703 +    extern int txd_stabilise;
64704 +    extern int MaxSwitchLevels;
64705 +
64706 +    spin_lock_init (&ep_nodeset_lock);
64707 +    init_waitqueue_head (&ep_nodeset_wait);
64708 +
64709 +    ep_procfs_root = proc_mkdir ("ep", qsnet_procfs_root);
64710 +    ep_config_root = proc_mkdir ("config", ep_procfs_root);
64711 +
64712 +    qsnet_proc_register_str (ep_procfs_root, "version", quadrics_version, 1);
64713 +
64714 +    qsnet_proc_register_hex (ep_config_root, "epdebug",               &epdebug,               0);
64715 +    qsnet_proc_register_hex (ep_config_root, "epdebug_console",       &epdebug_console,       0);
64716 +    qsnet_proc_register_hex (ep_config_root, "epdebug_cmlevel",       &epdebug_cmlevel,       0);
64717 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
64718 +    qsnet_proc_register_hex (ep_config_root, "epdebug_check_sum",     &epdebug_check_sum,     0);
64719 +#endif
64720 +    qsnet_proc_register_hex (ep_config_root, "epcomms_forward_limit", &epcomms_forward_limit, 0);
64721 +    qsnet_proc_register_int (ep_config_root, "txd_stabilise",         &txd_stabilise,         0);
64722 +    qsnet_proc_register_int (ep_config_root, "assfail_mode",          &assfail_mode,          0);
64723 +    qsnet_proc_register_int (ep_config_root, "max_switch_levels",     &MaxSwitchLevels,       1);
64724 +
64725 +    ep_procfs_rcvr_xmtr_init();
64726 +}
64727 +
64728 +void
64729 +ep_procfs_fini(void)
64730 +{
64731 +    ep_procfs_rcvr_xmtr_fini();
64732 +
64733 +    remove_proc_entry ("max_switch_levels",     ep_config_root);
64734 +    remove_proc_entry ("assfail_mode",          ep_config_root);
64735 +    remove_proc_entry ("txd_stabilise",         ep_config_root);
64736 +    remove_proc_entry ("epcomms_forward_limit", ep_config_root);
64737 +
64738 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
64739 +    remove_proc_entry ("epdebug_check_sum",     ep_config_root);
64740 +#endif
64741 +    remove_proc_entry ("epdebug_cmlevel",       ep_config_root);
64742 +    remove_proc_entry ("epdebug_console",       ep_config_root);
64743 +    remove_proc_entry ("epdebug",               ep_config_root);
64744 +
64745 +    remove_proc_entry ("version", ep_procfs_root);
64746 +    
64747 +    remove_proc_entry ("config", ep_procfs_root);
64748 +    remove_proc_entry ("ep", qsnet_procfs_root);
64749 +
64750 +    spin_lock_destroy (&ep_nodeset_lock);
64751 +}
64752 +
64753 +/*
64754 + * Local variables:
64755 + * c-file-style: "stroustrup"
64756 + * End:
64757 + */
64758 Index: linux-2.4.21/drivers/net/qsnet/ep/quadrics_version.h
64759 ===================================================================
64760 --- linux-2.4.21.orig/drivers/net/qsnet/ep/quadrics_version.h   2004-02-23 16:02:56.000000000 -0500
64761 +++ linux-2.4.21/drivers/net/qsnet/ep/quadrics_version.h        2005-06-01 23:12:54.680426792 -0400
64762 @@ -0,0 +1 @@
64763 +#define QUADRICS_VERSION "4.30qsnet"
64764 Index: linux-2.4.21/drivers/net/qsnet/ep/railhints.c
64765 ===================================================================
64766 --- linux-2.4.21.orig/drivers/net/qsnet/ep/railhints.c  2004-02-23 16:02:56.000000000 -0500
64767 +++ linux-2.4.21/drivers/net/qsnet/ep/railhints.c       2005-06-01 23:12:54.680426792 -0400
64768 @@ -0,0 +1,103 @@
64769 +/*
64770 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64771 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
64772 + *
64773 + *    For licensing information please see the supplied COPYING file
64774 + *
64775 + */
64776 +
64777 +#ident "@(#)$Id: railhints.c,v 1.5 2004/02/06 22:37:06 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
64778 +/*      $Source: /cvs/master/quadrics/epmod/railhints.c,v $*/
64779 +
64780 +#include <qsnet/kernel.h>
64781 +
64782 +#include <elan/kcomm.h>
64783 +#include <elan/epsvc.h>
64784 +#include <elan/epcomms.h>
64785 +
64786 +#include "debug.h"
64787 +
64788 +int
64789 +ep_pickRail(EP_RAILMASK railmask)
64790 +{
64791 +    static volatile int lastGlobal;
64792 +    int i, rnum, last = lastGlobal;
64793 +
64794 +    /* Pick a single rail out of the railmask */
64795 +    for (i = 0; i < EP_MAX_RAILS; i++)
64796 +       if (railmask & (1 << ((last + i) % EP_MAX_RAILS)))
64797 +           break;
64798 +
64799 +    if (i == EP_MAX_RAILS)
64800 +       return (-1);
64801 +
64802 +    rnum = (last + i) % EP_MAX_RAILS;
64803 +
64804 +    lastGlobal = (rnum + 1) % EP_MAX_RAILS;
64805 +
64806 +    ASSERT (railmask & (1 << rnum));
64807 +
64808 +    return (rnum);
64809 +}
64810 +
64811 +int
64812 +ep_xmtr_bcastrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails)
64813 +{
64814 +    /* Return a single rail out of allowed mask with the best connectivity for broadcast. */
64815 +    return (ep_pickRail (allowedRails & xmtr->RailMask));
64816 +}
64817 +
64818 +int
64819 +ep_xmtr_prefrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails, unsigned nodeId)
64820 +{
64821 +    EP_NODE *node = &xmtr->Subsys->Subsys.Sys->Nodes[nodeId];
64822 +
64823 +    EPRINTF5 (DBG_XMTR, "ep_xmtr_prefrail: xmtr=%p allowedRails=%x nodeId=%d xmtr->RailMaks=%x Connected=%x\n", 
64824 +             xmtr, allowedRails, nodeId, xmtr->RailMask, node->ConnectedRails);
64825 +
64826 +    /* Return a single rail which is currently connected to nodeId (limited to rails
64827 +     * in allowedmask) - if more than one rail is possible, then round-robin between 
64828 +     * them */
64829 +    return (ep_pickRail (allowedRails & xmtr->RailMask & node->ConnectedRails));
64830 +}
64831 +
64832 +EP_RAILMASK
64833 +ep_xmtr_availrails (EP_XMTR *xmtr)
64834 +{
64835 +    /* Return which rails can be used to transmit on. */
64836 +
64837 +    return (xmtr->RailMask);
64838 +}
64839 +
64840 +EP_RAILMASK
64841 +ep_xmtr_noderails (EP_XMTR *xmtr, unsigned nodeId)
64842 +{
64843 +    EP_NODE *node = &xmtr->Subsys->Subsys.Sys->Nodes[nodeId];
64844 +
64845 +    /* Return which rails can be used to transmit to this node. */
64846 +
64847 +    return (xmtr->RailMask & node->ConnectedRails);
64848 +}
64849 +
64850 +int
64851 +ep_rcvr_prefrail (EP_RCVR *rcvr, EP_RAILMASK allowedRails)
64852 +{
64853 +    /* Return the "best" rail for queueing a receive buffer out on - this will be a
64854 +     * rail with ThreadWaiting set or the rail with the least descriptors queued
64855 +     * on it. */
64856 +    
64857 +    return (ep_pickRail (allowedRails & rcvr->RailMask));
64858 +}
64859 +
64860 +EP_RAILMASK
64861 +ep_rcvr_availrails (EP_RCVR *rcvr)
64862 +{
64863 +    /* Return which rails can be used to queue receive buffers. */
64864 +    return (rcvr->RailMask);
64865 +}
64866 +
64867 +/*
64868 + * Local variables:
64869 + * c-file-style: "stroustrup"
64870 + * End:
64871 + */
64872 Index: linux-2.4.21/drivers/net/qsnet/ep/rmap.c
64873 ===================================================================
64874 --- linux-2.4.21.orig/drivers/net/qsnet/ep/rmap.c       2004-02-23 16:02:56.000000000 -0500
64875 +++ linux-2.4.21/drivers/net/qsnet/ep/rmap.c    2005-06-01 23:12:54.681426640 -0400
64876 @@ -0,0 +1,365 @@
64877 +/*
64878 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64879 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
64880 + *
64881 + *    For licensing information please see the supplied COPYING file
64882 + *
64883 + */
64884 +
64885 +#ident "@(#)$Id: rmap.c,v 1.15 2004/05/19 10:24:38 david Exp $"
64886 +/*      $Source: /cvs/master/quadrics/epmod/rmap.c,v $ */
64887 +
64888 +#include <qsnet/kernel.h>
64889 +#include <elan/rmap.h>
64890 +
64891 +#include "debug.h"
64892 +
64893 +void
64894 +ep_display_rmap (EP_RMAP *mp)
64895 +{
64896 +    EP_RMAP_ENTRY *bp;
64897 +    unsigned long flags;
64898 +    
64899 +    spin_lock_irqsave (&mp->m_lock, flags);
64900 +    ep_debugf (DBG_DEBUG, "map: %s size %d free %d\n", mp->m_name, mp->m_size, mp->m_free);
64901 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
64902 +       ep_debugf (DBG_DEBUG, "   [%lx - %lx]\n", bp->m_addr, bp->m_addr+bp->m_size-1);
64903 +    spin_unlock_irqrestore (&mp->m_lock, flags);
64904 +}
64905 +
64906 +void
64907 +ep_mapinit (EP_RMAP *mp, char *name, u_int mapsize)
64908 +{
64909 +    spin_lock_init (&mp->m_lock);
64910 +    kcondvar_init (&mp->m_wait);
64911 +    
64912 +    /* The final segment in the array has size 0 and acts as a delimiter
64913 +     * we ensure that we never use segments past the end of the array by
64914 +     * maintaining a free segment count in m_free.  When excess segments
64915 +     * occur we discard some resources */
64916 +    
64917 +    mp->m_size = mapsize;
64918 +    mp->m_free = mapsize;
64919 +    mp->m_name = name;
64920 +    
64921 +    bzero (mp->m_map, sizeof (EP_RMAP_ENTRY) * (mapsize+1));
64922 +}
64923 +
64924 +EP_RMAP *
64925 +ep_rmallocmap (size_t mapsize, char *name, int cansleep)
64926 +{
64927 +    EP_RMAP *mp;
64928 +
64929 +    KMEM_ZALLOC (mp, EP_RMAP *, sizeof (EP_RMAP) + mapsize*sizeof (EP_RMAP_ENTRY), cansleep);
64930 +
64931 +    if (mp != NULL)
64932 +       ep_mapinit (mp, name, mapsize);
64933 +
64934 +    return (mp);
64935 +}
64936 +
64937 +void
64938 +ep_rmfreemap (EP_RMAP *mp)
64939 +{
64940 +    spin_lock_destroy (&mp->m_lock);
64941 +    kcondvar_destroy (&mp->m_wait);
64942 +    
64943 +    KMEM_FREE (mp, sizeof (EP_RMAP) + mp->m_size * sizeof (EP_RMAP_ENTRY));
64944 +}
64945 +
64946 +static u_long
64947 +ep_rmalloc_locked (EP_RMAP *mp, size_t size)
64948 +{
64949 +    EP_RMAP_ENTRY *bp;
64950 +    u_long            addr;
64951 +    
64952 +    ASSERT (size > 0);
64953 +    ASSERT (SPINLOCK_HELD (&mp->m_lock));
64954 +
64955 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
64956 +    {
64957 +       if (bp->m_size >= size)
64958 +       {
64959 +           addr = bp->m_addr;
64960 +           bp->m_addr += size;
64961 +           
64962 +           if ((bp->m_size -= size) == 0)
64963 +           {
64964 +               /* taken all of this slot - so shift the map down */
64965 +               do {
64966 +                   bp++;
64967 +                   (bp-1)->m_addr = bp->m_addr;
64968 +               } while (((bp-1)->m_size = bp->m_size) != 0);
64969 +
64970 +               mp->m_free++;
64971 +           }
64972 +           return (addr);
64973 +       }
64974 +    }
64975 +
64976 +    return (0);
64977 +}
64978 +
64979 +u_long
64980 +ep_rmalloc (EP_RMAP *mp, size_t size, int cansleep)
64981 +{
64982 +    unsigned long addr;
64983 +    unsigned long flags;
64984 +
64985 +    spin_lock_irqsave (&mp->m_lock, flags);
64986 +    while ((addr = ep_rmalloc_locked (mp, size)) == 0 && cansleep)
64987 +    {
64988 +       mp->m_want = 1;
64989 +       kcondvar_wait (&mp->m_wait, &mp->m_lock, &flags);
64990 +    }
64991 +
64992 +    spin_unlock_irqrestore (&mp->m_lock, flags);
64993 +
64994 +    return (addr);
64995 +}
64996 +
64997 +
64998 +
64999 +u_long
65000 +ep_rmalloc_constrained (EP_RMAP *mp, size_t size, u_long alo, u_long ahi, u_long align, int cansleep)
65001 +{
65002 +    EP_RMAP_ENTRY *bp, *bp2, *lbp;
65003 +    unsigned long addr=0;
65004 +    size_t        delta;
65005 +    int           ok;
65006 +    unsigned long flags;
65007 +
65008 +    spin_lock_irqsave (&mp->m_lock, flags);
65009 + again:
65010 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
65011 +    {
65012 +       delta = 0;
65013 +       
65014 +       if (alo < bp->m_addr)
65015 +       {
65016 +           addr = bp->m_addr;
65017 +           
65018 +           if (addr & (align-1))
65019 +               addr = (addr + (align-1)) & ~(align-1);
65020 +           
65021 +           delta = addr - bp->m_addr;
65022 +           
65023 +           if (ahi >= bp->m_addr + bp->m_size)
65024 +               ok = (bp->m_size >= (size + delta));
65025 +           else
65026 +               ok = ((bp->m_addr + size + delta) <= ahi);
65027 +       }
65028 +       else
65029 +       {
65030 +           addr = alo;
65031 +           if (addr & (align-1))
65032 +               addr = (addr + (align-1)) & ~(align-1);
65033 +           delta = addr - bp->m_addr;
65034 +           
65035 +           if (ahi >= bp->m_addr + bp->m_size)
65036 +               ok = ((alo + size + delta) <= (bp->m_addr + bp->m_size));
65037 +           else
65038 +               ok = ((alo + size + delta) <= ahi);
65039 +       }
65040 +
65041 +       if (ok)
65042 +           break;
65043 +    }  
65044 +    
65045 +    if (bp->m_size == 0)
65046 +    {
65047 +       if (cansleep)
65048 +       {
65049 +           mp->m_want = 1;
65050 +           kcondvar_wait (&mp->m_wait, &mp->m_lock, &flags);
65051 +           goto again;
65052 +       }
65053 +       spin_unlock_irqrestore (&mp->m_lock, flags);
65054 +       return (0);
65055 +    }
65056 +
65057 +    /* found an appropriate map entry - so take the bit out which we want */
65058 +    if (bp->m_addr == addr) 
65059 +    {
65060 +       if (bp->m_size == size) 
65061 +       {
65062 +           /* allocate entire segment and compress map */
65063 +           bp2 = bp;
65064 +           while (bp2->m_size) 
65065 +           {
65066 +               bp2++;
65067 +               (bp2-1)->m_addr = bp2->m_addr;
65068 +               (bp2-1)->m_size = bp2->m_size;
65069 +           }
65070 +           mp->m_free++;
65071 +       }
65072 +       else 
65073 +       {
65074 +           /* take from start of segment */
65075 +           bp->m_addr += size;
65076 +           bp->m_size -= size;
65077 +       }
65078 +    }
65079 +    else 
65080 +    {
65081 +       if (bp->m_addr + bp->m_size == addr + size) 
65082 +       {
65083 +           /* take from end of segment */
65084 +           bp->m_size -= size;
65085 +       }
65086 +       else 
65087 +       {
65088 +           /* split the segment losing the last entry if there's no space */
65089 +           if (mp->m_free == 0) 
65090 +           {
65091 +               /* find last map entry */
65092 +               for (lbp = bp; lbp->m_size != 0; lbp++)
65093 +                   ;
65094 +               lbp--;
65095 +               
65096 +               if (lbp->m_size > (lbp-1)->m_size)
65097 +                   lbp--;
65098 +               
65099 +               printk ("%s: lost resource map entry [%lx, %lx]\n",
65100 +                       mp->m_name, lbp->m_addr, lbp->m_addr + lbp->m_size);
65101 +               
65102 +               *lbp = *(lbp+1);
65103 +               (lbp+1)->m_size = 0;
65104 +               
65105 +               mp->m_free++;
65106 +           }
65107 +           
65108 +           for (bp2 = bp; bp2->m_size != 0; bp2++)
65109 +               continue;
65110 +           
65111 +           for (bp2--; bp2 > bp; bp2--)
65112 +           {
65113 +               (bp2+1)->m_addr = bp2->m_addr;
65114 +               (bp2+1)->m_size = bp2->m_size;
65115 +           }
65116 +
65117 +           mp->m_free--;
65118 +           
65119 +           (bp+1)->m_addr = addr + size;
65120 +           (bp+1)->m_size = bp->m_addr + bp->m_size - (addr + size);
65121 +           bp->m_size = addr - bp->m_addr;
65122 +       }
65123 +    }
65124 +
65125 +    spin_unlock_irqrestore (&mp->m_lock, flags);
65126 +    return (addr);
65127 +}
65128 +
65129 +void
65130 +ep_rmfree (EP_RMAP *mp, size_t size, u_long addr)
65131 +{
65132 +    EP_RMAP_ENTRY *bp;
65133 +    unsigned long t;
65134 +    unsigned long flags;
65135 +
65136 +    spin_lock_irqsave (&mp->m_lock, flags);
65137 +
65138 +    ASSERT (addr != 0 && size > 0);
65139 +       
65140 +again:
65141 +    /* find the piece of the map which starts after the returned space
65142 +     * or the end of the map */
65143 +    for (bp = &mp->m_map[0]; bp->m_addr <= addr && bp->m_size != 0; bp++)
65144 +       ;
65145 +
65146 +    /* bp points to the piece to the right of where we want to go */
65147 +    
65148 +    if (bp > &mp->m_map[0] && (bp-1)->m_addr + (bp-1)->m_size >= addr) 
65149 +    {
65150 +       /* merge with piece on the left */
65151 +       
65152 +       ASSERT ((bp-1)->m_addr + (bp-1)->m_size <= addr);
65153 +       
65154 +       (bp-1)->m_size += size;
65155 +       
65156 +       ASSERT (bp->m_size == 0 || addr+size <= bp->m_addr);
65157 +       
65158 +       if (bp->m_size && (addr + size) == bp->m_addr)
65159 +       {
65160 +           /* merge with the piece on the right by 
65161 +            * growing the piece on the left and shifting
65162 +            * the map down */
65163 +           
65164 +           ASSERT ((addr + size) <= bp->m_addr);
65165 +           
65166 +           (bp-1)->m_size += bp->m_size;
65167 +           while (bp->m_size) 
65168 +           {
65169 +               bp++;
65170 +               (bp-1)->m_addr = bp->m_addr;
65171 +               (bp-1)->m_size = bp->m_size;
65172 +           }
65173 +           
65174 +           mp->m_free++;
65175 +       }
65176 +    }
65177 +    else if (addr + size >= bp->m_addr && bp->m_size)
65178 +    {
65179 +       /* merge with piece to the right */
65180 +       
65181 +       ASSERT ((addr + size) <= bp->m_addr);
65182 +       
65183 +       bp->m_addr -= size;
65184 +       bp->m_size += size;
65185 +    }
65186 +    else
65187 +    {
65188 +       /* doesn't join with left or right - check for map
65189 +          overflow and discard the smallest of the last or
65190 +          next to last entries */
65191 +
65192 +       if (mp->m_free == 0)
65193 +       {
65194 +           EP_RMAP_ENTRY *lbp;
65195 +           
65196 +           /* find last map entry */
65197 +           for (lbp = bp; lbp->m_size != 0; lbp++)
65198 +               ;
65199 +           lbp--;
65200 +           
65201 +           if (lbp->m_size > (lbp-1)->m_size)
65202 +               lbp--;
65203 +           
65204 +           printk ("%s: lost resource map entry [%lx, %lx]\n", 
65205 +                   mp->m_name, lbp->m_addr, lbp->m_addr + lbp->m_size);
65206 +           
65207 +           *lbp = *(lbp+1);
65208 +           (lbp+1)->m_size = 0;
65209 +
65210 +           mp->m_free++;
65211 +           goto again;
65212 +       }
65213 +
65214 +       /* make a new entry and push the remaining ones up */
65215 +       do {
65216 +           t = bp->m_addr;
65217 +           bp->m_addr = addr;
65218 +           addr = t;
65219 +           t = bp->m_size;
65220 +           bp->m_size = size;
65221 +           bp++;
65222 +       } while ((size = t) != 0);
65223 +
65224 +       mp->m_free--;
65225 +    }
65226 +    
65227 +    /* if anyone blocked on rmalloc failure, wake 'em up */
65228 +    if (mp->m_want)
65229 +    {
65230 +       mp->m_want = 0;
65231 +       kcondvar_wakeupall (&mp->m_wait, &mp->m_lock);
65232 +    }
65233 +
65234 +    spin_unlock_irqrestore (&mp->m_lock, flags);
65235 +}
65236 +
65237 +/*
65238 + * Local variables:
65239 + * c-file-style: "stroustrup"
65240 + * End:
65241 + */
65242 Index: linux-2.4.21/drivers/net/qsnet/ep/spinlock_elan3_thread.c
65243 ===================================================================
65244 --- linux-2.4.21.orig/drivers/net/qsnet/ep/spinlock_elan3_thread.c      2004-02-23 16:02:56.000000000 -0500
65245 +++ linux-2.4.21/drivers/net/qsnet/ep/spinlock_elan3_thread.c   2005-06-01 23:12:54.681426640 -0400
65246 @@ -0,0 +1,44 @@
65247 +/*
65248 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65249 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65250 + *
65251 + *    For licensing information please see the supplied COPYING file
65252 + *
65253 + */
65254 +
65255 +#ident "@(#)$Id: spinlock_elan3_thread.c,v 1.9 2003/10/07 13:22:38 david Exp $"
65256 +/*      $Source: /cvs/master/quadrics/epmod/spinlock_elan3_thread.c,v $ */
65257 +
65258 +#include <qsnet/types.h>
65259 +
65260 +#include <elan3/e3types.h>
65261 +#include <elan3/events.h>
65262 +#include <elan3/elanregs.h>
65263 +#include <elan3/intrinsics.h>
65264 +
65265 +#include <elan/nmh.h>
65266 +#include <elan/kcomm.h>
65267 +#include <elan/epcomms.h>
65268 +
65269 +#include "kcomm_elan3.h"
65270 +#include "epcomms_elan3.h"
65271 +
65272 +void
65273 +ep3_spinblock (EP3_SPINLOCK_ELAN *sle, EP3_SPINLOCK_MAIN *sl)
65274 +{
65275 +    do {
65276 +       sl->sl_seq = sle->sl_seq;                       /* Release my lock */
65277 +       
65278 +       while (sle->sl_lock)                            /* Wait until the main */
65279 +           c_break();                                  /* releases the lock */
65280 +       
65281 +       sle->sl_seq++;                                  /* and try and relock */
65282 +    } while (sle->sl_lock);
65283 +}
65284 +
65285 +
65286 +/*
65287 + * Local variables:
65288 + * c-file-style: "stroustrup"
65289 + * End:
65290 + */
65291 Index: linux-2.4.21/drivers/net/qsnet/ep/statemap.c
65292 ===================================================================
65293 --- linux-2.4.21.orig/drivers/net/qsnet/ep/statemap.c   2004-02-23 16:02:56.000000000 -0500
65294 +++ linux-2.4.21/drivers/net/qsnet/ep/statemap.c        2005-06-01 23:12:54.682426488 -0400
65295 @@ -0,0 +1,385 @@
65296 +/*
65297 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65298 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65299 + *
65300 + *    For licensing information please see the supplied COPYING file
65301 + *
65302 + */
65303 +
65304 +#ident "@(#)$Id: statemap.c,v 1.11.8.1 2004/11/18 12:05:00 david Exp $"
65305 +/*      $Source: /cvs/master/quadrics/epmod/statemap.c,v $ */
65306 +
65307 +#include <qsnet/kernel.h>
65308 +#include <elan/statemap.h>
65309 +
65310 +/******************************** global state bitmap stuff **********************************/
65311 +static int
65312 +statemap_setmapbit (bitmap_t *map, int offset, int bit)
65313 +{
65314 +   bitmap_t *e    = &map[offset >> BT_ULSHIFT];
65315 +   bitmap_t  mask = ((bitmap_t)1) << (offset & BT_ULMASK);
65316 +   int       rc = ((*e) & mask) != 0;
65317 +   
65318 +   if (bit)
65319 +   {
65320 +      *e |= mask;
65321 +      return (!rc);
65322 +   }
65323 +
65324 +   *e &= ~mask;
65325 +   return (rc);
65326 +}
65327 +
65328 +static int
65329 +statemap_firstsegbit (bitmap_t seg)
65330 +{
65331 +   int            bit = 0;
65332 +   
65333 +   if (seg == 0)
65334 +      return (-1);
65335 +
65336 +#if (BT_ULSHIFT == 6)
65337 +   if ((seg & 0xffffffffL) == 0)
65338 +   {
65339 +      seg >>= 32;
65340 +      bit += 32;
65341 +   }
65342 +#elif (BT_ULSHIFT != 5)
65343 +# error "Unexpected value of BT_ULSHIFT"
65344 +#endif
65345 +
65346 +   if ((seg & 0xffff) == 0)
65347 +   {
65348 +      seg >>= 16;
65349 +      bit += 16;
65350 +   }
65351 +      
65352 +   if ((seg & 0xff) == 0)
65353 +   {
65354 +      seg >>= 8;
65355 +      bit += 8;
65356 +   }
65357 +      
65358 +   if ((seg & 0xf) == 0)
65359 +   {
65360 +      seg >>= 4;
65361 +      bit += 4;
65362 +   }
65363 +      
65364 +   if ((seg & 0x3) == 0)
65365 +   {
65366 +      seg >>= 2;
65367 +      bit += 2;
65368 +   }
65369 +
65370 +   return (((seg & 0x1) == 0) ? bit + 1 : bit);
65371 +}
65372 +
65373 +bitmap_t
65374 +statemap_getseg (statemap_t *map, unsigned int offset)
65375 +{
65376 +   ASSERT (offset < map->size);
65377 +   ASSERT ((offset & BT_ULMASK) == 0);
65378 +
65379 +   return (map->bitmap[offset >> BT_ULSHIFT]);
65380 +}
65381 +
65382 +void
65383 +statemap_setseg (statemap_t *map, unsigned int offset, bitmap_t seg)
65384 +{
65385 +   ASSERT (offset < map->size);
65386 +   ASSERT ((offset & BT_ULMASK) == 0);
65387 +
65388 +   offset >>= BT_ULSHIFT;
65389 +   if (map->bitmap[offset] == seg)
65390 +      return;
65391 +
65392 +   map->bitmap[offset] = seg;
65393 +
65394 +   if (statemap_setmapbit (map->changemap2, offset,       1) &&
65395 +       statemap_setmapbit (map->changemap1, offset >>= BT_ULSHIFT, 1))
65396 +      statemap_setmapbit (map->changemap0, offset >>= BT_ULSHIFT, 1);
65397 +}
65398 +
65399 +bitmap_t
65400 +statemap_getbits (statemap_t *map, unsigned int offset, int nbits)
65401 +{
65402 +   int      index = offset >> BT_ULSHIFT;
65403 +   bitmap_t mask  = (nbits == BT_NBIPUL) ? (bitmap_t) -1 : (((bitmap_t)1) << nbits) - 1;
65404 +   
65405 +   ASSERT (nbits <= BT_NBIPUL);
65406 +   ASSERT (offset + nbits <= map->size);
65407 +
65408 +   offset &= BT_ULMASK;
65409 +   if (offset + nbits <= BT_NBIPUL)
65410 +      return ((map->bitmap[index] >> offset) & mask);
65411 +   
65412 +   return (((map->bitmap[index] >> offset) |
65413 +           (map->bitmap[index + 1] << (BT_NBIPUL - offset))) & mask);
65414 +}
65415 +
65416 +void
65417 +statemap_setbits (statemap_t *map, unsigned int offset, bitmap_t bits, int nbits)
65418 +{
65419 +   int      index = offset >> BT_ULSHIFT;
65420 +   bitmap_t mask;
65421 +   bitmap_t seg;
65422 +   bitmap_t newseg;
65423 +
65424 +   ASSERT (nbits <= BT_NBIPUL);
65425 +   ASSERT (offset + nbits <= map->size);
65426 +
65427 +   offset &= BT_ULMASK;
65428 +   if (offset + nbits <= BT_NBIPUL)
65429 +   {
65430 +      mask = ((nbits == BT_NBIPUL) ? -1 : ((((bitmap_t)1) << nbits) - 1)) << offset;
65431 +      seg = map->bitmap[index];
65432 +      newseg = ((bits << offset) & mask) | (seg & ~mask);
65433 +      
65434 +      if (seg == newseg)
65435 +        return;
65436 +   
65437 +      map->bitmap[index] = newseg;
65438 +      
65439 +      if (statemap_setmapbit (map->changemap2, index,       1) &&
65440 +         statemap_setmapbit (map->changemap1, index >>= BT_ULSHIFT, 1))
65441 +        statemap_setmapbit (map->changemap0, index >>= BT_ULSHIFT, 1);
65442 +      return;
65443 +   }
65444 +   
65445 +   mask = ((bitmap_t)-1) << offset;
65446 +   seg = map->bitmap[index];
65447 +   newseg = ((bits << offset) & mask) | (seg & ~mask);
65448 +
65449 +   if (seg != newseg)
65450 +   {
65451 +      map->bitmap[index] = newseg;
65452 +      
65453 +      if (statemap_setmapbit (map->changemap2, index,       1) &&
65454 +         statemap_setmapbit (map->changemap1, index >> BT_ULSHIFT, 1))
65455 +        statemap_setmapbit (map->changemap0, index >> (2 * BT_ULSHIFT), 1);
65456 +   }
65457 +   
65458 +   index++;
65459 +   offset = BT_NBIPUL - offset;
65460 +   mask = (((bitmap_t)1) << (nbits - offset)) - 1;
65461 +   seg = map->bitmap[index];
65462 +   newseg = ((bits >> offset) & mask) | (seg & ~mask);
65463 +   
65464 +   if (seg == newseg)
65465 +      return;
65466 +   
65467 +   map->bitmap[index] = newseg;
65468 +   
65469 +   if (statemap_setmapbit (map->changemap2, index,       1) &&
65470 +       statemap_setmapbit (map->changemap1, index >>= BT_ULSHIFT, 1))
65471 +      statemap_setmapbit (map->changemap0, index >>= BT_ULSHIFT, 1);
65472 +}
65473 +
65474 +void
65475 +statemap_zero (statemap_t *dst)
65476 +{
65477 +   int       size       = dst->size;
65478 +   int       offset     = 0;
65479 +   bitmap_t *changemap0 = dst->changemap0;
65480 +   bitmap_t *changemap1 = dst->changemap1;
65481 +   bitmap_t *changemap2 = dst->changemap2;
65482 +   bitmap_t *dstmap     = dst->bitmap;
65483 +   bitmap_t  bit0;
65484 +   bitmap_t  bit1;
65485 +   bitmap_t  bit2;
65486 +
65487 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
65488 +   {
65489 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
65490 +      {
65491 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, offset += BT_NBIPUL)
65492 +        {
65493 +            *dstmap = 0;
65494 +            *changemap2 |= bit2;
65495 +        }
65496 +        *changemap1 |= bit1;
65497 +      }
65498 +      *changemap0 |= bit0;
65499 +   }
65500 +}
65501 +   
65502 +void
65503 +statemap_setmap (statemap_t *dst, statemap_t *src)
65504 +{
65505 +   int       size       = dst->size;
65506 +   int       offset     = 0;
65507 +   bitmap_t *changemap0 = dst->changemap0;
65508 +   bitmap_t *changemap1 = dst->changemap1;
65509 +   bitmap_t *changemap2 = dst->changemap2;
65510 +   bitmap_t *dstmap     = dst->bitmap;
65511 +   bitmap_t *srcmap     = src->bitmap;
65512 +   bitmap_t  bit0;
65513 +   bitmap_t  bit1;
65514 +   bitmap_t  bit2;
65515 +
65516 +   ASSERT (src->size == size);
65517 +   
65518 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
65519 +   {
65520 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
65521 +      {
65522 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, srcmap++, offset += BT_NBIPUL)
65523 +           if (*dstmap != *srcmap)
65524 +           {
65525 +              *dstmap = *srcmap;
65526 +              *changemap2 |= bit2;
65527 +           }
65528 +        if (*changemap2 != 0)
65529 +           *changemap1 |= bit1;
65530 +      }
65531 +      if (*changemap1 != 0)
65532 +        *changemap0 |= bit0;
65533 +   }
65534 +}
65535 +
65536 +void
65537 +statemap_ormap (statemap_t *dst, statemap_t *src)
65538 +{
65539 +   int       size       = dst->size;
65540 +   int       offset     = 0;
65541 +   bitmap_t *changemap0 = dst->changemap0;
65542 +   bitmap_t *changemap1 = dst->changemap1;
65543 +   bitmap_t *changemap2 = dst->changemap2;
65544 +   bitmap_t *dstmap     = dst->bitmap;
65545 +   bitmap_t *srcmap     = src->bitmap;
65546 +   bitmap_t  bit0;
65547 +   bitmap_t  bit1;
65548 +   bitmap_t  bit2;
65549 +   bitmap_t  seg;
65550 +
65551 +   ASSERT (src->size == size);
65552 +   
65553 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
65554 +   {
65555 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
65556 +      {
65557 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, srcmap++, offset += BT_NBIPUL)
65558 +        {
65559 +           seg = *dstmap | *srcmap;
65560 +           if (*dstmap != seg)
65561 +           {
65562 +              *dstmap = seg;
65563 +              *changemap2 |= bit2;
65564 +           }
65565 +        }
65566 +        if (*changemap2 != 0)
65567 +           *changemap1 |= bit1;
65568 +      }
65569 +      if (*changemap1 != 0)
65570 +        *changemap0 |= bit0;
65571 +   }
65572 +}
65573 +
65574 +int
65575 +statemap_findchange (statemap_t *map, bitmap_t *newseg, int clearchange)
65576 +{
65577 +   int          bit0;
65578 +   bitmap_t    *cm1;
65579 +   int          bit1;
65580 +   bitmap_t    *cm2;
65581 +   int          bit2;
65582 +   unsigned int offset;
65583 +
65584 +   bit0 = statemap_firstsegbit (*(map->changemap0));
65585 +   if (bit0 < 0)
65586 +      return (-1);
65587 +
65588 +   offset = bit0;
65589 +   cm1 = map->changemap1 + offset;
65590 +   bit1 = statemap_firstsegbit (*cm1);
65591 +   ASSERT (bit1 >= 0);
65592 +
65593 +   offset = (offset << BT_ULSHIFT) + bit1;
65594 +   cm2 = map->changemap2 + offset;
65595 +   bit2 = statemap_firstsegbit (*cm2);
65596 +   ASSERT (bit2 >= 0);
65597 +   
65598 +   offset = (offset << BT_ULSHIFT) + bit2;
65599 +   *newseg = map->bitmap[offset];
65600 +
65601 +   if (clearchange &&
65602 +       (*cm2 &= ~(((bitmap_t)1) << bit2)) == 0 &&
65603 +       (*cm1 &= ~(((bitmap_t)1) << bit1)) == 0)
65604 +      map->changemap0[0] &= ~(((bitmap_t)1) << bit0);
65605 +
65606 +   return (offset << BT_ULSHIFT);
65607 +}
65608 +
65609 +int
65610 +statemap_changed (statemap_t *map)
65611 +{
65612 +   return ((*(map->changemap0) != 0));
65613 +}
65614 +
65615 +void
65616 +statemap_reset (statemap_t *map)
65617 +{
65618 +   bzero (map->changemap0, map->changemap_nob + map->bitmap_nob);
65619 +}
65620 +
65621 +void
65622 +statemap_copy (statemap_t *dst, statemap_t *src)
65623 +{
65624 +   ASSERT (dst->size == src->size);
65625 +   bcopy (src->changemap0, dst->changemap0, src->changemap_nob + src->bitmap_nob);
65626 +}
65627 +
65628 +void
65629 +statemap_clearchanges (statemap_t *map)
65630 +{
65631 +   if (statemap_changed (map))
65632 +      bzero (map->changemap0, map->changemap_nob);
65633 +}
65634 +
65635 +bitmap_t *
65636 +statemap_tobitmap (statemap_t *map)
65637 +{
65638 +    return (map->bitmap);
65639 +}
65640 +
65641 +statemap_t *
65642 +statemap_create (int size)
65643 +{
65644 +   int   struct_entries     = (sizeof (statemap_t) * 8 + (BT_NBIPUL-1)) >> BT_ULSHIFT;
65645 +   int   bitmap_entries     = (size + (BT_NBIPUL-1)) >> BT_ULSHIFT;
65646 +   int   changemap2_entries = (bitmap_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
65647 +   int   changemap1_entries = (changemap2_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
65648 +   int   changemap0_entries = (changemap1_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
65649 +   int   changemap_entries  = changemap0_entries + changemap1_entries + changemap2_entries;
65650 +   int   nob                = (struct_entries + bitmap_entries + changemap_entries) * sizeof (bitmap_t);
65651 +   statemap_t *map;
65652 +
65653 +   ASSERT ((1 << BT_ULSHIFT) == BT_NBIPUL);
65654 +   ASSERT (changemap0_entries == 1);
65655 +
65656 +   KMEM_ZALLOC (map, statemap_t *, nob, 1);
65657 +
65658 +   map->size = size;
65659 +   map->nob  = nob;
65660 +   map->changemap_nob = changemap_entries * sizeof (bitmap_t);
65661 +   map->bitmap_nob = bitmap_entries * sizeof (bitmap_t);
65662 +   map->changemap0 = ((bitmap_t *)map) + struct_entries;
65663 +   map->changemap1 = map->changemap0 + changemap0_entries;
65664 +   map->changemap2 = map->changemap1 + changemap1_entries;
65665 +   map->bitmap     = map->changemap2 + changemap2_entries;
65666 +
65667 +   return (map);
65668 +}
65669 +
65670 +void
65671 +statemap_destroy (statemap_t *map)
65672 +{
65673 +   KMEM_FREE (map, map->nob);
65674 +}
65675 +
65676 +/*
65677 + * Local variables:
65678 + * c-file-style: "stroustrup"
65679 + * End:
65680 + */
65681 Index: linux-2.4.21/drivers/net/qsnet/ep/statusmon.h
65682 ===================================================================
65683 --- linux-2.4.21.orig/drivers/net/qsnet/ep/statusmon.h  2004-02-23 16:02:56.000000000 -0500
65684 +++ linux-2.4.21/drivers/net/qsnet/ep/statusmon.h       2005-06-01 23:12:54.682426488 -0400
65685 @@ -0,0 +1,44 @@
65686 +/*
65687 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65688 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65689 + *
65690 + *    For licensing information please see the supplied COPYING file
65691 + *
65692 + */
65693 +
65694 +#ident "@(#)$Id: statusmon.h,v 1.6 2003/10/07 13:22:38 david Exp $"
65695 +/*      $Source: /cvs/master/quadrics/epmod/statusmon.h,v $*/
65696 +
65697 +#ifndef __ELAN3_STATUSMON_H
65698 +#define __ELAN3_STATUSMON_H
65699 +
65700 +typedef struct statusmon_node
65701 +{
65702 +    u_int      NodeId;
65703 +    u_int      State;
65704 +} STATUSMON_SGMT;
65705 +
65706 +typedef struct statusmon_level
65707 +{
65708 +    unsigned      Width;
65709 +    STATUSMON_SGMT Nodes[CM_SGMTS_PER_LEVEL];
65710 +} STATUSMON_LEVEL;
65711 +
65712 +typedef struct statusmon_msg
65713 +{
65714 +    unsigned       Type;
65715 +    unsigned       NodeId;
65716 +    unsigned       NumLevels;
65717 +    unsigned       TopLevel;
65718 +    unsigned        Role;
65719 +    STATUSMON_LEVEL Levels[CM_MAX_LEVELS];
65720 +} STATUSMON_MSG;
65721 +
65722 +
65723 +#endif /* __ELAN3_STATUSMON_H */
65724 +
65725 +/*
65726 + * Local variables:
65727 + * c-file-style: "stroustrup"
65728 + * End:
65729 + */
65730 Index: linux-2.4.21/drivers/net/qsnet/ep/support.c
65731 ===================================================================
65732 --- linux-2.4.21.orig/drivers/net/qsnet/ep/support.c    2004-02-23 16:02:56.000000000 -0500
65733 +++ linux-2.4.21/drivers/net/qsnet/ep/support.c 2005-06-01 23:12:54.683426336 -0400
65734 @@ -0,0 +1,109 @@
65735 +/*
65736 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65737 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65738 + *
65739 + *    For licensing information please see the supplied COPYING file
65740 + *
65741 + */
65742 +
65743 +#ident "@(#)$Id: support.c,v 1.37.8.1 2004/09/30 15:01:53 david Exp $"
65744 +/*      $Source: /cvs/master/quadrics/epmod/support.c,v $ */
65745 +
65746 +#include <qsnet/kernel.h>
65747 +#include <elan/kcomm.h>
65748 +
65749 +/****************************************************************************************/
65750 +/*
65751 + * Nodeset/flush callbacks.
65752 + */
65753 +int
65754 +ep_register_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg)
65755 +{
65756 +    EP_CALLBACK *cb;
65757 +    
65758 +    KMEM_ALLOC (cb, EP_CALLBACK *, sizeof (EP_CALLBACK), 1);
65759 +    
65760 +    cb->Routine = routine;
65761 +    cb->Arg     = arg;
65762 +
65763 +    kmutex_lock (&rail->CallbackLock);
65764 +    cb->Next = rail->CallbackList[idx];
65765 +    rail->CallbackList[idx] = cb;
65766 +    kmutex_unlock (&rail->CallbackLock);
65767 +    
65768 +    return (ESUCCESS);
65769 +}
65770 +
65771 +void
65772 +ep_remove_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg)
65773 +{
65774 +    EP_CALLBACK  *cb;
65775 +    EP_CALLBACK **predp;
65776 +
65777 +    kmutex_lock (&rail->CallbackLock);
65778 +    for (predp = &rail->CallbackList[idx]; (cb = *predp); predp = &cb->Next)
65779 +       if (cb->Routine == routine && cb->Arg == arg)
65780 +           break;
65781 +
65782 +    if (cb == NULL)
65783 +       panic ("ep_remove_member_callback");
65784 +    
65785 +    *predp = cb->Next;
65786 +    kmutex_unlock (&rail->CallbackLock);
65787 +    
65788 +    KMEM_FREE (cb, sizeof (EP_CALLBACK));
65789 +}
65790 +
65791 +void
65792 +ep_call_callbacks (EP_RAIL *rail, unsigned idx, statemap_t *map)
65793 +{
65794 +    EP_CALLBACK *cb;
65795 +
65796 +    kmutex_lock (&rail->CallbackLock);
65797 +
65798 +    rail->CallbackStep = idx;
65799 +
65800 +    for (cb = rail->CallbackList[idx]; cb; cb = cb->Next) {
65801 +       (cb->Routine) (cb->Arg, map);
65802 +    }
65803 +    kmutex_unlock (&rail->CallbackLock);
65804 +}
65805 +
65806 +unsigned int
65807 +ep_backoff (EP_BACKOFF *backoff, int type)
65808 +{
65809 +    static int bcount[EP_NUM_BACKOFF] = {1, 16, 32, 64, 128, 256, 512, 1024};
65810 +    
65811 +    if (backoff->type != type)
65812 +    {
65813 +       backoff->type  = type;
65814 +       backoff->indx  = 0;
65815 +       backoff->count = 0;
65816 +    }
65817 +
65818 +    if (++backoff->count > bcount[backoff->indx] && backoff->indx < (EP_NUM_BACKOFF-1))
65819 +    {
65820 +       backoff->indx++;
65821 +       backoff->count = 0;
65822 +    }
65823 +
65824 +    return (backoff->indx);
65825 +}
65826 +
65827 +/* Generic checksum algorithm */
65828 +uint16_t
65829 +CheckSum (char *msg, int nob)
65830 +{
65831 +    uint16_t sum = 0;
65832 +   
65833 +    while (nob-- > 0)
65834 +       sum = sum * 13 + *msg++;
65835 +
65836 +    return (sum);
65837 +}
65838 +
65839 +/*
65840 + * Local variables:
65841 + * c-file-style: "stroustrup"
65842 + * End:
65843 + */
65844 Index: linux-2.4.21/drivers/net/qsnet/ep/support_elan3.c
65845 ===================================================================
65846 --- linux-2.4.21.orig/drivers/net/qsnet/ep/support_elan3.c      2004-02-23 16:02:56.000000000 -0500
65847 +++ linux-2.4.21/drivers/net/qsnet/ep/support_elan3.c   2005-06-01 23:12:54.687425728 -0400
65848 @@ -0,0 +1,2111 @@
65849 +/*
65850 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65851 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65852 + *
65853 + *    For licensing information please see the supplied COPYING file
65854 + *
65855 + */
65856 +
65857 +#ident "@(#)$Id: support_elan3.c,v 1.42.8.3 2004/11/12 10:54:51 mike Exp $"
65858 +/*      $Source: /cvs/master/quadrics/epmod/support_elan3.c,v $ */
65859 +
65860 +#include <qsnet/kernel.h>
65861 +#include <qsnet/kthread.h>
65862 +
65863 +#include <elan/kcomm.h>
65864 +#include <elan/epsvc.h>
65865 +#include <elan/epcomms.h>
65866 +
65867 +#include "kcomm_vp.h"
65868 +#include "kcomm_elan3.h"
65869 +#include "epcomms_elan3.h"
65870 +#include "debug.h"
65871 +
65872 +#include <elan3/thread.h>
65873 +#include <elan3/urom_addrs.h>
65874 +
65875 +/****************************************************************************************/
65876 +#define DMA_RING_NEXT_POS(ring)      ((ring)->Position+1 == ring->Entries ? 0 : ((ring)->Position+1))
65877 +#define DMA_RING_PREV_POS(ring,pos)  ((pos) == 0 ? (ring)->Entries-1 : (pos) - 1)
65878 +
65879 +static int 
65880 +DmaRingCreate (EP3_RAIL *rail, EP3_DMA_RING *ring, int ctxnum, int entries)
65881 +{
65882 +    unsigned long pgnum = (ctxnum * sizeof (E3_CommandPort)) / PAGE_SIZE;
65883 +    unsigned long pgoff = (ctxnum * sizeof (E3_CommandPort)) & (PAGE_SIZE-1);
65884 +    int           s;    
65885 +        
65886 +    /* set up the initial position */
65887 +    ring->Entries  = entries;
65888 +    ring->Position = 0;
65889 +    
65890 +    if (! (ring->pEvent = ep_alloc_elan (&rail->Generic, entries * sizeof (E3_BlockCopyEvent), 0, &ring->epEvent)))
65891 +    {
65892 +       ring->CommandPort = (ioaddr_t) NULL;
65893 +       return (ENOMEM);
65894 +    }
65895 +    
65896 +    if (! (ring->pDma = ep_alloc_elan (&rail->Generic, entries * sizeof (E3_DMA), 0, &ring->epDma)))
65897 +    {
65898 +       ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent));
65899 +
65900 +       ring->CommandPort = (ioaddr_t) NULL;
65901 +       return (ENOMEM);
65902 +    }
65903 +    
65904 +    if (! (ring->pDoneBlk = ep_alloc_main (&rail->Generic, entries * sizeof (E3_uint32), 0, &ring->epDoneBlk)))
65905 +    {
65906 +       ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent));
65907 +       ep_free_elan (&rail->Generic, ring->epDma,   entries * sizeof (E3_DMA));
65908 +
65909 +       ring->CommandPort = (ioaddr_t) NULL;
65910 +       return (ENOMEM);
65911 +    }
65912 +    
65913 +    if (MapDeviceRegister (rail->Device, ELAN3_BAR_COMMAND_PORT, &ring->CommandPage, pgnum * PAGE_SIZE, PAGE_SIZE, &ring->CommandPageHandle) != ESUCCESS)
65914 +    {
65915 +       ep_free_elan (&rail->Generic, ring->epEvent,   entries * sizeof (E3_BlockCopyEvent));
65916 +       ep_free_elan (&rail->Generic, ring->epDma,     entries * sizeof (E3_DMA));
65917 +       ep_free_main (&rail->Generic, ring->epDoneBlk, entries * sizeof (E3_uint32));
65918 +
65919 +       ring->CommandPort = (ioaddr_t) NULL;
65920 +       return (ENOMEM);
65921 +    }
65922 +    ring->CommandPort = ring->CommandPage + pgoff;
65923 +       
65924 +    for (s = 0; s < entries; s++)
65925 +    {
65926 +       /* setup the event */
65927 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Type),   
65928 +                          EV_TYPE_BCOPY | EV_TYPE_DMA | DMA_RING_DMA_ELAN(ring, s));
65929 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Source), DMA_RING_DMA_ELAN(ring,s)  | EV_WCOPY);
65930 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Dest),   DMA_RING_DONE_ELAN(ring,s) | EV_TYPE_BCOPY_WORD );         
65931 +
65932 +       /* need to set all the doneBlks to appear that they have completed */
65933 +       ring->pDoneBlk[s] = DMA_RING_DMA_ELAN(ring,s)  | EV_WCOPY;
65934 +    }
65935 +
65936 +    return 0; /* success */
65937 +}
65938 +
65939 +static void
65940 +DmaRingRelease(EP3_RAIL *rail, EP3_DMA_RING *ring)
65941 +{
65942 +    if (ring->CommandPage != (ioaddr_t) 0)
65943 +    {
65944 +       UnmapDeviceRegister(rail->Device, &ring->CommandPageHandle);
65945 +
65946 +       ep_free_elan (&rail->Generic, ring->epEvent,   ring->Entries * sizeof (E3_BlockCopyEvent));
65947 +       ep_free_elan (&rail->Generic, ring->epDma,     ring->Entries * sizeof (E3_DMA));
65948 +       ep_free_main (&rail->Generic, ring->epDoneBlk, ring->Entries * sizeof (E3_uint32));
65949 +    }
65950 +    ring->CommandPage = (ioaddr_t) 0;
65951 +}
65952 +
65953 +void 
65954 +DmaRingsRelease (EP3_RAIL *rail)
65955 +{
65956 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_CRITICAL]);
65957 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_HIGH_PRI]);
65958 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_LOW_PRI]);
65959 +}
65960 +
65961 +int 
65962 +DmaRingsCreate (EP3_RAIL *rail)
65963 +{
65964 +    if (DmaRingCreate (rail, &rail->DmaRings[EP3_RING_CRITICAL], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_CRITICAL, EP3_RING_CRITICAL_LEN) ||
65965 +       DmaRingCreate (rail, &rail->DmaRings[EP3_RING_HIGH_PRI], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_HIGH_PRI, EP3_RING_HIGH_PRI_LEN) ||
65966 +       DmaRingCreate (rail, &rail->DmaRings[EP3_RING_LOW_PRI],  ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_LOW_PRI,  EP3_RING_LOW_PRI_LEN))
65967 +    {
65968 +       DmaRingsRelease (rail);
65969 +       return (ENOMEM);
65970 +    }
65971 +  
65972 +    return 0;
65973 +}
65974 +
65975 +static int 
65976 +DmaRingNextSlot (EP3_DMA_RING *ring)
65977 +{
65978 +    int pos  = ring->Position;
65979 +    int npos = DMA_RING_NEXT_POS(ring);
65980 +
65981 +    if (ring->pDoneBlk[npos] == EP3_EVENT_ACTIVE)
65982 +       return (-1);
65983 +    
65984 +    ring->pDoneBlk[pos] = EP3_EVENT_ACTIVE;
65985 +
65986 +    ring->Position = npos; /* move on one */
65987 +
65988 +    return (pos);
65989 +}
65990 +
65991 +
65992 +/****************************************************************************************/
65993 +/*
65994 + * Dma/event command issueing - these handle cproc queue overflow traps.
65995 + */
65996 +static int
65997 +DmaRunQueueSizeCheck (EP3_RAIL *rail, E3_uint32 len)
65998 +{
65999 +    E3_uint64  FandBPtr = read_reg64 (rail->Device, DProc_SysCntx_FPtr);
66000 +    E3_uint32  FPtr, BPtr;
66001 +    E3_uint32  qlen;
66002 +
66003 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
66004 +    FPtr = (FandBPtr & 0xFFFFFFFFull);
66005 +    BPtr = (FandBPtr >> 32);
66006 +#else
66007 +    FPtr = (FandBPtr >> 32);
66008 +    BPtr = (FandBPtr & 0xFFFFFFFFull);
66009 +#endif
66010 +    
66011 +    qlen = (((BPtr - FPtr)/sizeof (E3_DMA)) & (E3_SysCntxQueueSize-1));
66012 +    
66013 +    if      (qlen < 4)   IncrStat (rail, DmaQueueLength[0]);
66014 +    else if (qlen < 8)   IncrStat (rail, DmaQueueLength[1]);
66015 +    else if (qlen < 16)  IncrStat (rail, DmaQueueLength[2]);
66016 +    else if (qlen < 32)  IncrStat (rail, DmaQueueLength[3]);
66017 +    else if (qlen < 64)  IncrStat (rail, DmaQueueLength[4]);
66018 +    else if (qlen < 128) IncrStat (rail, DmaQueueLength[5]);
66019 +    else if (qlen < 240) IncrStat (rail, DmaQueueLength[6]);
66020 +    else                 IncrStat (rail, DmaQueueLength[7]);
66021 +       
66022 +    return (qlen < len);
66023 +}
66024 +
66025 +int
66026 +IssueDma (EP3_RAIL *rail, E3_DMA_BE * dmabe, int type, int retryThread)
66027 +{
66028 +    ELAN3_DEV     *dev = rail->Device;
66029 +    EP3_RETRY_DMA *retry;
66030 +    EP3_DMA_RING  *ring;
66031 +    int           slot;
66032 +    int           i, res;
66033 +    unsigned long flags;
66034 +
66035 +    ASSERT (dmabe->s.dma_direction == DMA_WRITE || dmabe->s.dma_direction == DMA_READ_REQUEUE);
66036 +
66037 +    ASSERT (! EP_VP_ISDATA(dmabe->s.dma_destVProc) ||
66038 +           (dmabe->s.dma_direction == DMA_WRITE ? 
66039 +            EP_VP_TO_NODE(dmabe->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
66040 +            EP_VP_TO_NODE(dmabe->s.dma_destVProc) == rail->Generic.Position.pos_nodeid));
66041 +    
66042 +    /*
66043 +     * If we're not the retry thread - then don't issue this DMA
66044 +     * if there are any already queued on the retry lists with
66045 +     * higher or equal priority than this one that are ready to
66046 +     * retry.
66047 +     */
66048 +    if (! retryThread)
66049 +    {
66050 +       for (i = EP_RETRY_BASE; i < type; i++)
66051 +       {
66052 +           if (list_empty (&rail->DmaRetries[i]))
66053 +               continue;
66054 +
66055 +           retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
66056 +               
66057 +           if (AFTER (lbolt, retry->RetryTime))
66058 +           {
66059 +               IncrStat (rail, IssueDmaFail[type]);
66060 +               return (ISSUE_COMMAND_RETRY);
66061 +           }
66062 +       }
66063 +    }
66064 +
66065 +    /*
66066 +     * Depending on the type of DMA we're issuing - throttle back
66067 +     * issueing of it if the DMA run queue is too full.  This then
66068 +     * prioritises the "special" messages and completing data 
66069 +     * transfers which have matched a receive buffer.
66070 +     */
66071 +
66072 +    if (type >= EP_RETRY_LOW_PRI_RETRY)
66073 +    {
66074 +       if (! DmaRunQueueSizeCheck (rail, E3_SysCntxQueueSize / 2))
66075 +       {
66076 +           IncrStat (rail, IssueDmaFail[type]);
66077 +           return (ISSUE_COMMAND_RETRY);
66078 +       }
66079 +       ring = &rail->DmaRings[EP3_RING_LOW_PRI];
66080 +    } 
66081 +    else if (type == EP_RETRY_LOW_PRI)
66082 +    {
66083 +       if (! DmaRunQueueSizeCheck (rail, E3_SysCntxQueueSize / 3))
66084 +       {
66085 +           IncrStat (rail, IssueDmaFail[type]);
66086 +           return (ISSUE_COMMAND_RETRY);
66087 +       }
66088 +       ring = &rail->DmaRings[EP3_RING_LOW_PRI];
66089 +    }
66090 +    else if (type >= EP_RETRY_HIGH_PRI)
66091 +       ring = &rail->DmaRings[EP3_RING_HIGH_PRI];
66092 +    else
66093 +       ring = &rail->DmaRings[EP3_RING_CRITICAL];
66094 +
66095 +    local_irq_save (flags);
66096 +    if (! spin_trylock (&dev->CProcLock))
66097 +    {
66098 +       IncrStat (rail, IssueDmaFail[type]);
66099 +
66100 +       res = ISSUE_COMMAND_RETRY;
66101 +    }
66102 +    else
66103 +    {
66104 +       if ((slot = DmaRingNextSlot (ring)) == -1)
66105 +       {
66106 +           IncrStat (rail, IssueDmaFail[type]);
66107 +           
66108 +           res = ISSUE_COMMAND_RETRY;
66109 +       }
66110 +       else
66111 +       {
66112 +           EPRINTF4 (DBG_COMMAND, "IssueDma: type %08x size %08x Elan source %08x Elan dest %08x\n",
66113 +                     dmabe->s.dma_type, dmabe->s.dma_size, dmabe->s.dma_source, dmabe->s.dma_dest);
66114 +           EPRINTF2 (DBG_COMMAND, "          dst event %08x cookie/proc %08x\n",
66115 +                     dmabe->s.dma_destEvent, dmabe->s.dma_destCookieVProc);
66116 +           EPRINTF2 (DBG_COMMAND, "          src event %08x cookie/proc %08x\n",
66117 +                     dmabe->s.dma_srcEvent, dmabe->s.dma_srcCookieVProc);
66118 +
66119 +           elan3_sdram_copyq_to_sdram (dev,  dmabe,  DMA_RING_DMA(ring, slot), sizeof (E3_DMA));                       /* PCI write block */
66120 +           elan3_sdram_writel (dev, DMA_RING_EVENT(ring, slot) + offsetof (E3_BlockCopyEvent, ev_Count), 1);   /* PCI write */
66121 +           
66122 +           mb();                                                               /* ensure writes to main memory completed */
66123 +           writel (DMA_RING_EVENT_ELAN(ring,slot), ring->CommandPort + offsetof (E3_CommandPort, SetEvent));
66124 +           mmiob();                                                            /* and flush through IO writes */
66125 +           
66126 +           res = ISSUE_COMMAND_OK;
66127 +       }
66128 +       spin_unlock (&dev->CProcLock);
66129 +    }
66130 +    local_irq_restore (flags);
66131 +
66132 +    return (res);
66133 +}
66134 +
66135 +int
66136 +IssueWaitevent (EP3_RAIL *rail, E3_Addr value)
66137 +{
66138 +    ELAN3_DEV     *dev   = rail->Device;
66139 +    int           res;
66140 +    unsigned long flags;
66141 +    
66142 +    spin_lock_irqsave (&dev->IntrLock, flags);
66143 +
66144 +    ASSERT (rail->CommandPortEventTrap == FALSE);
66145 +
66146 +    /*
66147 +     * Disable the command processor interrupts, so that we don't see
66148 +     * spurious interrupts appearing.
66149 +     */
66150 +    DISABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
66151 +
66152 +    EPRINTF1 (DBG_COMMAND, "IssueWaitevent: %08x\n", value);
66153 +
66154 +    mb();                                                              /* ensure writes to main memory completed */
66155 +    writel (value, rail->CommandPort + offsetof (E3_CommandPort, WaitEvent0));
66156 +    mmiob();                                                           /* and flush through IO writes */
66157 +    
66158 +    do {
66159 +       res = CheckCommandQueueFlushed (rail->Ctxt, EventComQueueNotEmpty, ISSUE_COMMAND_CANT_WAIT, &flags);
66160 +
66161 +       EPRINTF1 (DBG_COMMAND, "IssueWaitevent: CheckCommandQueueFlushed -> %d\n", res);
66162 +
66163 +       if (res == ISSUE_COMMAND_WAIT)
66164 +           HandleCProcTrap (dev, 0, NULL);
66165 +    } while (res != ISSUE_COMMAND_OK);
66166 +
66167 +    if (! rail->CommandPortEventTrap)
66168 +       res = ISSUE_COMMAND_OK;
66169 +    else
66170 +    {
66171 +       rail->CommandPortEventTrap = FALSE;
66172 +       res = ISSUE_COMMAND_TRAPPED;
66173 +    }
66174 +
66175 +    EPRINTF1 (DBG_COMMAND, "IssueWaitevent: -> %d\n", res);
66176 +
66177 +    /*
66178 +     * Re-enable the command processor interrupt as we've finished 
66179 +     * polling it.
66180 +     */
66181 +    ENABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
66182 +
66183 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
66184 +
66185 +    return (res);
66186 +}
66187 +
66188 +void
66189 +IssueSetevent (EP3_RAIL *rail, E3_Addr value)
66190 +{
66191 +    EPRINTF1 (DBG_COMMAND, "IssueSetevent: %08x\n", value);
66192 +
66193 +    mb();                                                              /* ensure writes to main memory completed */
66194 +    writel (value, rail->CommandPort + offsetof (E3_CommandPort, SetEvent));
66195 +    mmiob();                                                           /* and flush through IO writes */
66196 +}
66197 +
66198 +void
66199 +IssueRunThread (EP3_RAIL *rail, E3_Addr value)
66200 +{
66201 +    EPRINTF1 (DBG_COMMAND, "IssueRunThread: %08x\n", value);
66202 +
66203 +    mb();                                                              /* ensure writes to main memory completed */
66204 +    writel (value, rail->CommandPort + offsetof (E3_CommandPort, RunThread));
66205 +    mmiob();                                                           /* and flush through IO writes */
66206 +}
66207 +
66208 +/****************************************************************************************/
66209 +/*
66210 + * DMA retry list management
66211 + */
66212 +static unsigned DmaRetryTimes[EP_NUM_RETRIES]; 
66213 +
66214 +static void
66215 +ep3_dma_retry (EP3_RAIL *rail)
66216 +{
66217 +    EP3_COOKIE    *cp;
66218 +    int            res;
66219 +    int                   vp;
66220 +    unsigned long  flags;
66221 +    int            i;
66222 +
66223 +    kernel_thread_init("ep3_dma_retry");
66224 +
66225 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66226 +
66227 +    for (;;)
66228 +    {
66229 +       long yieldAt   = lbolt + (hz/10);
66230 +       long retryTime = 0;
66231 +
66232 +       if (rail->DmaRetryThreadShouldStop)
66233 +           break;
66234 +       
66235 +       for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
66236 +       {
66237 +           while (! list_empty (&rail->DmaRetries[i]))
66238 +           {
66239 +               EP3_RETRY_DMA *retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
66240 +
66241 +               if (! AFTER (lbolt, retry->RetryTime))
66242 +                   break;
66243 +               
66244 +               if (rail->DmaRetryThreadShouldStall || AFTER (lbolt, yieldAt))
66245 +                   goto cant_do_more;
66246 +
66247 +               EPRINTF2 (DBG_RETRY, "%s: DmaRetryThread: retry %p\n", rail->Generic.Name, retry);
66248 +               EPRINTF5 (DBG_RETRY, "%s:                 %08x %08x %08x %08x\n",
66249 +                         rail->Generic.Name, retry->Dma.s.dma_type, retry->Dma.s.dma_size, retry->Dma.s.dma_source, retry->Dma.s.dma_dest);
66250 +               EPRINTF5 (DBG_RETRY, "%s:                 %08x %08x %08x %08x\n",
66251 +                         rail->Generic.Name, retry->Dma.s.dma_destEvent, retry->Dma.s.dma_destCookieVProc,
66252 +                         retry->Dma.s.dma_srcEvent, retry->Dma.s.dma_srcCookieVProc);
66253 +#if defined(DEBUG)
66254 +               if (retry->Dma.s.dma_direction == DMA_WRITE)
66255 +                   cp = LookupEventCookie (rail, &rail->CookieTable, retry->Dma.s.dma_srcEvent);
66256 +               else
66257 +                   cp = LookupEventCookie (rail, &rail->CookieTable, retry->Dma.s.dma_destEvent);
66258 +
66259 +               ASSERT (cp != NULL || (retry->Dma.s.dma_srcEvent == 0 && retry->Dma.s.dma_direction == DMA_WRITE && retry->Dma.s.dma_isRemote));
66260 +               
66261 +               if (cp && cp->Operations->DmaVerify)
66262 +                   cp->Operations->DmaVerify (rail, cp->Arg, &retry->Dma);
66263 +#endif
66264 +
66265 +#if defined(DEBUG_ASSERT)
66266 +               if (retry->Dma.s.dma_direction == DMA_WRITE)
66267 +                   vp = retry->Dma.s.dma_destVProc;
66268 +               else
66269 +                   vp = retry->Dma.s.dma_srcVProc;
66270 +
66271 +               ASSERT (!EP_VP_ISDATA(vp) || 
66272 +                       (rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State >= EP_NODE_CONNECTED &&
66273 +                        rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State <= EP_NODE_LOCAL_PASSIVATE));
66274 +#endif
66275 +               spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66276 +               res = IssueDma (rail, &(retry->Dma), i, TRUE);
66277 +               spin_lock_irqsave (&rail->DmaRetryLock, flags);
66278 +               
66279 +               if (res != ISSUE_COMMAND_OK)
66280 +                   goto cant_do_more;
66281 +               
66282 +               /* Command issued, so remove from list, and add to free list */
66283 +               list_del (&retry->Link);
66284 +               list_add (&retry->Link, &rail->DmaRetryFreeList);
66285 +           }
66286 +       }
66287 +    cant_do_more:
66288 +       
66289 +       for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
66290 +       {
66291 +           if (!list_empty (&rail->DmaRetries[i]))
66292 +           {
66293 +               EP3_RETRY_DMA *retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
66294 +
66295 +               retryTime = retryTime ? MIN(retryTime, retry->RetryTime) : retry->RetryTime;
66296 +           }
66297 +       }
66298 +
66299 +       if (retryTime && !AFTER (retryTime, lbolt))
66300 +           retryTime = lbolt + 1;
66301 +
66302 +       do {
66303 +           EPRINTF3 (DBG_RETRY, "%s: ep_cm_retry: %s %lx\n", rail->Generic.Name, rail->DmaRetryThreadShouldStall ? "stalled" : "sleeping", retryTime);
66304 +           
66305 +           if (rail->DmaRetryTime == 0 || (retryTime != 0 && retryTime < rail->DmaRetryTime))
66306 +               rail->DmaRetryTime = retryTime;
66307 +           
66308 +           rail->DmaRetrySleeping = TRUE;
66309 +           
66310 +           if (rail->DmaRetryThreadShouldStall)                                        /* wakeup threads waiting in StallDmaRetryThread */
66311 +               kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);  /* for us to really go to sleep for good. */
66312 +
66313 +           if (rail->DmaRetryTime == 0 || rail->DmaRetryThreadShouldStall)
66314 +               kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
66315 +           else
66316 +               kcondvar_timedwait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags, rail->DmaRetryTime);
66317 +
66318 +           rail->DmaRetrySleeping = FALSE;
66319 +
66320 +       } while (rail->DmaRetryThreadShouldStall);
66321 +
66322 +       rail->DmaRetryTime = 0;
66323 +    }
66324 +
66325 +    rail->DmaRetryThreadStopped = 1;
66326 +    kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);
66327 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66328 +
66329 +    kernel_thread_exit();
66330 +}
66331 +
66332 +void
66333 +StallDmaRetryThread (EP3_RAIL *rail)
66334 +{
66335 +    unsigned long flags;
66336 +
66337 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66338 +    rail->DmaRetryThreadShouldStall++;
66339 +
66340 +    while (! rail->DmaRetrySleeping)
66341 +       kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
66342 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66343 +}
66344 +
66345 +void 
66346 +ResumeDmaRetryThread (EP3_RAIL *rail)
66347 +{
66348 +    unsigned long flags;
66349 +
66350 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66351 +
66352 +    ASSERT (rail->DmaRetrySleeping);
66353 +
66354 +    if (--rail->DmaRetryThreadShouldStall == 0)
66355 +    {
66356 +       rail->DmaRetrySleeping = 0;
66357 +       kcondvar_wakeupone (&rail->DmaRetryWait, &rail->DmaRetryLock);
66358 +    }
66359 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66360 +}
66361 +
66362 +int
66363 +InitialiseDmaRetries (EP3_RAIL *rail)
66364 +{
66365 +    int i;
66366 +
66367 +    spin_lock_init (&rail->DmaRetryLock);
66368 +    kcondvar_init (&rail->DmaRetryWait);
66369 +
66370 +    for (i = 0; i < EP_NUM_RETRIES; i++)
66371 +       INIT_LIST_HEAD (&rail->DmaRetries[i]);
66372 +
66373 +    INIT_LIST_HEAD (&rail->DmaRetryFreeList);
66374 +
66375 +    DmaRetryTimes[EP_RETRY_HIGH_PRI]  = EP_RETRY_HIGH_PRI_TIME;
66376 +
66377 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
66378 +       DmaRetryTimes[EP_RETRY_HIGH_PRI_RETRY+i] = EP_RETRY_HIGH_PRI_TIME << i;
66379 +    
66380 +    DmaRetryTimes[EP_RETRY_LOW_PRI] = EP_RETRY_LOW_PRI_TIME;
66381 +
66382 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
66383 +       DmaRetryTimes[EP_RETRY_LOW_PRI_RETRY+i] = EP_RETRY_LOW_PRI_TIME << i;
66384 +    
66385 +    DmaRetryTimes[EP_RETRY_ANONYMOUS] = EP_RETRY_ANONYMOUS_TIME;
66386 +    DmaRetryTimes[EP_RETRY_NETERR]    = EP_RETRY_NETERR_TIME;
66387 +
66388 +    rail->DmaRetryInitialised = 1;
66389 +
66390 +    if (kernel_thread_create (ep3_dma_retry, (void *) rail) == 0)
66391 +    {
66392 +       spin_lock_destroy (&rail->DmaRetryLock);
66393 +       return (ENOMEM);
66394 +    }
66395 +
66396 +    rail->DmaRetryThreadStarted = 1;
66397 +
66398 +    return (ESUCCESS);
66399 +}
66400 +
66401 +void
66402 +DestroyDmaRetries (EP3_RAIL *rail)
66403 +{
66404 +    unsigned long flags;
66405 +
66406 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66407 +    rail->DmaRetryThreadShouldStop = 1;
66408 +    while (rail->DmaRetryThreadStarted && !rail->DmaRetryThreadStopped)
66409 +    {
66410 +       kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);
66411 +       kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
66412 +    }
66413 +    rail->DmaRetryThreadStarted = 0;
66414 +    rail->DmaRetryThreadStopped = 0;
66415 +    rail->DmaRetryThreadShouldStop = 0;
66416 +    rail->DmaRetryInitialised = 0;
66417 +
66418 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66419 +
66420 +    /* Everyone should have given back their retry dma's by now */
66421 +    ASSERT (rail->DmaRetryReserved == 0);
66422 +
66423 +    while (! list_empty (&rail->DmaRetryFreeList))
66424 +    {
66425 +       EP3_RETRY_DMA *retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
66426 +       
66427 +       list_del (&retry->Link);
66428 +
66429 +       KMEM_FREE (retry, sizeof (EP3_RETRY_DMA));
66430 +    }
66431 +
66432 +    kcondvar_destroy (&rail->DmaRetryWait);
66433 +    spin_lock_destroy (&rail->DmaRetryLock);
66434 +}
66435 +
66436 +int
66437 +ReserveDmaRetries (EP3_RAIL *rail, int count, EP_ATTRIBUTE attr)
66438 +{
66439 +    EP3_RETRY_DMA *retry;
66440 +    int                  remaining = count;
66441 +    unsigned long flags;
66442 +
66443 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66444 +    
66445 +    if (remaining <= (rail->DmaRetryCount - rail->DmaRetryReserved))
66446 +    {
66447 +       rail->DmaRetryReserved += remaining;
66448 +
66449 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66450 +       return (ESUCCESS);
66451 +    }
66452 +
66453 +    remaining -= (rail->DmaRetryCount - rail->DmaRetryReserved);
66454 +
66455 +    rail->DmaRetryReserved = rail->DmaRetryCount;
66456 +
66457 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66458 +
66459 +    while (remaining)
66460 +    {
66461 +       KMEM_ALLOC (retry, EP3_RETRY_DMA *, sizeof (EP3_RETRY_DMA), !(attr & EP_NO_SLEEP));
66462 +       
66463 +       if (retry == NULL)
66464 +           goto failed;
66465 +
66466 +       /* clear E3_DMA */
66467 +       bzero((char *)(&(retry->Dma.s)), sizeof(E3_DMA));
66468 +
66469 +       remaining--; 
66470 +
66471 +       spin_lock_irqsave (&rail->DmaRetryLock, flags);
66472 +
66473 +       list_add (&retry->Link, &rail->DmaRetryFreeList);
66474 +
66475 +       rail->DmaRetryCount++;
66476 +       rail->DmaRetryReserved++;
66477 +
66478 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66479 +    }
66480 +    return (ESUCCESS);
66481 +
66482 + failed:
66483 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66484 +    rail->DmaRetryReserved -= (count - remaining);
66485 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66486 +    return (ENOMEM);
66487 +}
66488 +
66489 +void
66490 +ReleaseDmaRetries (EP3_RAIL *rail, int count)
66491 +{
66492 +    unsigned long flags;
66493 +
66494 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66495 +    rail->DmaRetryReserved -= count;
66496 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66497 +}
66498 +
66499 +void
66500 +QueueDmaForRetry (EP3_RAIL *rail, E3_DMA_BE *dma, int interval)
66501 +{
66502 +    EP3_RETRY_DMA *retry;
66503 +    unsigned long flags;
66504 +
66505 +    /*
66506 +     * When requeueing DMAs they must never be "READ" dma's since
66507 +     * these would fetch the DMA descriptor from the retry descriptor
66508 +     */
66509 +    ASSERT (dma->s.dma_direction == DMA_WRITE || dma->s.dma_direction == DMA_READ_REQUEUE);
66510 +    ASSERT (dma->s.dma_direction == DMA_WRITE ? 
66511 +           EP_VP_TO_NODE(dma->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
66512 +           EP_VP_TO_NODE(dma->s.dma_destVProc) == rail->Generic.Position.pos_nodeid);
66513 +
66514 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66515 +    
66516 +    EP_ASSERT (&rail->Generic, !list_empty (&rail->DmaRetryFreeList));
66517 +
66518 +    /* take an item off the free list */
66519 +    retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
66520 +
66521 +    list_del (&retry->Link);
66522 +    
66523 +    EPRINTF5 (DBG_RETRY, "%s: QueueDmaForRetry: %08x %08x %08x %08x\n", rail->Generic.Name,
66524 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
66525 +    EPRINTF5 (DBG_RETRY, "%s:                   %08x %08x %08x %08x\n",rail->Generic.Name,
66526 +            dma->s.dma_destEvent, dma->s.dma_destCookieVProc,
66527 +            dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
66528 +
66529 +    /* copy the DMA into the retry descriptor */
66530 +    retry->Dma.s.dma_type            = dma->s.dma_type;
66531 +    retry->Dma.s.dma_size            = dma->s.dma_size;
66532 +    retry->Dma.s.dma_source          = dma->s.dma_source;
66533 +    retry->Dma.s.dma_dest            = dma->s.dma_dest;
66534 +    retry->Dma.s.dma_destEvent       = dma->s.dma_destEvent;
66535 +    retry->Dma.s.dma_destCookieVProc = dma->s.dma_destCookieVProc;
66536 +    retry->Dma.s.dma_srcEvent        = dma->s.dma_srcEvent;
66537 +    retry->Dma.s.dma_srcCookieVProc  = dma->s.dma_srcCookieVProc;
66538 +
66539 +    retry->RetryTime = lbolt + DmaRetryTimes[interval];
66540 +
66541 +    /* chain onto the end of the appropriate retry list */
66542 +    list_add_tail (&retry->Link, &rail->DmaRetries[interval]);
66543 +
66544 +    /* now wakeup the retry thread */
66545 +    if (rail->DmaRetryTime == 0 || retry->RetryTime < rail->DmaRetryTime)
66546 +       rail->DmaRetryTime = retry->RetryTime;
66547 +    
66548 +    if (rail->DmaRetrySleeping && !rail->DmaRetryThreadShouldStall)
66549 +    {
66550 +       rail->DmaRetrySleeping = 0;
66551 +       kcondvar_wakeupone (&rail->DmaRetryWait, &rail->DmaRetryLock);
66552 +    }
66553 +
66554 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66555 +}
66556 +
66557 +void
66558 +QueueDmaOnStalledList (EP3_RAIL *rail, E3_DMA_BE *dma)
66559 +{
66560 +    EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[dma->s.dma_direction == DMA_WRITE ? 
66561 +                                                 EP_VP_TO_NODE(dma->s.dma_srcVProc) :
66562 +                                                 EP_VP_TO_NODE(dma->s.dma_destVProc)];
66563 +    EP3_RETRY_DMA *retry;
66564 +    unsigned long flags;
66565 +
66566 +    /*
66567 +     * When requeueing DMAs they must never be "READ" dma's since
66568 +     * these would fetch the DMA descriptor from the retry descriptor
66569 +     */
66570 +    ASSERT (dma->s.dma_direction == DMA_WRITE || dma->s.dma_direction == DMA_READ_REQUEUE);
66571 +    ASSERT (dma->s.dma_direction == DMA_WRITE ? 
66572 +           EP_VP_TO_NODE(dma->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
66573 +           EP_VP_TO_NODE(dma->s.dma_destVProc) == rail->Generic.Position.pos_nodeid);
66574 +
66575 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66576 +    
66577 +    EP_ASSERT (&rail->Generic, !list_empty (&rail->DmaRetryFreeList));
66578 +
66579 +    /* take an item off the free list */
66580 +    retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
66581 +
66582 +    list_del (&retry->Link);
66583 +    
66584 +    EPRINTF5 (DBG_RETRY, "%s: QueueDmaOnStalledList: %08x %08x %08x %08x\n", rail->Generic.Name,
66585 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
66586 +    EPRINTF5 (DBG_RETRY, "%s:                        %08x %08x %08x %08x\n", rail->Generic.Name,
66587 +             dma->s.dma_destEvent, dma->s.dma_destCookieVProc,
66588 +             dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
66589 +
66590 +    /* copy the DMA into the retry descriptor */
66591 +    retry->Dma.s.dma_type            = dma->s.dma_type;
66592 +    retry->Dma.s.dma_size            = dma->s.dma_size;
66593 +    retry->Dma.s.dma_source          = dma->s.dma_source;
66594 +    retry->Dma.s.dma_dest            = dma->s.dma_dest;
66595 +    retry->Dma.s.dma_destEvent       = dma->s.dma_destEvent;
66596 +    retry->Dma.s.dma_destCookieVProc = dma->s.dma_destCookieVProc;
66597 +    retry->Dma.s.dma_srcEvent        = dma->s.dma_srcEvent;
66598 +    retry->Dma.s.dma_srcCookieVProc  = dma->s.dma_srcCookieVProc;
66599 +
66600 +    /* chain onto the node cancelled dma list */
66601 +    list_add_tail (&retry->Link, &nodeRail->StalledDmas);
66602 +
66603 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66604 +}
66605 +
66606 +void
66607 +FreeStalledDmas (EP3_RAIL *rail, unsigned int nodeId)
66608 +{
66609 +    EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[nodeId];
66610 +    struct list_head *el, *nel;
66611 +    unsigned long flags;
66612 +
66613 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66614 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
66615 +       list_del (el);
66616 +       list_add (el, &rail->DmaRetryFreeList);
66617 +    }
66618 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66619 +}
66620 +
66621 +/****************************************************************************************/
66622 +/*
66623 + * Connection management.
66624 + */
66625 +static void
66626 +DiscardingHaltOperation (ELAN3_DEV *dev, void *arg)
66627 +{
66628 +    EP3_RAIL *rail = (EP3_RAIL *) arg;
66629 +    unsigned long flags;
66630 +
66631 +    spin_lock_irqsave (&dev->IntrLock, flags);
66632 +    rail->HaltOpCompleted = 1;
66633 +    kcondvar_wakeupall (&rail->HaltOpSleep, &dev->IntrLock);
66634 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
66635 +}
66636
66637 +typedef struct {
66638 +     EP3_RAIL  *rail;
66639 +    sdramaddr_t qaddr;
66640 +} SetQueueFullData;
66641
66642 +static void
66643 +SetQueueLockedOperation (ELAN3_DEV *dev, void *arg)
66644 +{
66645 +    SetQueueFullData *data =  (SetQueueFullData *) arg;
66646 +    unsigned long     flags;     
66647 +
66648 +    spin_lock_irqsave (&dev->IntrLock, flags);
66649 +
66650 +    elan3_sdram_writel  (dev, data->qaddr, E3_QUEUE_LOCKED | elan3_sdram_readl(dev, data->qaddr));
66651 +   
66652 +    data->rail->HaltOpCompleted = 1;
66653 +    kcondvar_wakeupall (&data->rail->HaltOpSleep, &dev->IntrLock);
66654 +
66655 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
66656 +}
66657 +
66658 +static void
66659 +FlushDmaQueuesHaltOperation (ELAN3_DEV *dev, void *arg)
66660 +{
66661 +    EP3_RAIL      *rail    = (EP3_RAIL *) arg;
66662 +    sdramaddr_t    FPtr, BPtr;
66663 +    sdramaddr_t           Base, Top;
66664 +    E3_DMA_BE      dma;
66665 +    EP_NODE_RAIL  *node;
66666 +    int            vp;
66667 +    unsigned long  flags;
66668 +
66669 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
66670 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
66671 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
66672 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
66673 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
66674 +    
66675 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
66676 +    BPtr =  read_reg32 (dev, DProc_SysCntx_BPtr);
66677 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
66678 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
66679 +    
66680 +    while (FPtr != BPtr)
66681 +    {
66682 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
66683 +       
66684 +       EPRINTF5 (DBG_DISCON, "%s: FlushDmaQueuesHaltOperation: %08x %08x %08x %08x\n", rail->Generic.Name,
66685 +                 dma.s.dma_type, dma.s.dma_size, dma.s.dma_source, dma.s.dma_dest);
66686 +       EPRINTF5 (DBG_DISCON, "%s:                              %08x %08x %08x %08x\n", rail->Generic.Name,
66687 +                 dma.s.dma_destEvent, dma.s.dma_destCookieVProc,
66688 +                dma.s.dma_srcEvent, dma.s.dma_srcCookieVProc);
66689 +       
66690 +       ASSERT ((dma.s.dma_u.s.Context & SYS_CONTEXT_BIT) != 0);
66691 +
66692 +       if (dma.s.dma_direction == DMA_WRITE)
66693 +           vp = dma.s.dma_destVProc;
66694 +       else
66695 +           vp = dma.s.dma_srcVProc;
66696 +       
66697 +       node = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
66698 +
66699 +       ASSERT (!EP_VP_ISDATA(vp) || (node->State >= EP_NODE_CONNECTED && node->State <= EP_NODE_LOCAL_PASSIVATE));
66700 +
66701 +       if (EP_VP_ISDATA(vp) && node->State == EP_NODE_LOCAL_PASSIVATE)
66702 +       {
66703 +           /*
66704 +            * This is a DMA going to the node which is being removed, 
66705 +            * so move it onto the node dma list where it will get
66706 +            * handled later.
66707 +            */
66708 +           EPRINTF1 (DBG_DISCON, "%s: FlushDmaQueuesHaltOperation: move dma to cancelled list\n", rail->Generic.Name);
66709 +          
66710 +           if (dma.s.dma_direction != DMA_WRITE)
66711 +           {
66712 +               /* for read dma's set the DMA_READ_REQUEUE bits as the dma_source has been 
66713 +                * modified by the elan to point at the dma in the rxd where it was issued
66714 +                * from */
66715 +               dma.s.dma_direction = (dma.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
66716 +           }
66717 +           
66718 +           QueueDmaOnStalledList (rail, &dma);
66719 +           
66720 +           /*
66721 +            * Remove the DMA from the queue by replacing it with one with
66722 +            * zero size and no events.
66723 +            *
66724 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
66725 +            * to mark the appropriate run queue as empty.
66726 +            */
66727 +           dma.s.dma_type            = (SYS_CONTEXT_BIT << 16);
66728 +           dma.s.dma_size            = 0;
66729 +           dma.s.dma_source          = (E3_Addr) 0;
66730 +           dma.s.dma_dest            = (E3_Addr) 0;
66731 +           dma.s.dma_destEvent       = (E3_Addr) 0;
66732 +           dma.s.dma_destCookieVProc = 0;
66733 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
66734 +           dma.s.dma_srcCookieVProc  = 0;
66735 +           
66736 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
66737 +       }
66738 +
66739 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
66740 +    }
66741 +
66742 +    spin_lock_irqsave (&dev->IntrLock, flags);
66743 +    rail->HaltOpCompleted = 1;
66744 +    kcondvar_wakeupall (&rail->HaltOpSleep, &dev->IntrLock);
66745 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
66746 +}
66747 +
66748 +void
66749 +SetQueueLocked (EP3_RAIL *rail, sdramaddr_t qaddr)
66750 +{
66751 +    ELAN3_DEV        *dev = rail->Device;
66752 +    SetQueueFullData  data;
66753 +    unsigned long     flags;
66754 +    
66755 +    /* Ensure that the context filter changes have been seen by halting
66756 +     * then restarting the inputters - this also ensures that any setevent
66757 +     * commands used to issue dma's have completed and any trap has been
66758 +     * handled. */
66759 +    data.rail  = rail;
66760 +    data.qaddr = qaddr;
66761 +
66762 +    kmutex_lock (&rail->HaltOpMutex);
66763 +    spin_lock_irqsave (&dev->IntrLock, flags);
66764 +    QueueHaltOperation (dev, 0, NULL, INT_DiscardingSysCntx | INT_TProcHalted, SetQueueLockedOperation, &data);
66765 +
66766 +    while (! rail->HaltOpCompleted)
66767 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
66768 +    rail->HaltOpCompleted = 0;
66769 +
66770 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
66771 +    kmutex_unlock (&rail->HaltOpMutex);
66772 +}
66773 +
66774 +void
66775 +ep3_flush_filters (EP_RAIL *r)
66776 +{
66777 +    EP3_RAIL *rail = (EP3_RAIL *) r;
66778 +    ELAN3_DEV *dev  = rail->Device;
66779 +    unsigned long flags;
66780 +
66781 +    /* Ensure that the context filter changes have been seen by halting
66782 +     * then restarting the inputters - this also ensures that any setevent
66783 +     * commands used to issue dma's have completed and any trap has been
66784 +     * handled. */
66785 +    kmutex_lock (&rail->HaltOpMutex);
66786 +    spin_lock_irqsave (&dev->IntrLock, flags);
66787 +    QueueHaltOperation (dev, 0, NULL, INT_DiscardingSysCntx, DiscardingHaltOperation, rail);
66788 +    
66789 +    while (! rail->HaltOpCompleted)
66790 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
66791 +    rail->HaltOpCompleted = 0;
66792 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
66793 +    kmutex_unlock (&rail->HaltOpMutex);
66794 +}
66795 +
66796 +void
66797 +ep3_flush_queues (EP_RAIL *r)
66798 +{
66799 +    EP3_RAIL         *rail = (EP3_RAIL *) r;
66800 +    ELAN3_DEV         *dev  = rail->Device;
66801 +    struct list_head *el;
66802 +    struct list_head *nel;
66803 +    EP_NODE_RAIL     *node;
66804 +    unsigned long flags;
66805 +    int vp, i;
66806 +
66807 +    ASSERT (NO_LOCKS_HELD);
66808 +    
66809 +    /* First - stall the dma retry thread, so that it will no longer
66810 +     *         restart any dma's from the retry lists. */
66811 +    StallDmaRetryThread (rail);
66812 +
66813 +    /* Second - queue a halt operation to flush through all DMA's which are executing
66814 +     *          or on the run queue. */
66815 +    kmutex_lock (&rail->HaltOpMutex);
66816 +    spin_lock_irqsave (&dev->IntrLock, flags);
66817 +    QueueHaltOperation (dev, 0, NULL, INT_DProcHalted | INT_TProcHalted, FlushDmaQueuesHaltOperation, rail);
66818 +    while (! rail->HaltOpCompleted)
66819 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
66820 +    rail->HaltOpCompleted = 0;
66821 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
66822 +    kmutex_unlock (&rail->HaltOpMutex);
66823 +
66824 +    /* Third - run down the dma retry lists and move all entries to the cancelled
66825 +     *         list.  Any dma's which were on the run queues have already been
66826 +     *         moved there */
66827 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66828 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
66829 +    {
66830 +       list_for_each_safe (el, nel, &rail->DmaRetries[i]) {
66831 +           EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
66832 +
66833 +           if (retry->Dma.s.dma_direction == DMA_WRITE)
66834 +               vp = retry->Dma.s.dma_destVProc;
66835 +           else
66836 +               vp = retry->Dma.s.dma_srcVProc;
66837 +           
66838 +           node = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
66839 +           
66840 +           ASSERT (!EP_VP_ISDATA(vp) || (node->State >= EP_NODE_CONNECTED && node->State <= EP_NODE_LOCAL_PASSIVATE));
66841 +
66842 +           if (EP_VP_ISDATA(vp) && node->State == EP_NODE_LOCAL_PASSIVATE)
66843 +           {
66844 +               EPRINTF5 (DBG_DISCON, "%s: FlushDmaQueues: %08x %08x %08x %08x\n",rail->Generic.Name,
66845 +                         retry->Dma.s.dma_type, retry->Dma.s.dma_size, retry->Dma.s.dma_source, retry->Dma.s.dma_dest);
66846 +               EPRINTF5 (DBG_DISCON, "%s:                 %08x %08x %08x %08x\n", rail->Generic.Name,
66847 +                         retry->Dma.s.dma_destEvent, retry->Dma.s.dma_destCookieVProc,
66848 +                         retry->Dma.s.dma_srcEvent, retry->Dma.s.dma_srcCookieVProc);
66849 +
66850 +               list_del (&retry->Link);
66851 +
66852 +               list_add_tail (&retry->Link, &node->StalledDmas);
66853 +           }
66854 +       }
66855 +    }
66856 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66857 +
66858 +    /* Finally - allow the dma retry thread to run again */
66859 +    ResumeDmaRetryThread (rail);
66860 +}
66861 +
66862 +/****************************************************************************************/
66863 +/* NOTE - we require that all cookies are non-zero, which is 
66864 + *        achieved because EP_VP_DATA() is non-zero for all
66865 + *        nodes */
66866 +E3_uint32
66867 +LocalCookie (EP3_RAIL *rail, unsigned remoteNode)
66868 +{
66869 +    E3_uint32     cookie;
66870 +    unsigned long flags;
66871 +
66872 +    spin_lock_irqsave (&rail->CookieLock, flags);
66873 +    cookie = DMA_COOKIE (rail->MainCookies[remoteNode], EP_VP_DATA(rail->Generic.Position.pos_nodeid));
66874 +    spin_unlock_irqrestore (&rail->CookieLock, flags);
66875 +
66876 +    /* Main processor cookie for srcCookie - this is what is sent
66877 +     * to the remote node along with the setevent from the put
66878 +     * or the dma descriptor for a get */
66879 +    return (cookie);
66880 +}
66881 +
66882 +E3_uint32
66883 +RemoteCookie (EP3_RAIL *rail, u_int remoteNode)
66884 +{
66885 +    uint32_t      cookie;
66886 +    unsigned long flags;
66887 +
66888 +    spin_lock_irqsave (&rail->CookieLock, flags);
66889 +    cookie = DMA_REMOTE_COOKIE (rail->MainCookies[remoteNode], EP_VP_DATA(remoteNode));
66890 +    spin_unlock_irqrestore (&rail->CookieLock, flags);
66891 +
66892 +    /* Main processor cookie for dstCookie - this is the cookie
66893 +     * that the "remote put" dma uses for its setevent packets for
66894 +     * a get dma */
66895 +    
66896 +    return (cookie);
66897 +}
66898 +
66899 +/****************************************************************************************/
66900 +/*
66901 + * Event Cookie management.
66902 + *
66903 + *   We find the ep_cookie in one of two ways:
66904 + *     1) for block copy events
66905 + *          the cookie value is stored in the ev_Source - for EVIRQ events
66906 + *          it is also stored in the ev_Type
66907 + *     2) for normal events
66908 + *          we just use the event address.
66909 + */
66910 +void 
66911 +InitialiseCookieTable (EP3_COOKIE_TABLE *table)
66912 +{
66913 +    register int i;
66914 +    
66915 +    spin_lock_init (&table->Lock);
66916 +    
66917 +    for (i = 0; i < EP3_COOKIE_HASH_SIZE; i++)
66918 +       table->Entries[i] = NULL;
66919 +}
66920 +
66921 +void
66922 +DestroyCookieTable (EP3_COOKIE_TABLE *table)
66923 +{
66924 +    register int i;
66925 +
66926 +    for (i = 0; i < EP3_COOKIE_HASH_SIZE; i++)
66927 +       if (table->Entries[i])
66928 +           printk ("DestroyCookieTable: entry %d not empty\n", i);
66929 +
66930 +    spin_lock_destroy (&table->Lock);
66931 +}
66932 +
66933 +void
66934 +RegisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cp, E3_uint32 cookie, EP3_COOKIE_OPS *ops, void *arg)
66935 +{
66936 +    EP3_COOKIE *tcp;
66937 +    int hashval = EP3_HASH_COOKIE(cookie);
66938 +    unsigned long flags;
66939 +
66940 +    spin_lock_irqsave (&table->Lock, flags);
66941 +    
66942 +    cp->Operations = ops;
66943 +    cp->Arg        = arg;
66944 +    cp->Cookie     = cookie;
66945 +    
66946 +#if defined(DEBUG)
66947 +    /* Check that the cookie is unique */
66948 +    for (tcp = table->Entries[hashval]; tcp; tcp = tcp->Next)
66949 +       if (tcp->Cookie == cookie)
66950 +           panic ("RegisterEventCookie: non unique cookie\n");
66951 +#endif
66952 +    cp->Next = table->Entries[hashval];
66953 +    
66954 +    table->Entries[hashval] = cp;
66955 +    
66956 +    spin_unlock_irqrestore (&table->Lock, flags);
66957 +}
66958 +
66959 +void
66960 +DeregisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cp)
66961 +{
66962 +    EP3_COOKIE **predCookiep;
66963 +    unsigned long flags;
66964 +
66965 +    spin_lock_irqsave (&table->Lock, flags);
66966 +    
66967 +    for (predCookiep = &table->Entries[EP3_HASH_COOKIE (cp->Cookie)]; *predCookiep; predCookiep = &(*predCookiep)->Next)
66968 +    {
66969 +       if (*predCookiep == cp)
66970 +       {
66971 +           *predCookiep = cp->Next;
66972 +           break;
66973 +       }
66974 +    }
66975 +
66976 +    spin_unlock_irqrestore (&table->Lock, flags);
66977 +
66978 +    cp->Operations = NULL;
66979 +    cp->Arg        = NULL;
66980 +    cp->Cookie     = 0;
66981 +    cp->Next       = NULL;
66982 +}
66983 +
66984 +EP3_COOKIE *
66985 +LookupCookie (EP3_COOKIE_TABLE *table, E3_Addr cookie)
66986 +{
66987 +    EP3_COOKIE *cp;
66988 +    unsigned long flags;
66989 +
66990 +    spin_lock_irqsave (&table->Lock, flags);
66991 +    
66992 +    for (cp = table->Entries[EP3_HASH_COOKIE(cookie)]; cp; cp = cp->Next)
66993 +       if (cp->Cookie == cookie)
66994 +           break;
66995 +    
66996 +    spin_unlock_irqrestore (&table->Lock, flags);
66997 +    return (cp);
66998 +}
66999 +
67000 +EP3_COOKIE *
67001 +LookupEventCookie (EP3_RAIL *rail, EP3_COOKIE_TABLE *table, E3_Addr eaddr)
67002 +{
67003 +    sdramaddr_t event;
67004 +    E3_uint32 type;
67005 +
67006 +    if ((event = ep_elan2sdram (&rail->Generic, eaddr)) != (sdramaddr_t) 0)
67007 +    {
67008 +       type = elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Type));
67009 +
67010 +       if (type & EV_TYPE_BCOPY)
67011 +           return (LookupCookie (table, elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Source)) & ~EV_WCOPY));
67012 +       else
67013 +           return (LookupCookie (table, eaddr));
67014 +    }
67015 +
67016 +    return (NULL);
67017 +}
67018 +
67019 +/****************************************************************************************/
67020 +/*
67021 + * Elan context operations - note only support interrupt ops.
67022 + */
67023 +static int        ep3_event     (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
67024 +static int        ep3_dprocTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
67025 +static int        ep3_tprocTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
67026 +static int        ep3_iprocTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int chan);
67027 +static int        ep3_cprocTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
67028 +static int        ep3_cprocReissue (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *tbuf);
67029 +
67030 +static E3_uint8   ep3_load8 (ELAN3_CTXT *ctxt, E3_Addr addr);
67031 +static void       ep3_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
67032 +static E3_uint16  ep3_load16 (ELAN3_CTXT *ctxt, E3_Addr addr);
67033 +static void       ep3_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
67034 +static E3_uint32  ep3_load32 (ELAN3_CTXT *ctxt, E3_Addr addr);
67035 +static void       ep3_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
67036 +static E3_uint64  ep3_load64 (ELAN3_CTXT *ctxt, E3_Addr addr);
67037 +static void       ep3_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
67038 +
67039 +ELAN3_OPS ep3_elan3_ops = 
67040 +{
67041 +    ELAN3_OPS_VERSION,         /* Version */
67042 +    
67043 +    NULL,                      /* Exception */
67044 +    NULL,                      /* GetWordItem */
67045 +    NULL,                      /* GetBlockItem */
67046 +    NULL,                      /* PutWordItem */
67047 +    NULL,                      /* PutBlockItem */
67048 +    NULL,                      /* PutbackItem */
67049 +    NULL,                      /* FreeWordItem */
67050 +    NULL,                      /* FreeBlockItem */
67051 +    NULL,                      /* CountItems */
67052 +    ep3_event,                 /* Event */
67053 +    NULL,                      /* SwapIn */
67054 +    NULL,                      /* SwapOut */
67055 +    NULL,                      /* FreePrivate */
67056 +    NULL,                      /* FixupNetworkError */
67057 +    ep3_dprocTrap,             /* DProcTrap */
67058 +    ep3_tprocTrap,             /* TProcTrap */
67059 +    ep3_iprocTrap,             /* IProcTrap */
67060 +    ep3_cprocTrap,             /* CProcTrap */
67061 +    ep3_cprocReissue,          /* CProcReissue */
67062 +    NULL,                      /* StartFaultCheck */
67063 +    NULL,                      /* EndFaultCheck */
67064 +    ep3_load8,                 /* Load8 */
67065 +    ep3_store8,                        /* Store8 */
67066 +    ep3_load16,                        /* Load16 */
67067 +    ep3_store16,               /* Store16 */
67068 +    ep3_load32,                        /* Load32 */
67069 +    ep3_store32,               /* Store32 */
67070 +    ep3_load64,                        /* Load64 */
67071 +    ep3_store64,               /* Store64 */
67072 +};
67073 +
67074 +static int
67075 +ep3_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag)
67076 +{
67077 +    EP3_RAIL  *rail = (EP3_RAIL *) ctxt->Private;
67078 +    EP3_COOKIE *cp   = LookupCookie (&rail->CookieTable, cookie);
67079 +    
67080 +    if (cp == NULL)
67081 +    {
67082 +       printk ("ep3_event: cannot find event cookie for %x\n", cookie);
67083 +       return (OP_HANDLED);
67084 +    }
67085 +    
67086 +    if (cp->Operations->Event)
67087 +       cp->Operations->Event(rail, cp->Arg);
67088 +    
67089 +    return (OP_HANDLED);
67090 +}
67091 +
67092 +/* Trap interface */
67093 +int
67094 +ep3_dprocTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
67095 +{
67096 +    EP3_RAIL        *rail = (EP3_RAIL *) ctxt->Private;
67097 +    ELAN3_DEV        *dev = rail->Device;
67098 +    EP3_COOKIE       *cp;
67099 +    E3_FaultSave_BE *FaultArea;
67100 +    E3_uint16        vp;
67101 +    int                     validTrap;
67102 +    int                     numFaults;
67103 +    int                     i;
67104 +    sdramaddr_t      event;
67105 +    E3_uint32        type;
67106 +    sdramaddr_t      dma;
67107 +    E3_DMA_BE        dmabe;
67108 +    int              status = EAGAIN;
67109 +
67110 +    EPRINTF4 (DBG_EPTRAP, "ep3_dprocTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
67111 +             trap->Status.s.WakeupFunction, trap->Status.s.Context, 
67112 +             trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
67113 +    EPRINTF4 (DBG_EPTRAP, "              type %08x size %08x source %08x dest %08x\n",
67114 +             trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
67115 +    EPRINTF2 (DBG_EPTRAP, "              Dest event %08x cookie/proc %08x\n",
67116 +             trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
67117 +    EPRINTF2 (DBG_EPTRAP, "              Source event %08x cookie/proc %08x\n",
67118 +             trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
67119 +
67120 +    ASSERT (trap->Status.s.Context & SYS_CONTEXT_BIT);
67121 +
67122 +    switch (trap->Status.s.TrapType)
67123 +    {
67124 +    case MI_DmaPacketTimedOutOrPacketError:
67125 +       if (trap->Desc.s.dma_direction == DMA_WRITE)
67126 +           vp = trap->Desc.s.dma_destVProc;
67127 +       else
67128 +           vp = trap->Desc.s.dma_srcVProc;
67129 +       
67130 +       if (! trap->PacketInfo.s.PacketTimeout)
67131 +           status = ETIMEDOUT;
67132 +       else
67133 +       {
67134 +           status = EHOSTDOWN;
67135 +
67136 +           /* XXXX: dma timedout - might want to "restart" tree ? */
67137 +       }
67138 +       goto retry_dma;
67139 +
67140 +    case MI_DmaFailCountError:
67141 +       goto retry_dma;
67142 +
67143 +    case MI_TimesliceDmaQueueOverflow:
67144 +       IncrStat (rail, DprocDmaQueueOverflow);
67145 +
67146 +       goto retry_dma;
67147 +
67148 +    case MI_RemoteDmaCommand:
67149 +    case MI_RunDmaCommand:
67150 +    case MI_DequeueNonSysCntxDma:
67151 +    case MI_DequeueSysCntxDma:
67152 +       /*
67153 +        * The DMA processor has trapped due to outstanding prefetches from the previous 
67154 +        * dma.  The "current" dma has not been consumed, so we just ignore the trap
67155 +        */
67156 +       return (OP_HANDLED);
67157 +       
67158 +    case MI_EventQueueOverflow:
67159 +       IncrStat (rail, DprocEventQueueOverflow);
67160 +
67161 +       if ((event = ep_elan2sdram (&rail->Generic, trap->Desc.s.dma_srcEvent)) != (sdramaddr_t) 0 &&
67162 +           ((type  = elan3_sdram_readl (dev, event + offsetof(E3_Event,ev_Type))) & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
67163 +       {
67164 +           spin_unlock (&ctxt->Device->IntrLock);
67165 +           ep3_event (ctxt, (type & ~(EV_TYPE_MASK_EVIRQ | EV_TYPE_MASK_BCOPY)), OP_LWP);
67166 +           spin_lock (&ctxt->Device->IntrLock);
67167 +       }
67168 +       return (OP_HANDLED);
67169 +       
67170 +    case MI_DmaQueueOverflow:
67171 +       IncrStat (rail, DprocDmaQueueOverflow);
67172 +
67173 +       if ((event = ep_elan2sdram (&rail->Generic, trap->Desc.s.dma_srcEvent)) != (sdramaddr_t) 0 &&
67174 +           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_DMA) == EV_TYPE_DMA &&
67175 +           (dma  = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2))) != (sdramaddr_t) 0)
67176 +       {
67177 +           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
67178 +           
67179 +           /* We only chain together DMA's of the same direction, so since
67180 +            * we took a DmaQueueOverflow trap - this means that DMA which
67181 +            * trapped was a WRITE dma - hence the one we chain to must also
67182 +            * be a WRITE dma.
67183 +            */
67184 +           ASSERT (dmabe.s.dma_direction == DMA_WRITE);
67185 +           
67186 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
67187 +
67188 +#ifdef DEBUG_ASSERT
67189 +           {
67190 +               E3_uint16     vp       = dmabe.s.dma_destVProc;
67191 +               EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
67192 +
67193 +               ASSERT (cp != NULL && (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE)));
67194 +           }
67195 +#endif
67196 +           cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
67197 +           
67198 +           return (OP_HANDLED);
67199 +       }
67200 +
67201 +       panic ("ep3_dprocTrap\n");
67202 +       return (OP_HANDLED);
67203 +
67204 +    default:
67205 +       break;
67206 +    }
67207 +
67208 +    /* If it's a dma which traps past the end of the source, then */
67209 +    /* just re-issue it */
67210 +    numFaults = validTrap = (trap->FaultSave.s.FSR.Status != 0);
67211 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
67212 +    {
67213 +       if (FaultArea->s.FSR.Status != 0)
67214 +       {
67215 +           numFaults++;
67216 +
67217 +           /* XXXX: Rev B Elans can prefetch data past the end of the dma descriptor */
67218 +           /*       if the fault relates to this, then just ignore it */
67219 +           if (FaultArea->s.FaultAddress >= (trap->Desc.s.dma_source+trap->Desc.s.dma_size))
67220 +           {
67221 +               static int i;
67222 +               if (i < 10 && i++ < 10)
67223 +                   printk ("ep3_dprocTrap: Rev B prefetch trap error %08x %08x\n",
67224 +                            FaultArea->s.FaultAddress, (trap->Desc.s.dma_source+trap->Desc.s.dma_size));
67225 +               continue;
67226 +           }
67227 +
67228 +           validTrap++;
67229 +       }
67230 +    }
67231 +
67232 +    /*
67233 +     * NOTE: for physical errors (uncorrectable ECC/PCI parity errors) the FSR will
67234 +     *       be zero - hence we will not see any faults - and none will be valid, 
67235 +     *       so only ignore a Rev B prefetch trap if we've seen some faults. Otherwise
67236 +     *       we can reissue a DMA which has already sent it's remote event !
67237 +     */
67238 +    if (numFaults != 0 && validTrap == 0)
67239 +    {
67240 +    retry_dma:
67241 +       if (trap->Desc.s.dma_direction == DMA_WRITE)
67242 +       {
67243 +           vp = trap->Desc.s.dma_destVProc;
67244 +           cp = LookupEventCookie (rail, &rail->CookieTable, trap->Desc.s.dma_srcEvent);
67245 +       }
67246 +       else
67247 +       {
67248 +           ASSERT (EP3_CONTEXT_ISDATA(trap->Desc.s.dma_queueContext) || trap->Desc.s.dma_direction == DMA_READ_REQUEUE);
67249 +
67250 +           vp = trap->Desc.s.dma_srcVProc;
67251 +           cp = LookupEventCookie (rail, &rail->CookieTable, trap->Desc.s.dma_destEvent);
67252 +
67253 +           /* for read dma's set the DMA_READ_REQUEUE bits as the dma_source has been 
67254 +            * modified by the elan to point at the dma in the rxd where it was issued
67255 +            * from */
67256 +           trap->Desc.s.dma_direction = (trap->Desc.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
67257 +       }
67258 +
67259 +#ifdef DEBUG_ASSERT
67260 +       {
67261 +           EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
67262 +
67263 +           ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
67264 +       }
67265 +#endif
67266 +       
67267 +       if (cp != NULL)
67268 +           cp->Operations->DmaRetry (rail, cp->Arg, &trap->Desc, status);
67269 +       else
67270 +       {
67271 +           ASSERT (trap->Desc.s.dma_direction == DMA_WRITE && trap->Desc.s.dma_srcEvent == 0 && trap->Desc.s.dma_isRemote);
67272 +
67273 +           QueueDmaForRetry (rail, &trap->Desc, EP_RETRY_ANONYMOUS);
67274 +       }
67275 +
67276 +       return (OP_HANDLED);
67277 +    }
67278 +    
67279 +    printk ("ep3_dprocTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
67280 +            trap->Status.s.WakeupFunction, trap->Status.s.Context, 
67281 +            trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
67282 +    printk ("                    FaultAddr=%x EventAddr=%x FSR=%x\n",
67283 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
67284 +            trap->FaultSave.s.FSR.Status);
67285 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
67286 +       printk ("                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
67287 +                FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
67288 +    
67289 +    printk ("                  type %08x size %08x source %08x dest %08x\n",
67290 +            trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
67291 +    printk ("                  Dest event %08x cookie/proc %08x\n",
67292 +            trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
67293 +    printk ("                  Source event %08x cookie/proc %08x\n",
67294 +            trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
67295 +
67296 +//    panic ("ep3_dprocTrap");
67297 +
67298 +    return (OP_HANDLED);
67299 +}
67300 +
67301 +int     /* TProc (thread processor) trap handler - always returns OP_HANDLED */
67302 +ep3_tprocTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
67303 +{
67304 +    EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private;
67305 +    /* trace the trapped thread's full register state (DBG_EPTRAP) */
67306 +    EPRINTF6 (DBG_EPTRAP, "ep3_tprocTrap: SP=%08x PC=%08x NPC=%08x DIRTY=%08x TRAP=%08x MI=%s\n",
67307 +             trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits, trap->TrapBits.Bits, MiToName (trap->mi));
67308 +    EPRINTF4 (DBG_EPTRAP, "              g0=%08x g1=%08x g2=%08x g3=%08x\n", 
67309 +             trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
67310 +             trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
67311 +    EPRINTF4 (DBG_EPTRAP, "              g4=%08x g5=%08x g6=%08x g7=%08x\n", 
67312 +             trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
67313 +             trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
67314 +    EPRINTF4 (DBG_EPTRAP, "              o0=%08x o1=%08x o2=%08x o3=%08x\n", 
67315 +             trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
67316 +             trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
67317 +    EPRINTF4 (DBG_EPTRAP, "              o4=%08x o5=%08x o6=%08x o7=%08x\n", 
67318 +             trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
67319 +             trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
67320 +    EPRINTF4 (DBG_EPTRAP, "              l0=%08x l1=%08x l2=%08x l3=%08x\n", 
67321 +             trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
67322 +             trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
67323 +    EPRINTF4 (DBG_EPTRAP, "              l4=%08x l5=%08x l6=%08x l7=%08x\n", 
67324 +             trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
67325 +             trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
67326 +    EPRINTF4 (DBG_EPTRAP, "              i0=%08x i1=%08x i2=%08x i3=%08x\n", 
67327 +             trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
67328 +             trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
67329 +    EPRINTF4 (DBG_EPTRAP, "              i4=%08x i5=%08x i6=%08x i7=%08x\n", 
67330 +             trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
67331 +             trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
67332 +    
67333 +    ASSERT (trap->Status.s.Context & SYS_CONTEXT_BIT);
67334 +    /* kernel comms threads must carry the system context bit (asserted above) */
67335 +    switch (trap->mi)
67336 +    {
67337 +    case MI_UnimplementedError:
67338 +       if (trap->TrapBits.s.ForcedTProcTrap)
67339 +       {
67340 +           ASSERT (trap->TrapBits.s.OutputWasOpen == 0);
67341 +           
67342 +           EPRINTF0 (DBG_EPTRAP, "ep3_tprocTrap: ForcedTProcTrap\n");
67343 +
67344 +           IssueRunThread (rail, SaveThreadToStack (ctxt, trap, FALSE));
67345 +           return (OP_HANDLED);
67346 +       }
67347 +
67348 +       if (trap->TrapBits.s.ThreadTimeout)
67349 +       {
67350 +           EPRINTF0 (DBG_EPTRAP, "ep3_tprocTrap: ThreadTimeout\n");
67351 +
67352 +           if (trap->Registers[REG_GLOBALS + (1^WordEndianFlip)] == 0)
67353 +               RollThreadToClose (ctxt, trap, trap->TrapBits.s.PacketAckValue);
67354 +           else
67355 +           {
67356 +               CompleteEnvelope (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], trap->TrapBits.s.PacketAckValue);
67357 +
67358 +               RollThreadToClose (ctxt, trap, EP3_PAckStolen);
67359 +           }
67360 +               
67361 +           IssueRunThread (rail, SaveThreadToStack (ctxt, trap, FALSE));
67362 +           return (OP_HANDLED);
67363 +       }
67364 +       /* software-trap encoding: a Ticc instruction with immediate operand and 'always' condition */
67365 +       if (trap->TrapBits.s.Unimplemented)
67366 +       {
67367 +           E3_uint32 instr = ELAN3_OP_LOAD32 (ctxt, trap->pc & PC_MASK);
67368 +
67369 +           PRINTF1 (ctxt, DBG_EPTRAP, "ep3_tprocTrap: unimplemented instruction %08x\n", instr);
67370 +
67371 +           if ((instr & OPCODE_MASK) == OPCODE_Ticc &&
67372 +               (instr & OPCODE_IMM)  == OPCODE_IMM &&
67373 +               (Ticc_COND(instr)     == Ticc_TA))
67374 +           {
67375 +               switch (INSTR_IMM(instr))
67376 +               {
67377 +               case EP3_UNIMP_TRAP_NO_DESCS:
67378 +                   StallThreadForNoDescs (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], 
67379 +                                          SaveThreadToStack (ctxt, trap, TRUE));
67380 +                   return (OP_HANDLED);
67381 +
67382 +               case EP3_UNIMP_TRAP_PACKET_NACKED:
67383 +                   CompleteEnvelope (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], E3_PAckDiscard);
67384 +
67385 +                   IssueRunThread (rail, SaveThreadToStack (ctxt, trap, TRUE));
67386 +                   return (OP_HANDLED);
67387 +
67388 +               case EP3_UNIMP_THREAD_HALTED: 
67389 +                   StallThreadForHalted (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], 
67390 +                                         SaveThreadToStack (ctxt, trap, TRUE));
67391 +                   return (OP_HANDLED);
67392 +
67393 +               default:
67394 +                   break;
67395 +                   
67396 +               }
67397 +           }
67398 +       }
67399 +       break;
67400 +
67401 +    default:
67402 +       break;
67403 +    }
67404 +
67405 +    /* All other traps should not happen for kernel comms */
67406 +    printk ("ep3_tprocTrap: SP=%08x PC=%08x NPC=%08x DIRTY=%08x TRAP=%08x MI=%s\n",
67407 +            trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits,
67408 +            trap->TrapBits.Bits, MiToName (trap->mi));
67409 +    printk ("              FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
67410 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
67411 +    printk ("              DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
67412 +            trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
67413 +    printk ("              InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
67414 +            trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
67415 +    printk ("              OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
67416 +            trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
67417 +    /* only dump register windows the thread actually dirtied */
67418 +    if (trap->DirtyBits.s.GlobalsDirty)
67419 +    {
67420 +       printk ("              g0=%08x g1=%08x g2=%08x g3=%08x\n", 
67421 +                trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
67422 +                trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
67423 +       printk ("              g4=%08x g5=%08x g6=%08x g7=%08x\n", 
67424 +                trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
67425 +                trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
67426 +    }
67427 +    if (trap->DirtyBits.s.OutsDirty)
67428 +    {
67429 +       printk ("              o0=%08x o1=%08x o2=%08x o3=%08x\n", 
67430 +                trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
67431 +                trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
67432 +       printk ("              o4=%08x o5=%08x o6=%08x o7=%08x\n", 
67433 +                trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
67434 +                trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
67435 +    }
67436 +    if (trap->DirtyBits.s.LocalsDirty)
67437 +    {
67438 +       printk ("              l0=%08x l1=%08x l2=%08x l3=%08x\n", 
67439 +                trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
67440 +                trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
67441 +       printk ("              l4=%08x l5=%08x l6=%08x l7=%08x\n", 
67442 +                trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
67443 +                trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
67444 +    }
67445 +    if (trap->DirtyBits.s.InsDirty)
67446 +    {
67447 +       printk ("              i0=%08x i1=%08x i2=%08x i3=%08x\n", 
67448 +                trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
67449 +                trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
67450 +       printk ("              i4=%08x i5=%08x i6=%08x i7=%08x\n", 
67451 +                trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
67452 +                trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
67453 +    }
67454 +    
67455 +//    panic ("ep3_tprocTrap");
67456 +
67457 +    return (OP_HANDLED);
67458 +}
67459 +
67460 +int     /* IProc (inputter) trap handler - always returns OP_HANDLED */
67461 +ep3_iprocTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int channel)
67462 +{
67463 +    EP3_RAIL      *rail = (EP3_RAIL *) ctxt->Private;
67464 +    ELAN3_DEV      *dev = ctxt->Device;
67465 +    EP3_COOKIE    *cp;
67466 +    sdramaddr_t    event;
67467 +    E3_uint32      type;
67468 +    sdramaddr_t    dma;
67469 +    E3_DMA_BE      dmabe;
67470 +
67471 +    ASSERT (trap->Transactions[0].s.TrTypeCntx.s.Context & SYS_CONTEXT_BIT);
67472 +
67473 +    /*
67474 +     * first process the trap to determine the cause
67475 +     */
67476 +    InspectIProcTrap (ctxt, trap);
67477 +
67478 +    if (! trap->AckSent && trap->LockQueuePointer)             /* Must be a network error in a queueing DMA */
67479 +    {                                                          /* packet - unlock the queue */
67480 +       IncrStat (rail, QueueingPacketTrap);
67481 +
67482 +       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, FALSE);
67483 +       return (OP_HANDLED);
67484 +    }
67485 +
67486 +    if (trap->AckSent && trap->BadTransaction)
67487 +    {
67488 +       spin_unlock (&dev->IntrLock);
67489 +
67490 +       /* NOTE - no network error fixup is necessary for system context
67491 +        *        messages since they are idempotent and are single packet 
67492 +        *        dmas
67493 +        */
67494 +       if (EP3_CONTEXT_ISDATA (trap->Transactions[0].s.TrTypeCntx.s.Context))
67495 +       {
67496 +           int nodeId = EP3_CONTEXT_TO_NODE(trap->Transactions[0].s.TrTypeCntx.s.Context);
67497 +           
67498 +           if (trap->DmaIdentifyTransaction)
67499 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, channel, trap->DmaIdentifyTransaction->s.TrAddr);
67500 +           else if (trap->ThreadIdentifyTransaction)
67501 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, channel, trap->ThreadIdentifyTransaction->s.TrAddr);
67502 +           else
67503 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_DMA_PACKET, channel, 0);
67504 +       }
67505 +
67506 +       spin_lock (&dev->IntrLock);
67507 +       return (OP_HANDLED);
67508 +    }
67509 +    /* ack was sent - walk and process each trapped transaction in turn */
67510 +    if (trap->AckSent)
67511 +    {
67512 +       if (trap->TrappedTransaction == NULL)
67513 +           return (OP_HANDLED);
67514 +       
67515 +       while (! trap->TrappedTransaction->s.TrTypeCntx.s.LastTrappedTrans)
67516 +       {
67517 +           E3_IprocTrapHeader_BE *hdrp  = trap->TrappedTransaction;
67518 +           E3_IprocTrapData_BE   *datap = trap->TrappedDataBuffer;
67519 +           
67520 +           ASSERT (hdrp->s.TrTypeCntx.s.StatusRegValid != 0);
67521 +           
67522 +           if ((hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0)
67523 +           {
67524 +               printk ("ep3_iprocTrap: WRITEBLOCK : Addr %x\n", hdrp->s.TrAddr);
67525 +//             panic ("ep3_iprocTrap\n");
67526 +           }
67527 +           else
67528 +           {
67529 +               switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
67530 +               {
67531 +               case TR_SETEVENT & TR_OPCODE_TYPE_MASK:
67532 +                   switch (GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus))
67533 +                   {
67534 +                   case MI_DmaQueueOverflow:
67535 +                       IncrStat (rail, IprocDmaQueueOverflow);
67536 +                       /* locate the DMA descriptor hanging off the event the setevent targeted */
67537 +                       if ((event = ep_elan2sdram (&rail->Generic, hdrp->s.TrAddr)) != (sdramaddr_t) 0 &&
67538 +                           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_DMA) == EV_TYPE_DMA &&
67539 +                           (dma  = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2))) != (sdramaddr_t) 0)
67540 +                       {
67541 +                           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
67542 +                           
67543 +                           if (dmabe.s.dma_direction == DMA_WRITE)
67544 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
67545 +                           else
67546 +                           {
67547 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
67548 +                               
67549 +                               /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the 
67550 +                                * DMA descriptor will be read from the EP3_RETRY_DMA rather than the 
67551 +                                * original DMA - this can then get reused and an incorrect DMA 
67552 +                                * descriptor sent 
67553 +                                * eventp->ev_Type contains the dma address with type in the lower bits 
67554 +                                */ 
67555 +                               
67556 +                               dmabe.s.dma_source    = (type & ~EV_TYPE_MASK2);
67557 +                               dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
67558 +                           }
67559 +
67560 +#ifdef DEBUG_ASSERT
67561 +                           {
67562 +                               E3_uint16     vp       = (dmabe.s.dma_direction == DMA_WRITE ? dmabe.s.dma_destVProc : dmabe.s.dma_srcVProc);
67563 +                               EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
67564 +
67565 +                               ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
67566 +                           }
67567 +#endif
67568 +                           /* anonymous retry when no cookie is registered for this event */
67569 +                           if (cp != NULL)
67570 +                               cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
67571 +                           else
67572 +                           {
67573 +                               ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
67574 +                               
67575 +                               QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
67576 +                           }
67577 +                           break;
67578 +                       }
67579 +
67580 +                       printk ("ep3_iprocTrap: SETEVENT : %x - cannot find dma to restart\n", hdrp->s.TrAddr);
67581 +//                     panic ("ep3_iprocTrap\n");
67582 +                       break;
67583 +
67584 +                   case MI_EventQueueOverflow:
67585 +                   {
67586 +                       sdramaddr_t event;
67587 +                       E3_uint32   type;
67588 +
67589 +                       IncrStat (rail, IprocEventQueueOverflow);
67590 +
67591 +                       if ((event = ep_elan2sdram (&rail->Generic, hdrp->s.TrAddr)) != (sdramaddr_t) 0 &&
67592 +                           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
67593 +                       {
67594 +                           spin_unlock (&dev->IntrLock);
67595 +                           ep3_event (ctxt, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)), OP_LWP);
67596 +                           spin_lock (&dev->IntrLock);
67597 +
67598 +                           break;
67599 +                       }
67600 +
67601 +                       printk ("ep3_iprocTrap: SETEVENT : %x - cannot find event\n", hdrp->s.TrAddr);
67602 +//                     panic ("ep3_iprocTrap\n");
67603 +                       break;
67604 +                   }
67605 +
67606 +                   default:
67607 +                       printk ("ep3_iprocTrap: SETEVENT : %x MI=%x\n", hdrp->s.TrAddr, GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus));
67608 +//                     panic ("ep3_iprocTrap\n");
67609 +                       break;
67610 +                   }
67611 +                   break;
67612 +                   
67613 +               case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:
67614 +                   /* Just ignore send-discard transactions */
67615 +                   break;
67616 +                   
67617 +               case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:
67618 +               {
67619 +                   E3_DMA_BE *dmap = (E3_DMA_BE *) datap;
67620 +
67621 +                   if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) != MI_DmaQueueOverflow)
67622 +                   {
67623 +                       printk ("ep3_iprocTrap: MI=%x\n", GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus));
67624 +                       break;
67625 +                   }
67626 +
67627 +                   IncrStat (rail, IprocDmaQueueOverflow);
67628 +
67629 +                   cp = LookupEventCookie (rail, &rail->CookieTable, dmap->s.dma_srcEvent);
67630 +
67631 +                   /* modify the dma type since it will still be a "read" dma */
67632 +                   dmap->s.dma_type = (dmap->s.dma_type & ~DMA_TYPE_READ) | DMA_TYPE_ISREMOTE;
67633 +
67634 +#ifdef DEBUG_ASSERT
67635 +                   {
67636 +                       E3_uint16     vp       = dmap->s.dma_destVProc;
67637 +                       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
67638 +                       
67639 +                       ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
67640 +                   }
67641 +#endif
67642 +                   if (cp != NULL)
67643 +                       cp->Operations->DmaRetry (rail, cp->Arg, dmap, EAGAIN);
67644 +                   else
67645 +                   {
67646 +                       ASSERT (dmap->s.dma_direction == DMA_WRITE && dmap->s.dma_srcEvent == 0 && dmap->s.dma_isRemote);
67647 +                       
67648 +                       QueueDmaForRetry (rail, dmap, EP_RETRY_ANONYMOUS);
67649 +                   }
67650 +                   break;
67651 +               }   
67652 +               default:
67653 +                   printk ("ep3_iprocTrap: %s\n", IProcTrapString (hdrp, datap));
67654 +                   break;
67655 +               }
67656 +           }
67657 +           
67658 +           /*
67659 +            * We've successfully processed this transaction, so move onto the 
67660 +            * next one.
67661 +            */
67662 +           trap->TrappedTransaction++;
67663 +           trap->TrappedDataBuffer++;
67664 +       }
67665 +
67666 +       return (OP_HANDLED);
67667 +    }
67668 +    /* no ack sent - check for known benign cases before reporting */
67669 +    /* Workaround WRITEBLOCK transaction executed when LOCKQUEUE transaction missed */
67670 +    if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) &&         /* a DMA packet */
67671 +       trap->LockQueuePointer == 0 && trap->UnlockQueuePointer &&              /* a queueing DMA */
67672 +       trap->TrappedTransaction->s.TrAddr == trap->FaultSave.s.FaultAddress)   /* and missed lockqueue */
67673 +    {
67674 +       printk ("ep3_iprocTrap: missed lockqueue transaction for queue %x\n", trap->UnlockQueuePointer);
67675 +       return (OP_HANDLED);
67676 +    }
67677 +
67678 +    if (trap->FaultSave.s.FaultContext != 0)
67679 +       printk ("ep3_iprocTrap: pagefault at %08x in context %x\n", 
67680 +               trap->FaultSave.s.FaultAddress, trap->FaultSave.s.FaultContext);
67681 +
67682 +//    panic ("ep3_iprocTrap: unexpected inputter trap\n");
67683 +    
67684 +    return (OP_HANDLED);
67685 +}
67686 +
67687 +/*
67688 + * Command processor trap
67689 + *   kernel comms should only be able to generate
67690 + *   queue overflow traps
67691 + */
67692 +int     /* CProc (command processor) trap handler - always returns OP_HANDLED */
67693 +ep3_cprocTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap)
67694 +{
67695 +    EP3_RAIL     *rail   = (EP3_RAIL *) ctxt->Private;
67696 +    int           ctxnum = (trap->TrapBuf.r.Breg >> 16) & MAX_ROOT_CONTEXT_MASK;
67697 +    ELAN3_DEV     *dev    = rail->Device;
67698 +    EP3_DMA_RING  *ring;
67699 +    EP3_COOKIE   *cp;
67700 +    E3_DMA_BE     dmabe;
67701 +    int           vp, slot;
67702 +    unsigned long flags;
67703 +
67704 +    switch (trap->Status.s.TrapType)
67705 +    {
67706 +    case MI_DmaQueueOverflow:
67707 +       IncrStat (rail, CprocDmaQueueOverflow);
67708 +
67709 +       /* Use the context number that the setevent was issued in,
67710 +        * to find the appropriate dma ring, then since they are guaranteed
67711 +        * to be issued in order, we just search backwards till we find the
67712 +        * last one which has completed its word copy - this must be the
67713 +        * one which had caused the DmaQueueOverflow trap ! */
67714 +
67715 +       ASSERT (ctxnum >= ELAN3_DMARING_BASE_CONTEXT_NUM && ctxnum < (ELAN3_DMARING_BASE_CONTEXT_NUM+EP3_NUM_RINGS));
67716 +
67717 +       spin_lock_irqsave (&dev->CProcLock, flags);
67718 +
67719 +       ring = &rail->DmaRings[ctxnum - ELAN3_DMARING_BASE_CONTEXT_NUM];
67720 +       slot = DMA_RING_PREV_POS(ring, ring->Position);
67721 +       
67722 +       while (ring->pDoneBlk[slot] == EP3_EVENT_ACTIVE)
67723 +           slot = DMA_RING_PREV_POS(ring, slot);
67724 +       
67725 +       elan3_sdram_copyq_from_sdram (rail->Device , DMA_RING_DMA(ring,slot), &dmabe, sizeof (E3_DMA));
67726 +
67727 +#if defined(DEBUG_ASSERT)
67728 +       while (slot != DMA_RING_PREV_POS(ring, ring->Position))
67729 +       {
67730 +           ASSERT (ring->pDoneBlk[slot] != EP3_EVENT_ACTIVE);
67731 +           
67732 +           slot = DMA_RING_PREV_POS(ring, slot);
67733 +       }
67734 +#endif
67735 +       spin_unlock_irqrestore (&dev->CProcLock, flags);
67736 +
67737 +       if (dmabe.s.dma_direction == DMA_WRITE)
67738 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
67739 +       else
67740 +       {
67741 +           ASSERT (dmabe.s.dma_direction == DMA_READ_REQUEUE); /* was '=': assignment clobbered dma_direction under DEBUG */
67742 +
67743 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
67744 +       }
67745 +
67746 +#if defined(DEBUG_ASSERT)
67747 +       if (dmabe.s.dma_direction == DMA_WRITE)
67748 +           vp = dmabe.s.dma_destVProc;
67749 +       else
67750 +           vp = dmabe.s.dma_srcVProc;
67751 +       
67752 +       ASSERT (!EP_VP_ISDATA(vp) || (rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State >= EP_NODE_CONNECTED &&
67753 +                                     rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State <= EP_NODE_LOCAL_PASSIVATE));
67754 +#endif
67755 +       /* anonymous retry when no cookie is registered for this event */
67756 +       if (cp != NULL)
67757 +           cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
67758 +       else
67759 +       {
67760 +           ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
67761 +           
67762 +           QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
67763 +       }
67764 +       
67765 +       return (OP_HANDLED);
67766 +
67767 +    case MI_EventQueueOverflow:
67768 +       ASSERT (ctxnum == ELAN3_MRF_CONTEXT_NUM);
67769 +
67770 +       IncrStat (rail, CprocEventQueueOverflow);
67771 +       
67772 +       rail->CommandPortEventTrap = TRUE;
67773 +       return (OP_HANDLED);
67774 +       
67775 +#if defined(PER_CPU_TIMEOUT)
67776 +    case MI_SetEventReadWait:
67777 +       if (ctxnum == ELAN3_MRF_CONTEXT_NUM && trap->FaultSave.s.EventAddress == EP_PACEMAKER_EVENTADDR)
67778 +       {
67779 +           HeartbeatPacemaker (rail);
67780 +           return (OP_HANDLED);
67781 +       }
67782 +#endif
67783 +       /* FALLTHROUGH - an unmatched SetEventReadWait is reported below */
67784 +    default:
67785 +       printk ("ep3_cprocTrap : Context=%x Status=%x TrapType=%x\n", ctxnum, trap->Status.Status, trap->Status.s.TrapType);
67786 +       printk ("               FaultAddr=%x EventAddr=%x FSR=%x\n",
67787 +                trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
67788 +                trap->FaultSave.s.FSR.Status);
67789 +       break;
67790 +    }
67791 +
67792 +//    panic ("ep3_cprocTrap");
67793 +
67794 +    return (OP_HANDLED);
67795 +}
67796 +
67797 +static int  /* re-issue a trapped command to the appropriate command port */
67798 +ep3_cprocReissue (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *tbuf)
67799 +{
67800 +    EP3_RAIL   *rail    = (EP3_RAIL *) ctxt->Private;
67801 +    unsigned  cmdoff = (tbuf->s.ContextType >> 5) & 0xFF;      /* command offset (words) within the command port */
67802 +    int       ctxnum = (tbuf->s.ContextType >> 16) & MAX_ROOT_CONTEXT_MASK;    /* context the command was issued in */
67803 +    /* dma-ring contexts re-issue via the ring's own command port */
67804 +    if (ctxnum >= ELAN3_DMARING_BASE_CONTEXT_NUM && ctxnum < (ELAN3_DMARING_BASE_CONTEXT_NUM+EP3_NUM_RINGS))
67805 +    {
67806 +       EP3_DMA_RING *ring = &rail->DmaRings[ctxnum - ELAN3_DMARING_BASE_CONTEXT_NUM];
67807 +
67808 +       ASSERT ((cmdoff << 2) == offsetof (E3_CommandPort, SetEvent)); /* can only be setevent commands! */
67809 +       ASSERT (tbuf->s.Addr >= DMA_RING_EVENT_ELAN(ring,0) && tbuf->s.Addr < DMA_RING_EVENT_ELAN(ring, ring->Entries));
67810 +       
67811 +       writel (tbuf->s.Addr, ring->CommandPort + (cmdoff << 2));
67812 +    }
67813 +    else
67814 +    {
67815 +       ASSERT (ctxnum == ELAN3_MRF_CONTEXT_NUM);
67816 +       /* otherwise re-issue through the context's own command port */
67817 +       writel (tbuf->s.Addr, ctxt->CommandPort + (cmdoff << 2));
67818 +    }
67819 +    
67820 +    return (OP_HANDLED);
67821 +}
67822 +
67823 +static E3_uint8     /* read a byte at Elan address 'addr' - try SDRAM mapping first, then main memory */
67824 +ep3_load8 (ELAN3_CTXT *ctxt, E3_Addr addr)
67825 +{
67826 +    EP3_RAIL    *rail  = (EP3_RAIL *) ctxt->Private;
67827 +    ELAN3_DEV    *dev = ctxt->Device;
67828 +    sdramaddr_t offset;
67829 +    E3_uint8   *ptr;
67830 +
67831 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
67832 +       return (elan3_sdram_readb (dev, offset));
67833 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != NULL)
67834 +       return (*ptr);
67835 +    /* unmapped address - log it and return 0 */
67836 +    printk ("ep3_load8: %08x\n", addr);
67837 +    return (0);
67838 +}
67839 +
67840 +static void
67841 +ep3_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val)
67842 +{
67843 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
67844 +    ELAN3_DEV   *dev = ctxt->Device;
67845 +    sdramaddr_t offset;
67846 +    E3_uint8   *ptr;
67847 +
67848 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
67849 +       elan3_sdram_writeb (dev, offset, val);
67850 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
67851 +       *ptr = val;
67852 +    else
67853 +       printk ("ep3_store8 %08x\n", addr);
67854 +}
67855 +
67856 +static E3_uint16
67857 +ep3_load16 (ELAN3_CTXT *ctxt, E3_Addr addr)
67858 +{
67859 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
67860 +    ELAN3_DEV   *dev = ctxt->Device;
67861 +    sdramaddr_t offset;
67862 +    E3_uint16  *ptr;
67863 +
67864 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
67865 +       return (elan3_sdram_readw (dev, offset));
67866 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
67867 +       return (*ptr);
67868 +
67869 +    printk ("ep3_load16 %08x\n", addr);
67870 +    return (0);
67871 +}
67872 +
67873 +static void
67874 +ep3_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val)
67875 +{
67876 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
67877 +    ELAN3_DEV   *dev = ctxt->Device;
67878 +    sdramaddr_t offset;
67879 +    E3_uint16  *ptr;
67880 +
67881 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
67882 +       elan3_sdram_writew (dev, offset, val);
67883 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
67884 +       *ptr = val;
67885 +    else
67886 +       printk ("ep3_store16 %08x\n", addr);
67887 +}
67888 +
67889 +static E3_uint32
67890 +ep3_load32 (ELAN3_CTXT *ctxt, E3_Addr addr)
67891 +{
67892 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
67893 +    ELAN3_DEV   *dev = ctxt->Device;
67894 +    sdramaddr_t offset;
67895 +    E3_uint32  *ptr;
67896 +
67897 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
67898 +       return (elan3_sdram_readl(dev, offset));
67899 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
67900 +       return (*ptr);
67901 +    
67902 +    printk ("ep3_load32 %08x\n", addr);
67903 +    return (0);
67904 +}
67905 +
67906 +static void
67907 +ep3_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val)
67908 +{
67909 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
67910 +    ELAN3_DEV   *dev = ctxt->Device;
67911 +    sdramaddr_t offset;
67912 +    E3_uint32  *ptr;
67913 +
67914 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
67915 +       elan3_sdram_writel (dev, offset, val);
67916 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
67917 +       *ptr = val;
67918 +    else
67919 +       printk ("ep3_store32 %08x\n", addr);
67920 +}
67921 +
67922 +static E3_uint64
67923 +ep3_load64 (ELAN3_CTXT *ctxt, E3_Addr addr)
67924 +{
67925 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
67926 +    ELAN3_DEV   *dev = ctxt->Device;
67927 +    sdramaddr_t offset;
67928 +    E3_uint64  *ptr;
67929 +
67930 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
67931 +       return (elan3_sdram_readq (dev, offset));
67932 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
67933 +       return (*ptr);
67934 +
67935 +    printk ("ep3_load64 %08x\n", addr);
67936 +    return (0);
67937 +}
67938 +
67939 +static void
67940 +ep3_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val)
67941 +{
67942 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
67943 +    ELAN3_DEV   *dev = ctxt->Device;
67944 +    sdramaddr_t offset;
67945 +    E3_uint64  *ptr;
67946 +
67947 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
67948 +       elan3_sdram_writeq (dev, offset, val);
67949 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
67950 +       *ptr = val;
67951 +    else
67952 +       printk ("ep3_store64 %08x\n", addr);
67953 +}
67954 +
67955 +/*
67956 + * Local variables:
67957 + * c-file-style: "stroustrup"
67958 + * End:
67959 + */
67960 Index: linux-2.4.21/drivers/net/qsnet/ep/support_elan4.c
67961 ===================================================================
67962 --- linux-2.4.21.orig/drivers/net/qsnet/ep/support_elan4.c      2004-02-23 16:02:56.000000000 -0500
67963 +++ linux-2.4.21/drivers/net/qsnet/ep/support_elan4.c   2005-06-01 23:12:54.689425424 -0400
67964 @@ -0,0 +1,1184 @@
67965 +/*
67966 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
67967 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
67968 + *
67969 + *    For licensing information please see the supplied COPYING file
67970 + *
67971 + */
67972 +
67973 +#ident "@(#)$Id: support_elan4.c,v 1.18.2.3 2004/11/18 12:05:00 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
67974 +/*      $Source: /cvs/master/quadrics/epmod/support_elan4.c,v $*/
67975 +
67976 +#include <qsnet/kernel.h>
67977 +#include <qsnet/kthread.h>
67978 +
67979 +#include <elan/kcomm.h>
67980 +
67981 +#include "kcomm_vp.h"
67982 +#include "kcomm_elan4.h"
67983 +#include "debug.h"
67984 +
67985 +#include <elan4/trtype.h>
67986 +#include <elan4/debug.h>
67987 +
67988 +void
67989 +ep4_register_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp, E4_uint64 cookie, void (*callback)(EP4_RAIL *r, void *arg), void *arg)
67990 +{
67991 +    unsigned long flags;
67992 +    
67993 +    cp->int_val      = cookie;
67994 +    cp->int_callback = callback;
67995 +    cp->int_arg      = arg;
67996 +       
67997 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
67998 +    list_add_tail (&cp->int_link, &rail->r_intcookie_hash[EP4_INTCOOKIE_HASH(cookie)]);
67999 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
68000 +}
68001 +
68002 +void
68003 +ep4_deregister_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp)
68004 +{
68005 +    unsigned long flags;
68006 +    
68007 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
68008 +    list_del (&cp->int_link);
68009 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
68010 +}
68011 +
68012 +
68013 +EP4_INTCOOKIE *
68014 +ep4_lookup_intcookie (EP4_RAIL *rail, E4_uint64 cookie)
68015 +{
68016 +    struct list_head *el;
68017 +    unsigned long flags;
68018 +
68019 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
68020 +    list_for_each (el, &rail->r_intcookie_hash[EP4_INTCOOKIE_HASH(cookie)]) {
68021 +       EP4_INTCOOKIE *cp = list_entry (el, EP4_INTCOOKIE, int_link);
68022 +
68023 +       if (cp->int_val == cookie)
68024 +       {
68025 +           spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
68026 +           return cp;
68027 +       }
68028 +    }
68029 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
68030 +    return NULL;
68031 +}
68032 +
68033 +E4_uint64
68034 +ep4_neterr_cookie (EP4_RAIL *rail, unsigned int node)
68035 +{
68036 +    E4_uint64      cookie;
68037 +    unsigned long  flags;
68038 +
68039 +    spin_lock_irqsave (&rail->r_cookie_lock, flags);
68040 +    cookie = rail->r_cookies[node];
68041 +
68042 +    rail->r_cookies[node] += EP4_COOKIE_INC;
68043 +    
68044 +    spin_unlock_irqrestore (&rail->r_cookie_lock, flags);
68045 +
68046 +    return cookie;
68047 +}
68048 +
68049 +void
68050 +ep4_eproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
68051 +{
68052 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
68053 +    ELAN4_EPROC_TRAP trap;
68054 +
68055 +    elan4_extract_eproc_trap (ctxt->ctxt_dev, status, &trap, 0);
68056 +
68057 +    if (epdebug & DBG_EPTRAP)
68058 +       elan4_display_eproc_trap (DBG_BUFFER, 0, "ep4_eproc_trap", &trap);
68059 +
68060 +    switch (EPROC_TrapType (status))
68061 +    {
68062 +    case EventProcNoFault:
68063 +       EPRINTF1 (DBG_EPTRAP, "%s: EventProcNoFault\n", rail->r_generic.Name);
68064 +       return;
68065 +
68066 +    default:
68067 +       printk ("%s: unhandled eproc trap %d\n", rail->r_generic.Name, EPROC_TrapType (status));
68068 +       elan4_display_eproc_trap (DBG_CONSOLE, 0, "ep4_eproc_trap", &trap);
68069 +    }
68070 +}
68071 +
68072 +void
68073 +ep4_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
68074 +{
68075 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
68076 +    ELAN4_CPROC_TRAP trap;
68077 +    struct list_head *el;
68078 +    register int      i;
68079 +
68080 +    elan4_extract_cproc_trap (ctxt->ctxt_dev, status, &trap, cqnum);
68081 +
68082 +    if (epdebug & DBG_EPTRAP)
68083 +       elan4_display_cproc_trap (DBG_BUFFER, 0, "ep4_cproc_trap", &trap);
68084 +       
68085 +    switch (CPROC_TrapType (status))
68086 +    {
68087 +    case CommandProcInterruptQueueOverflow:
68088 +       /*
68089 +        * Try and handle a bunch of elan main interrupts
68090 +        */
68091 +       for (i = 0; i <EP4_NUM_ECQ; i++) {
68092 +           list_for_each (el, &rail->r_ecq_list[i]) {
68093 +               EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
68094 +           
68095 +               if (elan4_cq2num (ecq->ecq_cq) == cqnum)
68096 +               {
68097 +                   printk ("%s: defer command queue %d after trap %x\n",
68098 +                           rail->r_generic.Name, cqnum, CPROC_TrapType (status));
68099 +       
68100 +                   elan4_queue_mainintop (ctxt->ctxt_dev, &ecq->ecq_intop);
68101 +                   return;
68102 +               }
68103 +           }
68104 +       }
68105 +       break;
68106 +
68107 +    case CommandProcDmaQueueOverflow:
68108 +    case CommandProcThreadQueueOverflow:
68109 +       for (i = 0; i <EP4_NUM_ECQ; i++) {
68110 +           list_for_each (el, &rail->r_ecq_list[i]) {
68111 +               EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
68112 +           
68113 +               if (elan4_cq2num (ecq->ecq_cq) == cqnum)
68114 +               {
68115 +                   printk ("%s: restart command queue %d after trap %x\n",
68116 +                           rail->r_generic.Name, cqnum, CPROC_TrapType (status));
68117 +
68118 +                   elan4_restartcq (ctxt->ctxt_dev, ecq->ecq_cq);
68119 +                   return;
68120 +               }
68121 +           }
68122 +       }
68123 +       break;
68124 +    }
68125 +
68126 +    printk ("%s: unhandled cproc trap %d for cqnum %d\n", rail->r_generic.Name, CPROC_TrapType (status), cqnum);
68127 +    elan4_display_cproc_trap (DBG_CONSOLE, 0, "ep4_cproc_trap", &trap);
68128 +}
68129 +
68130 +void
68131 +ep4_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
68132 +{
68133 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
68134 +    ELAN4_DPROC_TRAP trap;
68135 +
68136 +    elan4_extract_dproc_trap (ctxt->ctxt_dev, status, &trap, unit);
68137 +
68138 +    if (epdebug & DBG_EPTRAP)
68139 +       elan4_display_dproc_trap (DBG_BUFFER, 0, "ep4_dproc_trap", &trap);
68140 +
68141 +    if (! DPROC_PrefetcherFault (trap.tr_status))
68142 +    {
68143 +       switch (DPROC_TrapType (trap.tr_status))
68144 +       {
68145 +       case DmaProcFailCountError:
68146 +           goto retry_this_dma;
68147 +
68148 +       case DmaProcPacketAckError:
68149 +           goto retry_this_dma;
68150 +
68151 +       case DmaProcQueueOverflow:
68152 +           goto retry_this_dma;
68153 +       }
68154 +    }
68155 +
68156 +    printk ("%s: unhandled dproc trap\n", rail->r_generic.Name);
68157 +    elan4_display_dproc_trap (DBG_CONSOLE, 0, "ep4_dproc_trap", &trap);
68158 +    return;
68159 +
68160 + retry_this_dma:
68161 +    /*XXXX implement backoff .... */
68162 +
68163 +    ep4_queue_dma_retry (rail, &trap.tr_desc, EP_RETRY_LOW_PRI);
68164 +}
68165 +
68166 +void
68167 +ep4_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
68168 +{
68169 +    EP4_RAIL         *rail = EP4_CTXT_TO_RAIL (ctxt);
68170 +    ELAN4_TPROC_TRAP *trap = &rail->r_tproc_trap;
68171 +
68172 +    elan4_extract_tproc_trap (ctxt->ctxt_dev, status, trap);
68173 +
68174 +    if (epdebug & DBG_EPTRAP)
68175 +       elan4_display_tproc_trap (DBG_BUFFER, 0, "ep4_tproc_trap", trap);
68176 +       
68177 +    printk ("%s: unhandled tproc trap\n", rail->r_generic.Name);
68178 +    elan4_display_tproc_trap (DBG_CONSOLE, 0, "ep4_tproc_trap", trap);
68179 +}
68180 +
68181 +void
68182 +ep4_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
68183 +{
68184 +    EP4_RAIL         *rail = EP4_CTXT_TO_RAIL (ctxt);
68185 +    ELAN4_IPROC_TRAP *trap = &rail->r_iproc_trap;
68186 +
68187 +    elan4_extract_iproc_trap (ctxt->ctxt_dev, status, trap, unit);
68188 +
68189 +    if (epdebug & DBG_EPTRAP)
68190 +       elan4_display_iproc_trap (DBG_BUFFER, 0, "ep4_iproc_trap", trap);
68191 +       
68192 +    elan4_inspect_iproc_trap (trap);
68193 +
68194 +    switch (IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType))
68195 +    {
68196 +    case InputDmaQueueOverflow:
68197 +       ep4_queue_dma_retry (rail, (E4_DMA *) &trap->tr_dataBuffers[trap->tr_trappedTrans], EP_RETRY_LOW_PRI);
68198 +       return;
68199 +
68200 +    case InputEventEngineTrapped:
68201 +    {
68202 +       E4_IprocTrapHeader *hdrp = &trap->tr_transactions[trap->tr_trappedTrans];
68203 +       sdramaddr_t         inputq;
68204 +       E4_Addr             event;
68205 +
68206 +       /* XXXX: flow control on the command queue which we issue to is 
68207 +        * rather difficult, we don't want to have space for an event 
68208 +        * for each possible context, nor the mechanism to hold the 
68209 +        * context filter up until the event has been executed.  Given
68210 +        * that the event engine will be restarted by this same interrupt
68211 +        * and we're using high priority command queues, then we just use
68212 +        * a single small command queue for this.
68213 +        */
68214 +       switch (IPROC_TransactionType(hdrp->IProcStatusCntxAndTrType) & TR_OPCODE_MASK)
68215 +       {
68216 +       case TR_SETEVENT & TR_OPCODE_MASK:
68217 +           if (hdrp->TrAddr != 0)
68218 +               ep4_set_event_cmd (rail->r_event_ecq, hdrp->TrAddr);
68219 +           return;
68220 +
68221 +       case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:
68222 +           if ((inputq = ep_elan2sdram (&rail->r_generic, hdrp->TrAddr)) == 0)
68223 +               printk ("%s: TR_INPUT_Q_COMMIT at %llx is not sdram\n", rail->r_generic.Name, hdrp->TrAddr);
68224 +           else
68225 +           {
68226 +               if ((event = elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq + offsetof (E4_InputQueue, q_event))) != 0)
68227 +                   ep4_set_event_cmd (rail->r_event_ecq, event);
68228 +               return;
68229 +           }
68230 +       }
68231 +       break;
68232 +    }
68233 +
68234 +    case InputEopErrorOnWaitForEop:
68235 +    case InputEopErrorTrap:
68236 +    case InputCrcErrorAfterPAckOk:
68237 +       if (! (trap->tr_flags & TR_FLAG_ACK_SENT) || (trap->tr_flags & TR_FLAG_EOP_BAD))
68238 +           return;
68239 +       
68240 +       if (EP4_CONTEXT_ISDATA (IPROC_NetworkContext (status)))
68241 +       {
68242 +           unsigned int nodeId = EP4_CONTEXT_TO_NODE (IPROC_NetworkContext (status));
68243 +
68244 +           if ((trap->tr_flags & (TR_FLAG_DMA_PACKET | TR_FLAG_BAD_TRANS)) || 
68245 +               ((trap->tr_flags & TR_FLAG_EOP_ERROR) && (trap->tr_identifyTrans == TR_TRANS_INVALID)))
68246 +           {
68247 +               printk ("%s: network error on dma packet from node %d\n", rail->r_generic.Name, nodeId);
68248 +
68249 +               ep_queue_network_error (&rail->r_generic, EP4_CONTEXT_TO_NODE(IPROC_NetworkContext (status)), EP_NODE_NETERR_DMA_PACKET, unit & 1, 0);
68250 +               return;
68251 +           }
68252 +           
68253 +           if (trap->tr_flags & TR_FLAG_EOP_ERROR)
68254 +           {
68255 +               E4_uint64        status = trap->tr_transactions[trap->tr_identifyTrans].IProcStatusCntxAndTrType;
68256 +               EP_NETERR_COOKIE cookie = 0;
68257 +
68258 +               switch (IPROC_TransactionType (status) & TR_OPCODE_MASK)
68259 +               {
68260 +               case TR_SETEVENT_IDENTIFY & TR_OPCODE_MASK:
68261 +                   if (IPROC_TrapValue(status) == InputNoFault)
68262 +                       cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
68263 +                   else
68264 +                       cookie = trap->tr_dataBuffers[trap->tr_identifyTrans].Data[0];
68265 +                   printk ("%s: network error on setevent <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
68266 +                   break;
68267 +
68268 +               case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:
68269 +                   if (IPROC_TrapValue(status) == InputNoFault)
68270 +                       cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
68271 +                   else
68272 +                       cookie = trap->tr_dataBuffers[trap->tr_identifyTrans].Data[0];
68273 +                   printk ("%s: network error on queue commit <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
68274 +                   break;
68275 +                   
68276 +               case TR_REMOTEDMA & TR_OPCODE_MASK:
68277 +                   cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
68278 +                   printk ("%s: network error on remote dma <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
68279 +                   break;
68280 +
68281 +               case TR_IDENTIFY & TR_OPCODE_MASK:
68282 +                   cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
68283 +                   printk ("%s: network error on identify <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
68284 +                   break;
68285 +
68286 +               default:
68287 +                   panic ("%s: unknown identify transaction type %x for eop error from node %d\n", rail->r_generic.Name,
68288 +                           IPROC_TransactionType (trap->tr_transactions[trap->tr_identifyTrans].IProcStatusCntxAndTrType), nodeId);
68289 +                   break;
68290 +               }
68291 +
68292 +               ep_queue_network_error (&rail->r_generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, unit & 1, cookie);
68293 +           }
68294 +       }
68295 +       return;
68296 +    }
68297 +
68298 +    printk ("%s: unhandled iproc trap\n", rail->r_generic.Name);
68299 +    elan4_display_iproc_trap (DBG_CONSOLE, 0, "ep4_iproc_trap", trap);
68300 +}
68301 +
68302 +void
68303 +ep4_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
68304 +{
68305 +    EP4_RAIL      *rail = EP4_CTXT_TO_RAIL (ctxt);
68306 +    EP4_INTCOOKIE *cp  = ep4_lookup_intcookie (rail, cookie);
68307 +
68308 +    if (cp == NULL)
68309 +    {
68310 +       printk ("ep4_interrupt: cannot find event cookie for %016llx\n", (long long) cookie);
68311 +       return;
68312 +    }
68313 +
68314 +    cp->int_callback (rail, cp->int_arg);
68315 +}
68316 +
68317 +ELAN4_TRAP_OPS ep4_trap_ops = 
68318 +{
68319 +    ep4_eproc_trap,
68320 +    ep4_cproc_trap,
68321 +    ep4_dproc_trap,
68322 +    ep4_tproc_trap,
68323 +    ep4_iproc_trap,
68324 +    ep4_interrupt,
68325 +};
68326 +
68327 +void
68328 +ep4_flush_filters (EP_RAIL *r)
68329 +{
68330 +    /* nothing to do here as elan4_set_filter() flushes the context filter */
68331 +}
68332 +
68333 +struct flush_queues_desc
68334 +{
68335 +    EP4_RAIL      *rail;
68336 +    volatile int   done;
68337 +} ;
68338 +
68339 +static void
68340 +ep4_flush_queues_flushop (ELAN4_DEV *dev, void *arg, int qfull)
68341 +{
68342 +    struct flush_queues_desc *desc  = (struct flush_queues_desc *) arg;
68343 +    EP4_RAIL                 *rail  = desc->rail;
68344 +    E4_uint64                qptrs = read_reg64 (dev, DProcHighPriPtrs);
68345 +    E4_uint32                 qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
68346 +    E4_uint32                 qfptr = E4_QueueFrontPointer (qptrs);
68347 +    E4_uint32                 qbptr = E4_QueueBackPointer (qptrs);
68348 +    E4_DProcQueueEntry        qentry;
68349 +    unsigned long             flags;
68350 +
68351 +    while ((qfptr != qbptr) || qfull)
68352 +    {
68353 +       E4_uint64 typeSize = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_typeSize));
68354 +       
68355 +       if (DMA_Context (qentry.Desc.dma_typeSize) == rail->r_ctxt.ctxt_num)
68356 +       {
68357 +           E4_uint64     vp       = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_vproc));
68358 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(vp)];
68359 +           
68360 +           EP4_ASSERT (rail, !EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
68361 +           
68362 +           if (EP_VP_ISDATA(vp) && nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
68363 +           {
68364 +               /*
68365 +                * This is a DMA going to the node which is being removed, 
68366 +                * so move it onto the node dma list where it will get
68367 +                * handled later.
68368 +                */
68369 +               qentry.Desc.dma_typeSize = typeSize;
68370 +               qentry.Desc.dma_cookie   = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_cookie));
68371 +               qentry.Desc.dma_vproc    = vp;
68372 +               qentry.Desc.dma_srcAddr  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_srcAddr));
68373 +               qentry.Desc.dma_dstAddr  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_dstAddr));
68374 +               qentry.Desc.dma_srcEvent = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_srcEvent));
68375 +               qentry.Desc.dma_dstEvent = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_dstEvent));
68376 +               
68377 +               EPRINTF4 (DBG_RETRY, "ep4_flush_dmas: %016llx %016llx %016llx %016llx\n", qentry.Desc.dma_typeSize, 
68378 +                         qentry.Desc.dma_cookie, qentry.Desc.dma_vproc, qentry.Desc.dma_srcAddr);
68379 +               EPRINTF3 (DBG_RETRY, "                %016llx %016llx %016llx\n", qentry.Desc.dma_dstAddr, 
68380 +                         qentry.Desc.dma_srcEvent, qentry.Desc.dma_dstEvent);
68381 +               
68382 +               ep4_queue_dma_stalled (rail, &qentry.Desc);
68383 +               
68384 +               qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
68385 +               qentry.Desc.dma_cookie   = 0;
68386 +               qentry.Desc.dma_vproc    = 0;
68387 +               qentry.Desc.dma_srcAddr  = 0;
68388 +               qentry.Desc.dma_dstAddr  = 0;
68389 +               qentry.Desc.dma_srcEvent = 0;
68390 +               qentry.Desc.dma_dstEvent = 0;
68391 +               
68392 +               elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
68393 +           }
68394 +       }
68395 +
68396 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
68397 +       qfull = 0;
68398 +    }
68399 +
68400 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
68401 +    desc->done = 1;
68402 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
68403 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
68404 +}
68405 +
68406 +static void
68407 +ep4_flush_queues_haltop (ELAN4_DEV *dev, void *arg)
68408 +{
68409 +    struct flush_queues_desc *desc = (struct flush_queues_desc *) arg;
68410 +
68411 +    elan4_queue_dma_flushop (dev, &desc->rail->r_flushop, 1);
68412 +}
68413 +
68414 +void
68415 +ep4_flush_queues (EP_RAIL *r)
68416 +{
68417 +    EP4_RAIL *rail = (EP4_RAIL *) r;
68418 +    struct flush_queues_desc desc;
68419 +    struct list_head *el, *nel;
68420 +    unsigned long flags;
68421 +    int i;
68422 +
68423 +    /* initialise descriptor */
68424 +    desc.rail  = rail;
68425 +    desc.done  = 0;
68426 +
68427 +    /* First -  stall the dma retry thread, so that it will no longer restart
68428 +     *          any dma's from the retry list */
68429 +    ep_kthread_stall (&rail->r_retry_thread);
68430 +
68431 +    /* Second - flush through all command queues targetted by events, thread etc */
68432 +    ep4_flush_ecqs (rail);
68433 +
68434 +    /* Third - queue a halt operation to flush through all DMA's which are executing
68435 +     *         or on the run queues */
68436 +    kmutex_lock (&rail->r_haltop_mutex);
68437 +
68438 +    rail->r_haltop.op_mask      = INT_DProcHalted;
68439 +    rail->r_haltop.op_function  = ep4_flush_queues_haltop;
68440 +    rail->r_haltop.op_arg       = &desc;
68441 +
68442 +    rail->r_flushop.op_function = ep4_flush_queues_flushop;
68443 +    rail->r_flushop.op_arg      = &desc;
68444 +    
68445 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
68446 +
68447 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
68448 +    while (! desc.done)
68449 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
68450 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
68451 +    kmutex_unlock (&rail->r_haltop_mutex);
68452 +
68453 +    /* Fourth - run down the dma retry lists and move all entries to the cancelled
68454 +     *          list.  Any dma's which were on the run queues have already been
68455 +     *          moved there */
68456 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
68457 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
68458 +    {
68459 +       list_for_each_safe (el,nel, &rail->r_dma_retrylist[i]) {
68460 +           EP4_DMA_RETRY *retry    = list_entry (el, EP4_DMA_RETRY, retry_link);
68461 +           EP_NODE_RAIL  *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(retry->retry_dma.dma_vproc)];
68462 +
68463 +           EP4_ASSERT (rail, nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
68464 +
68465 +           if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
68466 +           {
68467 +               list_del (&retry->retry_link);
68468 +               list_add_tail (&retry->retry_link, &nodeRail->StalledDmas);
68469 +           }
68470 +       }
68471 +    }
68472 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
68473 +    
68474 +    /* Finally - allow the retry thread to run again */
68475 +    ep_kthread_resume (&rail->r_retry_thread);
68476 +}
68477 +
68478 +struct write_qdesc_desc
68479 +{
68480 +    EP4_RAIL      *rail;
68481 +    sdramaddr_t    qaddr;
68482 +    E4_InputQueue *qdesc;
68483 +    volatile int   done;
68484 +} ;
68485 +
68486 +static void
68487 +ep4_write_qdesc_haltop (ELAN4_DEV *dev, void *arg)
68488 +{
68489 +    struct write_qdesc_desc *desc = (struct write_qdesc_desc *) arg;
68490 +    EP4_RAIL                *rail = desc->rail;
68491 +    unsigned long            flags;
68492 +
68493 +    elan4_sdram_copyq_to_sdram (dev, desc->qdesc, desc->qaddr, sizeof (E4_InputQueue));
68494 +
68495 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
68496 +    desc->done = 1;
68497 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
68498 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
68499 +}
68500 +
68501 +void
68502 +ep4_write_qdesc (EP4_RAIL *rail, sdramaddr_t qaddr, E4_InputQueue *qdesc)
68503 +{
68504 +    struct write_qdesc_desc desc;
68505 +    unsigned long flags;
68506 +
68507 +    /* initialise descriptor */
68508 +    desc.rail  = rail;
68509 +    desc.qaddr = qaddr;
68510 +    desc.qdesc = qdesc;
68511 +    desc.done  = 0;
68512 +
68513 +    kmutex_lock (&rail->r_haltop_mutex);
68514 +
68515 +    rail->r_haltop.op_mask     = INT_DiscardingHighPri;
68516 +    rail->r_haltop.op_function = ep4_write_qdesc_haltop;
68517 +    rail->r_haltop.op_arg      = &desc;
68518 +    
68519 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
68520 +
68521 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
68522 +    while (! desc.done)
68523 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
68524 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
68525 +    
68526 +    kmutex_unlock (&rail->r_haltop_mutex);
68527 +}
68528 +#define CQ_SIZE_NWORDS ((CQ_Size (ecq->ecq_cq->cq_size) >> 3) - 8)     /* available number of dwords (less enough to flush) */
68529 +EP4_ECQ *
68530 +ep4_alloc_ecq (EP4_RAIL *rail, unsigned cqsize)
68531 +{
68532 +    EP4_ECQ *ecq;
68533 +    unsigned long pgoff;
68534 +
68535 +    /* no space available, so allocate a new entry */
68536 +    KMEM_ZALLOC (ecq, EP4_ECQ *, sizeof (EP4_ECQ), 1);
68537 +
68538 +    if (ecq == NULL)
68539 +       return 0;
68540 +
68541 +    if ((ecq->ecq_cq = elan4_alloccq (&rail->r_ctxt, cqsize, CQ_EnableAllBits, CQ_Priority)) == NULL)
68542 +    {
68543 +       KMEM_FREE (ecq, sizeof (EP4_ECQ));
68544 +       return 0;
68545 +    }
68546 +
68547 +    pgoff = (ecq->ecq_cq->cq_mapping & (PAGE_SIZE-1));
68548 +
68549 +    ecq->ecq_addr  = ep_rmalloc (rail->r_ecq_rmap, PAGESIZE, 0) + pgoff;
68550 +    ecq->ecq_avail = CQ_SIZE_NWORDS;                   /* available number of dwords (less enough to flush) */
68551 +
68552 +    ecq->ecq_intop.op_function = (ELAN4_HALTFN *) elan4_restartcq;
68553 +    ecq->ecq_intop.op_arg      = ecq->ecq_cq;
68554 +
68555 +    ep4_ioaddr_map (&rail->r_generic, ecq->ecq_addr - pgoff, ecq->ecq_cq->cq_mapping - pgoff, PAGESIZE, EP_PERM_WRITE);
68556 +
68557 +    spin_lock_init (&ecq->ecq_lock);
68558 +
68559 +    return ecq;
68560 +}
68561 +
68562 +void
68563 +ep4_free_ecq (EP4_RAIL *rail, EP4_ECQ *ecq)
68564 +{
68565 +    unsigned long pgoff = (ecq->ecq_cq->cq_mapping & (PAGE_SIZE-1));
68566 +
68567 +    spin_lock_destroy (&ecq->ecq_lock);
68568 +
68569 +    ep4_unmap (&rail->r_generic, ecq->ecq_addr - pgoff, PAGESIZE);
68570 +    ep_rmfree (rail->r_ecq_rmap, PAGESIZE, ecq->ecq_addr - pgoff);
68571 +
68572 +    elan4_freecq (&rail->r_ctxt, ecq->ecq_cq);
68573 +    
68574 +    KMEM_FREE (ecq, sizeof (EP4_ECQ));
68575 +}
68576 +
68577 +EP4_ECQ *
68578 +ep4_get_ecq (EP4_RAIL *rail, unsigned which, unsigned ndwords)
68579 +{
68580 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
68581 +    struct list_head *el;
68582 +    unsigned long flags;
68583 +    EP4_ECQ *ecq;
68584 +    
68585 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
68586 +    list_for_each (el, &rail->r_ecq_list[which]) {
68587 +       EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
68588 +
68589 +       if (ecq->ecq_avail >= ndwords)
68590 +       {
68591 +           ecq->ecq_avail -= ndwords;
68592 +
68593 +           spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
68594 +
68595 +           return ecq;
68596 +       }
68597 +    }
68598 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
68599 +
68600 +    if ((ecq = ep4_alloc_ecq (rail, EP4_ECQ_Size (which))) == NULL)
68601 +       return NULL;
68602 +
68603 +    if (which == EP4_ECQ_EVENT)
68604 +    {
68605 +       if ((ecq->ecq_event = ep_alloc_elan (&rail->r_generic, sizeof (E4_Event32), 0, &ecq->ecq_event_addr)) == 0)
68606 +       {
68607 +           ep4_free_ecq (rail, ecq);
68608 +           return NULL;
68609 +       }
68610 +       
68611 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType),
68612 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
68613 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WritePtr),
68614 +                           ecq->ecq_addr);
68615 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WriteValue),
68616 +                           SET_EVENT_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event)));
68617 +       
68618 +       if ((ecq->ecq_flushcq = ep4_get_ecq (rail, EP4_ECQ_SINGLE, 1)) == NULL)
68619 +       {
68620 +           ep_free_elan (&rail->r_generic, ecq->ecq_event_addr, sizeof (E4_Event32));
68621 +           ep4_free_ecq (rail, ecq);
68622 +           return NULL;
68623 +       }
68624 +    }
68625 +
68626 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
68627 +    list_add (&ecq->ecq_link, &rail->r_ecq_list[which]);
68628 +
68629 +    ecq->ecq_avail -= ndwords;
68630 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
68631 +
68632 +    return ecq;
68633 +}
68634 +
68635 +void
68636 +ep4_put_ecq (EP4_RAIL *rail, EP4_ECQ *ecq, unsigned ndwords)
68637 +{
68638 +    unsigned long flags;
68639 +
68640 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
68641 +
68642 +    ecq->ecq_avail += ndwords;
68643 +    
68644 +    if (ecq->ecq_avail !=  CQ_SIZE_NWORDS) 
68645 +       spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
68646 +    else
68647 +    {
68648 +       list_del (&ecq->ecq_link);
68649 +       spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
68650 +       
68651 +       if (ecq->ecq_flushcq)
68652 +           ep4_put_ecq (rail, ecq->ecq_flushcq, 1);
68653 +       if (ecq->ecq_event_addr)
68654 +           ep_free_elan (&rail->r_generic, ecq->ecq_event_addr, sizeof (E4_Event32));
68655 +
68656 +       ep4_free_ecq (rail, ecq);
68657 +    }
68658 +}
68659 +
68660 +void
68661 +ep4_nop_cmd (EP4_ECQ *ecq, E4_uint64 tag)
68662 +{
68663 +    unsigned long flags;
68664 +
68665 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
68666 +    elan4_nop_cmd (ecq->ecq_cq, tag);
68667 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
68668 +    
68669 +}
68670 +
68671 +void
68672 +ep4_set_event_cmd (EP4_ECQ *ecq, E4_Addr event)
68673 +{
68674 +    unsigned long flags;
68675 +
68676 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
68677 +    elan4_set_event_cmd (ecq->ecq_cq, event);
68678 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
68679 +}
68680 +
68681 +void
68682 +ep4_wait_event_cmd (EP4_ECQ *ecq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1)
68683 +{
68684 +    unsigned long flags;
68685 +
68686 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
68687 +    elan4_wait_event_cmd (ecq->ecq_cq, event, candt, param0, param1);
68688 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
68689 +}
68690 +
68691 +void
68692 +ep4_flush_interrupt (EP4_RAIL *rail, void *arg)
68693 +{
68694 +    unsigned long flags;
68695 +
68696 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
68697 +    rail->r_flush_count = 0;
68698 +    kcondvar_wakeupone (&rail->r_flush_sleep, &rail->r_ecq_lock);
68699 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
68700 +}
68701 +
68702 +void
68703 +ep4_flush_ecqs (EP4_RAIL *rail)
68704 +{
68705 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
68706 +    struct list_head *el;
68707 +    unsigned long flags;
68708 +    int i;
68709 +
68710 +    kmutex_lock (&rail->r_flush_mutex);
68711 +
68712 +    EP4_SDRAM_ASSERT (rail, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0));
68713 +
68714 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
68715 +    /* first flush all the "event" queues */
68716 +    list_for_each (el, &rail->r_ecq_list[EP4_ECQ_EVENT]) {
68717 +       EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
68718 +
68719 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType),
68720 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
68721 +
68722 +       ep4_set_event_cmd (ecq->ecq_flushcq, ecq->ecq_event_addr);
68723 +
68724 +       rail->r_flush_count++;
68725 +    }
68726 +
68727 +    /* next issue the setevents to all the other queues */
68728 +    for (i = EP4_ECQ_ATOMIC; i <EP4_NUM_ECQ; i++)
68729 +    {
68730 +       list_for_each (el,&rail->r_ecq_list[i]) {
68731 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
68732 +
68733 +           ep4_set_event_cmd (ecq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event));
68734 +
68735 +           rail->r_flush_count++;
68736 +       }
68737 +    }
68738 +
68739 +    /* issue the waitevent command */
68740 +    ep4_wait_event_cmd (rail->r_flush_mcq,  rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event),
68741 +                       E4_EVENT_INIT_VALUE (-32 * rail->r_flush_count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0),
68742 +                       rail->r_flush_ecq->ecq_addr,
68743 +                       INTERRUPT_CMD | (rail->r_flush_intcookie.int_val << E4_MAIN_INT_SHIFT));
68744 +    
68745 +    while (rail->r_flush_count)
68746 +       kcondvar_wait (&rail->r_flush_sleep, &rail->r_ecq_lock, &flags);
68747 +    
68748 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
68749 +
68750 +    EP4_SDRAM_ASSERT (rail, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0));
68751 +
68752 +    kmutex_unlock (&rail->r_flush_mutex);
68753 +}
68754 +
68755 +void
68756 +ep4_init_thread (EP4_RAIL *rail, E4_ThreadRegs *regs, sdramaddr_t stackTop, 
68757 +                EP_ADDR stackAddr, E4_Addr startpc, int nargs,...)
68758 +{
68759 +    sdramaddr_t   sp = stackTop - roundup (nargs * sizeof (E4_uint64), E4_STACK_ALIGN);
68760 +    int           i;
68761 +    va_list       ap;
68762 +    
68763 +    /*
68764 +     * the thread start code expects the following :
68765 +     *   %r1 = stack pointer
68766 +     *   %r6 = frame pointer
68767 +     *   %r2 = function to call
68768 +     *
68769 +     *   function args are store on stack above %sp
68770 +     */
68771 +
68772 +    va_start(ap, nargs);
68773 +    for (i = 0; i < nargs; i++)
68774 +       elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, sp + (i * sizeof (E4_uint64)), va_arg (ap, E4_uint64));
68775 +    va_end (ap);
68776 +    
68777 +    regs->Registers[0] = ep_symbol (&rail->r_threadcode, ".thread_start");             /* %r0 - PC */
68778 +    regs->Registers[1] = stackAddr - (stackTop - sp);                                  /* %r1 - stack pointer */
68779 +    regs->Registers[2] = startpc;                                                      /* %r2 - start pc */
68780 +    regs->Registers[3] = 0;
68781 +    regs->Registers[4] = 0;
68782 +    regs->Registers[5] = 0;
68783 +    regs->Registers[6] = stackTop;                                                     /* %r6 - frame pointer */ 
68784 +}
68785 +
68786 +/* retransmission thread */
68787 +
68788 +void
68789 +ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops)
68790 +{
68791 +    ep_kthread_stall (&rail->r_retry_thread);
68792 +    list_add_tail (&ops->op_link, &rail->r_retry_ops);
68793 +    ep_kthread_resume (&rail->r_retry_thread);
68794 +}
68795 +
68796 +void
68797 +ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops)
68798 +{
68799 +    ep_kthread_stall (&rail->r_retry_thread);
68800 +    list_del (&ops->op_link);
68801 +    ep_kthread_resume (&rail->r_retry_thread);
68802 +}
68803 +
68804 +void
68805 +ep4_retry_thread (EP4_RAIL *rail)
68806 +{
68807 +    struct list_head *el;
68808 +
68809 +    kernel_thread_init ("ep4_retry");
68810 +    
68811 +    for (;;)
68812 +    {
68813 +       long nextRunTime = 0;
68814 +
68815 +       list_for_each (el, &rail->r_retry_ops) {
68816 +           EP4_RETRY_OPS *ops = list_entry (el, EP4_RETRY_OPS, op_link);
68817 +
68818 +           nextRunTime = ops->op_func (rail, ops->op_arg, nextRunTime);
68819 +       }
68820 +
68821 +       if (ep_kthread_sleep (&rail->r_retry_thread, nextRunTime) < 0)
68822 +           break;
68823 +    }
68824 +
68825 +    ep_kthread_stopped (&rail->r_retry_thread);
68826 +
68827 +    kernel_thread_exit();
68828 +}
68829 +
68830 +/* DMA retransmission */
68831 +static unsigned ep4_dma_retry_times[EP_NUM_RETRIES];
68832 +
68833 +static unsigned long
68834 +ep4_retry_dmas (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
68835 +{
68836 +    unsigned long yieldAt = lbolt + (hz/10);
68837 +    unsigned long flags;
68838 +    int           i;
68839 +
68840 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
68841 +    {
68842 +       while (! list_empty (&rail->r_dma_retrylist[i]))
68843 +       {
68844 +           EP4_DMA_RETRY *retry = list_entry (rail->r_dma_retrylist[i].next, EP4_DMA_RETRY, retry_link);
68845 +           
68846 +           if (! AFTER(lbolt, retry->retry_time))
68847 +               break;
68848 +
68849 +           if (ep_kthread_should_stall (&rail->r_retry_thread) || AFTER (lbolt, yieldAt))
68850 +               goto cant_do_more;
68851 +           
68852 +           EPRINTF3 (DBG_RETRY, "%s: ep4_retry_dmas: flowcnt %llx %llx\n", rail->r_generic.Name, rail->r_dma_flowcnt, rail->r_main->r_dma_flowcnt);
68853 +
68854 +           if ((rail->r_dma_flowcnt - rail->r_main->r_dma_flowcnt) > EP4_DMA_RETRY_FLOWCNT)
68855 +           {
68856 +               printk ("ep4_retry_dmas: flowcnt %llx %llx\n", rail->r_dma_flowcnt, rail->r_main->r_dma_flowcnt);
68857 +
68858 +               goto cant_do_more;
68859 +           }
68860 +
68861 +           EPRINTF4 (DBG_RETRY, "%s: ep4_retry_dmas: %016llx %016llx %016llx\n", rail->r_generic.Name,
68862 +                     retry->retry_dma.dma_typeSize, retry->retry_dma.dma_cookie, retry->retry_dma.dma_vproc);
68863 +           EPRINTF5 (DBG_RETRY, "%s:                  %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
68864 +                     retry->retry_dma.dma_srcAddr, retry->retry_dma.dma_dstAddr, retry->retry_dma.dma_srcEvent, 
68865 +                     retry->retry_dma.dma_dstEvent);
68866 +
68867 +           elan4_run_dma_cmd (rail->r_dma_ecq->ecq_cq, &retry->retry_dma);
68868 +           elan4_write_dword_cmd (rail->r_dma_ecq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_dma_flowcnt), ++rail->r_dma_flowcnt);
68869 +
68870 +           spin_lock_irqsave (&rail->r_dma_lock, flags);
68871 +           list_del (&retry->retry_link);
68872 +           list_add (&retry->retry_link, &rail->r_dma_freelist);
68873 +           spin_unlock_irqrestore (&rail->r_dma_lock, flags);
68874 +       }
68875 +    }
68876 + cant_do_more:
68877 +
68878 +    /* re-compute the next retry time */
68879 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
68880 +    {
68881 +       if (! list_empty (&rail->r_dma_retrylist[i]))
68882 +       {
68883 +           EP4_DMA_RETRY *retry = list_entry (rail->r_dma_retrylist[i].next, EP4_DMA_RETRY, retry_link);
68884 +
68885 +           SET_NEXT_RUN_TIME (nextRunTime, retry->retry_time);
68886 +       }
68887 +    }
68888 +
68889 +    return nextRunTime;
68890 +}
68891 +
68892 +void
68893 +ep4_initialise_dma_retries (EP4_RAIL *rail)
68894 +{
68895 +    int i;
68896 +
68897 +    spin_lock_init (&rail->r_dma_lock);
68898 +    
68899 +    for (i = 0; i < EP_NUM_RETRIES; i++)
68900 +       INIT_LIST_HEAD (&rail->r_dma_retrylist[i]);
68901 +    
68902 +    INIT_LIST_HEAD (&rail->r_dma_freelist);
68903 +    
68904 +    rail->r_dma_ecq = ep4_alloc_ecq (rail, EP4_DMA_RETRY_CQSIZE);
68905 +    
68906 +    rail->r_dma_allocated = 0;
68907 +    rail->r_dma_reserved  = 0;
68908 +
68909 +    ep4_dma_retry_times[EP_RETRY_HIGH_PRI] = EP_RETRY_HIGH_PRI_TIME;
68910 +
68911 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
68912 +       ep4_dma_retry_times[EP_RETRY_HIGH_PRI_RETRY+i] = EP_RETRY_HIGH_PRI_TIME << i;
68913 +    
68914 +    ep4_dma_retry_times[EP_RETRY_LOW_PRI] = EP_RETRY_LOW_PRI_TIME;
68915 +
68916 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
68917 +       ep4_dma_retry_times[EP_RETRY_LOW_PRI_RETRY+i] = EP_RETRY_LOW_PRI_TIME << i;
68918 +    
68919 +    ep4_dma_retry_times[EP_RETRY_ANONYMOUS] = EP_RETRY_ANONYMOUS_TIME;
68920 +    ep4_dma_retry_times[EP_RETRY_NETERR]    = EP_RETRY_NETERR_TIME;
68921 +
68922 +    rail->r_dma_ops.op_func = ep4_retry_dmas;
68923 +    rail->r_dma_ops.op_arg  = NULL;
68924 +
68925 +    ep4_add_retry_ops (rail, &rail->r_dma_ops);
68926 +}
68927 +
68928 +void
68929 +ep4_finalise_dma_retries (EP4_RAIL *rail)
68930 +{
68931 +    ep4_remove_retry_ops (rail, &rail->r_dma_ops);
68932 +
68933 +    /* Everyone should have given back their retry dma's by now */
68934 +    EP4_ASSERT (rail, rail->r_dma_reserved == 0);
68935 +
68936 +    while (! list_empty (&rail->r_dma_freelist))
68937 +    {
68938 +       EP4_DMA_RETRY *retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
68939 +
68940 +       list_del (&retry->retry_link);
68941 +
68942 +       KMEM_FREE (retry, sizeof (EP4_DMA_RETRY));
68943 +    }
68944 +
68945 +    ep4_free_ecq (rail, rail->r_dma_ecq);
68946 +
68947 +    spin_lock_destroy (&rail->r_dma_lock);
68948 +}
68949 +
68950 +int
68951 +ep4_reserve_dma_retries (EP4_RAIL *rail, unsigned int count, EP_ATTRIBUTE attr)
68952 +{
68953 +    EP4_DMA_RETRY *retry;
68954 +    unsigned int   remaining = count;
68955 +    unsigned long  flags;
68956 +
68957 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
68958 +
68959 +    if (remaining <= (rail->r_dma_allocated - rail->r_dma_reserved))
68960 +    {
68961 +       rail->r_dma_reserved += remaining;
68962 +
68963 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
68964 +
68965 +       return 0;
68966 +    }
68967 +    
68968 +    remaining -= (rail->r_dma_allocated - rail->r_dma_reserved);
68969 +
68970 +    rail->r_dma_reserved = rail->r_dma_allocated;
68971 +
68972 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
68973 +
68974 +    while (remaining > 0)
68975 +    {
68976 +       KMEM_ALLOC (retry, EP4_DMA_RETRY *, sizeof (EP4_DMA_RETRY), !(attr & EP_NO_SLEEP));
68977 +
68978 +       if (retry == NULL)
68979 +           goto failed;
68980 +       
68981 +       remaining--;
68982 +
68983 +       spin_lock_irqsave (&rail->r_dma_lock, flags);
68984 +       list_add (&retry->retry_link, &rail->r_dma_freelist);
68985 +
68986 +       rail->r_dma_allocated++;
68987 +       rail->r_dma_reserved++;
68988 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
68989 +    }
68990 +
68991 +    return 0;
68992 +
68993 + failed:
68994 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
68995 +    rail->r_dma_reserved -= (count - remaining);
68996 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
68997 +
68998 +    return 1;
68999 +}
69000 +
69001 +void
69002 +ep4_release_dma_retries (EP4_RAIL *rail, unsigned int count)
69003 +{
69004 +    unsigned long flags;
69005 +
69006 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69007 +    rail->r_dma_reserved -= count;
69008 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69009 +}
69010 +
69011 +void
69012 +ep4_queue_dma_retry (EP4_RAIL *rail, E4_DMA *dma, int interval)
69013 +{
69014 +    EP4_DMA_RETRY *retry;
69015 +    unsigned long  flags;
69016 +    
69017 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69018 +
69019 +    EP4_ASSERT (rail, !list_empty (&rail->r_dma_freelist));
69020 +    
69021 +    /* take an item of the free list */
69022 +    retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
69023 +
69024 +    list_del (&retry->retry_link);
69025 +    
69026 +    EPRINTF5 (DBG_RETRY, "%s: ep4_queue_dma_retry: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
69027 +             dma->dma_typeSize, dma->dma_cookie, dma->dma_vproc, dma->dma_srcAddr);
69028 +    EPRINTF5 (DBG_RETRY, "%s:                      %016llx %016llx %016llx (%d)\n", rail->r_generic.Name,
69029 +             dma->dma_dstAddr, dma->dma_srcEvent, dma->dma_dstEvent, interval);
69030 +
69031 +    retry->retry_dma.dma_typeSize = dma->dma_typeSize;
69032 +    retry->retry_dma.dma_cookie   = dma->dma_cookie;
69033 +    retry->retry_dma.dma_vproc    = dma->dma_vproc;
69034 +    retry->retry_dma.dma_srcAddr  = dma->dma_srcAddr;
69035 +    retry->retry_dma.dma_dstAddr  = dma->dma_dstAddr;
69036 +    retry->retry_dma.dma_srcEvent = dma->dma_srcEvent;
69037 +    retry->retry_dma.dma_dstEvent = dma->dma_dstEvent;
69038 +
69039 +    retry->retry_time             = lbolt + ep4_dma_retry_times[interval];
69040 +
69041 +    /* chain onto the end of the approriate retry list */
69042 +    list_add_tail (&retry->retry_link, &rail->r_dma_retrylist[interval]);
69043 +
69044 +    ep_kthread_schedule (&rail->r_retry_thread, retry->retry_time);
69045 +
69046 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69047 +}
69048 +
69049 +void
69050 +ep4_queue_dma_stalled (EP4_RAIL *rail, E4_DMA *dma)
69051 +{
69052 +    EP_NODE_RAIL  *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(dma->dma_vproc)];
69053 +    EP4_DMA_RETRY *retry;
69054 +    unsigned long  flags;
69055 +    
69056 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69057 +
69058 +    EP4_ASSERT (rail, !list_empty (&rail->r_dma_freelist));
69059 +    
69060 +    /* take an item of the free list */
69061 +    retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
69062 +
69063 +    list_del (&retry->retry_link);
69064 +    
69065 +    EPRINTF5 (DBG_RETRY, "%s: ep4_queue_dma_stalled: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
69066 +             dma->dma_typeSize, dma->dma_cookie, dma->dma_vproc, dma->dma_srcAddr);
69067 +    EPRINTF4 (DBG_RETRY, "%s:                        %016llx %016llx %016llx\n", rail->r_generic.Name,
69068 +             dma->dma_dstAddr, dma->dma_srcEvent, dma->dma_dstEvent);
69069 +
69070 +    retry->retry_dma.dma_typeSize = dma->dma_typeSize;
69071 +    retry->retry_dma.dma_cookie   = dma->dma_cookie;
69072 +    retry->retry_dma.dma_vproc    = dma->dma_vproc;
69073 +    retry->retry_dma.dma_srcAddr  = dma->dma_srcAddr;
69074 +    retry->retry_dma.dma_dstAddr  = dma->dma_dstAddr;
69075 +    retry->retry_dma.dma_srcEvent = dma->dma_srcEvent;
69076 +    retry->retry_dma.dma_dstEvent = dma->dma_dstEvent;
69077 +
69078 +    /* chain onto the node cancelled dma list */
69079 +    list_add_tail (&retry->retry_link, &nodeRail->StalledDmas);
69080 +
69081 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69082 +}
69083 +
69084 +void
69085 +ep4_free_stalled_dmas (EP4_RAIL *rail, unsigned int nodeId)
69086 +{
69087 +    EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[nodeId];
69088 +    struct list_head *el, *nel;
69089 +    unsigned long flags;
69090 +
69091 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69092 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
69093 +       list_del (el);
69094 +       list_add (el, &rail->r_dma_freelist);
69095 +    }
69096 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69097 +}
69098 +
69099 +void
69100 +ep4_display_rail (EP4_RAIL *rail)
69101 +{
69102 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
69103 +    struct list_head *el;
69104 +    register int      i;
69105 +    unsigned long     flags;
69106 +
69107 +    ep_debugf (DBG_DEBUG, "%s: vendorid=%x deviceid=%x\n", rail->r_generic.Name, 
69108 +              rail->r_generic.Devinfo.dev_vendor_id, rail->r_generic.Devinfo.dev_device_id);
69109 +
69110 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
69111 +    for (i = 0; i < EP4_NUM_ECQ; i++)
69112 +    {
69113 +       list_for_each (el, &rail->r_ecq_list[i]) {
69114 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
69115 +           
69116 +           if (i == EP4_ECQ_EVENT)
69117 +               ep_debugf (DBG_DEBUG, "   ECQ[%d] ecq=%p cqnum=%d addr=%llx avail=%d event=%llx,%llx,%llx\n",
69118 +                          i, ecq, elan4_cq2num (ecq->ecq_cq), ecq->ecq_addr, ecq->ecq_avail,
69119 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType)),
69120 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WriteValue)),
69121 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WritePtr)));
69122 +
69123 +           else
69124 +               ep_debugf (DBG_DEBUG, "   ECQ[%d] ecq=%p cqnum=%d addr=%llx avail=%d\n",
69125 +                          i, ecq, elan4_cq2num (ecq->ecq_cq), ecq->ecq_addr, ecq->ecq_avail);
69126 +       }
69127 +    }
69128 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69129 +
69130 +    ep_debugf (DBG_DEBUG, "   flush count=%ld mcq=%p ecq=%p event %llx.%llx.%llx\n", 
69131 +              rail->r_flush_count, rail->r_flush_mcq, rail->r_flush_ecq,
69132 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_CountAndType)),
69133 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_WritePtr)),
69134 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_WriteValue)));
69135 +    
69136 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69137 +    for (i = 0; i < EP_NUM_RETRIES; i++)
69138 +    {
69139 +       list_for_each (el, &rail->r_dma_retrylist[i]) {
69140 +           EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link);
69141 +           
69142 +           ep_debugf (DBG_DEBUG, "    RETRY[%d] typeSize %llx cookie %llx vproc %llx events %llx %llx\n",
69143 +                      i, retry->retry_dma.dma_typeSize, retry->retry_dma.dma_cookie,
69144 +                      retry->retry_dma.dma_vproc, retry->retry_dma.dma_srcEvent, retry->retry_dma.dma_dstEvent);
69145 +       }
69146 +    }
69147 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69148 +}
69149 Index: linux-2.4.21/drivers/net/qsnet/ep/threadcode.c
69150 ===================================================================
69151 --- linux-2.4.21.orig/drivers/net/qsnet/ep/threadcode.c 2004-02-23 16:02:56.000000000 -0500
69152 +++ linux-2.4.21/drivers/net/qsnet/ep/threadcode.c      2005-06-01 23:12:54.689425424 -0400
69153 @@ -0,0 +1,146 @@
69154 +/*
69155 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
69156 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
69157 + *
69158 + *    For licensing information please see the supplied COPYING file
69159 + *
69160 + */
69161 +
69162 +#ident "@(#)$Id: threadcode.c,v 1.11 2003/10/07 13:22:38 david Exp $"
69163 +/*      $Source: /cvs/master/quadrics/epmod/threadcode.c,v $ */
69164 +
69165 +#include <qsnet/kernel.h>
69166 +
69167 +#include <elan/kcomm.h>
69168 +
69169 +EP_ADDR
69170 +ep_symbol (EP_CODE *code, char *name)
69171 +{
69172 +    EP_SYMBOL *s = code->symbols;
69173 +    
69174 +    while (s->name && strcmp (s->name, name))
69175 +       s++;
69176 +    
69177 +    return (s->name ? s->value : (EP_ADDR) 0);
69178 +}
69179 +
69180 +int
69181 +ep_loadcode (EP_RAIL *rail, EP_CODE *code)
69182 +{
69183 +    register int i;
69184 +
69185 +    EP_ADDR  _stext  = ep_symbol (code, "_stext");
69186 +    EP_ADDR  _etext  = ep_symbol (code, "_etext");
69187 +    EP_ADDR  _sdata  = ep_symbol (code, "_sdata");
69188 +    EP_ADDR  _edata  = ep_symbol (code, "_edata");
69189 +    EP_ADDR  _end    = ep_symbol (code, "_end");
69190 +    EP_ADDR  _rodata = roundup (_etext, sizeof (uint64_t));
69191 +
69192 +    if (_stext == (EP_ADDR) 0 || _etext == (EP_ADDR) 0 ||
69193 +       _sdata == (EP_ADDR) 0 || _edata == (EP_ADDR) 0 ||
69194 +       _end == (EP_ADDR) 0)
69195 +    {
69196 +       printk ("ep_loadcode: symbols not defined correctly for code at %p\n", code);
69197 +       return (EINVAL);
69198 +    }
69199 +
69200 +    /*
69201 +     * Include the rodata in the text segment
69202 +     */
69203 +    _etext = _rodata + code->rodata_size;
69204 +
69205 +    /*
69206 +     * If _etext is in the same page as _sdata,  then allocate a contiguous
69207 +     * chunk of memory and map it as read/write. otherwise allocate two chunks
69208 +     * and map the code in as read-only.
69209 +     */
69210 +    if ((_etext & PAGEMASK) == (_sdata & PAGEMASK))
69211 +    {
69212 +       code->ntext  = btopr (_end - (_stext & PAGEMASK));
69213 +       code->pptext = ep_alloc_memory_elan (rail, _stext & PAGEMASK, ptob (code->ntext), EP_PERM_EXECUTE, 0);
69214 +
69215 +       if (code->pptext == (sdramaddr_t) 0)
69216 +           return (ENOMEM);
69217 +       
69218 +       code->_stext  = code->pptext + (_stext & PAGEOFFSET);
69219 +       code->_rodata = code->_stext + (_rodata - _stext);
69220 +       code->_sdata  = code->_stext + (_sdata - _stext);
69221 +    }
69222 +    else
69223 +    {
69224 +       code->ntext  = btopr (_etext - (_stext & PAGEMASK));
69225 +       code->ndata  = btopr (_end - (_sdata & PAGEMASK));
69226 +
69227 +       if (code->ntext)
69228 +       {
69229 +           code->pptext = ep_alloc_memory_elan (rail, _stext & PAGEMASK, ptob (code->ntext), EP_PERM_EXECUTE, 0);
69230 +
69231 +           if (code->pptext == (sdramaddr_t) 0)
69232 +               return (ENOMEM);
69233 +
69234 +           code->_stext  = code->pptext + (_stext & PAGEOFFSET);
69235 +           code->_rodata = code->_stext + (_rodata - _stext);
69236 +       }
69237 +       
69238 +       if (code->ndata)
69239 +       {
69240 +           code->ppdata = ep_alloc_memory_elan (rail, _sdata & PAGEMASK, ptob (code->ndata), EP_PERM_WRITE, 0);
69241 +
69242 +           if (code->ppdata == (sdramaddr_t) 0)
69243 +           {
69244 +               if (code->ntext) ep_free_memory_elan (rail, _sdata & PAGEMASK);
69245 +               code->ntext = 0;
69246 +
69247 +               return (ENOMEM);
69248 +           }
69249 +           
69250 +           code->_sdata = code->ppdata + (_sdata & PAGEOFFSET);
69251 +       }
69252 +    }
69253 +    
69254 +#ifdef __LITTLE_ENDIAN__
69255 +#  define Flip 3
69256 +#else
69257 +#  define Flip  0
69258 +#endif
69259 +
69260 +    /*
69261 +     * Now copy the text and rodata into the SDRAM
69262 +     * this is linked into the module to be byte 
69263 +     * copied to the SDRAM, since we want to copy
69264 +     * with word accesses we have to do the byte
69265 +     * assembly correctly.
69266 +     */
69267 +    for (i = 0; i < code->text_size; i++)
69268 +       rail->Operations.SdramWriteb (rail, code->_stext + i, code->text[i^Flip]);
69269 +
69270 +    for (i = 0; i < code->rodata_size; i++)
69271 +       rail->Operations.SdramWriteb (rail, code->_rodata + i, code->rodata[i^Flip]);
69272 +    
69273 +    /*
69274 +     * And the initialised data segment.
69275 +     */
69276 +    for (i = 0; i < code->data_size; i++)
69277 +       rail->Operations.SdramWriteb (rail, code->_sdata + i, code->data[i^Flip]);
69278 +
69279 +    return (ESUCCESS);
69280 +}
69281 +
69282 +void
69283 +ep_unloadcode (EP_RAIL *rail, EP_CODE *code)
69284 +{
69285 +    EP_ADDR  _stext = ep_symbol (code, "_stext");
69286 +    EP_ADDR  _sdata = ep_symbol (code, "_sdata");
69287 +
69288 +    if (code->pptext)
69289 +       ep_free_memory_elan (rail, _stext & PAGEMASK);
69290 +    if (code->ppdata)
69291 +       ep_free_memory_elan (rail, _sdata & PAGEMASK);
69292 +    code->pptext = code->ppdata = 0;
69293 +}
69294 +
69295 +/*
69296 + * Local variables:
69297 + * c-file-style: "stroustrup"
69298 + * End:
69299 + */
69300 Index: linux-2.4.21/drivers/net/qsnet/ep/threadcode_elan3.c
69301 ===================================================================
69302 --- linux-2.4.21.orig/drivers/net/qsnet/ep/threadcode_elan3.c   2004-02-23 16:02:56.000000000 -0500
69303 +++ linux-2.4.21/drivers/net/qsnet/ep/threadcode_elan3.c        2005-06-01 23:12:54.690425272 -0400
69304 @@ -0,0 +1,85 @@
69305 +/*
69306 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
69307 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
69308 + *
69309 + *    For licensing information please see the supplied COPYING file
69310 + *
69311 + */
69312 +
69313 +#ident "@(#)$Id: threadcode_elan3.c,v 1.11 2003/10/07 13:22:38 david Exp $"
69314 +/*      $Source: /cvs/master/quadrics/epmod/threadcode_elan3.c,v $ */
69315 +
69316 +#include <qsnet/kernel.h>
69317 +
69318 +#include <elan/kcomm.h>
69319 +
69320 +#include "kcomm_elan3.h"
69321 +#include "debug.h"
69322 +
69323 +#include <elan3/thread.h>
69324 +
69325 +E3_Addr
69326 +ep3_init_thread (ELAN3_DEV  *dev,
69327 +                E3_Addr     fn,                                /* Elan address of function */
69328 +                E3_Addr     addr,                              /* Elan address of stack */
69329 +                sdramaddr_t stack,                             /* sdram address of stack */
69330 +                int           stackSize,                       /* stack size (in bytes) */
69331 +                int           nargs,
69332 +                ...)
69333 +{
69334 +    sdramaddr_t  frame;
69335 +    sdramaddr_t  regs;
69336 +    sdramaddr_t  argsp;
69337 +    int                 i;
69338 +    va_list      ap;
69339 +
69340 +    /*
69341 +     * Align the stack pointer at the top of the stack and leave space for a stack frame
69342 +     */
69343 +    stack = ((stack + stackSize) & ~(E3_STACK_ALIGN-1)) - sizeof (E3_Frame);
69344 +    addr  = ((addr  + stackSize) & ~(E3_STACK_ALIGN-1)) - sizeof (E3_Frame);
69345 +
69346 +    va_start (ap, nargs);
69347 +
69348 +    if (nargs > 6)
69349 +    {
69350 +       stack -= (((nargs*sizeof (E3_uint32))+E3_STACK_ALIGN-1) & ~(E3_STACK_ALIGN-1));
69351 +       addr  -= (((nargs*sizeof (E3_uint32))+E3_STACK_ALIGN-1) & ~(E3_STACK_ALIGN-1));
69352 +    }
69353 +    
69354 +    frame  = stack;
69355 +    regs   = stack - sizeof (E3_OutsRegs);
69356 +
69357 +    /*
69358 +     * Initialise the registers, and stack frame.
69359 +     */
69360 +    elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[6]), fn);
69361 +    elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[7]), 0);
69362 +    
69363 +    if (nargs <= 6)
69364 +    {
69365 +       for (i = 0; i < nargs; i++)
69366 +           elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[i]), va_arg (ap, E3_uint32));
69367 +    }
69368 +    else
69369 +    {
69370 +       for (i = 0; i < 6; i++)
69371 +           elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[i]), va_arg (ap, E3_uint32));
69372 +       
69373 +       for (argsp = frame + offsetof (E3_Frame, fr_argx[0]); i < nargs; i++, argsp += sizeof (E3_uint32))
69374 +           elan3_sdram_writel (dev, argsp, va_arg (ap, int));
69375 +    }
69376 +
69377 +    elan3_sdram_writel (dev, frame + offsetof (E3_Frame, fr_savefp), 0);
69378 +    elan3_sdram_writel (dev, frame + offsetof (E3_Frame, fr_savepc), 0);
69379 +
69380 +    va_end (ap);
69381 +
69382 +    return (addr);
69383 +}
69384 +
69385 +/*
69386 + * Local variables:
69387 + * c-file-style: "stroustrup"
69388 + * End:
69389 + */
69390 Index: linux-2.4.21/drivers/net/qsnet/ep/threadcode_elan3_Linux.c
69391 ===================================================================
69392 --- linux-2.4.21.orig/drivers/net/qsnet/ep/threadcode_elan3_Linux.c     2004-02-23 16:02:56.000000000 -0500
69393 +++ linux-2.4.21/drivers/net/qsnet/ep/threadcode_elan3_Linux.c  2005-06-01 23:12:54.690425272 -0400
69394 @@ -0,0 +1,112 @@
69395 +/* --------------------------------------------------------*/
69396 +/* MACHINE GENERATED ELAN CODE                             */
69397 +#include <qsnet/kernel.h>
69398 +#include <elan/kcomm.h>
69399 +#include "kcomm_elan3.h"
69400 +static uint32_t threadcode_elan3_text[] = {
69401 +0x80a0239c, 0x00001082, 0x00e0a280, 0x47008002, 0x0020a380, 0x20600288, 0x20200286, 0x43008002, 
69402 +0x00000001, 0x0a006081, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
69403 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
69404 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
69405 +0x00000001, 0x00000001, 0xa800c613, 0xa300c609, 0x0020108a, 0x0080900b, 0x00006885, 0x0580a080, 
69406 +0x06008002, 0x02a0a080, 0x06008022, 0xffff0296, 0x04008010, 0xff3f0398, 0x1f008010, 0x00201090, 
69407 +0x00007081, 0x1600801c, 0x00000001, 0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 0x40a0a3e0, 0x00c03f3f, 
69408 +0xf8e017be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 0x0000a081, 0x06008010, 0x40a083e0, 
69409 +0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 0x00a083c0, 0x60a0039c, 0x00e0a280, 
69410 +0xbfffbf12, 0x0020a380, 0x03008012, 0x02201090, 0x03201090, 0x08e0c381, 0x80a0039c, 0xe0a0239c, 
69411 +0x60a023de, 0x80a0a3e0, 0xa0a0a3f0, 0x080010b8, 0x090010b0, 0x0a0010b2, 0x04000037, 0x402006b4, 
69412 +0x50200690, 0x01201092, 0x20a0239c, 0x00a0a3f0, 0x00c03f3f, 0x8ce117be, 0x04e08f80, 0x06008012, 
69413 +0x00000001, 0x00c01ff8, 0x0000b081, 0x06008010, 0x00a083f0, 0x14e007be, 0x00c01ff8, 0x0000b081, 
69414 +0x00a083f0, 0x20a0039c, 0x582006d0, 0x0020a280, 0x05008002, 0x0900a280, 0x10008002, 0x50200690, 
69415 +0xeaffbf30, 0x5c2006d4, 0x18001090, 0x19001092, 0x1b800294, 0x0a201096, 0x8affff7f, 0x05201098, 
69416 +0x446026d0, 0x302027f4, 0xdfffbf10, 0x50200690, 0xfdffbf10, 0x446026c0, 0x5c2006e0, 0x0020a480, 
69417 +0xf9ffbf06, 0x18001090, 0x19001092, 0x1b000494, 0x14201096, 0x7bffff7f, 0x0a201098, 0x0020a280, 
69418 +0xf4ffbf22, 0x486026e0, 0x00007081, 0x1600801c, 0x00000001, 0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 
69419 +0x40a0a3e0, 0x00c03f3f, 0x60e217be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 0x0000a081, 
69420 +0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 0x00a083c0, 
69421 +0x60a0039c, 0xff3f84a0, 0xe0ffbf1c, 0x18001090, 0xd5ffbf30, 0x60a003de, 0x80a083e0, 0xa0a083f0, 
69422 +0x08e0c381, 0xe0a0039c, 0x00a1239c, 0x60a023de, 0x80a0a3e0, 0xa0a0a3f0, 0x44a123d0, 0x090010b0, 
69423 +0x0a0010b6, 0x0b0010b8, 0x0c0010b4, 0x012010ba, 0xdca023fa, 0x142007d2, 0x082007d0, 0x084002b2, 
69424 +0x000027c0, 0xf42006d0, 0x0020a280, 0x15008032, 0xf42006d0, 0x18200790, 0xdca003d2, 0x20a0239c, 
69425 +0x00a0a3f0, 0x00c03f3f, 0x20e317be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ff8, 0x0000b081, 
69426 +0x06008010, 0x00a083f0, 0x14e007be, 0x00c01ff8, 0x0000b081, 0x00a083f0, 0x20a0039c, 0xf42006d0, 
69427 +0x0020a280, 0x0a008022, 0xdca023c0, 0x042007d0, 0x0840a680, 0x06008032, 0xdca023c0, 0x18001082, 
69428 +0x0220d091, 0xe1ffbf10, 0xf42006d0, 0x06008010, 0x190010a2, 0x042006d0, 0x00c026d0, 0x18001082, 
69429 +0x0020d091, 0x042006d0, 0x01200290, 0x042026d0, 0x000006d0, 0x0020a280, 0x04008002, 0x18001090, 
69430 +0x4f010040, 0x1b001092, 0xf02006e0, 0x0020a480, 0xf1ffbf02, 0x40b03611, 0x004004d2, 0x01201290, 
69431 +0x0840a280, 0x0e018012, 0x10001096, 0x046004d0, 0x01208a80, 0x33008002, 0xa0200484, 0x0c2610ba, 
69432 +0x000024fa, 0x00211090, 0x042024d0, 0x246004d0, 0x80200290, 0x082024d0, 0xec2004d0, 0x00210290, 
69433 +0x0c2024d0, 0x102024c4, 0x186004d2, 0x02602a93, 0x098006d0, 0x0001003b, 0x1d000290, 0x098026d0, 
69434 +0xc0ff3f3b, 0x1d000a90, 0x44a103fa, 0x606007d2, 0x00680292, 0x09001290, 0x4000003b, 0x1d001290, 
69435 +0x142024d0, 0x206004d0, 0x10210290, 0x182024d0, 0x186004d0, 0x02202a91, 0x088006d2, 0x0001003b, 
69436 +0x1d400292, 0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x186004d0, 0x00280290, 0x80000015, 0x0a001290, 
69437 +0x08401292, 0x4000003b, 0x1d401292, 0x1c2024d2, 0x01201090, 0xa02024d0, 0x20200496, 0xa8200484, 
69438 +0x306004d0, 0x0020a280, 0x2b008012, 0x00201098, 0x0c2610ba, 0x00c022fa, 0x04e022c0, 0xc0200490, 
69439 +0x10e022d0, 0x186004d2, 0x02602a93, 0x098006d0, 0x0001003b, 0x1d000290, 0x098026d0, 0xc0ff3f3b, 
69440 +0x1d000a90, 0x44a103fa, 0x606007d2, 0x00680292, 0x09001290, 0x4000003b, 0x1d001290, 0x14e022d0, 
69441 +0x206004d0, 0x10210290, 0x18e022d0, 0x186004d0, 0x02202a91, 0x088006d2, 0x0001003b, 0x1d400292, 
69442 +0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x186004d0, 0x00280290, 0x80000015, 0x0a001290, 0x08401292, 
69443 +0x4000003b, 0x1d401292, 0x1ce022d2, 0x4f008010, 0x0020109a, 0x0c00109a, 0x306004d0, 0x0840a380, 
69444 +0x3b00801a, 0xe02004c6, 0x0c2610ba, 0x00c022fa, 0x01202b91, 0x0c000290, 0x02202a91, 0x08400490, 
69445 +0x382002d2, 0x04e022d2, 0x342002d0, 0x08e022d0, 0x0ce022c6, 0x10e022c4, 0x186004d0, 0x02202a91, 
69446 +0x088006d2, 0x0001003b, 0x1d400292, 0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x44a103fa, 0x606007d0, 
69447 +0x00280290, 0x08401292, 0x4000003b, 0x1d401292, 0x14e022d2, 0x206004d0, 0x10210290, 0x18e022d0, 
69448 +0x186004d0, 0x02202a91, 0x088006d4, 0x0001003b, 0x1d800294, 0x088026d4, 0xc0ff3f3b, 0x1d800a94, 
69449 +0x186004d0, 0x00280290, 0x80000013, 0x09001290, 0x08801294, 0x4000003b, 0x1d801294, 0x1ce022d4, 
69450 +0x01201090, 0x008020d0, 0x04e002d0, 0x08c00086, 0x0840039a, 0x01200398, 0x20e00296, 0x306004d0, 
69451 +0x0800a380, 0xc9ffbf0a, 0x08a00084, 0xc0200490, 0xf0ff22d0, 0xe42004d0, 0x0d00a280, 0x0b00801a, 
69452 +0x00201098, 0x04008010, 0x10001096, 0x01200398, 0x20e00296, 0x306004d0, 0x0800a380, 0xfcffbf2a, 
69453 +0x04e022c0, 0xfc3f109a, 0xe42024da, 0x10001082, 0x186004d0, 0x00280290, 0x08006081, 0x00000001, 
69454 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
69455 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
69456 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00201098, 
69457 +0x0c00109a, 0x142004fa, 0xec00823b, 0x3080d61b, 0x00006891, 0x0420a280, 0x3b008002, 0x0c00a280, 
69458 +0x04008002, 0x00000001, 0x0120d091, 0x36008030, 0x7c2006d0, 0x01200290, 0x7c2026d0, 0x782006d0, 
69459 +0x0020a280, 0x04008002, 0x78200690, 0x64000040, 0x40e00692, 0xf02004d0, 0x0020a280, 0x03008012, 
69460 +0xf02026d0, 0x80e026c0, 0x7c2006d0, 0x40e026d0, 0x046004d0, 0x04208a80, 0x13008002, 0x1100108a, 
69461 +0xec2004cc, 0x3fa00b8e, 0x40e0018e, 0x0780239c, 0x0080bbe0, 0x006099e0, 0x00a0b9e0, 0x406099e0, 
69462 +0x40a0b9e0, 0x806099e0, 0x80a0b9e0, 0xc06099e0, 0xc0a0b9e0, 0x00809be0, 0x0780039c, 0x0e008010, 
69463 +0xec2004d2, 0xec2004cc, 0x3fa00b8e, 0x40e0018e, 0x0780239c, 0x0080bbe0, 0x006099e0, 0x00a0b9e0, 
69464 +0x406099e0, 0x40a0b9e0, 0x00809be0, 0x0780039c, 0xec2004d2, 0xe42004d0, 0x886222d0, 0x042006d0, 
69465 +0x00c026d0, 0x000007d0, 0x01208a80, 0x05008012, 0x00000001, 0x142027f2, 0x06008010, 0xdca003fa, 
69466 +0x142027f2, 0xfe3f0a90, 0x000027d0, 0xdca003fa, 0x016007ba, 0xdca023fa, 0x0c2007d0, 0x0840a680, 
69467 +0x04008032, 0x082007d0, 0x03008010, 0x102007f2, 0x084006b2, 0x00007081, 0x1600801c, 0x00000001, 
69468 +0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 0x40a0a3e0, 0x02c03f3f, 0x8ce017be, 0x04e08f80, 0x06008012, 
69469 +0x00000001, 0x00c01ffc, 0x0000a081, 0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 
69470 +0x40a083e0, 0x20a083f0, 0x00a083c0, 0x60a0039c, 0x042007d0, 0x0840a680, 0xb3febf12, 0x190010a2, 
69471 +0x8afebf10, 0xf42006d0, 0x60a003de, 0x80a083e0, 0xa0a083f0, 0x08e0c381, 0x00a1039c, 0x80a0239c, 
69472 +0x042002c4, 0x004022c4, 0x18008030, 0x00007081, 0x16008012, 0x00000001, 0x60a0239c, 0x00a0a3c0, 
69473 +0x20a0a3f0, 0x40a0a3e0, 0x02c03f3f, 0x24e117be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 
69474 +0x0000a081, 0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 
69475 +0x00a083c0, 0x60a0039c, 0x000002c4, 0x00a0a080, 0xe7ffbf12, 0x00000001, 0x042002c4, 0x01a00084, 
69476 +0x042022c4, 0x000002c4, 0x00a0a080, 0xddffbf12, 0x00000001, 0x08e0c381, 0x80a0039c, };
69477 +#define threadcode_elan3_text_size 0x97c
69478 +static uint32_t threadcode_elan3_data[] = {
69479 +0};
69480 +#define threadcode_elan3_data_size 0x0
69481 +static uint32_t threadcode_elan3_rodata[] = {
69482 +0};
69483 +#define threadcode_elan3_rodata_size 0x0
69484 +static EP_SYMBOL threadcode_elan3_symbols[] = {
69485 +    {"__bss_start", 0xff00297c},
69486 +    {"_edata", 0xff00297c},
69487 +    {"_end", 0xff002988},
69488 +    {"_etext", 0xff00097c},
69489 +    {"_sdata", 0xff00297c},
69490 +    {"_stext", 0xff000000},
69491 +    {"ep3_spinblock", 0xff0008dc},
69492 +    {"ep3comms_rcvr", 0xff0002a8},
69493 +    {"kcomm_probe", 0xff00013c},
69494 +    {"r", 0xff00297c},
69495 +    {"rail", 0xff002984},
69496 +    {"rm", 0xff002980},
69497 +    {0, 0}};
69498 +EP_CODE threadcode_elan3 = {
69499 +   (unsigned char *) threadcode_elan3_text,
69500 +   threadcode_elan3_text_size,
69501 +   (unsigned char *) threadcode_elan3_data,
69502 +   threadcode_elan3_data_size,
69503 +   (unsigned char *) threadcode_elan3_rodata,
69504 +   threadcode_elan3_rodata_size,
69505 +   threadcode_elan3_symbols,
69506 +};
69507 Index: linux-2.4.21/drivers/net/qsnet/ep/threadcode_elan4_Linux.c
69508 ===================================================================
69509 --- linux-2.4.21.orig/drivers/net/qsnet/ep/threadcode_elan4_Linux.c     2004-02-23 16:02:56.000000000 -0500
69510 +++ linux-2.4.21/drivers/net/qsnet/ep/threadcode_elan4_Linux.c  2005-06-01 23:12:54.691425120 -0400
69511 @@ -0,0 +1,112 @@
69512 +/* --------------------------------------------------------*/
69513 +/* MACHINE GENERATED ELAN CODE                             */
69514 +#include <qsnet/kernel.h>
69515 +#include <elan/kcomm.h>
69516 +#include "kcomm_elan4.h"
69517 +static uint32_t threadcode_elan4_text[] = {
69518 +0x00a00087, 0xc04060cb, 0x00003080, 0x80001080, 0x02606180, 0x02004032, 0x807f60cb, 0x04606180, 
69519 +0x02004032, 0x407f60d3, 0x08606180, 0x02004032, 0x007f60db, 0x10606180, 0x02004032, 0xc07e60e3, 
69520 +0x20606180, 0x02004032, 0x807e60eb, 0x40606180, 0x02004032, 0x407e60f3, 0x80606180, 0x02004032, 
69521 +0x007e60fb, 0x40001180, 0xc3801080, 0xc07f60c3, 0x20002000, 0x20002000, 0x20002000, 0x20002000, 
69522 +0x407f8001, 0x4060c0c7, 0x4860c0d0, 0x5060c0d1, 0x5860c0d2, 0x6060c0d3, 0x6860c0d4, 0x00208292, 
69523 +0x00608291, 0x00a08294, 0xff3f8088, 0x1c381293, 0xc04044c8, 0x13004290, 0xc000c5d0, 0x08004030, 
69524 +0x00001088, 0x04204288, 0x0020b200, 0x04004003, 0x00208080, 0x9c010040, 0x00a08488, 0xc04044c8, 
69525 +0x20381288, 0x0020b200, 0xf6ff7f13, 0x01208408, 0x11161282, 0x804094c2, 0xc04044c8, 0x20381288, 
69526 +0x0020b200, 0xebff7f13, 0x00208080, 0x406040c7, 0x486040d0, 0x506040d1, 0x586040d2, 0x606040d3, 
69527 +0x686040d4, 0x08e00180, 0xc0608001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
69528 +0x807e8001, 0x4060c0c7, 0x4860c0d0, 0x5060c0d1, 0x5860c0d2, 0x6060c0d3, 0x6860c0d4, 0x7060c0d5, 
69529 +0x7860c0d6, 0x8060c0d7, 0x8860c0d8, 0x9060c0d9, 0x9860c0da, 0xa060c0db, 0xa860c0dc, 0xb060c0dd, 
69530 +0xb860c0de, 0xc060c0df, 0x8061c0c8, 0x00608296, 0x00a0829a, 0x9861c0cb, 0xa061c0cc, 0xa861c0cd, 
69531 +0x01208088, 0x3861c0c8, 0x08e042d2, 0x386140c9, 0x0900900a, 0xa06140c8, 0x986140cb, 0x18e042c9, 
69532 +0x72010040, 0x05b4128a, 0x0020808c, 0x3861c0cc, 0x986140c9, 0xc04042c8, 0x0880b400, 0x39014003, 
69533 +0xffff3f08, 0x90a0851c, 0xe023829f, 0x20f4179f, 0x10e3879f, 0xffff3f08, 0xe023829e, 0x20b4179e, 
69534 +0x03a3879e, 0xffff3f08, 0xe023829d, 0x2074179d, 0x0363879d, 0x00a08495, 0x18a08408, 0x800012c2, 
69535 +0x089a109b, 0x20f4169b, 0x20f8169b, 0x00e88609, 0x20741289, 0x01120008, 0x0a381288, 0x08408297, 
69536 +0x45208088, 0x06341288, 0x806140ca, 0xc88042c8, 0x00288218, 0x04a08408, 0x800012c2, 0x089a1088, 
69537 +0x20341288, 0x20381288, 0x00281299, 0x20a08408, 0x800012c2, 0x089a108a, 0x20b4128a, 0x20b8128a, 
69538 +0x30a08408, 0x800012c2, 0x089a1093, 0x20f41493, 0x20f81493, 0x03f41689, 0x806140cb, 0x2922808c, 
69539 +0x0334138c, 0xccc042c8, 0xc90042d1, 0x02604688, 0x0020b200, 0x03004002, 0x60a08214, 0x80a08214, 
69540 +0x90a08509, 0x804012c8, 0x01208208, 0x804092c8, 0x046012c8, 0x043a1288, 0x0020b200, 0x04004003, 
69541 +0xa86140c8, 0x67ffff7f, 0x00a0868a, 0x88a045d0, 0x0020b400, 0x12004013, 0x00208080, 0x800017c8, 
69542 +0x808096c8, 0x72010040, 0x00a08588, 0x00208290, 0x90a08509, 0x804012c8, 0x01208208, 0x804092c8, 
69543 +0x046012c8, 0x043a1288, 0x0020b200, 0x04004003, 0xa86140c8, 0x53ffff7f, 0x00a0868a, 0x804015c2, 
69544 +0x159a1089, 0x20741289, 0x20781289, 0x40b03608, 0x01208288, 0x0840b200, 0x06004023, 0xa02344c4, 
69545 +0x800017c8, 0x808096c8, 0xbb004010, 0xa8a045c8, 0x01604688, 0x00281288, 0x08009008, 0x00e0b400, 
69546 +0x05004003, 0x3f381289, 0x13408209, 0x03004010, 0x05208088, 0x04208088, 0x09009220, 0x07341889, 
69547 +0x0900840b, 0x05341888, 0x0023820a, 0x01604688, 0x0020b200, 0x1d004002, 0x0a00840c, 0xc900c4d7, 
69548 +0x40c40f08, 0x09208288, 0x08e0c2c8, 0x0a608488, 0x10e0c2c8, 0x81001008, 0x0a341288, 0x18e0c2c8, 
69549 +0x1d608488, 0x20e0c2c8, 0x28e0c2d8, 0x24608508, 0x800012c2, 0x089a1088, 0x20341288, 0x20381288, 
69550 +0x80208208, 0x30e0c2c8, 0x00218108, 0x38e0c2c8, 0x40e0c2d4, 0x48e0c2cc, 0xca00c4df, 0x20608411, 
69551 +0x80e0820b, 0x2020830c, 0x00e0b400, 0x13004013, 0x0020808e, 0xc0c0c2d7, 0x40c40f09, 0x09608289, 
69552 +0x08e0c2c9, 0x0a608488, 0x10e0c2c8, 0x00040008, 0x18e0c2c8, 0x1d608488, 0x20e0c2c8, 0x28e0c2d8, 
69553 +0x40e0c2d4, 0x48e0c2cc, 0xc000c3de, 0x00208083, 0x4c004010, 0x20608411, 0xb8238408, 0x800012c2, 
69554 +0x089a108f, 0x20f4138f, 0x20f8138f, 0x00208083, 0x13c0b000, 0x2e00401b, 0x40c40f08, 0x092082a2, 
69555 +0x00040021, 0xffff3f08, 0xe023828d, 0x2074138d, 0x1063838d, 0x0e808309, 0x0e408209, 0x02741289, 
69556 +0x1540820a, 0x38a0820a, 0x808012c2, 0x0a9a108a, 0x20b4128a, 0x20b8128a, 0xc0c0c2d7, 0x08e0c2e2, 
69557 +0x0a608488, 0x10e0c2c8, 0x20b41288, 0x21008288, 0x18e0c2c8, 0x1d608488, 0x20e0c2c8, 0x28e0c2d8, 
69558 +0x15408209, 0x34608209, 0x804012c2, 0x099a1089, 0x20741289, 0x20781289, 0x30e0c2c9, 0x38e0c2cf, 
69559 +0x40e0c2d4, 0x48e0c2cc, 0xc000c3cd, 0x0ac0830f, 0x0ac08003, 0x20608411, 0x80e0820b, 0x01a0830e, 
69560 +0x1380b300, 0xdcff7f0b, 0x2020830c, 0xe03f830c, 0xc000c3dd, 0xbc238408, 0x800012c2, 0x089a1088, 
69561 +0x20341288, 0x20381288, 0x0300b200, 0x0d00401b, 0x07341888, 0x0020888e, 0x0420b800, 0x08004019, 
69562 +0x0800840b, 0x00040008, 0x18e0c2c8, 0x01a0830e, 0x04a0b300, 0xfdff7f09, 0x80e0820b, 0xfc3f8083, 
69563 +0x07341888, 0x08008408, 0xa06140ca, 0xc00062e3, 0x402062f3, 0xc080e2e3, 0xc080e2f3, 0x982244c8, 
69564 +0x88a0c5c8, 0x88a045c8, 0x0020b200, 0x05004013, 0x04604688, 0x88a08508, 0x80a0c5c8, 0x04604688, 
69565 +0x0020b200, 0x0c004002, 0xd822c4c0, 0xc04065e3, 0x406065f3, 0xc000e1e3, 0x806065e3, 0x4020e1f3, 
69566 +0xc06065f3, 0x8020e1e3, 0xc020e1f3, 0x07004010, 0x88228108, 0xc04065e3, 0x406065f3, 0xc000e1e3, 
69567 +0x4020e1f3, 0x88228108, 0x08d61082, 0x800092c2, 0x03f41689, 0x806140cb, 0x2922808c, 0x0334138c, 
69568 +0xccc042c8, 0xc900c2d1, 0x800017c8, 0x808096c8, 0xa8a045c8, 0x0880b400, 0x03004013, 0x00a18412, 
69569 +0xa0a045d2, 0x98a045c8, 0x0020b200, 0x05004013, 0x386140c9, 0x986140c8, 0x0820c2d2, 0x386140c9, 
69570 +0x01608209, 0xfe61b200, 0x0e004015, 0x3861c0c9, 0x00001088, 0x02204288, 0x0020b200, 0x05004003, 
69571 +0x986140ca, 0x28000040, 0xa06140c8, 0x986140ca, 0xc08042c8, 0x0880b400, 0xd8fe7f13, 0x00a08495, 
69572 +0x98a045cb, 0x00e0b200, 0xbafe7f03, 0x386140c9, 0xa06140c8, 0x60a08509, 0x48000040, 0xe03f808a, 
69573 +0x986140cb, 0x08e0c2d2, 0x386140cc, 0x0120830c, 0xaffe7f10, 0x3861c0cc, 0x406040c7, 0x486040d0, 
69574 +0x506040d1, 0x586040d2, 0x606040d3, 0x686040d4, 0x706040d5, 0x786040d6, 0x806040d7, 0x886040d8, 
69575 +0x906040d9, 0x986040da, 0xa06040db, 0xa86040dc, 0xb06040dd, 0xb86040de, 0xc06040df, 0x08e00180, 
69576 +0x80618001, 0x807f8001, 0xc040e0d3, 0x4060e0db, 0x00208490, 0x00208698, 0x00208080, 0x00208080, 
69577 +0x00e08192, 0x02000040, 0x00608091, 0x14e08110, 0x17208097, 0xc000f2d3, 0xc04060d3, 0x406060db, 
69578 +0x08a00080, 0x80608001, 0x407f8001, 0x4060e0d3, 0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 
69579 +0x00208080, 0x00e08192, 0x02000040, 0x00608091, 0x40e08110, 0xc040e0d1, 0x37208097, 0x3860c0d7, 
69580 +0x00208490, 0x00e08597, 0x00208080, 0x00208080, 0x1f608290, 0x20b41291, 0x08638491, 0x00608092, 
69581 +0x00208293, 0xc000f2d1, 0x406060d3, 0x806060db, 0x08a00080, 0xc0608001, 0x407f8001, 0x4060e0d3, 
69582 +0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 0x00208080, 0x00e08192, 0x02000040, 0x00608091, 
69583 +0x54e08110, 0xc040e0d1, 0x37208097, 0x3860c0d7, 0x00208490, 0x00e08597, 0x00208080, 0x00208080, 
69584 +0x1f608290, 0x20b41291, 0x08638491, 0x00608092, 0x00208293, 0x0ef41294, 0x0d208594, 0x17208095, 
69585 +0x17208096, 0x17208097, 0xc000f2d3, 0x406060d3, 0x806060db, 0x08a00080, 0xc0608001, 0x01208097, 
69586 +0xb0e3c0d7, 0x80a060d2, 0x98e28004, 0x98e2c0c0, 0x80a0c0c4, 0xc080c4c3, 0x01e0b400, 0x06004002, 
69587 +0x00a08490, 0x00e08097, 0x02208097, 0xb0e3c0d7, 0xd8e2d0d0, 0xd8e2c0d0, 0x03208097, 0xb0e3c0d7, 
69588 +0x00e08088, 0x0e004010, 0x00a060c3, 0x407f8001, 0x4060e0d3, 0x8060e0db, 0x00208490, 0x00208698, 
69589 +0x00208080, 0x00208080, 0x01208089, 0x8820c2c9, 0x00608091, 0x00e08197, 0x0020f2d3, 0x406060d3, 
69590 +0x806060db, 0x08e00180, 0xc0608001, };
69591 +#define threadcode_elan4_text_size 0x90c
69592 +static uint32_t threadcode_elan4_data[] = {
69593 +0};
69594 +#define threadcode_elan4_data_size 0x0
69595 +static uint32_t threadcode_elan4_rodata[] = {
69596 +0};
69597 +#define threadcode_elan4_rodata_size 0x0
69598 +static EP_SYMBOL threadcode_elan4_symbols[] = {
69599 +    {".thread_restart", 0x00000000f800000c},
69600 +    {".thread_start", 0x00000000f8000000},
69601 +    {"__bss_start", 0x00000000f810090c},
69602 +    {"_edata", 0x00000000f810090c},
69603 +    {"_end", 0x00000000f8100910},
69604 +    {"_etext", 0x00000000f800090c},
69605 +    {"_sdata", 0x00000000f810090c},
69606 +    {"_stext", 0x00000000f8000000},
69607 +    {"c_queue_rxd", 0x00000000f800087c},
69608 +    {"c_reschedule", 0x00000000f8000744},
69609 +    {"c_stall_thread", 0x00000000f80008cc},
69610 +    {"c_waitevent", 0x00000000f8000788},
69611 +    {"c_waitevent_interrupt", 0x00000000f80007f8},
69612 +    {"ep4_spinblock", 0x00000000f8000080},
69613 +    {"ep4comms_rcvr", 0x00000000f8000140},
69614 +    {0, 0}};
69615 +EP_CODE threadcode_elan4 = {
69616 +   (unsigned char *) threadcode_elan4_text,
69617 +   threadcode_elan4_text_size,
69618 +   (unsigned char *) threadcode_elan4_data,
69619 +   threadcode_elan4_data_size,
69620 +   (unsigned char *) threadcode_elan4_rodata,
69621 +   threadcode_elan4_rodata_size,
69622 +   threadcode_elan4_symbols,
69623 +};
69624 Index: linux-2.4.21/drivers/net/qsnet/jtag/jtagdrv.c
69625 ===================================================================
69626 --- linux-2.4.21.orig/drivers/net/qsnet/jtag/jtagdrv.c  2004-02-23 16:02:56.000000000 -0500
69627 +++ linux-2.4.21/drivers/net/qsnet/jtag/jtagdrv.c       2005-06-01 23:12:54.692424968 -0400
69628 @@ -0,0 +1,451 @@
69629 +/*
69630 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
69631 + *
69632 + *    For licensing information please see the supplied COPYING file
69633 + *
69634 + */
69635 +
69636 +#ident "@(#)$Id: jtagdrv.c,v 1.12 2003/06/07 16:02:35 david Exp $"
69637 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv.c,v $*/
69638 +
69639 +#include <qsnet/types.h>
69640 +
69641 +#include "jtagdrv.h"
69642 +#include <jtag/jtagio.h>
69643 +
69644 +int
69645 +jtagdrv_strobe_data (JTAG_DEV *dev, u_char data)
69646 +{
69647 +    u_char dsr;
69648 +
69649 +    PRINTF (DBG_ECPP, ("jtagdrv_strobe_data: %s %s %s -> ", (data & LPT_DATA_TRST) ? "TRST" : "trst", 
69650 +                      (data & LPT_DATA_TDI) ? "TDI" : "tdi", (data & LPT_DATA_TMS) ? "TMS" : "tms"));
69651 +
69652 +
69653 +    LPT_WRITE_DATA (dev, data); DELAY(5);                      /* Drive NEW values on data wires */
69654 +    LPT_WRITE_CTRL (dev, LPT_CTRL_TCLK); DELAY(5);             /* Drive strobe low */
69655 +    LPT_READ_STAT  (dev, dsr); DELAY(5);                       /* Sample TDI from ring */
69656 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe high */
69657 +
69658 +    PRINTF (DBG_ECPP, ("%s\n", (dsr & LPT_STAT_PE) ? "TDO" : "tdo"));
69659 +
69660 +    return ((dsr & LPT_STAT_PE) ? 1 : 0);
69661 +}
69662 +
69663 +void
69664 +jtagdrv_select_ring (JTAG_DEV *dev, u_int ring)
69665 +{
69666 +    PRINTF (DBG_ECPP, ("jtagdrv_select_ring: ring=0x%x\n", ring));
69667 +
69668 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe and TCLK high */
69669 +    LPT_WRITE_DATA (dev, ring);        DELAY(5);                       /* Drive ring address */
69670 +    LPT_WRITE_CTRL (dev, LPT_CTRL_RCLK); DELAY(5);             /* Drive strobe low */
69671 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe high */
69672 +}
69673 +
69674 +void
69675 +jtagdrv_reset (JTAG_DEV *dev)
69676 +{
69677 +    register int i;
69678 +
69679 +    for (i = 0; i < 5; i++)
69680 +       jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                /* 5 clocks to Reset from any state */
69681 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
69682 +}
69683 +
69684 +void
69685 +jtagdrv_shift_ir (JTAG_DEV *dev, u_char *value, int nbits)
69686 +{
69687 +    register int i;
69688 +    register int bit;
69689 +
69690 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select DR-Scan */
69691 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select IR-Scan */
69692 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Capture-IR */
69693 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Shift-IR */
69694 +    
69695 +    for (i = 0; i < nbits; i++)
69696 +    {
69697 +       /* strobe through the instruction bits,  asserting TMS on the last bit */
69698 +
69699 +       if (i == (nbits-1))
69700 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
69701 +       else
69702 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
69703 +       
69704 +       if (bit)
69705 +           JTAG_SET_BIT(value, i);
69706 +       else
69707 +           JTAG_CLR_BIT(value, i);
69708 +    }
69709 +    
69710 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Update-IR */
69711 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
69712 +}
69713 +
69714 +
69715 +void
69716 +jtagdrv_shift_dr (JTAG_DEV *dev, u_char *value, int nbits)
69717 +{
69718 +    register int i;
69719 +    register int bit;
69720 +
69721 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select DR-Scan */
69722 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Capture-DR */
69723 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Shift-DR */
69724 +    
69725 +    for (i = 0; i < nbits; i++)
69726 +    {
69727 +       /* strobe through the data bits,  asserting TMS on the last bit */
69728 +
69729 +       if (i == (nbits-1))
69730 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
69731 +       else
69732 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
69733 +       
69734 +       if (bit)
69735 +           JTAG_SET_BIT(value, i);
69736 +       else
69737 +           JTAG_CLR_BIT(value, i);
69738 +    }
69739 +    
69740 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Update-DR */
69741 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
69742 +}
69743 +
69744 +static int
69745 +jtagdrv_i2c_start (JTAG_DEV *dev)
69746 +{
69747 +    u_char dsr;
69748 +    int i;
69749 +
69750 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_start\n"));
69751 +    
69752 +    /* Issue a stop sequence */
69753 +    LPT_WRITE_CTRL (dev,  LPT_CTRL_SCLK); DELAY(1);            /* SCLK low */
69754 +    LPT_WRITE_DATA (dev, 0); DELAY(5);                         /* SDA low */
69755 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* SCLK high */
69756 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);              /* SDA high */
69757 +    
69758 +    /* sample the line to see if we're idle */
69759 +    LPT_READ_STAT (dev, dsr);                                  /* sample SDA */
69760 +    if ((dsr & LPT_STAT_SDA) == 0)                             /* Cannot start if SDA already driven */
69761 +    {
69762 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_start: cannot start - sda driven low\n"));
69763 +
69764 +       for (i = 0; i < 16 ; i++)
69765 +       {
69766 +           LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(5);      /* SCLK low */
69767 +           LPT_WRITE_CTRL (dev, 0); DELAY(5);                  /* SCLK high */
69768 +           LPT_READ_STAT  (dev, dsr);
69769 +           
69770 +           if (dsr & LPT_STAT_SDA)
69771 +           {
69772 +               PRINTF (DBG_ECPP, ("jtagdrv_i2c_start - stopped after %d clocks\n", i));
69773 +               break;
69774 +           }
69775 +       }
69776 +
69777 +       if ((dsr & LPT_STAT_SDA) == 0)
69778 +       {
69779 +           PRINTF (DBG_ECPP, ("jtagdrv_i2c_start - cannot start - not idle\n"));
69780 +           return (0);
69781 +       }
69782 +
69783 +       /* seen SDA float high, so issue a stop sequence */
69784 +       LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);          /* SCLK low */
69785 +       LPT_WRITE_DATA (dev, 0); DELAY(5);                      /* SDA low */
69786 +       LPT_WRITE_CTRL (dev, 0); DELAY(5);                      /* SCLK high */
69787 +       LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);           /* SDA high */
69788 +    }
69789 +
69790 +    LPT_WRITE_DATA (dev, 0); DELAY(4);                         /* drive SDA low */
69791 +    return (1);
69792 +}
69793 +
69794 +static void
69795 +jtagdrv_i2c_stop (JTAG_DEV *dev)
69796 +{
69797 +    u_char dsr;
69798 +    int    i;
69799 +
69800 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop\n"));
69801 +
69802 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
69803 +    LPT_WRITE_DATA (dev, 0); DELAY(5);                         /* SDA low */
69804 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* SCLK high */
69805 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);              /* SDA high */
69806 +
69807 +    /* 
69808 +     * bug fix for temperature sensor chip
69809 +     * if it's still driving SDA, then clock
69810 +     * it until it stops driving it 
69811 +     */
69812 +    LPT_READ_STAT (dev, dsr);
69813 +    if ((dsr & LPT_STAT_SDA) == 0)
69814 +    {
69815 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop - slave not stodeved\n"));
69816 +       for (i = 0; i < 16 ; i++)
69817 +       {
69818 +           LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(5);      /* SCLK low */
69819 +           LPT_WRITE_CTRL (dev, 0); DELAY(5);                  /* SCLK high */
69820 +           LPT_READ_STAT  (dev, dsr);
69821 +           
69822 +           if (dsr & LPT_STAT_SDA)
69823 +               break;
69824 +       }
69825 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop - stodeved after %d clocks\n", i));
69826 +    }
69827 +}
69828 +
69829 +static int
69830 +jtagdrv_i2c_strobe (JTAG_DEV *dev, u_char data)
69831 +{
69832 +    u_char dsr;
69833 +    
69834 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_strobe : %s", (data & LPT_DATA_SDA) ? "SDA" : "sda"));
69835 +
69836 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
69837 +    LPT_WRITE_DATA (dev, data);        DELAY(5);                       /* write data */
69838 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
69839 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA */
69840 +
69841 +    PRINTF (DBG_ECPP, (" -> %s\n", (dsr & LPT_STAT_SDA) ? "SDA" : "sda"));
69842 +
69843 +    return ((dsr & LPT_STAT_SDA) ? 1 : 0);
69844 +}
69845 +
69846 +static int
69847 +jtagdrv_i2c_get_ack (JTAG_DEV *dev)
69848 +{
69849 +    u_char dsr;
69850 +
69851 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
69852 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA);        DELAY(5);               /* SDA high */
69853 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
69854 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA */
69855 +
69856 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_get_ack -> %s\n", (dsr & LPT_STAT_SDA) ? "no ack" : "ack"));
69857 +    
69858 +    return ((dsr & LPT_STAT_SDA) ? 0 : 1);
69859 +}
69860 +
69861 +static int
69862 +jtagdrv_i2c_drive_ack (JTAG_DEV *dev, int nack)
69863 +{
69864 +    u_char dsr;
69865 +
69866 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
69867 +    LPT_WRITE_DATA (dev, nack ? LPT_DATA_SDA : 0); DELAY(5);   /* SDA low for ack, high for nack */
69868 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
69869 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA for ack */
69870 +
69871 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_drive_ack %d -> %s\n", nack, (dsr & LPT_STAT_SDA) ? "done" : "more"));
69872 +    
69873 +    return ((dsr & LPT_STAT_SDA) ? 1 : 0);
69874 +}
69875 +
69876 +static void
69877 +jtagdrv_i2c_shift_addr (JTAG_DEV *dev, u_int address, int readNotWrite)
69878 +{
69879 +    register int i;
69880 +
69881 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_addr: %x\n", address));
69882 +
69883 +    for (i = I2C_ADDR_LEN-1; i >= 0; i--)
69884 +       jtagdrv_i2c_strobe (dev, (address & (1 << i)) ? LPT_DATA_SDA : 0);
69885 +    
69886 +    jtagdrv_i2c_strobe (dev, readNotWrite ? LPT_DATA_SDA : 0);
69887 +}
69888 +
69889 +static u_char
69890 +jtagdrv_i2c_shift_data (JTAG_DEV *dev, u_char data)
69891 +{
69892 +    register int i;
69893 +    u_char val = 0;
69894 +    
69895 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_data : %02x\n", data));
69896 +
69897 +    for (i = I2C_DATA_LEN-1; i >= 0; i--)
69898 +       if (jtagdrv_i2c_strobe (dev, data & (1 << i) ? LPT_DATA_SDA : 0))
69899 +           val |= (1 << i);
69900 +
69901 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_data : -> %02x\n", val));
69902 +
69903 +    return (val);
69904 +}
69905 +
69906 +int
69907 +jtagdrv_i2c_write (JTAG_DEV *dev, u_int address, u_int count, u_char *data)
69908 +{
69909 +    register int i;
69910 +
69911 +    PRINTF (DBG_FN, ("jtagdrv_i2c_write: address=%x count=%d data=%02x\n", address, count, data[0]));
69912 +
69913 +    if (! jtagdrv_i2c_start (dev))
69914 +       return (I2C_OP_NOT_IDLE);
69915 +
69916 +    jtagdrv_i2c_shift_addr (dev, address, 0);
69917 +    
69918 +    if (! jtagdrv_i2c_get_ack (dev))
69919 +    {
69920 +       PRINTF (DBG_FN, ("jtagdrv_i2c_write: no ack on address phase\n"));
69921 +
69922 +       jtagdrv_i2c_stop (dev);
69923 +       return (I2C_OP_NO_DEVICE);
69924 +    }
69925 +    
69926 +    for (i = 0; i < count; i++)
69927 +    {
69928 +       jtagdrv_i2c_shift_data (dev, data[i]);
69929 +       
69930 +       if (! jtagdrv_i2c_get_ack (dev))
69931 +       {
69932 +           PRINTF (DBG_FN, ("jtagdrv_i2c_write: no ack on data phase %d\n", i));
69933 +
69934 +           jtagdrv_i2c_stop (dev);
69935 +           return (I2C_OP_WRITE_TO_BIG);
69936 +       }
69937 +    }
69938 +
69939 +    jtagdrv_i2c_stop (dev);
69940 +    return (I2C_OP_SUCCESS);
69941 +}
69942 +
69943 +int
69944 +jtagdrv_i2c_read (JTAG_DEV *dev, u_int address, u_int count, u_char *data)
69945 +{
69946 +    register int i;
69947 +
69948 +    PRINTF (DBG_FN, ("jtagdrv_i2c_read: address=%x count=%d\n", address, count));
69949 +
69950 +    if (! jtagdrv_i2c_start (dev))
69951 +       return (I2C_OP_NOT_IDLE);
69952 +
69953 +    jtagdrv_i2c_shift_addr (dev, address, 1);
69954 +    
69955 +    if (! jtagdrv_i2c_get_ack (dev))
69956 +    {
69957 +       PRINTF (DBG_FN, ("jtagdrv_i2c_read: no ack on address phase\n"));
69958 +
69959 +       jtagdrv_i2c_stop (dev);
69960 +       return (I2C_OP_NO_DEVICE);
69961 +    }
69962 +    
69963 +    for (i = 0; i < count; i++)
69964 +    {
69965 +       data[i] = jtagdrv_i2c_shift_data (dev, 0xff);
69966 +
69967 +       jtagdrv_i2c_drive_ack (dev, (i == (count-1) ? 1 : 0));
69968 +    }
69969 +
69970 +    jtagdrv_i2c_stop (dev);
69971 +    
69972 +    return (I2C_OP_SUCCESS);
69973 +}
69974 +
69975 +int
69976 +jtagdrv_i2c_writereg (JTAG_DEV *dev, u_int address, u_int intaddress, u_int count, u_char *data)
69977 +{
69978 +    register int i;
69979 +
69980 +    PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: address=%x count=%d\n", address, count));
69981 +
69982 +    if (! jtagdrv_i2c_start (dev))
69983 +       return (I2C_OP_NOT_IDLE);
69984 +
69985 +    jtagdrv_i2c_shift_addr (dev, address, 0);
69986 +    
69987 +    if (! jtagdrv_i2c_get_ack (dev))
69988 +    {
69989 +       PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on address phase\n"));
69990 +
69991 +       jtagdrv_i2c_stop (dev);
69992 +       return (I2C_OP_NO_DEVICE);
69993 +    }
69994 +    
69995 +    jtagdrv_i2c_shift_data (dev, intaddress);
69996 +    
69997 +    if (! jtagdrv_i2c_get_ack (dev))
69998 +    {
69999 +       PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on intaddress phase\n"));
70000 +       jtagdrv_i2c_stop (dev);
70001 +       return (I2C_OP_NO_DEVICE);
70002 +    }
70003 +    
70004 +    for (i = 0; i < count; i++)
70005 +    {
70006 +       jtagdrv_i2c_shift_data (dev, data[i]);
70007 +       if (! jtagdrv_i2c_get_ack (dev))
70008 +       {
70009 +           PRINTF (DBG_FN, ("jtagdrv_i2c_writedate: no ack on byte %d\n", i));
70010 +           jtagdrv_i2c_stop (dev);
70011 +           return (I2C_OP_WRITE_TO_BIG);
70012 +       }
70013 +    }
70014 +    
70015 +    jtagdrv_i2c_stop (dev);
70016 +    return (I2C_OP_SUCCESS);
70017 +}
70018 +
70019 +int
70020 +jtagdrv_i2c_readreg (JTAG_DEV *dev, u_int address, u_int intaddress, u_int count, u_char *data)
70021 +{
70022 +    PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: address=%x count=%d\n", address, count));
70023 +
70024 +    if (! jtagdrv_i2c_start (dev))
70025 +       return (I2C_OP_NOT_IDLE);
70026 +
70027 +    jtagdrv_i2c_shift_addr (dev, address, 0);
70028 +    
70029 +    if (! jtagdrv_i2c_get_ack (dev))
70030 +    {
70031 +       PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: no ack on address phase\n"));
70032 +
70033 +       jtagdrv_i2c_stop (dev);
70034 +       return (I2C_OP_NO_DEVICE);
70035 +    }
70036 +    
70037 +    jtagdrv_i2c_shift_data (dev, intaddress);
70038 +    
70039 +    if (! jtagdrv_i2c_get_ack (dev))
70040 +    {
70041 +       PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: no ack on intaddress phase\n"));
70042 +       jtagdrv_i2c_stop (dev);
70043 +       return (I2C_OP_NO_DEVICE);
70044 +    }
70045 +
70046 +    jtagdrv_i2c_stop (dev);
70047 +
70048 +    return (jtagdrv_i2c_read (dev, address, count, data));
70049 +}
70050 +
70051 +void
70052 +jtagdrv_i2c_clock_shift (JTAG_DEV *dev, u_int t, u_int n, u_int m)
70053 +{
70054 +    int i;
70055 +
70056 +    for (i = 2; i >= 0; i--)
70057 +    {
70058 +       LPT_WRITE_DATA (dev, ((t & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
70059 +       LPT_WRITE_DATA (dev, ((t & (1 << i)) ? LPT_DATA_TDI : 0) | LPT_DATA_TMS); DELAY(1);     /* clock high | data */
70060 +    }
70061 +
70062 +    for (i = 1; i >= 0; i--)
70063 +    {
70064 +       LPT_WRITE_DATA (dev, ((n & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
70065 +       LPT_WRITE_DATA (dev, ((n & (1 << i)) ? LPT_DATA_TDI : 0)| LPT_DATA_TMS); DELAY(1);      /* clock high | data */
70066 +    }    
70067 +
70068 +    for (i = 6; i >= 0; i--)
70069 +    {
70070 +       LPT_WRITE_DATA (dev, ((m & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
70071 +       LPT_WRITE_DATA (dev, ((m & (1 << i)) ? LPT_DATA_TDI : 0) | LPT_DATA_TMS); DELAY(1);     /* clock high | data */
70072 +    }    
70073 +
70074 +    LPT_WRITE_DATA (dev, 0); DELAY(1);                                                         /* clock low  | 0 */
70075 +
70076 +    LPT_WRITE_CTRL (dev, LPT_CTRL_TCLK); DELAY(1);                                             /* strobe low */
70077 +    LPT_WRITE_CTRL (dev, 0); DELAY(1);                                                         /* strobe low */
70078 +}
70079 +
70080 Index: linux-2.4.21/drivers/net/qsnet/jtag/jtagdrv.h
70081 ===================================================================
70082 --- linux-2.4.21.orig/drivers/net/qsnet/jtag/jtagdrv.h  2004-02-23 16:02:56.000000000 -0500
70083 +++ linux-2.4.21/drivers/net/qsnet/jtag/jtagdrv.h       2005-06-01 23:12:54.692424968 -0400
70084 @@ -0,0 +1,57 @@
70085 +/*
70086 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
70087 + *
70088 + *    For licensing information please see the supplied COPYING file
70089 + *
70090 + */
70091 +
70092 +#ifndef __JTAGDRV_COMMON_H
70093 +#define __JTAGDRV_COMMON_H
70094 +
70095 +#ident "@(#)$Id: jtagdrv.h,v 1.5 2002/08/09 11:18:37 addy Exp $"
70096 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv.h,v $*/
70097 +
70098 +#include <qsnet/config.h>
70099 +
70100 +/* include OS specific header file */
70101 +#if defined(LINUX)
70102 +#  include "jtagdrv_Linux.h"
70103 +#elif defined(DIGITAL_UNIX)
70104 +#  include "jtagdrv_OSF1.h"
70105 +#elif defined(QNX)
70106 +#  include "jtagdrv_QNX.h"
70107 +#else
70108 +#  error cannot determine os type
70109 +#endif
70110 +
70111 +extern int jtagdebug;
70112 +
70113 +#define DBG_CFG                (1 << 0)
70114 +#define DBG_OPEN       (1 << 1)
70115 +#define DBG_IOCTL      (1 << 2)
70116 +#define DBG_ECPP       (1 << 3)
70117 +#define DBG_FN         (1 << 4)
70118 +
70119 +#define DRIVER_NAME    "jtag"
70120 +
70121 +#if defined(LINUX)
70122 +#define PRINTF(n,X)    ((n) & jtagdebug ? (void) printk X : (void) 0)
70123 +#define PRINTMSG(fmt, arg...) printk(KERN_INFO DRIVER_NAME ": " fmt, ##arg)
70124 +#else
70125 +#define PRINTF(n,X)    ((n) & jtagdebug ? (void) printf X : (void) 0)
70126 +#define PRINTMSG(M, A) printf ("jtag: " M, A)
70127 +#endif
70128 +
70129 +extern void jtagdrv_select_ring (JTAG_DEV *pp, u_int ring);
70130 +extern void jtagdrv_reset (JTAG_DEV *pp);
70131 +extern void jtagdrv_shift_ir (JTAG_DEV *pp, u_char *value, int nbits);
70132 +extern void jtagdrv_shift_dr (JTAG_DEV *pp, u_char *value, int nbits);
70133 +
70134 +extern int  jtagdrv_i2c_write (JTAG_DEV *pp, u_int address, u_int count, u_char *data);
70135 +extern int  jtagdrv_i2c_read (JTAG_DEV *pp, u_int address, u_int count, u_char *data);
70136 +extern int  jtagdrv_i2c_writereg (JTAG_DEV *pp, u_int address, u_int intaddress, u_int count, u_char *data);
70137 +extern int  jtagdrv_i2c_readreg (JTAG_DEV *pp, u_int address, u_int intaddress, u_int count, u_char *data);
70138 +extern void jtagdrv_i2c_clock_shift (JTAG_DEV *pp, u_int t, u_int n, u_int m);
70139 +
70140 +
70141 +#endif /* __JTAGDRV_COMMON_H */
70142 Index: linux-2.4.21/drivers/net/qsnet/jtag/jtagdrv_Linux.c
70143 ===================================================================
70144 --- linux-2.4.21.orig/drivers/net/qsnet/jtag/jtagdrv_Linux.c    2004-02-23 16:02:56.000000000 -0500
70145 +++ linux-2.4.21/drivers/net/qsnet/jtag/jtagdrv_Linux.c 2005-06-01 23:12:54.693424816 -0400
70146 @@ -0,0 +1,319 @@
70147 +/*
70148 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
70149 + *
70150 + *    For licensing information please see the supplied COPYING file
70151 + *
70152 + */
70153 +
70154 +/*
70155 + * $Id: jtagdrv_Linux.c,v 1.18 2004/01/06 11:15:46 fabien Exp $
70156 + * $Source: /cvs/master/quadrics/jtagmod/jtagdrv_Linux.c,v $
70157 + */
70158 +
70159 +#include "jtagdrv.h"
70160 +#include <jtag/jtagio.h>
70161 +
70162 +#include <linux/module.h>
70163 +#include <linux/ioport.h>
70164 +
70165 +MODULE_AUTHOR("Quadrics Ltd.");
70166 +MODULE_DESCRIPTION("JTAG Parallel port QsNet switch interface");
70167 +
70168 +MODULE_LICENSE("GPL");
70169 +
70170 +#define MAJOR_INSTANCE 0       /* 0 is dynamic assign of device major  */ 
70171 +#define MAX_JTAG_DEV   4
70172 +
70173 +int jtag_major = MAJOR_INSTANCE;
70174 +int jtagdebug  = 0;
70175 +MODULE_PARM(jtag_major, "i");
70176 +MODULE_PARM(jtagdebug, "i");
70177 +
70178 +JTAG_DEV       jtag_devs[MAX_JTAG_DEV];
70179 +
70180 +int io[MAX_JTAG_DEV]= { 0, };
70181 +MODULE_PARM(io, "1-4i");
70182 +
70183 +
70184 +/* The fops functions */
70185 +int jtag_open(struct inode *, struct file *);
70186 +int jtag_close(struct inode *, struct file *);
70187 +int jtag_ioctl(struct inode *, struct file *, unsigned int, unsigned long );
70188 +
70189 +struct file_operations jtag_fops = {
70190 +    ioctl:   jtag_ioctl,
70191 +    open:    jtag_open,
70192 +    release: jtag_close,
70193 +};
70194 +
70195 +int
70196 +jtag_probe(void)
70197 +{
70198 +       int i=0;        
70199 +       int default_io = 1;
70200 +       JTAG_DEV *dev;
70201 +       unsigned char value=0xff;
70202 +
70203 +       /* see if there are any user supplied io addr */
70204 +       for ( i = 0; i < MAX_JTAG_DEV; i++) {
70205 +               if ( io[i] != 0x00)
70206 +                       default_io = 0;
70207 +               jtag_devs[i].base = io[i];
70208 +       }
70209 +       
70210 +       if ( default_io ) {
70211 +               jtag_devs[0].base = 0x3bc;
70212 +               jtag_devs[1].base = 0x378;
70213 +               jtag_devs[2].base = 0x278;
70214 +               jtag_devs[3].base = 0x268;
70215 +       }
70216 +
70217 +       for ( i = 0 ; i < MAX_JTAG_DEV; i++) {
70218 +               if ( jtag_devs[i].base == 0x3bc ) 
70219 +                       jtag_devs[i].region = 3;
70220 +               else
70221 +                       jtag_devs[i].region = 8;
70222 +               jtag_devs[i].present = 0;
70223 +       }       
70224 +
70225 +
70226 +       if( default_io )
70227 +       {
70228 +               for( i = 0 ; i < MAX_JTAG_DEV; i++) {
70229 +                       dev=&(jtag_devs[i]);
70230 +                       if(dev->base && request_region(dev->base, dev->region, "jtag")) {
70231 +                               LPT_WRITE(dev, 0,0);
70232 +                               LPT_READ(dev, 0,value);
70233 +                               if ( value != 0xff) {
70234 +                                       PRINTMSG("(%d , %d) present, io=0x%04lx\n",jtag_major,i,dev->base);
70235 +                       
70236 +                                       dev->present=1; 
70237 +                               }
70238 +                               else
70239 +                                       release_region(dev->base, dev->region);
70240 +                       }
70241 +               }
70242 +               return 0;
70243 +       }     
70244 +       else /* Force the region to be present, this makes the PCI parallel cards work */
70245 +       {
70246 +               for( i = 0 ; i < MAX_JTAG_DEV; i++) 
70247 +               {
70248 +                        dev=&(jtag_devs[i]);
70249 +                        if(dev->base && request_region(dev->base, dev->region, "jtag") && (dev->base != 0)) 
70250 +                       {
70251 +                                PRINTMSG("(%d , %d) forced by user, io=0x%04lx\n",jtag_major,i,dev->base);
70252 +                                        dev->present=1;
70253 +                       }       
70254 +                        else   
70255 +                       {
70256 +                                if( dev->base != 0)
70257 +                                       release_region(dev->base, dev->region);
70258 +                       }
70259 +               }
70260 +                return 0;
70261 +       }
70262 +}
70263 +
70264 +int init_module(void)
70265 +{
70266 +       int result,i;
70267 +       result = register_chrdev(jtag_major, DRIVER_NAME, &jtag_fops);
70268 +       if (result < 0) {
70269 +               PRINTMSG("Couldn't register char device err == %d\n",jtag_major);
70270 +               return -1;
70271 +       }
70272 +
70273 +       if ( jtag_major == 0 ) 
70274 +               jtag_major = result;
70275 +
70276 +       for ( i = 0; i < MAX_JTAG_DEV; i++) {
70277 +               jtag_devs[i].base=io[i];        
70278 +       }
70279 +
70280 +       jtag_probe();
70281 +
70282 +       PRINTMSG("Registered character device, major == %d\n",jtag_major);      
70283 +       return 0;
70284 +}      
70285 +
70286 +void cleanup_module(void)
70287 +{
70288 +       int i=0;
70289 +
70290 +       for( i = 0; i < MAX_JTAG_DEV; i++) {
70291 +               if( jtag_devs[i].present)       
70292 +                       release_region(jtag_devs[i].base, jtag_devs[i].region);
70293 +       }
70294 +                       
70295 +       unregister_chrdev(jtag_major, DRIVER_NAME);
70296 +       PRINTMSG("Unloaded char device\n");
70297 +}
70298 +
70299 +
70300 +int
70301 +jtag_open (struct inode *inode, struct file *filp)
70302 +{
70303 +    int unit = MINOR(inode->i_rdev);
70304 +    JTAG_DEV *dev = &jtag_devs[unit];
70305 +
70306 +    if (unit < 0 || unit > MAX_JTAG_DEV || !dev->present)
70307 +       return (-ENXIO);
70308 +
70309 +    /*
70310 +     * Only allow a single open at a time 
70311 +     */
70312 +    if (dev->open)
70313 +       return (-EBUSY);
70314 +    dev->open = 1;
70315 +    
70316 +    /*
70317 +     * Initialise the hardware registers
70318 +     */
70319 +   
70320 +    LPT_WRITE (dev, LPT_CTRL, 0);
70321 +    DELAY(50);
70322 +    LPT_WRITE (dev, LPT_CTRL, LPT_CTRL_INIT);
70323 +
70324 +    MOD_INC_USE_COUNT;
70325 +
70326 +    return (0);
70327 +}
70328 +
70329 +int
70330 +jtag_close(struct inode *inode, struct file *filp)
70331 +{
70332 +  
70333 +    int unit = MINOR(inode->i_rdev);
70334 +    JTAG_DEV *dev = &jtag_devs[unit];
70335 +    
70336 +    if (unit < 0 || unit > MAX_JTAG_DEV || !dev->present)
70337 +       return (-ENXIO);
70338 +    
70339 +    dev->open = 0;
70340 +
70341 +    MOD_DEC_USE_COUNT;
70342 +
70343 +    return (0);
70344 +}
70345 +
70346 +int
70347 +jtag_ioctl (struct inode *inode, struct file *filp, unsigned int io_cmd, unsigned long io_data)
70348 +{
70349 +    int                  unit = MINOR(inode->i_rdev);
70350 +    JTAG_DEV             *dev = &jtag_devs[unit];
70351 +    JTAG_RESET_ARGS      *resetargs;
70352 +    JTAG_SHIFT_ARGS      *shiftargs;
70353 +    I2C_ARGS            *i2cargs;
70354 +    I2C_CLOCK_SHIFT_ARGS *clockargs;
70355 +    u_char              *buf;
70356 +    int                          freq;
70357 +
70358 +    if (unit < 0 || unit > MAX_JTAG_DEV || !dev->present)
70359 +       return (-ENXIO);
70360 +    
70361 +    PRINTF (DBG_IOCTL, ("jtag_ioctl: device %d cmd=%x\n", unit, io_cmd));
70362 +
70363 +    switch (io_cmd)
70364 +    {
70365 +    case JTAG_RESET:
70366 +       resetargs = (JTAG_RESET_ARGS *) io_data;
70367 +
70368 +       if (! VALID_JTAG_RING (resetargs->ring))
70369 +           return (-EINVAL);
70370 +       
70371 +       jtagdrv_select_ring (dev, resetargs->ring);
70372 +       jtagdrv_reset (dev);
70373 +       return (0);
70374 +       
70375 +    case JTAG_SHIFT_IR:
70376 +    case JTAG_SHIFT_DR:
70377 +       shiftargs = (JTAG_SHIFT_ARGS *) io_data;
70378 +       
70379 +       if (! VALID_JTAG_RING (shiftargs->ring) || shiftargs->nbits > (JTAG_MAX_DATA_LEN*JTAG_MAX_CHIPS)) {
70380 +           return (-EFAULT);
70381 +               }
70382 +
70383 +       buf = (u_char *) kmalloc (JTAG_NBYTES(shiftargs->nbits), GFP_KERNEL);
70384 +
70385 +       if (buf == (u_char *) NULL)
70386 +           return (-ENOMEM);
70387 +       
70388 +       if (copy_from_user (buf, shiftargs->value, JTAG_NBYTES(shiftargs->nbits)))
70389 +       {
70390 +           kfree(buf);
70391 +           return (-EFAULT);
70392 +       }
70393 +
70394 +
70395 +       jtagdrv_select_ring (dev, shiftargs->ring);
70396 +
70397 +       if (io_cmd == JTAG_SHIFT_IR)
70398 +           jtagdrv_shift_ir (dev, buf, shiftargs->nbits);
70399 +       else
70400 +           jtagdrv_shift_dr (dev, buf, shiftargs->nbits);
70401 +       
70402 +       if (copy_to_user (shiftargs->value, buf, JTAG_NBYTES (shiftargs->nbits)))
70403 +       {
70404 +           kfree (buf);
70405 +           return (-EFAULT);
70406 +       }
70407 +
70408 +       kfree (buf);
70409 +       return (0);
70410 +
70411 +    case I2C_WRITE:
70412 +    case I2C_READ:
70413 +    case I2C_WRITEREG:
70414 +    case I2C_READREG:
70415 +       i2cargs = (I2C_ARGS *) io_data;
70416 +
70417 +       if (! VALID_I2C_RING(i2cargs->ring) || i2cargs->count > I2C_MAX_DATA_LEN)
70418 +           return (-EFAULT);
70419 +
70420 +       jtagdrv_select_ring (dev, RING_I2C_BIT | i2cargs->ring);
70421 +       switch (io_cmd)
70422 +       {
70423 +       case I2C_WRITE:
70424 +           i2cargs->ok = jtagdrv_i2c_write (dev, i2cargs->device, i2cargs->count, i2cargs->data);
70425 +           break;
70426 +
70427 +       case I2C_READ:
70428 +           i2cargs->ok = jtagdrv_i2c_read (dev, i2cargs->device, i2cargs->count, i2cargs->data);
70429 +           break;
70430 +
70431 +       case I2C_WRITEREG:
70432 +           i2cargs->ok = jtagdrv_i2c_writereg (dev, i2cargs->device, i2cargs->reg, i2cargs->count, i2cargs->data);
70433 +           break;
70434 +
70435 +       case I2C_READREG:
70436 +           i2cargs->ok = jtagdrv_i2c_readreg (dev, i2cargs->device, i2cargs->reg, i2cargs->count, i2cargs->data);
70437 +           break;
70438 +       }
70439 +       return (0);
70440 +
70441 +    case I2C_CLOCK_SHIFT:
70442 +       clockargs = (I2C_CLOCK_SHIFT_ARGS *) io_data;
70443 +
70444 +       freq = (10 * clockargs->m / (1 << (((clockargs->n + 1) & 3))));
70445 +       
70446 +       /* validate the value, and initialise the ring */
70447 +       if (clockargs->t != 0 || clockargs->n > 3 || clockargs->m > 127)
70448 +           return (-EINVAL);
70449 +       
70450 +       jtagdrv_select_ring (dev, RING_I2C_BIT | RING_CLOCK_SHIFT);
70451 +       jtagdrv_i2c_clock_shift (dev, clockargs->t, clockargs->n, clockargs->m);
70452 +       jtagdrv_select_ring (dev, 0);
70453 +       return (0);
70454 +
70455 +    default:
70456 +       return (-EINVAL);
70457 +    }
70458 +    return (-EINVAL);
70459 +}
70460 +
70461 +/*
70462 + * Local variables:
70463 + * c-file-style: "stroustrup"
70464 + * End:
70465 + */
70466 Index: linux-2.4.21/drivers/net/qsnet/jtag/jtagdrv_Linux.h
70467 ===================================================================
70468 --- linux-2.4.21.orig/drivers/net/qsnet/jtag/jtagdrv_Linux.h    2004-02-23 16:02:56.000000000 -0500
70469 +++ linux-2.4.21/drivers/net/qsnet/jtag/jtagdrv_Linux.h 2005-06-01 23:12:54.693424816 -0400
70470 @@ -0,0 +1,174 @@
70471 +/*
70472 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
70473 + *
70474 + *    For licensing information please see the supplied COPYING file
70475 + *
70476 + */
70477 +
70478 +#ident "@(#)$Id: jtagdrv_Linux.h,v 1.3 2002/08/09 11:18:37 addy Exp $"
70479 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv_Linux.h,v $*/
70480 +
70481 +#ifndef __JTAGDRV_LINUX_H
70482 +#define __JTAGDRV_LINUX_H
70483 +
70484 +#include <qsnet/kernel.h>
70485 +#include <asm/io.h>
70486 +
70487 +typedef struct jtag_dev
70488 +{
70489 +    unsigned long      base;
70490 +    int                 region;
70491 +
70492 +    u_int              present:1;
70493 +    u_int              open:1;
70494 +} JTAG_DEV;
70495 +
70496 +/*
70497 +**
70498 +**                     Hardware Defines
70499 +**
70500 +*/
70501 +
70502 +/*
70503 + * Assume that bit 4 of the Control Register is set to 1 (by default) 
70504 + * to enable the printer port (CS3).
70505 + *
70506 + * The default base address is 3BC-3BF. 
70507 + */
70508 +
70509 +#define LPT0   0x3BC                   /* CSR Base Address - note this can
70510 +                                        * change depending on the setting
70511 +                                        * in the Control Register 0.
70512 +                                        *
70513 +                                        * LPT1 0x378
70514 +                                        * LPT2 0x278
70515 +                                        * LPT3 0x268
70516 +                                       */
70517 +
70518 +/*
70519 + *     Register offsets from the port base address
70520 + */
70521 +
70522 +#define LPT_REGISTER_0 0
70523 +#define LPT_REGISTER_1 1
70524 +#define LPT_REGISTER_2 2
70525 +#define LPT_REGISTER_3 0x400
70526 +#define LPT_REGISTER_4 0x401
70527 +#define LPT_REGISTER_5 0x402
70528 +
70529 +/*
70530 + *     Chip control registers
70531 + */
70532 +                                       /* Base address for Super I/O National*/
70533 +
70534 +#define SIO_BASE_ADDR  0x26e           /* Semiconductor PC87332VLJ combo-chip*/
70535 +#define CR4_REG                0x04            /* index 4, printer control reg 4 */
70536 +
70537 +#define LPT_EPP                0x01            /* Enable bit for epp */
70538 +#define LPT_ECP                0x04            /* Enable bit for ecp */
70539 +
70540 +/*
70541 + *     Registers for use with centronics, nibble and byte modes.
70542 + */
70543 +
70544 +#define LPT_DATA       LPT_REGISTER_0          /* line printer port data */
70545 +#define LPT_STAT       LPT_REGISTER_1          /* LPT port status        */
70546 +#define LPT_CTRL       LPT_REGISTER_2          /* LPT port control       */
70547 +
70548 +/*
70549 + *     Registers for use with ECP mode.
70550 + */ 
70551 +
70552 +#define LPT_DFIFO      LPT_REGISTER_3          /* r/w fifo register    */
70553 +#define LPT_CFGB       LPT_REGISTER_4          /* Configuration B      */
70554 +#define LPT_ECR                LPT_REGISTER_5          /* Extended control      */
70555 +
70556 +/*
70557 + * Bit assignments for ECR register.
70558 + */
70559 +
70560 +       /* Bits 0-4 */
70561 +
70562 +#define LPT_ECR_EMPTY  0x01            /* FIFO is empty */
70563 +#define LPT_ECR_FULL   0x02            /* FIFO is full */
70564 +#define LPT_ECR_SERV   0x04            /* Service bit */
70565 +#define LPT_ECR_DMA    0x08            /* DMA enable */
70566 +#define LPT_ECR_nINTR  0x10            /* Interrupt disable */
70567 +
70568 +       /*
70569 +        * Bits 5-7 are ECR modes.
70570 +        */
70571 +
70572 +#define LPT_ECR_PAR    0x20            /* Parallel port FIFO mode */
70573 +#define LPT_ECR_ECP    0x60            /* ECP mode */
70574 +#define LPT_ECR_CFG    0xE0            /* Configuration mode */
70575 +#define LPT_ECR_CLEAR  ~0xE0           /* Clear mode bits */
70576 +
70577 +/*
70578 + * Bit assignments for the parallel port STATUS register:
70579 + */
70580 +
70581 +#define LPT_STAT_BIT0  0X1     /* Reserved. Bit always set.            */
70582 +#define LPT_STAT_BIT1  0X2     /* Reserved. Bit always set.            */
70583 +#define LPT_STAT_IRQ   0x4     /* interrupt status bit                 */
70584 +#define LPT_STAT_ERROR 0x8     /* set to 0 to indicate error           */
70585 +#define LPT_STAT_SLCT  0x10    /* status of SLCT lead from printer     */
70586 +#define LPT_STAT_PE    0x20    /* set to 1 when out of paper           */
70587 +#define LPT_STAT_ACK   0x40    /* acknowledge - set to 0 when ready    */
70588 +#define LPT_STAT_nBUSY 0x80    /* busy status bit, 0=busy, 1=ready     */
70589 +
70590 +/*
70591 + * Bit assignments for the parallel port CONTROL register:
70592 + */
70593 +
70594 +#define LPT_CTRL_nSTROBE       0x1     /* Printer Strobe Control       */
70595 +#define LPT_CTRL_nAUTOFD       0x2     /* Auto Feed Control            */
70596 +#define LPT_CTRL_INIT          0x4     /* Initialize Printer Control   */
70597 +#define LPT_CTRL_nSLCTIN       0x8     /* 0=select printer, 1=not selected */
70598 +#define LPT_CTRL_IRQ           0x10    /* Interrupt Request Enable Control */
70599 +#define LPT_CTRL_DIR           0x20    /* Direction control            */
70600 +#define LPT_CTRL_BIT6          0X40    /* Reserved. Bit always set.    */
70601 +#define LPT_CTRL_BIT7          0X80    /* Reserved. Bit always set.    */
70602 +
70603 +
70604 +#define LPT_WRITE(dev, regname, value) do { outb(value, (dev)->base + regname); } while (0)
70605 +#define LPT_READ(dev, regname,value)   do { value = inb((dev)->base + regname); } while (0)
70606 +
70607 +
70608 +
70609 +/* Standard register access macros */
70610 +#define LPT_WRITE_CTRL(dev, value)     LPT_WRITE(dev, LPT_CTRL, LPT_CTRL_INIT | value)
70611 +#define LPT_WRITE_DATA(dev, value)     LPT_WRITE(dev, LPT_DATA, value)
70612 +#define LPT_READ_STAT(dev, value)      LPT_READ(dev, LPT_STAT, value)
70613 +
70614 +/*
70615 + * The jtag signals are connected to the parallel port as follows :
70616 + *
70617 + *  TRST       bit 0
70618 + *  TDI                bit 1
70619 + *  TMS                bit 2
70620 + *  TCLK       AFX
70621 + *  TDO                PE
70622 + */
70623 +#define LPT_DATA_TRST  1
70624 +#define LPT_DATA_TDI   2
70625 +#define LPT_DATA_TMS   4
70626 +#define LPT_CTRL_TCLK  LPT_CTRL_nAUTOFD
70627 +#define LPT_STAT_TDO   LPT_STAT_PE
70628 +
70629 +/*
70630 + * The I2C signals are connected as follows :
70631 + */
70632 +#define LPT_DATA_SDA   2
70633 +#define LPT_CTRL_SCLK  LPT_CTRL_nAUTOFD
70634 +#define LPT_STAT_SDA   LPT_STAT_PE
70635 +
70636 +/*
70637 + * The ring selection signals are as follows :
70638 + *  addr       bit 0-7
70639 + *  clock      nSLCTIN
70640 + */
70641 +#define LPT_CTRL_RCLK  LPT_CTRL_nSLCTIN
70642 +
70643 +
70644 +#endif /* __JTAGDRV_LINUX_H */
70645 Index: linux-2.4.21/drivers/net/qsnet/jtag/Makefile
70646 ===================================================================
70647 --- linux-2.4.21.orig/drivers/net/qsnet/jtag/Makefile   2004-02-23 16:02:56.000000000 -0500
70648 +++ linux-2.4.21/drivers/net/qsnet/jtag/Makefile        2005-06-01 23:12:54.694424664 -0400
70649 @@ -0,0 +1,31 @@
70650 +#
70651 +# Makefile for Quadrics QsNet
70652 +#
70653 +# Copyright (c) 2002-2004 Quadrics Ltd
70654 +#
70655 +# File: drivers/net/qsnet/jtag/Makefile
70656 +#
70657 +
70658 +
70659 +#
70660 +
70661 +#
70662 +# Makefile for Quadrics QsNet
70663 +#
70664 +# Copyright (c) 2004 Quadrics Ltd.
70665 +#
70666 +# File: driver/net/qsnet/jtag/Makefile
70667 +#
70668 +
70669 +list-multi             := jtag.o
70670 +jtag-objs      := jtagdrv_Linux.o jtagdrv.o
70671 +export-objs            := 
70672 +obj-$(CONFIG_JTAG)     := jtag.o
70673 +
70674 +jtag.o : $(jtag-objs)
70675 +       $(LD) -r -o $@ $(jtag-objs)
70676 +
70677 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
70678 +
70679 +include $(TOPDIR)/Rules.make
70680 +
70681 Index: linux-2.4.21/drivers/net/qsnet/jtag/Makefile.conf
70682 ===================================================================
70683 --- linux-2.4.21.orig/drivers/net/qsnet/jtag/Makefile.conf      2004-02-23 16:02:56.000000000 -0500
70684 +++ linux-2.4.21/drivers/net/qsnet/jtag/Makefile.conf   2005-06-01 23:12:54.694424664 -0400
70685 @@ -0,0 +1,10 @@
70686 +# Flags for generating QsNet Linux Kernel Makefiles
70687 +MODNAME                =       jtag.o
70688 +MODULENAME     =       jtag
70689 +KOBJFILES      =       jtagdrv_Linux.o jtagdrv.o
70690 +EXPORT_KOBJS   =       
70691 +CONFIG_NAME    =       CONFIG_JTAG
70692 +SGALFC         =       
70693 +# EXTRALINES START
70694 +
70695 +# EXTRALINES END
70696 Index: linux-2.4.21/drivers/net/qsnet/jtag/quadrics_version.h
70697 ===================================================================
70698 --- linux-2.4.21.orig/drivers/net/qsnet/jtag/quadrics_version.h 2004-02-23 16:02:56.000000000 -0500
70699 +++ linux-2.4.21/drivers/net/qsnet/jtag/quadrics_version.h      2005-06-01 23:12:54.694424664 -0400
70700 @@ -0,0 +1 @@
70701 +#define QUADRICS_VERSION "4.30qsnet"
70702 Index: linux-2.4.21/drivers/net/qsnet/Makefile
70703 ===================================================================
70704 --- linux-2.4.21.orig/drivers/net/qsnet/Makefile        2004-02-23 16:02:56.000000000 -0500
70705 +++ linux-2.4.21/drivers/net/qsnet/Makefile     2005-06-01 23:12:54.695424512 -0400
70706 @@ -0,0 +1,17 @@
70707 +#
70708 +# Makefile for Quadrics QsNet
70709 +#
70710 +# Copyright (c) 2003 Quadrics Ltd.
70711 +#
70712 +# File: driver/net/qsnet/Makefile
70713 +#
70714 +
70715 +subdir-$(CONFIG_QSNET)  += qsnet elan
70716 +subdir-$(CONFIG_ELAN3)  += elan3
70717 +subdir-$(CONFIG_ELAN4)  += elan4
70718 +subdir-$(CONFIG_EP)     += ep
70719 +subdir-$(CONFIG_EIP)    += eip
70720 +subdir-$(CONFIG_RMS)    += rms
70721 +subdir-$(CONFIG_JTAG)   += jtag
70722 +
70723 +include $(TOPDIR)/Rules.make
70724 Index: linux-2.4.21/drivers/net/qsnet/qsnet/debug.c
70725 ===================================================================
70726 --- linux-2.4.21.orig/drivers/net/qsnet/qsnet/debug.c   2004-02-23 16:02:56.000000000 -0500
70727 +++ linux-2.4.21/drivers/net/qsnet/qsnet/debug.c        2005-06-01 23:12:54.696424360 -0400
70728 @@ -0,0 +1,583 @@
70729 +/*
70730 + *    Copyright (c) 2003 by Quadrics Ltd.
70731 + * 
70732 + *    For licensing information please see the supplied COPYING file
70733 + *
70734 + */
70735 +
70736 +#ident "@(#)$Id: debug.c,v 1.21 2004/08/19 08:09:57 david Exp $"
70737 +/*      $Source: /cvs/master/quadrics/qsnet/debug.c,v $ */
70738 +
70739 +#include <qsnet/kernel.h>
70740 +#include <qsnet/debug.h>
70741 +#include <qsnet/procfs_linux.h>
70742 +
70743 +caddr_t        qsnet_debug_buffer_ptr = NULL;
70744 +int           qsnet_debug_front      = 0;
70745 +int           qsnet_debug_back       = 0;
70746 +int            qsnet_debug_lost_lines = 0;
70747 +int           qsnet_debug_disabled   = 0;
70748 +
70749 +int           qsnet_debug_line_size  = 256;
70750 +int           qsnet_debug_num_lines  = 8192;
70751 +
70752 +int           qsnet_assfail_mode     = 1;                      /* default to BUG() */
70753 +
70754 +int            qsnet_debug_running    = 0;
70755 +int            kqsnet_debug_running   = 0;
70756 +
70757 +static spinlock_t qsnet_debug_lock;
70758 +static kcondvar_t qsnet_debug_wait;
70759 +static char       qsnet_debug_buffer_space[8192];
70760 +
70761 +#define QSNET_DEBUG_PREFIX_MAX_SIZE    32
70762 +#define QSNET_DEBUG_MAX_WORDWRAP       15
70763 +
70764 +/* must be larger than  QSNET_DEBUG_PREFIX_MAX_SIZE +  QSNET_DEBUG_MAX_WORDWRAP + 2 */
70765 +#if defined(DIGITAL_UNIX) 
70766 +#define QSNET_DEBUG_CONSOLE_WIDTH 80
70767 +#elif defined(LINUX)
70768 +#define QSNET_DEBUG_CONSOLE_WIDTH 128
70769 +#endif
70770 +
70771 +#define isspace(CH)    ((CH==' ') | (CH=='\t') | (CH=='\n'))
70772 +
70773 +#ifdef LINUX
70774 +#define ALLOC_DEBUG_BUFFER(ptr)                do { (ptr) = (void *)__get_free_pages (GFP_KERNEL, get_order (qsnet_debug_num_lines * qsnet_debug_line_size)); } while (0)
70775 +#define FREE_DEBUG_BUFFER(ptr)         free_pages ((unsigned long) ptr, get_order (qsnet_debug_num_lines * qsnet_debug_line_size))
70776 +#else
70777 +#define ALLOC_DEBUG_BUFFER(ptr)                KMEM_ALLOC (ptr, caddr_t, qsnet_debug_num_lines * qsnet_debug_line_size, 1)
70778 +#define FREE_DEBUG_BUFFER(ptr)         KMEM_FREE (ptr, qsnet_debug_num_lines * qsnet_debug_line_size)
70779 +#endif
70780 +
70781 +void
70782 +qsnet_debug_init ()
70783 +{
70784 +       spin_lock_init (&qsnet_debug_lock);
70785 +       kcondvar_init  (&qsnet_debug_wait);
70786 +
70787 +       qsnet_debug_front      = 0;
70788 +       qsnet_debug_back       = 0;
70789 +       qsnet_debug_lost_lines = 0;
70790 +
70791 +       if (qsnet_debug_line_size < (QSNET_DEBUG_PREFIX_MAX_SIZE + QSNET_DEBUG_MAX_WORDWRAP + 2))
70792 +               qsnet_debug_line_size = 256;
70793 +
70794 +       qsnet_debug_running    = 1;
70795 +
70796 +       qsnet_proc_register_int (qsnet_procfs_config, "assfail_mode", &qsnet_assfail_mode, 0);
70797 +}
70798 +
70799 +void
70800 +qsnet_debug_fini()
70801 +{
70802 +       if (!qsnet_debug_running) return;
70803 +
70804 +       remove_proc_entry ("assfail_mode", qsnet_procfs_config);
70805 +
70806 +       spin_lock_destroy (&qsnet_debug_lock);
70807 +       kcondvar_destroy  (&qsnet_debug_wait);
70808 +       
70809 +       if (qsnet_debug_buffer_ptr)
70810 +               FREE_DEBUG_BUFFER (qsnet_debug_buffer_ptr);
70811 +
70812 +       qsnet_debug_buffer_ptr     = NULL;
70813 +       qsnet_debug_lost_lines = 0;     
70814 +       qsnet_debug_running    = 0;     
70815 +}
70816 +
70817 +void
70818 +qsnet_debug_disable(int val)
70819 +{
70820 +       qsnet_debug_disabled = val;
70821 +}
70822 +
70823 +void
70824 +qsnet_debug_alloc()
70825 +{
70826 +       caddr_t ptr;
70827 +       unsigned long flags;
70828 +
70829 +       if (!qsnet_debug_running) return;
70830 +
70831 +       if (qsnet_debug_buffer_ptr == NULL)
70832 +       {
70833 +               ALLOC_DEBUG_BUFFER (ptr);
70834 +
70835 +               if (ptr != NULL)
70836 +               {
70837 +                       spin_lock_irqsave (&qsnet_debug_lock, flags);
70838 +                       if (qsnet_debug_buffer_ptr == NULL)
70839 +                       {
70840 +                               qsnet_debug_buffer_ptr = ptr;
70841 +                               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
70842 +                       }
70843 +                       else
70844 +                       {
70845 +                               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
70846 +
70847 +                               FREE_DEBUG_BUFFER (ptr);
70848 +                       }
70849 +               }
70850 +       }
70851 +       
70852 +}
70853 +
70854 +static void 
70855 +qsnet_prefix_debug(unsigned int mode, char *prefix, char *buffer) 
70856 +{
70857 +       /* assumes caller has lock */
70858 +
70859 +       int  prefixlen = strlen(prefix);
70860 +       char pref[QSNET_DEBUG_PREFIX_MAX_SIZE];
70861 +       int  prefix_done = 0;
70862 +
70863 +       if (!qsnet_debug_running) return;
70864 +
70865 +       if (qsnet_debug_disabled)
70866 +               return;
70867 +
70868 +       if (prefixlen >= QSNET_DEBUG_PREFIX_MAX_SIZE) 
70869 +       {
70870 +               strncpy(pref,prefix,QSNET_DEBUG_PREFIX_MAX_SIZE -2);
70871 +               strcpy (&pref[QSNET_DEBUG_PREFIX_MAX_SIZE-5],"... ");
70872 +
70873 +               prefix = pref;
70874 +                prefixlen = strlen(prefix);
70875 +       }
70876 +
70877 +#ifdef CONFIG_MPSAS
70878 +       {
70879 +               char *p;
70880 +#define TRAP_PUTCHAR_B                 (0x17a - 256)
70881 +#define SAS_PUTCHAR(c)                 do {\
70882 +                       register int o0 asm ("o0") = (c);\
70883 +\
70884 +                       asm volatile ("ta %0; nop" \
70885 +                                     : /* no outputs */\
70886 +                                     : /* inputs */ "i" (TRAP_PUTCHAR_B), "r" (o0)\
70887 +                                     : /* clobbered */ "o0");\
70888 +\
70889 +                       if (o0 == '\n') {\
70890 +                               o0 = '\r';\
70891 +\
70892 +                               asm volatile ("ta %0; nop" \
70893 +                                             : /* no outputs */\
70894 +                                             : /* inputs */ "i" (TRAP_PUTCHAR_B), "r" (o0)\
70895 +                                             : /* clobbered */ "o0");\
70896 +                       }\
70897 +               } while(0)
70898 +
70899 +               for (p = prefix; *p; p++)
70900 +                       SAS_PUTCHAR (*p);
70901 +
70902 +               for (p = buffer; *p; p++)
70903 +                       SAS_PUTCHAR (*p);
70904 +       }
70905 +#else
70906 +       if (mode & QSNET_DEBUG_BUFFER)
70907 +       {
70908 +               if (qsnet_debug_buffer_ptr == NULL)
70909 +                       qsnet_debug_lost_lines++;
70910 +               else
70911 +               {                   
70912 +                       caddr_t base = &qsnet_debug_buffer_ptr[qsnet_debug_line_size * qsnet_debug_back];
70913 +                       caddr_t lim  = base + qsnet_debug_line_size - 2;
70914 +                       caddr_t p;
70915 +               
70916 +                       p = buffer; 
70917 +                       prefix_done = 0;
70918 +                       while (*p) 
70919 +                       {
70920 +                               /* sort out prefix */
70921 +                               if ( prefix_done++ ) 
70922 +                               {
70923 +                                       int i;
70924 +                                       for(i=0;i<prefixlen;i++)
70925 +                                               base[i] = ' ';
70926 +                                       /* memset(base,' ',prefixlen); */
70927 +                               }
70928 +                               else
70929 +                                       strcpy(base,prefix);
70930 +                               base += prefixlen; /* move the base on */
70931 +
70932 +                               /* copy data */
70933 +                               for ( ; *p && (base < lim); )
70934 +                                       *base++ = *p++;
70935 +
70936 +                               /* if line split then add \n */
70937 +                               if ((base == lim) && (*base != '\n'))
70938 +                               {
70939 +                                       char *ptr;
70940 +                                       int   count;
70941 +
70942 +                                       *base = '\n';
70943 +                                       /* we added a \n cos it was end of line put next char was \n */
70944 +                                       if (*p == '\n') 
70945 +                                               p++;
70946 +                                       else
70947 +                                       {
70948 +                                               /* lets see if we can back track and find a white space to break on */
70949 +                                               ptr = base-1;
70950 +                                               count = 1;
70951 +                                               while ( ( !isspace(*ptr) ) && ( count < QSNET_DEBUG_MAX_WORDWRAP ))
70952 +                                               {
70953 +                                                       count++;
70954 +                                                       ptr--;
70955 +                                               }
70956 +
70957 +                                               if ( isspace(*ptr) ) 
70958 +                                               {
70959 +                                                       /* found somewhere to wrap to */
70960 +                                                       p -= (count-1); /* need to loose the white space */
70961 +                                                       base = ptr;
70962 +                                                       *base = '\n';
70963 +                                               }
70964 +                                       }
70965 +                                       base++;
70966 +                               }
70967 +                               *base = '\0';
70968 +
70969 +                               /* move on pointers */
70970 +                               qsnet_debug_back = (++qsnet_debug_back == qsnet_debug_num_lines) ? 0 : qsnet_debug_back;            
70971 +                               if (qsnet_debug_back == qsnet_debug_front)
70972 +                               {
70973 +                                       qsnet_debug_lost_lines++;
70974 +                                       qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
70975 +                               }
70976 +                               base  = &qsnet_debug_buffer_ptr[qsnet_debug_line_size * qsnet_debug_back];
70977 +                               lim  =  base + qsnet_debug_line_size - 2;
70978 +                       }
70979 +                       kcondvar_wakeupone (&qsnet_debug_wait, &qsnet_debug_lock);
70980 +               }
70981 +       }
70982 +
70983 +       if (mode & QSNET_DEBUG_CONSOLE)
70984 +       {
70985 +               int     remaining = QSNET_DEBUG_CONSOLE_WIDTH - prefixlen;
70986 +               caddr_t p;
70987 +               char    line[QSNET_DEBUG_CONSOLE_WIDTH +2];
70988 +               int     len;
70989 +           
70990 +               strcpy (pref,prefix);
70991 +               prefix_done = 0;
70992 +
70993 +               p = buffer;
70994 +               while ( *p )
70995 +               {
70996 +                       /* use the prefix only once */
70997 +                       if  ( prefix_done++ > 0 ) 
70998 +                               {
70999 +                                       int i;
71000 +                                       for(i=0;i<prefixlen;i++)
71001 +                                               pref[i] = ' ';
71002 +                                       /* memset(perf,' ',prefixlen); */
71003 +                               }       
71004 +
71005 +                       len=strlen(p);
71006 +                       if (len > remaining) len = remaining;
71007 +                 
71008 +                       strncpy(line, p, len);
71009 +                       line[len] = 0;
71010 +                       p += len;
71011 +                   
71012 +                       /* word wrap */
71013 +                       if ((len == remaining) && *p && !isspace(*p))
71014 +                       {
71015 +                               /* lets see if we can back track and find a white space to break on */
71016 +                               char * ptr = &line[len-1];
71017 +                               int    count = 1;
71018 +
71019 +                               while ( ( !isspace(*ptr) ) && ( count < QSNET_DEBUG_MAX_WORDWRAP ))
71020 +                               {
71021 +                                       count++;
71022 +                                       ptr--;
71023 +                               }
71024 +
71025 +                               if ( isspace(*ptr) ) 
71026 +                               {
71027 +                                       /* found somewhere to wrap to */
71028 +                                       p -= (count-1); /* need to loose the white space */
71029 +                                       len -= count;
71030 +                               }               
71031 +                       }
71032 +
71033 +                       if (line[len-1] != '\n' ) 
71034 +                       {
71035 +                               line[len] = '\n';
71036 +                               line[len+1] = 0;
71037 +                       }
71038 +
71039 +                       /* we put a \n in so dont need another one next */
71040 +                       if ( *p == '\n')
71041 +                               p++;
71042 +
71043 +#if defined(DIGITAL_UNIX)
71044 +                       {
71045 +                               char *pr;
71046 +
71047 +                               for (pr = pref; *pr; pr++)
71048 +                                       cnputc (*pr);
71049 +
71050 +                               for (pr = line; *pr; pr++)
71051 +                                       cnputc (*pr); 
71052 +                       }
71053 +#elif defined(LINUX)
71054 +                       printk("%s%s",pref,line);
71055 +#endif
71056 +               }
71057 +       }
71058 +#endif /* CONFIG_MPSAS */
71059 +}
71060 +
71061 +void
71062 +qsnet_vdebugf (unsigned int mode, char *prefix, char *fmt, va_list ap)
71063 +{
71064 +       unsigned long flags;
71065 +
71066 +       if (!qsnet_debug_running) return;
71067 +
71068 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
71069 +
71070 +       qsnet_debug_buffer_space[0] = '\0';
71071 +
71072 +#if defined(DIGITAL_UNIX)
71073 +       prf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), NULL, fmt, ap);
71074 +#elif defined(LINUX)
71075 +       vsprintf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), fmt, ap);
71076 +#endif
71077 +
71078 +       if (prefix == NULL)
71079 +               printk ("qsnet_vdebugf: prefix==NULL\n");
71080 +       else
71081 +               qsnet_prefix_debug(mode, prefix, qsnet_debug_buffer_space);
71082 +
71083 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71084 +}
71085 +
71086 +void kqsnet_debugf(char *fmt,...)
71087 +{
71088 +       if ( kqsnet_debug_running ) {
71089 +               va_list ap;
71090 +               char string[20];
71091 +               
71092 +               sprintf (string, "mm=%p:", current->mm);
71093 +               va_start(ap, fmt);
71094 +               qsnet_vdebugf(QSNET_DEBUG_BUFFER, string, fmt, ap);
71095 +               va_end(ap);
71096 +       }       
71097 +}
71098 +void 
71099 +qsnet_debugf(unsigned int mode, char *fmt,...)
71100 +{
71101 +       va_list       ap;
71102 +       unsigned long flags;
71103 +
71104 +       if (!qsnet_debug_running) return;
71105 +
71106 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
71107 +
71108 +       qsnet_debug_buffer_space[0] = '\0';
71109 +
71110 +       va_start (ap, fmt);
71111 +#if defined(DIGITAL_UNIX)
71112 +       prf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), NULL, fmt, ap);
71113 +#elif defined(LINUX)
71114 +       vsprintf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), fmt, ap);
71115 +#endif
71116 +       va_end (ap);
71117 +
71118 +       qsnet_prefix_debug(mode, "", qsnet_debug_buffer_space); 
71119 +
71120 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71121 +}
71122 +
71123 +int
71124 +qsnet_debug_buffer (caddr_t ubuffer, int len)
71125 +{
71126 +       caddr_t buffer, ptr, base;
71127 +       int     remain, len1;
71128 +       unsigned long flags;
71129 +       static  char qsnet_space[65536];
71130 +
71131 +       if (!qsnet_debug_running) return (0);
71132 +
71133 +       if (len < qsnet_debug_line_size)
71134 +               return (-1);
71135 +
71136 +       if (len > (qsnet_debug_line_size * qsnet_debug_num_lines))
71137 +               len = qsnet_debug_line_size * qsnet_debug_num_lines;
71138 +    
71139 +       if ( len > 65536 ) {
71140 +               KMEM_ZALLOC (buffer, caddr_t, len, 1);
71141 +       } else 
71142 +               buffer = qsnet_space;
71143 +
71144 +       if (buffer == NULL)
71145 +               return (-1);
71146 +
71147 +       if (qsnet_debug_buffer_ptr == NULL)
71148 +               qsnet_debug_alloc();
71149 +
71150 +       if (qsnet_debug_buffer_ptr == NULL)
71151 +       {
71152 +               if ( len > 65536 )
71153 +                       KMEM_FREE (buffer, len);
71154 +               return (-1);
71155 +       }
71156 +
71157 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
71158 +    
71159 +       while (!qsnet_debug_lost_lines && (qsnet_debug_back == qsnet_debug_front))
71160 +               if (kcondvar_waitsig (&qsnet_debug_wait, &qsnet_debug_lock, &flags) == 0)
71161 +                       break;
71162 +    
71163 +       ptr    = buffer;
71164 +       remain = len;
71165 +
71166 +       if (qsnet_debug_lost_lines)
71167 +       {
71168 +               qsnet_debug_lost_lines = 0;
71169 +               strcpy (ptr, "Debug Buffer has overflowed!!\n");
71170 +               len1 = strlen (ptr);
71171 +
71172 +               remain -= len1;
71173 +               ptr    += len1;
71174 +       }
71175 +
71176 +       while (qsnet_debug_front != qsnet_debug_back)
71177 +       {
71178 +               /* copy the line from DebugFront */
71179 +               base = &qsnet_debug_buffer_ptr[qsnet_debug_front*qsnet_debug_line_size];
71180 +
71181 +               len1 = strlen (base);
71182 +
71183 +               if (len1 > remain)
71184 +                       break;
71185 +       
71186 +               bcopy (base, ptr, len1);
71187 +       
71188 +               ptr += len1;
71189 +               remain -= len1;
71190 +
71191 +               qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
71192 +       }
71193 +
71194 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71195 +
71196 +       len1 = ptr - buffer;
71197 +
71198 +       if (len1 != 0 && copyout (buffer, ubuffer, len1))
71199 +               len1 = -1;
71200 +
71201 +       if ( len > 65536 )
71202 +               KMEM_FREE (buffer, len);
71203 +   
71204 +       return (len1);
71205 +}
71206 +
71207 +void
71208 +qsnet_debug_buffer_on() 
71209 +{
71210 +       if (qsnet_debug_buffer_ptr == NULL)
71211 +               qsnet_debug_alloc();
71212 +}
71213 +
71214 +void 
71215 +qsnet_debug_buffer_clear()
71216 +{
71217 +       unsigned long flags;
71218 +
71219 +       qsnet_debug_buffer_on();
71220 +       
71221 +       if (qsnet_debug_buffer_ptr != NULL){
71222 +               spin_lock_irqsave (&qsnet_debug_lock, flags);
71223 +               qsnet_debug_front      = 0;
71224 +               qsnet_debug_back       = 0;
71225 +               qsnet_prefix_debug(QSNET_DEBUG_BUFFER,"Clear","");
71226 +               spin_unlock_irqrestore (&qsnet_debug_lock, flags);      
71227 +       }
71228 +}
71229 +
71230 +void 
71231 +qsnet_debug_buffer_mark(char *str)
71232 +{
71233 +       unsigned long flags;    
71234 +
71235 +       qsnet_debug_buffer_on();
71236 +
71237 +       if (qsnet_debug_buffer_ptr != NULL) {
71238 +               spin_lock_irqsave (&qsnet_debug_lock, flags);
71239 +               qsnet_prefix_debug(QSNET_DEBUG_BUFFER,"Mark",str);
71240 +               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71241 +       }
71242 +}
71243 +int
71244 +qsnet_debug_dump ()
71245 +{
71246 +       unsigned long flags;
71247 +
71248 +       if (!qsnet_debug_running) return (0);
71249 +
71250 +       if (qsnet_debug_buffer_ptr == NULL)
71251 +               qsnet_debug_alloc();
71252 +
71253 +       if (qsnet_debug_buffer_ptr == NULL)
71254 +               return (-1);
71255 +
71256 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
71257 +
71258 +       while (qsnet_debug_front != qsnet_debug_back)
71259 +       {
71260 +               printk ("%s", &qsnet_debug_buffer_ptr[qsnet_debug_front*qsnet_debug_line_size]);
71261 +
71262 +               qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
71263 +       }
71264 +
71265 +       if (qsnet_debug_lost_lines)
71266 +               printk ("\n**** Debug buffer has lost %d lines\n****\n",qsnet_debug_lost_lines);
71267 +
71268 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71269 +
71270 +       return (0);
71271 +}
71272 +
71273 +int
71274 +qsnet_debug_kmem (void *handle)
71275 +{
71276 +       if (!qsnet_debug_running) return (0);
71277 +
71278 +#ifdef KMEM_DEBUG
71279 +       qsnet_kmem_display(handle);
71280 +#endif
71281 +       return (0);
71282 +}
71283 +
71284 +int
71285 +qsnet_assfail (char *ex, const char *func, char *file, int line)
71286 +{
71287 +       qsnet_debugf (QSNET_DEBUG_BUFFER, "qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
71288 +
71289 +       printk (KERN_EMERG "qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
71290 +
71291 +       if (panicstr)
71292 +               return (0);
71293 +
71294 +       if (qsnet_assfail_mode & 1)                             /* return to BUG() */
71295 +               return 1;
71296 +
71297 +       if (qsnet_assfail_mode & 2)
71298 +               panic ("qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
71299 +       if (qsnet_assfail_mode & 4)
71300 +               qsnet_debug_disable (1);
71301 +
71302 +       return 0;
71303 +
71304 +}
71305 +
71306 +
71307 +/*
71308 + * Local variables:
71309 + * c-file-style: "linux"
71310 + * End:
71311 + */
71312 Index: linux-2.4.21/drivers/net/qsnet/qsnet/i686_mmx.c
71313 ===================================================================
71314 --- linux-2.4.21.orig/drivers/net/qsnet/qsnet/i686_mmx.c        2004-02-23 16:02:56.000000000 -0500
71315 +++ linux-2.4.21/drivers/net/qsnet/qsnet/i686_mmx.c     2005-06-01 23:12:54.696424360 -0400
71316 @@ -0,0 +1,99 @@
71317 +/*
71318 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
71319 + * 
71320 + *    For licensing information please see the supplied COPYING file
71321 + *
71322 + */
71323 +
71324 +#ident "@(#)$Id: i686_mmx.c,v 1.11 2004/01/05 12:08:25 mike Exp $"
71325 +/*      $Source: /cvs/master/quadrics/qsnet/i686_mmx.c,v $*/
71326 +
71327 +#include <qsnet/kernel.h>
71328 +
71329 +#if defined(LINUX_I386)
71330 +
71331 +#include <linux/config.h>
71332 +#include <linux/sched.h>
71333 +#include <asm/processor.h>
71334 +#include <asm/i387.h>
71335 +
71336 +int mmx_disabled = 0;
71337 +
71338 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
71339 +/* These functions are lifted from arch/i386/kernel/i387.c
71340 + * and MUST be kept in step with the kernel (currently 2.4.17)
71341 + * alternatively we should export the kernel_fpu_begin() function
71342 + */
71343 +static inline void __save_init_fpu( struct task_struct *tsk )
71344 +{
71345 +       if ( cpu_has_fxsr ) {
71346 +               asm volatile( "fxsave %0 ; fnclex"
71347 +                             : "=m" (tsk->thread.i387.fxsave) );
71348 +       } else {
71349 +               asm volatile( "fnsave %0 ; fwait"
71350 +                             : "=m" (tsk->thread.i387.fsave) );
71351 +       }
71352 +       tsk->flags &= ~PF_USEDFPU;
71353 +}
71354 +#if defined(MODULE)
71355 +void kernel_fpu_begin(void)
71356 +{
71357 +       struct task_struct *tsk = current;
71358 +
71359 +       if (tsk->flags & PF_USEDFPU) {
71360 +               __save_init_fpu(tsk);
71361 +               return;
71362 +       }
71363 +       clts();
71364 +}
71365 +#endif
71366 +#endif
71367 +
71368 +extern inline int
71369 +mmx_preamble(void)
71370 +{
71371 +    if (mmx_disabled || in_interrupt())
71372 +       return (0);
71373 +
71374 +    kernel_fpu_begin();
71375 +
71376 +    return (1);
71377 +}
71378 +
71379 +extern inline void
71380 +mmx_postamble(void)
71381 +{
71382 +    kernel_fpu_end();
71383 +}
71384 +
71385 +extern u64
71386 +qsnet_readq (volatile u64 *ptr)
71387 +{
71388 +    u64 value;
71389 +
71390 +    if (! mmx_preamble())
71391 +       value = *ptr;
71392 +    else
71393 +    {
71394 +       asm volatile ("movq (%0), %%mm0\n"
71395 +                     "movq %%mm0, (%1)\n"
71396 +                     : : "r" (ptr), "r" (&value) : "memory");
71397 +       mmx_postamble();
71398 +    }
71399 +    return (value);
71400 +}
71401 +
71402 +void
71403 +qsnet_writeq(u64 value, volatile u64 *ptr)
71404 +{
71405 +    if (! mmx_preamble())
71406 +       *ptr = value;
71407 +    else
71408 +    {
71409 +       asm volatile ("movq (%0), %%mm0\n"
71410 +                     "movq %%mm0, (%1)\n"
71411 +                     : : "r" (&value), "r" (ptr) : "memory");
71412 +       mmx_postamble();
71413 +    }
71414 +}
71415 +#endif
71416 Index: linux-2.4.21/drivers/net/qsnet/qsnet/kernel_linux.c
71417 ===================================================================
71418 --- linux-2.4.21.orig/drivers/net/qsnet/qsnet/kernel_linux.c    2004-02-23 16:02:56.000000000 -0500
71419 +++ linux-2.4.21/drivers/net/qsnet/qsnet/kernel_linux.c 2005-06-01 23:12:54.697424208 -0400
71420 @@ -0,0 +1,856 @@
71421 +/*
71422 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
71423 + * 
71424 + *    For licensing information please see the supplied COPYING file
71425 + *
71426 + */
71427 +
71428 +#ident "@(#)$Id: kernel_linux.c,v 1.71.2.3 2004/11/04 11:03:47 david Exp $"
71429 +/*      $Source: /cvs/master/quadrics/qsnet/kernel_linux.c,v $*/
71430 +
71431 +#include <qsnet/kernel.h>
71432 +#include <qsnet/ctrl_linux.h>
71433 +#include <qsnet/kpte.h>
71434 +
71435 +#include <linux/sysctl.h>
71436 +#include <linux/init.h>
71437 +#include <linux/module.h>
71438 +#include <linux/vmalloc.h>
71439 +
71440 +#include <qsnet/procfs_linux.h>
71441 +
71442 +#include <linux/smp.h>         /* for smp_call_function() prototype */
71443 +#include <linux/smp_lock.h>
71444 +#include <linux/mm.h>
71445 +
71446 +#include <linux/highmem.h>
71447 +
71448 +extern int mmx_disabled;
71449 +extern int qsnet_debug_line_size;
71450 +extern int qsnet_debug_num_lines;
71451 +
71452 +gid_t                 qsnet_procfs_gid;
71453 +struct proc_dir_entry *qsnet_procfs_root;
71454 +struct proc_dir_entry *qsnet_procfs_config;
71455 +
71456 +MODULE_AUTHOR("Quadrics Ltd.");
71457 +MODULE_DESCRIPTION("QsNet Kernel support code");
71458 +
71459 +MODULE_LICENSE("GPL");
71460 +
71461 +#if defined(LINUX_I386)
71462 +MODULE_PARM(mmx_disabled, "i");
71463 +#endif
71464 +
71465 +MODULE_PARM(qsnet_debug_line_size, "i");
71466 +MODULE_PARM(qsnet_debug_num_lines, "i");
71467 +
71468 +MODULE_PARM(qsnet_procfs_gid, "i");
71469 +
71470 +#ifdef KMEM_DEBUG
71471 +EXPORT_SYMBOL(qsnet_kmem_alloc_debug);
71472 +EXPORT_SYMBOL(qsnet_kmem_free_debug);
71473 +#else
71474 +EXPORT_SYMBOL(qsnet_kmem_alloc);
71475 +EXPORT_SYMBOL(qsnet_kmem_free);
71476 +#endif
71477 +
71478 +EXPORT_SYMBOL(qsnet_kmem_display);
71479 +EXPORT_SYMBOL(kmem_to_phys);
71480 +
71481 +EXPORT_SYMBOL(cpu_hold_all);
71482 +EXPORT_SYMBOL(cpu_release_all);
71483 +
71484 +#if defined(LINUX_I386)
71485 +EXPORT_SYMBOL(qsnet_readq);
71486 +EXPORT_SYMBOL(qsnet_writeq);
71487 +#endif
71488 +
71489 +/* debug.c */
71490 +EXPORT_SYMBOL(qsnet_debugf);
71491 +EXPORT_SYMBOL(kqsnet_debugf);
71492 +EXPORT_SYMBOL(qsnet_vdebugf);
71493 +EXPORT_SYMBOL(qsnet_debug_buffer);
71494 +EXPORT_SYMBOL(qsnet_debug_alloc);
71495 +EXPORT_SYMBOL(qsnet_debug_dump);
71496 +EXPORT_SYMBOL(qsnet_debug_kmem);
71497 +EXPORT_SYMBOL(qsnet_debug_disable);
71498 +
71499 +EXPORT_SYMBOL(qsnet_assfail);
71500 +
71501 +EXPORT_SYMBOL(qsnet_procfs_gid);
71502 +EXPORT_SYMBOL(qsnet_procfs_root);
71503 +
71504 +static int qsnet_open    (struct inode *ino, struct file *fp);
71505 +static int qsnet_release (struct inode *ino, struct file *fp);
71506 +static int qsnet_ioctl   (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
71507 +
71508 +static struct file_operations qsnet_ioctl_fops = 
71509 +{
71510 +       ioctl:   qsnet_ioctl,
71511 +       open:    qsnet_open,
71512 +       release: qsnet_release,
71513 +};
71514 +
71515 +static int
71516 +qsnet_open (struct inode *inode, struct file *fp)
71517 +{
71518 +       MOD_INC_USE_COUNT;
71519 +       fp->private_data = NULL;
71520 +       return (0);
71521 +}
71522 +
71523 +static int
71524 +qsnet_release (struct inode *inode, struct file *fp)
71525 +{
71526 +       MOD_DEC_USE_COUNT;
71527 +       return (0);
71528 +}
71529 +
71530 +static int 
71531 +qsnet_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
71532 +{
71533 +       int res=0;
71534 +
71535 +       switch (cmd) 
71536 +       {
71537 +       case QSNETIO_DEBUG_KMEM:
71538 +       {
71539 +               QSNETIO_DEBUG_KMEM_STRUCT args;
71540 +
71541 +               if (copy_from_user (&args, (void *) arg, sizeof (QSNETIO_DEBUG_KMEM_STRUCT)))
71542 +                       return (-EFAULT);
71543 +
71544 +               /* doesnt use handle as a pointer */
71545 +               qsnet_kmem_display(args.handle);
71546 +               break;
71547 +       }
71548 +
71549 +       case QSNETIO_DEBUG_DUMP : 
71550 +       {
71551 +               res = qsnet_debug_dump();
71552 +               break;
71553 +       }
71554 +
71555 +       case QSNETIO_DEBUG_BUFFER :
71556 +       {
71557 +               QSNETIO_DEBUG_BUFFER_STRUCT args;
71558 +
71559 +               if (copy_from_user (&args, (void *) arg, sizeof (QSNETIO_DEBUG_BUFFER_STRUCT)))
71560 +                       return (-EFAULT);
71561 +
71562 +               /* qsnet_debug_buffer uses copyout */
71563 +               if ((res = qsnet_debug_buffer (args.addr, args.len)) != -1)
71564 +               {
71565 +                       args.len = res;
71566 +                       if (copy_to_user ((void *) arg, &args, sizeof (QSNETIO_DEBUG_BUFFER_STRUCT)))
71567 +                               return (-EFAULT);
71568 +                       res = 0;
71569 +               }
71570 +               break;
71571 +       }
71572 +       default:
71573 +               res = EINVAL;
71574 +               break;
71575 +       }
71576 +
71577 +       return ((res == 0) ? 0 : -res);
71578 +}
71579 +
71580 +#ifdef KMEM_DEBUG
71581 +static int qsnet_kmem_open    (struct inode *ino, struct file *fp);
71582 +static int qsnet_kmem_release (struct inode *ino, struct file *fp);
71583 +static ssize_t qsnet_kmem_read (struct file *file, char *buf, size_t count, loff_t *ppos);
71584 +
71585 +static struct file_operations qsnet_kmem_fops = 
71586 +{
71587 +       open:    qsnet_kmem_open,
71588 +       release: qsnet_kmem_release,
71589 +       read:    qsnet_kmem_read,
71590 +};
71591 +
71592 +typedef struct qsnet_private_space
71593 +{
71594 +       char * space;
71595 +       int    size;
71596 +       struct qsnet_private_space *next;
71597 +} QSNET_PRIVATE_SPACE;
71598 +
71599 +typedef struct qsnet_private  
71600 +{
71601 +       QSNET_PRIVATE_SPACE *space_chain;
71602 +        QSNET_PRIVATE_SPACE *current_space;
71603 +       int                  current_pos;
71604 +
71605 +} QSNET_PRIVATE;
71606 +
71607 +#define QSNET_KMEM_DEBUG_LINE_SIZE ((int)512)
71608 +#define QSNET_PRIVATE_PAGE_SIZE    ((int)(4*1024))
71609 +
71610 +static int qsnet_kmem_fill(QSNET_PRIVATE *pd);
71611 +
71612 +void
71613 +destroy_chain(QSNET_PRIVATE * pd)
71614 +{
71615 +       QSNET_PRIVATE_SPACE *mem, *next;
71616 +       
71617 +       if (pd == NULL) return;
71618 +
71619 +       for(mem = pd->space_chain ; mem != NULL; )
71620 +       {
71621 +               next = mem->next; 
71622 +               if ( mem->space ) 
71623 +                       kfree ( mem->space);
71624 +               kfree(mem);
71625 +               mem = next;
71626 +       }
71627 +       kfree (pd);
71628 +}
71629 +
71630 +QSNET_PRIVATE *
71631 +make_chain(int len)
71632 +{
71633 +       QSNET_PRIVATE       * pd;
71634 +       QSNET_PRIVATE_SPACE * mem;
71635 +       int                   i;
71636 +
71637 +       /* make the private data block */
71638 +       if ((pd = kmalloc (sizeof (QSNET_PRIVATE), GFP_KERNEL)) == NULL)
71639 +               return NULL;
71640 +       pd->space_chain = NULL;
71641 +
71642 +       /* first make the holders */
71643 +       for(i=0;i<len;i++)
71644 +       {
71645 +               if ((mem = kmalloc (sizeof (QSNET_PRIVATE_SPACE), GFP_KERNEL)) == NULL)
71646 +               {
71647 +                       destroy_chain(pd);
71648 +                       return (NULL);
71649 +               }
71650 +               mem->next  = pd->space_chain;
71651 +               mem->size  = 0;
71652 +               mem->space = 0;
71653 +               pd->space_chain = mem;
71654 +
71655 +               /* now add the space */
71656 +               if ((mem->space = kmalloc (QSNET_PRIVATE_PAGE_SIZE, GFP_KERNEL)) == NULL)
71657 +               {
71658 +                       destroy_chain(pd);
71659 +                       return (NULL);
71660 +               }                       
71661 +
71662 +               mem->space[0] = 0;
71663 +
71664 +       }
71665 +
71666 +       pd->current_space = pd->space_chain;
71667 +       pd->current_pos   = 0;
71668 +
71669 +       return pd;
71670 +}
71671 +
71672 +static int
71673 +qsnet_kmem_open (struct inode *inode, struct file *fp)
71674 +{
71675 +       MOD_INC_USE_COUNT;
71676 +       fp->private_data = NULL;
71677 +       return (0);
71678 +}
71679 +
71680 +static int
71681 +qsnet_kmem_release (struct inode *inode, struct file *fp)
71682 +{
71683 +       if ( fp->private_data )
71684 +       {
71685 +               QSNET_PRIVATE * pd = (QSNET_PRIVATE *) fp->private_data;
71686 +
71687 +               /* free the space */
71688 +               if (pd->space_chain)
71689 +                       kfree (pd->space_chain);        
71690 +
71691 +               /* free struct */
71692 +               kfree (pd);
71693 +       }
71694 +       MOD_DEC_USE_COUNT;
71695 +       return (0);
71696 +}
71697 +
71698 +static ssize_t
71699 +qsnet_kmem_read (struct file *file, char *buf, size_t count, loff_t *ppos)
71700 +{
71701 +       QSNET_PRIVATE * pd = (QSNET_PRIVATE *) file->private_data;
71702 +       int             error;
71703 +       int             output_count;
71704 +       int             num_of_links=10;
71705 +
71706 +       /* make a buffer to output count bytes in */
71707 +       if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
71708 +               return (error);
71709 +
71710 +       if ( pd == NULL) 
71711 +       {
71712 +               /* first time */
71713 +
71714 +               /* ok we have to guess at how much space we are going to need  */
71715 +               /* if it fails we up the space and carry try again             */
71716 +               /* we have to do it this way as we cant get more memory whilst */
71717 +               /* holding the lock                                            */
71718 +               if ((pd = make_chain(num_of_links)) == NULL)
71719 +                       return (-ENOMEM);       
71720 +
71721 +               while ( qsnet_kmem_fill(pd) ) 
71722 +               {
71723 +                       destroy_chain(pd);
71724 +                       num_of_links += 10;
71725 +                       if ((pd = make_chain(num_of_links)) == NULL)
71726 +                               return (-ENOMEM);       
71727 +               }
71728 +
71729 +               /* we have the space and filled it */
71730 +               file->private_data = (void *)pd;        
71731 +       }
71732 +               
71733 +       /* output buffer */
71734 +       if ( pd->current_pos >= pd->current_space->size )
71735 +               return (0); /* finished */
71736 +
71737 +       output_count = pd->current_space->size - pd->current_pos;
71738 +       if ( output_count > count ) 
71739 +               output_count = count;
71740 +
71741 +       copy_to_user(buf, (pd->current_space->space + pd->current_pos), output_count);
71742 +
71743 +       pd->current_pos += output_count;
71744 +       ppos            += output_count;
71745 +
71746 +       /* just check to see if we have finished the current space */
71747 +       if ( pd->current_pos >= pd->current_space->size )
71748 +       {
71749 +               if ( pd->current_space->next )
71750 +               {
71751 +                       pd->current_space = pd->current_space->next;
71752 +                       pd->current_pos   = 0;
71753 +               }
71754 +       }       
71755 +
71756 +       return (output_count);
71757 +}
71758 +#endif /* KMEM_DEBUG */
71759 +
71760 +static int
71761 +proc_write_qsnetdebug(struct file *file, const char *buffer,
71762 +                     unsigned long count, void *data)
71763 +{
71764 +       char    tmpbuf[128];
71765 +       int     res;
71766 +       
71767 +       if (count > sizeof (tmpbuf)-1)
71768 +               return (-EINVAL);
71769 +       
71770 +       MOD_INC_USE_COUNT;
71771 +       
71772 +       if (copy_from_user (tmpbuf, buffer, count))
71773 +               res = -EFAULT;
71774 +       else 
71775 +       {
71776 +               tmpbuf[count] = '\0';   
71777 +               
71778 +               if (tmpbuf[count-1] == '\n')
71779 +                       tmpbuf[count-1] = '\0';
71780 +               
71781 +               if (! strcmp (tmpbuf, "on"))
71782 +                       qsnet_debug_buffer_on();
71783 +
71784 +               if (! strcmp (tmpbuf, "clear"))
71785 +                       qsnet_debug_buffer_clear();
71786 +
71787 +               if (! strncmp (tmpbuf, "mark",4))
71788 +                       qsnet_debug_buffer_mark( &tmpbuf[4] );
71789 +               
71790 +               res = count;
71791 +       }
71792 +       
71793 +       MOD_DEC_USE_COUNT;
71794 +       
71795 +       return (res);
71796 +}
71797 +
71798 +static int
71799 +proc_read_qsnetdebug(char *page, char **start, off_t off,
71800 +                    int count, int *eof, void *data)
71801 +{
71802 +       int len = sprintf (page, "echo command > /proc/qsnet/config/qsnetdebug\ncommand = on | off | clear | mark text\n");
71803 +       return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
71804 +}
71805 +
71806 +#include "quadrics_version.h"
71807 +extern int kqsnet_debug_running;
71808 +static char       quadrics_version[] = QUADRICS_VERSION;
71809 +
71810 +static int __init qsnet_init(void)
71811 +{
71812 +       struct proc_dir_entry *p;
71813 +
71814 +       if ((qsnet_procfs_root = proc_mkdir ("qsnet", 0)) == NULL)
71815 +       {
71816 +               printk ("qsnet: failed to create /proc/qsnet \n");
71817 +               return (-ENXIO);
71818 +       }
71819 +       
71820 +       if ((p = create_proc_entry ("ioctl", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_root)) == NULL)
71821 +       {
71822 +               printk ("qsnet: failed to register /proc/qsnet/ioctl\n");
71823 +               return (-ENXIO);
71824 +       }
71825 +       p->proc_fops = &qsnet_ioctl_fops;
71826 +       p->owner     = THIS_MODULE;
71827 +       p->data      = NULL;
71828 +       p->gid       = qsnet_procfs_gid;
71829 +
71830 +       qsnet_proc_register_str (qsnet_procfs_root, "version", quadrics_version, S_IRUGO);
71831 +
71832 +       if ((qsnet_procfs_config = proc_mkdir ("config", qsnet_procfs_root)) == NULL)
71833 +       {
71834 +               printk ("qsnet: failed to create /proc/qsnet/config \n");
71835 +               return (-ENXIO);
71836 +       }
71837 +
71838 +#ifdef KMEM_DEBUG
71839 +       if ((p = create_proc_entry ("kmem_debug", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_config)) == NULL)
71840 +       {
71841 +               printk ("qsnet: failed to register /proc/qsnet/config/kmem_debug\n");
71842 +               return (-ENXIO);
71843 +       }
71844 +       p->proc_fops = &qsnet_kmem_fops;
71845 +       p->owner     = THIS_MODULE;
71846 +       p->data      = NULL;
71847 +       p->gid       = qsnet_procfs_gid;
71848 +#endif         
71849 +      
71850 +       qsnet_debug_init(); 
71851 +
71852 +       qsnet_proc_register_int (qsnet_procfs_config, "kqsnet_debug_running", &kqsnet_debug_running, 0);
71853 +
71854 +       if ((p = create_proc_entry ("qsnetdebug", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_config)) == NULL)
71855 +       {
71856 +               printk ("qsnet: failed to register /proc/qsnet/config/qsnetdebug\n");
71857 +               return (-ENXIO);
71858 +       }
71859 +       p->read_proc  = proc_read_qsnetdebug;
71860 +       p->write_proc = proc_write_qsnetdebug;
71861 +       p->owner      = THIS_MODULE;
71862 +       p->data       = NULL;
71863 +       p->gid        = qsnet_procfs_gid;
71864 +       
71865 +       return (0);
71866 +}
71867 +
71868 +static void __exit qsnet_exit(void)
71869 +{
71870 +#ifdef KMEM_DEBUG
71871 +       qsnet_kmem_display(0);
71872 +#endif
71873 +       qsnet_debug_fini();
71874 +
71875 +       remove_proc_entry ("qsnetdebug",           qsnet_procfs_config);
71876 +       remove_proc_entry ("kqsnet_debug_running", qsnet_procfs_config);
71877 +#ifdef KMEM_DEBUG
71878 +       remove_proc_entry ("kmem_debug",           qsnet_procfs_config);
71879 +#endif
71880 +       remove_proc_entry ("config",               qsnet_procfs_root);
71881 +
71882 +       remove_proc_entry ("version", qsnet_procfs_root);
71883 +       remove_proc_entry ("ioctl",   qsnet_procfs_root);
71884 +
71885 +       remove_proc_entry ("qsnet", 0);
71886 +}
71887 +
71888 +/* Declare the module init and exit functions */
71889 +module_init(qsnet_init);
71890 +module_exit(qsnet_exit);
71891 +
71892 +#ifdef KMEM_DEBUG
71893 +/*
71894 + * Kernel memory allocation.  We maintain our own list of allocated mem
71895 + * segments so we can free them on module cleanup.
71896 + * 
71897 + * We use kmalloc for allocations less than one page in size; vmalloc for
71898 + * larger sizes.
71899 + */
71900 +
71901 +typedef struct {
71902 +       struct list_head list;
71903 +       void            *ptr;
71904 +       int             len;
71905 +       int             used_vmalloc;
71906 +       void            *owner;
71907 +       void            *caller;
71908 +       unsigned int     time;
71909 +       int              line;
71910 +       char             filename[20];
71911 +} kmalloc_t;
71912 +
71913 +static LIST_HEAD(kmalloc_head);
71914 +
71915 +static spinlock_t      kmalloc_lock = SPIN_LOCK_UNLOCKED;
71916 +
71917 +/*
71918 + * Kernel memory allocation.  We use kmalloc for allocations less 
71919 + * than one page in size; vmalloc for larger sizes.
71920 + */
71921 +
71922 +static int
71923 +qsnet_kmem_fill(QSNET_PRIVATE *pd)
71924 +{
71925 +       kmalloc_t *kp;
71926 +       struct list_head *lp;
71927 +       unsigned long flags;
71928 +       char str[QSNET_KMEM_DEBUG_LINE_SIZE];
71929 +       QSNET_PRIVATE_SPACE * current_space;
71930 +       int                   current_pos;
71931 +       int                   len;
71932 +       current_space = pd->space_chain;
71933 +       current_pos   = 0;
71934 +       
71935 +       
71936 +       current_space->space[0] = 0;    
71937 +       spin_lock_irqsave(&kmalloc_lock, flags);
71938 +       for (lp = kmalloc_head.next; lp != &kmalloc_head;  lp = lp->next) {
71939 +               kp = list_entry(lp, kmalloc_t, list);
71940 +               
71941 +               /* make the next line */
71942 +               sprintf(str,"%p %d %d %p %p %u %d %s\n",
71943 +                       kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->time, kp->line, kp->filename);
71944 +               len = strlen(str);
71945 +               
71946 +               /* does it fit on the current page */
71947 +               if ( (current_pos + len + 1) >=  QSNET_PRIVATE_PAGE_SIZE)
71948 +               {
71949 +                       /* move onto next page */
71950 +                       if ((current_space = current_space->next) == NULL)
71951 +                       {
71952 +                               /* run out of space !!!! */
71953 +                               spin_unlock_irqrestore(&kmalloc_lock, flags);
71954 +                               return (1);
71955 +                       }
71956 +                       current_space->space[0] = 0;    
71957 +                       current_pos = 0;
71958 +               }
71959 +               strcat( current_space->space + current_pos, str);
71960 +               current_pos += len;
71961 +
71962 +               /* remember how much we wrote to this page */
71963 +               current_space->size = current_pos;
71964 +
71965 +       }
71966 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
71967 +
71968 +       return (0);
71969 +}
71970 +
71971 +void * 
71972 +qsnet_kmem_alloc_debug(int len, int cansleep, int zerofill, char *file, int line)
71973 +{
71974 +       void *new;
71975 +       unsigned long flags;
71976 +       kmalloc_t *kp;
71977 +
71978 +       if (len < PAGE_SIZE || !cansleep)
71979 +               new = kmalloc(len, cansleep ? GFP_KERNEL : GFP_ATOMIC);
71980 +       else
71981 +               new = vmalloc(len);
71982 +
71983 +       if (len >= PAGE_SIZE)
71984 +               ASSERT(PAGE_ALIGNED((uintptr_t) new));
71985 +
71986 +       if (new && zerofill)
71987 +               memset(new,0,len);
71988 +
71989 +       /* record allocation */
71990 +       kp = kmalloc(sizeof(kmalloc_t), cansleep ? GFP_KERNEL : GFP_ATOMIC);
71991 +       ASSERT(kp != NULL);
71992 +       kp->len = len;
71993 +       kp->ptr = new;
71994 +       kp->used_vmalloc = (len >= PAGE_SIZE || cansleep);
71995 +       kp->owner  = current;
71996 +       kp->caller = __builtin_return_address(0);
71997 +       kp->time = lbolt;
71998 +       kp->line = line;
71999 +       len = strlen(file);
72000 +
72001 +       if (len > 18) 
72002 +               strcpy(kp->filename,&file[len-18]);
72003 +       else
72004 +               strcpy(kp->filename,file);
72005 +
72006 +       spin_lock_irqsave(&kmalloc_lock, flags);
72007 +       list_add(&kp->list, &kmalloc_head);
72008 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
72009 +
72010 +       return new;
72011 +}
72012 +
72013 +void 
72014 +qsnet_kmem_free_debug(void *ptr, int len, char *file, int line)
72015 +{
72016 +       unsigned long flags;
72017 +       kmalloc_t *kp;
72018 +       struct list_head *lp;
72019 +
72020 +       spin_lock_irqsave(&kmalloc_lock, flags);
72021 +       for (lp = kmalloc_head.next; lp != &kmalloc_head; lp = lp->next) {
72022 +               kp = list_entry(lp, kmalloc_t, list);
72023 +               if (kp->ptr == ptr) {
72024 +                       if (kp->len != len)
72025 +                               printk("qsnet_kmem_free_debug(%p) ptr %p len %d mismatch: expected %d caller %p owner %p (%s:%d)\n",
72026 +                                      current, ptr, len, kp->len, __builtin_return_address(0), kp->caller, file, line);
72027 +                       list_del(lp);
72028 +                       kfree(kp); /* free off descriptor */
72029 +                       break;
72030 +               }
72031 +       }
72032 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
72033 +
72034 +       if (lp == &kmalloc_head) /* segment must be found */
72035 +       {
72036 +               printk( "qsnet_kmem_free_debug(%p) ptr %p len %d not found: caller %p (%s:%d)\n",
72037 +                       current, ptr, len, __builtin_return_address(0), file, line);
72038 +       }
72039 +
72040 +       if ((((unsigned long) ptr) >= VMALLOC_START && ((unsigned long) ptr) < VMALLOC_END)) 
72041 +               vfree (ptr);
72042 +       else
72043 +               kfree (ptr);
72044 +}
72045 +
72046 +#else /* !KMEM_DEBUG */
72047 +
72048 +void * 
72049 +qsnet_kmem_alloc(int len, int cansleep, int zerofill)
72050 +{
72051 +       void *new;
72052 +
72053 +       if (len < PAGE_SIZE || !cansleep)
72054 +               new = kmalloc(len, cansleep ? GFP_KERNEL : GFP_ATOMIC);
72055 +       else
72056 +               new = vmalloc(len);
72057 +
72058 +       if (len >= PAGE_SIZE)
72059 +               ASSERT(PAGE_ALIGNED((unsigned long) new));
72060 +
72061 +       if (new && zerofill)
72062 +               memset(new,0,len);
72063 +
72064 +       return new;
72065 +}
72066 +
72067 +void 
72068 +qsnet_kmem_free(void *ptr, int len)
72069 +{
72070 +       if ((((unsigned long) ptr) >= VMALLOC_START && ((unsigned long) ptr) < VMALLOC_END)) 
72071 +               vfree (ptr);
72072 +       else
72073 +               kfree (ptr);
72074 +}
72075 +#endif /* !KMEM_DEBUG */
72076 +
72077 +void
72078 +qsnet_kmem_display(void *handle)
72079 +{
72080 +#ifdef KMEM_DEBUG
72081 +       kmalloc_t *kp;
72082 +       struct list_head *lp;
72083 +       unsigned long flags;
72084 +       int count = 0, totsize = 0;
72085 +
72086 +       spin_lock_irqsave(&kmalloc_lock, flags);
72087 +       for (lp = kmalloc_head.next; lp != &kmalloc_head;  lp = lp->next) {
72088 +               kp = list_entry(lp, kmalloc_t, list);
72089 +
72090 +               if (!handle || handle == kp->owner)
72091 +               {
72092 +                       printk("qsnet_kmem_display(%p): mem %p len %d unfreed caller %p (%p) \n",
72093 +                              handle, kp->ptr, kp->len, kp->caller, kp->owner);
72094 +                   
72095 +                       count++;
72096 +                       totsize += kp->len;
72097 +               }
72098 +       }
72099 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
72100 +
72101 +       printk("qsnet_kmem_display(%p): %d bytes left in %d objects\n", handle, totsize, count);
72102 +#endif
72103 +}
72104 +
72105 +physaddr_t
72106 +kmem_to_phys(void *ptr)
72107 +{
72108 +       virtaddr_t virt = (virtaddr_t) ptr;
72109 +       physaddr_t phys;
72110 +       pte_t     *pte;
72111 +
72112 +       if ((virt >= VMALLOC_START && virt < VMALLOC_END))
72113 +       {
72114 +               pte = find_pte_kernel(virt);
72115 +               ASSERT(pte && !pte_none(*pte));
72116 +               phys = pte_phys(*pte) + (virt & (PAGE_SIZE-1));
72117 +       }
72118 +#if defined(PKMAP_BASE)
72119 +       else if (virt >= PKMAP_BASE && virt < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
72120 +       {
72121 +               pte = find_pte_kernel(virt);
72122 +               ASSERT(pte && !pte_none(*pte));
72123 +               phys = pte_phys(*pte) + (virt & (PAGE_SIZE-1));
72124 +       }
72125 +#endif
72126 +#if defined(__ia64)
72127 +       else if (virt >= __IA64_UNCACHED_OFFSET && virt < PAGE_OFFSET)
72128 +       {
72129 +               /* ia64 non-cached KSEG */
72130 +               phys = ((physaddr_t) ptr - __IA64_UNCACHED_OFFSET);
72131 +       }
72132 +#endif
72133 +       else /* otherwise it's KSEG */
72134 +       {
72135 +               phys = __pa(virt);
72136 +       }
72137 +           
72138 +#if defined(CONFIG_ALPHA_GENERIC) || (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
72139 +       /* 
72140 +        * with TS_BIAS as bit 40 - the tsunami pci space is mapped into
72141 +        * the kernel at 0xfffff500.00000000 however we need to convert
72142 +        * this to the true physical address 0x00000800.00000000.
72143 +        *
72144 +        * there is no need for PHYS_TWIDDLE since we knew we'd get a kernel
72145 +        * virtual address already and handled this with __pa().
72146 +        */
72147 +       if (phys & (1ul << 40)) {
72148 +               phys &= ~(1ul << 40);   /*   clear bit 40 (kseg I/O select) */
72149 +               phys |= (1ul << 43);    /*   set   bit 43 (phys I/O select) */
72150 +       }
72151 +#endif
72152 +       return phys;
72153 +}
72154 +
72155 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
72156 +
72157 +EXPORT_SYMBOL(pci_resource_size);
72158 +EXPORT_SYMBOL(pci_get_base_address);
72159 +EXPORT_SYMBOL(pci_base_to_kseg);
72160 +
72161 +
72162 +/*
72163 + * PCI stuff.  
72164 + *
72165 + * XXX pci_base_to_kseg() and pci_kseg_to_phys() are problematic
72166 + * in that they may not work on non-Tsunami (DS20, ES40, etc) 
72167 + * architectures, and may not work in non-zero PCI bus numbers.
72168 + */
72169 +
72170 +unsigned long 
72171 +pci_get_base_address(struct pci_dev *pdev, int index)
72172 +{
72173 +       unsigned long base;
72174 +
72175 +       ASSERT(index >= 0 && index <= 5);
72176 +       /* borrowed in part from drivers/scsi/sym53c8xx.c */
72177 +       base = pdev->base_address[index++];
72178 +
72179 +#if BITS_PER_LONG > 32
72180 +       if ((base & 0x7) == 0x4)
72181 +               base |= (((unsigned long)pdev->base_address[index]) << 32);
72182 +#endif
72183 +       return base;
72184 +}
72185 +
72186 +unsigned long 
72187 +pci_resource_size(struct pci_dev *pdev, int index)
72188 +{
72189 +       u32 addr, mask, size;
72190 +
72191 +       static u32 bar_addr[] = {
72192 +               PCI_BASE_ADDRESS_0, 
72193 +               PCI_BASE_ADDRESS_1, 
72194 +               PCI_BASE_ADDRESS_2,
72195 +               PCI_BASE_ADDRESS_3, 
72196 +               PCI_BASE_ADDRESS_4, 
72197 +               PCI_BASE_ADDRESS_5, 
72198 +       };
72199 +       ASSERT(index >= 0 && index <= 5);
72200 +
72201 +       /* algorithm from Rubini book */
72202 +       pci_read_config_dword (pdev,    bar_addr[index], &addr);
72203 +       pci_write_config_dword(pdev,    bar_addr[index], ~0);
72204 +       pci_read_config_dword (pdev,    bar_addr[index], &mask);
72205 +       pci_write_config_dword(pdev,    bar_addr[index], addr);
72206 +
72207 +       mask &= PCI_BASE_ADDRESS_MEM_MASK;
72208 +       size = ~mask + 1;
72209 +       return size;
72210 +}
72211 +
72212 +/*
72213 + * Convert BAR register value to KSEG address.
72214 + */
72215 +void *
72216 +pci_base_to_kseg(u64 baddr, int bus)
72217 +{
72218 +       u64 kseg;
72219 +
72220 +       /* XXX tsunami specific */
72221 +       baddr &= ~(u64)0x100000000;  /* mask out hose bit */
72222 +       kseg = TSUNAMI_MEM(bus) + baddr;
72223 +       return (void *)kseg; 
72224 +}
72225 +
72226 +#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,0) */
72227 +
72228 +/*
72229 + * Spin the other CPU's in an SMP system.
72230 + * smp_call_function() needed to be exported to modules.  It will be
72231 + * papered over in <linux/smp.h> if running on a non-SMP box.
72232 + */
72233 +static spinlock_t hold_lock = SPIN_LOCK_UNLOCKED;
72234 +
72235 +#if 0
72236 +static void cpu_hold(void *unused)
72237 +{
72238 +       spin_lock(&hold_lock);
72239 +       spin_unlock(&hold_lock);
72240 +}
72241 +#endif
72242 +
72243 +void cpu_hold_all(void)
72244 +{
72245 +       spin_lock(&hold_lock);
72246 +
72247 +#if 0
72248 +       {
72249 +               int res;
72250 +               int retries = 10; 
72251 +           
72252 +               /* XXXXX: cannot call smp_call_function() from interrupt context */
72253 +           
72254 +               do {
72255 +                       /* only request blocking retry if not in interrupt context */
72256 +                       res = smp_call_function(cpu_hold, NULL, !in_interrupt(), 0);
72257 +                       if (res)
72258 +                               mdelay(5);
72259 +               } while (res && retries--);
72260 +           
72261 +               if (res)
72262 +                       printk("cpu_hold_all: IPI timeout\n");
72263 +       }
72264 +#endif
72265 +}
72266 +
72267 +void cpu_release_all(void)
72268 +{
72269 +       spin_unlock(&hold_lock);
72270 +}
72271 +
72272 +/*
72273 + * Local variables:
72274 + * c-file-style: "linux"
72275 + * End:
72276 + */
72277 Index: linux-2.4.21/drivers/net/qsnet/qsnet/Makefile
72278 ===================================================================
72279 --- linux-2.4.21.orig/drivers/net/qsnet/qsnet/Makefile  2004-02-23 16:02:56.000000000 -0500
72280 +++ linux-2.4.21/drivers/net/qsnet/qsnet/Makefile       2005-06-01 23:12:54.697424208 -0400
72281 @@ -0,0 +1,31 @@
72282 +#
72283 +# Makefile for Quadrics QsNet
72284 +#
72285 +# Copyright (c) 2002-2004 Quadrics Ltd
72286 +#
72287 +# File: drivers/net/qsnet/qsnet/Makefile
72288 +#
72289 +
72290 +
72291 +#
72292 +
72293 +#
72294 +# Makefile for Quadrics QsNet
72295 +#
72296 +# Copyright (c) 2004 Quadrics Ltd.
72297 +#
72298 +# File: driver/net/qsnet/qsnet/Makefile
72299 +#
72300 +
72301 +list-multi             := qsnet.o
72302 +qsnet-objs     := debug.o kernel_linux.o i686_mmx.o
72303 +export-objs            := kernel_linux.o
72304 +obj-$(CONFIG_QSNET)    := qsnet.o
72305 +
72306 +qsnet.o : $(qsnet-objs)
72307 +       $(LD) -r -o $@ $(qsnet-objs)
72308 +
72309 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
72310 +
72311 +include $(TOPDIR)/Rules.make
72312 +
72313 Index: linux-2.4.21/drivers/net/qsnet/qsnet/Makefile.conf
72314 ===================================================================
72315 --- linux-2.4.21.orig/drivers/net/qsnet/qsnet/Makefile.conf     2004-02-23 16:02:56.000000000 -0500
72316 +++ linux-2.4.21/drivers/net/qsnet/qsnet/Makefile.conf  2005-06-01 23:12:54.698424056 -0400
72317 @@ -0,0 +1,10 @@
72318 +# Flags for generating QsNet Linux Kernel Makefiles
72319 +MODNAME                =       qsnet.o
72320 +MODULENAME     =       qsnet
72321 +KOBJFILES      =       debug.o kernel_linux.o i686_mmx.o
72322 +EXPORT_KOBJS   =       kernel_linux.o
72323 +CONFIG_NAME    =       CONFIG_QSNET
72324 +SGALFC         =       
72325 +# EXTRALINES START
72326 +
72327 +# EXTRALINES END
72328 Index: linux-2.4.21/drivers/net/qsnet/qsnet/qsnetkmem_linux.c
72329 ===================================================================
72330 --- linux-2.4.21.orig/drivers/net/qsnet/qsnet/qsnetkmem_linux.c 2004-02-23 16:02:56.000000000 -0500
72331 +++ linux-2.4.21/drivers/net/qsnet/qsnet/qsnetkmem_linux.c      2005-06-01 23:12:54.698424056 -0400
72332 @@ -0,0 +1,325 @@
72333 +/*
72334 + *    Copyright (c) 2003 by Quadrics Ltd.
72335 + * 
72336 + *    For licensing information please see the supplied COPYING file
72337 + *
72338 + */
72339 +
72340 +#ident "@(#)$Id: qsnetkmem_linux.c,v 1.3 2003/08/13 10:03:27 fabien Exp $"
72341 +/*      $Source: /cvs/master/quadrics/qsnet/qsnetkmem_linux.c,v $*/
72342 +
72343 +/* macro macros */
72344 +#define MACRO_BEGIN     do {
72345 +#define MACRO_END       } while (0)
72346 +#define offsetof(T,F) ((int )&(((T *)0)->F))
72347 +
72348 +#include <stdio.h>
72349 +#include <stdlib.h>
72350 +#include <ctype.h>
72351 +#include <sys/types.h>
72352 +#include <errno.h>
72353 +#include <unistd.h>
72354 +#include <string.h>
72355 +#include <qsnet/config.h>
72356 +#include <qsnet/list.h>
72357 +#include <qsnet/procfs_linux.h>
72358 +#include <signal.h>
72359 +#include <sys/wait.h>
72360 +
72361 +#define LIST_HEAD_INIT(name) { &(name), &(name) }
72362 +
72363 +#define LIST_HEAD(name) \
72364 +       struct list_head name = LIST_HEAD_INIT(name)
72365 +
72366 +typedef struct {
72367 +       struct list_head list;
72368 +       void            *ptr;
72369 +       int             len;
72370 +       int             used_vmalloc;
72371 +       void            *owner;
72372 +       void            *caller;
72373 +       unsigned int     time;
72374 +       int              mark;
72375 +       int              line;
72376 +       char             file[256];
72377 +       
72378 +} kmalloc_t;
72379 +
72380 +
72381 +static LIST_HEAD(current_kmem);
72382 +static LIST_HEAD(stored_kmem);
72383 +
72384 +void
72385 +count_kmem(struct list_head * list, long * count, long * size )
72386 +{
72387 +       long              c,s;
72388 +       struct list_head *tmp;
72389 +       kmalloc_t        *kmem_ptr = NULL;
72390 +
72391 +
72392 +       c = s = 0L;
72393 +
72394 +       list_for_each(tmp, list) {
72395 +               kmem_ptr = list_entry(tmp, kmalloc_t , list);
72396 +               c++;
72397 +               s += kmem_ptr->len;
72398 +       }       
72399 +
72400 +       *count = c;
72401 +       *size  = s;
72402 +}
72403 +
72404 +void
72405 +clear_kmem(struct list_head * list)
72406 +{
72407 +       struct list_head *tmp,*tmp2;
72408 +       kmalloc_t        *kmem_ptr = NULL;
72409 +
72410 +       list_for_each_safe(tmp, tmp2, list) {
72411 +               kmem_ptr = list_entry(tmp, kmalloc_t , list);
72412 +               list_del_init(&kmem_ptr->list);
72413 +               free( kmem_ptr );
72414 +       }
72415 +}
72416 +
72417 +void 
72418 +move_kmem(struct list_head * dest, struct list_head *src)
72419 +{
72420 +       struct list_head *tmp,*tmp2;
72421 +       kmalloc_t        *kp= NULL;
72422 +
72423 +       list_for_each_safe(tmp, tmp2, src) {
72424 +               kp = list_entry(tmp, kmalloc_t , list);
72425 +               list_del_init(&kp->list);
72426 +
72427 +/*
72428 +               printf("mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
72429 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
72430 +*/                 
72431 +
72432 +               list_add_tail(&kp->list, dest);
72433 +       }
72434 +}
72435 +
72436 +void
72437 +read_kmem(struct list_head * list)
72438 +{
72439 +       FILE      * fd;
72440 +       char        line[1024];
72441 +       int         line_size = 100;
72442 +       char      * rep;
72443 +       kmalloc_t * kp;
72444 +
72445 +       clear_kmem(list);
72446 +
72447 +       fd = fopen(QSNET_PROCFS_KMEM_DEBUG,"r");
72448 +       if ( fd == NULL) 
72449 +       {
72450 +               printf("No Kmem Debug\n");
72451 +               return;
72452 +       }
72453 +
72454 +       rep = fgets(line,line_size, fd);
72455 +
72456 +       while ( rep != NULL ) 
72457 +       {
72458 +               kp = malloc(sizeof(kmalloc_t));
72459 +
72460 +               sscanf(line,"%p %d %d %p %p %u %d %s\n",
72461 +                      &kp->ptr, &kp->len, &kp->used_vmalloc, &kp->caller, &kp->owner, &kp->time, &kp->line, &kp->file[0]);
72462 +
72463 +/*
72464 +               printf(">>%s<<\n",line);
72465 +               printf("%p %d %d %p %p %u %d %s\n",
72466 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->time, kp->line, kp->file);
72467 +*/
72468 +
72469 +               list_add_tail(&kp->list, list);
72470 +
72471 +               rep = fgets(line,line_size, fd);
72472 +       }
72473 +       fclose(fd);
72474 +}
72475 +
72476 +void
72477 +mark_kmem(struct list_head * list, int mark)
72478 +{
72479 +       struct list_head *tmp;
72480 +       kmalloc_t        *kp = NULL;
72481 +
72482 +       list_for_each(tmp, list) {
72483 +               kp = list_entry(tmp, kmalloc_t , list);
72484 +
72485 +               kp->mark = mark;
72486 +       }
72487 +}
72488 +
72489 +kmalloc_t *
72490 +find_kmem(kmalloc_t * value, struct list_head * list)
72491 +{
72492 +       struct list_head *tmp;
72493 +       kmalloc_t        *kp = NULL;
72494 +
72495 +       
72496 +       list_for_each(tmp, list) {
72497 +               kp = list_entry(tmp, kmalloc_t , list);
72498 +               if ( (kp->ptr == value->ptr)
72499 +                    && (kp->len == value->len)
72500 +                    && (kp->used_vmalloc  == value->used_vmalloc )
72501 +                    && (kp->owner  == value->owner )
72502 +                    && (kp->caller  == value->caller )
72503 +                    && (kp->time  == value->time )
72504 +                    && (kp->line  == value->line )
72505 +                    && !(strcmp(kp->file,value->file) ))
72506 +                       return kp;
72507 +       }       
72508 +       return NULL;
72509 +}
72510 +
72511 +void 
72512 +diff_kmem(struct list_head *curr, struct list_head *stored)
72513 +{
72514 +       struct list_head *tmp;
72515 +       kmalloc_t        *kp = NULL;
72516 +       long              c,s;
72517 +
72518 +       mark_kmem(stored,  0);
72519 +       mark_kmem(curr,    0);
72520 +       
72521 +       list_for_each(tmp, stored) {
72522 +               kp = list_entry(tmp, kmalloc_t , list);
72523 +               if (find_kmem( kp, curr) != NULL) 
72524 +                       kp->mark = 1;
72525 +       }
72526 +       
72527 +       list_for_each(tmp, curr) {
72528 +               kp = list_entry(tmp, kmalloc_t , list);
72529 +               if (find_kmem( kp, stored) != NULL) 
72530 +                       kp->mark = 1;
72531 +       }               
72532 +
72533 +       c=s=0L;
72534 +       list_for_each(tmp, stored) {
72535 +               kp = list_entry(tmp, kmalloc_t , list);
72536 +               if (kp->mark != 1)
72537 +               {
72538 +                       printf("-- mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
72539 +                              kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
72540 +                       c++;
72541 +                       s+= kp->len;
72542 +               }
72543 +       }
72544 +       printf("-- %4ld %10ld \n",c,s);
72545 +       
72546 +       c=s=0L;
72547 +       list_for_each(tmp, curr) {
72548 +               kp = list_entry(tmp, kmalloc_t , list);
72549 +               if (kp->mark != 1)
72550 +               {
72551 +                       printf("++ mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
72552 +                              kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
72553 +                       c++;
72554 +                       s+= kp->len;
72555 +               }
72556 +       }               
72557 +       printf("++ %4ld %10ld \n",c,s);
72558 +}
72559 +
72560 +
72561 +void
72562 +print_kmem(struct list_head * list)
72563 +{
72564 +       struct list_head *tmp;
72565 +       kmalloc_t        *kp = NULL;
72566 +
72567 +       list_for_each(tmp, list) {
72568 +               kp = list_entry(tmp, kmalloc_t , list);
72569 +
72570 +               printf("mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
72571 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
72572 +                   
72573 +       }
72574 +}
72575 +
72576 +void 
72577 +print_cmds()
72578 +{
72579 +       long c,s;
72580 +
72581 +       printf("q : quits \n");
72582 +       printf("r : read\n");
72583 +       printf("c : print current\n");
72584 +       printf("o : print stored\n");
72585 +       printf("s : store\n");
72586 +
72587 +       count_kmem(&current_kmem, &c, &s );
72588 +       printf("\ncurrent : %4ld %10ld\n", c , s);
72589
72590 +       count_kmem(&stored_kmem, &c, &s );
72591 +       printf("store   : %4ld %10ld\n", c , s);
72592
72593 +}
72594 +
72595 +int
72596 +main()
72597 +{
72598 +       char            line[128];
72599 +       int             line_size=127;
72600 +       int             len;
72601 +
72602 +
72603 +       while (1)
72604 +       {
72605 +               
72606 +               printf(">> ");
72607 +               fgets(line,line_size, stdin);
72608 +       
72609 +               
72610 +               len = strlen( line ) -1;
72611 +               if ( len ) 
72612 +               {
72613 +                       switch ( tolower(line[0]) ) 
72614 +                       {
72615 +                       case 'q':
72616 +                               exit(0);
72617 +
72618 +                       case 'r' :
72619 +                               read_kmem(&current_kmem);
72620 +                               break;
72621 +
72622 +                       case 'c' :
72623 +                               print_kmem(&current_kmem);
72624 +                               break;
72625 +
72626 +                       case 'o' :
72627 +                               print_kmem(&stored_kmem);
72628 +                               break;
72629 +
72630 +                       case 's' :
72631 +                               clear_kmem(&stored_kmem);
72632 +                               move_kmem(&stored_kmem, &current_kmem);
72633 +                               break;
72634 +
72635 +                       case 'd' :
72636 +                               diff_kmem(&current_kmem, &stored_kmem);
72637 +                               break;
72638 +
72639 +                       default:
72640 +                               print_cmds();   
72641 +                       }
72642 +
72643 +               
72644 +                       
72645 +               }
72646 +               else
72647 +                       print_cmds();
72648 +       }
72649 +
72650 +}
72651 +
72652 +
72653 +/*
72654 + * Local variables:
72655 + * c-file-style: "linux"
72656 + * End:
72657 + */
72658 Index: linux-2.4.21/drivers/net/qsnet/qsnet/quadrics_version.h
72659 ===================================================================
72660 --- linux-2.4.21.orig/drivers/net/qsnet/qsnet/quadrics_version.h        2004-02-23 16:02:56.000000000 -0500
72661 +++ linux-2.4.21/drivers/net/qsnet/qsnet/quadrics_version.h     2005-06-01 23:12:54.699423904 -0400
72662 @@ -0,0 +1 @@
72663 +#define QUADRICS_VERSION "4.30qsnet"
72664 Index: linux-2.4.21/drivers/net/qsnet/rms/Makefile
72665 ===================================================================
72666 --- linux-2.4.21.orig/drivers/net/qsnet/rms/Makefile    2004-02-23 16:02:56.000000000 -0500
72667 +++ linux-2.4.21/drivers/net/qsnet/rms/Makefile 2005-06-01 23:12:54.699423904 -0400
72668 @@ -0,0 +1,31 @@
72669 +#
72670 +# Makefile for Quadrics QsNet
72671 +#
72672 +# Copyright (c) 2002-2004 Quadrics Ltd
72673 +#
72674 +# File: drivers/net/qsnet/rms/Makefile
72675 +#
72676 +
72677 +
72678 +#
72679 +
72680 +#
72681 +# Makefile for Quadrics QsNet
72682 +#
72683 +# Copyright (c) 2004 Quadrics Ltd.
72684 +#
72685 +# File: driver/net/qsnet/rms/Makefile
72686 +#
72687 +
72688 +list-multi             := rms.o
72689 +rms-objs       := rms_kern.o rms_kern_Linux.o
72690 +export-objs            := 
72691 +obj-$(CONFIG_RMS)      := rms.o
72692 +
72693 +rms.o : $(rms-objs)
72694 +       $(LD) -r -o $@ $(rms-objs)
72695 +
72696 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
72697 +
72698 +include $(TOPDIR)/Rules.make
72699 +
72700 Index: linux-2.4.21/drivers/net/qsnet/rms/Makefile.conf
72701 ===================================================================
72702 --- linux-2.4.21.orig/drivers/net/qsnet/rms/Makefile.conf       2004-02-23 16:02:56.000000000 -0500
72703 +++ linux-2.4.21/drivers/net/qsnet/rms/Makefile.conf    2005-06-01 23:12:54.699423904 -0400
72704 @@ -0,0 +1,10 @@
72705 +# Flags for generating QsNet Linux Kernel Makefiles
72706 +MODNAME                =       rms.o
72707 +MODULENAME     =       rms
72708 +KOBJFILES      =       rms_kern.o rms_kern_Linux.o
72709 +EXPORT_KOBJS   =       
72710 +CONFIG_NAME    =       CONFIG_RMS
72711 +SGALFC         =       
72712 +# EXTRALINES START
72713 +
72714 +# EXTRALINES END
72715 Index: linux-2.4.21/drivers/net/qsnet/rms/quadrics_version.h
72716 ===================================================================
72717 --- linux-2.4.21.orig/drivers/net/qsnet/rms/quadrics_version.h  2004-02-23 16:02:56.000000000 -0500
72718 +++ linux-2.4.21/drivers/net/qsnet/rms/quadrics_version.h       2005-06-01 23:12:54.700423752 -0400
72719 @@ -0,0 +1 @@
72720 +#define QUADRICS_VERSION "4.30qsnet"
72721 Index: linux-2.4.21/drivers/net/qsnet/rms/rms_kern.c
72722 ===================================================================
72723 --- linux-2.4.21.orig/drivers/net/qsnet/rms/rms_kern.c  2004-02-23 16:02:56.000000000 -0500
72724 +++ linux-2.4.21/drivers/net/qsnet/rms/rms_kern.c       2005-06-01 23:12:54.702423448 -0400
72725 @@ -0,0 +1,1757 @@
72726 +/*
72727 + * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
72728 + *
72729 + * For licensing information please see the supplied COPYING file
72730 + *
72731 + * rms_kern.c:    RMS kernel module
72732 + *
72733 + * $Source: /cvs/master/quadrics/rmsmod/rms_kern.c,v $
72734 + */
72735 +
72736 +#ident "@(#)$Id: rms_kern.c,v 1.62.2.4 2005/01/18 11:05:45 duncan Exp $"
72737 +
72738 +#include <stddef.h>
72739 +#include <qsnet/kernel.h>
72740 +#include <qsnet/autoconf.h>
72741 +#include <rms/rmscall.h>
72742 +
72743 +/*
72744 + * extend stats added in version 5
72745 + */
72746 +#define RMS_MODVERSION 5
72747 +
72748 +#if defined(SOLARIS)
72749 +
72750 +#define CURUID() CURPROC()->p_cred->cr_uid
72751 +#define RMS_NCPUS() 4
72752 +#define PROC_STRUCT proc
72753 +
72754 +#include <sys/time.h>
72755 +
72756 +#elif defined(LINUX)
72757 +
72758 +#ifdef PROCESS_ACCT 
72759 +#define TIMEVAL_TO_MSEC(tv) ((tv)->tv_sec * 1000 + (tv)->tv_usec / 1000)
72760 +#define TIMEVAL_TO_CT(tv)   ((tv)->tv_sec * HZ + (tv)->tv_usec / (1000000L / HZ))
72761 +#endif
72762 +
72763 +#ifdef RSS_ATOMIC
72764 +#define PROC_RSS(proc) ((proc)->mm ? atomic_read(&(proc)->mm->rss) : 0)
72765 +#else
72766 +#define PROC_RSS(proc) ((proc)->mm ? (proc)->mm->rss : 0)
72767 +#endif
72768 +
72769 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
72770 +#      define  RMS_NCPUS()     smp_num_cpus
72771 +#else
72772 +#      define RMS_NCPUS()      num_online_cpus()
72773 +#endif
72774 +
72775 +#define CURUID()       CURPROC()->uid
72776 +#define p_pid          pid
72777 +#define PROC_STRUCT    task_struct
72778 +
72779 +/* care needed with conversion to millisecs on 32-bit Linux */
72780 +#ifdef LINUX
72781 +#ifdef LINUX_I386
72782 +#define CT_TO_MSEC(x)  ct_to_msec(x)
72783 +
72784 +uint64_t ct_to_msec(clock_t t)
72785 +{
72786 +    uint64_t msecs;
72787 +    if (t < 2000000)
72788 +    {
72789 +       t = (1000 * t)/HZ;
72790 +       msecs = t;
72791 +    }
72792 +    else
72793 +    {
72794 +       t = t / HZ;
72795 +       msecs = t * 1000;
72796 +    }
72797 +    return(msecs);
72798 +}
72799 +
72800 +#else
72801 +#define CT_TO_MSEC(x)  (((x) * 1000)/HZ)
72802 +#endif
72803 +#endif
72804 +
72805 +#ifndef FALSE
72806 +#define FALSE          (0)
72807 +#define TRUE           (!FALSE)
72808 +#endif
72809 +
72810 +#include <linux/time.h>
72811 +#include <linux/proc_fs.h>
72812 +#include <linux/ptrack.h>
72813 +
72814 +#include <linux/module.h>
72815 +
72816 +#elif defined(DIGITAL_UNIX)
72817 +
72818 +#define CURUID() CURPROC()->p_ruid
72819 +extern  int ncpus;
72820 +#define RMS_NCPUS() ncpus
72821 +#define PROC_STRUCT proc
72822 +#define TIMEVAL_TO_MSEC(tv) ((tv)->tv_sec * 1000 + (tv)->tv_usec / 1000)
72823 +
72824 +#include <sys/time.h>
72825 +
72826 +#else
72827 +#error cannot determine operating system
72828 +#endif
72829 +
72830 +int shm_cleanup(void);
72831 +
72832 +struct cap_desc {
72833 +
72834 +    struct cap_desc *next;
72835 +    int              index;    /* index of capability in program */
72836 +    ELAN_CAPABILITY  cap;      /* elan capability */
72837 +
72838 +};
72839 +
72840 +struct proc_desc {
72841 +    
72842 +    struct proc_desc    *next;
72843 +    struct PROC_STRUCT  *proc;
72844 +    struct prg_desc     *program;      /* controlling program         */
72845 +    int                  mycap;                /* index of my capability      */
72846 +    int                  myctx;                /* context number for process  */
72847 +    int                  flags;
72848 +    int                  vp;           /* elan virtual process number */
72849 +};
72850 +
72851 +struct prg_desc {
72852 +    
72853 +    struct prg_desc  *next;            
72854 +    int               id;      /* program id                          */
72855 +    int               flags;   /* program status flags                */
72856 +    uid_t             uid;     /* user id                             */
72857 +    int               ncpus;   /* number of cpus allocated to program */
72858 +    int               nprocs;  /* number of processes in program      */
72859 +    struct proc_desc *pdescs;  /* processes in this program           */
72860 +    int               ncaps;   /* number of capabilities              */
72861 +    struct cap_desc  *caps;    /* elan capabilities                   */
72862 +    char             *corepath;        /* core path for parallel program      */
72863 +    int               psid;    /* processor set id                    */
72864 +
72865 +    uint64_t       cutime;     /* user time accumulated by children   */
72866 +    uint64_t       cstime;     /* system time accumulated by children */
72867 +    uint64_t       start_time; /* time program created                */
72868 +    uint64_t       end_time;   /* time last process exited            */
72869 +    uint64_t       sched_time; /* last time job was scheduled         */
72870 +    uint64_t       accum_atime;        /* allocated time last deschedule      */
72871 +    uint64_t       memint;     /* accumulated memory integral         */
72872 +    uint64_t       ebytes;     /* data transferred by the Elan(s)     */
72873 +    uint64_t       exfers;     /* number of Elan data transfers       */
72874 +    long           maxrss;     /* maximum size to date                */
72875 +    long           majflt;
72876 +    
72877 +#ifdef LINUX
72878 +    struct proc_dir_entry *proc_entry;
72879 +#endif
72880 +
72881 +};
72882 +
72883 +#if defined(LINUX)
72884 +static int rms_ptrack_callback (void *arg, int phase, struct task_struct *child);
72885 +#else
72886 +static void rms_xd_callback(void *arg, int phase, void *ctask);
72887 +static void rms_xa_callback (void *arg, int phase, void *ctask);
72888 +#endif
72889 +
72890 +static void prgsignal(struct prg_desc *program, int signo);
72891 +static uint64_t gettime(void);
72892 +static void freeProgram(struct prg_desc *program);
72893 +
72894 +static struct prg_desc *programs = 0;
72895 +
72896 +kmutex_t rms_lock;
72897 +
72898 +int rms_init(void)
72899 +{
72900 +    kmutex_init (&rms_lock);
72901 +
72902 +    DBG(printk("rms: initialising\n"));
72903 +
72904 +    return(ESUCCESS);
72905 +}
72906 +
72907 +int rms_reconfigure(void)
72908 +{
72909 +    return(ESUCCESS);
72910 +}
72911 +
72912 +int rms_programs_registered(void)
72913 +{
72914 +    /*
72915 +    ** Called when trying to unload rms.mod will not succeed
72916 +    ** if programs registered
72917 +    */
72918
72919 +   struct prg_desc *program, **pp;
72920 +
72921 +   kmutex_lock(&rms_lock);
72922 +
72923 +   for (program = programs; program; program = program->next)
72924 +   {
72925 +       if (program->nprocs != 0)
72926 +       {
72927 +            kmutex_unlock(&rms_lock);
72928 +            return(EBUSY);
72929 +       }
72930 +   }
72931 +
72932 +   /*
72933 +   ** We have traversed the programs list and no processes registered
72934 +   ** Now free the memory
72935 +   */
72936 +       
72937 +    pp = &programs;
72938 +    while ((program = *pp) != NULL)
72939 +    {
72940 +        *pp = program->next;
72941 +        freeProgram(program);
72942 +    }
72943 +    kmutex_unlock(&rms_lock);
72944 +   
72945 +    return(ESUCCESS);
72946 +
72947 +}
72948 +
72949 +int rms_fini(void)
72950 +{
72951 +    /*
72952 +     * don't allow an unload if there are programs registered
72953 +     */
72954 +    if (rms_programs_registered())
72955 +        return(EBUSY);
72956 +
72957 +    kmutex_destroy (&rms_lock);
72958 +
72959 +    DBG(printk("rms: removed\n"));
72960 +
72961 +    return(ESUCCESS);
72962 +}
72963 +
72964 +#ifdef LINUX
72965 +
72966 +extern struct proc_dir_entry *rms_procfs_programs;
72967 +
72968 +/*
72969 + * display one pid per line if there isn't enough space 
72970 + * for another pid then add "...\n" and stop 
72971 + */
72972 +int pids_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
72973 +{
72974 +    struct prg_desc *program = (struct prg_desc *)data;
72975 +    struct proc_desc *pdesc;
72976 +    char *ptr = page;
72977 +    int bytes = 0, nb;
72978 +
72979 +    kmutex_lock(&rms_lock);
72980 +    
72981 +    for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
72982 +    {
72983 +       if (bytes > count - 15)
72984 +       {
72985 +           bytes += sprintf(ptr,"...\n");
72986 +           break;
72987 +       }
72988 +        nb = sprintf(ptr, "%d %d\n", pdesc->proc->p_pid, pdesc->vp);
72989 +       bytes += nb;
72990 +       ptr += nb;
72991 +    }
72992 +    kmutex_unlock(&rms_lock);
72993 +    
72994 +    return(bytes);
72995 +}
72996 +
72997 +int status_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
72998 +{
72999 +    struct prg_desc *program = (struct prg_desc *)data;
73000 +    int bytes;
73001 +    if (program->flags & PRG_KILLED)
73002 +       bytes = sprintf(page, "killed\n");
73003 +    else
73004 +       bytes = sprintf(page, "running\n");
73005 +    return(bytes);
73006 +}
73007 +
73008 +void rms_create_proc_entry(struct prg_desc *program)
73009 +{
73010 +    struct proc_dir_entry *p;
73011 +    char name[32];
73012 +
73013 +    if (rms_procfs_programs)
73014 +    {
73015 +       sprintf(name,"%d", program->id);
73016 +       if ((program->proc_entry = proc_mkdir(name, rms_procfs_programs)) != NULL)
73017 +       {
73018 +           if ((p = create_proc_entry ("pids", S_IRUGO, program->proc_entry)) != NULL)
73019 +           {
73020 +               p->owner = THIS_MODULE;
73021 +               p->data = program;
73022 +               p->read_proc = pids_callback;
73023 +           }
73024 +           if ((p = create_proc_entry ("status", S_IRUGO, program->proc_entry)) != NULL)
73025 +           {
73026 +               p->owner = THIS_MODULE;
73027 +               p->data = program;
73028 +               p->read_proc = status_callback;
73029 +           }
73030 +       }
73031 +    }
73032 +}
73033 +
73034 +void rms_remove_proc_entry(struct prg_desc *program)
73035 +{
73036 +    char name[32];
73037 +    if (rms_procfs_programs)
73038 +    {
73039 +       if (program->proc_entry)
73040 +       {
73041 +           remove_proc_entry ("pids", program->proc_entry);
73042 +           remove_proc_entry ("status", program->proc_entry);
73043 +       }
73044 +       sprintf(name,"%d", program->id);
73045 +       remove_proc_entry (name, rms_procfs_programs);
73046 +    }
73047 +}
73048 +
73049 +#endif
73050 +
73051 +/*
73052 + * find a program from its index/pid
73053 + *
73054 + * Duncan:  make the lookup more efficient for large numbers of programs/processes
73055 + */
73056 +static struct prg_desc *findProgram(const int id)
73057 +{
73058 +    struct prg_desc *program;
73059 +    for (program = programs; program; program = program->next)
73060 +       if (program->id == id)
73061 +           return(program);
73062 +    return(0);
73063 +}
73064 +
73065 +static struct proc_desc *findProcess(const int pid)
73066 +{
73067 +    struct prg_desc *program;
73068 +    struct proc_desc *pdesc;
73069 +    for (program = programs; program; program = program->next)
73070 +       for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73071 +           if (pdesc->proc->p_pid == pid)
73072 +               return(pdesc);
73073 +    return(0);
73074 +}
73075 +
73076 +static void freeProgram(struct prg_desc *program)
73077 +{
73078 +    struct proc_desc *pdesc;
73079 +    struct cap_desc *cdesc;
73080 +
73081 +#ifdef LINUX
73082 +    rms_remove_proc_entry(program);
73083 +#endif
73084 +
73085 +    while ((pdesc = program->pdescs) != NULL)
73086 +    {
73087 +       program->pdescs = pdesc->next;
73088 +       KMEM_FREE(pdesc, sizeof(struct proc_desc));
73089 +    }
73090 +
73091 +    while ((cdesc = program->caps) != NULL)
73092 +    {
73093 +       program->caps = cdesc->next;
73094 +       KMEM_FREE(cdesc, sizeof(struct cap_desc));
73095 +    }
73096 +
73097 +    if (program->corepath)
73098 +       KMEM_FREE(program->corepath, MAXCOREPATHLEN + 1);
73099 +
73100 +    KMEM_FREE(program, sizeof(struct prg_desc));
73101 +
73102 +#ifdef LINUX
73103 +    MOD_DEC_USE_COUNT;
73104 +#endif
73105 +}
73106 +
73107 +/*
73108 + * rms_prgcreate
73109 + *
73110 + * create a new program description
73111 + */
73112 +int rms_prgcreate(int id, uid_t uid, int cpus)
73113 +{
73114 +    struct prg_desc *program;
73115 +    struct proc_desc *pdesc;
73116 +    
73117 +    DBG(printk("rms_prgcreate :: program %d pid %d uid %d cpus %d\n", id, CURPROC()->p_pid, uid, cpus));
73118 +    
73119 +    /*
73120 +     * parallel programs are created as root by the rmsd as it forks the loader
73121 +     */
73122 +    if (CURUID())
73123 +       return(EACCES);
73124 +    
73125 +    /*
73126 +     * program ids must be unique
73127 +     */
73128 +    kmutex_lock(&rms_lock);
73129 +    program = findProgram(id);
73130 +    kmutex_unlock(&rms_lock);
73131 +    if (program)
73132 +       return(EINVAL);
73133 +
73134 +    /*
73135 +     * create a new program description
73136 +     */
73137 +    KMEM_ALLOC(program, struct prg_desc *, sizeof(struct prg_desc), TRUE);
73138 +    if (!program)
73139 +       return(ENOMEM);
73140 +
73141 +    program->id = id;
73142 +    program->flags = PRG_RUNNING;
73143 +    program->ncpus = cpus;
73144 +    program->nprocs = 1;
73145 +    program->uid = uid;
73146 +    program->ncaps = 0;
73147 +    program->caps = 0;
73148 +    program->corepath = 0;
73149 +    program->psid = 0;
73150 +    program->start_time = program->sched_time = gettime();
73151 +    program->end_time = 0;
73152 +    program->accum_atime = 0;
73153 +    program->cutime = 0;
73154 +    program->cstime = 0;
73155 +    program->maxrss = 0;
73156 +    program->memint = 0;
73157 +    program->majflt = 0;
73158 +    program->ebytes = 0;
73159 +    program->exfers = 0;
73160 +
73161 +    KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE);
73162 +    if (!pdesc)
73163 +       return(ENOMEM);
73164 +
73165 +    pdesc->proc = CURPROC();
73166 +    pdesc->next = 0;
73167 +    pdesc->mycap = ELAN_CAP_UNINITIALISED;
73168 +    pdesc->myctx = ELAN_CAP_UNINITIALISED;
73169 +    pdesc->vp = -1;            /* rmsloader */
73170 +    pdesc->program = program;
73171 +    program->pdescs = pdesc;
73172 +    
73173 +#ifdef LINUX
73174 +    rms_create_proc_entry(program);
73175 +#endif
73176 +    
73177 +    kmutex_lock(&rms_lock);
73178 +
73179 +#if defined(LINUX)
73180 +    if (ptrack_register (rms_ptrack_callback, NULL) != 0)
73181 +    {
73182 +       kmutex_unlock(&rms_lock);
73183 +        KMEM_FREE(pdesc,sizeof(struct proc_desc));
73184 +        KMEM_FREE(program,sizeof(struct prg_desc));
73185 +       return(ENOMEM);
73186 +    }
73187 +#else
73188 +    /*
73189 +     * install a fork handler
73190 +     */
73191 +    if (HANDLER_REGISTER((void *)(unsigned long)rms_xa_callback, NULL, XA_FORK | XA_EXIT | XA_IOF | XA_KOF | XA_KOE) == NULL)
73192 +    {
73193 +       kmutex_unlock(&rms_lock);
73194 +        KMEM_FREE(pdesc,sizeof(struct proc_desc));
73195 +        KMEM_FREE(program,sizeof(struct prg_desc));
73196 +       return(ENOMEM);
73197 +    }
73198 +#endif
73199 +
73200 +    program->next = programs;
73201 +    programs = program;
73202 +    
73203 +#ifdef LINUX
73204 +    MOD_INC_USE_COUNT;
73205 +#endif
73206 +    
73207 +    kmutex_unlock(&rms_lock);
73208 +    return(ESUCCESS);
73209 +}
73210 +
73211 +
73212 +/*
73213 + * rms_prgdestroy
73214 + *
73215 + * destroy a program description
73216 + */
73217 +int rms_prgdestroy(int id)
73218 +{
73219 +    struct prg_desc *program, **pp;
73220 +    int status = ESRCH;
73221 +
73222 +    /*
73223 +     * parallel programs are created and destroyed by the rmsd
73224 +     */
73225 +    if (CURUID())
73226 +       return(EACCES);
73227 +
73228 +    kmutex_lock(&rms_lock);
73229 +    
73230 +    pp = &programs;
73231 +    while ((program = *pp) != NULL)
73232 +    {
73233 +       if (program->id == id)
73234 +       {
73235 +           if (program->nprocs == 0)
73236 +           {
73237 +               DBG(printk("rms_prgdestro :: removing program %d\n", program->id));
73238 +               *pp = program->next;
73239 +               freeProgram(program);
73240 +               status = ESUCCESS;
73241 +           }
73242 +           else
73243 +           {
73244 +               DBG(printk("rms_prgdestro :: failed to remove program %d: %d\n", program->id, program->nprocs));
73245 +               status = ECHILD;
73246 +               pp = &program->next;
73247 +           }
73248 +       }
73249 +       else
73250 +           pp = &program->next;
73251 +    }
73252 +    
73253 +    kmutex_unlock(&rms_lock);
73254 +    return(status);
73255 +}
73256 +
73257 +/*
73258 + * rms_prgids
73259 + */
73260 +int rms_prgids(int maxids, int *prgids, int *nprgs)
73261 +{
73262 +    struct prg_desc *program;
73263 +    int count = 0, *buf, *bufp;
73264 +    int status = ESUCCESS;
73265 +
73266 +    if (maxids < 1)
73267 +        return(EINVAL);
73268 +
73269 +    kmutex_lock(&rms_lock);
73270 +
73271 +    for (program = programs; program; program = program->next)
73272 +        count++;
73273 +    count = MIN(count, maxids);
73274 +
73275 +    if (count > 0)
73276 +    {
73277 +        KMEM_ALLOC(buf, int *, count * sizeof(int), TRUE);
73278 +       if (buf)
73279 +       {                  
73280 +           for (program = programs, bufp=buf; bufp < buf + count; 
73281 +                program = program->next)
73282 +               *bufp++ = program->id;
73283 +       
73284 +           if (copyout(buf, prgids, sizeof(int) * count))
73285 +               status = EFAULT;
73286 +
73287 +           KMEM_FREE(buf, count * sizeof(int));
73288 +       }
73289 +       else
73290 +           status = ENOMEM;
73291 +    }
73292 +    
73293 +    if (copyout(&count, nprgs, sizeof(int)))
73294 +       status = EFAULT;
73295 +
73296 +    kmutex_unlock(&rms_lock);
73297 +    
73298 +    return(status);
73299 +}
73300 +
73301 +/*
73302 + * rms_prginfo
73303 + */
73304 +int rms_prginfo(int id, int maxpids, pid_t *pids, int *nprocs)
73305 +{
73306 +    struct prg_desc *program;
73307 +    struct proc_desc *pdesc;
73308 +    pid_t *pidp, *buf;
73309 +    int status = ESUCCESS;
73310 +
73311 +    kmutex_lock(&rms_lock);
73312 +
73313 +    if ((program = findProgram(id)) != NULL)
73314 +    {
73315 +       if (program->nprocs > 0)
73316 +       {
73317 +           KMEM_ALLOC(buf, pid_t *, program->nprocs * sizeof(pid_t), TRUE);
73318 +           if (buf)
73319 +           {
73320 +               for (pidp = buf, pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73321 +                   *pidp++ = pdesc->proc->p_pid;
73322 +               
73323 +               if (copyout(buf, pids, sizeof(pid_t) * MIN(program->nprocs, maxpids)))
73324 +                   status = EFAULT;
73325 +               
73326 +               KMEM_FREE(buf, program->nprocs * sizeof(pid_t));
73327 +           }
73328 +           else
73329 +               status = ENOMEM;
73330 +       }
73331 +       
73332 +       if (copyout(&program->nprocs, nprocs, sizeof(int)))
73333 +           status = EFAULT;
73334 +    }
73335 +    else
73336 +       status = ESRCH;
73337 +
73338 +    kmutex_unlock(&rms_lock);
73339 +    
73340 +    return(status);
73341 +}
73342 +
73343 +/*
73344 + * rmsmod always used to use psignal but this doesn't work
73345 + * on Linux 2.6.7 so we have changed to kill_proc
73346 + */
73347 +static void prgsignal(struct prg_desc *program, int signo)
73348 +{
73349 +    struct proc_desc *pdesc;
73350 +    for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73351 +       kill_proc(pdesc->proc->p_pid, signo, 1);
73352 +}
73353 +
73354 +
73355 +int rms_prgsignal(int id, int signo)
73356 +{
73357 +    struct prg_desc *program;
73358 +    int status = ESUCCESS;
73359 +    
73360 +    kmutex_lock(&rms_lock);
73361 +    
73362 +    if ((program = findProgram(id)) != NULL)
73363 +    {
73364 +       if (CURUID() == 0 || CURUID() == program->uid)
73365 +       {
73366 +           prgsignal(program, signo);
73367 +           if (signo == SIGKILL)
73368 +               program->flags |= PRG_KILLED;
73369 +       }
73370 +       else
73371 +           status = EACCES;
73372 +    }
73373 +    else
73374 +       status = ESRCH;
73375 +    
73376 +    kmutex_unlock(&rms_lock);
73377 +    
73378 +    return(status);
73379 +}
73380 +
73381 +int rms_prgaddcap(int id, int index, ELAN_CAPABILITY *cap)
73382 +{
73383 +    struct prg_desc *program;
73384 +    struct cap_desc *cdesc;
73385 +    int status = ESUCCESS;
73386 +
73387 +    if (cap == NULL)
73388 +        return(EINVAL);
73389 +
73390 +    kmutex_lock(&rms_lock);
73391 +    if ((program = findProgram(id)) != NULL)
73392 +    {
73393 +       KMEM_ALLOC(cdesc, struct cap_desc *, sizeof(struct cap_desc), TRUE);
73394 +       if (cdesc)
73395 +       {
73396 +           cdesc->index = index;
73397 +           if (copyin(cap, &cdesc->cap, sizeof(ELAN_CAPABILITY)))
73398 +           {
73399 +               KMEM_FREE(cdesc, sizeof(struct cap_desc));
73400 +               status = EFAULT;
73401 +           }
73402 +           else
73403 +           {
73404 +               DBG(printk("rms_prgaddcap :: program %d index %d context %d<-->%d\n",
73405 +                          program->id, index, cdesc->cap.cap_lowcontext, cdesc->cap.cap_highcontext));
73406 +               cdesc->next = program->caps;
73407 +               program->caps = cdesc;
73408 +               program->ncaps++;
73409 +           }
73410 +       }
73411 +       else
73412 +           status = ENOMEM;
73413 +    }
73414 +    else
73415 +       status = ESRCH;
73416 +
73417 +    kmutex_unlock(&rms_lock);
73418 +    return(status);
73419 +}
73420 +
73421 +static uint64_t gettime(void)
73422 +{
73423 +    uint64_t now;
73424 +
73425 +#if defined(SOLARIS)
73426 +    timespec_t tv;
73427 +    gethrestime(&tv);
73428 +    now = tv.tv_sec * 1000 + tv.tv_nsec / 1000000;
73429 +#elif defined(LINUX)
73430 +    struct timeval tv;
73431 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17)
73432 +    get_fast_time(&tv);
73433 +#else
73434 +    do_gettimeofday(&tv);
73435 +#endif
73436 +    now = tv.tv_sec * 1000 + tv.tv_usec / 1000;
73437 +#elif defined(DIGITAL_UNIX)
73438 +    struct timeval tv;
73439 +    microtime(&tv);
73440 +    now = tv.tv_sec * 1000 + tv.tv_usec / 1000;
73441 +#endif
73442 +
73443 +    return(now);
73444 +}
73445 +
73446 +#ifdef DIGITAL_UNIX
73447 +
73448 +int rms_getrusage(struct proc_desc *pdesc, struct rusage *ru)
73449 +{
73450 +    task_t   task;
73451 +    thread_t thread;
73452 +    
73453 +    if (!pdesc->proc)
73454 +       return(-1);
73455 +    
73456 +    /*
73457 +     * locking required unless called from the current proc
73458 +     */
73459 +    if (pdesc->proc != CURPROC())
73460 +    {
73461 +       if (!P_REF(pdesc->proc))
73462 +           return(-1);
73463 +       
73464 +       task = proc_to_task(pdesc->proc);
73465 +       if (!task) 
73466 +       {
73467 +           P_UNREF(pdesc->proc);
73468 +           DBG(printk("rms_getrusage :: process (%d) has no task\n", pdesc->proc->p_pid));
73469 +           return(-1);
73470 +       }
73471 +
73472 +       task_reference(task);
73473 +       task_lock(task);
73474 +       
73475 +       if (!queue_empty(&task->thread_list))
73476 +           thread = (thread_t) queue_first(&task->thread_list);
73477 +       else 
73478 +       {
73479 +           task_unlock(task);
73480 +           task_deallocate(task);
73481 +           P_UNREF(pdesc->proc);
73482 +           return(-1);
73483 +       }
73484 +       
73485 +       thread_reference(thread);
73486 +       task_unlock(task);
73487 +    }
73488 +    
73489 +    *ru = proc_to_utask(pdesc->proc)->uu_ru;
73490 +    task_get_rusage(ru, proc_to_task(pdesc->proc));
73491 +    
73492 +    if (pdesc->proc != CURPROC())
73493 +    {
73494 +       task_deallocate(task);
73495 +       thread_deallocate(thread);
73496 +       P_UNREF(pdesc->proc);
73497 +    }
73498 +    return(0);
73499 +}
73500 +
73501 +#endif
73502 +
73503 +/*
73504 + * new stats collection interface, 64-bit with addition of Elan stats
73505 + */
73506 +int rms_prggetstats(int id, prgstats_t *stats)
73507 +{
73508 +#ifdef DIGITAL_UNIX
73509 +    long ruixrss, ruidrss, ruisrss, rumaxrss, rumajflt;
73510 +#endif
73511 +    struct prg_desc *program = 0;
73512 +    struct proc_desc *pdesc;
73513 +    int status = ESUCCESS;
73514 +    prgstats_t totals;
73515 +    uint64_t now = gettime();
73516 +#if defined(SOLARIS)
73517 +    clock_t utime, stime;
73518 +#elif defined(LINUX)
73519 +    uint64_t utime, stime;
73520 +#endif
73521 +
73522 +    long maxrss;
73523 +
73524 +    kmutex_lock(&rms_lock);
73525 +    
73526 +    if (id < 0)
73527 +    {
73528 +       if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
73529 +           program = pdesc->program;
73530 +    }
73531 +    else
73532 +       program = findProgram(id);
73533 +
73534 +    if (program)
73535 +    {
73536 +       if (CURUID() == 0 || CURUID() == program->uid)
73537 +       {
73538 +           totals.flags = program->flags;
73539 +           totals.ncpus = program->ncpus;
73540 +           maxrss = 0;
73541 +
73542 +           if (program->nprocs > 0)
73543 +               totals.etime = now - program->start_time;
73544 +           else
73545 +               totals.etime = program->end_time - program->start_time;
73546 +           
73547 +           totals.atime = program->accum_atime;
73548 +           if (program->flags & PRG_RUNNING)
73549 +               totals.atime += program->ncpus * (now - program->sched_time);
73550 +           
73551 +#if defined(SOLARIS)
73552 +           utime = stime = 0;
73553 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73554 +           {
73555 +               utime += pdesc->proc->p_utime;
73556 +               stime += pdesc->proc->p_stime;
73557 +           }
73558 +           totals.utime = TICK_TO_MSEC(utime);
73559 +           totals.stime = TICK_TO_MSEC(stime);
73560 +
73561 +#elif defined(LINUX)
73562 +           utime = stime = 0;
73563 +           totals.memint = program->memint;
73564 +           totals.pageflts = program->majflt;
73565 +
73566 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73567 +           {
73568 +#ifdef  PROCESS_ACCT
73569 +       DBG(printk("rms_prggetsta :: process %d utime %ld clks stime %ld clks\n", 
73570 +                               pdesc->proc->p_pid, TIMEVAL_TO_CT(&pdesc->proc->utime),
73571 +                               TIMEVAL_TO_CT(&pdesc->proc->stime)));                 
73572 +       utime += TIMEVAL_TO_CT(&pdesc->proc->utime);                  
73573 +       stime += TIMEVAL_TO_CT(&pdesc->proc->stime);                  
73574 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
73575 +               DBG(printk("rms_prggetsta :: process %d utime %ld clks stime %ld clks\n", 
73576 +                          pdesc->proc->p_pid, pdesc->proc->times.tms_utime, 
73577 +                          pdesc->proc->times.tms_stime));
73578 +               utime += pdesc->proc->times.tms_utime;
73579 +               stime += pdesc->proc->times.tms_stime;
73580 +#else
73581 +               DBG(printk("rms_prggetsta :: process %d utime %ld clks stime %ld clks\n", 
73582 +                          pdesc->proc->p_pid, pdesc->proc->utime, pdesc->proc->stime));
73583 +               utime += pdesc->proc->utime;
73584 +               stime += pdesc->proc->stime;
73585 +#endif
73586 +
73587 +               totals.pageflts += pdesc->proc->maj_flt; 
73588 +
73589 +               maxrss += PROC_RSS(pdesc->proc) >> (20 - PAGE_SHIFT);
73590 +           }
73591 +
73592 +           /* convert user and system times to millisecs */
73593 +           totals.utime = CT_TO_MSEC(utime);
73594 +           totals.stime = CT_TO_MSEC(stime);
73595 +           
73596 +#elif defined(DIGITAL_UNIX)
73597 +           totals.utime = totals.stime = 0;
73598 +           totals.memint = program->memint;
73599 +           totals.pageflts = program->majflt;
73600 +
73601 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73602 +           {
73603 +               struct rusage ru;
73604 +               if (rms_getrusage(pdesc, &ru) < 0)
73605 +                   continue;
73606 +               
73607 +               totals.utime += TIMEVAL_TO_MSEC(&ru.ru_utime);
73608 +               totals.stime += TIMEVAL_TO_MSEC(&ru.ru_stime);
73609 +               
73610 +               /* convert maxrss to megabytes */
73611 +               rumaxrss = ru.ru_maxrss >> 10;
73612 +               rumajflt = ru.ru_majflt;
73613 +               totals.pageflts += rumajflt;
73614 +               
73615 +               /*
73616 +                * memory integrals are still broken in 5.1
73617 +                */
73618 +               
73619 +#ifdef FIXED_MEMINIT
73620 +               
73621 +               /* convert from pages * clock ticks to Mbytes * secs */
73622 +               ruixrss = (ru.ru_ixrss >> (20 - PAGE_SHIFT)) / hz;
73623 +               ruidrss = (ru.ru_idrss >> (20 - PAGE_SHIFT)) / hz;
73624 +               ruisrss = (ru.ru_isrss >> (20 - PAGE_SHIFT)) / hz;
73625 +               
73626 +               DBG(printk("rms_prggetsta :: process %d mem %d int %d %d %d flt %d\n", pdesc->proc->p_pid, 
73627 +                          rumaxrss, ruixrss, ruidrss, ruisrss, rumajflt));
73628 +               
73629 +               totals.memint += ruixrss + ruidrss + ruisrss;
73630 +#else
73631 +               DBG(printk("rms_prggetsta :: process %d mem %d flt %d\n", pdesc->proc->p_pid, rumaxrss, rumajflt));
73632 +               totals.memint = 0;
73633 +#endif
73634 +               maxrss += rumaxrss;
73635 +           }
73636 +#endif /* DIGITAL_UNIX */
73637 +
73638 +           if (maxrss > program->maxrss)
73639 +               program->maxrss = maxrss;
73640 +           
73641 +           totals.utime += program->cutime;
73642 +           totals.stime += program->cstime;
73643 +           totals.mem = program->maxrss;
73644 +           totals.ebytes = program->ebytes;
73645 +           totals.exfers = program->exfers;
73646 +
73647 +           DBG(printk("rms_prggetsta :: program %d mem %d flt %d\n", program->id, totals.mem, totals.pageflts));
73648 +           
73649 +           if (copyout(&totals, stats, sizeof(prgstats_t)))
73650 +               status = EFAULT;
73651 +       }
73652 +       else
73653 +           status = EACCES;
73654 +    }
73655 +    else
73656 +       status = ESRCH;
73657 +    
73658 +    kmutex_unlock(&rms_lock);
73659 +    return(status);
73660 +}
73661 +
73662 +/*
73663 + * preserve the old stats collection interface
73664 + */
73665 +
73666 +int rms_prggetoldstats(int id, prgstats_old_t *stats)
73667 +{
73668 +#ifdef DIGITAL_UNIX
73669 +    long ruixrss, ruidrss, ruisrss, rumaxrss, rumajflt;
73670 +#endif
73671 +    struct prg_desc *program = 0;
73672 +    struct proc_desc *pdesc;
73673 +    int status = ESUCCESS;
73674 +    prgstats_old_t totals;
73675 +    uint64_t now = gettime();
73676 +#if defined(SOLARIS) || defined(LINUX)
73677 +    clock_t utime, stime;
73678 +#endif
73679 +    long maxrss;
73680 +
73681 +    kmutex_lock(&rms_lock);
73682 +    
73683 +    if (id < 0)
73684 +    {
73685 +       if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
73686 +           program = pdesc->program;
73687 +    }
73688 +    else
73689 +       program = findProgram(id);
73690 +
73691 +    if (program)
73692 +    {
73693 +       if (CURUID() == 0 || CURUID() == program->uid)
73694 +       {
73695 +           totals.flags = program->flags;
73696 +           totals.ncpus = program->ncpus;
73697 +           maxrss = 0;
73698 +
73699 +           if (program->nprocs > 0)
73700 +               totals.etime = now - program->start_time;
73701 +           else
73702 +               totals.etime = program->end_time - program->start_time;
73703 +           
73704 +           totals.atime = program->accum_atime;
73705 +           if (program->flags & PRG_RUNNING)
73706 +               totals.atime += program->ncpus * (now - program->sched_time);
73707 +           
73708 +#if defined(SOLARIS)
73709 +           utime = stime = 0;
73710 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73711 +           {
73712 +               utime += pdesc->proc->p_utime;
73713 +               stime += pdesc->proc->p_stime;
73714 +           }
73715 +           totals.utime = TICK_TO_MSEC(utime);
73716 +           totals.stime = TICK_TO_MSEC(stime);
73717 +
73718 +#elif defined(LINUX)
73719 +           utime = stime = 0;
73720 +           totals.memint = program->memint;
73721 +           totals.pageflts = program->majflt;
73722 +
73723 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73724 +           {
73725 +#ifdef  PROCESS_ACCT
73726 +       DBG(printk("rms_getoldsta :: process %d utime %ld clks stime %ld clks\n", 
73727 +                               pdesc->proc->p_pid, TIMEVAL_TO_CT(&pdesc->proc->utime),
73728 +                               TIMEVAL_TO_CT(&pdesc->proc->stime)));                 
73729 +       utime += TIMEVAL_TO_CT(&pdesc->proc->utime);                  
73730 +       stime += TIMEVAL_TO_CT(&pdesc->proc->stime);                  
73731 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
73732 +               DBG(printk("rms_getoldsta :: process %d utime %ld clks stime %ld clks\n", 
73733 +                          pdesc->proc->p_pid, pdesc->proc->times.tms_utime, 
73734 +                          pdesc->proc->times.tms_stime));
73735 +               utime += pdesc->proc->times.tms_utime;
73736 +               stime += pdesc->proc->times.tms_stime;
73737 +#else
73738 +               DBG(printk("rms_getoldsta :: process %d utime %ld clks stime %ld clks\n", 
73739 +                          pdesc->proc->p_pid, pdesc->proc->utime, pdesc->proc->stime));
73740 +               utime += pdesc->proc->utime;
73741 +               stime += pdesc->proc->stime;
73742 +#endif
73743 +
73744 +               totals.pageflts += pdesc->proc->maj_flt; 
73745 +               maxrss += PROC_RSS(pdesc->proc) >> (20 - PAGE_SHIFT);
73746 +           }
73747 +
73748 +           /* convert user and system times to millisecs */
73749 +           totals.utime = CT_TO_MSEC(utime);
73750 +           totals.stime = CT_TO_MSEC(stime);
73751 +           
73752 +#elif defined(DIGITAL_UNIX)
73753 +           totals.utime = totals.stime = 0;
73754 +           totals.memint = program->memint;
73755 +           totals.pageflts = program->majflt;
73756 +
73757 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73758 +           {
73759 +               struct rusage ru;
73760 +               if (rms_getrusage(pdesc, &ru) < 0)
73761 +                   continue;
73762 +               
73763 +               totals.utime += TIMEVAL_TO_MSEC(&ru.ru_utime);
73764 +               totals.stime += TIMEVAL_TO_MSEC(&ru.ru_stime);
73765 +               
73766 +               /* convert maxrss to megabytes */
73767 +               rumaxrss = ru.ru_maxrss >> 10;
73768 +               rumajflt = ru.ru_majflt;
73769 +               totals.pageflts += rumajflt;
73770 +               
73771 +               /*
73772 +                * memory integrals are still broken in 5.1
73773 +                */
73774 +               
73775 +#ifdef FIXED_MEMINIT
73776 +               
73777 +               /* convert from pages * clock ticks to Mbytes * secs */
73778 +               ruixrss = (ru.ru_ixrss >> (20 - PAGE_SHIFT)) / hz;
73779 +               ruidrss = (ru.ru_idrss >> (20 - PAGE_SHIFT)) / hz;
73780 +               ruisrss = (ru.ru_isrss >> (20 - PAGE_SHIFT)) / hz;
73781 +               
73782 +               DBG(printk("rms_getoldsta :: process %d mem %d int %d %d %d flt %d\n", pdesc->proc->p_pid, 
73783 +                          rumaxrss, ruixrss, ruidrss, ruisrss, rumajflt));
73784 +               
73785 +               totals.memint += ruixrss + ruidrss + ruisrss;
73786 +#else
73787 +               DBG(printk("rms_getoldsta :: process %d mem %d flt %d\n", pdesc->proc->p_pid, rumaxrss, rumajflt));
73788 +               totals.memint = 0;
73789 +#endif
73790 +               maxrss += rumaxrss;
73791 +           }
73792 +#endif /* DIGITAL_UNIX */
73793 +
73794 +           if (maxrss > program->maxrss)
73795 +               program->maxrss = maxrss;
73796 +           
73797 +           totals.utime += program->cutime;
73798 +           totals.stime += program->cstime;
73799 +           totals.mem = program->maxrss;
73800 +           
73801 +           DBG(printk("rms_getoldsta :: program %d mem %d flt %d\n", program->id, totals.mem, totals.pageflts));
73802 +           
73803 +           if (copyout(&totals, stats, sizeof(prgstats_old_t)))
73804 +               status = EFAULT;
73805 +       }
73806 +       else
73807 +           status = EACCES;
73808 +    }
73809 +    else
73810 +       status = ESRCH;
73811 +    
73812 +    kmutex_unlock(&rms_lock);
73813 +    return(status);
73814 +}
73815 +
73816 +
73817 +int rms_prgsuspend(int id)
73818 +{
73819 +    struct prg_desc *program;
73820 +    int status = ESUCCESS;
73821 +
73822 +    kmutex_lock(&rms_lock);
73823 +    
73824 +    if ((program = findProgram(id)) != NULL)
73825 +    {
73826 +       if (CURUID() == 0 || CURUID() == program->uid)
73827 +       {
73828 +           program->flags &= ~PRG_RUNNING;
73829 +           program->flags |=  PRG_SUSPEND;
73830 +           program->accum_atime += program->ncpus * (gettime() - program->sched_time);
73831 +
73832 +           /* suspend/resume just use signals for now */
73833 +           prgsignal(program, SIGSTOP);
73834 +       }
73835 +       else
73836 +           status = EACCES;
73837 +    }
73838 +    else
73839 +       status = ESRCH;
73840 +
73841 +    kmutex_unlock(&rms_lock);
73842 +    return(status);
73843 +}
73844 +
73845 +int rms_prgresume(int id)
73846 +{
73847 +    struct prg_desc *program;
73848 +    int status = ESUCCESS;
73849 +
73850 +    kmutex_lock(&rms_lock);
73851 +    
73852 +    if ((program = findProgram(id)) != NULL)
73853 +    {
73854 +       if (CURUID() == 0 || CURUID() == program->uid)
73855 +       {
73856 +           program->flags &= ~PRG_SUSPEND;
73857 +           program->flags |=  PRG_RUNNING;
73858 +           program->sched_time = gettime();
73859 +           prgsignal(program, SIGCONT);
73860 +       }
73861 +       else
73862 +           status = EACCES;
73863 +    }
73864 +    else
73865 +       status = ESRCH;
73866 +
73867 +    kmutex_unlock(&rms_lock);
73868 +    return(status);
73869 +}
73870 +
73871 +
73872 +int rms_ncaps(int *ncaps)
73873 +{
73874 +    struct proc_desc *pdesc;
73875 +    int status = ESUCCESS;
73876 +    
73877 +    kmutex_lock(&rms_lock);
73878 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
73879 +    {
73880 +       if (copyout(&pdesc->program->ncaps, ncaps, sizeof(int)))
73881 +           status = EFAULT;
73882 +    }
73883 +    else
73884 +       status = ESRCH;
73885 +
73886 +    kmutex_unlock(&rms_lock);
73887 +    return(status);
73888 +}
73889 +
73890 +int rms_getprgid(pid_t pid, int *id)
73891 +{
73892 +    struct proc_desc *pdesc;
73893 +    int status = ESUCCESS;
73894 +    
73895 +    if (pid == 0)
73896 +       pid = CURPROC()->p_pid;
73897 +    
73898 +    kmutex_lock(&rms_lock);
73899 +    if ((pdesc = findProcess(pid)) != NULL)
73900 +    {
73901 +       if (copyout(&pdesc->program->id, id, sizeof(int)))
73902 +           status = EFAULT;
73903 +    }
73904 +    else
73905 +       status = ESRCH;
73906 +
73907 +    kmutex_unlock(&rms_lock);
73908 +    return(status);
73909 +}
73910 +
73911 +int rms_setcap(int index, int ctx)
73912 +{
73913 +    struct proc_desc *pdesc;
73914 +    struct cap_desc *cdesc;
73915 +    int status = EINVAL;
73916 +    
73917 +    DBG(printk("rms_setcap    :: process %d cap %d ctx %d\n",CURPROC()->p_pid,index,ctx));
73918 +
73919 +    kmutex_lock(&rms_lock);
73920 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
73921 +    {
73922 +       for (cdesc = pdesc->program->caps; cdesc; cdesc = cdesc->next)
73923 +           if (cdesc->index == index && 0 <= ctx && ctx <= (cdesc->cap.cap_highcontext - cdesc->cap.cap_lowcontext + 1))
73924 +           {
73925 +               pdesc->mycap = index;
73926 +               pdesc->myctx = cdesc->cap.cap_lowcontext + ctx;
73927 +               status = ESUCCESS;
73928 +           }
73929 +    }
73930 +    else
73931 +       status = ESRCH;
73932 +
73933 +    kmutex_unlock(&rms_lock);
73934 +    return(status);
73935 +}
73936 +
73937 +
73938 +int rms_mycap(int *index)
73939 +{
73940 +    struct proc_desc *pdesc;
73941 +    int status = ESUCCESS;
73942 +    
73943 +    DBG(printk("rms_mycap :: process %d\n", CURPROC()->p_pid));
73944 +    
73945 +    kmutex_lock(&rms_lock);
73946 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
73947 +    {
73948 +       DBG(printk("rms_mycap :: found process %d mycap = %d\n", CURPROC()->p_pid, pdesc->mycap));
73949 +       if (copyout(&pdesc->mycap, index, sizeof(int)))
73950 +           status = EFAULT;
73951 +    }
73952 +    else
73953 +       status = ESRCH;
73954 +
73955 +    kmutex_unlock(&rms_lock);
73956 +    return(status);
73957 +}
73958 +
73959 +int rms_getcap(int index, ELAN_CAPABILITY *cap)
73960 +{
73961 +    struct proc_desc *pdesc;
73962 +    struct cap_desc *cdesc;
73963 +    int status = ESUCCESS;
73964 +    
73965 +    kmutex_lock(&rms_lock);
73966 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
73967 +    {
73968 +       for (cdesc = pdesc->program->caps; cdesc; cdesc = cdesc->next)
73969 +           if (cdesc->index == index)
73970 +               break;
73971 +       
73972 +       if (cdesc)
73973 +       {
73974 +           /* tell each process about its own context */
73975 +           cdesc->cap.cap_mycontext = pdesc->myctx;
73976 +           
73977 +           if (copyout(&cdesc->cap, cap, ELAN_CAP_SIZE(&cdesc->cap)))
73978 +               status = EFAULT;
73979 +           
73980 +           DBG(printk("rms_getcap    :: program %d index %d context %d<-->%d\n", pdesc->program->id, 
73981 +                      cdesc->index, cdesc->cap.cap_lowcontext, cdesc->cap.cap_highcontext));
73982 +       }
73983 +       else
73984 +           status = EINVAL;
73985 +    }
73986 +    else
73987 +       status = ESRCH;
73988 +    
73989 +    kmutex_unlock(&rms_lock);
73990 +    return(status);
73991 +}
73992 +
73993 +
73994 +static int
73995 +rms_fork_callback (struct PROC_STRUCT *curproc, struct PROC_STRUCT *child)
73996 +{
73997 +    struct prg_desc *program;
73998 +    struct proc_desc *parent;
73999 +    struct proc_desc *pdesc = NULL;
74000 +
74001 +    kmutex_lock(&rms_lock);
74002 +    
74003 +    DBG(printk("rms_fork_func :: phase is fork pid %d child %d\n", curproc->p_pid, child->p_pid));
74004 +
74005 +    /*
74006 +     * find the process that forked
74007 +     */
74008 +    if ((parent = findProcess(curproc->p_pid)) != NULL)
74009 +    {
74010 +       program = parent->program;
74011 +       
74012 +       DBG(printk("rms_fork_func :: program is %d flags %d\n", program->id, program->flags));
74013 +       
74014 +       /*
74015 +        * processes can be blocked in fork while prgsignal is in progress
74016 +        * so check to see if the PRG_KILLED flag is set
74017 +        */
74018 +       if (program->flags & PRG_KILLED)
74019 +           DBG(printk("rms_fork_func :: fork handler called after program killed\n"));
74020 +       else
74021 +       {
74022 +           /*
74023 +            * create a new process description and add to program
74024 +            */
74025 +           KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE);
74026 +           if (pdesc)
74027 +           {
74028 +               pdesc->next = program->pdescs;
74029 +               program->pdescs = pdesc;
74030 +               pdesc->proc = child;
74031 +               pdesc->mycap = parent->mycap;
74032 +               pdesc->myctx = parent->myctx;
74033 +               pdesc->program = program;
74034 +               pdesc->vp = -1;              /* assigned by elaninitdone */
74035 +               program->nprocs++;
74036 +           }
74037 +           else
74038 +               printk("rms_fork_func :: memory allocation failed\n");
74039 +       }
74040 +    }
74041 +    else
74042 +       DBG(printk("rms_fork_func :: no program\n"));
74043 +    
74044 +    kmutex_unlock (&rms_lock);
74045 +
74046 +    return pdesc == NULL;
74047 +}
74048 +
74049 +static void
74050 +rms_exit_callback (struct PROC_STRUCT *curproc)
74051 +{
74052 +    struct prg_desc *program;
74053 +    struct proc_desc *pdesc, **pdescp, *p;
74054 +#ifdef DIGITAL_UNIX
74055 +    struct rusage ru;
74056 +#endif
74057 +    long maxrss;
74058 +
74059 +    kmutex_lock(&rms_lock);
74060 +    
74061 +    DBG(printk("rms_exit_func :: process %d exiting\n", curproc->p_pid));
74062 +
74063 +    /*
74064 +     * find the process that exited and accumulate 
74065 +     * resource usage in its parent program
74066 +     */
74067 +    for (program = programs, pdesc = 0; program && !pdesc; program = program->next)
74068 +    {
74069 +       pdescp = &program->pdescs;
74070 +       while ((pdesc = *pdescp) != NULL)
74071 +       {
74072 +           if (pdesc->proc->p_pid == curproc->p_pid)
74073 +           {
74074 +               /*
74075 +                * keep track of the resources used
74076 +                */
74077 +#if defined(SOLARIS)
74078 +               program->cutime += TICK_TO_MSEC(pdesc->proc->p_utime);
74079 +               program->cstime += TICK_TO_MSEC(pdesc->proc->p_stime);
74080 +               
74081 +#elif defined(LINUX)
74082 +#ifdef PROCESS_ACCT
74083 +       DBG(printk("rms_exit_func :: process %d exit utime %ld clks stime %ld clks\n",
74084 +                               pdesc->proc->p_pid,
74085 +                               TIMEVAL_TO_CT(&pdesc->proc->utime),                
74086 +                               TIMEVAL_TO_CT(&pdesc->proc->stime)));              
74087 +       program->cutime += TIMEVAL_TO_MSEC(&pdesc->proc->utime);      
74088 +       program->cstime += TIMEVAL_TO_MSEC(&pdesc->proc->stime);      
74089 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)                           
74090 +               DBG(printk("rms_exit_func :: process %d exit utime %ld clks stime %ld clks\n", 
74091 +                          pdesc->proc->p_pid, pdesc->proc->times.tms_utime, 
74092 +                          pdesc->proc->times.tms_stime));
74093 +               
74094 +               program->cutime += CT_TO_MSEC(pdesc->proc->times.tms_utime);
74095 +               program->cstime += CT_TO_MSEC(pdesc->proc->times.tms_stime);
74096 +#else
74097 +               DBG(printk("rms_exit_func :: process %d exit utime %ld clks stime %ld clks\n", 
74098 +                          pdesc->proc->p_pid, pdesc->proc->utime, pdesc->proc->stime));
74099 +               
74100 +               program->cutime += CT_TO_MSEC(pdesc->proc->utime);
74101 +               program->cstime += CT_TO_MSEC(pdesc->proc->stime);
74102 +#endif
74103 +               program->majflt += pdesc->proc->maj_flt;
74104 +               maxrss = PROC_RSS(pdesc->proc) >> (20 - PAGE_SHIFT);
74105 +               
74106 +#elif defined(DIGITAL_UNIX)
74107 +               if (rms_getrusage(pdesc, &ru) == 0)
74108 +               {
74109 +                   program->cutime += TIMEVAL_TO_MSEC(&ru.ru_utime);
74110 +                   program->cstime += TIMEVAL_TO_MSEC(&ru.ru_stime);
74111 +                   program->majflt += ru.ru_majflt;
74112 +                   
74113 +                   /* convert maxrss to megabytes */
74114 +                   maxrss = ru.ru_maxrss >> 10;
74115 +               }
74116 +#endif
74117 +               
74118 +               /*
74119 +                * shared memory segment cleanup
74120 +                */
74121 +#if defined(DIGITAL_UNIX)
74122 +               rms_shmcleanup(-1);
74123 +#elif defined(LINUX)
74124 +               shm_cleanup();
74125 +#endif 
74126 +               
74127 +               /* 
74128 +                * remove process from program 
74129 +                */
74130 +               *pdescp = pdesc->next;
74131 +               KMEM_FREE(pdesc, sizeof(struct proc_desc));
74132 +               program->nprocs--;
74133 +               
74134 +               /*
74135 +                * update the memory high water mark for the program
74136 +                */
74137 +               for (p = program->pdescs; p; p = p->next)
74138 +               {
74139 +#if defined(DIGITAL_UNIX)
74140 +                   if (rms_getrusage(p, &ru) < 0)
74141 +                       continue;
74142 +                   
74143 +                   /* convert maxrss to megabytes */
74144 +                   maxrss += ru.ru_maxrss >> 10;
74145 +                   
74146 +#elif defined(LINUX)                   
74147 +                   maxrss += PROC_RSS(p->proc) >> (20 - PAGE_SHIFT);
74148 +#endif
74149 +               }
74150 +               if (maxrss > program->maxrss)
74151 +                   program->maxrss = maxrss;
74152 +               
74153 +               DBG(printk("rms_exit_func :: program %d procs %d mem %ld\n", program->id, program->nprocs, program->maxrss));
74154 +               
74155 +               /*
74156 +                * final update to the program if this is the last process
74157 +                */
74158 +               if (program->nprocs == 0)
74159 +               {
74160 +                   program->end_time = gettime();
74161 +                   program->flags &= ~PRG_RUNNING;
74162 +                   program->accum_atime += program->ncpus * (program->end_time - program->sched_time);
74163 +                   DBG(printk("rms_exit_func :: last process has gone\n"));
74164 +               }
74165 +               break;
74166 +           }
74167 +           else
74168 +               pdescp = &pdesc->next;
74169 +       }
74170 +    }
74171 +    kmutex_unlock  (&rms_lock);
74172 +}
74173 +
74174 +#if defined(LINUX)
74175 +static int
74176 +rms_ptrack_callback (void *arg, int phase, struct task_struct *child)
74177 +{
74178 +    switch (phase)
74179 +    {
74180 +    case PTRACK_PHASE_CLONE:
74181 +       if (rms_fork_callback (current, child))
74182 +           return PTRACK_DENIED;
74183 +       else
74184 +           return PTRACK_INNHERIT;
74185 +
74186 +    case PTRACK_PHASE_CLONE_FAIL:
74187 +       DBG(printk("rms_fork_func :: fork failed pid %d child %d\n", current->p_pid, child->p_pid));
74188 +       rms_exit_callback(child);
74189 +       break;
74190 +
74191 +    case PTRACK_PHASE_EXIT:
74192 +       rms_exit_callback(current);
74193 +       break;
74194 +    }
74195 +    return PTRACK_FINISHED;
74196 +}
74197 +
74198 +#else
74199 +
74200 +static void
74201 +rms_xa_callback (void *arg, int phase, void *ctask)
74202 +{
74203 +    switch (phase)
74204 +    {
74205 +    case XA_FORK:
74206 +       if (rms_fork_callback (CURPROC(), (struct PROC_STRUCT *)task_to_proc(ctask)))
74207 +           psignal(task_to_proc(ctask), SIGKILL);
74208 +       break;
74209 +    case XA_EXIT:
74210 +       rms_exit_callback (CURPROC());
74211 +       break;
74212 +    }
74213 +}
74214 +
74215 +#endif
74216 +
74217 +#ifdef DIGITAL_UNIX
74218 +
74219 +/*
74220 + * NB: These functions will only work on steelos.
74221 + */
74222 +
74223 +/*
74224 + * rms_setcorepath
74225 + *
74226 + * set a path at which to dump core if the task aborts  
74227 + *
74228 + * enhanced core file names must be enabled for this to work
74229 + */
74230 +int rms_setcorepath(char *corepath)
74231 +{
74232 +    int    length;
74233 +    char  *path;
74234 +    int    status; 
74235 +    struct proc_desc *pdesc;
74236 +    
74237 +    /* 
74238 +     * access restricted - we don't want users moving
74239 +     * their corepath and generating a huge I/O load
74240 +     */
74241 +    if (CURUID())
74242 +       return(EACCES);
74243 +    
74244 +    if (!(pdesc = findProcess(CURPROC()->p_pid)))
74245 +       return(ESRCH);
74246 +    
74247 +    if (pdesc->program->corepath)
74248 +       return(EEXIST);
74249 +    
74250 +    KMEM_ALLOC(path, char *, MAXCOREPATHLEN + 1, TRUE);
74251 +    if (path == 0)
74252 +       return(ENOMEM);
74253 +    
74254 +    if (copyinstr(corepath, path, MAXCOREPATHLEN, &length))
74255 +       return(EFAULT);
74256 +    
74257 +    path[length] = 0;
74258 +    status = add_corepath(path);
74259 +    
74260 +    DBG(printk("rms_setcorepa :: id %d corepath %s status %d\n", pdesc->program->id, path, status));
74261 +    
74262 +    if (status == ESUCCESS)
74263 +       pdesc->program->corepath = path;
74264 +    else
74265 +       KMEM_FREE(path, MAXCOREPATHLEN + 1);
74266 +    
74267 +    return(status);
74268 +}
74269 +
74270 +static int find_corepath(pid_t pid, char *path, int len)
74271 +{
74272 +    struct proc *procp;
74273 +    struct utask *utask;
74274 +    int status = ESUCCESS;
74275 +
74276 +    procp = pfind(pid);
74277 +    if (procp == NULL)
74278 +        return(ENOENT);
74279 +    
74280 +    utask = proc_to_utask(procp);
74281 +    
74282 +    if (utask->uu_coredir)
74283 +        bcopy(utask->uu_coredir,path,len);
74284 +    else
74285 +        status = ENOENT;
74286 +    
74287 +    /* pfind takes out a reference */
74288 +    P_UNREF(procp);
74289 +
74290 +    return(status);
74291 +}
74292 +
74293 +int rms_getcorepath(pid_t pid, char *corepath, int maxlen)
74294 +{
74295 +    char src[MAXCOREPATHLEN];
74296 +    int len;
74297 +    int status;
74298 +    
74299 +    if (maxlen < 2)
74300 +       return(EINVAL);
74301 +    
74302 +    len = MIN(maxlen, MAXCOREPATHLEN);
74303 +    
74304 +    status = find_corepath(pid, src, len);
74305 +    
74306 +    if (status == ESUCCESS)
74307 +        len = strlen(src)+1;
74308 +    else if (status == ENOENT) 
74309 +    {
74310 +       len = 2;
74311 +       src[0] = '.';
74312 +        src[1] = '\0';
74313 +        status = ESUCCESS;
74314 +    }
74315 +    
74316 +    if (copyout(src, corepath, len))
74317 +       return(EFAULT);
74318 +    
74319 +    return(status);
74320 +}
74321 +
74322 +#endif
74323 +
74324 +/*
74325 + * rms_elaninitdone - mark a process as having successfully completed elan initialisation
74326 + */
74327 +int rms_elaninitdone(int vp)
74328 +{
74329 +    int status = ESUCCESS;
74330 +    struct proc_desc *pdesc;
74331 +    
74332 +    DBG(printk("rms_elaninit  :: process %d vp %d\n", CURPROC()->p_pid, vp));
74333 +    
74334 +    kmutex_lock(&rms_lock);
74335 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
74336 +       pdesc->vp = vp;
74337 +    else
74338 +       status = ESRCH;
74339 +    kmutex_unlock(&rms_lock);
74340 +    return(status);
74341 +}
74342 +
74343 +
74344 +/*
74345 + * rms_prgelanpids - return the ids of processes that have completed elan initialisation
74346 + */
74347 +int rms_prgelanpids(int id, int maxpids, int *vps, pid_t *pids, int *npids)
74348 +{
74349 +    struct prg_desc *program;
74350 +    struct proc_desc *pdesc;
74351 +    pid_t *pidbuf;
74352 +    int status = ESUCCESS, count = 0, *vpbuf;
74353 +    
74354 +    DBG(printk("rms_elanpids  :: process %d id %d\n", CURPROC()->p_pid, id));
74355 +    
74356 +    kmutex_lock(&rms_lock);
74357 +    
74358 +    if ((program = findProgram(id)) != NULL)
74359 +    {
74360 +       if (program->nprocs > 0)
74361 +       {
74362 +           KMEM_ALLOC(pidbuf, pid_t *, program->nprocs * sizeof(pid_t), TRUE);
74363 +           KMEM_ALLOC(vpbuf, int *, program->nprocs * sizeof(int), TRUE);
74364 +           if (pidbuf && vpbuf)
74365 +           {
74366 +               for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74367 +                   if (pdesc->vp >= 0)
74368 +                   {
74369 +                       pidbuf[count] = pdesc->proc->p_pid;
74370 +                       vpbuf[count] = pdesc->vp;
74371 +                       count++;
74372 +                   }
74373 +           
74374 +               if (count > 0 && (copyout(pidbuf, pids, sizeof(pid_t) * MIN(count, maxpids)) ||
74375 +                                 copyout(vpbuf, vps, sizeof(int) * MIN(count, maxpids))))
74376 +                   status = EFAULT;
74377 +               
74378 +               KMEM_FREE(pidbuf, program->nprocs * sizeof(pid_t));
74379 +               KMEM_FREE(vpbuf, program->nprocs * sizeof(int));
74380 +           }
74381 +           else
74382 +               status = ENOMEM;
74383 +       }
74384 +
74385 +       if (copyout(&count, npids, sizeof(int)))
74386 +           status = EFAULT;
74387 +    }
74388 +    else
74389 +       status = ESRCH;
74390 +
74391 +    kmutex_unlock(&rms_lock);
74392 +    
74393 +    return(status);
74394 +
74395 +}
74396 +
74397 +int rms_setpset(int psid)
74398 +{
74399 +    struct prg_desc *program;
74400 +    struct proc_desc *pdesc;
74401 +    int status = ESUCCESS;
74402 +
74403 +    if (CURUID())
74404 +       return(EACCES);
74405 +
74406 +    kmutex_lock(&rms_lock);
74407 +    
74408 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
74409 +    {
74410 +       program = pdesc->program;
74411 +       program->psid = psid;
74412 +    }
74413 +    else
74414 +       status = ESRCH;
74415 +
74416 +    kmutex_unlock(&rms_lock);
74417 +    return(status);
74418 +}
74419 +
74420 +
74421 +int rms_getpset(int id, int *psid)
74422 +{
74423 +    struct prg_desc *program;
74424 +    int status = ESUCCESS;
74425 +    
74426 +    kmutex_lock(&rms_lock);
74427 +    if ((program = findProgram(id)) != NULL)
74428 +    {
74429 +       if (copyout(&program->psid, psid, sizeof(int)))
74430 +           status = EFAULT;
74431 +    }
74432 +    else
74433 +       status = ESRCH;
74434 +    
74435 +    kmutex_unlock(&rms_lock);
74436 +    return(status);
74437 +}
74438 +
74439 +int
74440 +rms_setelanstats(int id, uint64_t ebytes, uint64_t exfers)
74441 +{
74442 +    struct prg_desc *program;
74443 +    int status = ESUCCESS;
74444 +    
74445 +    DBG(printk("rms_setelanst :: process %d id %d\n", CURPROC()->p_pid, id));
74446 +
74447 +    kmutex_lock(&rms_lock);
74448 +    if ((program = findProgram(id)) != NULL)
74449 +    {
74450 +       if (CURUID() == 0 || CURUID() == program->uid)
74451 +       {
74452 +           program->ebytes = ebytes;
74453 +           program->exfers = exfers;
74454 +       }
74455 +       else
74456 +           status = EACCES;
74457 +    }
74458 +    else
74459 +       status = ESRCH;
74460 +    
74461 +    kmutex_unlock(&rms_lock);
74462 +    return(status);
74463 +}
74464 +
74465 +rms_modversion()
74466 +{
74467 +    return(RMS_MODVERSION);
74468 +}
74469 +
74470 +
74471 +/*
74472 + * Local variables:
74473 + * c-file-style: "stroustrup"
74474 + * End:
74475 + */
74476 +
74477 +
74478 +
74479 +
74480 +
74481 +
74482 +
74483 Index: linux-2.4.21/drivers/net/qsnet/rms/rms_kern_Linux.c
74484 ===================================================================
74485 --- linux-2.4.21.orig/drivers/net/qsnet/rms/rms_kern_Linux.c    2004-02-23 16:02:56.000000000 -0500
74486 +++ linux-2.4.21/drivers/net/qsnet/rms/rms_kern_Linux.c 2005-06-01 23:12:54.703423296 -0400
74487 @@ -0,0 +1,430 @@
74488 +/*
74489 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
74490 + *
74491 + *    For licensing information please see the supplied COPYING file
74492 + *
74493 + */
74494 +
74495 +#ident "$Id: rms_kern_Linux.c,v 1.20 2004/05/14 08:55:57 duncan Exp $"
74496 +/*      $Source: /cvs/master/quadrics/rmsmod/rms_kern_Linux.c,v $*/
74497 +
74498 +#include <qsnet/kernel.h>
74499 +
74500 +#include <linux/sysctl.h>
74501 +#include <linux/init.h>
74502 +#include <linux/module.h>
74503 +#include <linux/proc_fs.h>
74504 +
74505 +#include <rms/rmscall.h>
74506 +#include <rms/rmsio.h>
74507 +
74508 +MODULE_AUTHOR("Quadrics Ltd");
74509 +MODULE_DESCRIPTION("RMS support module");
74510 +MODULE_LICENSE("GPL");
74511 +
74512 +int rms_debug = 0;
74513 +
74514 +ctl_table rms_table[] = {
74515 +    {
74516 +       .ctl_name = 1,
74517 +       .procname = "rms_debug",
74518 +       .data     = &rms_debug,
74519 +       .maxlen   = sizeof(int),
74520 +       .mode     = 0644,
74521 +       .child    = NULL,
74522 +       .proc_handler = &proc_dointvec,
74523 +    },
74524 +    {0}
74525 +};
74526 +
74527 +ctl_table rms_root_table[] = {
74528 +    {
74529 +       .ctl_name = CTL_DEBUG,
74530 +       .procname = "rms",
74531 +       .data     = NULL,
74532 +       .maxlen   = 0,
74533 +       .mode     = 0555,
74534 +       .child    = rms_table,
74535 +    },
74536 +    {0}
74537 +};
74538 +
74539 +static struct ctl_table_header *rms_sysctl_header;
74540 +
74541 +static int rms_open (struct inode *ino, struct file *fp);
74542 +static int rms_release (struct inode *ino, struct file *fp);
74543 +static int rms_ioctl (struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg);
74544 +
74545 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
74546 +static int
74547 +rms_ioctl32_cmds[] =
74548 +{
74549 +    RMSIO_GETPRGID32,
74550 +    RMSIO_GETCAP32
74551 +};
74552 +
74553 +static int      rms_ioctl32 (unsigned int fd, unsigned int cmd, 
74554 +                            unsigned long arg, struct file *file);
74555 +#endif
74556 +
74557 +static struct file_operations rms_fops =
74558 +{
74559 +    .owner   = THIS_MODULE,
74560 +    .ioctl   = rms_ioctl,
74561 +    .open    = rms_open,
74562 +    .release = rms_release,
74563 +};
74564 +
74565 +struct proc_dir_entry *rms_procfs_programs;
74566 +static struct proc_dir_entry *rms_procfs_root;
74567 +
74568 +int version_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
74569 +{
74570 +    return(sprintf(page, "$Id: rms_kern_Linux.c,v 1.20 2004/05/14 08:55:57 duncan Exp $\n"));
74571 +}
74572 +
74573 +static int __init rms_start(void)
74574 +{
74575 +    struct proc_dir_entry *p;
74576 +    int res;
74577 +
74578 +    if ((rms_sysctl_header = register_sysctl_table(rms_root_table, 1)) == 0)
74579 +    {
74580 +       printk ("rmsmod: failed to register sysctl table\n");
74581 +       return (-ENXIO);
74582 +    }
74583 +    
74584 +    if ((rms_procfs_root = proc_mkdir("rms",  NULL)) == NULL ||
74585 +       (rms_procfs_programs = proc_mkdir("programs",  rms_procfs_root)) == NULL ||
74586 +       (p = create_proc_entry ("control", S_IRUGO, rms_procfs_root)) == NULL)
74587 +    {
74588 +       unregister_sysctl_table (rms_sysctl_header);
74589 +       printk ("rmsmod: failed to register /proc/rms\n");
74590 +       return (-ENXIO);
74591 +    }
74592 +    p->proc_fops = &rms_fops;
74593 +    p->owner     = THIS_MODULE;
74594 +    p->data      = NULL;
74595 +
74596 +    if ((p = create_proc_entry ("version", S_IRUGO, rms_procfs_root)) != NULL)
74597 +    {
74598 +       p->owner = THIS_MODULE;
74599 +       p->data = NULL;
74600 +       p->read_proc = version_callback;
74601 +    }
74602 +
74603 +    if ((res = rms_init()) != ESUCCESS)
74604 +    {
74605 +       remove_proc_entry ("programs", rms_procfs_root);
74606 +       remove_proc_entry ("control", rms_procfs_root);
74607 +       remove_proc_entry ("rms", NULL);
74608 +       unregister_sysctl_table (rms_sysctl_header);
74609 +       return (-res);
74610 +    }
74611 +
74612 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
74613 +    lock_kernel();
74614 +    {
74615 +       extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *));
74616 +       register int i;
74617 +       for (i = 0; i < sizeof (rms_ioctl32_cmds)/sizeof(rms_ioctl32_cmds[0]); i++)
74618 +           register_ioctl32_conversion (rms_ioctl32_cmds[i], rms_ioctl32);
74619 +    }
74620 +    unlock_kernel();
74621 +#endif
74622 +    return (0);
74623 +}
74624 +
74625 +static void __exit rms_exit(void)
74626 +{
74627 +    rms_fini();
74628 +
74629 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
74630 +    lock_kernel();
74631 +    {
74632 +       extern void unregister_ioctl32_conversion(unsigned int cmd);
74633 +       register int i;
74634 +
74635 +       for (i = 0; i < sizeof (rms_ioctl32_cmds)/sizeof(rms_ioctl32_cmds[0]); i++)
74636 +           unregister_ioctl32_conversion (rms_ioctl32_cmds[i]);
74637 +    }
74638 +    unlock_kernel();
74639 +#endif
74640 +
74641 +    remove_proc_entry ("version", rms_procfs_root);
74642 +    remove_proc_entry ("programs", rms_procfs_root);
74643 +    remove_proc_entry ("control", rms_procfs_root);
74644 +    remove_proc_entry ("rms", NULL);
74645 +    unregister_sysctl_table(rms_sysctl_header);
74646 +}
74647 +
74648 +/* Declare the module init and exit functions */
74649 +module_init(rms_start);
74650 +module_exit(rms_exit);
74651 +
74652 +static int
74653 +rms_open (struct inode *inode, struct file *fp)
74654 +{
74655 +    MOD_INC_USE_COUNT;
74656 +    fp->private_data = NULL;
74657 +
74658 +    return (0);
74659 +}
74660 +
74661 +static int
74662 +rms_release (struct inode *inode, struct file *fp)
74663 +{
74664 +    MOD_DEC_USE_COUNT;
74665 +    return (0);
74666 +}
74667 +
74668 +static int 
74669 +rms_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
74670 +{
74671 +    int res;
74672 +
74673 +    switch (cmd) 
74674 +    {
74675 +/* no corepath support in Linux yet */
74676 +#if 0
74677 +    case RMSIO_SETCOREPATH:
74678 +       res = rms_setcorepath((caddr_t)arg);
74679 +       break;
74680 +       
74681 +    case RMSIO_GETCOREPATH:
74682 +    {
74683 +       RMSIO_GETCOREPATH_STRUCT args;
74684 +
74685 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74686 +           return (-EFAULT);
74687 +
74688 +       res = rms_getcorepath(args.pid, args.corepath, args.maxlen);
74689 +       break;
74690 +    }
74691 +#endif
74692 +       
74693 +    case RMSIO_PRGCREATE:
74694 +    {
74695 +       RMSIO_PRGCREATE_STRUCT args;
74696 +
74697 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74698 +           return (-EFAULT);
74699 +
74700 +       res = rms_prgcreate(args.id, args.uid, args.cpus);
74701 +       break;
74702 +    }
74703 +
74704 +    case RMSIO_PRGDESTROY:
74705 +       res = rms_prgdestroy(arg);
74706 +       break;
74707 +       
74708 +    case RMSIO_PRGIDS:
74709 +    {
74710 +       RMSIO_PRGIDS_STRUCT args;
74711 +       
74712 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74713 +           return (-EFAULT);
74714 +
74715 +       res = rms_prgids(args.maxids, args.prgids, args.nprgs);
74716 +       break;
74717 +    }
74718 +
74719 +    case RMSIO_PRGINFO:
74720 +    {
74721 +       RMSIO_PRGINFO_STRUCT args;
74722 +       
74723 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74724 +           return (-EFAULT);
74725 +
74726 +       res = rms_prginfo(args.id, args.maxpids, args.pids, args.nprocs);
74727 +       break;
74728 +    }
74729 +       
74730 +    case RMSIO_PRGSIGNAL:
74731 +    {
74732 +       RMSIO_PRGSIGNAL_STRUCT args;
74733 +
74734 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74735 +           return (-EFAULT);
74736 +
74737 +       res = rms_prgsignal(args.id, args.signo);
74738 +       break;
74739 +    }
74740 +       
74741 +    case RMSIO_PRGADDCAP:
74742 +    {
74743 +       RMSIO_PRGADDCAP_STRUCT args;
74744 +
74745 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74746 +           return (-EFAULT);
74747 +
74748 +       res = rms_prgaddcap(args.id, args.index, args.cap);
74749 +       break;
74750 +    }
74751 +
74752 +    case RMSIO_SETCAP:
74753 +    {
74754 +       RMSIO_SETCAP_STRUCT args;
74755 +
74756 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74757 +           return (-EFAULT);
74758 +
74759 +       res = rms_setcap(args.index, args.ctx);
74760 +       break;
74761 +    }
74762 +       
74763 +    case RMSIO_NCAPS:
74764 +       res = rms_ncaps((int *)arg);
74765 +       break;
74766 +       
74767 +    case RMSIO_GETPRGID:
74768 +    {
74769 +       RMSIO_GETPRGID_STRUCT args;
74770 +
74771 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74772 +           return (-EFAULT);
74773 +
74774 +       res = rms_getprgid(args.pid, args.id);
74775 +       break;
74776 +    }
74777 +
74778 +    case RMSIO_GETMYCAP:
74779 +       res = rms_mycap((int *)arg);
74780 +       break;
74781 +       
74782 +    case RMSIO_GETCAP:
74783 +    {
74784 +       RMSIO_GETCAP_STRUCT args;
74785 +
74786 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74787 +           return (-EFAULT);
74788 +
74789 +       res = rms_getcap(args.index, args.cap);
74790 +       break;
74791 +    }
74792 +
74793 +    case RMSIO_PRGGETSTATS:
74794 +    {
74795 +       RMSIO_PRGGETSTATS_STRUCT args;
74796 +
74797 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74798 +           return (-EFAULT);
74799 +
74800 +       res = rms_prggetoldstats(args.id, args.stats);
74801 +       break;
74802 +    }
74803 +
74804 +    case RMSIO_PRGGETSTATS2:
74805 +    {
74806 +       RMSIO_PRGGETSTATS2_STRUCT args;
74807 +
74808 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74809 +           return (-EFAULT);
74810 +
74811 +       res = rms_prggetstats(args.id, args.stats);
74812 +       break;
74813 +    }
74814 +
74815 +    case RMSIO_PRGSUSPEND:
74816 +       res = rms_prgsuspend(arg);
74817 +       break;
74818 +       
74819 +    case RMSIO_PRGRESUME:
74820 +       res = rms_prgresume(arg);
74821 +       break;
74822 +
74823 +    case RMSIO_ELANINITDONE:
74824 +       res = rms_elaninitdone(arg);
74825 +       break;
74826 +
74827 +    case RMSIO_PRGELANPIDS:
74828 +    {
74829 +       RMSIO_PRGELANPIDS_STRUCT args;
74830 +
74831 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74832 +           return (-EFAULT);
74833 +
74834 +       res = rms_prgelanpids(args.id, args.maxpids, args.vps, args.pids, args.npids);
74835 +       break;
74836 +    }
74837 +
74838 +    case RMSIO_SETELANSTATS:
74839 +    {
74840 +       RMSIO_SETELANSTATS_STRUCT args;
74841 +       elanstats_t estats;
74842 +
74843 +       if (copy_from_user(&args, (void *)arg, sizeof(args)) ||
74844 +           copy_from_user(&estats, (void *)args.estats, sizeof(estats)))
74845 +           return(-EFAULT);
74846 +       
74847 +       res = rms_setelanstats(args.id, estats.ebytes, estats.exfers);
74848 +       break;
74849 +    }
74850 +
74851 +    case RMSIO_MODVERSION:
74852 +    {
74853 +       RMSIO_MODVERSION_STRUCT args;
74854 +       int version = rms_modversion();
74855 +       
74856 +       if (copy_from_user (&args, (void *)arg, sizeof (args)))
74857 +           return (-EFAULT);
74858 +       
74859 +       if (copyout(&version, args.version, sizeof(int)))
74860 +           res = EFAULT;
74861 +       else
74862 +           res = ESUCCESS;
74863 +
74864 +       break;
74865 +    }
74866 +
74867 +    default:
74868 +       res = EINVAL;
74869 +       break;
74870 +    }
74871 +
74872 +    return ((res == 0) ? 0 : -res);
74873 +}
74874 +
74875 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
74876 +static int
74877 +rms_ioctl32 (unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
74878 +{
74879 +    int res;
74880 +
74881 +    switch (cmd)
74882 +    {
74883 +    case RMSIO_GETPRGID32:
74884 +    {
74885 +       RMSIO_GETPRGID_STRUCT32 args;
74886 +
74887 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74888 +           return (-EFAULT);
74889 +
74890 +       res = rms_getprgid(args.pid, (int *)(unsigned long) args.idptr);
74891 +       break;
74892 +    }
74893 +       
74894 +    case RMSIO_GETCAP32:
74895 +    {
74896 +       RMSIO_GETCAP_STRUCT32 args;
74897 +
74898 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
74899 +           return (-EFAULT);
74900 +
74901 +       res = rms_getcap(args.index, (ELAN_CAPABILITY *)(unsigned long) args.capptr);
74902 +       break;
74903 +    }
74904 +
74905 +    default:
74906 +       return (sys_ioctl (fd, cmd, arg));
74907 +    }
74908 +
74909 +    return ((res == 0) ? 0 : -res);
74910 +}
74911 +#endif
74912 +
74913 +/*
74914 + * Local variables:
74915 + * c-file-style: "stroustrup"
74916 + * End:
74917 + */
74918 Index: linux-2.4.21/fs/exec.c
74919 ===================================================================
74920 --- linux-2.4.21.orig/fs/exec.c 2005-06-01 22:58:09.044063984 -0400
74921 +++ linux-2.4.21/fs/exec.c      2005-06-01 23:12:54.704423144 -0400
74922 @@ -51,6 +51,7 @@
74923  #ifdef CONFIG_KMOD
74924  #include <linux/kmod.h>
74925  #endif
74926 +#include <linux/ptrack.h>
74927  
74928  int core_uses_pid;
74929  char core_pattern[65] = "core";
74930 @@ -1125,6 +1126,10 @@
74931         if (retval < 0) 
74932                 goto out; 
74933  
74934 +
74935 +       /* Notify any ptrack callbacks of the process exec */
74936 +       ptrack_call_callbacks (PTRACK_PHASE_EXEC, NULL);
74937 +       
74938         retval = search_binary_handler(&bprm,regs);
74939         if (retval >= 0)
74940                 /* execve success */
74941 Index: linux-2.4.21/include/elan/bitmap.h
74942 ===================================================================
74943 --- linux-2.4.21.orig/include/elan/bitmap.h     2004-02-23 16:02:56.000000000 -0500
74944 +++ linux-2.4.21/include/elan/bitmap.h  2005-06-01 23:12:54.704423144 -0400
74945 @@ -0,0 +1,74 @@
74946 +/*
74947 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
74948 + *
74949 + *    For licensing information please see the supplied COPYING file
74950 + *
74951 + */
74952 +
74953 +#ifndef __QSNET_BITMAP_H
74954 +#define __QSNET_BITMAP_H
74955 +
74956 +#ident "$Id: bitmap.h,v 1.5 2004/01/20 17:32:15 david Exp $"
74957 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/bitmap.h,v $ */
74958 +
74959 +typedef unsigned int                   bitmap_t;
74960 +
74961 +#define BT_NBIPUL                      32                      /* n bits per bitmap_t */
74962 +#define BT_ULSHIFT                     5                       /* log 2 BT_NBIPUL to extract word index */
74963 +#define BT_ULMASK                      0x1f                    /* to extract bit index */
74964 +
74965 +#define BT_WIM(bitmap,bitindex)                ((bitmap)[(bitindex) >> BT_ULSHIFT])            /* word in map */
74966 +#define BT_BIW(bitindex)               (1 << ((bitindex) & BT_ULMASK))         /* bit in word */
74967 +
74968 +/* BT_BITOUL -- n bits to n words */
74969 +#define BT_BITOUL(nbits)               (((nbits) + BT_NBIPUL -1) / BT_NBIPUL)
74970 +
74971 +#define BT_TEST(bitmap,bitindex)       ((BT_WIM((bitmap), (bitindex)) & BT_BIW(bitindex)) ? 1 : 0)
74972 +#define BT_SET(bitmap,bitindex)                do { BT_WIM((bitmap), (bitindex)) |= BT_BIW(bitindex); } while (0)
74973 +#define BT_CLEAR(bitmap,bitindex)      do { BT_WIM((bitmap), (bitindex)) &= ~BT_BIW(bitindex); } while (0)
74974 +
74975 +/* return first free bit in the bitmap, or -1 for failure */
74976 +extern int  bt_freebit (bitmap_t *bitmap, int nbits);
74977 +
74978 +/* return the index of the lowest set bit in the bitmap or -1 for failure */
74979 +extern int bt_lowbit (bitmap_t *bitmap, int nbits);
74980 +
74981 +/* return the index of the next set/clear bit in the bitmap or -1 for failure */
74982 +extern int bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset);
74983 +
74984 +/* copy/zero/fill/compare a bit map */
74985 +extern void bt_copy (bitmap_t *a, bitmap_t *b, int nbits);
74986 +extern void bt_zero (bitmap_t *a, int nbits);
74987 +extern void bt_fill (bitmap_t *a, int nbits);
74988 +extern int  bt_cmp (bitmap_t *a, bitmap_t *b, int nbits);
74989 +
74990 +/* intersect bitmap 'a' with bitmap 'b' and return in 'a' */
74991 +extern void bt_intersect (bitmap_t *a, bitmap_t *b, int nbits);
74992 +
74993 +/* remove/add bitmap 'b' from bitmap 'a' */
74994 +extern void bt_remove (bitmap_t *a, bitmap_t *b, int nbits);
74995 +extern void bt_add (bitmap_t *a, bitmap_t *b, int nbits);
74996 +
74997 +/* check whether bitmap 'a' spans bitmap 'b' */
74998 +extern int  bt_spans (bitmap_t *a, bitmap_t *b, int nbits);
74999 +
75000 +/* copy [base,base+nbits-1] from 'a' to 'b' */
75001 +extern void bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits);
75002 +
75003 +/* find bits clear in 'a' and set in 'b', put result in 'c' */
75004 +extern void bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits);
75005 +
75006 +/* find bits set in 'a' and clear in 'b', put result in 'c' */
75007 +extern void bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits);
75008 +
75009 +/* return number of bits set in bitmap */
75010 +extern int  bt_nbits (bitmap_t *a, int nbits);
75011 +
75012 +
75013 +#endif /* __QSNET_BITMAP_H */
75014 +
75015 +/*
75016 + * Local variables:
75017 + * c-file-style: "linux"
75018 + * End:
75019 + */
75020 Index: linux-2.4.21/include/elan/capability.h
75021 ===================================================================
75022 --- linux-2.4.21.orig/include/elan/capability.h 2004-02-23 16:02:56.000000000 -0500
75023 +++ linux-2.4.21/include/elan/capability.h      2005-06-01 23:12:54.705422992 -0400
75024 @@ -0,0 +1,197 @@
75025 +/*
75026 + *    Copyright (c) 2003 by Quadrics Limited.
75027 + * 
75028 + *    For licensing information please see the supplied COPYING file
75029 + *
75030 + */
75031 +
75032 +#ident "@(#)$Id: capability.h,v 1.16 2004/07/20 10:15:33 david Exp $"
75033 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/capability.h,v $*/
75034 +
75035 +#ifndef __ELAN_CAPABILITY_H
75036 +#define __ELAN_CAPABILITY_H
75037 +
75038 +#include <elan/bitmap.h>
75039 +
75040 +/* Maximum number of rails */
75041 +#define ELAN_MAX_RAILS          (31)
75042 +/* Maximum number of virtual processes we support */
75043 +#define ELAN_MAX_VPS           (16384)
75044 +
75045 +/* Number of words in a bitmap capability */
75046 +#define ELAN_BITMAPSIZE                BT_BITOUL(ELAN_MAX_VPS)
75047 +
75048 +/* Guaranteed invalid values */
75049 +#define ELAN_INVALID_PROCESS   (0x7fffffff)            /* A GUARANTEED invalid process # */
75050 +#define ELAN_INVALID_NODE      (0xFFFF)
75051 +#define ELAN_INVALID_CONTEXT   (0xFFFF)
75052 +
75053 +/* Number of values in a user key */
75054 +#define ELAN_USERKEY_ENTRIES   4
75055 +
75056 +typedef void * ELAN_CAP_OWNER;
75057 +
75058 +/* 
75059 + * When used in userspace this is relative to the base of
75060 + * the capability but is an absolute location for kernel space.
75061 + */
75062 +typedef struct elan_location
75063 +{
75064 +       unsigned short loc_node;
75065 +       unsigned short loc_context;
75066 +} ELAN_LOCATION;
75067 +
75068 +typedef struct elan_userkey
75069 +{
75070 +       unsigned        key_values[ELAN_USERKEY_ENTRIES];
75071 +} ELAN_USERKEY;
75072 +
75073 +typedef struct elan_capability
75074 +{
75075 +       ELAN_USERKEY    cap_userkey;                            /* User defined protection */
75076 +
75077 +       int             cap_version;                            /* Version number */
75078 +       unsigned short  cap_type;                               /* Capability Type */
75079 +       unsigned short  cap_spare;                              /* spare was cap_elan_type */
75080 +
75081 +       int             cap_lowcontext;                         /* low context number in block */
75082 +       int             cap_highcontext;                        /* high context number in block */
75083 +       int             cap_mycontext;                          /* my context number */
75084 +    
75085 +       int             cap_lownode;                            /* low elan id of group */
75086 +       int             cap_highnode;                           /* high elan id of group */
75087 +
75088 +       unsigned int    cap_railmask;                           /* which rails this capability is valid for */
75089 +       
75090 +       bitmap_t        cap_bitmap[ELAN_BITMAPSIZE];            /* Bitmap of process to processor translation */
75091 +} ELAN_CAPABILITY;
75092 +
75093 +#define ELAN_CAP_UNINITIALISED         (-1)
75094 +
75095 +#define ELAN_CAP_VERSION_NUMBER                (0x00010002)
75096 +
75097 +#define ELAN_CAP_NUM_NODES(cap)                ((cap)->cap_highnode - (cap)->cap_lownode + 1)
75098 +#define ELAN_CAP_NUM_CONTEXTS(cap)     ((cap)->cap_highcontext - (cap)->cap_lowcontext + 1)
75099 +
75100 +/* using or defining our own MIN/MAX had conflicts with dunix so we define ELAN_ ones */
75101 +#define ELAN_MIN(a,b)  ((a) > (b) ? (b) : (a))
75102 +#define ELAN_MAX(a,b)  ((a) > (b) ? (a) : (b))
75103 +#define ELAN_CAP_BITMAPSIZE(cap)       (ELAN_MAX (ELAN_MIN (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap), ELAN_MAX_VPS), 0))
75104 +
75105 +#define ELAN_CAP_SIZE(cap)             (offsetof (ELAN_CAPABILITY, cap_bitmap[BT_BITOUL(ELAN_CAP_BITMAPSIZE(cap))]))
75106 +#define ELAN_CAP_ENTRIES(cap)           (((cap)->cap_type & ELAN_CAP_TYPE_NO_BITMAP) ? ELAN_CAP_BITMAPSIZE((cap)) : bt_nbits((cap)->cap_bitmap, ELAN_CAP_BITMAPSIZE((cap))))
75107 +
75108 +#define ELAN_CAP_IS_RAIL_SET(cap,rail)  ((cap)->cap_railmask & (1<<rail))
75109 +
75110 +#define ELAN_CAP_KEY_MATCH(cap1,cap2)  ((cap1)->cap_userkey.key_values[0] == (cap2)->cap_userkey.key_values[0] && \
75111 +                                        (cap1)->cap_userkey.key_values[1] == (cap2)->cap_userkey.key_values[1] && \
75112 +                                        (cap1)->cap_userkey.key_values[2] == (cap2)->cap_userkey.key_values[2] && \
75113 +                                        (cap1)->cap_userkey.key_values[3] == (cap2)->cap_userkey.key_values[3])
75114 +
75115 +#define ELAN_CAP_TYPE_MATCH(cap1,cap2)  ((cap1)->cap_version           == (cap2)->cap_version           && \
75116 +                                        (cap1)->cap_type              == (cap2)->cap_type)
75117 +
75118 +#define ELAN_CAP_GEOM_MATCH(cap1,cap2) ((cap1)->cap_lowcontext        == (cap2)->cap_lowcontext        && \
75119 +                                        (cap1)->cap_highcontext       == (cap2)->cap_highcontext       && \
75120 +                                        (cap1)->cap_lownode           == (cap2)->cap_lownode           && \
75121 +                                        (cap1)->cap_highnode          == (cap2)->cap_highnode          && \
75122 +                                         (cap1)->cap_railmask          == (cap2)->cap_railmask          && \
75123 +                                        !bcmp (&(cap1)->cap_bitmap[0], &(cap2)->cap_bitmap[0],            \
75124 +                                               BT_BITOUL(ELAN_CAP_BITMAPSIZE(cap1)*sizeof(bitmap_t))))
75125 +
75126 +#define ELAN_CAP_MATCH(cap1,cap2)      (ELAN_CAP_KEY_MATCH (cap1, cap2)  && \
75127 +                                        ELAN_CAP_TYPE_MATCH (cap1, cap2) && \
75128 +                                        ELAN_CAP_GEOM_MATCH (cap1, cap2))
75129 +
75130 +#define ELAN_CAP_VALID_MYCONTEXT(cap)   (    ((cap)->cap_lowcontext  != ELAN_CAP_UNINITIALISED)     \
75131 +                                         && ((cap)->cap_mycontext   != ELAN_CAP_UNINITIALISED)     \
75132 +                                         && ((cap)->cap_highcontext != ELAN_CAP_UNINITIALISED)     \
75133 +                                         && ((cap)->cap_lowcontext <= (cap)->cap_mycontext)        \
75134 +                                         && ((cap)->cap_mycontext <= (cap)->cap_highcontext)) 
75135 +
75136 +/*
75137 + * Definitions for type 
75138 + */
75139 +#define ELAN_CAP_TYPE_BLOCK            1               /* Block distribution */
75140 +#define ELAN_CAP_TYPE_CYCLIC           2               /* Cyclic distribution */
75141 +#define ELAN_CAP_TYPE_KERNEL           3               /* Kernel capability */
75142 +
75143 +#define ELAN_CAP_TYPE_MASK             (0xFFF)         /* Mask for type */
75144 +
75145 +/* OR these bits in for extra features */
75146 +#define ELAN_CAP_TYPE_HWTEST           (1 << 12)       /* Hardware test capability type */
75147 +#define ELAN_CAP_TYPE_MULTI_RAIL       (1 << 13)       /* "new" multi rail capability */
75148 +#define ELAN_CAP_TYPE_NO_BITMAP                (1 << 14)       /* don't use bit map */
75149 +#define ELAN_CAP_TYPE_BROADCASTABLE    (1 << 15)       /* broadcastable */
75150 +
75151 +
75152 +extern void          elan_nullcap     (ELAN_CAPABILITY *cap);
75153 +extern char         *elan_capability_string (ELAN_CAPABILITY *cap, char *str);
75154 +extern ELAN_LOCATION elan_vp2location (unsigned process, ELAN_CAPABILITY *cap);
75155 +extern int           elan_location2vp (ELAN_LOCATION location, ELAN_CAPABILITY *cap);
75156 +extern int           elan_nvps        (ELAN_CAPABILITY *cap);
75157 +extern int           elan_nlocal      (int node, ELAN_CAPABILITY *cap);
75158 +extern int           elan_maxlocal    (ELAN_CAPABILITY *cap);
75159 +extern int           elan_localvps    (int node, ELAN_CAPABILITY *cap, int *vps, int size);
75160 +extern int           elan_nrails      (ELAN_CAPABILITY *cap);
75161 +extern int           elan_rails       (ELAN_CAPABILITY *cap, int *rails);
75162 +extern int           elan_cap_overlap (ELAN_CAPABILITY *cap1, ELAN_CAPABILITY *cap2);
75163 +
75164 +/*
75165 + * capability creation/access fns provide for running
75166 + * new libelan code on old OS releases
75167 + */
75168 +extern int elan_lowcontext(ELAN_CAPABILITY *cap);
75169 +extern int elan_mycontext(ELAN_CAPABILITY *cap);
75170 +extern int elan_highcontext(ELAN_CAPABILITY *cap);
75171 +extern int elan_lownode(ELAN_CAPABILITY *cap);
75172 +extern int elan_highnode(ELAN_CAPABILITY *cap);
75173 +extern int elan_captype(ELAN_CAPABILITY *cap);
75174 +extern int elan_railmask(ELAN_CAPABILITY *cap);
75175 +
75176 +extern int elan_getenvCap (ELAN_CAPABILITY *cap, int index);
75177 +extern ELAN_CAPABILITY *elan_createCapability(void);
75178 +extern ELAN_CAPABILITY *elan_copyCapability(ELAN_CAPABILITY *from, int ctxShift);
75179 +extern int elan_generateCapability(char *string);
75180 +
75181 +typedef struct elan_cap_struct
75182 +{
75183 +       ELAN_CAP_OWNER   owner;
75184 +       ELAN_CAPABILITY  cap;
75185 +
75186 +       unsigned int     attached; /* count of people attached */
75187 +       unsigned int     active;   /* ie not being destroyed   */
75188 +} ELAN_CAP_STRUCT;
75189 +
75190 +#if ! defined(__KERNEL__)
75191 +extern void          elan_get_random_key(ELAN_USERKEY *key);
75192 +extern int           elan_prefrails(ELAN_CAPABILITY *cap, int *pref, int nvp);
75193 +#endif
75194 +
75195 +#if defined(__KERNEL__)
75196 +/* capability.c */
75197 +extern int elan_validate_cap  (ELAN_CAPABILITY *cap);
75198 +extern int elan_validate_map  (ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
75199 +
75200 +extern int elan_create_cap  (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
75201 +extern int elan_destroy_cap (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
75202 +extern int elan_create_vp   (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
75203 +extern int elan_destroy_vp  (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
75204 +
75205 +typedef        void (*ELAN_DESTROY_CB)(void *args, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
75206 +
75207 +extern int elan_attach_cap  (ELAN_CAPABILITY *cap, unsigned int rail, void *args, ELAN_DESTROY_CB callback);
75208 +extern int elan_detach_cap  (ELAN_CAPABILITY *cap, unsigned int rail);
75209 +
75210 +extern int elan_get_caps    (uint *number_of_results, uint array_size, ELAN_CAP_STRUCT *caps);
75211 +extern int elan_cap_dump    (void);
75212 +#endif /* __KERNEL__ */
75213 +
75214 +
75215 +#endif /* __ELAN_CAPABILITY_H */
75216 +
75217 +/*
75218 + * Local variables:
75219 + * c-file-style: "linux"
75220 + * End:
75221 + */
75222 Index: linux-2.4.21/include/elan/cm.h
75223 ===================================================================
75224 --- linux-2.4.21.orig/include/elan/cm.h 2004-02-23 16:02:56.000000000 -0500
75225 +++ linux-2.4.21/include/elan/cm.h      2005-06-01 23:12:54.706422840 -0400
75226 @@ -0,0 +1,412 @@
75227 +/*
75228 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
75229 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
75230 + *
75231 + *    For licensing information please see the supplied COPYING file
75232 + *
75233 + */
75234 +
75235 +#ifndef __ELAN_CM_H
75236 +#define __ELAN_CM_H
75237 +
75238 +#ident "@(#)$Id: cm.h,v 1.14.2.1 2004/11/12 10:54:50 mike Exp $"
75239 +/*      $Source: /cvs/master/quadrics/epmod/cm.h,v $*/
75240 +
75241 +#include <elan/statemap.h>
75242 +
75243 +#if defined(DIGITAL_UNIX)
75244 +/*
75245 + * On Tru64 - SMP doesn't mean Symmetric - cpu 0 is a master cpu and is responsible
75246 + * for handling all PCI interrupts and "funneled" operations.  When a kernel thread
75247 + * is made runnable, the scheduler will choose which cpu it will run on at that time,
75248 + * and will only execute a higher priority thread from another cpu's run queue when 
75249 + * it becomes totally idle (apparently also including user processes).  Also the 
75250 + * assert_wait_mesg_timo function uses a per-cpu timeout - these can only get executed
75251 + * at "preemptable" places - so again have no guarantee on when they will execute if
75252 + * they happen to be queued on a "hogged" cpu. The combination of these mean that the Tru64
75253 + * is incapable of scheduling a high priority kernel thread within a deterministic time
75254 + * of when it should have become runnable - wonderful.
75255 + *
75256 + * Hence the solution Compaq have proposed it to schedule a timeout onto all of the
75257 + * cpu's timeouts lists at the maximum frequency that we could want to execute code,
75258 + * then to handle the scheduling of work between these ourselves.  With a bit of luck
75259 + * ..... at least one cpu will be sufficiently unloaded to allow us to get a chance
75260 + * to do our important work.
75261 + *
75262 + * However ..... this still is not reliable, since timeouts under Tru64 are still 
75263 + * only run when the currently running kernel thread "co-operates" by calling one
75264 + * of a number of functions which is permitted to run the "lwc"s AND is not holding
75265 + * any spinlocks AND is running at IPL 0.   However Compaq are unable to provide
75266 + * any upper limit on the time between the "lwc"'s being run and so it is possible
75267 + * for all 4 cpus to not run them for an unbounded time.
75268 + *
75269 + * The solution proposed is to use the RM_TEMP_BACKDOOR hook which was added to 
75270 + * hardclock() to "solve" this problem for Memory Channel.  However, since it
75271 + * is called within the clock interrupt it is not permissible to acquire any
75272 + * spinlocks, nor to run for "too long".  This means that it is not possible to
75273 + * call the heartbeat algorithm from this hook.  
75274 + *
75275 + * Our solution to these limitations is to use the hook to cause an elan interrupt 
75276 + * to be delivered, by issuing a mis-aligned SetEvent command - this causes the device 
75277 + * to trap and ep_cprocTrap() can then run the heartbeat code.  However there is a lock 
75278 + * order violation between the elan_dev::IntrLock and ep_dev::Lock, so we have to 
75279 + * use a trylock and if we fail, then hope that when the interrupt is delivered again
75280 + * some time later we will succeed.
75281 + *
75282 + * However this only works if the kernel is able to respond to the Elan interrupt,
75283 + * so we panic inside the RM_TEMP_BACKDOOR hook if the SetEvent's interrupt has
75284 + * not been taken for more than an CM_TIMER_SCHEDULE_TIMEOUT interval.
75285 + *
75286 + * In fact this is exactly the mechanism that other operating systems use to
75287 + * execute timeouts, since the hardclock interrupt posts a low priority 
75288 + * "soft interrupt" which "pre-empts" the currently running thread and then
75289 + * executes the timeouts. To block timeouts you use splsoftclock() the same as 
75290 + * in Tru64.
75291 + */
75292 +#define PER_CPU_TIMEOUT                        TRUE
75293 +#endif
75294 +
75295 +
75296 +#define CM_SGMTS_PER_LEVEL             8                       /* maximum nodes in each segment */
75297 +#define CM_MAX_LEVELS                  6                       /* maximum depth of tree */
75298 +
75299 +/* message buffers/dmas/events etc */
75300 +#define CM_NUM_NODE_MSG_BUFFERS                (CM_MAX_LEVELS * CM_SGMTS_PER_LEVEL) /* subordinates and leader */
75301 +#define CM_NUM_SPARE_MSG_BUFFERS       8                       /* spare msg buffers for non-connected nodes */
75302 +#define CM_NUM_MSG_BUFFERS             (CM_NUM_NODE_MSG_BUFFERS + CM_NUM_SPARE_MSG_BUFFERS)
75303 +
75304 +#define CM_INPUTQ_ENTRIES              128                     /* # entries in input queue */
75305 +
75306 +#define CM_PERIODIC_DISCOVER_INTERVAL  (5000)          /* 5s (infrequent resolution of established leader conflicts) */
75307 +#define CM_URGENT_DISCOVER_INTERVAL    (50)            /* 0.05s (more frequently than heartbeats 'cause they don't retry) */
75308 +#define CM_HEARTBEAT_INTERVAL          (125)           /* 0.125s */
75309 +#define CM_TIMER_SCHEDULE_TIMEOUT      (4000)          /* 4s     Maximum time before a timer that's scheduled to run gets to run (eg blocked in interrupt handlers etc) */
75310 +#define CM_THREAD_SCHEDULE_TIMEOUT     (30000)         /* 30s    Maximum time before a thread that's scheduled to run gets to run */
75311 +#define CM_THREAD_RUNNING_TIMEOUT      (30000)         /* 30s    Don't expect the manager thread to be running longer than this */
75312 +
75313 +#ifdef PER_CPU_TIMEOUT
75314 +#define CM_PERCPU_TIMEOUT_INTERVAL     (50)            /* 0.05s (must be less than all above intervals) */
75315 +#define CM_PACEMAKER_INTERVAL          (500)           /* 0.5s */
75316 +
75317 +#define CM_HEARTBEAT_OVERDUE           (250)           /* 0.25s Maximum time a timeout can be overdue before taking extreme action */
75318 +#endif
75319 +
75320 +#define CM_P2P_DMA_RETRIES             31
75321 +
75322 +/* We expect at least 1 point-to-point message in CM_P2P_MSG_RETRIES
75323 + * attempts to send one to be successfully received */
75324 +#define CM_P2P_MSG_RETRIES             8
75325 +
75326 +/* We expect at least 1 broadcast message in CM_BCAST_MSG_RETRIES attempts
75327 + * to send one to be successfully received. */
75328 +#define CM_BCAST_MSG_RETRIES           40
75329 +
75330 +/* Heartbeat timeout allows for a node stalling and still getting its
75331 + * heartbeat. The 2 is to allow for unsynchronised polling times. */
75332 +#define CM_HEARTBEAT_TIMEOUT           (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_P2P_MSG_RETRIES) * CM_HEARTBEAT_INTERVAL)
75333 +
75334 +/* Discover timeout must be > CM_HEARTBEAT_TIMEOUT to guarantee that people
75335 + * who don't see discovery are considered dead by their leader.  This
75336 + * ensures that by the time a node "discovers" it is a leader of a segment,
75337 + * the previous leader of that segment will have been deemed to be dead by
75338 + * its parent segment's leader */
75339 +#define CM_DISCOVER_TIMEOUT            (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_BCAST_MSG_RETRIES) * CM_URGENT_DISCOVER_INTERVAL)
75340 +
75341 +#define CM_WAITING_TIMEOUT             (CM_DISCOVER_TIMEOUT * 100)
75342 +
75343 +/*
75344 + * Convert all timeouts specified in mS into "ticks"
75345 + */
75346 +#define MSEC2TICKS(MSEC)               (((MSEC)*HZ)/1000)
75347 +
75348 +
75349 +/* statemap entry */
75350 +typedef struct cm_state_entry
75351 +{
75352 +    int16_t           level;                   /* cluster level to apply to */
75353 +    int16_t          offset;                   /* from statemap_findchange() */
75354 +    uint16_t          seg[BT_NBIPUL/16];       /* ditto */
75355 +} CM_STATEMAP_ENTRY;
75356 +
75357 +/* offset is >= 0 for a change to apply and */
75358 +#define STATEMAP_NOMORECHANGES (-1)            /* end of a set of updates */
75359 +#define STATEMAP_RESET         (-2)            /* reset the target map */
75360 +#define STATEMAP_NOOP          (-3)            /* null token */
75361 +
75362 +/* CM message format */
75363 +typedef int8_t CM_SEQ;                         /* heartbeat sequence numbers; at least 2 bits, signed */
75364 +
75365 +/*
75366 + * The message header is received into the last 64 byte block of 
75367 + * the input queue and the Version *MUST* be the last word of the 
75368 + * block to ensure that we can see that the whole of the message
75369 + * has reached main memory after we've seen the input queue pointer
75370 + * have been updated.
75371 + */
75372 +typedef struct ep_cm_hdr
75373 +{
75374 +    uint32_t          Pad0;
75375 +    uint32_t          Pad1;
75376 +
75377 +    uint8_t           Type;
75378 +    uint8_t           Level;
75379 +    CM_SEQ            Seq;                     /* precision at least 2 bits each*/
75380 +    CM_SEQ            AckSeq;
75381 +    
75382 +    uint16_t          NumMaps;
75383 +    uint16_t          MachineId;
75384 +
75385 +    uint16_t          NodeId;
75386 +    uint16_t          Checksum;
75387 +
75388 +    uint32_t           Timestamp;
75389 +    uint32_t           ParamHash;
75390 +    uint32_t          Version;
75391 +} CM_HDR;
75392 +
75393 +#define CM_HDR_SIZE        sizeof (CM_HDR)
75394 +
75395 +typedef struct cm_msg
75396 +{
75397 +    union {
75398 +       CM_STATEMAP_ENTRY   Statemaps[1];               /* piggy-backed statemap updates start here */
75399 +       uint8_t             Space[EP_SYSTEMQ_MSG_MAX - CM_HDR_SIZE];
75400 +    } Payload;
75401 +    
75402 +    CM_HDR                 Hdr;
75403 +} CM_MSG;
75404 +
75405 +/* The maximum number of statemap entries that can fit within an EP_CM_MSG_BUFFER */
75406 +#define CM_MSG_MAXMAPS         (offsetof (CM_MSG, Hdr) / sizeof (CM_STATEMAP_ENTRY))
75407 +#define CM_MSG_MAP(mapno)      (CM_MSG_MAXMAPS - (mapno) - 1)
75408 +
75409 +/* The actual special message base & size, including 'nmaps' piggy-backed statemap entries */
75410 +#define CM_MSG_BASE(nmaps)     (nmaps == 0 ? offsetof (CM_MSG, Hdr) : offsetof (CM_MSG, Payload.Statemaps[CM_MSG_MAXMAPS - nmaps]))
75411 +#define CM_MSG_SIZE(nmaps)     (sizeof (CM_MSG) - CM_MSG_BASE(nmaps))
75412 +
75413 +#define CM_MSG_VERSION                         0xcad00005
75414 +#define CM_MSG_TYPE_RESOLVE_LEADER             0
75415 +#define CM_MSG_TYPE_DISCOVER_LEADER            1
75416 +#define CM_MSG_TYPE_NOTIFY                     2
75417 +#define CM_MSG_TYPE_DISCOVER_SUBORDINATE       3
75418 +#define CM_MSG_TYPE_IMCOMING                   4
75419 +#define CM_MSG_TYPE_HEARTBEAT                  5
75420 +#define CM_MSG_TYPE_REJOIN                     6
75421 +
75422 +/* CM machine segment */
75423 +typedef struct cm_sgmtMaps
75424 +{
75425 +    u_char       InputMapValid;                        /* Input map has been set */
75426 +    u_char       OutputMapValid;               /* Output map has been set */
75427 +    u_char       SentChanges;                  /* got an outstanding STATEMAP_NOMORECHANGES to send */
75428 +    statemap_t  *OutputMap;                    /* state to send */
75429 +    statemap_t  *InputMap;                     /* state received */
75430 +    statemap_t  *CurrentInputMap;              /* state being received */
75431 +} CM_SGMTMAPS;
75432 +
75433 +typedef struct cm_sgmt
75434 +{
75435 +   u_char       State;
75436 +   u_char       SendMaps;
75437 +   u_char       MsgAcked;
75438 +   CM_SEQ      MsgSeq;
75439 +   CM_SEQ      AckSeq;
75440 +   u_int       NodeId;
75441 +   long                UpdateTick;
75442 +   long                WaitingTick;
75443 +   uint32_t    Timestamp;
75444 +   CM_SGMTMAPS  Maps[CM_MAX_LEVELS];           /* Maps[i] == state for cluster level i */
75445 +   u_short      MsgNumber;                     /* msg buffer to use */
75446 +   u_short     NumMaps;                        /* # maps in message buffer */
75447 +   u_short      Level;
75448 +   u_short      Sgmt;
75449 +} CM_SGMT;
75450 +
75451 +#define CM_SGMT_ABSENT         0               /* no one there at all */
75452 +#define CM_SGMT_WAITING                1               /* waiting for subtree to connect */
75453 +#define CM_SGMT_COMING         2               /* expecting a subtree to reconnect */
75454 +#define CM_SGMT_PRESENT                3               /* connected */
75455 +
75456 +typedef struct cm_level
75457 +{
75458 +    int               SwitchLevel;
75459 +    u_int             MinNodeId;
75460 +    u_int              NumNodes;
75461 +    u_int              NumSegs;
75462 +    u_int              MySgmt;
75463 +   
75464 +    /* SubordinateMap[i] == OR of all subordinate maps on this level and down for cluster level i */
75465 +    u_char             SubordinateMapValid[CM_MAX_LEVELS];
75466 +    statemap_t        *SubordinateMap[CM_MAX_LEVELS];
75467 +
75468 +    /* maps/flags for this cluster level */
75469 +    u_int              Online:1;                               /* I've gone online (seen myself running) */
75470 +    u_int             Restarting:1;                            /* driving my own restart bit */
75471 +    u_char            OfflineReasons;                          /* forced offline by broadcast */
75472 +
75473 +    u_char             GlobalMapValid;
75474 +    u_char             SubTreeMapValid;
75475 +    u_long            Connected;
75476 +
75477 +    statemap_t        *LocalMap;               /* state bits I drive */
75478 +    statemap_t        *SubTreeMap;             /* OR of my and my subtree states */
75479 +    statemap_t        *GlobalMap;              /* OR of all node states */
75480 +    statemap_t        *LastGlobalMap;          /* last map I saw */
75481 +    statemap_t        *TmpMap;                 /* scratchpad */
75482 +
75483 +    CM_SGMT           Sgmts[CM_SGMTS_PER_LEVEL];
75484 +} CM_LEVEL;
75485 +
75486 +#define CM_ROLE_LEADER_CANDIDATE       0
75487 +#define CM_ROLE_LEADER                 1
75488 +#define CM_ROLE_SUBORDINATE            2
75489 +
75490 +/* global status bits */
75491 +#define CM_GSTATUS_STATUS_MASK         0x03    /* bits nodes drive to broadcast their status */
75492 +#define CM_GSTATUS_ABSENT              0x00    /* Off the network */
75493 +#define CM_GSTATUS_STARTING            0x01    /* I'm waiting for everyone to see me online */
75494 +#define CM_GSTATUS_RUNNING              0x03   /* up and running */
75495 +#define CM_GSTATUS_CLOSING             0x02    /* I'm waiting for everyone to see me offline */
75496 +
75497 +#define CM_GSTATUS_ACK_MASK            0x0c    /* bits node drive to ack other status */
75498 +#define CM_GSTATUS_MAY_START           0x04    /* Everyone thinks I may not start */
75499 +#define CM_GSTATUS_MAY_RUN             0x08    /* Everyone thinks I may not run */
75500 +
75501 +#define CM_GSTATUS_RESTART             0x10    /* Someone thinks I should restart */
75502 +#define CM_GSTATUS_BITS                        5
75503 +
75504 +#define CM_GSTATUS_BASE(node)          ((node) * CM_GSTATUS_BITS)
75505 +
75506 +#if defined(PER_CPU_TIMEOUT)
75507 +typedef struct cm_timeout_data
75508 +{
75509 +    long               ScheduledAt;                            /* lbolt timeout was scheduled to run at */
75510 +
75511 +    unsigned long       EarlyCount;                            /* # times run early than NextRun */
75512 +    unsigned long      MissedCount;                            /* # times run on time - but someone else was running it */
75513 +    unsigned long       WastedCount;                           /* # times we failed to get the spinlock */
75514 +    unsigned long      WorkCount;                              /* # times we're the one running */
75515 +
75516 +    unsigned long      WorstDelay;                             /* worst scheduling delay */
75517 +    unsigned long      BestDelay;                              /* best scheduling delay */
75518 +
75519 +    unsigned long      WorstLockDelay;                         /* worst delay before getting rail->Lock */
75520 +
75521 +    unsigned long      WorstHearbeatDelay;                     /* worst delay before calling DoHeartbeatWork */
75522 +} CM_TIMEOUT_DATA;
75523 +#endif
75524 +
75525 +typedef struct cm_rail
75526 +{
75527 +    EP_RAIL          *Rail;                                    /* rail we're associated with */
75528 +    struct list_head   Link;                                   /*   and linked on the CM_SUBSYS */
75529 +
75530 +    uint32_t          ParamHash;                               /* hash of critical parameters */
75531 +    uint32_t           Timestamp;
75532 +    long              DiscoverStartTick;                       /* when discovery started */
75533 +
75534 +    unsigned int       NodeId;                                 /* my node id */
75535 +    unsigned int       NumNodes;                               /*   and number of nodes */
75536 +    unsigned int       NumLevels;                              /* number of levels computed from machine size */
75537 +    int                       BroadcastLevel;
75538 +    long              BroadcastLevelTick;
75539 +    unsigned int       TopLevel;                               /* level at which I'm not a leader */
75540 +    unsigned char      Role;                                   /* state at TopLevel */
75541 +
75542 +    EP_INPUTQ        *PolledQueue;                             /* polled input queue */
75543 +    EP_INPUTQ        *IntrQueue;                               /* intr input queue */
75544 +    EP_OUTPUTQ       *MsgQueue;                                /* message  */
75545 +    unsigned int       NextSpareMsg;                           /* next "spare" message buffer to use */
75546 +
75547 +    EP_CM_RAIL_STATS   Stats;                                  /* statistics */
75548 +
75549 +    kmutex_t          Mutex;
75550 +    spinlock_t        Lock;
75551 +    
75552 +    long              NextHeartbeatTime;                       /* next time to check/send heartbeats */
75553 +    long              NextDiscoverTime;                        /* next time to progress discovery  */
75554 +    long              NextRunTime;                             /* the earlier of the above two or intr requires inputq poll*/
75555 +
75556 +    unsigned int       OfflineReasons;                         /* forced offline by procfs/manager thread stuck */
75557 +
75558 +#if defined(PER_CPU_TIMEOUT)
75559 +    spinlock_t        HeartbeatTimeoutsLock;                   /* spinlock to sequentialise per-cpu timeouts */
75560 +    long              HeartbeatTimeoutsStarted;                /* bitmap of which timeouts have started */
75561 +    long              HeartbeatTimeoutsStopped;                /* bitmap of which timeouts have stopped */
75562 +    long              HeartbeatTimeoutsShouldStop;             /* flag to indicate timeouts should stop */
75563 +    kcondvar_t        HeartbeatTimeoutsWait;                   /* place to sleep waiting for timeouts to stop */
75564 +    long              HeartbeatTimeoutRunning;                 /* someone is running the timeout - don't try for the lock */
75565 +
75566 +    long              HeartbeatTimeoutOverdue;                 /* heartbeat seen as overdue - interrupt requested */
75567 +
75568 +    CM_TIMEOUT_DATA   *HeartbeatTimeoutsData;                  /* per timeout data */
75569 +#else
75570 +    struct timer_list  HeartbeatTimer;                         /* timer for heartbeat/discovery */
75571 +#endif
75572 +
75573 +    CM_LEVEL           Levels[CM_MAX_LEVELS];
75574 +} CM_RAIL;
75575 +
75576 +/* OfflineReasons (both per-rail and per-level) */
75577 +#define CM_OFFLINE_BROADCAST           (1 << 0)
75578 +#define CM_OFFLINE_PROCFS              (1 << 1)
75579 +#define CM_OFFLINE_MANAGER             (1 << 2)
75580 +
75581 +typedef struct cm_subsys
75582 +{
75583 +    EP_SUBSYS          Subsys;
75584 +    CM_RAIL            *Rails[EP_MAX_RAILS];
75585 +} CM_SUBSYS;
75586 +
75587 +extern int  MachineId;
75588 +
75589 +extern void cm_node_disconnected (EP_RAIL *rail, unsigned nodeId);
75590 +extern void cm_restart_node (EP_RAIL *rail, unsigned nodeId);
75591 +extern void cm_restart_comms (CM_RAIL *cmRail);
75592 +extern int  cm_init (EP_SYS *sys);
75593 +
75594 +extern void DisplayRail(EP_RAIL *rail);
75595 +extern void DisplaySegs (EP_RAIL *rail);
75596 +extern void DisplayStatus (EP_RAIL *rail);
75597 +
75598 +typedef struct proc_private
75599 +{
75600 +    struct nodeset_private *pr_next;
75601 +    EP_RAIL                *pr_rail;
75602 +    char                  *pr_data;
75603 +    int                     pr_data_len;
75604 +    unsigned               pr_off;
75605 +    unsigned               pr_len;
75606 +    DisplayInfo             pr_di;
75607 +} PROC_PRIVATE;
75608 +
75609 +extern void    proc_character_fill (long mode, char *fmt, ...);
75610 +extern int     proc_release (struct inode *inode, struct file *file);
75611 +extern ssize_t proc_read (struct file *file, char *buf, size_t count, loff_t *ppos);
75612 +
75613 +
75614 +extern void DisplayNodeMaps  (DisplayInfo *di, CM_RAIL *cmRail);
75615 +extern void DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail);
75616 +extern void DisplayRailDo    (DisplayInfo *di, EP_RAIL *rail);
75617 +
75618 +extern int    cm_read_cluster(EP_RAIL *rail,char *page);
75619 +extern void   cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason);
75620 +
75621 +extern int    cm_svc_indicator_set      (EP_RAIL *rail, int svc_indicator);
75622 +extern int    cm_svc_indicator_clear    (EP_RAIL *rail, int svc_indicator);
75623 +extern int    cm_svc_indicator_is_set   (EP_RAIL *rail, int svc_indicator, int nodeId);
75624 +extern int    cm_svc_indicator_bitmap   (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
75625 +
75626 +/* cm_procfs.c */
75627 +extern void   cm_procfs_init (CM_SUBSYS *subsys);
75628 +extern void   cm_procfs_fini (CM_SUBSYS *subsys);
75629 +extern void   cm_procfs_rail_init (CM_RAIL *rail);
75630 +extern void   cm_procfs_rail_fini (CM_RAIL *rail);
75631 +
75632 +/*
75633 + * Local variables:
75634 + * c-file-style: "stroustrup"
75635 + * End:
75636 + */
75637 +#endif /* __ELAN_CM_H */
75638 +
75639 Index: linux-2.4.21/include/elan/compat.h
75640 ===================================================================
75641 --- linux-2.4.21.orig/include/elan/compat.h     2004-02-23 16:02:56.000000000 -0500
75642 +++ linux-2.4.21/include/elan/compat.h  2005-06-01 23:12:54.706422840 -0400
75643 @@ -0,0 +1,23 @@
75644 +/*
75645 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
75646 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
75647 + *
75648 + *    For licensing information please see the supplied COPYING file
75649 + *
75650 + */
75651 +
75652 +#ident "@(#)$Id: compat.h,v 1.1 2003/12/03 13:18:48 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
75653 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/compat.h,v $*/
75654 +
75655 +#ifndef __ELAN_COMPAT_H
75656 +#define __ELAN_COMPAT_H
75657 +
75658 +#define ELANMOD_STATS_MAP      ELAN_STATS_MAP
75659 +
75660 +#endif  /* __ELAN_COMPAT_H */
75661 +
75662 +/*
75663 + * Local variables:
75664 + * c-file-style: "stroustrup"
75665 + * End:
75666 + */
75667 Index: linux-2.4.21/include/elan/device.h
75668 ===================================================================
75669 --- linux-2.4.21.orig/include/elan/device.h     2004-02-23 16:02:56.000000000 -0500
75670 +++ linux-2.4.21/include/elan/device.h  2005-06-01 23:12:54.707422688 -0400
75671 @@ -0,0 +1,62 @@
75672 +/*
75673 + *    Copyright (c) 2003 by Quadrics Limited.
75674 + * 
75675 + *    For licensing information please see the supplied COPYING file
75676 + *
75677 + */
75678 +
75679 +#ident "@(#)$Id: device.h,v 1.5 2003/09/24 13:55:37 david Exp $"
75680 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/device.h,v $*/
75681 +
75682 +#ifndef __ELAN_DEVICE_H
75683 +#define __ELAN_DEVICE_H
75684 +
75685 +/* non-kernel headings */
75686 +typedef unsigned int ELAN_DEV_IDX;
75687 +
75688 +#if defined(__KERNEL__)
75689 +
75690 +/* device callbacks */
75691 +#define ELAN_DEV_OPS_VERSION ((u_int)1)
75692 +
75693 +typedef struct elan_dev_ops
75694 +{
75695 +       /* dev info */
75696 +       int (*get_position)          (void *user_data, ELAN_POSITION *position);
75697 +       int (*set_position)          (void *user_data, unsigned short nodeId, unsigned short numNodes);
75698 +
75699 +       /* cap */
75700 +
75701 +       u_int  ops_version;
75702 +} ELAN_DEV_OPS;
75703 +
75704 +typedef struct elan_dev_struct
75705 +{
75706 +       struct list_head node;
75707 +
75708 +       ELAN_DEV_IDX     devidx;
75709 +       ELAN_DEVINFO    *devinfo;
75710 +       void            *user_data;
75711 +       ELAN_DEV_OPS *ops;
75712 +} ELAN_DEV_STRUCT;
75713 +
75714 +/* device.c */
75715 +extern ELAN_DEV_IDX         elan_dev_register   (ELAN_DEVINFO    *devinfo, 
75716 +                                                   ELAN_DEV_OPS *ops,
75717 +                                                   void            *userdata);
75718 +extern int                  elan_dev_deregister (ELAN_DEVINFO *devinfo);
75719 +
75720 +extern ELAN_DEV_STRUCT * elan_dev_find       (ELAN_DEV_IDX devidx);
75721 +
75722 +extern ELAN_DEV_STRUCT * elan_dev_find_byrail(unsigned short deviceid, unsigned rail);
75723 +extern int                  elan_dev_dump       (void);
75724 +
75725 +#endif /* __KERNEL__ */
75726 +
75727 +#endif /* __ELAN_DEVICE_H */
75728 +
75729 +/*
75730 + * Local variables:
75731 + * c-file-style: "linux"
75732 + * End:
75733 + */
75734 Index: linux-2.4.21/include/elan/devinfo.h
75735 ===================================================================
75736 --- linux-2.4.21.orig/include/elan/devinfo.h    2004-02-23 16:02:56.000000000 -0500
75737 +++ linux-2.4.21/include/elan/devinfo.h 2005-06-01 23:12:54.707422688 -0400
75738 @@ -0,0 +1,81 @@
75739 +/*
75740 + *    Copyright (c) 2003 by Quadrics Limited.
75741 + * 
75742 + *    For licensing information please see the supplied COPYING file
75743 + *
75744 + */
75745 +
75746 +#ident "@(#)$Id: devinfo.h,v 1.11 2004/03/12 14:27:39 david Exp $"
75747 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/devinfo.h,v $*/
75748 +
75749 +#ifndef __ELAN_DEVINFO_H
75750 +#define __ELAN_DEVINFO_H
75751 +
75752 +#define ELAN_MAX_LEVELS                        8                       /* maximum number of levels in switch network */
75753 +
75754 +typedef struct elan_position
75755 +{
75756 +       unsigned        pos_mode;                               /* mode we're operating in */
75757 +       unsigned        pos_nodeid;                             /* port this device connected to */
75758 +       unsigned        pos_levels;                             /* number of levels to top switch */
75759 +       unsigned        pos_nodes;                              /* number of nodes in the machine */
75760 +       unsigned        pos_random_disabled;                    /* levels at which "random" routing is not possible */
75761 +       unsigned char   pos_arity[ELAN_MAX_LEVELS];             /* number of downlinks per switch level */
75762 +} ELAN_POSITION;
75763 +
75764 +#define ELAN4_PARAM_PCI_PADDING_FLAGS          0               /* A bit field, representing good places to burst across the pci                      */
75765 +#define ELAN4_PARAM_EVENT_COPY_WIN             1               /* The num of cmds when it becomes quicker to send via event copy than write directly */
75766 +#define ELAN4_PARAM_WRITE_COMBINING            2               /* If set the device supports bursts accesses across the pci bus                      */
75767 +#define ELAN4_PARAM_COUNT                      12
75768 +
75769 +typedef struct elan_params
75770 +{
75771 +       unsigned        values[ELAN4_PARAM_COUNT];
75772 +} ELAN_PARAMS;
75773 +
75774 +/* values for pos_mode */
75775 +#define ELAN_POS_UNKNOWN               0                       /* network position unknown */
75776 +#define ELAN_POS_MODE_SWITCHED         1                       /* connected to a switch */
75777 +#define ELAN_POS_MODE_LOOPBACK         2                       /* loopback connector */
75778 +#define ELAN_POS_MODE_BACKTOBACK       3                       /* cabled back-to-back to another node */
75779 +
75780 +typedef struct elan_devinfo
75781 +{
75782 +       unsigned short  dev_vendor_id;                          /* pci vendor id */
75783 +       unsigned short  dev_device_id;                          /* pci device id */
75784 +       unsigned char   dev_revision_id;                        /* pci revision id */
75785 +       unsigned char   dev_instance;                           /* device instance number */
75786 +       unsigned char   dev_rail;                               /* device rail number */
75787 +
75788 +       unsigned short  dev_driver_version;                     /* device driver version */
75789 +       unsigned short  dev_params_mask;                        /* mask for valid entries in dev_params array */
75790 +       ELAN_PARAMS     dev_params;                             /* device parametization */
75791 +
75792 +       unsigned        dev_num_down_links_value;               /* MRH hint as to machine size NEEDS coding XXXXX */
75793 +} ELAN_DEVINFO;
75794 +
75795 +#define PCI_VENDOR_ID_QUADRICS         0x14fc
75796 +#define PCI_DEVICE_ID_ELAN3            0x0000
75797 +#define   PCI_REVISION_ID_ELAN3_REVA   0x0000
75798 +#define   PCI_REVISION_ID_ELAN3_REVB   0x0001
75799 +#define PCI_DEVICE_ID_ELAN4            0x0001
75800 +#define   PCI_REVISION_ID_ELAN4_REVA   0x0000
75801 +#define   PCI_REVISION_ID_ELAN4_REVB   0x0001
75802 +
75803 +#if defined(__KERNEL__)
75804 +/* devinfo.c */
75805 +#include <elan/capability.h>
75806 +#include <elan/device.h>
75807 +extern int elan_get_devinfo  (ELAN_DEV_IDX devidx, ELAN_DEVINFO  *devinfo);
75808 +extern int elan_get_position (ELAN_DEV_IDX devidx, ELAN_POSITION *position);
75809 +extern int elan_set_position (ELAN_DEV_IDX devidx, unsigned short nodeId, unsigned short numNodes);
75810 +#endif /* __KERNEL__ */
75811 +
75812 +
75813 +#endif /* __ELAN_DEVINFO_H */
75814 +
75815 +/*
75816 + * Local variables:
75817 + * c-file-style: "linux"
75818 + * End:
75819 + */
75820 Index: linux-2.4.21/include/elan/elanmoddebug.h
75821 ===================================================================
75822 --- linux-2.4.21.orig/include/elan/elanmoddebug.h       2004-02-23 16:02:56.000000000 -0500
75823 +++ linux-2.4.21/include/elan/elanmoddebug.h    2005-06-01 23:12:54.707422688 -0400
75824 @@ -0,0 +1,63 @@
75825 +/*
75826 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
75827 + *
75828 + *    For licensing information please see the supplied COPYING file
75829 + *
75830 + */
75831 +
75832 +#ifndef _ELAN_DEBUG_H
75833 +#define _ELAN_DEBUG_H
75834 +
75835 +
75836 +#ident "$Id: elanmoddebug.h,v 1.5 2003/09/24 13:55:37 david Exp $"
75837 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmoddebug.h,v $ */
75838 +
75839 +#if defined(__KERNEL__)
75840 +
75841 +/* 0 | QSNET_DEBUG_BUFFER | QSNET_DEBUG_CONSOLE */
75842 +extern int elan_debug_mode; 
75843 +extern int elan_debug_mask;
75844 +
75845 +#define ELAN_DBG_VP            0x00000001
75846 +#define ELAN_DBG_CAP            0x00000002
75847 +#define ELAN_DBG_CTRL           0x00000004
75848 +#define ELAN_DBG_SYS_FN         0x00000008
75849 +#define ELAN_DBG_ALL           0xffffffff
75850 +
75851 +
75852 +#if defined(DEBUG_PRINTF)
75853 +#  define ELAN_DEBUG0(m,fmt)                   ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt)             : (void)0)
75854 +#  define ELAN_DEBUG1(m,fmt,a)                 ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a)           : (void)0)
75855 +#  define ELAN_DEBUG2(m,fmt,a,b)               ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b)         : (void)0)
75856 +#  define ELAN_DEBUG3(m,fmt,a,b,c)             ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c)       : (void)0)
75857 +#  define ELAN_DEBUG4(m,fmt,a,b,c,d)           ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d)     : (void)0)
75858 +#  define ELAN_DEBUG5(m,fmt,a,b,c,d,e)         ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d,e)   : (void)0)
75859 +#  define ELAN_DEBUG6(m,fmt,a,b,c,d,e,f)       ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d,e,f) : (void)0)
75860 +#ifdef __GNUC__
75861 +#  define ELAN_DEBUG(m,args...)                        ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode, ##args)         : (void)0)
75862 +#endif
75863 +
75864 +#else
75865 +
75866 +#  define ELAN_DEBUG0(m,fmt)                   (0)
75867 +#  define ELAN_DEBUG1(m,fmt,a)                 (0)
75868 +#  define ELAN_DEBUG2(m,fmt,a,b)               (0)
75869 +#  define ELAN_DEBUG3(m,fmt,a,b,c)             (0)
75870 +#  define ELAN_DEBUG4(m,fmt,a,b,c,d)           (0)
75871 +#  define ELAN_DEBUG5(m,fmt,a,b,c,d,e)         (0)
75872 +#  define ELAN_DEBUG6(m,fmt,a,b,c,d,e,f)       (0)
75873 +#ifdef __GNUC__
75874 +#  define ELAN_DEBUG(m,args...)
75875 +#endif
75876 +
75877 +#endif /* DEBUG_PRINTF */
75878 +
75879 +
75880 +#endif /* __KERNEL__ */
75881 +#endif /* _ELAN_DEBUG_H */
75882 +
75883 +/*
75884 + * Local variables:
75885 + * c-file-style: "linux"
75886 + * End:
75887 + */
75888 Index: linux-2.4.21/include/elan/elanmod.h
75889 ===================================================================
75890 --- linux-2.4.21.orig/include/elan/elanmod.h    2004-02-23 16:02:56.000000000 -0500
75891 +++ linux-2.4.21/include/elan/elanmod.h 2005-06-01 23:12:54.708422536 -0400
75892 @@ -0,0 +1,59 @@
75893 +/*
75894 + *    Copyright (c) 2003 by Quadrics Limited.
75895 + * 
75896 + *    For licensing information please see the supplied COPYING file
75897 + *
75898 + */
75899 +
75900 +#ident "@(#)$Id: elanmod.h,v 1.10 2004/06/18 09:28:16 mike Exp $"
75901 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod.h,v $*/
75902 +
75903 +#ifndef __ELAN_MOD_H
75904 +#define __ELAN_MOD_H
75905 +
75906 +#include <elan/devinfo.h>
75907 +#include <elan/device.h>
75908 +#include <elan/capability.h>
75909 +#include <elan/stats.h>
75910 +
75911 +#if defined(__KERNEL__)
75912 +
75913 +#include <elan/elanmoddebug.h>
75914 +
75915 +extern kmutex_t elan_mutex;
75916 +
75917 +/* elan_general.c */
75918 +extern int elan_init(void);
75919 +extern int elan_fini(void);
75920 +
75921 +/* return codes, -ve => errno, +ve => success */
75922 +#define ELAN_CAP_OK  (0)
75923 +#define ELAN_CAP_RMS (1)
75924 +
75925 +#define ELAN_USER_ATTACH    (1)
75926 +#define ELAN_USER_DETACH    (2)
75927 +#define ELAN_USER_P2P       (3)
75928 +#define ELAN_USER_BROADCAST (4)
75929 +
75930 +extern int elanmod_classify_cap (ELAN_POSITION *position, ELAN_CAPABILITY *cap, unsigned use);
75931 +
75932 +#define ELAN_USER_BASE_CONTEXT_NUM     0x000                   /* first user allowable context */
75933 +#define ELAN_USER_TOP_CONTEXT_NUM      0x7FF                   /* last user allowable context */
75934 +
75935 +#define ELAN_RMS_BASE_CONTEXT_NUM      0x400                   /* reserved for RMS allocation */
75936 +#define ELAN_RMS_TOP_CONTEXT_NUM       0x7FF
75937 +
75938 +#define ELAN_USER_CONTEXT(ctx)         ((ctx) >= ELAN_USER_BASE_CONTEXT_NUM && \
75939 +                                        (ctx) <= ELAN_USER_TOP_CONTEXT_NUM)    
75940 +
75941 +#define ELAN_RMS_CONTEXT(ctx)          ((ctx) >= ELAN_RMS_BASE_CONTEXT_NUM && \
75942 +                                        (ctx) <= ELAN_RMS_TOP_CONTEXT_NUM)    
75943 +#endif /* __KERNEL__ */
75944 +
75945 +#endif /* __ELAN_MOD_H */
75946 +
75947 +/*
75948 + * Local variables:
75949 + * c-file-style: "linux"
75950 + * End:
75951 + */
75952 Index: linux-2.4.21/include/elan/elanmod_linux.h
75953 ===================================================================
75954 --- linux-2.4.21.orig/include/elan/elanmod_linux.h      2004-02-23 16:02:56.000000000 -0500
75955 +++ linux-2.4.21/include/elan/elanmod_linux.h   2005-06-01 23:12:54.708422536 -0400
75956 @@ -0,0 +1,140 @@
75957 +/*
75958 + *    Copyright (c) 2003 by Quadrics Ltd.
75959 + * 
75960 + *    For licensing information please see the supplied COPYING file
75961 + *
75962 + */
75963 +
75964 +#ident "@(#)$Id: elanmod_linux.h,v 1.6 2003/09/29 15:36:20 mike Exp $"
75965 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod_linux.h,v $*/
75966 +
75967 +#ifndef __ELAN_MOD_LINUX_H
75968 +#define __ELAN_MOD_LINUX_H
75969 +
75970 +#define ELANCRTL_USER_BASE             0x40
75971 +
75972 +/* stats */
75973 +typedef struct elanctrl_stats_get_next_struct
75974 +{
75975 +       ELAN_STATS_IDX   statidx; 
75976 +       ELAN_STATS_IDX  *next_statidx; /* return value */
75977 +} ELANCTRL_STATS_GET_NEXT_STRUCT;
75978 +#define ELANCTRL_STATS_GET_NEXT   _IOR   ('e', ELANCRTL_USER_BASE + 0,  ELANCTRL_STATS_GET_NEXT_STRUCT)
75979 +
75980 +typedef struct elanctrl_stats_find_index_struct
75981 +{
75982 +       caddr_t          block_name;
75983 +       ELAN_STATS_IDX  *statidx; /* return value */
75984 +       uint        *num_entries; /* return value */
75985 +} ELANCTRL_STATS_FIND_INDEX_STRUCT;
75986 +#define ELANCTRL_STATS_FIND_INDEX   _IOR   ('e', ELANCRTL_USER_BASE + 1,  ELANCTRL_STATS_FIND_INDEX_STRUCT)
75987 +
75988 +typedef struct elanctrl_stats_get_block_info_struct
75989 +{
75990 +       ELAN_STATS_IDX  statidx; 
75991 +       caddr_t       block_name; /* return value */
75992 +       uint        *num_entries; /* return value */
75993 +} ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT;
75994 +#define ELANCTRL_STATS_GET_BLOCK_INFO   _IOR   ('e', ELANCRTL_USER_BASE + 2, ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT)
75995 +
75996 +typedef struct elanctrl_stats_get_index_name_struct
75997 +{
75998 +       ELAN_STATS_IDX statidx; 
75999 +       uint           index;
76000 +       caddr_t        name; /* return value */
76001 +} ELANCTRL_STATS_GET_INDEX_NAME_STRUCT;
76002 +#define ELANCTRL_STATS_GET_INDEX_NAME   _IOR   ('e', ELANCRTL_USER_BASE + 3, ELANCTRL_STATS_GET_INDEX_NAME_STRUCT)
76003 +
76004 +typedef struct elanctrl_stats_clear_block_struct
76005 +{
76006 +       ELAN_STATS_IDX statidx; 
76007 +} ELANCTRL_STATS_CLEAR_BLOCK_STRUCT;
76008 +#define ELANCTRL_STATS_CLEAR_BLOCK   _IOR   ('e', ELANCRTL_USER_BASE + 4, ELANCTRL_STATS_CLEAR_BLOCK_STRUCT)
76009 +
76010 +typedef struct elanctrl_stats_get_block_struct
76011 +{
76012 +       ELAN_STATS_IDX statidx; 
76013 +       uint           entries;  
76014 +       ulong         *values; /* return values */
76015 +} ELANCTRL_STATS_GET_BLOCK_STRUCT;
76016 +#define ELANCTRL_STATS_GET_BLOCK        _IOR   ('e', ELANCRTL_USER_BASE + 5, ELANCTRL_STATS_GET_BLOCK_STRUCT)
76017 +
76018 +
76019 +typedef struct elanctrl_get_devinfo_struct
76020 +{
76021 +       ELAN_DEV_IDX  devidx; 
76022 +       ELAN_DEVINFO *devinfo; /* return values */
76023 +} ELANCTRL_GET_DEVINFO_STRUCT;
76024 +#define ELANCTRL_GET_DEVINFO        _IOR   ('e', ELANCRTL_USER_BASE + 6, ELANCTRL_GET_DEVINFO_STRUCT)
76025 +
76026 +typedef struct elanctrl_get_position_struct
76027 +{
76028 +       ELAN_DEV_IDX   devidx; 
76029 +       ELAN_POSITION *position; /* return values */
76030 +} ELANCTRL_GET_POSITION_STRUCT;
76031 +#define ELANCTRL_GET_POSITION        _IOR   ('e', ELANCRTL_USER_BASE + 7, ELANCTRL_GET_POSITION_STRUCT)
76032 +
76033 +typedef struct elanctrl_set_position_struct
76034 +{
76035 +       ELAN_DEV_IDX   devidx; 
76036 +       unsigned short nodeId;
76037 +       unsigned short numNodes;
76038 +} ELANCTRL_SET_POSITION_STRUCT;
76039 +#define ELANCTRL_SET_POSITION        _IOR   ('e', ELANCRTL_USER_BASE + 8, ELANCTRL_SET_POSITION_STRUCT)
76040 +
76041 +typedef struct elanctrl_create_cap_struct
76042 +{
76043 +       ELAN_CAPABILITY cap;
76044 +} ELANCTRL_CREATE_CAP_STRUCT;
76045 +#define ELANCTRL_CREATE_CAP             _IOW   ('e', ELANCRTL_USER_BASE + 9, ELANCTRL_CREATE_CAP_STRUCT)
76046 +
76047 +typedef struct elanctrl_destroy_cap_struct
76048 +{
76049 +       ELAN_CAPABILITY cap;
76050 +} ELANCTRL_DESTROY_CAP_STRUCT;
76051 +#define ELANCTRL_DESTROY_CAP             _IOW   ('e', ELANCRTL_USER_BASE + 10, ELANCTRL_DESTROY_CAP_STRUCT)
76052 +
76053 +typedef struct elanctrl_create_vp_struct
76054 +{
76055 +       ELAN_CAPABILITY cap;
76056 +       ELAN_CAPABILITY map;
76057 +} ELANCTRL_CREATE_VP_STRUCT;
76058 +#define ELANCTRL_CREATE_VP             _IOW   ('e', ELANCRTL_USER_BASE + 11, ELANCTRL_CREATE_VP_STRUCT)
76059 +
76060 +typedef struct elanctrl_destroy_vp_struct
76061 +{
76062 +       ELAN_CAPABILITY cap;
76063 +       ELAN_CAPABILITY map;
76064 +} ELANCTRL_DESTROY_VP_STRUCT;
76065 +#define ELANCTRL_DESTROY_VP          _IOW   ('e', ELANCRTL_USER_BASE + 12, ELANCTRL_DESTROY_VP_STRUCT)
76066 +
76067 +#define ELANCTRL_DEBUG_DUMP          _IO    ('e', ELANCRTL_USER_BASE + 13)
76068 +
76069 +typedef struct elanctrl_get_caps_struct
76070 +{
76071 +       uint            *number_of_results;
76072 +       uint             array_size;
76073 +       ELAN_CAP_STRUCT *caps;
76074 +} ELANCTRL_GET_CAPS_STRUCT;
76075 +#define ELANCTRL_GET_CAPS          _IOW   ('e', ELANCRTL_USER_BASE + 14, ELANCTRL_GET_CAPS_STRUCT)
76076 +
76077 +
76078 +typedef struct elanctrl_debug_buffer_struct
76079 +{
76080 +       caddr_t buffer;
76081 +       int     size;
76082 +} ELANCTRL_DEBUG_BUFFER_STRUCT;
76083 +#define ELANCTRL_DEBUG_BUFFER _IOW ('e', ELANCRTL_USER_BASE + 15, ELANCTRL_DEBUG_BUFFER_STRUCT)
76084 +
76085 +#define ELANMOD_PROCFS_IOCTL      "/proc/qsnet/elan/ioctl"
76086 +#define ELANMOD_PROCFS_VERSION    "/proc/qsnet/elan/version"
76087 +#define ELANMOD_PROCFS_DEBUG_MASK "/proc/qsnet/elan/debug_mask"
76088 +#define ELANMOD_PROCFS_DEBUG_MODE "/proc/qsnet/elan/debug_mode"
76089 +
76090 +#endif /* __ELAN_MOD_LINUX_H */
76091 +
76092 +/*
76093 + * Local variables:
76094 + * c-file-style: "linux"
76095 + * End:
76096 + */
76097 Index: linux-2.4.21/include/elan/elanmod_subsystem.h
76098 ===================================================================
76099 --- linux-2.4.21.orig/include/elan/elanmod_subsystem.h  2004-02-23 16:02:56.000000000 -0500
76100 +++ linux-2.4.21/include/elan/elanmod_subsystem.h       2005-06-01 23:12:54.708422536 -0400
76101 @@ -0,0 +1,138 @@
76102 +/*
76103 + *    Copyright (c) 2003 by Quadrics Limited.
76104 + * 
76105 + *    For licensing information please see the supplied COPYING file
76106 + *
76107 + */
76108 +
76109 +#ifndef __ELAN_SUBSYSTEM_H
76110 +#define __ELAN_SUBSYSTEM_H
76111 +
76112 +#include <sys/types.h>
76113 +#include <sys/param.h>
76114 +
76115 +#if defined( __KERNEL__) 
76116 +int elan_configure(
76117 +    cfg_op_t op,
76118 +    caddr_t  indata,
76119 +    ulong    indata_size,
76120 +    caddr_t  outdata,
76121 +    ulong    outdata_size);
76122 +#endif
76123 +
76124 +#define ELAN_KMOD_CODE(x)      ((x)+CFG_OP_SUBSYS_MIN)
76125 +#define ELAN_MAX_KMOD_CODES 100
76126 +
76127 +#define ELAN_SUBSYS "elan"
76128 +
76129 +#define ELAN_STATS_GET_NEXT    0x01
76130 +typedef struct {
76131 +       ELAN_STATS_IDX statidx;
76132 +       ELAN_STATS_IDX *next_statidx;   
76133 +} elan_stats_get_next_struct;
76134 +
76135 +
76136 +#define ELAN_STATS_FIND_INDEX   0x02
76137 +typedef struct {
76138 +       caddr_t          block_name;
76139 +       ELAN_STATS_IDX  *statidx; /* return value */
76140 +       uint        *num_entries; /* return value */
76141 +} elan_stats_find_index_struct;
76142 +
76143 +#define ELAN_STATS_GET_BLOCK_INFO  0x03
76144 +typedef struct {
76145 +       ELAN_STATS_IDX  statidx; 
76146 +       caddr_t       block_name; /* return value */
76147 +       uint        *num_entries; /* return value */
76148 +} elan_stats_get_block_info_struct;
76149 +
76150 +#define ELAN_STATS_GET_INDEX_NAME  0x04
76151 +typedef struct {
76152 +       ELAN_STATS_IDX statidx; 
76153 +       uint           index;
76154 +       caddr_t        name; /* return value */
76155 +} elan_stats_get_index_name_struct;
76156 +
76157 +#define ELAN_STATS_CLEAR_BLOCK  0x05
76158 +typedef struct {
76159 +       ELAN_STATS_IDX statidx; 
76160 +} elan_stats_clear_block_struct;
76161 +
76162 +#define ELAN_STATS_GET_BLOCK     0x06
76163 +typedef struct 
76164 +{
76165 +       ELAN_STATS_IDX statidx; 
76166 +       uint           entries;  
76167 +       ulong         *values; /* return values */
76168 +} elan_stats_get_block_struct;
76169 +
76170 +#define ELAN_GET_DEVINFO     0x07
76171 +typedef struct 
76172 +{
76173 +       ELAN_DEV_IDX  devidx; 
76174 +       ELAN_DEVINFO *devinfo; /* return values */
76175 +} elan_get_devinfo_struct;
76176 +
76177 +#define ELAN_GET_POSITION  0x08
76178 +typedef struct {
76179 +       ELAN_DEV_IDX   devidx; 
76180 +       ELAN_POSITION *position; /* return values */
76181 +} elan_get_position_struct;
76182 +
76183 +#define ELAN_SET_POSITION   0x09
76184 +typedef struct {
76185 +       ELAN_DEV_IDX   devidx; 
76186 +       unsigned short nodeId;
76187 +       unsigned short numNodes;
76188 +} elan_set_position_struct;
76189 +
76190 +#define ELAN_CREATE_CAP  0x0a
76191 +typedef struct {
76192 +       ELAN_CAPABILITY cap;
76193 +} elan_create_cap_struct;
76194 +
76195 +#define ELAN_DESTROY_CAP    0x0b
76196 +typedef struct {
76197 +       ELAN_CAPABILITY cap;
76198 +} elan_destroy_cap_struct;
76199 +
76200 +#define ELAN_CREATE_VP   0x0c
76201 +typedef struct {
76202 +       ELAN_CAPABILITY cap;
76203 +       ELAN_CAPABILITY map;
76204 +} elan_create_vp_struct;
76205 +
76206 +#define ELAN_DESTROY_VP    0x0d
76207 +typedef struct {
76208 +       ELAN_CAPABILITY cap;
76209 +       ELAN_CAPABILITY map;
76210 +} elan_destroy_vp_struct;
76211 +
76212 +
76213 +#define ELAN_DEBUG_DUMP   0x0e
76214 +
76215 +#define ELAN_GET_CAPS    0x0f
76216 +typedef struct {
76217 +       uint            *number_of_results;
76218 +       uint             array_size;
76219 +       ELAN_CAP_STRUCT *caps;
76220 +} elan_get_caps_struct;
76221 +
76222 +#define ELAN_DEBUG_BUFFER 0x10
76223 +typedef struct {
76224 +       caddr_t addr;
76225 +       int     len;
76226 +} elan_debug_buffer_struct;
76227 +
76228 +#define ELANMOD_PROCFS_IOCTL      "/proc/qsnet/elan/ioctl"
76229 +#define ELANMOD_PROCFS_VERSION    "/proc/qsnet/elan/version"
76230 +#define ELANMOD_PROCFS_DEBUG_MASK "/proc/qsnet/elan/debug_mask"
76231 +#define ELANMOD_PROCFS_DEBUG_MODE "/proc/qsnet/elan/debug_mode"
76232 +
76233 +#endif /* __ELAN_SUBSYSTEM_H */
76234 +
76235 +/*
76236 + * Local variables:
76237 + * c-file-style: "linux"
76238 + * End:
76239 + */
76240 Index: linux-2.4.21/include/elan/epcomms.h
76241 ===================================================================
76242 --- linux-2.4.21.orig/include/elan/epcomms.h    2004-02-23 16:02:56.000000000 -0500
76243 +++ linux-2.4.21/include/elan/epcomms.h 2005-06-01 23:12:54.710422232 -0400
76244 @@ -0,0 +1,635 @@
76245 +/*
76246 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
76247 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
76248 + *
76249 + *    For licensing information please see the supplied COPYING file
76250 + *
76251 + */
76252 +
76253 +#ifndef __ELAN_EPCOMMS_H
76254 +#define __ELAN_EPCOMMS_H
76255 +
76256 +#ident "$Id: epcomms.h,v 1.44.2.2 2004/11/12 10:54:50 mike Exp $"
76257 +/*      $Source: /cvs/master/quadrics/epmod/epcomms.h,v $ */
76258 +
76259 +#include <elan/kcomm.h>
76260 +#include <elan/bitmap.h>
76261 +
76262 +#define EPCOMMS_SUBSYS_NAME    "epcomms"
76263 +
76264 +/* message service numbers */
76265 +#define EP_MSG_SVC_EIP512              0x00                            /* Quadrics EIP services */
76266 +#define EP_MSG_SVC_EIP1K               0x01
76267 +#define EP_MSG_SVC_EIP2K               0x02
76268 +#define EP_MSG_SVC_EIP4K               0x03
76269 +#define EP_MSG_SVC_EIP8K               0x04
76270 +#define EP_MSG_SVC_EIP16K              0x05
76271 +#define EP_MSG_SVC_EIP32K              0x06
76272 +#define EP_MSG_SVC_EIP64K              0x07
76273 +#define EP_MSG_SVC_EIP128K             0x08
76274 +
76275 +#define EP_MSG_SVC_PFS                 0x09                            /* Quadrics PFS rpc service */
76276 +
76277 +#define EP_MSG_SVC_PORTALS_SMALL       0x10                            /* Lustre Portals */
76278 +#define EP_MSG_SVC_PORTALS_LARGE       0x11
76279 +
76280 +#define EP_MSG_NSVC                    0x40                            /* Max number of services */
76281 +
76282 +#define EP_MSGQ_ADDR(qnum)             (EP_EPCOMMS_QUEUE_BASE + (qnum) * EP_QUEUE_DESC_SIZE)
76283 +
76284 +/*
76285 + * EP_ENVELOPE
76286 + *   Messages are sent by sending an envelope to the destination
76287 + *   describing the source buffers to transfer.  The receiving thread
76288 + *   then allocates a receive buffer and fetches the data by issuing
76289 + *   "get" dmas.
76290 + *
76291 + * NOTE:  envelopes are not explicitly converted to network byte order
76292 + *        since they are always transferred little endian as they are
76293 + *        copied to/from elan memory using word operations.
76294 + */
76295 +typedef struct ep_envelope
76296 +{
76297 +    uint32_t     Version;                                      /* Protocol version field */
76298 +
76299 +    EP_ATTRIBUTE  Attr;                                        /* Attributes */
76300 +
76301 +    EP_XID       Xid;                                          /* transaction id */
76302 +
76303 +    uint32_t     NodeId;                                       /* Source processor */
76304 +    uint32_t     Range;                                        /* range we're sending to (high << 16 | low) */
76305 +
76306 +    EP_ADDR      TxdRail;                                      /* address of per-rail txd */
76307 +    EP_NMD       TxdMain;                                      /* address of main memory portion of txd */
76308 +
76309 +    uint32_t      nFrags;                                      /* # fragments */
76310 +    EP_NMD       Frags[EP_MAXFRAG];                            /* network mapping handles of source data */
76311 +
76312 +    uint32_t      CheckSum;                                     /* holds the check sum value when active 
76313 +                                                                * must be after all members to be checksum'd
76314 +                                                                */
76315 +
76316 +    uint32_t     Pad[6];                                       /* Pad to 128 bytes */
76317 +} EP_ENVELOPE;
76318 +
76319 +#define EP_ENVELOPE_VERSION            0xdac10001
76320 +#define EP_ENVELOPE_SIZE               roundup (sizeof (EP_ENVELOPE), EP_BLK_SIZE)
76321 +
76322 +/*
76323 + * RPC payload - this small amount of data is transferred in
76324 + * the envelope for RPCs
76325 + */
76326 +typedef struct ep_payload
76327 +{
76328 +    uint32_t   Data[128/sizeof(uint32_t)];
76329 +} EP_PAYLOAD;
76330 +
76331 +#define EP_PAYLOAD_SIZE                        roundup (sizeof (EP_PAYLOAD), EP_BLK_SIZE)
76332 +
76333 +#define EP_INPUTQ_SIZE                 (EP_ENVELOPE_SIZE + EP_PAYLOAD_SIZE)
76334 +
76335 +/*
76336 + * EP_STATUSBLK
76337 + *   RPC completion transfers a status block to the client.
76338 + */
76339 +typedef struct ep_statusblk
76340 +{
76341 +    uint32_t   Data[128/sizeof(uint32_t)];
76342 +} EP_STATUSBLK;
76343 +
76344 +#define EP_STATUSBLK_SIZE              roundup (sizeof(EP_STATUSBLK), EP_BLK_SIZE)
76345 +
76346 +#define EP_RANGE(low,high)             ((high) << 16 | (low))
76347 +#define EP_RANGE_LOW(range)            ((range) & 0xFFFF)
76348 +#define EP_RANGE_HIGH(range)           (((range) >> 16) & 0xFFFF)
76349 +
76350 +/* return codes from functions, + 'res' parameter to txd callback, ep_rxd_status() */
76351 +typedef enum
76352 +{
76353 +    EP_SUCCESS         = 0,                                    /* message sent/received successfully */
76354 +    EP_RXD_PENDING     = -1,                                   /* rxd not completed by thread */
76355 +    EP_CONN_RESET      = -2,                                   /* virtual circuit reset */
76356 +    EP_NODE_DOWN       = -3,                                   /* node down - transmit not attempted */
76357 +    EP_MSG_TOO_BIG      = -4,                                  /* received message larger than buffer */
76358 +    EP_ENOMEM          = -5,                                   /* memory alloc failed */
76359 +    EP_EINVAL          = -6,                                   /* invalid parameters */
76360 +    EP_SHUTDOWN                = -7,                                   /* receiver is being shut down */
76361 +} EP_STATUS;
76362 +
76363 +/* forward declarations */
76364 +typedef struct ep_rxd          EP_RXD;
76365 +typedef struct ep_txd          EP_TXD;
76366 +typedef struct ep_rcvr_rail    EP_RCVR_RAIL;
76367 +typedef struct ep_rcvr         EP_RCVR;
76368 +typedef struct ep_xmtr_rail    EP_XMTR_RAIL;
76369 +typedef struct ep_xmtr         EP_XMTR;
76370 +typedef struct ep_comms_rail    EP_COMMS_RAIL;
76371 +typedef struct ep_comms_subsys  EP_COMMS_SUBSYS;
76372 +
76373 +typedef struct ep_rcvr_stats           EP_RCVR_STATS;
76374 +typedef struct ep_xmtr_stats           EP_XMTR_STATS;
76375 +typedef struct ep_rcvr_rail_stats      EP_RCVR_RAIL_STATS;
76376 +typedef struct ep_xmtr_rail_stats      EP_XMTR_RAIL_STATS;
76377 +
76378 +typedef void (EP_RXH)(EP_RXD *rxd);                            /* callback function from receive completion */
76379 +typedef void (EP_TXH)(EP_TXD *txd, void *arg, EP_STATUS res);  /* callback function from transmit completion  */
76380 +
76381 +/* Main memory portion shared descriptor */
76382 +typedef struct ep_rxd_main
76383 +{
76384 +    EP_ENVELOPE                Envelope;                               /* 128 byte aligned envelope */
76385 +    EP_PAYLOAD         Payload;                                /* 128 byte aligned payload */
76386 +    bitmap_t           Bitmap[BT_BITOUL(EP_MAX_NODES)];        /* broadcast bitmap */
76387 +    EP_STATUSBLK       StatusBlk;                              /* RPC status block to return */
76388 +    uint64_t           Next;                                   /* linked list when on active list (main address) */
76389 +    int32_t            Len;                                    /* Length of message received */
76390 +} EP_RXD_MAIN;
76391 +
76392 +#define EP_RXD_MAIN_SIZE       roundup (sizeof (EP_RXD_MAIN), EP_BLK_SIZE)
76393 +
76394 +/* Phases for message/rpc */
76395 +#ifndef __ELAN__
76396 +
76397 +/* Kernel memory portion of per-rail receive descriptor */
76398 +typedef struct ep_rxd_rail
76399 +{
76400 +    struct list_head    Link;                                  /* linked on freelist */
76401 +    EP_RCVR_RAIL       *RcvrRail;                              /* rcvr we're associated with */
76402 +    
76403 +    EP_RXD            *Rxd;                                    /* receive descriptor we're bound to */
76404 +} EP_RXD_RAIL;
76405 +
76406 +#define RXD_BOUND2RAIL(rxdRail,rcvrRail)       ((rxdRail) != NULL && ((EP_RXD_RAIL *) (rxdRail))->RcvrRail == (EP_RCVR_RAIL *) rcvrRail)
76407 +
76408 +struct ep_rxd
76409 +{
76410 +    struct list_head   Link;                                   /* linked on free/active list */
76411 +    EP_RCVR           *Rcvr;                                   /* owning receiver */
76412 +
76413 +    EP_RXD_MAIN               *RxdMain;                                /* shared main memory portion. */
76414 +    EP_NMD             NmdMain;                                /*  and network mapping descriptor */
76415 +
76416 +    EP_RXD_RAIL               *RxdRail;                                /* per-rail rxd we're bound to */
76417 +    
76418 +    EP_RXH            *Handler;                                /* completion function */
76419 +    void              *Arg;                                    /*    and arguement */
76420 +
76421 +    unsigned int       State;                                  /* RXD status (active,stalled,failed) */
76422 +
76423 +    EP_NMD             Data;                                   /* network mapping descriptor for user buffer */
76424 +
76425 +    int                        nFrags;                                 /* network mapping descriptor for put/get/complete */
76426 +    EP_NMD             Local[EP_MAXFRAG];
76427 +    EP_NMD             Remote[EP_MAXFRAG];
76428 +
76429 +    long               NextRunTime;                            /* time to resend failover/map requests */
76430 +    EP_XID             MsgXid;                                 /*   and transaction id */
76431 +
76432 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
76433 +    struct list_head   CheckSumLink;                           /* linked on check sum list */
76434 +#endif
76435 +};
76436 +
76437 +#define EP_NUM_RXD_PER_BLOCK   16
76438 +
76439 +/* rxd->State */
76440 +#define EP_RXD_FREE            0
76441 +
76442 +#define EP_RXD_RECEIVE_UNBOUND 1
76443 +#define EP_RXD_RECEIVE_ACTIVE  2
76444 +
76445 +#define EP_RXD_PUT_ACTIVE      3
76446 +#define EP_RXD_PUT_STALLED     4
76447 +#define EP_RXD_GET_ACTIVE      5
76448 +#define EP_RXD_GET_STALLED     6
76449 +
76450 +#define EP_RXD_COMPLETE_ACTIVE 7
76451 +#define EP_RXD_COMPLETE_STALLED        8
76452 +
76453 +#define EP_RXD_RPC_IN_PROGRESS 9
76454 +#define EP_RXD_COMPLETED       10      
76455 +
76456 +#define EP_RXD_BEEN_ABORTED    11                              /* rxd was aborted while in a private state */
76457 +
76458 +typedef struct ep_rxd_block
76459 +{
76460 +    struct list_head   Link;
76461 +
76462 +    EP_NMD             NmdMain;
76463 +
76464 +    EP_RXD             Rxd[EP_NUM_RXD_PER_BLOCK];
76465 +} EP_RXD_BLOCK;
76466 +
76467 +struct ep_rcvr_rail_stats 
76468 +{
76469 +    EP_STATS_COUNT rx;
76470 +    EP_STATS_COUNT rx_len;
76471 +};
76472 +
76473 +struct ep_rcvr_rail
76474 +{
76475 +    EP_RCVR           *Rcvr;                                   /* associated receiver */
76476 +    EP_COMMS_RAIL      *CommsRail;                             /* comms rail */
76477 +
76478 +    struct proc_dir_entry *procfs_root;                         /* root of this rcvr_rail's procfs entry */
76479 +    EP_RCVR_RAIL_STATS     stats;                               /* generic rcvr_rail stats */
76480 +};
76481 +
76482 +struct ep_rcvr_stats
76483 +{
76484 +    EP_STATS_COUNT rx;
76485 +    EP_STATS_COUNT rx_len;
76486 +};
76487 +
76488 +struct ep_rcvr
76489 +{
76490 +    struct list_head  Link;                                    /* queued on subsystem */
76491 +    EP_COMMS_SUBSYS  *Subsys;                                  /* kernel comms subsystem */
76492 +    EP_SERVICE        Service;                                 /* service number */
76493 +
76494 +    unsigned int      InputQueueEntries;                       /* # entries on receive queue */
76495 +
76496 +    EP_RAILMASK              RailMask;                                 /* bitmap of which rails are available */
76497 +    EP_RCVR_RAIL     *Rails[EP_MAX_RAILS];
76498 +
76499 +    spinlock_t       Lock;                                     /* spinlock for rails/receive lists */
76500 +
76501 +    struct list_head  ActiveDescList;                          /* List of pending/active receive descriptors */
76502 +
76503 +    EP_XID_CACHE      XidCache;                                        /* XID cache (protected by Lock) */
76504 +
76505 +    struct list_head  FreeDescList;                            /* List of free receive descriptors */
76506 +    unsigned int      FreeDescCount;                           /*   and number on free list */
76507 +    unsigned int      TotalDescCount;                           /*   total number created */
76508 +    spinlock_t       FreeDescLock;                             /*   and lock for free list */
76509 +    kcondvar_t       FreeDescSleep;                            /*   with place to sleep for rx desc */
76510 +    int                      FreeDescWanted;                           /*   and flag */
76511 +    struct list_head  DescBlockList;
76512 +
76513 +    unsigned int      ForwardRxdCount;                         /* count of rxd's being forwarded */
76514 +    unsigned int      CleanupWaiting;                          /* waiting for cleanup */
76515 +    kcondvar_t       CleanupSleep;                             /*   and place to sleep */
76516 +
76517 +    struct proc_dir_entry *procfs_root;                         /* place where this rcvr's proc entry is */
76518 +    EP_RCVR_STATS          stats;                                    
76519 +};
76520 +
76521 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
76522 +#define EP_ENVELOPE_CHECK_SUM      (1<<31)
76523 +extern uint32_t ep_calc_check_sum (EP_SYS *sys, EP_ENVELOPE *env, EP_NMD *nmd, int nFrags);
76524 +#endif
76525 +
76526 +#endif /* ! __ELAN__ */
76527 +
76528 +typedef struct ep_txd_main
76529 +{
76530 +    EP_STATUSBLK      StatusBlk;                               /* RPC status block */
76531 +    bitmap_t          Bitmap[BT_BITOUL(EP_MAX_NODES)];         /* broadcast bitmap */
76532 +} EP_TXD_MAIN;
76533 +
76534 +#define EP_TXD_MAIN_SIZE       roundup (sizeof (EP_TXD_MAIN), EP_BLK_SIZE)
76535 +
76536 +#ifndef __ELAN__
76537 +typedef struct ep_txd_rail
76538 +{
76539 +    struct list_head  Link;                                    /* linked on freelist */
76540 +    EP_XMTR_RAIL     *XmtrRail;                                        /* xmtr we're associated with */
76541 +
76542 +    EP_TXD          *Txd;                                      /* txd we're bound to */
76543 +} EP_TXD_RAIL;
76544 +
76545 +#define TXD_BOUND2RAIL(rxdRail,xmtrRail)       ((txdRail) != NULL && ((EP_TXD_RAIL *) (txdRail))->XmtrRail == (EP_XMTR_RAIL *) xmtrRail)
76546 +
76547 +struct ep_txd
76548 +{
76549 +    struct list_head  Link;                                    /* linked on free/active list */
76550 +    EP_XMTR         *Xmtr;                                     /* service we're associated with */
76551 +
76552 +    EP_TXD_MAIN             *TxdMain;                                  /* shared main memory portion */
76553 +    EP_NMD           NmdMain;                                  /*   and network mapping descriptor */
76554 +
76555 +    EP_TXD_RAIL      *TxdRail;                                 /* per-rail txd for this phase */
76556 +
76557 +    EP_TXH          *Handler;                                  /* completion function */
76558 +    void            *Arg;                                      /*    and argument */
76559 +    
76560 +    unsigned short    NodeId;                                  /* node transmit is to. */
76561 +    EP_SERVICE        Service;                                 /*    and service */
76562 +
76563 +    long              TimeStamp;                                 /* time we where created at, to find sends taking too long */
76564 +    long             RetryTime;
76565 +    EP_BACKOFF       Backoff;
76566 +
76567 +    EP_ENVELOPE              Envelope;                                 /* envelope for transmit */
76568 +    EP_PAYLOAD       Payload;                                  /* payload for transmit */
76569 +};
76570 +
76571 +#define EP_NUM_TXD_PER_BLOCK   16
76572 +
76573 +/* "phase" parameter to BindTxd */
76574 +#define EP_TXD_PHASE_ACTIVE            1
76575 +#define EP_TXD_PHASE_PASSIVE           2
76576 +
76577 +typedef struct ep_txd_block
76578 +{
76579 +    struct list_head   Link;
76580 +    EP_NMD             NmdMain;
76581 +    EP_TXD             Txd[EP_NUM_TXD_PER_BLOCK];              /* transmit descriptors */
76582 +} EP_TXD_BLOCK;
76583 +
76584 +struct ep_xmtr_rail_stats
76585 +{
76586 +    EP_STATS_COUNT tx;
76587 +    EP_STATS_COUNT tx_len;
76588 +};
76589 +
76590 +struct ep_xmtr_rail
76591 +{
76592 +    EP_COMMS_RAIL      *CommsRail;                             /* associated comms rail */
76593 +    EP_XMTR           *Xmtr;                                   /* associated transmitter */
76594 +
76595 +    struct proc_dir_entry *procfs_root;                         /* place where this xmtr's proc entry is */
76596 +
76597 +    EP_XMTR_RAIL_STATS     stats;
76598 +};
76599 +
76600 +struct ep_xmtr_stats
76601 +{
76602 +    EP_STATS_COUNT tx;
76603 +    EP_STATS_COUNT tx_len;
76604 +};
76605 +
76606 +struct ep_xmtr
76607 +{
76608 +    struct list_head  Link;                                    /* Linked on subsys */
76609 +    EP_COMMS_SUBSYS  *Subsys;                                  /* kernel comms subsystem */
76610 +
76611 +    EP_RAILMASK              RailMask;                                 /* bitmap of which rails are available */
76612 +    EP_XMTR_RAIL     *Rails[EP_MAX_RAILS];                     /* per-rail state */
76613 +
76614 +    spinlock_t       Lock;                                     /* lock for active descriptor list */
76615 +
76616 +    struct list_head  ActiveDescList;                          /* list of active transmit descriptors */
76617 +
76618 +    EP_XID_CACHE      XidCache;                                        /* XID cache (protected by Lock) */
76619 +
76620 +    struct list_head  FreeDescList;                            /* List of free receive descriptors */
76621 +    unsigned int      FreeDescCount;                           /*   and number on free list */
76622 +    unsigned int      TotalDescCount;
76623 +    spinlock_t       FreeDescLock;                             /*   and lock for free list */
76624 +    kcondvar_t       FreeDescSleep;                            /*   with place to sleep for rx desc */
76625 +    int                      FreeDescWanted;                           /*   and flag */
76626 +    struct list_head  DescBlockList;
76627 +
76628 +    struct proc_dir_entry *procfs_root;                         /* place where this rcvr's proc entry is */
76629 +    EP_XMTR_STATS          stats;   
76630 +};
76631 +
76632 +/* forward descriptor */
76633 +#define EP_TREE_ARITY          3
76634 +
76635 +typedef struct ep_fwd_desc
76636 +{
76637 +    struct list_head    Link;                                  /* linked on forward/free lists */
76638 +    EP_RXD            *Rxd;                                    /* rxd to forward */
76639 +    EP_NMD             Data;                                   /* nmd of subset of receive buffer */
76640 +    unsigned           NumChildren;                            /*   number of places we're forwarding */
76641 +    unsigned           Children[EP_TREE_ARITY];
76642 +} EP_FWD_DESC;
76643 +
76644 +typedef struct ep_comms_ops
76645 +{
76646 +    void            (*DelRail) (EP_COMMS_RAIL *rail);
76647 +    void            (*DisplayRail) (EP_COMMS_RAIL *rail);
76648 +
76649 +    struct {
76650 +       void         (*AddRail) (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
76651 +       void         (*DelRail) (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
76652 +
76653 +       long         (*Check) (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
76654 +
76655 +       int          (*QueueRxd) (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
76656 +       void         (*RpcPut)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
76657 +       void         (*RpcGet)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
76658 +       void         (*RpcComplete)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
76659 +
76660 +       EP_RXD      *(*StealRxd)(EP_RCVR_RAIL *rcvrRail);
76661 +
76662 +       void         (*DisplayRcvr) (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
76663 +       void         (*DisplayRxd)  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
76664 +
76665 +       void         (*FillOutRailStats) (EP_RCVR_RAIL *rcvr_rail, char *str);
76666 +
76667 +    } Rcvr;
76668 +
76669 +    struct {
76670 +       void         (*AddRail) (EP_XMTR *xmtr, EP_COMMS_RAIL *rail);
76671 +       void         (*DelRail) (EP_XMTR *xmtr, EP_COMMS_RAIL *rail);
76672 +
76673 +       long         (*Check) (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
76674 +       
76675 +       int          (*BindTxd) (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
76676 +       void         (*UnbindTxd) (EP_TXD *txd, unsigned int phase);
76677 +       int          (*PollTxd) (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
76678 +
76679 +       void         (*DisplayXmtr) (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
76680 +       void         (*DisplayTxd)  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
76681 +
76682 +       int          (*CheckTxdState) (EP_TXD *txd);
76683 +
76684 +       void         (*FillOutRailStats) (EP_XMTR_RAIL *xmtr_rail, char *str);
76685 +
76686 +    } Xmtr;
76687 +} EP_COMMS_OPS;
76688 +
76689 +#define EP_RAIL_OP(commsRail, Which)   (commsRail)->Ops.Which
76690 +#define EP_RCVR_OP(rcvrRail, Which)    (rcvrRail)->CommsRail->Ops.Rcvr.Which
76691 +#define EP_XMTR_OP(xmtrRail, Which)    (xmtrRail)->CommsRail->Ops.Xmtr.Which
76692 +
76693 +/* "how" parameter to PollTxd */
76694 +#define POLL_TX_LIST           0
76695 +#define ENABLE_TX_CALLBACK     1
76696 +#define DISABLE_TX_CALLBACK    2
76697 +
76698 +struct ep_comms_rail
76699 +{
76700 +    struct list_head   Link;                                   /* Linked on subsys */
76701 +    EP_RAIL           *Rail;                                   /* kernel comms rail */
76702 +    EP_COMMS_SUBSYS    *Subsys;
76703 +    EP_COMMS_OPS        Ops;
76704 +
76705 +    EP_COMMS_RAIL_STATS Stats;                                 /* statistics */
76706 +};
76707 +
76708 +struct ep_comms_subsys
76709 +{
76710 +    EP_SUBSYS          Subsys;                                 /* is a kernel comms subsystem */
76711 +
76712 +    kmutex_t           Lock;                                   /* global lock */
76713 +
76714 +    EP_COMMS_STATS     Stats;                                  /* statistics */
76715 +
76716 +    struct list_head   Rails;                                  /* list of all rails */
76717 +
76718 +    struct list_head    Receivers;                             /* list of receivers */
76719 +    struct list_head   Transmitters;                           /* and transmitters */
76720 +
76721 +    /* forward/allocator thread */
76722 +    EP_KTHREAD         Thread;                                 /* place thread sleeps */
76723 +
76724 +    /* message passing "broadcast" forward lists */
76725 +    spinlock_t         ForwardDescLock;                        /* Lock for broadcast forwarding */
76726 +    struct list_head    ForwardDescList;                       /* List of rxd's to forward */
76727 +
76728 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
76729 +    spinlock_t         CheckSumDescLock;                       /* Lock for CheckSums */
76730 +    struct list_head    CheckSumDescList;                      /* List of rxd's to be CheckSumed */
76731 +#endif
76732 +
76733 +    EP_XMTR           *ForwardXmtr;                            /* and transmitter to forward with */
76734 +};
76735 +
76736 +/* epcomms.c subsystem initialisation */
76737 +extern unsigned int   epcomms_forward_limit;
76738 +
76739 +extern int           ep_comms_init (EP_SYS *sys);
76740 +extern void           ep_comms_display (EP_SYS *sys, char *how);
76741 +extern EP_RAILMASK    ep_rcvr_railmask (EP_SYS *epsys, EP_SERVICE service);
76742 +
76743 +/* epcomms_elan3.c */
76744 +extern EP_COMMS_RAIL *ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail);
76745 +
76746 +/* epcomms_elan4.c */
76747 +extern EP_COMMS_RAIL *ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail);
76748 +
76749 +/* epcommsTx.c */
76750 +extern int            TxdShouldStabalise (EP_TXD_RAIL *txdRail, EP_RAIL *rail);
76751 +extern void          FreeTxd (EP_XMTR *xmtr, EP_TXD *txd);
76752 +
76753 +extern unsigned int   ep_txd_lowat;
76754 +extern long           ep_check_xmtr (EP_XMTR *xmtr, long nextRunTime);
76755 +extern void           ep_display_xmtr (DisplayInfo *di, EP_XMTR *xmtr);
76756 +extern void           ep_xmtr_flush_callback (EP_XMTR *xmtr, EP_XMTR_RAIL *xmtrRail);
76757 +extern void           ep_xmtr_reloc_callback (EP_XMTR *xmtr, EP_XMTR_RAIL *xmtrRail);
76758 +
76759 +extern void           ep_xmtr_fillout_stats      (EP_XMTR      *xmtr,      char *str);
76760 +extern void           ep_xmtr_rail_fillout_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
76761 +
76762 +extern void           ep_xmtr_txd_stat (EP_XMTR *xmtr, EP_TXD *txd);
76763 +
76764 +/* epcommsRx.c */
76765 +extern EP_RXD        *StealRxdFromOtherRail (EP_RCVR *rcvr);
76766 +
76767 +extern unsigned int   ep_rxd_lowat;
76768 +extern long          ep_check_rcvr (EP_RCVR *rcvr, long nextRunTime);
76769 +extern void           ep_rcvr_flush_callback (EP_RCVR *rcvr, EP_RCVR_RAIL *rcvrRail);
76770 +extern void           ep_rcvr_reloc_callback (EP_RCVR *rcvr, EP_RCVR_RAIL *rcvrRail);
76771 +extern void           ep_display_rcvr (DisplayInfo *di, EP_RCVR *rcvr, int full);
76772 +
76773 +extern long           ep_forward_rxds (EP_COMMS_SUBSYS *subsys, long nextRunTime);
76774 +
76775 +extern void           ep_rcvr_fillout_stats      (EP_RCVR      *rcvr,      char *str);
76776 +extern void           ep_rcvr_rail_fillout_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
76777 +
76778 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
76779 +extern void           ep_csum_rxds    (EP_COMMS_SUBSYS *subsys);
76780 +extern void           ep_rxd_queue_csum (EP_RXD *rxd);
76781 +#endif
76782 +
76783 +extern void           ep_rxd_received     (EP_RXD *rxd);
76784 +extern void           ep_rxd_received_now (EP_RXD *rxd);
76785 +
76786 +/* ep_procfs.c */
76787 +extern struct proc_dir_entry *ep_procfs_root;
76788 +
76789 +extern void ep_procfs_rcvr_xmtr_init(void);
76790 +extern void ep_procfs_rcvr_xmtr_fini(void);
76791 +
76792 +extern void ep_procfs_rcvr_add(EP_RCVR *rcvr);
76793 +extern void ep_procfs_rcvr_del(EP_RCVR *rcvr);
76794 +
76795 +extern void ep_procfs_rcvr_add_rail(EP_RCVR_RAIL *rcvrRail);
76796 +extern void ep_procfs_rcvr_del_rail(EP_RCVR_RAIL *rcvrRail);
76797 +
76798 +extern void ep_procfs_xmtr_add(EP_XMTR *xmtr);
76799 +extern void ep_procfs_xmtr_del(EP_XMTR *xmtr);
76800 +
76801 +extern void ep_procfs_xmtr_add_rail(EP_XMTR_RAIL *xmtrRail);
76802 +extern void ep_procfs_xmtr_del_rail(EP_XMTR_RAIL *xmtrRail);
76803 +
76804 +
76805 +/* Public Interface */
76806 +
76807 +
76808 +/* epcomms.c message xmtr functions */
76809 +extern EP_XMTR       *ep_alloc_xmtr (EP_SYS *sys);
76810 +extern void           ep_free_xmtr (EP_XMTR *xmtr);
76811 +
76812 +extern EP_STATUS      ep_transmit_message (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
76813 +                                          EP_TXH *handler, void *arg, EP_PAYLOAD *payload,
76814 +                                          EP_NMD *nmd, int nFrag);
76815 +extern EP_STATUS      ep_multicast_message (EP_XMTR *xmtr, unsigned int destLo, unsigned int destHi, bitmap_t *bitmap, 
76816 +                                           EP_SERVICE service, EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, 
76817 +                                           EP_PAYLOAD *payload, EP_NMD *nmd, int nFrag);
76818 +extern EP_STATUS      ep_transmit_rpc (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
76819 +                                      EP_TXH *handler, void *arg, EP_PAYLOAD *payload,
76820 +                                      EP_NMD *nmd, int nFrag);
76821 +extern EP_STATUS      ep_multicast_forward (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
76822 +                                           EP_TXH *handler, void *arg, EP_ENVELOPE *env, EP_PAYLOAD *payload, 
76823 +                                           bitmap_t *bitmap, EP_NMD *nmd, int nFrags);
76824 +
76825 +/* epcomms.c functions for use with polled transmits */
76826 +extern int            ep_poll_transmits (EP_XMTR *xmtr);
76827 +extern int            ep_enable_txcallbacks (EP_XMTR *xmtr);
76828 +extern int            ep_disable_txcallbacks (EP_XMTR *xmtr);
76829 +
76830 +/* epcomms.c message rcvr functions */
76831 +extern EP_RCVR       *ep_alloc_rcvr (EP_SYS *sys, EP_SERVICE svc, unsigned int nenvelopes);
76832 +extern void          ep_free_rcvr (EP_RCVR *rcvr);
76833 +
76834 +extern EP_STATUS      ep_queue_receive (EP_RCVR *rcvr, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr);
76835 +extern void          ep_requeue_receive (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr);
76836 +extern EP_STATUS      ep_rpc_put (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *from, EP_NMD *to, int nFrags);
76837 +extern EP_STATUS      ep_rpc_get (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *from, EP_NMD *to, int nFrags);
76838 +extern EP_STATUS      ep_complete_rpc (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_STATUSBLK *blk, 
76839 +                                      EP_NMD *from, EP_NMD *to, int nFrags);
76840 +extern void          ep_complete_receive (EP_RXD *rxd);
76841 +
76842 +/* railhints.c */
76843 +extern int            ep_xmtr_bcastrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails);
76844 +extern int            ep_xmtr_prefrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails, unsigned nodeId);
76845 +extern EP_RAILMASK    ep_xmtr_availrails (EP_XMTR *xmtr);
76846 +extern EP_RAILMASK    ep_xmtr_noderails (EP_XMTR *xmtr, unsigned nodeId);
76847 +extern int            ep_rcvr_prefrail (EP_RCVR *rcvr, EP_RAILMASK allowedRails);
76848 +extern EP_RAILMASK    ep_rcvr_availrails (EP_RCVR *rcvr);
76849 +extern EP_RAILMASK    ep_rxd_railmask (EP_RXD *rxd);
76850 +
76851 +/* epcomms.c functions for accessing fields of rxds */
76852 +extern void          *ep_rxd_arg(EP_RXD *rxd);
76853 +extern int            ep_rxd_len(EP_RXD *rxd);
76854 +extern EP_STATUS      ep_rxd_status(EP_RXD *rxd);
76855 +extern int            ep_rxd_isrpc(EP_RXD *rxd);
76856 +extern EP_ENVELOPE   *ep_rxd_envelope(EP_RXD *rxd);
76857 +extern EP_PAYLOAD    *ep_rxd_payload(EP_RXD *rxd);
76858 +extern int            ep_rxd_node(EP_RXD *rxd);
76859 +extern EP_STATUSBLK  *ep_rxd_statusblk(EP_RXD *rxd);
76860 +
76861 +/* functions for accessing fields of txds */
76862 +extern int            ep_txd_node(EP_TXD *txd);
76863 +extern EP_STATUSBLK  *ep_txd_statusblk(EP_TXD *txd);
76864 +
76865 +/* functions for controlling how many processes are using module */
76866 +extern void              ep_mod_dec_usecount (void);
76867 +extern void              ep_mod_inc_usecount (void);
76868 +
76869 +extern EP_RAILMASK ep_xmtr_svc_indicator_railmask (EP_XMTR *xmtr, int svc_indicator, int nodeId);
76870 +extern int ep_xmtr_svc_indicator_bitmap (EP_XMTR *xmtr, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
76871 +
76872 +#endif /* ! __ELAN__ */
76873 +/*
76874 + * Local variables:
76875 + * c-file-style: "stroustrup"
76876 + * End:
76877 + */
76878 +#endif /* __ELAN_EPCOMMS_H */
76879 +
76880 Index: linux-2.4.21/include/elan/epsvc.h
76881 ===================================================================
76882 --- linux-2.4.21.orig/include/elan/epsvc.h      2004-02-23 16:02:56.000000000 -0500
76883 +++ linux-2.4.21/include/elan/epsvc.h   2005-06-01 23:12:54.710422232 -0400
76884 @@ -0,0 +1,36 @@
76885 +/*
76886 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
76887 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
76888 + *
76889 + *    For licensing information please see the supplied COPYING file
76890 + *
76891 + */
76892 +
76893 +#ifndef __ELAN_EPSVC_H
76894 +#define __ELAN_EPSVC_H
76895 +
76896 +#ident "@(#)$Id: epsvc.h,v 1.9 2004/02/13 10:03:27 david Exp $"
76897 +/*      $Source: /cvs/master/quadrics/epmod/epsvc.h,v $ */
76898 +
76899 +
76900 +#define EP_SVC_NUM_INDICATORS       8
76901 +#define EP_SVC_INDICATOR_MAX_NAME  32
76902 +
76903 +#define EP_SVC_EIP     0
76904 +#define EP_SVC_NAMES   {"eip", "1", "2", "3", "4", "5", "6", "7"};
76905 +
76906 +#if defined(__KERNEL__)
76907 +extern int         ep_svc_indicator_set      (EP_SYS *epsys, int svc_indicator);
76908 +extern int         ep_svc_indicator_clear    (EP_SYS *epsys, int svc_indicator);
76909 +extern int         ep_svc_indicator_is_set   (EP_SYS *epsys, int svc_indicator, int nodeId);
76910 +extern int         ep_svc_indicator_bitmap   (EP_SYS *epsys, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
76911 +extern EP_RAILMASK ep_svc_indicator_railmask (EP_SYS *epsys, int svc_indicator, int nodeId);
76912 +#endif
76913 +
76914 +#endif /* __ELAN_EPSVC_H */
76915 +
76916 +/*
76917 + * Local variables:
76918 + * c-file-style: "stroustrup"
76919 + * End:
76920 + */
76921 Index: linux-2.4.21/include/elan/kalloc.h
76922 ===================================================================
76923 --- linux-2.4.21.orig/include/elan/kalloc.h     2004-02-23 16:02:56.000000000 -0500
76924 +++ linux-2.4.21/include/elan/kalloc.h  2005-06-01 23:12:54.710422232 -0400
76925 @@ -0,0 +1,108 @@
76926 +/*
76927 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
76928 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
76929 + *
76930 + *    For licensing information please see the supplied COPYING file
76931 + *
76932 + */
76933 +
76934 +#ifndef __ELAN3_KALLOC_H
76935 +#define __ELAN3_KALLOC_H
76936 +
76937 +#ident "$Id: kalloc.h,v 1.11 2004/05/19 10:23:59 david Exp $"
76938 +/*      $Source: /cvs/master/quadrics/epmod/kalloc.h,v $ */
76939 +
76940 +#include <elan/rmap.h>
76941 +
76942 +/*
76943 + * Memory allocator
76944 + */
76945 +#define LN2_MIN_SIZE   6                                       /* 64 bytes */
76946 +#define LN2_MAX_SIZE   16                                      /* 64k bytes */
76947 +#define NUM_FREELISTS  (LN2_MAX_SIZE-LN2_MIN_SIZE + 1)
76948 +#define MIN_SIZE       (1 << LN2_MIN_SIZE)
76949 +#define MAX_SIZE       (1 << LN2_MAX_SIZE)
76950 +
76951 +#define HASHSHIFT      LN2_MAX_SIZE
76952 +#define NHASH          32
76953 +#define HASH(addr)     (((addr) >> HASHSHIFT) & (NHASH-1))
76954 +
76955 +typedef enum
76956 +{
76957 +    EP_ALLOC_TYPE_PRIVATE_SDRAM,
76958 +    EP_ALLOC_TYPE_PRIVATE_MAIN,
76959 +    EP_ALLOC_TYPE_SHARED_MAIN,
76960 +} EP_ALLOC_TYPE;
76961 +
76962 +typedef struct ep_pool
76963 +{
76964 +    EP_NMH               Handle;                               /* network mapping handle */
76965 +
76966 +    struct list_head     HashBase;                             /* linked on hash lists */
76967 +    struct list_head     HashTop;                              /* linked on hash lists */
76968 +
76969 +    struct list_head     Link[NUM_FREELISTS];                  /* linked on free lists */
76970 +    bitmap_t            *Bitmaps[NUM_FREELISTS];               /* bitmaps for each size */
76971 +
76972 +    union {
76973 +       sdramaddr_t     Sdram;
76974 +       unsigned long   Ptr;
76975 +    } Buffer;
76976 +} EP_POOL;
76977 +
76978 +typedef struct ep_alloc
76979 +{
76980 +    spinlock_t      Lock;
76981 +    
76982 +    EP_ALLOC_TYPE    Type;
76983 +    unsigned int     Perm;
76984 +
76985 +    EP_RMAP         *ResourceMap;
76986 +
76987 +    struct list_head HashBase[NHASH];
76988 +    struct list_head HashTop[NHASH];
76989 +    struct list_head Freelists[NUM_FREELISTS];
76990 +
76991 +    union {
76992 +       struct {
76993 +           EP_SYS             *System;
76994 +           struct list_head    Rails;
76995 +       } Shared;
76996 +       
76997 +       struct {
76998 +           EP_RAIL            *Rail;
76999 +       } Private;
77000 +    } Data;
77001 +} EP_ALLOC;
77002 +
77003 +extern void            ep_display_alloc (EP_ALLOC *alloc);
77004 +
77005 +extern void            ep_alloc_init (EP_RAIL *rail);
77006 +extern void            ep_alloc_fini (EP_RAIL *rail);
77007 +
77008 +extern sdramaddr_t     ep_alloc_memory_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr);
77009 +extern void            ep_free_memory_elan (EP_RAIL *rail, EP_ADDR addr);
77010 +
77011 +extern sdramaddr_t     ep_alloc_elan (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp);
77012 +extern void            ep_free_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size);
77013 +extern void           *ep_alloc_main (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addr);
77014 +extern void            ep_free_main (EP_RAIL *rail, EP_ADDR addr, unsigned size);
77015 +
77016 +extern sdramaddr_t     ep_elan2sdram (EP_RAIL *rail, EP_ADDR addr);
77017 +extern void            *ep_elan2main (EP_RAIL *rail, EP_ADDR addr);
77018 +
77019 +extern void            ep_shared_alloc_init (EP_SYS *sys);
77020 +extern void            ep_shared_alloc_fini (EP_SYS *sys);
77021 +extern int             ep_shared_alloc_add_rail (EP_SYS *sys, EP_RAIL *rail);
77022 +extern void            ep_shared_alloc_remove_rail (EP_SYS *sys, EP_RAIL *rail);
77023 +
77024 +extern void           *ep_shared_alloc_main (EP_SYS *sys, unsigned size, EP_ATTRIBUTE attr, EP_NMD *nmd);
77025 +extern void            ep_shared_free_main (EP_SYS *sys, EP_NMD *nmd);
77026 +
77027 +#endif /* __ELAN_KALLOC_H */
77028 +
77029 +/*
77030 + * Local variables:
77031 + * c-file-style: "stroustrup"
77032 + * End:
77033 + */
77034 Index: linux-2.4.21/include/elan/kcomm.h
77035 ===================================================================
77036 --- linux-2.4.21.orig/include/elan/kcomm.h      2004-02-23 16:02:56.000000000 -0500
77037 +++ linux-2.4.21/include/elan/kcomm.h   2005-06-01 23:12:54.712421928 -0400
77038 @@ -0,0 +1,839 @@
77039 +/*
77040 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
77041 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
77042 + *
77043 + *    For licensing information please see the supplied COPYING file
77044 + *
77045 + */
77046 +
77047 +#ifndef __ELAN_KCOMM_H
77048 +#define __ELAN_KCOMM_H
77049 +
77050 +#ident "$Id: kcomm.h,v 1.71.2.8 2004/12/14 10:19:14 mike Exp $"
77051 +/*      $Source: /cvs/master/quadrics/epmod/kcomm.h,v $*/
77052 +#define EP_KCOMM_MAJOR_VERSION         3
77053 +#define EP_KCOMM_MINOR_VERSION         1
77054 +
77055 +#define EP_PROTOCOL_VERSION            1                       /* CM/KCOMM protocol revision */
77056 +
77057 +#define EP_MAX_NODES                   2048                    /* Max nodes we support */
77058 +#define EP_MAX_RAILS                   16                      /* max number of rails (we use an unsigned short for bitmaps !) */
77059 +#define EP_MAXFRAG                     4                       /* max number of fragments */
77060 +
77061 +#define EP_BLK_SIZE                    64                      /* align objects for elan access */
77062 +
77063 +/* Elan virtual address address space */
77064 +#define EP_SYSTEM_QUEUE_BASE           0x00010000              /* Base address for system queues */
77065 +#define EP_MSGSYS_QUEUE_BASE           0x00020000              /* Base address for msgsys queues */
77066 +#define EP_EPCOMMS_QUEUE_BASE          0x00030000              /* Base address for message queues */
77067 +#define EP_DVMA_BASE                   0x10000000              /* elan address range for dvma mapping. */
77068 +#define EP_DVMA_TOP                    0xE0000000
77069 +
77070 +#define EP_SHARED_BASE                 0xE0000000              /* shared main/elan allocators */
77071 +#define EP_SHARED_TOP                  0xF0000000
77072 +
77073 +#define EP_PRIVATE_BASE                        0xF0000000              /* private main/elan allocators */
77074 +#define EP_PRIVATE_TOP                 0xF8000000
77075 +
77076 +#define EP_DVMA_RMAP_SIZE              1024                    /* size of resource map for dvma address space */
77077 +#define EP_SHARED_RMAP_SIZE            1024                    /* size of resource map for shared address space */
77078 +#define EP_PRIVATE_RMAP_SIZE           1024                    /* size of resource map for private address space */
77079 +
77080 +/* Input queue descriptors fit into 64 bytes */
77081 +#define EP_QUEUE_DESC_SIZE             64
77082 +
77083 +/* Timeouts for checking network position */
77084 +#define EP_POSITION_TIMEOUT            (4*HZ)          /* 4s   time to notice CheckNetworkPosition changes */
77085 +#define EP_WITHDRAW_TIMEOUT            (2*HZ)          /* 2s   time before withdrawing from unreachable nodes */
77086 +
77087 +/* Time to try again due to resource failure (eg malloc etc) */
77088 +#define RESOURCE_RETRY_TIME            (HZ/20)
77089 +
77090 +/* Time to retransmit message when send failed */
77091 +#define MSGBUSY_RETRY_TIME             (HZ/20)
77092 +
77093 +/* Time between retransmits of network flush request messages */
77094 +#define MESSAGE_RETRY_TIME             (HZ/5)
77095 +
77096 +/* time to hold the context filter up to ensure that the
77097 + * next packet of a dma is guaranteed to get nacked (8mS) */
77098 +#define NETWORK_ERROR_TIMEOUT          (1 + roundup (HZ * 8 / 1000, 1))
77099 +
77100 +/* Time between retransmits of message failover requests */
77101 +#define FAILOVER_RETRY_TIME            (HZ/5)
77102 +
77103 +/* compute earliest time */
77104 +#define SET_NEXT_RUN_TIME(nextRunTime, time) \
77105 +do { \
77106 +    if ((nextRunTime) == 0 || AFTER(nextRunTime, (time)))\
77107 +       (nextRunTime) = (time);\
77108 +} while (0)
77109 +
77110 +/* DMA retry backoff/priorities/issue rings */
77111 +#define EP_NUM_BACKOFF                 8
77112 +#define EP_RETRY_STABALISING            0
77113 +#define EP_RETRY_BASE                  1
77114 +
77115 +#define EP_RETRY_CRITICAL              EP_RETRY_BASE
77116 +#define EP_RETRY_HIGH_PRI              (EP_RETRY_CRITICAL + 1)
77117 +#define EP_RETRY_HIGH_PRI_TIME         (1)
77118 +#define EP_RETRY_HIGH_PRI_RETRY                (EP_RETRY_HIGH_PRI + 1)
77119 +#define EP_RETRY_HIGH_PRI_RETRY_TIME   (2)
77120 +#define EP_RETRY_LOW_PRI               (EP_RETRY_HIGH_PRI_RETRY + EP_NUM_BACKOFF)
77121 +#define EP_RETRY_LOW_PRI_TIME          (2)
77122 +#define EP_RETRY_LOW_PRI_RETRY         (EP_RETRY_LOW_PRI + 1)
77123 +#define EP_RETRY_LOW_PRI_RETRY_TIME    (4)
77124 +#define EP_RETRY_ANONYMOUS             (EP_RETRY_LOW_PRI_RETRY + EP_NUM_BACKOFF)
77125 +#define EP_RETRY_ANONYMOUS_TIME                (10)
77126 +#define EP_RETRY_NETERR                        (EP_RETRY_ANONYMOUS + EP_NUM_BACKOFF)
77127 +#define EP_RETRY_NETERR_TIME           (10)
77128 +#define EP_NUM_RETRIES                 (EP_RETRY_NETERR + 1)
77129 +
77130 +typedef unsigned short EP_SERVICE;
77131 +
77132 +/* EP_ATTRIBUTE 32 bits 
77133 + *
77134 + * 0-2
77135 + *   for initial call :-
77136 + *     0 (0x1) EP_NO_ALLOC                             used once
77137 + *     1 (0x2) EP_NO_SLEEP                             used once
77138 + *     2 (0x4) EP_NOT_MYSELF                           used once
77139 + *
77140 + *   when stored and transmitted :-
77141 + *     0 (0x0) EP_MULTICAST                            envelope
77142 + *     1 (0x2) EP_RPC                                  envelope
77143 + *     2 (0x4) EP_HAS_PAYLOAD                          envelope
77144 + *
77145 + * 3-11
77146 + *     3   (0x08) EP_PREFRAIL_SET                      preserved
77147 + *     4-7 (0xf0) Pref Rail
77148 + *     8  (0x100) EP_NO_INTERUPT
77149 + *     9  (0x200) EP_NO_FAILOVER
77150 + *
77151 + *    10 (0x400) EP_INTERRUPT_ENABLED                  internal
77152 + *    11 (0x800) EP_TXD_STABALISING                    internal
77153 + *
77154 + * 12-13 Not Used.
77155 + * 
77156 + * 14-15 (0xC000) Data Type.                           passed in
77157 + *    00 none. 
77158 + *    01 Service Indicator.
77159 + *    10 TimeOut.
77160 + *    11 RailMask
77161 + *          
77162 + * 16-31 (0x10000)  Data.  Service Indicator, TimeOut, RailMask, Pref Rail.
77163 + *         
77164 +*/
77165 +
77166 +typedef uint32_t EP_ATTRIBUTE;
77167 +
77168 +#define EP_LOCAL_ATTR_MASK 0x07
77169 +#define EP_CLEAR_LOCAL_ATTR(ATTR)  ( (ATTR) & ~EP_LOCAL_ATTR_MASK )
77170 +
77171 +#define EP_NO_ALLOC      0x01  /* Don't call allocators if no free descriptors */
77172 +#define EP_NO_SLEEP      0x02  /* Don't sleep if no free descriptors */
77173 +#define EP_NOT_MYSELF    0x04  /* Don't send multicast to me */
77174 +
77175 +#define EP_MULTICAST         0x01      /* Message is a multicast */
77176 +#define EP_RPC               0x02      /* Wait for RPC reply */
77177 +#define EP_HAS_PAYLOAD_BIT   0x04      /* transfer payload */
77178 +
77179 +
77180 +#define EP_PREFRAIL_SET  0x08  /* preferred rail is set (otherwise pick one from the NMDs) */
77181 +
77182 +#define EP_PREFRAIL_SHIFT  (4)
77183 +#define EP_PREFRAIL_MASK   0xf0
77184 +#define EP_IS_PREFRAIL_SET(ATTR)      (((ATTR) &  EP_PREFRAIL_SET) != 0)
77185 +#define EP_CLEAR_PREFRAIL(ATTR)       (((ATTR) & ~EP_PREFRAIL_SET) & ~EP_PREFRAIL_MASK) 
77186 +#define EP_SET_PREFRAIL(ATTR,RAIL)    (EP_CLEAR_PREFRAIL(ATTR) | (((RAIL) <<  EP_PREFRAIL_SHIFT ) &  EP_PREFRAIL_MASK ) |  EP_PREFRAIL_SET)
77187 +
77188 +
77189 +#define EP_ATTR2PREFRAIL(ATTR)            (((ATTR) & EP_PREFRAIL_MASK) >> EP_PREFRAIL_SHIFT)
77190 +
77191 +
77192 +#define EP_INTERRUPT_ENABLED 0x400     /* event interrupt enabled on EP_NO_INTERRUPT */
77193 +#define EP_TXD_STABALISING   0x800      /* flag to indicate this is attempting to stabalise */
77194 +
77195 +#define EP_IS_MULTICAST(ATTR)                 (((ATTR) &  EP_MULTICAST) != 0)
77196 +#define EP_SET_MULTICAST(ATTR)                ( (ATTR) |  EP_MULTICAST)
77197 +#define EP_CLEAR_MULTICAST(ATTR)              ( (ATTR) & ~EP_MULTICAST)
77198 +
77199 +#define EP_IS_RPC(ATTR)                       (((ATTR) &  EP_RPC) != 0)
77200 +#define EP_SET_RPC(ATTR)                      ( (ATTR) |  EP_RPC)
77201 +#define EP_CLEAR_RPC(ATTR)                    ( (ATTR) & ~EP_RPC)
77202 +
77203 +#define EP_HAS_PAYLOAD(ATTR)                  (((ATTR) &  EP_HAS_PAYLOAD_BIT) != 0)
77204 +#define EP_SET_HAS_PAYLOAD(ATTR)              ( (ATTR) |  EP_HAS_PAYLOAD_BIT)
77205 +#define EP_CLEAR_HAS_PAYLOAD(ATTR)            ( (ATTR) & ~EP_HAS_PAYLOAD_BIT)
77206 +
77207 +#define EP_IS_INTERRUPT_ENABLED(ATTR)         (((ATTR) &  EP_INTERRUPT_ENABLED) != 0)
77208 +#define EP_SET_INTERRUPT_ENABLED(ATTR)        ( (ATTR) |  EP_INTERRUPT_ENABLED)
77209 +#define EP_CLEAR_INTERRUPT_ENABLED(ATTR)      ( (ATTR) & ~EP_INTERRUPT_ENABLED)
77210 +
77211 +#define EP_IS_TXD_STABALISING(ATTR)           (((ATTR) &  EP_TXD_STABALISING) != 0)
77212 +#define EP_SET_TXD_STABALISING(ATTR)          ( (ATTR) |  EP_TXD_STABALISING)
77213 +#define EP_CLEAR_TXD_STABALISING(ATTR)        ( (ATTR) & ~EP_TXD_STABALISING)
77214 +
77215 +#define EP_NO_INTERRUPT      0x100     /* Don't generate completion interrupt (tx) */
77216 +#define EP_NO_FAILOVER       0x200     /* don't attempt rail failover, just abort */
77217 +
77218 +#define EP_IS_NO_INTERRUPT(ATTR)    (((ATTR) &  EP_NO_INTERRUPT) != 0)
77219 +#define EP_SET_NO_INTERRUPT(ATTR)   ( (ATTR) |  EP_NO_INTERRUPT)
77220 +#define EP_CLEAR_NO_INTERRUPT(ATTR) ( (ATTR) & ~EP_NO_INTERRUPT)
77221 +
77222 +#define EP_IS_NO_FAILOVER(ATTR)    (((ATTR) &  EP_NO_FAILOVER) != 0)
77223 +#define EP_SET_NO_FAILOVER(ATTR)   ( (ATTR) |  EP_NO_FAILOVER)
77224 +#define EP_CLEAR_NO_FAILOVER(ATTR) ( (ATTR) & ~EP_NO_FAILOVER)
77225 +
77226 +#define EP_TYPE_MASK           0xC000
77227 +#define EP_TYPE_SVC_INDICATOR  0x4000
77228 +#define EP_TYPE_TIMEOUT        0x8000
77229 +#define EP_TYPE_RAILMASK       0xC000
77230 +
77231 +#define EP_ATTR2TYPE(ATTR)        ( (ATTR) & EP_TYPE_MASK )
77232 +
77233 +#define EP_IS_SVC_INDICATOR(ATTR) (EP_ATTR2TYPE(ATTR) == EP_TYPE_SVC_INDICATOR)
77234 +#define EP_IS_TIMEOUT(ATTR)       (EP_ATTR2TYPE(ATTR) == EP_TYPE_TIMEOUT)
77235 +#define EP_IS_RAILMASK(ATTR)      (EP_ATTR2TYPE(ATTR) == EP_TYPE_RAILMASK)
77236 +#define EP_IS_NO_TYPE(ATTR)       (EP_ATTR2TYPE(ATTR) == 0)
77237 +
77238 +#define EP_DATA_SHIFT          (16)
77239 +#define EP_DATA_MASK           0xffff0000
77240 +
77241 +#define EP_ATTR2DATA(ATTR)     (((ATTR) & EP_DATA_MASK) >> EP_DATA_SHIFT)
77242 +#define EP_DATA2ATTR(DATA)     (((DATA) <<  EP_DATA_SHIFT) & EP_DATA_MASK)
77243 +
77244 +#define EP_CLEAR_DATA(ATTR)    (((ATTR) & ~EP_TYPE_MASK) & ~EP_DATA_MASK)
77245 +#define EP_SET_DATA(ATTR,TYPE,DATA)   (EP_CLEAR_DATA(ATTR) | ((TYPE) & EP_TYPE_MASK) | (((DATA) <<  EP_DATA_SHIFT) & EP_DATA_MASK))
77246 +
77247 +#define EP_DEFAULT_TIMEOUT     (HZ*30)
77248 +
77249 +#if !defined(offsetof)
77250 +#define offsetof(s, m)         (unsigned long)(&(((s *)0)->m))
77251 +#endif
77252 +#if !defined(roundup)
77253 +#define roundup(x, y)          ((((x)+((y)-1))/(y))*(y))
77254 +#endif
77255 +
77256 +/* 
77257 + * Message transaction IDs - these are unique 64 bit
77258 + * numbers which include the initial rail number.
77259 + */
77260 +typedef struct ep_xid
77261 +{
77262 +    uint32_t   Generation;
77263 +    uint32_t   Handle;
77264 +    uint64_t   Unique;
77265 +} EP_XID;
77266 +
77267 +#define EP_INVALIDATE_XID(xid) ((xid).Generation = (xid).Handle = (xid).Unique = 0)
77268 +
77269 +#define EP_XID_INVALID(xid)    ((xid).Generation == 0 && (xid).Handle == 0 && (xid).Unique == 0)
77270 +#define EP_XIDS_MATCH(a,b)     ((a).Generation == (b).Generation && (a).Handle == (b).Handle && (a).Unique == (b).Unique)
77271 +
77272 +typedef struct ep_backoff
77273 +{
77274 +    unsigned char      type;
77275 +    unsigned char      indx;
77276 +    unsigned short     count;
77277 +} EP_BACKOFF;
77278 +
77279 +/* values for "type" */
77280 +#define EP_BACKOFF_FREE                0
77281 +#define EP_BACKOFF_ENVELOPE    1
77282 +#define EP_BACKOFF_FETCH       2
77283 +#define EP_BACKOFF_DATA                3
77284 +#define EP_BACKOFF_DONE                4
77285 +#define EP_BACKOFF_STABILISE   5
77286 +
77287 +#ifndef __ELAN__
77288 +
77289 +/* forward declaration of types */
77290 +typedef struct ep_rail EP_RAIL;
77291 +typedef struct ep_sys  EP_SYS;
77292 +
77293 +#include <elan/nmh.h>
77294 +#include <elan/kmap.h>
77295 +#include <elan/statemap.h>
77296 +#include <elan/kalloc.h>
77297 +#include <elan/kthread.h>
77298 +#include <elan/kcomm_stats.h>
77299 +#include <elan/devinfo.h>
77300 +
77301 +typedef struct ep_callback
77302 +{
77303 +    struct ep_callback *Next;
77304 +    void              (*Routine)(void *, statemap_t *);
77305 +    void              *Arg;
77306 +} EP_CALLBACK;
77307 +
77308 +#define EP_CB_FLUSH_FILTERING          0
77309 +#define EP_CB_FLUSH_FLUSHING           1
77310 +#define EP_CB_PASSIVATED               2
77311 +#define EP_CB_FAILOVER                 3
77312 +#define EP_CB_DISCONNECTING            4
77313 +#define EP_CB_DISCONNECTED             5
77314 +#define EP_CB_NODESET                  6
77315 +#define EP_CB_COUNT                    7
77316 +
77317 +#endif /* !defined(__ELAN__) */
77318 +
77319 +/* Small unreliable system message queues */
77320 +#define EP_SYSTEMQ_INTR                        0                       /* input queue for cluster membership generating an interrupt */
77321 +#define EP_SYSTEMQ_POLLED              1                       /* input queue for cluster membership polled on clock tick */
77322 +#define EP_SYSTEMQ_MANAGER             2                       /* input queue for manager messages */
77323 +#define EP_NUM_SYSTEMQ                 64
77324 +
77325 +#define EP_SYSTEMQ_ADDR(qnum)          (EP_SYSTEM_QUEUE_BASE + (qnum) * EP_QUEUE_DESC_SIZE)
77326 +#define EP_SYSTEMQ_DESC(base,qnum)     ((base) + (qnum) * EP_QUEUE_DESC_SIZE)
77327 +
77328 +#define EP_SYSTEMQ_MSG_ALIGN           64                      /* message sizes aligned to 64 byte boundaries */
77329 +#define EP_SYSTEMQ_MSG_MAX             (4*64)                  /* max message size */
77330 +
77331 +/* Special flag for Version field to indicate message not
77332 + * seen in main memory yet and time limit to poll for it */
77333 +#define EP_SYSTEMQ_UNRECEIVED                  0xdeadbabe
77334 +#define EP_SYSTEMQ_UNRECEIVED_TLIMIT           16384                   /* 1023 uS */
77335 +
77336 +#ifndef __ELAN__
77337 +
77338 +typedef void (EP_INPUTQ_HANDLER) (EP_RAIL *rail, void *arg, void *msg);
77339 +typedef void (EP_INPUTQ_CALLBACK) (EP_RAIL *rail, void *arg);
77340 +
77341 +typedef struct ep_inputq
77342 +{
77343 +    unsigned long      q_hidden;                               /* implementation hidden as ep3 or ep4 */
77344 +} EP_INPUTQ;
77345 +
77346 +typedef struct ep_outputq
77347 +{
77348 +    unsigned long      q_hidden;                               /* implementation hidden as ep3 or ep4 */
77349 +} EP_OUTPUTQ;
77350 +
77351 +/* returned values for ep_outputq_state */
77352 +#define EP_OUTPUTQ_BUSY                0
77353 +#define EP_OUTPUTQ_FAILED      1
77354 +#define EP_OUTPUTQ_FINISHED    2
77355 +
77356 +typedef struct ep_switch
77357 +{
77358 +    unsigned    present:1;
77359 +    unsigned   invalid:1;
77360 +    unsigned   link:3;
77361 +    unsigned   bcast:3;
77362 +    unsigned   lnr;
77363 +} EP_SWITCH;
77364 +
77365 +/*
77366 + * Network error fixup, flush, relocation messages
77367 + */
77368 +typedef struct ep_map_nmd_body
77369 +{
77370 +    uint32_t           nFrags;
77371 +    EP_RAILMASK                Railmask;
77372 +    EP_NMD             Nmd[EP_MAXFRAG];
77373 +} EP_MAP_NMD_BODY;
77374 +
77375 +typedef struct ep_failover_body
77376 +{
77377 +    EP_XID             Xid;
77378 +    EP_RAILMASK                Railmask;
77379 +} EP_FAILOVER_BODY;
77380 +
77381 +typedef struct ep_failover_txd
77382 +{
77383 +    EP_XID             Xid;
77384 +    uint32_t           Rail;
77385 +    EP_ADDR            TxdRail;
77386 +} EP_FAILOVER_TXD;
77387 +
77388 +typedef uint64_t EP_NETERR_COOKIE;
77389 +
77390 +#define EP_PANIC_STRLEN                31
77391 +
77392 +typedef struct ep_node_state
77393 +{
77394 +    unsigned char       State;
77395 +    unsigned char       NetworkErrorState;
77396 +    EP_RAILMASK         Railmask;
77397 +} EP_NODE_STATE;
77398 +
77399 +#define EP_MANAGER_MSG_SIZE            (2 * EP_SYSTEMQ_MSG_ALIGN)
77400 +
77401 +typedef struct ep_manager_msg_hdr
77402 +{
77403 +    EP_XID             Xid;                                    /* Message transaction id */
77404 +
77405 +    uint16_t           NodeId;                                 /* Originating node number */
77406 +    uint16_t           DestId;                                 /* destination node id */
77407 +
77408 +    uint16_t           Checksum;                               /* Message checksum */
77409 +    uint8_t            Rail;                                   /* Rail message associated with */
77410 +    uint8_t            Type;                                   /* Message type */
77411 +
77412 +    uint32_t           Pad;                                    /* pad to 32 bytes */
77413 +
77414 +    uint32_t           Version;                                /* Message Version */
77415 +} EP_MANAGER_MSG_HDR;
77416 +
77417 +typedef union ep_manager_msg_body
77418 +{
77419 +    unsigned char       Space[EP_MANAGER_MSG_SIZE - sizeof (EP_MANAGER_MSG_HDR)];
77420 +
77421 +    EP_NETERR_COOKIE   Cookies[2];                             /* EP_MSG_TYPE_NETERR */
77422 +    EP_MAP_NMD_BODY    MapNmd;                                 /* EP_MSG_TYPE_MAP_NMD */
77423 +    EP_FAILOVER_BODY   Failover;                               /* EP_MSG_TYPE_FAILOVER_REQUEST */
77424 +    EP_FAILOVER_TXD    FailoverTxd;                            /* EP_MSG_TYPE_FAILOVER_RESPONSE */
77425 +    unsigned char       PanicReason[EP_PANIC_STRLEN+1];                /* EP_MSG_TYPE_REMOTE_PANIC */
77426 +    EP_NODE_STATE       NodeState;                              /* EP_MSG_TYPE_GET_NODE_STATE_RESPONSE */   
77427 +    EP_SERVICE          Service;                                /* EP_MSG_TYPE_GET_NODE_STATE */
77428 +} EP_MANAGER_MSG_BODY;
77429 +
77430 +typedef struct ep_manager_msg
77431 +{
77432 +    EP_MANAGER_MSG_BODY Body;
77433 +    EP_MANAGER_MSG_HDR  Hdr;
77434 +} EP_MANAGER_MSG;
77435 +
77436 +#define EP_MANAGER_MSG_VERSION                         0xcad01000
77437 +#define EP_MANAGER_MSG_TYPE_REMOTE_PANIC               0x00
77438 +#define EP_MANAGER_MSG_TYPE_NETERR_REQUEST             0x01
77439 +#define EP_MANAGER_MSG_TYPE_NETERR_RESPONSE            0x02
77440 +#define EP_MANAGER_MSG_TYPE_FLUSH_REQUEST              0x03
77441 +#define EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE             0x04
77442 +#define EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST            0x05
77443 +#define EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE           0x06
77444 +#define EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST           0x07
77445 +#define EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE          0x08
77446 +#define EP_MANAGER_MSG_TYPE_GET_NODE_STATE              0x09
77447 +#define EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE     0x0a
77448 +
77449 +/* Message types which should only be sent when a rail is connected */
77450 +#define EP_MANAGER_MSG_TYPE_CONNECTED(type)            (((type) & 1) == 1)
77451 +
77452 +#define EP_MANAGER_OUTPUTQ_SLOTS       128                     /* # entries in outputq */
77453 +#define EP_MANAGER_INPUTQ_SLOTS                128                     /* # entries in inputq */
77454 +#define EP_MANAGER_OUTPUTQ_RETRIES     31                      /* # retries for manager messages */
77455 +
77456 +/* XID's are allocated from a cache, which doesn't
77457 + * require locking since it relies on the caller to
77458 + * manage the locking for us.
77459 + */
77460 +typedef struct ep_xid_cache
77461 +{
77462 +    struct list_head   Link;
77463 +
77464 +    uint32_t           Handle;                                 /* my XID cache handle */
77465 +    uint64_t           Current;                                /* range of XID.Unique we can allocate from */
77466 +    uint64_t           Last;
77467 +
77468 +    void             (*MessageHandler)(void *arg, EP_MANAGER_MSG *);
77469 +    void              *Arg;
77470 +} EP_XID_CACHE;
77471 +
77472 +#define EP_XID_CACHE_CHUNKS    (10000)
77473 +
77474 +typedef struct ep_node_rail
77475 +{
77476 +    struct list_head    Link;                                  /* can be linked on work lists */
77477 +
77478 +    unsigned char       State;                                 /* node connection state */
77479 +    unsigned char      NetworkErrorState;                      /* reasons for keeping the context filter up */
77480 +    unsigned char      MessageState;                           /* state of messages during passivate/relocate */
77481 +
77482 +    EP_XID             MsgXid;                                 /* neterr/flush transaction id */
77483 +    long               NextRunTime;                            /* time to drop context filter for destroyed dma packet, or to send next request */
77484 +    EP_NETERR_COOKIE   NetworkErrorCookies[2];                 /* identify cookie for destroyed atomic packet */
77485 +
77486 +    uint32_t           Cookie;                                 /* per-node network error cookie */
77487 +    spinlock_t         CookieLock;                             /* and spinlock for it. */
77488 +
77489 +    struct list_head    StalledDmas;                           /* list of stalled DMAs */
77490 +} EP_NODE_RAIL;
77491 +
77492 +#define EP_NODE_DISCONNECTED           0                       /* node is disconnected */
77493 +#define EP_NODE_CONNECTING             1                       /* awaiting connection */
77494 +#define EP_NODE_CONNECTED              2                       /* node is connected */
77495 +#define EP_NODE_LEAVING_CONNECTED      3                       /* node is starting to disconnect */
77496 +#define EP_NODE_LOCAL_PASSIVATE        4                       /* flushing context filter/run queues */
77497 +#define EP_NODE_REMOTE_PASSIVATE       5                       /* stalling for neterr flush */
77498 +#define EP_NODE_PASSIVATED             6                       /* relocating active/passive messages */
77499 +#define EP_NODE_DISCONNECTING          7                       /* entering disconnected - abort remaining comms */
77500 +#define EP_NODE_NUM_STATES             8
77501 +
77502 +#define EP_NODE_NETERR_ATOMIC_PACKET   (1 << 0)
77503 +#define EP_NODE_NETERR_DMA_PACKET      (1 << 1)
77504 +
77505 +#define EP_NODE_PASSIVE_MESSAGES       (1 << 0)
77506 +#define EP_NODE_ACTIVE_MESSAGES                (1 << 1)
77507 +
77508 +/*
77509 + * Kernel thread code is loaded as a table.
77510 + */
77511 +typedef struct ep_symbol
77512 +{
77513 +    char   *name;
77514 +    EP_ADDR value;
77515 +} EP_SYMBOL;
77516 +
77517 +typedef struct ep_code
77518 +{
77519 +    u_char        *text;
77520 +    u_int         text_size;
77521 +    u_char        *data;
77522 +    u_int         data_size;
77523 +    u_char        *rodata;
77524 +    u_int         rodata_size;
77525 +    EP_SYMBOL     *symbols;
77526 +    
77527 +    int                   ntext;
77528 +    sdramaddr_t    pptext;
77529 +    EP_ADDR       etext;
77530 +    sdramaddr_t   _stext;
77531 +    sdramaddr_t          _rodata;
77532 +
77533 +    int                   ndata;
77534 +    sdramaddr_t    ppdata;
77535 +    EP_ADDR       edata;
77536 +    sdramaddr_t   _sdata;
77537 +} EP_CODE;
77538 +
77539 +typedef struct ep_switchstate
77540 +{
77541 +    unsigned char       linkid;
77542 +    unsigned char       LNR;
77543 +    unsigned char       bcast;
77544 +    unsigned char       uplink;
77545 +} EP_SWITCHSTATE;
77546 +
77547 +typedef struct ep_rail_ops
77548 +{
77549 +    void       (*DestroyRail) (EP_RAIL *rail);
77550 +
77551 +    int        (*StartRail) (EP_RAIL *rail);
77552 +    void       (*StallRail) (EP_RAIL *rail);
77553 +    void       (*StopRail) (EP_RAIL *rail);
77554 +
77555 +    sdramaddr_t (*SdramAlloc) (EP_RAIL *rail, EP_ADDR addr, unsigned size);
77556 +    void        (*SdramFree) (EP_RAIL *rail, sdramaddr_t addr, unsigned size);
77557 +    void        (*SdramWriteb) (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
77558 +    
77559 +    void       (*KaddrMap) (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t kaddr, unsigned len, unsigned int perm, int ep_attr);
77560 +    void       (*SdramMap) (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned len, unsigned int perm, int ep_attr);
77561 +    void       (*Unmap) (EP_RAIL *rail, EP_ADDR eaddr, unsigned len);
77562 +
77563 +    void       *(*DvmaReserve) (EP_RAIL *rail, EP_ADDR eaddr, unsigned npages);
77564 +    void       (*DvmaRelease) (EP_RAIL *rail, EP_ADDR eaddr, unsigned npages, void *private);
77565 +    void       (*DvmaSetPte) (EP_RAIL *rail, void *private, unsigned index, physaddr_t phys, unsigned int perm);
77566 +    physaddr_t (*DvmaReadPte) (EP_RAIL *rail, void *private, unsigned index);
77567 +    void       (*DvmaUnload)(EP_RAIL *rail, void *private, unsigned index, unsigned npages);
77568 +    void       (*FlushTlb) (EP_RAIL *rail);
77569 +
77570 +    int        (*ProbeRoute) (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, 
77571 +                              int *linkdown, int attempts, EP_SWITCH *lsw);
77572 +    void       (*PositionFound) (EP_RAIL *rail, ELAN_POSITION *pos);
77573 +    int                (*CheckPosition) (EP_RAIL *rail);
77574 +    void       (*NeterrFixup) (EP_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
77575 +
77576 +    void       (*LoadSystemRoute) (EP_RAIL *rail, unsigned int vp, unsigned int lowNode, unsigned int highNode);
77577 +
77578 +    void       (*LoadNodeRoute) (EP_RAIL *rail, unsigned nodeId);
77579 +    void       (*UnloadNodeRoute) (EP_RAIL *rail, unsigned nodeId);
77580 +    void       (*LowerFilter) (EP_RAIL *rail, unsigned nodeId);
77581 +    void       (*RaiseFilter) (EP_RAIL *rail, unsigned nodeId);
77582 +    void       (*NodeDisconnected) (EP_RAIL *rail, unsigned nodeId);
77583 +
77584 +    void       (*FlushFilters) (EP_RAIL *rail);
77585 +    void       (*FlushQueues) (EP_RAIL *rail);
77586 +
77587 +
77588 +    EP_INPUTQ  *(*AllocInputQ) (EP_RAIL *rail, unsigned qnum, unsigned slotSize, unsigned slotCount,
77589 +                               void (*callback)(EP_RAIL *rail, void *arg), void *arg);
77590 +    void       (*FreeInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
77591 +    void       (*EnableInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
77592 +    void       (*DisableInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
77593 +    int                (*PollInputQ) (EP_RAIL *rail, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
77594 +
77595 +    EP_OUTPUTQ *(*AllocOutputQ) (EP_RAIL *rail, unsigned slotSize, unsigned slotCount);
77596 +    void       (*FreeOutputQ) (EP_RAIL *rail, EP_OUTPUTQ *outputq);
77597 +    void       *(*OutputQMsg) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum);
77598 +    int         (*OutputQState) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum);
77599 +    int                (*OutputQSend) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum, unsigned size,
77600 +                               unsigned vp, unsigned qnum, unsigned retries);
77601 +
77602 +    void        (*FillOutStats) (EP_RAIL *rail, char *str);
77603 +    void       (*Debug) (EP_RAIL *rail);
77604 +
77605 +} EP_RAIL_OPS;
77606 +
77607 +#define ep_alloc_inputq(rail,qnum,slotSize,slotCount,callback,arg) \
77608 +       (rail)->Operations.AllocInputQ(rail,qnum,slotSize,slotCount,callback,arg)
77609 +#define ep_free_inputq(rail,inputq) \
77610 +       (rail)->Operations.FreeInputQ(rail,inputq)
77611 +#define ep_enable_inputq(rail,inputq) \
77612 +       (rail)->Operations.EnableInputQ(rail,inputq)
77613 +#define ep_disable_inputq(rail,inputq) \
77614 +       (rail)->Operations.DisableInputQ(rail,inputq)
77615 +#define ep_poll_inputq(rail,inputq,maxCount,handler,arg) \
77616 +       (rail)->Operations.PollInputQ(rail,inputq,maxCount,handler,arg)
77617 +#define ep_alloc_outputq(rail,slotSize,slotCount)\
77618 +       (rail)->Operations.AllocOutputQ(rail,slotSize,slotCount)
77619 +#define ep_free_outputq(rail,outputq)\
77620 +       (rail)->Operations.FreeOutputQ(rail,outputq)
77621 +#define ep_outputq_msg(rail,outputq,slotNum)\
77622 +       (rail)->Operations.OutputQMsg(rail,outputq,slotNum)
77623 +#define ep_outputq_state(rail,outputq,slotNum)\
77624 +       (rail)->Operations.OutputQState(rail,outputq,slotNum)
77625 +#define ep_outputq_send(rail,outputq,slotNum,size,vp,qnum,retries)\
77626 +       (rail)->Operations.OutputQSend(rail,outputq,slotNum,size,vp,qnum,retries)
77627 +
77628 +struct ep_rail
77629 +{
77630 +    EP_SYS            *System;                                 /* "system" we've attached to */
77631 +
77632 +    unsigned char      Number;                                 /* Rail number */
77633 +    unsigned char       State;                                 /* Rail state */
77634 +    char               Name[32];                               /* Rail name */
77635 +
77636 +    struct list_head    ManagerLink;                           /* linked on ManagedRails list */
77637 +
77638 +    ELAN_DEVINFO       Devinfo;                                /* Device information for this rail */
77639 +    ELAN_POSITION       Position;                              /* Position on switch device is connected to */
77640 +
77641 +    EP_RAIL_OPS                Operations;                             /* device specific operations */
77642 +    EP_RAIL_STATS      Stats;                                  /* statistics */
77643 +
77644 +    EP_ALLOC            ElanAllocator;                         /* per-rail elan memory allocator */
77645 +    EP_ALLOC            MainAllocator;                         /* per-rail main memory allocator */
77646 +
77647 +    unsigned           TlbFlushRequired;                       /* lazy TLB flushing */
77648 +
77649 +    int                SwitchBroadcastLevel;                   /* current switch level ok for broadcast */
77650 +    unsigned long       SwitchBroadcastLevelTick;
77651 +
77652 +    int                        SwitchProbeLevel;                       /* result of last switch probe */
77653 +    EP_SWITCHSTATE      SwitchState[ELAN_MAX_LEVELS];
77654 +    EP_SWITCHSTATE      SwitchLast[ELAN_MAX_LEVELS];
77655 +    unsigned long       SwitchProbeTick[ELAN_MAX_LEVELS];
77656 +    
77657 +    /* Node disconnecting/connecting state */
77658 +    EP_CALLBACK        *CallbackList[EP_CB_COUNT];             /* List of callbacks */
77659 +    kmutex_t           CallbackLock;                           /*   and lock for it. */
77660 +    unsigned           CallbackStep;                           /*  step through UpdateConnectionState. */
77661 +
77662 +    /* back pointer for cluster membership */
77663 +    void              *ClusterRail;
77664 +
77665 +    /* Per node state for message passing */
77666 +    EP_NODE_RAIL       *Nodes;                                 /* array of per-node state */
77667 +    statemap_t         *NodeSet;                               /* per-rail statemap of connected nodes */
77668 +    statemap_t        *NodeChangeMap;                          /* statemap of nodes being connected/disconnected */
77669 +    statemap_t        *NodeChangeTmp;                          /*   and temporary copies */
77670 +
77671 +    struct list_head    NetworkErrorList;                      /* list of nodes resolving network errors */
77672 +    struct list_head    LocalPassivateList;                    /* list of nodes in state LOCAL_PASSIVATE */
77673 +    struct list_head    RemotePassivateList;                   /* list of nodes waiting for remote network error flush */
77674 +    struct list_head    PassivatedList;                                /* list of nodes performing message relocation */
77675 +    struct list_head    DisconnectingList;                     /* list of nodes transitioning to disconnected */
77676 +
77677 +    EP_XID_CACHE       XidCache;                               /* XID cache for node messages (single threaded access) */
77678 +
77679 +    /* Manager messages */
77680 +    EP_INPUTQ         *ManagerInputQ;
77681 +    EP_OUTPUTQ        *ManagerOutputQ;
77682 +    unsigned           ManagerOutputQNextSlot;
77683 +    spinlock_t         ManagerOutputQLock;
77684 +
77685 +    /* /proc entries */
77686 +    struct proc_dir_entry *ProcDir;
77687 +    struct proc_dir_entry *SvcIndicatorDir;
77688 +    int                    CallbackRegistered;
77689 +};
77690 +
77691 +/* values for State */
77692 +#define EP_RAIL_STATE_UNINITIALISED    0                       /* device uninitialised */
77693 +#define EP_RAIL_STATE_STARTED          1                       /* device started but network position unknown */
77694 +#define EP_RAIL_STATE_RUNNING          2                       /* device started and position known */
77695 +#define EP_RAIL_STATE_INCOMPATIBLE     3                       /* device started, but position incompatible */
77696 +
77697 +typedef struct ep_rail_entry
77698 +{
77699 +    struct list_head   Link;
77700 +    EP_RAIL           *Rail;
77701 +} EP_RAIL_ENTRY;
77702 +
77703 +typedef struct ep_subsys
77704 +{
77705 +    EP_SYS            *Sys;
77706 +
77707 +    struct list_head   Link;                                   /* Linked on sys->Subsystems */
77708 +    char              *Name;                                   /* Name to lookup */
77709 +    
77710 +    void              (*Destroy)    (struct ep_subsys *subsys, EP_SYS *sys);
77711 +
77712 +    int                       (*AddRail)    (struct ep_subsys *subsys, EP_SYS *sys, EP_RAIL *rail);
77713 +    void              (*RemoveRail) (struct ep_subsys *subsys, EP_SYS *sys, EP_RAIL *rail);
77714 +} EP_SUBSYS;
77715 +
77716 +typedef struct ep_node
77717 +{
77718 +    EP_RAILMASK                ConnectedRails;
77719 +} EP_NODE;
77720 +
77721 +struct ep_sys
77722 +{
77723 +    EP_RAIL         *Rails[EP_MAX_RAILS];                      /* array of all available devices */
77724 +
77725 +    kmutex_t        StartStopLock;                             /* lock for starting stopping rails */
77726 +
77727 +    ELAN_POSITION    Position;                                 /* primary node position */
77728 +
77729 +    EP_NMH_TABLE     MappingTable;                             /* Network mapping handle table */
77730 +
77731 +    EP_ALLOC        Allocator;                                 /* shared main memory allocator */
77732 +
77733 +    EP_DVMA_STATE    DvmaState;                                        /* dvma state */
77734 +
77735 +    kmutex_t        SubsysLock;                               /* lock on the Subsystems list */
77736 +    struct list_head Subsystems;                               /* list of subsystems */
77737 +
77738 +    /* device manager state */
77739 +    struct list_head ManagedRails;                             /* list of managed devices */
77740 +    EP_KTHREAD       ManagerThread;                            /* place for manager thread to sleep */
77741 +
77742 +    /* global node state */
77743 +    spinlock_t      NodeLock;                                  /* spinlock for node state (including per-device node state) */
77744 +    EP_NODE        *Nodes;                                     /* system wide node state */
77745 +    statemap_t      *NodeSet;                                  /* system wide nodeset */
77746 +    struct list_head NodesetCallbackList;                      /* list of "callbacks" */
77747 +
77748 +    /* Transaction Id */
77749 +    struct list_head XidCacheList;                             /* list of XID caches */
77750 +    uint32_t        XidGeneration;                             /* XID generation number (distinguishes reboots) */
77751 +    uint32_t        XidHandle;                                 /* XID handles (distinguishes XID caches) */
77752 +    uint64_t        XidNext;                                   /* next XID to prime cache */
77753 +    spinlock_t      XidLock;                                   /*   and its spinlock  */
77754 +
77755 +    /* Shutdown/Panic */
77756 +    unsigned int     Shutdown;                                 /* node has shutdown/panic'd */
77757 +};
77758 +
77759 +#if defined(DEBUG_ASSERT)
77760 +extern int ep_assfail (EP_RAIL *rail, const char *string, const char *func, const char *file, const int line);
77761 +extern int sdram_assert;
77762 +extern int assfail_mode;
77763 +
77764 +#define EP_ASSERT(rail, EX)    do { \
77765 +    if (!(EX) && ep_assfail ((EP_RAIL *) (rail), #EX, __FUNCTION__, __FILE__, __LINE__)) { \
77766 +       BUG(); \
77767 +    } \
77768 +} while (0)
77769 +#define EP_ASSFAIL(rail,EX)    do { \
77770 +   if (ep_assfail ((EP_RAIL *) (rail), EX, __FUNCTION__, __FILE__, __LINE__)) { \
77771 +       BUG(); \
77772 +    } \
77773 +} while (0)
77774 +#define SDRAM_ASSERT(EX)       (sdram_assert ? (EX) : 1)
77775 +#else
77776 +#define EP_ASSERT(rail, EX)    ((void) 0)
77777 +#define EP_ASSFAIL(rail,str)   ((void) 0)
77778 +#define SDRAM_ASSERT(EX)       (1)
77779 +#endif
77780 +
77781 +/* conf_osdep.c */
77782 +extern EP_SYS    *ep_system(void);
77783 +extern void       ep_mod_dec_usecount (void);
77784 +extern void       ep_mod_inc_usecount (void);
77785 +
77786 +/* procfs_osdep.c */
77787 +extern struct proc_dir_entry *ep_procfs_root;
77788 +extern struct proc_dir_entry *ep_config_root;
77789 +
77790 +/* kcomm.c */
77791 +extern int        ep_sys_init (EP_SYS *sys);
77792 +extern void       ep_sys_fini (EP_SYS *sys);
77793 +extern void      ep_shutdown (EP_SYS *sys);
77794 +extern int        ep_init_rail (EP_SYS *sys, EP_RAIL *rail);
77795 +extern void       ep_destroy_rail (EP_RAIL *rail);
77796 +extern int        ep_start_rail (EP_RAIL *rail);
77797 +extern void       ep_stop_rail (EP_RAIL *rail);
77798 +
77799 +extern void       ep_connect_node (EP_RAIL *rail, int nodeId);
77800 +extern int        ep_disconnect_node (EP_RAIL *rail, int nodeId);
77801 +
77802 +extern EP_XID     ep_xid_cache_alloc (EP_SYS *sys, EP_XID_CACHE *cache);
77803 +extern void       ep_xid_cache_init (EP_SYS *sys, EP_XID_CACHE *cache);
77804 +extern void       ep_xid_cache_destroy (EP_SYS *sys, EP_XID_CACHE *cache);
77805 +
77806 +extern int        ep_send_message (EP_RAIL *rail, int nodeId, int type, EP_XID xid, EP_MANAGER_MSG_BODY *body);
77807 +
77808 +extern void       ep_panic_node (EP_SYS *sys, int nodeId, unsigned char *reason);
77809 +
77810 +extern void      ep_subsys_add (EP_SYS *sys, EP_SUBSYS *subsys);
77811 +extern void      ep_subsys_del (EP_SYS *sys, EP_SUBSYS *subsys);
77812 +extern EP_SUBSYS *ep_subsys_find (EP_SYS *sys, char *name);
77813 +
77814 +extern void       DisplayNodes (EP_RAIL *rail);
77815 +
77816 +extern void       ep_fillout_stats(EP_RAIL *rail, char *str);
77817 +
77818 +/* neterr.c */
77819 +extern void       ep_queue_network_error (EP_RAIL *rail, int nodeId, int what, int channel, EP_NETERR_COOKIE cookie);
77820 +
77821 +/* kcomm_elan3.c */
77822 +extern unsigned int ep3_create_rails (EP_SYS *sys, unsigned int disabled);
77823 +
77824 +/* kcomm_elan4.c */
77825 +extern unsigned int ep4_create_rails (EP_SYS *sys, unsigned int disabled);
77826 +
77827 +/* probenetwork.c */
77828 +extern int       ProbeNetwork (EP_RAIL *rail, ELAN_POSITION *pos);
77829 +extern void      CheckPosition (EP_RAIL *rail);
77830 +
77831 +extern uint16_t   CheckSum (char *msg, int nob);
77832 +
77833 +/* threadcode.c */
77834 +extern EP_ADDR    ep_symbol (EP_CODE *code, char *name);
77835 +extern int        ep_loadcode (EP_RAIL *rail, EP_CODE *code);
77836 +extern void       ep_unloadcode (EP_RAIL *rail, EP_CODE *code);
77837 +
77838 +/* Public interface */
77839 +/* debug.c */
77840 +extern int              ep_sprintf_bitmap (char *str, unsigned nbytes, bitmap_t *bitmap, int base, int count, int off);
77841 +extern void             ep_display_bitmap (char *prefix, char *tag, bitmap_t *bitmap, unsigned base, unsigned nbits);
77842 +
77843 +/* epcomms.c */
77844 +extern int              ep_waitfor_nodeid (EP_SYS *sys);
77845 +extern int              ep_nodeid (EP_SYS *sys);
77846 +extern int              ep_numnodes (EP_SYS *sys);
77847 +
77848 +/* railhints.c */
77849 +extern int              ep_pickRail(EP_RAILMASK railmask);
77850 +
77851 +/* support.c */
77852 +extern int              ep_register_nodeset_callback (EP_SYS *sys, void (*routine)(void *, statemap_t *), void *arg);
77853 +extern void             ep_remove_nodeset_callback (EP_SYS *sys, void (*routine)(void *, statemap_t *), void *arg);
77854 +extern void             ep_call_nodeset_callbacks (EP_SYS *sys, statemap_t *map);
77855 +
77856 +extern int              ep_register_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg);
77857 +extern void             ep_remove_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg);
77858 +extern void             ep_call_callbacks (EP_RAIL *rail, unsigned idx, statemap_t *);
77859 +extern unsigned int     ep_backoff (EP_BACKOFF *backoff, int type);
77860 +
77861 +#endif /* !__ELAN__ */
77862 +
77863 +typedef struct display_info {
77864 +    void (*func)(long, char *, ...);
77865 +    long arg;
77866 +} DisplayInfo;
77867 +
77868 +extern DisplayInfo di_ep_debug;
77869 +
77870 +
77871 +#endif /* __ELAN_KCOMM_H */
77872 +
77873 +/*
77874 + * Local variables:
77875 + * c-file-style: "stroustrup"
77876 + * End:
77877 + */
77878 Index: linux-2.4.21/include/elan/kcomm_stats.h
77879 ===================================================================
77880 --- linux-2.4.21.orig/include/elan/kcomm_stats.h        2004-02-23 16:02:56.000000000 -0500
77881 +++ linux-2.4.21/include/elan/kcomm_stats.h     2005-06-01 23:12:54.712421928 -0400
77882 @@ -0,0 +1,153 @@
77883 +/*
77884 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
77885 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
77886 + *
77887 + *    For licensing information please see the supplied COPYING file
77888 + *
77889 + */
77890 +
77891 +#ifndef __EP_EPSTATS_H
77892 +#define __EP_EPSTATS_H
77893 +
77894 +#ident "$Id: kcomm_stats.h,v 1.4.8.1 2004/11/12 10:54:51 mike Exp $"
77895 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_stats.h,v $ */
77896 +
77897 +#define EP_BUCKET_SLOTS                        8
77898 +
77899 +#define BucketStat(obj,stat,size)      ((size) < 128   ? (obj)->Stats.stat[0]++ : \
77900 +                                        (size) < 512   ? (obj)->Stats.stat[1]++ : \
77901 +                                        (size) < 1024  ? (obj)->Stats.stat[2]++ : \
77902 +                                        (size) < 8192  ? (obj)->Stats.stat[3]++ : \
77903 +                                        (size) < 16384 ? (obj)->Stats.stat[4]++ : \
77904 +                                        (size) < 32768 ? (obj)->Stats.stat[5]++ : \
77905 +                                        (size) < 65536 ? (obj)->Stats.stat[6]++ : \
77906 +                                        (obj)->Stats.stat[7]++)
77907 +#define IncrStat(obj,stat)             ((obj)->Stats.stat++)
77908 +
77909 +
77910 +#define EP3_NUM_DMA_FAIL               11      /* NOTE - the same as EP_NUM_RETRIES */
77911 +
77912 +#define ADD_STAT(STATS,STAT,VALUE) { unsigned long now = lbolt;\
77913 +   STATS.STAT.total += VALUE; \
77914 +   if  ( ( now - STATS.STAT.last_time ) > HZ ) { \
77915 + STATS.STAT.last_per_sec = ( STATS.STAT.total - STATS.STAT.last_count)/ ( (( now - STATS.STAT.last_time ) + (HZ/2)) / HZ);\
77916 + STATS.STAT.last_time = now; \
77917 + STATS.STAT.last_count = STATS.STAT.total; \
77918 +   }} \
77919 +
77920 +#define INC_STAT(STATS,STAT) ADD_STAT(STATS,STAT,1)
77921 +
77922 +#define GET_STAT_PER_SEC(STATS, STAT) (  (( lbolt - STATS.STAT.last_time ) < (HZ * 5)) ? STATS.STAT.last_per_sec : 0 )
77923 +#define GET_STAT_TOTAL(STATS, STAT) ( STATS.STAT.total )
77924 +
77925 +struct ep_stats_count 
77926 +{
77927 +    unsigned long total;
77928 +    unsigned long last_time;
77929 +    unsigned long last_count;
77930 +    unsigned long last_per_sec;
77931 +};
77932 +
77933 +typedef struct ep_stats_count          EP_STATS_COUNT;
77934 +
77935 +typedef struct ep3_rail_stats
77936 +{
77937 +    unsigned long      IssueDmaFail[EP3_NUM_DMA_FAIL];
77938 +
77939 +    unsigned long      DmaQueueLength[EP_BUCKET_SLOTS];
77940 +    unsigned long      CprocDmaQueueOverflow;
77941 +    unsigned long      DprocDmaQueueOverflow;
77942 +    unsigned long      IprocDmaQueueOverflow;
77943 +    unsigned long      CprocEventQueueOverflow;
77944 +    unsigned long      DprocEventQueueOverflow;
77945 +    unsigned long      IprocEventQueueOverflow;
77946 +
77947 +    unsigned long      QueueingPacketTrap;
77948 +    unsigned long      DmaIdentifyTrap;
77949 +    unsigned long      ThreadIdentifyTrap;
77950 +    unsigned long      DmaPacketTrap;
77951 +} EP3_RAIL_STATS;
77952 +
77953 +typedef struct ep4_rail_stats
77954 +{
77955 +    unsigned long       somestatsgohere;
77956 +} EP4_RAIL_STATS;
77957 +
77958 +typedef struct ep_rail_stats
77959 +{
77960 +    unsigned long      SendMessageFailed;
77961 +    unsigned long      NeterrAtomicPacket;
77962 +    unsigned long       NeterrDmaPacket;
77963 +
77964 +    EP_STATS_COUNT      rx;
77965 +    EP_STATS_COUNT      rx_len;
77966 +
77967 +    EP_STATS_COUNT      tx;
77968 +    EP_STATS_COUNT      tx_len;
77969 +
77970 +} EP_RAIL_STATS;
77971 +
77972 +typedef struct ep_cm_rail_stats
77973 +{
77974 +    /* cluster membership statistics */
77975 +    unsigned long      HeartbeatsSent;
77976 +    unsigned long      HeartbeatsRcvd;
77977 +    
77978 +    unsigned long      RetryHeartbeat;
77979 +    unsigned long      RejoinRequest;
77980 +    unsigned long      RejoinTooSlow;
77981 +    unsigned long      LaunchMessageFail;
77982 +    unsigned long      MapChangesSent;
77983 +
77984 +    /* Heartbeat scheduling stats */
77985 +    unsigned long      HeartbeatOverdue;
77986 +} EP_CM_RAIL_STATS;
77987 +
77988 +typedef struct ep_comms_rail_stats
77989 +{
77990 +    /* kernel comms large message statistics */
77991 +    unsigned long      TxEnveEvent;
77992 +    unsigned long      TxDataEvent;
77993 +    unsigned long      TxDoneEvent;
77994 +    unsigned long      RxDoneEvent;
77995 +    unsigned long      MulticastTxDone;
77996 +    unsigned long      QueueReceive;
77997 +
77998 +    unsigned long      TxEnveRetry;
77999 +    unsigned long      TxDataRetry;
78000 +    unsigned long      TxDoneRetry;
78001 +    unsigned long      RxThrdEvent;
78002 +    unsigned long      RxDataRetry;
78003 +    unsigned long      RxDoneRetry;
78004 +    unsigned long      StallThread;
78005 +    unsigned long      ThrdWaiting;
78006 +    unsigned long      CompleteEnvelope;
78007 +
78008 +    unsigned long      NoFreeTxds;
78009 +    unsigned long      NoFreeRxds;
78010 +
78011 +    unsigned long      LockRcvrTrapped;
78012 +} EP_COMMS_RAIL_STATS;
78013 +
78014 +typedef struct ep_comms_stats
78015 +{
78016 +    unsigned long      DataXmit[8];
78017 +    unsigned long      McastXmit[8];
78018 +    unsigned long      RPCXmit[8];
78019 +    unsigned long      RPCPut[8];
78020 +    unsigned long      RPCGet[8];
78021 +    unsigned long      CompleteRPC[8];
78022 +    unsigned long      RxData[8];
78023 +    unsigned long      RxMcast[8];
78024 +
78025 +    unsigned long      NoFreeTxds;
78026 +    unsigned long      NoFreeRxds;
78027 +} EP_COMMS_STATS;
78028 +
78029 +#endif /* __EP_EPSTATS_H */
78030 +
78031 +/*
78032 + * Local variables:
78033 + * c-file-style: "stroustrup"
78034 + * End:
78035 + */
78036 Index: linux-2.4.21/include/elan/kmap.h
78037 ===================================================================
78038 --- linux-2.4.21.orig/include/elan/kmap.h       2004-02-23 16:02:56.000000000 -0500
78039 +++ linux-2.4.21/include/elan/kmap.h    2005-06-01 23:12:54.713421776 -0400
78040 @@ -0,0 +1,68 @@
78041 +/*
78042 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78043 + *
78044 + *    For licensing information please see the supplied COPYING file
78045 + *
78046 + */
78047 +
78048 +#ifndef __ELAN_KMAP_H
78049 +#define __ELAN_KMAP_H
78050 +
78051 +#ident "$Id: kmap.h,v 1.3.8.1 2004/12/14 10:19:14 mike Exp $"
78052 +/*      $Source: /cvs/master/quadrics/epmod/kmap.h,v $ */
78053 +
78054 +#include <elan/rmap.h>
78055 +
78056 +extern void ep_perrail_kaddr_map (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t vaddr, unsigned long len, unsigned int perm, int ep_attr);
78057 +extern void ep_perrail_sdram_map (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned long len, unsigned int perm, int ep_attr);
78058 +extern void ep_perrail_unmap (EP_RAIL *rail, EP_ADDR eaddr, unsigned long len);
78059 +extern void ep_perrail_dvma_sync (EP_RAIL *rail);
78060 +
78061 +typedef struct ep_dvma_nmh
78062 +{
78063 +    EP_NMH             dvma_nmh;
78064 +    
78065 +    struct list_head   dvma_link;                              /* chained on ep_dvma_state */
78066 +    unsigned           dvma_perm;                              /* permissions for region */
78067 +
78068 +    spinlock_t         dvma_lock;
78069 +    EP_RAILMASK                dvma_railmask;                          /* bitmap of rails */
78070 +    EP_RAIL           *dvma_rails[EP_MAX_RAILS];               /* associated rails */
78071 +    void              *dvma_private[EP_MAX_RAILS];             /* pointers to rail private data */
78072 +    unsigned int        dvma_attrs[1];                         /* bitmap of which rails pages are loaded NOTE - max 32 rails */
78073 +} EP_DVMA_NMH;
78074 +
78075 +/* values for dvma_perm */
78076 +#define EP_PERM_EXECUTE                0
78077 +#define EP_PERM_READ           1
78078 +#define EP_PERM_WRITE          2
78079 +#define EP_PERM_ALL            3
78080 +
78081 +typedef struct ep_dvma_state
78082 +{
78083 +    kmutex_t           dvma_lock;
78084 +    struct list_head    dvma_handles;
78085 +    struct list_head    dvma_rails;
78086 +    EP_RMAP           *dvma_rmap;
78087 +} EP_DVMA_STATE;
78088 +
78089 +extern void    ep_dvma_init (EP_SYS *sys);
78090 +extern void    ep_dvma_fini (EP_SYS *sys);
78091 +extern EP_NMH *ep_dvma_reserve (EP_SYS *sys, unsigned npages, unsigned perm);
78092 +extern void    ep_dvma_release (EP_SYS *sys, EP_NMH *nmh);
78093 +extern void    ep_dvma_load (EP_SYS *sys, void *map, caddr_t vaddr, unsigned len, 
78094 +                            EP_NMH *nmh, unsigned index, EP_RAILMASK *hints, EP_NMD *subset);
78095 +extern void    ep_dvma_unload (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd);
78096 +  
78097 +extern void    ep_dvma_remove_rail (EP_SYS *sys, EP_RAIL *rail);
78098 +extern int     ep_dvma_add_rail (EP_SYS *sys, EP_RAIL *rail);
78099 +
78100 +extern uint16_t rolling_check_sum (char *msg, int nob, uint16_t sum);
78101 +
78102 +#endif /* __ELAN_KMAP_H */
78103 +
78104 +/*
78105 + * Local variables:
78106 + * c-file-style: "stroustrup"
78107 + * End:
78108 + */
78109 Index: linux-2.4.21/include/elan/kmsg.h
78110 ===================================================================
78111 --- linux-2.4.21.orig/include/elan/kmsg.h       2004-02-23 16:02:56.000000000 -0500
78112 +++ linux-2.4.21/include/elan/kmsg.h    2005-06-01 23:12:54.713421776 -0400
78113 @@ -0,0 +1,14 @@
78114 +/*
78115 + *    Copyright (c) 2003 by Quadrics Ltd.
78116 + *
78117 + *    For licensing information please see the supplied COPYING file
78118 + *
78119 + */
78120 +
78121 +#ifndef __ELAN_KMSG_H
78122 +#define __ELAN_KMSG_H
78123 +
78124 +#ident "@(#)$Id: kmsg.h,v 1.1 2003/09/23 13:55:12 david Exp $"
78125 +/*      $Source: /cvs/master/quadrics/epmod/kmsg.h,v $ */
78126 +
78127 +#endif /* __ELAN_KMSG_H */
78128 Index: linux-2.4.21/include/elan/kthread.h
78129 ===================================================================
78130 --- linux-2.4.21.orig/include/elan/kthread.h    2004-02-23 16:02:56.000000000 -0500
78131 +++ linux-2.4.21/include/elan/kthread.h 2005-06-01 23:12:54.713421776 -0400
78132 @@ -0,0 +1,53 @@
78133 +/*
78134 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78135 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
78136 + *
78137 + *    For licensing information please see the supplied COPYING file
78138 + *
78139 + */
78140 +
78141 +#ifndef __ELAN3_KTHREAD_H
78142 +#define __ELAN3_KTHREAD_H
78143 +
78144 +#ident "@(#)$Id: kthread.h,v 1.4 2004/05/06 14:24:08 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
78145 +/*      $Source: /cvs/master/quadrics/epmod/kthread.h,v $*/
78146 +
78147 +typedef struct ep_kthread
78148 +{
78149 +       kcondvar_t      wait;                                   /* place to sleep */
78150 +       spinlock_t      lock;                                   /* and lock */
78151 +       long            next_run;                               /* tick when thread should next run */
78152 +       long            running;                                /* tick when thread started to run */
78153 +       unsigned short  should_stall;
78154 +       unsigned char   state;
78155 +       unsigned int    started:1;
78156 +       unsigned int    should_stop:1;
78157 +       unsigned int    stopped:1;
78158 +} EP_KTHREAD;
78159 +
78160 +#define KT_STATE_SLEEPING              0
78161 +#define KT_STATE_SCHEDULED             1
78162 +#define KT_STATE_RUNNING               2
78163 +#define KT_STATE_STALLED               3
78164 +
78165 +#define AFTER(a, b)                    ((((long)(a)) - ((long)(b))) > 0)
78166 +#define BEFORE(a,b)                    ((((long)(a)) - ((long)(b))) < 0)
78167 +
78168 +extern void ep_kthread_init (EP_KTHREAD *kt);
78169 +extern void ep_kthread_destroy (EP_KTHREAD *kt);
78170 +extern void ep_kthread_started (EP_KTHREAD *kt);
78171 +extern void ep_kthread_stopped (EP_KTHREAD *kt);
78172 +extern int  ep_kthread_should_stall (EP_KTHREAD *kth);
78173 +extern int  ep_kthread_sleep (EP_KTHREAD *kth, long next_run);
78174 +extern void ep_kthread_schedule (EP_KTHREAD *kt, long when);
78175 +extern void ep_kthread_stall (EP_KTHREAD *kth);
78176 +extern void ep_kthread_resume (EP_KTHREAD *kt);
78177 +extern void ep_kthread_stop (EP_KTHREAD *kt);
78178 +extern int  ep_kthread_state (EP_KTHREAD *kt, long *time);
78179 +#endif /* __ELAN3_KTHREAD_H */
78180 +
78181 +/*
78182 + * Local variables:
78183 + * c-file-style: "linux"
78184 + * End:
78185 + */
78186 Index: linux-2.4.21/include/elan/nmh.h
78187 ===================================================================
78188 --- linux-2.4.21.orig/include/elan/nmh.h        2004-02-23 16:02:56.000000000 -0500
78189 +++ linux-2.4.21/include/elan/nmh.h     2005-06-01 23:12:54.714421624 -0400
78190 @@ -0,0 +1,95 @@
78191 +/*
78192 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78193 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
78194 + *
78195 + *    For licensing information please see the supplied COPYING file
78196 + *
78197 + */
78198 +
78199 +#ifndef __ELAN3_NMH_H
78200 +#define __ELAN3_NMH_H
78201 +
78202 +#ident "@(#)$Id: nmh.h,v 1.7 2004/01/06 10:29:55 david Exp $"
78203 +/*      $Source: /cvs/master/quadrics/epmod/nmh.h,v $*/
78204 +
78205 +
78206 +/* Forward declarations */
78207 +typedef struct ep_nmd          EP_NMD;
78208 +typedef struct ep_nmh_ops      EP_NMH_OPS;
78209 +typedef struct ep_nmh          EP_NMH;
78210 +
78211 +/* Railmask held in 16 bit field (packs with nodeId into NMD) */
78212 +typedef uint16_t               EP_RAILMASK;
78213 +
78214 +#define EP_RAIL2RAILMASK(rnum) (1 << (rnum))
78215 +#define EP_RAILMASK_ALL                0xffff
78216 +
78217 +/* kernel comms elan network address */
78218 +typedef uint32_t               EP_ADDR;
78219 +
78220 +/* network mapping descriptor - this is returned to the user from a map operation,
78221 + * and is what is passed to all communication functions */
78222 +struct ep_nmd
78223 +{
78224 +    EP_ADDR    nmd_addr;                                       /* base address */
78225 +    uint32_t   nmd_len;                                        /* size in bytes */
78226 +    uint32_t   nmd_attr;                                       /* nodeid << 16 | railmask */
78227 +};
78228 +
78229 +#define EP_NMD_ATTR(nodeid,railmask)   (((nodeid) << 16) | (railmask))
78230 +#define EP_NMD_NODEID(nmd)             ((nmd)->nmd_attr >> 16)
78231 +#define EP_NMD_RAILMASK(nmd)           ((nmd)->nmd_attr & EP_RAILMASK_ALL)
78232 +
78233 +#if !defined(__ELAN__)
78234 +
78235 +struct ep_nmh_ops
78236 +{
78237 +    int           (*op_map_rails) (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask);   /* add mappings to different rail(s) */
78238 +
78239 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
78240 +    uint16_t (*op_calc_check_sum) (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum); /* calculates check sum              */
78241 +#endif
78242 +};
78243 +
78244 +struct ep_nmh
78245 +{
78246 +    EP_NMD          nmh_nmd;                                   /* public field */
78247 +    struct list_head nmh_link;                                 /* linked on hash table */
78248 +    EP_NMH_OPS     *nmh_ops;                                   /* operations to perform on object */
78249 +};
78250 +
78251 +#define EP_NMH_NUMHASH                 (32 - 11 + 1)           /* one hash table for each power of 2 above pagesize */
78252 +#define EP_NMH_HASHSIZE                        (64)                    /* max size of each hash table */
78253 +
78254 +typedef struct ep_nmh_table
78255 +{
78256 +    struct list_head *tbl_hash[EP_NMH_NUMHASH];
78257 +    unsigned         tbl_size[EP_NMH_NUMHASH];
78258 +} EP_NMH_TABLE;
78259 +
78260 +extern int         ep_nmh_init (EP_NMH_TABLE *tbl);
78261 +extern void        ep_nmh_fini (EP_NMH_TABLE *tbl);
78262 +
78263 +extern void        ep_nmh_insert (EP_NMH_TABLE *tbl, EP_NMH *nmd);
78264 +extern void        ep_nmh_remove (EP_NMH_TABLE *tbl, EP_NMH *nmd);
78265 +extern EP_NMH     *ep_nmh_find (EP_NMH_TABLE *tbl, EP_NMD *nmh);
78266 +
78267 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
78268 +extern uint32_t    ep_nmd_calc_data_check_sum(EP_SYS *sys, EP_NMD *nmd, int nFrags);
78269 +#endif
78270 +
78271 +/* Public interface */
78272 +extern EP_RAILMASK ep_nmd2railmask (EP_NMD *frags, int nFrags);
78273 +extern void        ep_nmd_subset (EP_NMD *subset, EP_NMD *nmd, unsigned off, unsigned len);
78274 +extern int        ep_nmd_merge (EP_NMD *merged, EP_NMD *a, EP_NMD *b);
78275 +extern int         ep_nmd_map_rails (EP_SYS *sys, EP_NMD *nmd, unsigned railmask);
78276 +
78277 +#endif /* __ELAN__ */
78278 +
78279 +#endif /* __ELAN3_NMH_H */
78280 +
78281 +/*
78282 + * Local variables:
78283 + * c-file-style: "stroustrup"
78284 + * End:
78285 + */
78286 Index: linux-2.4.21/include/elan/rmap.h
78287 ===================================================================
78288 --- linux-2.4.21.orig/include/elan/rmap.h       2004-02-23 16:02:56.000000000 -0500
78289 +++ linux-2.4.21/include/elan/rmap.h    2005-06-01 23:12:54.714421624 -0400
78290 @@ -0,0 +1,49 @@
78291 +/*
78292 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78293 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
78294 + *
78295 + *    For licensing information please see the supplied COPYING file
78296 + *
78297 + */
78298 +
78299 +#ifndef __ELAN_RMAP_H
78300 +#define __ELAN_RMAP_H
78301 +
78302 +#ident "$Id: rmap.h,v 1.8 2004/05/19 10:24:40 david Exp $"
78303 +/*      $Source: /cvs/master/quadrics/epmod/rmap.h,v $ */
78304 +
78305 +
78306 +typedef struct ep_rmap_entry 
78307 +{
78308 +    size_t     m_size;
78309 +    u_long     m_addr;
78310 +} EP_RMAP_ENTRY;
78311 +
78312 +typedef struct ep_rmap 
78313 +{
78314 +    spinlock_t      m_lock;
78315 +    kcondvar_t       m_wait;
78316 +    u_int           m_size;
78317 +    u_int           m_free;
78318 +    u_int           m_want;
78319 +    char            *m_name;
78320 +    EP_RMAP_ENTRY    m_map[1];
78321 +} EP_RMAP;
78322 +
78323 +extern void         ep_display_rmap (EP_RMAP *map);
78324 +
78325 +extern void          ep_rmapinit (EP_RMAP *rmap, char *name, u_int mapsize);
78326 +extern unsigned long ep_rmalloc (EP_RMAP *rmap, size_t size, int cansleep);
78327 +extern unsigned long ep_rmalloc_constrained (EP_RMAP *mp, size_t size, unsigned long alo, unsigned long ahi, unsigned long align, int cansleep);
78328 +extern void          ep_rmfree (EP_RMAP *rmap, size_t size, unsigned long addr);
78329 +extern unsigned long ep_rmget (EP_RMAP *rmap, size_t size, unsigned long addr);
78330 +extern EP_RMAP      *ep_rmallocmap (size_t size, char *name, int cansleep);
78331 +extern void          ep_rmfreemap (EP_RMAP *map);
78332 +
78333 +#endif /* __ELAN_RMAP_H */
78334 +
78335 +/*
78336 + * Local variables:
78337 + * c-file-style: "stroustrup"
78338 + * End:
78339 + */
78340 Index: linux-2.4.21/include/elan/statemap.h
78341 ===================================================================
78342 --- linux-2.4.21.orig/include/elan/statemap.h   2004-02-23 16:02:56.000000000 -0500
78343 +++ linux-2.4.21/include/elan/statemap.h        2005-06-01 23:12:54.714421624 -0400
78344 @@ -0,0 +1,52 @@
78345 +/*
78346 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78347 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
78348 + *
78349 + *    For licensing information please see the supplied COPYING file
78350 + *
78351 + */
78352 +
78353 +#ifndef __ELAN_STATEMAP_H
78354 +#define __ELAN_STATEMAP_H
78355 +
78356 +#ident "$Id: statemap.h,v 1.8 2003/10/07 13:22:38 david Exp $"
78357 +/*      $Source: /cvs/master/quadrics/epmod/statemap.h,v $ */
78358 +
78359 +#include <elan/bitmap.h>
78360 +
78361 +/******************************** global state bitmap stuff **********************************/
78362 +typedef struct
78363 +{
78364 +   unsigned int size;
78365 +   unsigned int nob;
78366 +   unsigned int changemap_nob;
78367 +   unsigned int bitmap_nob;
78368 +   bitmap_t    *changemap0;
78369 +   bitmap_t    *changemap1;
78370 +   bitmap_t    *changemap2;
78371 +   bitmap_t    *bitmap;
78372 +} statemap_t;
78373 +
78374 +extern bitmap_t              statemap_getseg (statemap_t *map, unsigned int offset);
78375 +extern void           statemap_setseg (statemap_t *map, unsigned int offset, bitmap_t seg);
78376 +extern bitmap_t       statemap_getbits (statemap_t *map, unsigned int offset, int nbits);
78377 +extern void           statemap_setbits (statemap_t *map, unsigned int offset, bitmap_t bits, int nbits);
78378 +extern void           statemap_zero (statemap_t *map);
78379 +extern void           statemap_setmap (statemap_t *dst, statemap_t *src);
78380 +extern void           statemap_ormap (statemap_t *dst, statemap_t *src);
78381 +extern int           statemap_findchange (statemap_t *map, bitmap_t *newseg, int clearchange);
78382 +extern int            statemap_changed (statemap_t *map);
78383 +extern void           statemap_reset (statemap_t *map);
78384 +extern void           statemap_copy (statemap_t *dst, statemap_t *src);
78385 +extern void           statemap_clearchanges (statemap_t *map);
78386 +extern bitmap_t      *statemap_tobitmap (statemap_t *map);
78387 +extern statemap_t    *statemap_create (int size);
78388 +extern void           statemap_destroy (statemap_t *map);
78389 +
78390 +#endif /* __ELAN_STATEMAP_H */
78391 +
78392 +/*
78393 + * Local variables:
78394 + * c-file-style: "stroustrup"
78395 + * End:
78396 + */
78397 Index: linux-2.4.21/include/elan/stats.h
78398 ===================================================================
78399 --- linux-2.4.21.orig/include/elan/stats.h      2004-02-23 16:02:56.000000000 -0500
78400 +++ linux-2.4.21/include/elan/stats.h   2005-06-01 23:12:54.715421472 -0400
78401 @@ -0,0 +1,85 @@
78402 +/*
78403 + *    Copyright (c) 2003 by Quadrics Limited.
78404 + * 
78405 + *    For licensing information please see the supplied COPYING file
78406 + *
78407 + */
78408 +
78409 +#ident "@(#)$Id: stats.h,v 1.5 2003/09/24 13:55:37 david Exp $"
78410 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/stats.h,v $*/
78411 +
78412 +#ifndef __ELAN_STATS_H
78413 +#define __ELAN_STATS_H
78414 +
78415 +
78416 +/* non-kernel headings */
78417 +#define ELAN_STATS_NAME_MAX_LEN ((uint)64)
78418 +typedef unsigned int ELAN_STATS_IDX;
78419 +
78420 +typedef struct elan_stats_map
78421 +{
78422 +       char entry_name[ELAN_STATS_NAME_MAX_LEN];
78423 +       int  index;
78424 +} ELAN_STATS_MAP;
78425 +
78426 +#if defined(__KERNEL__)
78427 +
78428 +/* stats callbacks */
78429 +#define ELAN_STATS_OPS_VERSION ((u_int)1)
78430 +typedef struct elan_stats_ops
78431 +{
78432 +       u_int  ops_version;
78433 +
78434 +       int (*elan_stats_get_name)    (void * arg, uint index,   caddr_t  name);
78435 +       int (*elan_stats_get_block)   (void * arg, uint entries, ulong   *values);
78436 +       int (*elan_stats_clear_block) (void * arg);
78437 +
78438 +} ELAN_STATS_OPS;
78439 +
78440 +typedef struct elan_stats_struct
78441 +{
78442 +       struct list_head   node;
78443 +
78444 +       ELAN_STATS_IDX     statidx;
78445 +       char               block_name[ELAN_STATS_NAME_MAX_LEN];
78446 +       uint               num_entries;
78447 +       ELAN_STATS_OPS *ops;
78448 +       void              *arg;
78449 +
78450 +} ELAN_STATS_STRUCT;
78451 +
78452 +/* stats.c */
78453 +extern int                   elan_stats_register   (ELAN_STATS_IDX    *statidx, 
78454 +                                                      char              *block_name, 
78455 +                                                      uint               num_entries,
78456 +                                                      ELAN_STATS_OPS *ops,
78457 +                                                      void              *arg);
78458 +
78459 +extern int                   elan_stats_deregister  (ELAN_STATS_IDX  statidx);
78460 +extern ELAN_STATS_STRUCT *elan_stats_find        (ELAN_STATS_IDX  statidx);
78461 +extern ELAN_STATS_STRUCT *elan_stats_find_by_name(caddr_t         block_name);
78462 +extern ELAN_STATS_STRUCT *elan_stats_find_next   (ELAN_STATS_IDX  statidx);
78463 +
78464 +
78465 +/* elan_stats.c */
78466 +extern int elan_stats_get_next_index (ELAN_STATS_IDX statidx, ELAN_STATS_IDX *next_statidx);
78467 +
78468 +extern int elan_stats_find_index     (caddr_t  block_name, ELAN_STATS_IDX *statidx, uint *num_entries);
78469 +
78470 +extern int elan_stats_get_block_info (ELAN_STATS_IDX statidx, caddr_t block_name, uint *num_entries);
78471 +
78472 +extern int elan_stats_get_index_name (ELAN_STATS_IDX statidx, uint index, caddr_t name);
78473 +
78474 +extern int elan_stats_get_block      (ELAN_STATS_IDX statidx, uint entries, ulong   *values);
78475 +
78476 +extern int elan_stats_clear_block    (ELAN_STATS_IDX statidx);
78477 +
78478 +#endif /* __KERNEL__ */
78479 +
78480 +#endif /* __ELAN_STATS_H */
78481 +
78482 +/*
78483 + * Local variables:
78484 + * c-file-style: "linux"
78485 + * End:
78486 + */
78487 Index: linux-2.4.21/include/elan3/compat.h
78488 ===================================================================
78489 --- linux-2.4.21.orig/include/elan3/compat.h    2004-02-23 16:02:56.000000000 -0500
78490 +++ linux-2.4.21/include/elan3/compat.h 2005-06-01 23:12:54.715421472 -0400
78491 @@ -0,0 +1,177 @@
78492 +/*
78493 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78494 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
78495 + *
78496 + *    For licensing information please see the supplied COPYING file
78497 + *
78498 + */
78499 +
78500 +#ident "@(#)$Id: compat.h,v 1.4 2004/06/09 09:07:03 mike Exp $ $Name: QSNETMODULES-4-30_20050128 $"
78501 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/compat.h,v $*/
78502 +
78503 +#ifndef __ELAN3_COMPAT_H
78504 +#define __ELAN3_COMPAT_H
78505 +
78506 +/* compatibility header to allow Eagle branch QSNETLIBS 
78507 + * to compile against head kernel */
78508 +
78509 +#define ELAN_EAGLE_COMPAT
78510 +
78511 +/* vmseg.h */
78512 +#define ELAN_FLAGSTATS                         ELAN3_FLAGSTATS
78513 +
78514 +/* uregs.h */
78515 +#define ELAN_STATS_NAME                                ELAN3_STATS_NAME
78516 +#define elan3_stats_names                      elan_stats_names
78517 +
78518 +/* spinlock.h */
78519 +#define ELAN_SPINLOCK                          ELAN3_SPINLOCK
78520 +#define ELAN_SPINLOCK_MAIN                     ELAN3_SPINLOCK_MAIN
78521 +#define ELAN_SPINLOCK_ELAN                     ELAN3_SPINLOCK_ELAN
78522 +#define ELAN_ME_SPINENTER                      ELAN3_ME_SPINENTER
78523 +#define ELAN_ME_FORCEENTER                     ELAN3_ME_FORCEENTER
78524 +#define ELAN_ME_SPINEXIT                       ELAN3_ME_SPINEXIT
78525 +#define ELAN_SPINENTER                         ELAN3_SPINENTER
78526 +#define ELAN_SPINEXIT                          ELAN3_SPINEXIT
78527 +#define elan3_me_spinblock                     elan_me_spinblock
78528 +#define elan3_spinenter                                elan_spinenter
78529 +
78530 +/* elanio.h */
78531 +#define ELANIO_CONTROL_PATHNAME                        ELAN3IO_CONTROL_PATHNAME
78532 +#define ELANIO_USER_PATHNAME                   ELAN3IO_USER_PATHNAME
78533 +#define ELANIO_SDRAM_PATHNAME                  ELAN3IO_SDRAM_PATHNAME
78534 +#define ELANIO_MAX_PATHNAMELEN                 ELAN3IO_MAX_PATHNAMELEN
78535 +
78536 +#define ELANIO_SET_BOUNDARY_SCAN               ELAN3IO_SET_BOUNDARY_SCAN
78537 +#define ELANIO_CLEAR_BOUNDARY_SCAN             ELAN3IO_CLEAR_BOUNDARY_SCAN
78538 +#define ELANIO_READ_LINKVAL                    ELAN3IO_READ_LINKVAL
78539 +#define ELANIO_WRITE_LINKVAL                   ELAN3IO_WRITE_LINKVAL
78540 +#define ELANIO_SET_DEBUG_STRUCT                        ELAN3IO_SET_DEBUG_STRUCT
78541 +#define ELANIO_SET_DEBUG                       ELAN3IO_SET_DEBUG
78542 +#define ELANIO_DEBUG_BUFFER_STRUCT             ELAN3IO_DEBUG_BUFFER_STRUCT
78543 +#define ELANIO_DEBUG_BUFFER                    ELAN3IO_DEBUG_BUFFER
78544 +#define ELANIO_NETERR_SERVER_STRUCT            ELAN3IO_NETERR_SERVER_STRUCT
78545 +#define ELANIO_NETERR_SERVER                   ELAN3IO_NETERR_SERVER
78546 +#define ELANIO_NETERR_FIXUP                    ELAN3IO_NETERR_FIXUP
78547 +
78548 +#define ELANIO_FREE                            ELAN3IO_FREE
78549 +#define ELANIO_ATTACH                          ELAN3IO_ATTACH
78550 +#define ELANIO_DETACH                          ELAN3IO_DETACH
78551 +#define ELANIO_ADDVP_STRUCT                    ELAN3IO_ADDVP_STRUCT
78552 +#define ELANIO_ADDVP                           ELAN3IO_ADDVP
78553 +#define ELANIO_REMOVEVP                                ELAN3IO_REMOVEVP
78554 +#define ELANIO_BCASTVP_STRUCT                  ELAN3IO_BCASTVP_STRUCT
78555 +#define ELANIO_BCASTVP                         ELAN3IO_BCASTVP
78556 +#define ELANIO_LOAD_ROUTE_STRUCT               ELAN3IO_LOAD_ROUTE_STRUCT
78557 +#define ELANIO_LOAD_ROUTE                      ELAN3IO_LOAD_ROUTE
78558 +#define ELANIO_PROCESS                         ELAN3IO_PROCESS
78559 +#define ELANIO_SETPERM_STRUCT                  ELAN3IO_SETPERM_STRUCT
78560 +#define ELANIO_SETPERM                         ELAN3IO_SETPERM
78561 +#define ELANIO_CLEARPERM_STRUCT                        ELAN3IO_CLEARPERM_STRUCT
78562 +#define ELANIO_CLEARPERM                       ELAN3IO_CLEARPERM
78563 +#define ELANIO_CHANGEPERM_STRUCT               ELAN3IO_CHANGEPERM_STRUCT
78564 +#define ELANIO_CHANGEPERM                      ELAN3IO_CHANGEPERM
78565 +#define ELANIO_HELPER_THREAD                   ELAN3IO_HELPER_THREAD
78566 +#define ELANIO_WAITCOMMAND                     ELAN3IO_WAITCOMMAND
78567 +#define ELANIO_BLOCK_INPUTTER                  ELAN3IO_BLOCK_INPUTTER
78568 +#define ELANIO_SET_FLAGS                       ELAN3IO_SET_FLAGS
78569 +#define ELANIO_WAITEVENT                       ELAN3IO_WAITEVENT
78570 +#define ELANIO_ALLOC_EVENTCOOKIE               ELAN3IO_ALLOC_EVENTCOOKIE
78571 +#define ELANIO_FREE_EVENTCOOKIE                        ELAN3IO_FREE_EVENTCOOKIE
78572 +#define ELANIO_ARM_EVENTCOOKIE                 ELAN3IO_ARM_EVENTCOOKIE
78573 +#define ELANIO_WAIT_EVENTCOOKIE                        ELAN3IO_WAIT_EVENTCOOKIE
78574 +#define ELANIO_SWAPSPACE                       ELAN3IO_SWAPSPACE
78575 +#define ELANIO_EXCEPTION_SPACE                 ELAN3IO_EXCEPTION_SPACE
78576 +#define ELANIO_GET_EXCEPTION                   ELAN3IO_GET_EXCEPTION
78577 +#define ELANIO_UNLOAD_STRUCT                   ELAN3IO_UNLOAD_STRUCT
78578 +#define ELANIO_UNLOAD                          ELAN3IO_UNLOAD
78579 +#define ELANIO_GET_ROUTE_STRUCT                        ELAN3IO_GET_ROUTE_STRUCT
78580 +#define ELANIO_GET_ROUTE                       ELAN3IO_GET_ROUTE
78581 +#define ELANIO_RESET_ROUTE_STRUCT              ELAN3IO_RESET_ROUTE_STRUCT
78582 +#define ELANIO_RESET_ROUTE                     ELAN3IO_RESET_ROUTE
78583 +#define ELANIO_CHECK_ROUTE_STRUCT              ELAN3IO_CHECK_ROUTE_STRUCT
78584 +#define ELANIO_CHECK_ROUTE                     ELAN3IO_CHECK_ROUTE
78585 +#define ELANIO_VP2NODEID_STRUCT                        ELAN3IO_VP2NODEID_STRUCT
78586 +#define ELANIO_VP2NODEID                       ELAN3IO_VP2NODEID
78587 +#define ELANIO_SET_SIGNAL                      ELAN3IO_SET_SIGNAL
78588 +#define ELANIO_PROCESS_2_LOCATION_STRUCT       ELAN3IO_PROCESS_2_LOCATION_STRUCT
78589 +#define ELANIO_PROCESS_2_LOCATION              ELAN3IO_PROCESS_2_LOCATION
78590 +#define ELANIO_GET_DEVINFO_STRUCT              ELAN3IO_GET_DEVINFO_STRUCT
78591 +#define ELANIO_GET_DEVINFO                     ELAN3IO_GET_DEVINFO
78592 +#define ELANIO_GET_POSITION_STRUCT             ELAN3IO_GET_POSITION_STRUCT
78593 +#define ELANIO_GET_POSITION                    ELAN3IO_GET_POSITION
78594 +#define ELANIO_STATS_STRUCT                    ELAN3IO_STATS_STRUCT
78595 +#define ELANIO_STATS                           ELAN3IO_STATS
78596 +#  define ELAN_SYS_STATS_DEVICE                        ELAN3_SYS_STATS_DEVICE
78597 +#  define ELAN_SYS_STATS_ELAN3MMU              ELAN3_SYS_STATS_MMU
78598 +
78599 +#define ELANIO_OFF_FLAG_PAGE                   ELAN3IO_OFF_FLAG_PAGE
78600 +#define ELANIO_OFF_UREG_PAGE                   ELAN3IO_OFF_UREG_PAGE
78601 +#define ELANIO_OFF_COMMAND_PAGE                        ELAN3IO_OFF_COMMAND_PAGE
78602 +
78603 +
78604 +/* elanvp.h */
78605 +#define ELAN_ROUTE_SUCCESS                     ELAN3_ROUTE_SUCCESS
78606 +#define ELAN_ROUTE_SYSCALL_FAILED              ELAN3_ROUTE_SYSCALL_FAILED
78607 +#define ELAN_ROUTE_INVALID                     ELAN3_ROUTE_INVALID
78608 +#define ELAN_ROUTE_TOO_LONG                    ELAN3_ROUTE_TOO_LONG
78609 +#define ELAN_ROUTE_LOAD_FAILED                 ELAN3_ROUTE_LOAD_FAILED
78610 +#define ELAN_ROUTE_PROC_RANGE                  ELAN3_ROUTE_PROC_RANGE
78611 +#define ELAN_ROUTE_INVALID_LEVEL               ELAN3_ROUTE_INVALID_LEVEL
78612 +#define ELAN_ROUTE_OCILATES                    ELAN3_ROUTE_OCILATES
78613 +#define ELAN_ROUTE_WRONG_DEST                  ELAN3_ROUTE_WRONG_DEST
78614 +#define ELAN_ROUTE_TURN_LEVEL                  ELAN3_ROUTE_TURN_LEVEL
78615 +#define ELAN_ROUTE_NODEID_UNKNOWN              ELAN3_ROUTE_NODEID_UNKNOWN
78616 +
78617 +/* elandev.h */
78618 +#define ELAN_STATS                             ELAN3_STATS
78619 +#define ELAN_STATS_VERSION                     ELAN3_STATS_VERSION
78620 +
78621 +/* perm.h */
78622 +#define ELAN_PERM_NOREMOTE                     ELAN3_PERM_NOREMOTE
78623 +#define ELAN_PERM_LOCAL_READ                   ELAN3_PERM_LOCAL_READ
78624 +#define ELAN_PERM_REMOTEALL                    ELAN3_PERM_REMOTEALL
78625 +
78626 +/* threadsyscall.h */
78627 +#define ELAN_ABORT_TRAPNUM                     ELAN3_ABORT_TRAPNUM
78628 +#define ELAN_ELANCALL_TRAPNUM                  ELAN3_ELANCALL_TRAPNUM
78629 +#define ELAN_SYSCALL_TRAPNUM                   ELAN3_SYSCALL_TRAPNUM
78630 +#define ELAN_SYS_close                         ELAN3_SYS_close
78631 +#define ELAN_SYS_getpid                                ELAN3_SYS_getpid
78632 +#define ELAN_SYS_ioctl                         ELAN3_SYS_ioctl
78633 +#define ELAN_SYS_kill                          ELAN3_SYS_kill
78634 +#define ELAN_SYS_lseek                         ELAN3_SYS_lseek
78635 +#define ELAN_SYS_mmap                          ELAN3_SYS_mmap
78636 +#define ELAN_SYS_munmap                                ELAN3_SYS_munmap
78637 +#define ELAN_SYS_open                          ELAN3_SYS_open
78638 +#define ELAN_SYS_poll                          ELAN3_SYS_poll
78639 +#define ELAN_SYS_read                          ELAN3_SYS_read
78640 +#define ELAN_SYS_write                         ELAN3_SYS_write
78641 +#define ELAN_T_SYSCALL_CODE                    ELAN3_T_SYSCALL_CODE
78642 +#define ELAN_T_SYSCALL_ERRNO                   ELAN3_T_SYSCALL_ERRNO
78643 +
78644 +/* elansyscall.h */
78645 +#define ELAN_SYS_FLAG_DMA_BADVP                        ELAN3_SYS_FLAG_DMA_BADVP
78646 +#define ELAN_SYS_FLAG_THREAD_BADVP             ELAN3_SYS_FLAG_THREAD_BADVP
78647 +#define ELAN_SYS_FLAG_DMAFAIL                  ELAN3_SYS_FLAG_DMAFAIL
78648 +#define ELAN_SYS_FLAG_NETERR                   ELAN3_SYS_FLAG_NETERR
78649 +
78650 +/* intrinsics.h */
78651 +#define elan_copy64w                           elan3_copy64w
78652 +#define elan_read64dw                          elan3_read64dw
78653 +#define elan_write64dw                         elan3_write64dw
78654 +
78655 +#ifndef ELAN_POLL_EVENT
78656 +#define ELAN_POLL_EVENT                                ELAN3_POLL_EVENT
78657 +#endif
78658 +#ifndef ELAN_WAIT_EVENT
78659 +#define ELAN_WAIT_EVENT                                ELAN3_WAIT_EVENT
78660 +#endif
78661 +
78662 +#endif /* __ELAN3_COMPAT_H */
78663 +/*
78664 + * Local variables:
78665 + * c-file-style: "stroustrup"
78666 + * End:
78667 + */
78668 +
78669 Index: linux-2.4.21/include/elan3/dma.h
78670 ===================================================================
78671 --- linux-2.4.21.orig/include/elan3/dma.h       2004-02-23 16:02:56.000000000 -0500
78672 +++ linux-2.4.21/include/elan3/dma.h    2005-06-01 23:12:54.716421320 -0400
78673 @@ -0,0 +1,213 @@
78674 +/*
78675 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78676 + *
78677 + *    For licensing information please see the supplied COPYING file
78678 + *
78679 + */
78680 +
78681 +#ifndef __ELAN3_DMA_H
78682 +#define __ELAN3_DMA_H
78683 +
78684 +#ident "$Id: dma.h,v 1.38 2002/08/21 12:43:27 david Exp $"
78685 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/dma.h,v $ */
78686 +
78687 +#include <elan3/e3types.h>
78688 +#include <elan3/events.h>
78689 +
78690 +/* Alignment for a DMA descriptor */
78691 +#define E3_DMA_ALIGN           (32)
78692 +
78693 +/* The maximum size a DMA can be (i.e. < 2GB) */
78694 +#define E3_MAX_DMA_SIZE                0x7fffffff
78695 +
78696 +/* This macro returns TRUE if a fixup for the ELAN_REVB_BUG_2 problem is required 
78697 + * i.e. if the DMA begins in the last 64-bytes of a page and its size causes it to enter the
78698 + * next page, hence causing the Elan to issue 2 (64-byte) block reads to different pages.
78699 + * See GNAT hw-elan3/3263
78700 + */
78701 +#define E3_DMA_REVB_BUG_2(SIZE, ADDR, PAGESIZE)        \
78702 +       ( (((int) (ADDR) & (PAGESIZE-64)) == (PAGESIZE-64)) && (-(((int) (ADDR) | ~(PAGESIZE-1))) < (SIZE)) )
78703 +
78704 +/* There is a point where a dma runs quicker from main memory than
78705 + * when running from sdram and having to copy all the data down
78706 + * first.
78707 + */
78708 +#define E3_DMA_SDRAM_CUTOFF    128
78709 +
78710 +typedef union _e3_DmaType
78711 +{
78712 +    E3_uint32 type;
78713 +    struct
78714 +    {
78715 +#if defined(__LITTLE_ENDIAN__)
78716 +       E3_uint32 dataType:2;   /* Bits 0 to 1   */
78717 +       E3_uint32 direction:3;  /* Bit  4 to 2   */
78718 +       E3_uint32 opCode:4;     /* Bits 5 to 8   */
78719 +       E3_uint32 failCount:6;  /* Bits 9 to 14  */
78720 +       E3_uint32 isRemote:1;   /* Bit  15       */
78721 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
78722 +       E3_uint32 :3;           /* Bits 29 to 31 */
78723 +#else
78724 +       E3_uint32 :3;           /* Bits 29 to 31 */
78725 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
78726 +       E3_uint32 isRemote:1;   /* Bit  15       */
78727 +       E3_uint32 failCount:6;  /* Bits 9 to 14  */
78728 +       E3_uint32 opCode:4;     /* Bits 5 to 8   */
78729 +       E3_uint32 direction:3;  /* Bit  4 to 2   */
78730 +       E3_uint32 dataType:2;   /* Bits 0 to 1   */
78731 +#endif
78732 +    } s;
78733 +} E3_DmaType;
78734 +
78735 +#define E3_DMA_CONTEXT_MASK    (ALL_CONTEXT_BITS << 16)
78736 +
78737 +#define E3_DMA_CONTEXT(type)   (((type) >> 16) & ALL_CONTEXT_BITS)
78738 +#define E3_DMA_ISREMOTE(type)  (((type) >> 15) & 1)
78739 +#define E3_DMA_FAILCOUNT(type) (((type) >> 9) & 0x3F)
78740 +#define E3_DMA_OPCODE(type)    (((type) >> 5) & 0xF)
78741 +#define E3_DMA_DIRECTION(type) (((type) >> 2) & 0x7)
78742 +#define EP_DMA_DATATYPE(type)  (((type) >> 0) & 0x3)
78743 +
78744 +#define E3_DMA_TYPE(dataType, direction, opCode, failCount) \
78745 +    (((dataType) & 0x3) | (((direction) & 7) << 2) | (((opCode) & 0xF) << 5) | (((failCount) & 0x3F) << 9))
78746 +
78747 +
78748 +typedef union _e3_CookieVProc
78749 +{
78750 +    E3_uint32 cookie_vproc;
78751 +    struct
78752 +    {
78753 +#if defined(__LITTLE_ENDIAN__)
78754 +       E3_uint32 vproc:16;                     /* Bit  15 to 0  */
78755 +       E3_uint32 cookie:16;                    /* Bits 31 to 16 */
78756 +#else
78757 +       E3_uint32 cookie:16;                    /* Bits 31 to 16 */
78758 +       E3_uint32 vproc:16;                     /* Bit  15 to 0  */
78759 +#endif
78760 +    } s;
78761 +} E3_CookieVProc;
78762 +
78763 +#define E3_DMA_COOKIE_PROC(Cookie, VProc)  (((VProc) & 0xffff) | (((Cookie) << 16)))
78764 +
78765 +#define DMA_COOKIE_MASK                (0xffff0000)
78766 +#define DMA_PROCESS_MASK       (0x0000ffff)
78767 +
78768 +/* We use the bottom bit of the cookie to
78769 + * distinguish main/thread generated cookies
78770 + */
78771 +#define DMA_COOKIE_THREAD      (0x01 << 16)
78772 +
78773 +/* We use the next bit of the cookie to
78774 + * distinguish locally/remotely generated cookies 
78775 + */
78776 +#define DMA_COOKIE_REMOTE      (0x02 << 16)
78777 +
78778 +/* Assign and increment cookie (NB: we have reserved the bottom two bits)
78779 + */
78780 +#define DMA_COOKIE(COOKIE, VPROC)       ((((COOKIE) += (0x4 << 16)) & DMA_COOKIE_MASK) | VPROC)
78781 +#define DMA_REMOTE_COOKIE(COOKIE, VPROC) ((((COOKIE) += (0x4 << 16)) & DMA_COOKIE_MASK) | DMA_COOKIE_REMOTE | VPROC)
78782 +
78783 +#define DMA_COOKIE_REFRESH(COOKIEVP, COOKIE)                           \
78784 +do {                                                                   \
78785 +       COOKIEVP &= ~DMA_COOKIE_MASK;           /* Clear cookie */      \
78786 +       COOKIEVP |= DMA_COOKIE(COOKIE,0);       /* Assign new cookie */ \
78787 +} while (0)
78788 +
78789 +typedef struct e3_dma
78790 +{
78791 +    E3_DmaType         dma_u;
78792 +    E3_uint32          dma_size;
78793 +    E3_Addr            dma_source;
78794 +    E3_Addr            dma_dest;
78795 +    E3_Addr            dma_destEvent;
78796 +    E3_CookieVProc     dma_destCookieProc;
78797 +    E3_Addr            dma_srcEvent;
78798 +    E3_CookieVProc     dma_srcCookieProc;
78799 +} E3_DMA;
78800 +
78801 +
78802 +/*
78803 + * Word-swapped version of DMA descriptor.
78804 + * This is used by the UltraSPARC code to format the descriptor
78805 + * in main memory before block-copying it down to Elan SDRAM.
78806 + * In the process it does a dword (64-bit) conversion and so swaps
78807 + * the word order on a double-word pair basis
78808 + */
78809 +typedef struct e3_dma_swapped
78810 +{
78811 +    E3_uint32          dma_size;
78812 +    E3_DmaType         dma_u;
78813 +    E3_Addr            dma_dest;
78814 +    E3_Addr            dma_source;
78815 +    E3_CookieVProc     dma_destCookieProc;
78816 +    E3_Addr            dma_destEvent;
78817 +    E3_CookieVProc     dma_srcCookieProc;
78818 +    E3_Addr            dma_srcEvent;
78819 +} E3_DMA_SWAPPED;
78820 +
78821 +/* Define a Main memory structure for DMA desc based on Endianess of machine */
78822 +#if defined(__LITTLE_ENDIAN__)
78823 +#define E3_DMA_MAIN E3_DMA
78824 +#else
78825 +#define E3_DMA_MAIN E3_DMA_SWAPPED
78826 +#endif
78827 +
78828 +#define dma_type        dma_u.type
78829 +#define dma_failCount    dma_u.s.failCount
78830 +#define dma_isRemote     dma_u.s.isRemote
78831 +#define dma_opCode       dma_u.s.opCode
78832 +#define dma_direction    dma_u.s.direction
78833 +#define dma_dataType     dma_u.s.dataType
78834 +#define dma_queueContext dma_u.s.Context
78835 +
78836 +#define dma_destCookieVProc   dma_destCookieProc.cookie_vproc
78837 +#define dma_destVProc        dma_destCookieProc.s.vproc
78838 +#define dma_destCookie       dma_destCookieProc.s.cookie
78839 +#define dma_srcCookieVProc    dma_srcCookieProc.cookie_vproc
78840 +#define dma_srcVProc         dma_srcCookieProc.s.vproc
78841 +#define dma_srcCookie        dma_srcCookieProc.s.cookie
78842 +
78843 +/*
78844 + * Values for dma_opCode
78845 + */
78846 +#define DMA_NORMAL                     0
78847 +#define DMA_QUEUED                     1
78848 +#define DMA_NORMAL_BROADCAST           2
78849 +#define DMA_QUEUED_BROADCAST           3
78850 +#define DMA_NORMAL_UNSAFE              4
78851 +#define DMA_QUEUED_UNSAFE              5
78852 +#define DMA_NORMAL_BROADCAST_UNSAFE    6
78853 +#define DMA_QUEUED_BROADCAST_UNSAFE    7
78854 +
78855 +/*
78856 + * Values for dma_direction
78857 + */
78858 +#define DMA_WRITE              0
78859 +#define DMA_READ_REQUEUE       1
78860 +#define DMA_READ               3
78861 +#define DMA_READ_BROADCAST     7
78862 +
78863 +/*
78864 + * Values for dma_dataType
78865 + */
78866 +#define DMA_BYTE               0
78867 +#define DMA_HALFWORD           1
78868 +#define DMA_WORD               2
78869 +#define DMA_DOUBLE             3
78870 +
78871 +/* OUT OF DATE ?
78872 +  #define DMA_OPCODE_SHIFT     3
78873 +  #define DMA_FAILCOUNT_SHIFT  9
78874 +*/
78875 +#define DMA_TYPE_ISREMOTE      (1 << 15)
78876 +#define DMA_TYPE_READ          (3 << 2)
78877 +#define DMA_TYPE_READ_REQUEUE  (1 << 2)
78878 +#define DMA_TYPE_DIRECTION_MASK        (3 << 2)
78879 +
78880 +#endif /* __ELAN3_DMA_H */
78881 +
78882 +/*
78883 + * Local variables:
78884 + * c-file-style: "stroustrup"
78885 + * End:
78886 + */
78887 Index: linux-2.4.21/include/elan3/e3types.h
78888 ===================================================================
78889 --- linux-2.4.21.orig/include/elan3/e3types.h   2004-02-23 16:02:56.000000000 -0500
78890 +++ linux-2.4.21/include/elan3/e3types.h        2005-06-01 23:12:54.716421320 -0400
78891 @@ -0,0 +1,82 @@
78892 +/*
78893 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78894 + *
78895 + *    For licensing information please see the supplied COPYING file
78896 + *
78897 + */
78898 +
78899 +#ifndef __ELAN3_E3TYPES_H
78900 +#define __ELAN3_E3TYPES_H
78901 +
78902 +#ident "$Id: e3types.h,v 1.18 2002/08/09 11:23:33 addy Exp $"
78903 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/e3types.h,v $ */
78904 +
78905 +#include <qsnet/config.h>
78906 +/*
78907 + * "flip" values for correctly indexing into
78908 + * block data which was copied from the Elan
78909 + * using 64 bit accesses.
78910 + */
78911 +#if defined(__LITTLE_ENDIAN__)
78912 +#  define ByteEndianFlip  0
78913 +#  define ShortEndianFlip 0
78914 +#  define WordEndianFlip  0
78915 +#else
78916 +#  define ByteEndianFlip  7
78917 +#  define ShortEndianFlip 3
78918 +#  define WordEndianFlip  1
78919 +#endif
78920 +
78921 +
78922 +#ifndef _ASM
78923 +
78924 +typedef signed int        E3_int;
78925 +typedef unsigned int              E3_uint;
78926 +
78927 +typedef signed char       E3_int8;
78928 +typedef unsigned char     E3_uint8;
78929 +
78930 +typedef signed short      E3_int16;
78931 +typedef unsigned short            E3_uint16;
78932 +
78933 +typedef signed int        E3_int32;
78934 +typedef unsigned int              E3_uint32;
78935 +
78936 +#ifdef __ELAN3__
78937 +typedef signed long long   E3_int64;
78938 +typedef unsigned long long E3_uint64;
78939 +#ifdef _MAIN_LP64
78940 +/* NOTE: If the Main is 64-bit we declare the Elan thread's
78941 + * E3_uintptr to be 64-bits too
78942 + */
78943 +typedef unsigned long long E3_uintptr;
78944 +#else
78945 +typedef unsigned long      E3_uintptr;
78946 +#endif
78947 +
78948 +#else
78949 +
78950 +#ifdef _LP64
78951 +typedef signed long        E3_int64;
78952 +typedef unsigned long      E3_uint64;
78953 +typedef unsigned long      E3_uintptr;
78954 +#else /* _ILP32 */
78955 +typedef signed long long   E3_int64;
78956 +typedef unsigned long long E3_uint64;
78957 +typedef unsigned long      E3_uintptr;
78958 +#endif
78959 +
78960 +#endif /* __ELAN3__ */
78961 +
78962 +/* 32-bit Elan3 address */
78963 +typedef E3_uint32         E3_Addr;
78964 +
78965 +#endif /* _ASM */
78966 +
78967 +#endif /* __ELAN3_E3TYPES_H */
78968 +
78969 +/*
78970 + * Local variables:
78971 + * c-file-style: "stroustrup"
78972 + * End:
78973 + */
78974 Index: linux-2.4.21/include/elan3/elan3mmu.h
78975 ===================================================================
78976 --- linux-2.4.21.orig/include/elan3/elan3mmu.h  2004-02-23 16:02:56.000000000 -0500
78977 +++ linux-2.4.21/include/elan3/elan3mmu.h       2005-06-01 23:12:54.717421168 -0400
78978 @@ -0,0 +1,346 @@
78979 +/*
78980 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78981 + *
78982 + *    For licensing information please see the supplied COPYING file
78983 + *
78984 + */
78985 +
78986 +#ifndef __ELAN3_ELAN3MMU_H
78987 +#define __ELAN3_ELAN3MMU_H
78988 +
78989 +#ident "$Id: elan3mmu.h,v 1.40.2.1 2004/12/14 10:19:48 mike Exp $"
78990 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3mmu.h,v $*/
78991 +
78992 +
78993 +#include <elan3/pte.h>
78994 +
78995 +#ifdef __cplusplus
78996 +extern "C" {
78997 +#endif
78998 +
78999 +typedef struct elan3mmu_global_stats
79000 +{
79001 +    int                version;
79002 +    int                pteload;
79003 +    int                pteunload;
79004 +    int                ptereload;
79005 +
79006 +    int                streamable_alloc;
79007 +    int                streamable_free;
79008 +    int                streamable_alloc_failed;
79009 +
79010 +    int                num_ptbl_level[4]; /* number of level N  ptbls */
79011 +
79012 +    int                create_ptbl_failed;                     /* count of ptbl creation failure */
79013 +
79014 +    int         lX_alloc_l3;                           /* count of l3 ptbls used as lX */
79015 +    int         lX_freed_l3;                           /* count of lX ptbls freed as l3 */
79016 +
79017 +    int                l2_alloc_l3;                            /* count of l3 ptbls used as l2 */
79018 +    int                l2_freed_l3;                            /* count of l2 ptbls freed as l3 */
79019 +
79020 +    int                stolen_ptbls;                           /* count of l3 ptbls stolen */
79021 +} ELAN3MMU_GLOBAL_STATS;
79022 +
79023 +#define ELAN3MMU_STATS_VERSION         1
79024 +
79025 +#define ELAN3MMU_STAT(what)            (elan3mmu_global_stats.what++)
79026 +#define ELAN3MMU_SET_STAT(what,count)  (elan3mmu_global_stats.what = count)
79027 +
79028 +#ifdef __KERNEL__
79029 +
79030 +#define ELAN3_PT_SHIFT (ELAN3_L2_SHIFT + 2)
79031 +
79032 +typedef struct elan3_ptbl
79033 +{
79034 +    struct elan3_ptbl   *ptbl_parent;                          /* Parent page table, or next on freelist */
79035 +    struct elan3mmu     *ptbl_elan3mmu;                        /* elan3mmu we're allocated for */
79036 +    E3_Addr              ptbl_base;                            /* Virtual address we're mapping */
79037 +    u_char               ptbl_index;                           /* Index in ptbl group */
79038 +    u_char               ptbl_valid;                           /* Number of valid entries */
79039 +    u_char               ptbl_flags;                           /* Flags, defined below. */
79040 +    u_char               ptbl_spare;
79041 +} ELAN3_PTBL;
79042 +
79043 +#define ptbl_next      ptbl_parent                             /* Parent pointer is next pointer when on free list */
79044 +
79045 +#define PTBL_LEVEL_X            0x00
79046 +#define PTBL_LEVEL_1           0x01
79047 +#define PTBL_LEVEL_2           0x02
79048 +#define PTBL_LEVEL_3           0x03
79049 +#define PTBL_LEVEL_MASK                0x03
79050 +#define PTBL_LOCKED            0x04                            /* Page table is locked,  protects all fields */
79051 +#define PTBL_KEEP              0x08                            /* This ptbl is not to be stolen */
79052 +#define PTBL_ALLOCED           0x10                            /* This ptbl has been allocated, and is not free */
79053 +#define PTBL_GROUPED           0x20                            /* This ptbl is a member of a group of ptbls */
79054 +#define PTBL_KERNEL            0x80                            /* This ptbl is allocated for the kernel */
79055 +
79056 +#define PTBL_LEVEL(flags)      ((flags) & PTBL_LEVEL_MASK)
79057 +#define PTBL_IS_LOCKED(flags)  (((flags) & (PTBL_LOCKED|PTBL_ALLOCED)) == (PTBL_LOCKED|PTBL_ALLOCED))
79058 +
79059 +#if ELAN3_PAGE_SHIFT == 13
79060 +#  define PTBL_GROUP_SIZE      8192                            /* page table groups are 8k bytes */
79061 +#  define PTBLS_PER_GROUP_L1   8                               /* Number of level 1 tables in a group */
79062 +#  define PTBLS_PER_GROUP_L2   32                              /*   ... level 2 */
79063 +#  define PTBLS_PER_GROUP_L3   32                              /*   ... level 3 */
79064 +#  define PTBLS_PER_GROUP_LX   32                              /*   ... level X */
79065 +#  define PTBLS_PER_GROUP_MAX  32                              /*  max of l1,l2,l3,lX */
79066 +#else
79067 +#  define PTBL_GROUP_SIZE      4096                            /* page table groups are 4k bytes */
79068 +#  define PTBLS_PER_GROUP_L1   4                               /* Number of level 1 tables in a group */
79069 +#  define PTBLS_PER_GROUP_L2   16                              /*   ... level 2 */
79070 +#  define PTBLS_PER_GROUP_L3   8                               /*   ... level 3 */
79071 +#  define PTBLS_PER_GROUP_LX   16                              /*   ... level X */
79072 +#  define PTBLS_PER_GROUP_MAX  16                              /*  max of l1,l2,l3,lX */
79073 +#endif
79074 +
79075 +#define HMES_PER_GROUP         (PTBLS_PER_GROUP_L3*ELAN3_L3_ENTRIES)
79076 +
79077 +#if ELAN3_PAGE_SHIFT == 13
79078 +#  define PTBLS_PER_PTBL_L1    4                               /* 256 PTPs */
79079 +#  define PTBLS_PER_PTBL_L2    1                               /* 64 PTPs */
79080 +#  define PTBLS_PER_PTBL_L3    1                               /* 32 PTEs */
79081 +#else
79082 +#  define PTBLS_PER_PTBL_L1    4                               /* 256 PTPs */
79083 +#  define PTBLS_PER_PTBL_L2    1                               /* 64 PTPs */
79084 +#  define PTBLS_PER_PTBL_L3    2                               /* 64 PTEs */
79085 +#endif
79086 +
79087 +#define ELAN3_LX_ENTRIES     (32) 
79088 +#define PTBLS_PER_PTBL_LX   (1)        
79089 +
79090 +#define L1_VA_PER_PTBL (ELAN3_L1_SIZE*(ELAN3_L1_ENTRIES/PTBLS_PER_PTBL_L1))    /* 4 ptbl for L1 */
79091 +#define L2_VA_PER_PTBL (ELAN3_L2_SIZE*(ELAN3_L2_ENTRIES/PTBLS_PER_PTBL_L2))    /* 1 ptbl for L2 */
79092 +#define L3_VA_PER_PTBL (ELAN3_L3_SIZE*(ELAN3_L3_ENTRIES/PTBLS_PER_PTBL_L3))    /* 1 ptbl for L3 */
79093 +
79094 +typedef struct elan3_ptbl_gr
79095 +{
79096 +    struct elan3_ptbl_gr *pg_next;                             /* Next in list. */
79097 +    int                         pg_level;                              /* Level PG allocated for */
79098 +    sdramaddr_t                 pg_addr;                               /* sdram offset of ptes/ptps */    
79099 +    ELAN3_PTBL          pg_ptbls[PTBLS_PER_GROUP_MAX];         /* The actual page tables */
79100 +} ELAN3_PTBL_GR;
79101 +
79102 +
79103 +/*
79104 + * The elan3mmu structure is the mmu dependent hardware address translation
79105 + * structure linked to the address space structure to show the translations
79106 + * provided by the elan for an address space.
79107 + *
79108 + * We also have a doubly linked list of 'regions' which allow the 
79109 + * elan3mmu code to determine the access permissions for the elan 
79110 + * dependant on the virtual address that the translation is being
79111 + * loaded at.
79112 + */
79113 +
79114 +typedef struct elan3mmu_rgn
79115 +{
79116 +    struct elan3mmu_rgn *rgn_mnext;                            /* Doubly linked list of regions */
79117 +    struct elan3mmu_rgn *rgn_mprev;                            /*   sorted on main address */ 
79118 +    caddr_t             rgn_mbase;                             /* main address of base of region */
79119 +
79120 +    struct elan3mmu_rgn *rgn_enext;                            /* Doubly linked list of regions */
79121 +    struct elan3mmu_rgn *rgn_eprev;                            /*   sorted on elan address */
79122 +    E3_Addr             rgn_ebase;                             /* elan address of base of region */
79123 +
79124 +    u_int               rgn_len;                               /* length of region */
79125 +    u_int               rgn_perm;                              /* elan access permission */
79126 +} ELAN3MMU_RGN;
79127 +
79128 +typedef struct elan3mmu
79129 +{
79130 +    spinlock_t             elan3mmu_lock;                      /* spinlock lock for regions */
79131 +    ELAN3MMU_RGN           *elan3mmu_mrgns;                    /* Doubly linked list of memory regions */
79132 +    ELAN3MMU_RGN          *elan3mmu_mtail;                     /* Last memory region on list */
79133 +    ELAN3MMU_RGN          *elan3mmu_mrgnlast;                  /* Last region 'hit' */
79134 +
79135 +    ELAN3MMU_RGN           *elan3mmu_ergns;                    /* Doubly linked list of memory regions */
79136 +    ELAN3MMU_RGN          *elan3mmu_etail;                     /* Last memory region on list */
79137 +    ELAN3MMU_RGN          *elan3mmu_ergnlast;                  /* Last region 'hit' */
79138 +
79139 +    struct elan3_dev        *elan3mmu_dev;                     /* Elan device we're using. */
79140 +    struct elan3_ctxt     *elan3mmu_ctxt;                      /* Elan ctxt we're associated with */
79141 +
79142 +    sdramaddr_t             elan3mmu_ctp;                      /* Context table entry for our context */
79143 +    ELAN3_PTBL            *elan3mmu_l1ptbl;                    /* Level 1 Page table (first of 4) */
79144 +
79145 +    spinlock_t             elan3mmu_lXptbl_lock;               /* spinlock for level X table list */
79146 +    ELAN3_PTBL              *elan3mmu_lXptbl;                    /* Level X Page table list         */
79147 +
79148 +#ifdef LINUX
79149 +    struct mm_struct       *elan3mmu_coproc_mm;                        /* Linux mm we're mapping */
79150 +#endif
79151 +} ELAN3MMU;
79152 +
79153 +_NOTE(LOCK_ORDER(elan3mmu::elan3mmu_lock elan3_dev::IntrLock))
79154 +
79155 +_NOTE(MUTEX_PROTECTS_DATA(elan3mmu::elan3mmu_lock,
79156 +                         elan3mmu::elan3mmu_mrgns elan3mmu::elan3mmu_mtail
79157 +                         elan3mmu::elan3mmu_ergns elan3mmu::elan3mmu_etail))
79158 +/* protected by dev->IntrLock for read by device driver */
79159 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3mmu::elan3mmu_mrgns elan3mmu::elan3mmu_mtail
79160 +                                elan3mmu::elan3mmu_ergns elan3mmu::elan3mmu_etail))
79161 +
79162 +_NOTE(SCHEME_PROTECTS_DATA("only set to valid region", 
79163 +                          elan3mmu::elan3mmu_ergnlast elan3mmu::elan3mmu_mrgnlast))
79164 +
79165 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
79166 +                         elan3mmu::elan3mmu_l1ptbl 
79167 +                         elan3mmu::elan3mmu_ctp 
79168 +                         elan3mmu::elan3mmu_dev))
79169 +
79170 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3mmu::elan3mmu_l1ptbl
79171 +                                elan3mmu::elan3mmu_ctp 
79172 +                                elan3mmu::elan3mmu_dev))
79173 +
79174 +/*
79175 + * Macros for accessing ptes/ptbls/ptbl_grs
79176 + */
79177 +
79178 +#define OFFSETOF(object,member)        /* calculate offset of structure member */ \
79179 +       ((size_t) (&(((object *)0)->member)))
79180 +#define PTBL_TO_GR(ptbl)       /* convert ptbl to ptbl group */ \
79181 +       ((ELAN3_PTBL_GR *) ((caddr_t) ((ptbl) - (ptbl)->ptbl_index) - OFFSETOF(ELAN3_PTBL_GR,pg_ptbls[0])))
79182 +#define PTBL_TO_PTADDR(ptbl)   /* convert ptbl to a ptp pointing at it */ \
79183 +        (PTBL_TO_GR(ptbl)->pg_addr + ((ptbl)->ptbl_index<<ELAN3_PT_SHIFT))
79184 +#define PTE_TO_HME(ptbl,pte)   /* convert pte to corresponding hme */ \
79185 +        (PTBL_TO_GR(ptbl)->pg_hmes + ((pte) - (ELAN3_PTE *) PTBL_TO_GR(ptbl)->pg_vaddr))
79186 +#define HME_TO_PTE(ptbl,hme)   /* convert hme to corresponding pte */ \
79187 +        ((ELAN3_PTE *) PTBL_TO_GR(ptbl)->pg_vaddr + ((hme) - (PTBL_TO_GR(ptbl)->pg_hmes)))
79188 +
79189 +
79190 +/* Flags for lock_ptbl */
79191 +#define LK_PTBL_NOWAIT         0x1
79192 +#define LK_PTBL_FAILOK         0x2
79193 +
79194 +/* Return values for lock_ptbl */
79195 +#define LK_PTBL_OK             0x0
79196 +#define LK_PTBL_MISMATCH       0x1
79197 +#define LK_PTBL_FAILED         0x2
79198 +
79199 +/* Flags for elan3mmu_ptesync */
79200 +#define        NO_MLIST_LOCK   0
79201 +#define        MLIST_LOCKED    1
79202 +
79203 +/* Flags for elan3mmu_pteload */
79204 +#define PTE_LOAD               0x00
79205 +#define PTE_LOAD_LOCK          0x01                            /* translation should be locked */
79206 +#define PTE_LOAD_NOSYNC                0x02                            /* ref/mod bits should not be sync'ed to page */
79207 +#define PTE_NO_SLEEP            0x04                            /* true if we cant sleep */
79208 +#define PTE_NO_STEAL           0x08                            /* true if we don't want to steal ptbls */
79209 +
79210 +#define PTE_LOAD_ENDIAN_MASK   0x10                            /* mask for endian-ness */
79211 +#define PTE_LOAD_LITTLE_ENDIAN 0x00                            /* translation is to little-endian memory */
79212 +#define PTE_LOAD_BIG_ENDIAN    0x10                            /* translation is to big-endian memory */
79213 +
79214 +
79215 +/* Flags for elan3mmu_unload */
79216 +#define PTE_UNLOAD             0x00
79217 +#define PTE_UNLOAD_UNLOCK      0x01
79218 +#define PTE_UNLOAD_NOFLUSH     0x02
79219 +#define PTE_UNLOAD_NOSYNC      0x04
79220 +
79221 +extern int         elan3mmu_debug;
79222 +#ifdef DEBUG_PRINTF
79223 +#  define HAT_PRINTF0(n,msg)            ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg)             : (void) 0)
79224 +#  define HAT_PRINTF1(n,msg,a)          ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a)           : (void) 0)
79225 +#  define HAT_PRINTF2(n,msg,a,b)        ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b)         : (void) 0)
79226 +#  define HAT_PRINTF3(n,msg,a,b,c)      ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c)       : (void) 0)
79227 +#  define HAT_PRINTF4(n,msg,a,b,c,d)    ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d)     : (void) 0)
79228 +#  define HAT_PRINTF5(n,msg,a,b,c,d,e)  ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d,e)   : (void) 0)
79229 +#  define HAT_PRINTF6(n,msg,a,b,c,d,e,f) ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d,e,f) : (void) 0)
79230 +#  ifdef LINUX
79231 +#    define HAT_PRINTF(n,args...)        ((elan3mmu_debug & n) ? (void) elan3_debugf(NULL, DBG_HAT, ##args) : (void) 0)
79232 +#  endif
79233 +#else
79234 +#  define HAT_PRINTF0(n,msg)
79235 +#  define HAT_PRINTF1(n,msg,a)
79236 +#  define HAT_PRINTF2(n,msg,a,b)
79237 +#  define HAT_PRINTF3(n,msg,a,b,c)
79238 +#  define HAT_PRINTF4(n,msg,a,b,c,d)
79239 +#  define HAT_PRINTF5(n,msg,a,b,c,d,e)
79240 +#  define HAT_PRINTF6(n,msg,a,b,c,d,e,f)
79241 +#  ifdef LINUX
79242 +#    define HAT_PRINTF(n,args...)
79243 +#  endif
79244 +#endif
79245 +
79246 +/* elan3mmu_generic.c */
79247 +extern ELAN3MMU_GLOBAL_STATS elan3mmu_global_stats;
79248 +
79249 +extern void         elan3mmu_init (void);
79250 +extern void         elan3mmu_fini (void);
79251 +
79252 +extern ELAN3MMU           *elan3mmu_alloc (struct elan3_ctxt *ctxt);
79253 +extern void        elan3mmu_free (ELAN3MMU *elan3mmu);
79254 +
79255 +extern void          elan3mmu_set_context_filter (ELAN3_DEV *dev, int ctx, int disabled, E3_uint32 Pend, E3_uint32 *Maskp);
79256 +extern int          elan3mmu_attach (ELAN3_DEV *dev, int ctx, ELAN3MMU *elan3mmu, sdramaddr_t routeTable, E3_uint32 routeMask);
79257 +extern void         elan3mmu_detach (ELAN3_DEV *dev, int ctx);
79258 +
79259 +extern ELAN3MMU_RGN *elan3mmu_findrgn_elan (ELAN3MMU *elan3mmu, E3_Addr addr, int tail);
79260 +extern int           elan3mmu_addrgn_elan (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn);
79261 +extern ELAN3MMU_RGN *elan3mmu_removergn_elan (ELAN3MMU *elan3mmu, E3_Addr addr);
79262 +extern ELAN3MMU_RGN *elan3mmu_rgnat_elan (ELAN3MMU *elan3mmu, E3_Addr addr);
79263 +extern ELAN3MMU_RGN *elan3mmu_findrgn_main (ELAN3MMU *elan3mmu, caddr_t addr, int tail);
79264 +extern int           elan3mmu_addrgn_main (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn);
79265 +extern ELAN3MMU_RGN *elan3mmu_removergn_main (ELAN3MMU *elan3mmu, caddr_t addr);
79266 +extern ELAN3MMU_RGN *elan3mmu_rgnat_main (ELAN3MMU *elan3mmu, caddr_t addr);
79267 +
79268 +extern int          elan3mmu_setperm (ELAN3MMU *elan3mmu, caddr_t maddr, E3_Addr eaddr, u_int len, u_int perm);
79269 +extern void         elan3mmu_clrperm (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len);
79270 +extern int          elan3mmu_checkperm (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, u_int access);
79271 +extern caddr_t      elan3mmu_mainaddr (ELAN3MMU *elan3mmu, E3_Addr addr);
79272 +extern E3_Addr      elan3mmu_elanaddr (ELAN3MMU *elan3mmu, caddr_t addr);
79273 +
79274 +extern void          elan3mmu_expand (ELAN3MMU *elan3mmu, E3_Addr addr, int len, int level, int attr);
79275 +extern void          elan3mmu_reserve (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *);
79276 +extern void          elan3mmu_release (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *);
79277 +
79278 +extern void          elan3mmu_pteload (ELAN3MMU *elan3mmu, int level, E3_Addr addr, physaddr_t paddr, int perm, int attr);
79279 +extern void         elan3mmu_unload (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, int flags);
79280 +extern void         elan3mmu_sync (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, u_int clearflag);
79281 +extern void         elan3mmu_pteunload (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock);
79282 +extern void         elan3mmu_ptesync (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock);
79283 +extern sdramaddr_t   elan3mmu_ptp2pte (ELAN3MMU *elan3mmu, sdramaddr_t ptp, int level);
79284 +extern sdramaddr_t   elan3mmu_ptefind (ELAN3MMU *elan3mmu, E3_Addr, int *level, ELAN3_PTBL **pptbl, spinlock_t **plock, unsigned long *flags);
79285 +extern sdramaddr_t   elan3mmu_ptealloc (ELAN3MMU *elan3mmu, E3_Addr, int level, ELAN3_PTBL **pptbl, spinlock_t **plock, int attr, unsigned long *flags);
79286 +extern void         elan3mmu_l1inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l1ptbl, int flags);
79287 +extern int           elan3mmu_l2inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l2ptbl, int flags, E3_Addr addr, spinlock_t **pl2lock, unsigned long *lock_flags);
79288 +extern int           elan3mmu_l3inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l3ptbl, int flags, E3_Addr addr, spinlock_t **pl3lock, unsigned long *lock_flags);
79289 +
79290 +extern void          elan3mmu_free_l1ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
79291 +extern void          elan3mmu_free_l2ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
79292 +extern void          elan3mmu_free_l3ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
79293 +
79294 +extern int          elan3mmu_lock_this_ptbl (ELAN3_PTBL *ptbl, int flag, spinlock_t **plock, unsigned long *flags);
79295 +extern int           elan3mmu_lock_ptbl (ELAN3_PTBL *ptbl, u_int flag, ELAN3MMU *elan3mmu, E3_Addr va, int level, spinlock_t **plock, unsigned long *flags);
79296 +extern void         elan3mmu_unlock_ptbl (ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
79297 +
79298 +/* elan3mmu_osdep.c */
79299 +extern void         elan3mmu_init_osdep (void);
79300 +extern void         elan3mmu_fini_osdep (void);
79301 +extern void         elan3mmu_alloc_osdep (ELAN3MMU *elan3mmu);
79302 +extern void         elan3mmu_free_osdep (ELAN3MMU *elan3mmu);
79303 +extern ELAN3_PTE     elan3mmu_phys_to_pte (ELAN3_DEV *dev, physaddr_t paddr, int perm);
79304 +extern ELAN3_PTE     elan3mmu_kernel_invalid_pte (ELAN3MMU *elan3mmu);
79305 +
79306 +#if defined (DIGITAL_UNIX)
79307 +#  include <elan3/elan3mmu_dunix.h>
79308 +#elif defined (LINUX)
79309 +#  include <elan3/elan3mmu_linux.h>
79310 +#endif
79311 +
79312 +#endif /* __KERNEL__ */
79313 +
79314 +#ifdef __cplusplus
79315 +}
79316 +#endif
79317 +
79318 +#endif /* __ELAN3_ELAN3MMU_H */
79319 +
79320 +/*
79321 + * Local variables:
79322 + * c-file-style: "stroustrup"
79323 + * End:
79324 + */
79325 Index: linux-2.4.21/include/elan3/elan3mmu_linux.h
79326 ===================================================================
79327 --- linux-2.4.21.orig/include/elan3/elan3mmu_linux.h    2004-02-23 16:02:56.000000000 -0500
79328 +++ linux-2.4.21/include/elan3/elan3mmu_linux.h 2005-06-01 23:12:54.717421168 -0400
79329 @@ -0,0 +1,39 @@
79330 +/*
79331 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79332 + *
79333 + *    For licensing information please see the supplied COPYING file
79334 + *
79335 + */
79336 +
79337 +#ifndef __ELAN3_MMU_LINUX_H
79338 +#define __ELAN3_MMU_LINUX_H
79339 +
79340 +#ident "$Id: elan3mmu_linux.h,v 1.12 2003/09/24 13:57:24 david Exp $"
79341 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3mmu_linux.h,v $*/
79342 +
79343 +/* XXX copy of elan3mmu_dunix.h */
79344 +
79345 +#define ALLOC_ELAN3MMU(ptr,cansleep)           KMEM_ALLOC(ptr, ELAN3MMU *, sizeof (ELAN3MMU), cansleep)
79346 +#define ALLOC_PTBL_GR(ptr,cansleep)            KMEM_ALLOC(ptr, ELAN3_PTBL_GR *, sizeof (ELAN3_PTBL_GR), cansleep)
79347 +#define ALLOC_ELAN3MMU_RGN(ptr,cansleep)       KMEM_ALLOC(ptr, ELAN3MMU_RGN *, sizeof (ELAN3MMU_RGN), cansleep)
79348 +#define ALLOC_HMENTS(ptr,cansleep)             KMEM_ALLOC(ptr, ELAN3_HMENT *, sizeof (ELAN3_HMENT), cansleep)
79349 +
79350 +#define FREE_ELAN3MMU(ptr)                     KMEM_FREE(ptr,sizeof (ELAN3MMU))
79351 +#define FREE_PTBL_GR(ptr)                      KMEM_FREE(ptr,sizeof (ELAN3_PTBL_GR))
79352 +#define FREE_ELAN3MMU_RGN(ptr)                 KMEM_FREE(ptr,sizeof (ELAN3MMU_RGN))
79353 +#define FREE_HMENTS(ptr)                       KMEM_FREE(ptr,sizeof (ELAN3_HMENT))
79354 +
79355 +extern void         elan3mmu_init_osdep(void);
79356 +extern void         elan3mmu_fini_osdep(void);
79357 +
79358 +extern void          elan3mmu_pte_range_unload (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len);
79359 +extern void          elan3mmu_pte_range_update (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len);
79360 +extern void          elan3mmu_pte_ctxt_unload(ELAN3MMU *elan3mmu);
79361 +
79362 +#endif
79363 +
79364 +/*
79365 + * Local variables:
79366 + * c-file-style: "stroustrup"
79367 + * End:
79368 + */
79369 Index: linux-2.4.21/include/elan3/elan3ops.h
79370 ===================================================================
79371 --- linux-2.4.21.orig/include/elan3/elan3ops.h  2004-02-23 16:02:56.000000000 -0500
79372 +++ linux-2.4.21/include/elan3/elan3ops.h       2005-06-01 23:12:54.718421016 -0400
79373 @@ -0,0 +1,42 @@
79374 +/*
79375 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79376 + *
79377 + *    For licensing information please see the supplied COPYING file
79378 + *
79379 + */
79380 +
79381 +/* $Id: elan3ops.h,v 1.3 2003/09/24 13:57:24 david Exp $ */
79382 +/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3ops.h,v $ */
79383 +
79384 +#ifndef _ELAN3_OPS_H
79385 +#define _ELAN3_OPS_H
79386 +
79387 +int get_position          (void *arg, ELAN_POSITION *position);
79388 +int set_position          (void *arg, unsigned short nodeId, unsigned short numNodes);
79389 +
79390 +int elan3mod_create_cap   (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
79391 +int elan3mod_destroy_cap  (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
79392 +
79393 +int elan3mod_create_vp    (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
79394 +int elan3mod_destroy_vp   (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
79395 +
79396 +int elan3mod_attach_cap   (void *arg_ctxt, ELAN_CAPABILITY *cap);
79397 +int elan3mod_detach_cap   (void *arg_ctxt);
79398 +
79399 +extern ELAN_DEV_OPS elan3_dev_ops;
79400 +
79401 +int stats_get_index_name  (void *arg, uint index, caddr_t name);
79402 +int stats_get_block       (void *arg, uint entries, ulong *value);
79403 +int stats_clear_block     (void *arg);
79404 +
79405 +int elan3_register_dev_stats   (ELAN3_DEV * dev);
79406 +void elan3_deregister_dev_stats (ELAN3_DEV * dev);
79407 +
79408 +
79409 +#endif /* _ELAN3_OPS_H */
79410 +
79411 +/*
79412 + * Local variables:
79413 + * c-file-style: "linux"
79414 + * End:
79415 + */
79416 Index: linux-2.4.21/include/elan3/elanctxt.h
79417 ===================================================================
79418 --- linux-2.4.21.orig/include/elan3/elanctxt.h  2004-02-23 16:02:56.000000000 -0500
79419 +++ linux-2.4.21/include/elan3/elanctxt.h       2005-06-01 23:12:54.719420864 -0400
79420 @@ -0,0 +1,856 @@
79421 +/*
79422 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79423 + *
79424 + *    For licensing information please see the supplied COPYING file
79425 + *
79426 + */
79427 +
79428 +#ifndef _ELAN3_ELANCTXT_H
79429 +#define _ELAN3_ELANCTXT_H
79430 +
79431 +#ident "$Id: elanctxt.h,v 1.81 2003/09/24 13:57:24 david Exp $"
79432 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanctxt.h,v $*/
79433 +
79434 +#ifdef __cplusplus
79435 +extern "C" {
79436 +#endif
79437 +
79438 +#include <elan3/elanregs.h>
79439 +#include <elan3/vmseg.h>
79440 +
79441 +#define BumpUserStat(ctxt, stat)       ((ctxt)->FlagPage->stat++)
79442 +
79443 +#if defined(__LITTLE_ENDIAN__)
79444 +
79445 +typedef union _CProcTrapBuf
79446 +{
79447 +    E3_uint64 Align64;
79448 +    struct
79449 +    {
79450 +       E3_uint32 Areg;
79451 +       E3_uint32 Breg;
79452 +    } r;
79453 +    struct
79454 +    {
79455 +       E3_uint32 Addr;
79456 +       E3_uint32 ContextType;
79457 +    } s;
79458 +} CProcTrapBuf_BE;
79459 +
79460 +typedef E3_EventInt        E3_EventInt_BE;
79461 +typedef E3_IprocTrapHeader E3_IprocTrapHeader_BE;
79462 +typedef E3_IprocTrapData   E3_IprocTrapData_BE;
79463 +typedef E3_FaultSave      E3_FaultSave_BE;
79464 +
79465 +typedef union
79466 +{
79467 +    E3_uint64  Align64;
79468 +    E3_DMA      s;
79469 +} E3_DMA_BE;
79470 +
79471 +typedef E3_ThreadQueue     E3_ThreadQueue_BE;
79472 +
79473 +#else
79474 +
79475 +/* "Big-Endian" data structures copied by 64 bit loads, these are 32 bit word flipped */
79476 +/* from the corresponding data structure. */
79477 +
79478 +typedef union _CProcTrapBuf
79479 +{
79480 +    E3_uint64 Align64;
79481 +    struct
79482 +    {
79483 +       E3_uint32 Breg;
79484 +       E3_uint32 Areg;
79485 +    } r;
79486 +    struct
79487 +    {
79488 +       E3_uint32 ContextType;
79489 +       E3_uint32 Addr;
79490 +    } s;
79491 +} CProcTrapBuf_BE;
79492 +
79493 +typedef union _E3_EventInt_BE
79494 +{
79495 +    E3_uint64    Align64;
79496 +    struct {
79497 +       E3_uint32 EventContext; /* Bits 16 to 28 */
79498 +       E3_uint32 IntCookie;
79499 +    } s;
79500 +} E3_EventInt_BE;
79501 +
79502 +typedef union _E3_IprocTrapHeader_BE
79503 +{
79504 +   E3_uint64            Align64;
79505 +
79506 +   struct
79507 +   {
79508 +      E3_uint32                 TrAddr;
79509 +      E3_TrTypeCntx     TrTypeCntx;
79510 +      union
79511 +      {
79512 +        E3_IProcStatus_Reg u_IProcStatus;
79513 +        E3_uint32          u_TrData1;
79514 +      } ipsotd;
79515 +      E3_uint32                 TrData0;
79516 +   } s;
79517 +} E3_IprocTrapHeader_BE;
79518 +
79519 +typedef E3_IprocTrapData E3_IprocTrapData_BE;
79520 +
79521 +typedef union _E3_FaultSave_be
79522 +{
79523 +    E3_uint64                  Align64;
79524 +    struct {
79525 +       volatile E3_uint32      FaultContext;
79526 +       E3_FaultStatusReg       FSR;
79527 +       volatile E3_uint32      EventAddress;
79528 +       volatile E3_uint32      FaultAddress;
79529 +    } s;
79530 +} E3_FaultSave_BE;
79531 +
79532 +typedef union _e3_dma_be
79533 +{
79534 +    E3_uint64          Align64;
79535 +    struct {
79536 +       E3_uint32       dma_size;
79537 +       E3_DmaType      dma_u;
79538 +       E3_Addr         dma_dest;
79539 +       E3_Addr         dma_source;
79540 +       E3_CookieVProc  dma_destCookieProc;
79541 +       E3_Addr         dma_destEvent;
79542 +       E3_CookieVProc  dma_srcCookieProc;
79543 +       E3_Addr         dma_srcEvent;
79544 +    } s;
79545 +} E3_DMA_BE;
79546 +
79547 +typedef union _E3_ThreadQueue_BE
79548 +{
79549 +   E3_uint64   Align64;
79550 +   struct
79551 +   {
79552 +       /* copied by 64 bit copy from elan to main */
79553 +       E3_uint32 :3;           /* Bits 29 to 31 */
79554 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
79555 +       E3_uint32 :16;          /* Bits 0  to 15 */
79556 +       E3_Addr  Thread;        /* Bits 32 to 63 */
79557 +   } s;
79558 +} E3_ThreadQueue_BE;
79559 +
79560 +#endif /* defined(__LITTLE_ENDIAN__) */
79561 +
79562 +typedef struct neterr_msg
79563 +{
79564 +    E3_uint32          Rail;                                   /* Rail error received on */
79565 +    ELAN_CAPABILITY    SrcCapability;                          /* Capability of source of packet */
79566 +    ELAN_CAPABILITY    DstCapability;                          /* Capability of dest of packet */
79567 +
79568 +    E3_uint32          DstProcess;                             /* Virtual Process of dest of packet */
79569 +    E3_Addr            CookieAddr;                             /* Cookie Address (or NULL for DMA) */
79570 +    E3_uint32          CookieVProc;                            /* Cookie and VP (identifies DMA) */
79571 +    E3_uint32          NextCookie;                             /* Next Cookie value (for thread) */
79572 +    E3_uint32          WaitForEop;                             /* Wait for EOP transaction */
79573 +} NETERR_MSG;
79574 +
79575 +#ifdef __KERNEL__
79576 +
79577 +/*
79578 + * Associated with each input channel can be a network error
79579 + * resolver structure, which can be queued on the network 
79580 + * error resolver threads to perform RPCs to the other kernels
79581 + * when a network error occurs with an identify transaction
79582 + * included
79583 + */
79584 +typedef struct neterr_resolver
79585 +{
79586 +    struct neterr_resolver *Next;
79587 +
79588 +    spinlock_t             Lock;
79589 +
79590 +    struct elan3_ctxt       *Ctxt;
79591 +    ELAN_LOCATION          Location;
79592 +
79593 +    int                            Completed;
79594 +    int                            Status;
79595 +    long                   Timestamp;
79596 +
79597 +    NETERR_MSG             Message;
79598 +} NETERR_RESOLVER;
79599 +
79600 +
79601 +typedef struct neterr_fixup
79602 +{
79603 +    struct neterr_fixup           *Next;
79604 +
79605 +    kcondvar_t             Wait;
79606 +    int                            Completed;
79607 +    int                            Status;
79608 +
79609 +    NETERR_MSG             Message;
79610 +} NETERR_FIXUP;
79611 +
79612 +#endif /* __KERNEL__ */
79613 +
79614 +/* Each of the following structures must be padded to a whole */
79615 +/* number of 64 bit words since the kernel uses 64 bit load/stores */
79616 +/* to transfer the elan register state. */
79617 +typedef struct command_trap
79618 +{
79619 +    E3_Status_Reg      Status;                                 /* 4  bytes */
79620 +    E3_uint32          Pad;                                    /* 4  bytes */
79621 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
79622 +    CProcTrapBuf_BE            TrapBuf;                                /* 8  bytes */
79623 +} COMMAND_TRAP;
79624 +
79625 +typedef struct thread_trap
79626 +{
79627 +    E3_uint32          Registers[32];                          /* 128 bytes */
79628 +#define REG_GLOBALS    0
79629 +#define REG_OUTS       8
79630 +#define REG_LOCALS     16
79631 +#define REG_INS                24
79632 +
79633 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
79634 +    E3_FaultSave_BE     DataFaultSave;                         /* 16 bytes */
79635 +    E3_FaultSave_BE     InstFaultSave;                         /* 16 bytes */
79636 +    E3_FaultSave_BE     OpenFaultSave;                         /* 16 bytes */
79637 +    
79638 +    E3_Status_Reg      Status;                                 /* 4 bytes */
79639 +
79640 +    E3_Addr            pc;                                     /* 4 bytes */
79641 +    E3_Addr            npc;                                    /* 4 bytes */
79642 +    E3_Addr            StartPC;                                /* 4 bytes */
79643 +    E3_Addr            sp;                                     /* 4 bytes */
79644 +    E3_uint32          mi;                                     /* 4 bytes */
79645 +    E3_TrapBits                TrapBits;                               /* 4 bytes */
79646 +    E3_DirtyBits       DirtyBits;                              /* 4 bytes */
79647 +} THREAD_TRAP;
79648 +
79649 +typedef struct dma_trap
79650 +{
79651 +    E3_DMA_BE          Desc;                                   /* 32 bytes */
79652 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
79653 +    E3_FaultSave_BE    Data0;                                  /* 16 bytes */
79654 +    E3_FaultSave_BE    Data1;                                  /* 16 bytes */
79655 +    E3_FaultSave_BE    Data2;                                  /* 16 bytes */
79656 +    E3_FaultSave_BE    Data3;                                  /* 16 bytes */
79657 +    E3_Status_Reg      Status;                                 /* 4 bytes */
79658 +    E3_DmaInfo         PacketInfo;                             /* 4 bytes */
79659 +} DMA_TRAP;
79660 +
79661 +typedef struct input_trap
79662 +{
79663 +    E3_uint32             State;                               /* 4 bytes */
79664 +    E3_Status_Reg         Status;                              /* 4 bytes */
79665 +    E3_FaultSave_BE       FaultSave;                           /* 16 bytes */
79666 +    
79667 +    u_int                 NumTransactions;                     /* 4 bytes */
79668 +    u_int                 Overflow;                            /* 4 bytes */
79669 +    u_int                 AckSent;                             /* 4 bytes */
79670 +    u_int                 BadTransaction;                      /* 4 bytes */
79671 +
79672 +    E3_IprocTrapHeader_BE *TrappedTransaction;                 /* 4 bytes */
79673 +    E3_IprocTrapData_BE   *TrappedDataBuffer;                  /* 4 bytes */
79674 +    E3_IprocTrapHeader_BE *WaitForEopTransaction;              /* 4 bytes */
79675 +    E3_IprocTrapData_BE   *WaitForEopDataBuffer;               /* 4 bytes */
79676 +    E3_IprocTrapHeader_BE *DmaIdentifyTransaction;             /* 4 bytes */
79677 +    E3_IprocTrapHeader_BE *ThreadIdentifyTransaction;          /* 4 bytes */
79678 +    E3_Addr               LockQueuePointer;                    /* 4 bytes */
79679 +    E3_Addr               UnlockQueuePointer;                  /* 4 bytes */
79680 +
79681 +    E3_IprocTrapHeader_BE  Transactions[MAX_TRAPPED_TRANS];    /* n * 8 bytes */
79682 +    E3_IprocTrapData_BE           DataBuffers[MAX_TRAPPED_TRANS];      /* n * 64 bytes */
79683 +} INPUT_TRAP;
79684 +
79685 +typedef struct input_fault_save
79686 +{
79687 +    struct input_fault_save *Next;
79688 +    E3_Addr                 Addr;
79689 +    E3_uint32               Count;
79690 +} INPUT_FAULT_SAVE;
79691 +
79692 +#define NUM_INPUT_FAULT_SAVE   32
79693 +#define MIN_INPUT_FAULT_PAGES  8
79694 +#define MAX_INPUT_FAULT_PAGES  128
79695 +
79696 +typedef E3_uint32 EVENT_COOKIE;
79697 +
79698 +#ifdef __KERNEL__
79699 +
79700 +typedef struct event_cookie_entry
79701 +{
79702 +    struct event_cookie_entry *ent_next;
79703 +    struct event_cookie_entry *ent_prev;
79704 +
79705 +    spinlock_t                ent_lock;
79706 +    unsigned                  ent_ref;
79707 +
79708 +    EVENT_COOKIE              ent_cookie;
79709 +    EVENT_COOKIE              ent_fired;
79710 +    kcondvar_t                ent_wait;
79711 +} EVENT_COOKIE_ENTRY;
79712 +
79713 +typedef struct event_cookie_table
79714 +{
79715 +    struct event_cookie_table *tbl_next;
79716 +    struct event_cookie_table *tbl_prev;
79717 +
79718 +    unsigned long              tbl_task;
79719 +    unsigned long              tbl_handle;
79720 +
79721 +    spinlock_t                tbl_lock;
79722 +    unsigned                  tbl_ref;
79723 +    EVENT_COOKIE_ENTRY        *tbl_entries;
79724 +} EVENT_COOKIE_TABLE;
79725 +
79726 +#define NBYTES_PER_SMALL_ROUTE 8
79727 +#define NBYTES_PER_LARGE_ROUTE 16
79728 +
79729 +#define ROUTE_BLOCK_SIZE       ELAN3_PAGE_SIZE
79730 +#define NROUTES_PER_BLOCK      (ROUTE_BLOCK_SIZE/NBYTES_PER_LARGE_ROUTE)
79731 +
79732 +typedef struct elan3_routes
79733 +{
79734 +    struct elan3_routes                *Next;                                  /* Can be chained together */
79735 +
79736 +    sdramaddr_t                         Routes;                                /* sdram offset of route entries */
79737 +    bitmap_t                    Bitmap[BT_BITOUL(NROUTES_PER_BLOCK)];  /* Bitmap of which entries are used */
79738 +} ELAN3_ROUTES; 
79739 +
79740 +
79741 +typedef struct elan3_route_table
79742 +{
79743 +    spinlock_t          Lock;                          /* Route lock */
79744 +    sdramaddr_t                 Table;                         /* Kernel address for route table */
79745 +    u_int               Size;                          /* # entries in route table */
79746 +
79747 +    ELAN3_ROUTES       *LargeRoutes;                   /* Large routes */
79748 +} ELAN3_ROUTE_TABLE;
79749 +
79750 +typedef struct elan3_vpseg
79751 +{
79752 +    struct elan3_vpseg         *Next;
79753 +    int                                 Process;                       /* Virtual process */
79754 +    int                                 Entries;                       /*  and # processes */
79755 +    int                                 Type;                          /* Type of cookie */
79756 +
79757 +    union
79758 +    {
79759 +       
79760 +       ELAN_CAPABILITY Capability;                     /* Capability of remote segment */
79761 +#  define SegCapability                SegUnion.Capability
79762 +       struct {
79763 +           u_short             LowProc;                        /* Base process number */
79764 +           u_short             HighProc;                       /*   and high process number */
79765 +#  define SegLowProc           SegUnion.BROADCAST.LowProc
79766 +#  define SegHighProc          SegUnion.BROADCAST.HighProc
79767 +       } BROADCAST;
79768 +    } SegUnion;
79769 +} ELAN3_VPSEG;
79770 +
79771 +#define ELAN3_VPSEG_UNINT      0                               /* Uninitialised */
79772 +#define ELAN3_VPSEG_P2P                1                               /* Point to Point */
79773 +#define ELAN3_VPSEG_BROADCAST  2                               /* Broadcast */
79774 +
79775 +#define NUM_LISTS      7                                       /* Number of "swap" lists */
79776 +
79777 +typedef struct elan3_ctxt
79778 +{
79779 +    struct elan3_ctxt    *Next;                                        /* can be queued on a task */
79780 +    struct elan3_ctxt    *Prev;
79781 +
79782 +    CtxtHandle          Handle;                                /* user handle */
79783 +    int                         RefCnt;                                /* reference count */
79784 +
79785 +    ELAN3MMU           *Elan3mmu;                              /* elan3mmu allocated for Elan translations */
79786 +
79787 +    struct elan3_ops     *Operations;                          /* User supplied helper functions */
79788 +    void               *Private;                               /* Users private pointer */
79789 +
79790 +    int                         Status;                                /* Status (guarded by dev_mutex) */
79791 +    int                         OthersState;                           /* State of halt queueing for dma/thread */
79792 +    int                         LwpCount;                              /* Number of lwp's running */
79793 +
79794 +    ELAN3_DEV          *Device;                                /* Elan device */
79795 +
79796 +    ELAN_CAPABILITY     Capability;                            /* Capability I've attached as */
79797 +    ELAN_POSITION       Position;                              /* Position when I was created */
79798 +    
79799 +    ELAN3_VPSEG                *VpSegs;                                /* List of virtual process segments */
79800 +    ELAN3_ROUTE_TABLE    *RouteTable;
79801 +
79802 +    krwlock_t           VpLock;                                /* Reader/writer lock for vp list */
79803 +    kmutex_t            SwapListsLock;                         /* mutex to lock swap lists */
79804 +    kmutex_t            CmdLock;                               /* mutex to lock trapped dma command */
79805 +    kmutex_t            CmdPortLock;                           /* mutex to load/unload commandport xlation */
79806 +
79807 +    kcondvar_t          Wait;                                  /* Condition variable to sleep on */
79808 +    kcondvar_t          CommandPortWait;                       /* Condition variable to wait for commandport */
79809 +    kcondvar_t          LwpWait;                               /* Condition variable to wait for lwps to stop */
79810 +    kcondvar_t          HaltWait;                              /* Condition variable to wait for halt */
79811 +    int                         Halted;                                /*  and flag for halt cv */
79812 +
79813 +    caddr_t             CommandPageMapping;                    /* user virtual address for command page mapping */
79814 +    ioaddr_t             CommandPage;                          /* Elan command port mapping page */
79815 +    DeviceMappingHandle  CommandPageHandle;                    /* DDI Handle */
79816 +    ioaddr_t            CommandPort;                           /* Elan command port */
79817 +    void               *CommandPortItem;                       /* Item we're re-issuing to commandport */
79818 +
79819 +    ELAN3_FLAGSTATS      *FlagPage;                            /* Page visible to user process */
79820 +
79821 +    COMMAND_TRAP       *CommandTraps;                          /* Command port traps */
79822 +    ELAN3_SPLIT_QUEUE     CommandTrapQ;
79823 +                                                                  
79824 +    CProcTrapBuf_BE    *Commands;                              /* Overflowed commands */
79825 +    ELAN3_QUEUE           CommandQ;
79826 +
79827 +    THREAD_TRAP                *ThreadTraps;                           /* Thread processor traps */
79828 +    ELAN3_QUEUE                 ThreadTrapQ;
79829 +    
79830 +    DMA_TRAP           *DmaTraps;                              /* Dma processor traps */
79831 +    ELAN3_QUEUE                 DmaTrapQ;
79832 +
79833 +    INPUT_TRAP          Input0Trap;                            /* Inputter channel 0 trap */
79834 +    INPUT_TRAP          Input1Trap;                            /* Inputter channel 1 trap */
79835 +    NETERR_RESOLVER    *Input0Resolver;                        /* Inputter channel 0 network error resolver */
79836 +    NETERR_RESOLVER    *Input1Resolver;                        /* Inputter channel 1 network error resolver */
79837 +
79838 +    INPUT_FAULT_SAVE    InputFaults[NUM_INPUT_FAULT_SAVE];     /* stored writeblock addresses */
79839 +    INPUT_FAULT_SAVE    *InputFaultList;                       /* organized in list for LRU */
79840 +    spinlock_t          InputFaultLock;                        /* and lock for list */
79841 +
79842 +    kmutex_t            NetworkErrorLock;
79843 +    NETERR_FIXUP       *NetworkErrorFixups;
79844 +
79845 +    EVENT_COOKIE        *EventCookies;                         /* Event cookies. */
79846 +    ELAN3_QUEUE                 EventCookieQ;
79847 +
79848 +    E3_Addr            *SwapThreads;                           /* Swapped Thread Queue */
79849 +    ELAN3_QUEUE                 SwapThreadQ;
79850 +
79851 +    E3_DMA_BE          *SwapDmas;                              /* Swapped Dmas Queue */
79852 +    ELAN3_QUEUE                 SwapDmaQ;
79853 +
79854 +    int                         ItemCount[NUM_LISTS];                  /* Count of items on each swap list */
79855 +    int                         inhibit;                               /* if set lwp not to reload translations */
79856 +
79857 +    int                  Disabled;
79858 +} ELAN3_CTXT;
79859 +
79860 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock, 
79861 +                         elan3_ctxt::Status elan3_ctxt::OthersState
79862 +                         elan3_ctxt::CommandTrapQ elan3_ctxt::CommandQ elan3_ctxt::ThreadTrapQ elan3_ctxt::DmaTrapQ 
79863 +                         elan3_ctxt::Input0Trap elan3_ctxt::Input1Trap elan3_ctxt::EventCookieQ elan3_ctxt::SwapThreadQ 
79864 +                         elan3_ctxt::SwapDmaQ elan3_ctxt::CommandPortItem elan3_ctxt::LwpCount))
79865 +_NOTE(MUTEX_PROTECTS_DATA(elan3_ctxt::SwapListsLock, 
79866 +                         elan3_ctxt::ItemCount))
79867 +_NOTE(RWLOCK_PROTECTS_DATA(elan3_ctxt::VpLock, 
79868 +                          elan3_ctxt::VpSegs elan3_vpseg::Next elan3_vpseg::Process 
79869 +                          elan3_vpseg::Entries elan3_vpseg::Type))
79870 +
79871 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_ctxt::ItemCount elan3_ctxt::Status elan3_ctxt::CommandPortItem))
79872 +
79873 +_NOTE(LOCK_ORDER(elan3_ctxt::SwapListsLock elan3_ctxt::CmdLock elan3_dev::IntrLock))
79874 +_NOTE(LOCK_ORDER(elan3_ctxt::SwapListsLock as::a_lock))                        /* implicit by pagefault */
79875 +
79876 +#define CTXT_DETACHED                          (1 << 0)                /* Context is detached. */
79877 +#define CTXT_NO_LWPS                           (1 << 1)                /* No lwp's to handle faults */
79878 +#define CTXT_EXITING                           (1 << 2)                /* User process is exiting */
79879 +
79880 +#define CTXT_SWAPPING_OUT                      (1 << 3)                /* Context is swapping out */
79881 +#define CTXT_SWAPPED_OUT                       (1 << 4)                /* Context is swapped out */
79882 +
79883 +#define CTXT_SWAP_FREE                         (1 << 5)                /* Swap buffer is free */
79884 +#define CTXT_SWAP_VALID                                (1 << 6)                /* Swap buffer has queue entries in it */
79885 +
79886 +#define CTXT_DMA_QUEUE_FULL                    (1 << 7)                /* Dma trap queue is full */
79887 +#define CTXT_THREAD_QUEUE_FULL                 (1 << 8)                /* Thread trap queue is full */
79888 +#define CTXT_EVENT_QUEUE_FULL                  (1 << 9)                /* Event interrupt queue is full */
79889 +#define CTXT_COMMAND_OVERFLOW_ERROR            (1 << 10)               /* Trap queue overflow */
79890 +
79891 +#define CTXT_SWAP_WANTED                       (1 << 11)               /* Some one wanted to swap */
79892 +#define CTXT_WAITING_SWAPIN                    (1 << 12)               /* Someone waiting on swapin */
79893 +
79894 +#define CTXT_WAITING_COMMAND                   (1 << 13)               /* swgelan waiting on command port */
79895 +#define CTXT_COMMAND_MAPPED_MAIN               (1 << 14)               /* segelan has mapped command port */
79896 +
79897 +#define CTXT_QUEUES_EMPTY                      (1 << 15)               /* dma/thread run queues are empty */
79898 +#define CTXT_QUEUES_EMPTYING                   (1 << 16)               /* dma/thread run queues are being emptied */
79899 +
79900 +#define CTXT_USER_FILTERING                    (1 << 17)               /* user requested context filter */
79901 +
79902 +#define CTXT_KERNEL                            (1 << 18)               /* context is a kernel context */
79903 +#define CTXT_COMMAND_MAPPED_ELAN               (1 << 19)               /* command port is mapped for elan */
79904 +#define CTXT_FIXUP_NETERR                      (1 << 20)               /* fixing up a network error */
79905 +
79906 +
79907 +#define CTXT_SWAPPED_REASONS           (CTXT_NO_LWPS   |               \
79908 +                                        CTXT_DETACHED  |               \
79909 +                                        CTXT_EXITING   |               \
79910 +                                        CTXT_FIXUP_NETERR)
79911 +
79912 +#define CTXT_OTHERS_REASONS            (CTXT_EVENT_QUEUE_FULL  |       \
79913 +                                        CTXT_DMA_QUEUE_FULL    |       \
79914 +                                        CTXT_THREAD_QUEUE_FULL |       \
79915 +                                        CTXT_COMMAND_OVERFLOW_ERROR |  \
79916 +                                        CTXT_SWAPPED_REASONS)
79917 +
79918 +#define CTXT_INPUTTER_REASONS          (CTXT_USER_FILTERING |          \
79919 +                                        CTXT_OTHERS_REASONS)
79920 +
79921 +#define CTXT_COMMAND_MAPPED            (CTXT_COMMAND_MAPPED_MAIN |     \
79922 +                                        CTXT_COMMAND_MAPPED_ELAN)
79923 +
79924 +#define CTXT_IS_KERNEL(ctxt)           ((ctxt)->Status & CTXT_KERNEL)
79925 +
79926 +/*
79927 + * State values for ctxt_inputterState/ctxt_commandportStats
79928 + */
79929 +#define CTXT_STATE_OK                  0
79930 +#define CTXT_STATE_TRAPPED             1               /* Inputter channel 0 trapped */
79931 +#define CTXT_STATE_RESOLVING           2               /* An LWP is resolving the trap */
79932 +#define CTXT_STATE_NEEDS_RESTART       3               /* The trapped packet needs to be executed */
79933 +#define CTXT_STATE_NETWORK_ERROR       4               /* We're waiting on an RPC for the identify transaction */
79934 +#define CTXT_STATE_EXECUTING           5               /* An LWP is executing the trapped packet */
79935 +
79936 +/*
79937 + * State values for OthersState.
79938 + */
79939 +#define CTXT_OTHERS_RUNNING            0
79940 +#define CTXT_OTHERS_HALTING            1
79941 +#define CTXT_OTHERS_SWAPPING           2
79942 +#define CTXT_OTHERS_HALTING_MORE       3
79943 +#define CTXT_OTHERS_SWAPPING_MORE      4
79944 +#define CTXT_OTHERS_SWAPPED            5
79945 +
79946 +typedef struct elan3_ops
79947 +{
79948 +    u_int  Version;
79949 +
79950 +    int         (*Exception)   (ELAN3_CTXT *ctxt, int type, int proc, void *trap, va_list ap);
79951 +
79952 +    /* swap item list functions */
79953 +    int  (*GetWordItem)                (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep);
79954 +    int  (*GetBlockItem)       (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep);
79955 +    void (*PutWordItem)                (ELAN3_CTXT *ctxt, int list, E3_Addr value);
79956 +    void (*PutBlockItem)       (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr);
79957 +    void (*PutbackItem)                (ELAN3_CTXT *ctxt, int list, void *item);
79958 +    void (*FreeWordItem)       (ELAN3_CTXT *ctxt, void *item);
79959 +    void (*FreeBlockItem)      (ELAN3_CTXT *ctxt, void *item);
79960 +    int  (*CountItems)         (ELAN3_CTXT *ctxt, int list);
79961 +
79962 +    /* event interrupt cookie */
79963 +    int  (*Event)              (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
79964 +
79965 +    /* swapin/swapout functions. */
79966 +    void (*Swapin)             (ELAN3_CTXT *ctxt);
79967 +    void (*Swapout)            (ELAN3_CTXT *ctxt);
79968 +
79969 +    /* Free of private data */
79970 +    void (*FreePrivate)                (ELAN3_CTXT *ctxt);
79971 +
79972 +    /* Fixup a network error */
79973 +    int  (*FixupNetworkError)  (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef);
79974 +
79975 +    /* Interrupt handler trap interface */
79976 +    int  (*DProcTrap)          (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
79977 +    int  (*TProcTrap)          (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
79978 +    int         (*IProcTrap)           (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int chan);
79979 +    int         (*CProcTrap)           (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
79980 +    int  (*CProcReissue)        (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *TrapBuf);
79981 +
79982 +    /* User memory access functions */
79983 +    int              (*StartFaultCheck)(ELAN3_CTXT *ctxt);
79984 +    void      (*EndFaultCheck)  (ELAN3_CTXT *ctxt);
79985 +
79986 +    E3_uint8  (*Load8)         (ELAN3_CTXT *ctxt, E3_Addr addr);
79987 +    void      (*Store8)                (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
79988 +    E3_uint16 (*Load16)                (ELAN3_CTXT *ctxt, E3_Addr addr);
79989 +    void      (*Store16)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
79990 +    E3_uint32 (*Load32)                (ELAN3_CTXT *ctxt, E3_Addr addr);
79991 +    void      (*Store32)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
79992 +    E3_uint64 (*Load64)                (ELAN3_CTXT *ctxt, E3_Addr addr);
79993 +    void      (*Store64)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
79994 +
79995 +} ELAN3_OPS;
79996 +
79997 +#define ELAN3_OPS_VERSION      0xdeef0001
79998 +
79999 +/*
80000 + * Flags for ops_event.
80001 + */
80002 +#define OP_INTR                        0               /* Called from interrupt handler */
80003 +#define OP_LWP                 1               /* Called from "lwp" */
80004 +
80005 +/*
80006 + * Return codes for "ops" functions.
80007 + */
80008 +#define OP_DEFER               0               /* Defer to next lower interrupt */
80009 +#define OP_IGNORE              1               /* No event handler, so ignore it */
80010 +#define OP_HANDLED             2               /* Handled event (resume thread) */
80011 +#define OP_FAILED              3               /* Failed */
80012 +
80013 +#define ELAN3_CALL_OP(ctxt,fn)                         ((ctxt)->Operations && (ctxt)->Operations->fn) ? (ctxt)->Operations->fn 
80014 +
80015 +#define ELAN3_OP_EXCEPTION(ctxt,type,proc,trap,ap)     (ELAN3_CALL_OP(ctxt,Exception)    (ctxt,type,proc,trap,ap)      : OP_IGNORE)
80016 +#define ELAN3_OP_GET_WORD_ITEM(ctxt,list,itemp,valuep) (ELAN3_CALL_OP(ctxt,GetWordItem)  (ctxt,list,itemp,valuep)      : 0)
80017 +#define ELAN3_OP_GET_BLOCK_ITEM(ctxt,list,itemp,valuep)        (ELAN3_CALL_OP(ctxt,GetBlockItem) (ctxt,list,itemp,valuep)      : 0)
80018 +#define ELAN3_OP_PUT_WORD_ITEM(ctxt,list,value)                (ELAN3_CALL_OP(ctxt,PutWordItem)  (ctxt,list,value)             : (void)0)
80019 +#define ELAN3_OP_PUT_BLOCK_ITEM(ctxt,list,ptr)         (ELAN3_CALL_OP(ctxt,PutBlockItem) (ctxt,list,ptr)               : (void)0)
80020 +#define ELAN3_OP_PUTBACK_ITEM(ctxt,list,item)          (ELAN3_CALL_OP(ctxt,PutbackItem)  (ctxt,list,item)              : (void)0)
80021 +#define ELAN3_OP_FREE_WORD_ITEM(ctxt,item)             (ELAN3_CALL_OP(ctxt,FreeWordItem) (ctxt,item)                   : (void)0)
80022 +#define ELAN3_OP_FREE_BLOCK_ITEM(ctxt,item)            (ELAN3_CALL_OP(ctxt,FreeBlockItem)(ctxt,item)                   : (void)0)
80023 +#define ELAN3_OP_COUNT_ITEMS(ctxt,list)                        (ELAN3_CALL_OP(ctxt,CountItems)(ctxt,list)                      : 0)
80024 +#define ELAN3_OP_EVENT(ctxt,cookie,flag)               (ELAN3_CALL_OP(ctxt,Event)(ctxt,cookie,flag)                    : OP_IGNORE)
80025 +#define ELAN3_OP_SWAPIN(ctxt)                          (ELAN3_CALL_OP(ctxt,Swapin)(ctxt)                               : (void)0)
80026 +#define ELAN3_OP_SWAPOUT(ctxt)                         (ELAN3_CALL_OP(ctxt,Swapout)(ctxt)                              : (void)0)
80027 +#define ELAN3_OP_FREE_PRIVATE(ctxt)                    (ELAN3_CALL_OP(ctxt,FreePrivate)(ctxt)                          : (void)0)
80028 +#define ELAN3_OP_FIXUP_NETWORK_ERROR(ctxt, nef)                (ELAN3_CALL_OP(ctxt,FixupNetworkError)(ctxt,nef)                        : OP_FAILED)
80029 +
80030 +#define ELAN3_OP_DPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,DProcTrap)(ctxt,trap)                       : OP_DEFER)
80031 +#define ELAN3_OP_TPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,TProcTrap)(ctxt,trap)                       : OP_DEFER)
80032 +#define ELAN3_OP_IPROC_TRAP(ctxt, trap, chan)          (ELAN3_CALL_OP(ctxt,IProcTrap)(ctxt,trap,chan)                  : OP_DEFER)
80033 +#define ELAN3_OP_CPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,CProcTrap)(ctxt,trap)                       : OP_DEFER)
80034 +#define ELAN3_OP_CPROC_REISSUE(ctxt,tbuf)              (ELAN3_CALL_OP(ctxt,CProcReissue)(ctxt, tbuf)                   : OP_DEFER)
80035 +
80036 +#define ELAN3_OP_START_FAULT_CHECK(ctxt)               (ELAN3_CALL_OP(ctxt,StartFaultCheck)(ctxt)                      : 0)
80037 +#define ELAN3_OP_END_FAULT_CHECK(ctxt)                 (ELAN3_CALL_OP(ctxt,EndFaultCheck)(ctxt)                                : (void)0)
80038 +#define ELAN3_OP_LOAD8(ctxt,addr)                      (ELAN3_CALL_OP(ctxt,Load8)(ctxt,addr)                           : 0)
80039 +#define ELAN3_OP_STORE8(ctxt,addr,val)                 (ELAN3_CALL_OP(ctxt,Store8)(ctxt,addr,val)                      : (void)0)
80040 +#define ELAN3_OP_LOAD16(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load16)(ctxt,addr)                          : 0)
80041 +#define ELAN3_OP_STORE16(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store16)(ctxt,addr,val)                     : (void)0)
80042 +#define ELAN3_OP_LOAD32(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load32)(ctxt,addr)                          : 0)
80043 +#define ELAN3_OP_STORE32(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store32)(ctxt,addr,val)                     : (void)0)
80044 +#define ELAN3_OP_LOAD64(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load64)(ctxt,addr)                          : 0)
80045 +#define ELAN3_OP_STORE64(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store64)(ctxt,addr,val)                     : (void)0)
80046 +
80047 +#endif /* __KERNEL__ */
80048 +
80049 +/* "list" argument to ops functions */
80050 +#define LIST_DMA_PTR           0
80051 +#define LIST_DMA_DESC          1
80052 +#define LIST_THREAD                    2
80053 +#define LIST_COMMAND           3
80054 +#define LIST_SETEVENT          4
80055 +#define LIST_FREE_WORD         5
80056 +#define LIST_FREE_BLOCK                6
80057 +
80058 +#define MAX_LISTS              7
80059 +
80060 +#if defined(__KERNEL__) && MAX_LISTS != NUM_LISTS
80061 +#  error Check NUM_LISTS == MAX_LISTS
80062 +#endif
80063 +
80064 +/*
80065 + * Values for the 'type' field to PostException().
80066 + */
80067 +#define EXCEPTION_INVALID_ADDR         1               /* FaultArea, res */
80068 +#define EXCEPTION_UNIMP_INSTR          2               /* instr */
80069 +#define EXCEPTION_INVALID_PROCESS      3               /* proc, res */
80070 +#define EXCEPTION_SIMULATION_FAILED    4               /* */
80071 +#define EXCEPTION_UNIMPLEMENTED                5               /* */
80072 +#define EXCEPTION_SWAP_FAULT           6               /* */
80073 +#define EXCEPTION_SWAP_FAILED          7               /* */
80074 +#define EXCEPTION_BAD_PACKET           8               /* */
80075 +#define EXCEPTION_FAULTED              9               /* addr */
80076 +#define EXCEPTION_QUEUE_OVERFLOW       10              /* FaultArea, TrapType */
80077 +#define EXCEPTION_COMMAND_OVERFLOW     11              /* count */
80078 +#define EXCEPTION_DMA_RETRY_FAIL       12              /* */
80079 +#define EXCEPTION_CHAINED_EVENT                13              /* EventAddr */
80080 +#define EXCEPTION_THREAD_KILLED                14              /* */
80081 +#define EXCEPTION_CANNOT_SAVE_THREAD   15
80082 +#define EXCEPTION_BAD_SYSCALL          16              /* */
80083 +#define EXCEPTION_DEBUG                        17
80084 +#define EXCEPTION_BAD_EVENT            18              /* */
80085 +#define EXCEPTION_NETWORK_ERROR                19              /* rvp */
80086 +#define EXCEPTION_BUS_ERROR            20
80087 +#define EXCEPTION_COOKIE_ERROR         21
80088 +#define EXCEPTION_PACKET_TIMEOUT       22
80089 +#define EXCEPTION_BAD_DMA              23              /* */
80090 +#define EXCEPTION_ENOMEM               24
80091 +
80092 +/*
80093 + * Values for the 'proc' field to ElanException().
80094 + */
80095 +#define COMMAND_PROC                   1
80096 +#define THREAD_PROC                    2
80097 +#define DMA_PROC                       3
80098 +#define INPUT_PROC                     4
80099 +#define EVENT_PROC                     5
80100 +
80101 +/* Flags to IssueDmaCommand */
80102 +#define ISSUE_COMMAND_FOR_CPROC                1
80103 +#define ISSUE_COMMAND_CANT_WAIT                2
80104 +
80105 +/* Return code from IssueDmaCommand.*/
80106 +#define ISSUE_COMMAND_OK               0
80107 +#define ISSUE_COMMAND_TRAPPED          1
80108 +#define ISSUE_COMMAND_RETRY            2
80109 +#define ISSUE_COMMAND_WAIT             3
80110 +
80111 +#ifdef __KERNEL__
80112 +
80113 +extern ELAN3_CTXT *elan3_alloc(ELAN3_DEV *dev, int kernel);
80114 +extern void       elan3_free      (ELAN3_CTXT *ctxt);
80115 +
80116 +extern int        elan3_attach    (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
80117 +extern int         elan3_doattach  (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
80118 +extern void       elan3_detach    (ELAN3_CTXT *ctxt);
80119 +extern void        elan3_dodetach  (ELAN3_CTXT *ctxt);
80120 +
80121 +extern int        elan3_addvp     (ELAN3_CTXT *ctxt, int process, ELAN_CAPABILITY *cap);
80122 +extern int        elan3_removevp  (ELAN3_CTXT *ctxt, int process);
80123 +extern int        elan3_addbcastvp(ELAN3_CTXT *ctxt, int process, int base, int count);
80124 +
80125 +extern int         elan3_process   (ELAN3_CTXT *ctxt);
80126 +
80127 +extern int        elan3_load_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits);
80128 +extern int        elan3_check_route(ELAN3_CTXT *ctxt, int process, E3_uint16 *flits, E3_uint32 *routeError);
80129 +
80130 +extern int        elan3_lwp       (ELAN3_CTXT *ctxt);
80131 +
80132 +extern void       elan3_swapin (ELAN3_CTXT *ctxt, int reason);
80133 +extern void       elan3_swapout (ELAN3_CTXT *ctxt, int reason);
80134 +extern int         elan3_pagefault (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSave, int npages);
80135 +extern void        elan3_block_inputter (ELAN3_CTXT *ctxt, int block);
80136 +
80137 +
80138 +extern E3_Addr     elan3_init_thread (ELAN3_DEV *dev, E3_Addr fn, E3_Addr addr, sdramaddr_t stack, int stackSize, int nargs, ...);
80139 +
80140 +extern void       SetInputterState (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
80141 +extern void       SetInputterStateForContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
80142 +extern void        UnloadCommandPageMapping (ELAN3_CTXT *ctxt);
80143 +extern void       StartSwapoutContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
80144 +
80145 +extern int        HandleExceptions (ELAN3_CTXT *ctxt, unsigned long *flags);
80146 +extern int        RestartContext (ELAN3_CTXT *ctxt, unsigned long *flags);
80147 +extern int         CheckCommandQueueFlushed (ELAN3_CTXT *ctxt, E3_uint32 cflags, int how, unsigned long *flags);
80148 +extern int        IssueCommand (ELAN3_CTXT *ctxt, unsigned cmdoff, E3_Addr value, int flags);
80149 +extern int        IssueDmaCommand (ELAN3_CTXT *ctxt, E3_Addr value, void *item, int flags);
80150 +extern int         WaitForDmaCommand (ELAN3_CTXT *ctxt, void *item, int flags);
80151 +extern void       FixupEventTrap (ELAN3_CTXT *ctxt, int proc, void *trap, E3_uint32 TrapType, 
80152 +                                  E3_FaultSave_BE *FaultSaveArea, int flags);
80153 +extern int        SimulateBlockCopy (ELAN3_CTXT *ctxt, E3_Addr EventAddress);
80154 +extern void       ReissueEvent (ELAN3_CTXT *ctxt, E3_Addr addr,int flags);
80155 +extern int         SetEventsNeedRestart (ELAN3_CTXT *ctxt);
80156 +extern void        RestartSetEvents (ELAN3_CTXT *ctxt);
80157 +extern int        RunEventType (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSaveArea, E3_uint32 EventType);
80158 +extern void        WakeupLwp (ELAN3_DEV *dev, void *arg);
80159 +extern void       QueueEventInterrupt (ELAN3_CTXT *ctxt, E3_uint32 cookie);
80160 +extern int         WaitForCommandPort (ELAN3_CTXT *ctxt);
80161 +
80162 +extern int        ElanException (ELAN3_CTXT *ctxt, int type, int proc, void *trap, ...);
80163 +
80164 +/* context_osdep.c */
80165 +extern int        LoadElanTranslation (ELAN3_CTXT *ctxt, E3_Addr elanAddr, int len, int protFault, int writeable);
80166 +extern void       LoadCommandPortTranslation (ELAN3_CTXT *ctxt);
80167 +
80168 +#if defined(DIGITAL_UNIX)
80169 +/* seg_elan.c */
80170 +extern caddr_t    elan3_segelan3_create (ELAN3_CTXT *ctxt);
80171 +extern void       elan3_segelan3_destroy (ELAN3_CTXT *ctxt);
80172 +extern int         elan3_segelan3_map (ELAN3_CTXT *ctxt);
80173 +extern void        elan3_segelan3_unmap (ELAN3_CTXT *ctxt);
80174 +
80175 +/* seg_elanmem.c */
80176 +extern int        elan3_segelanmem_create (ELAN3_DEV *dev, unsigned object, unsigned off, vm_offset_t *addrp, int len);
80177 +#endif /* defined(DIGITAL_UNIX) */
80178 +
80179 +/* route_table.c */
80180 +extern ELAN3_ROUTE_TABLE *AllocateRouteTable (ELAN3_DEV *dev, int size);
80181 +extern void              FreeRouteTable  (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl);
80182 +extern int               LoadRoute       (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp, int ctxnum, int nflits, E3_uint16 *flits);
80183 +extern int               GetRoute        (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, E3_uint16 *flits);
80184 +extern void             InvalidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
80185 +extern void             ValidateRoute   (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
80186 +extern void             ClearRoute      (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
80187 +
80188 +extern int               GenerateRoute (ELAN_POSITION *pos, E3_uint16 *flits, int lowid, int highid, int timeout, int highPri);
80189 +extern int               GenerateProbeRoute (E3_uint16 *flits, int nodeid, int level, int *linkup, int *linkdown, int adaptive);
80190 +extern int               GenerateCheckRoute (ELAN_POSITION *pos, E3_uint16 *flits, int level, int adaptive);
80191 +
80192 +/* virtual_process.c */
80193 +extern ELAN_LOCATION  ProcessToLocation     (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, int process, ELAN_CAPABILITY *cap);
80194 +extern int           ResolveVirtualProcess (ELAN3_CTXT *ctxt, int process);
80195 +extern caddr_t        CapabilityString      (ELAN_CAPABILITY *cap);
80196 +extern void           UnloadVirtualProcess  (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
80197 +
80198 +extern int           elan3_get_route   (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits);
80199 +extern int           elan3_reset_route (ELAN3_CTXT *ctxt, int process);
80200 +
80201 +/* cproc.c */
80202 +extern int       NextCProcTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
80203 +extern void      ResolveCProcTrap (ELAN3_CTXT *ctxt);
80204 +extern int       RestartCProcTrap (ELAN3_CTXT *ctxt);
80205 +
80206 +/* iproc.c */
80207 +extern void       InspectIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap);
80208 +extern void      ResolveIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvp);
80209 +extern int       RestartIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap);
80210 +extern char      *IProcTrapString (E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData *datap);
80211 +extern void       SimulateUnlockQueue (ELAN3_CTXT *ctxt, E3_Addr QueuePointer, int SentAck);
80212 +
80213 +/* tproc.c */
80214 +extern int       NextTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
80215 +extern void      ResolveTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
80216 +extern int       TProcNeedsRestart (ELAN3_CTXT *ctxt);
80217 +extern void      RestartTProcItems (ELAN3_CTXT *ctxt);
80218 +extern E3_Addr    SaveThreadToStack (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int SkipInstruction);
80219 +extern void       ReissueStackPointer (ELAN3_CTXT *ctxt, E3_Addr StackPointer);
80220 +
80221 +/* tprocinsts.c */
80222 +extern int        RollThreadToClose (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, E3_uint32 PAckVal);
80223 +
80224 +/* tproc_osdep.c */
80225 +extern int        ThreadSyscall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip);
80226 +extern int       ThreadElancall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip);
80227 +
80228 +/* dproc.c */
80229 +extern int       NextDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
80230 +extern void      ResolveDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
80231 +extern int       DProcNeedsRestart (ELAN3_CTXT *ctxt);
80232 +extern void      RestartDProcItems (ELAN3_CTXT *ctxt);
80233 +extern void       RestartDmaDesc (ELAN3_CTXT *ctxt, E3_DMA_BE *desc);
80234 +extern void       RestartDmaTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
80235 +extern void      RestartDmaPtr (ELAN3_CTXT *ctxt, E3_Addr ptr);
80236 +
80237 +/* network_error.c */
80238 +extern void       InitialiseNetworkErrorResolver (void);
80239 +extern void       FinaliseNetworkErrorResolver (void);
80240 +extern int        QueueNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp);
80241 +extern void      FreeNetworkErrorResolver (NETERR_RESOLVER *rvp);
80242 +extern void       CancelNetworkErrorResolver (NETERR_RESOLVER *rvp);
80243 +extern int       ExecuteNetworkErrorFixup (NETERR_MSG *msg);
80244 +extern void      CompleteNetworkErrorFixup (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, int status);
80245 +
80246 +extern int        AddNeterrServerSyscall (int elanId, void *configp, void *addrp, char *namep);
80247 +
80248 +/* eventcookie.c */
80249 +extern void                cookie_init(void);
80250 +extern void                cookie_fini(void);
80251 +extern EVENT_COOKIE_TABLE *cookie_alloc_table (unsigned long task, unsigned long handle);
80252 +extern void                cookie_free_table (EVENT_COOKIE_TABLE *tbl);
80253 +extern int                 cookie_alloc_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
80254 +extern int                 cookie_free_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
80255 +extern int                 cookie_fire_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
80256 +extern int                 cookie_wait_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
80257 +extern int                 cookie_arm_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
80258 +
80259 +/* routecheck.c */
80260 +extern int elan3_route_check          (ELAN3_CTXT *ctxt, E3_uint16 *flits, int destNode);
80261 +extern int elan3_route_broadcast_check(ELAN3_CTXT *ctxt, E3_uint16 *flitsA, int lowNode, int highNode);
80262 +
80263 +
80264 +#endif /* __KERNEL__ */
80265 +
80266 +#ifdef __cplusplus
80267 +}
80268 +#endif
80269 +
80270 +#endif /* _ELAN3_ELANCTXT_H */
80271 +
80272 +/*
80273 + * Local variables:
80274 + * c-file-style: "stroustrup"
80275 + * End:
80276 + */
80277 Index: linux-2.4.21/include/elan3/elandebug.h
80278 ===================================================================
80279 --- linux-2.4.21.orig/include/elan3/elandebug.h 2004-02-23 16:02:56.000000000 -0500
80280 +++ linux-2.4.21/include/elan3/elandebug.h      2005-06-01 23:12:54.720420712 -0400
80281 @@ -0,0 +1,106 @@
80282 +/*
80283 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80284 + *
80285 + *    For licensing information please see the supplied COPYING file
80286 + *
80287 + */
80288 +
80289 +#ifndef _ELAN3_ELANDEBUG_H
80290 +#define _ELAN3_ELANDEBUG_H
80291 +
80292 +#ident "$Id: elandebug.h,v 1.38 2003/09/24 13:57:24 david Exp $"
80293 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandebug.h,v $ */
80294 +
80295 +#if defined(__KERNEL__)
80296 +
80297 +extern u_int elan3_debug;
80298 +extern u_int elan3_debug_console;
80299 +extern u_int elan3_debug_buffer;
80300 +extern u_int elan3_debug_ignore_dev;
80301 +extern u_int elan3_debug_ignore_kcomm;
80302 +extern u_int elan3_debug_ignore_ctxt;
80303 +extern u_int elan3_debug_display_ctxt;
80304 +
80305 +#define DBG_CONFIG     0x00000001                      /* Module configuration */
80306 +#define DBG_HAT                0x00000002
80307 +#define DBG_FN         0x00000004
80308 +#define DBG_SEG                0x00000008
80309 +#define DBG_INTR       0x00000010
80310 +#define DBG_LWP                0x00000020
80311 +#define DBG_FAULT      0x00000040
80312 +#define DBG_EVENT      0x00000080
80313 +#define DBG_CPROC      0x00000100
80314 +#define DBG_TPROC      0x00000200
80315 +#define DBG_DPROC      0x00000400
80316 +#define DBG_IPROC      0x00000800
80317 +#define DBG_SWAP       0x00001000
80318 +#define DBG_CMD                0x00002000
80319 +#define DBG_VP         0x00004000
80320 +#define DBG_SYSCALL    0x00008000
80321 +#define DBG_BSCAN      0x00010000
80322 +#define DBG_LINKERR    0x00020000
80323 +#define DBG_NETERR     0x00040000
80324 +#define DBG_NETRPC     0x00080000
80325 +#define DBG_EVENTCOOKIE 0x00100000
80326 +#define DBG_SDRAM      0x00200000
80327 +
80328 +#define DBG_EP         0x10000000
80329 +#define DBG_EPCONSOLE  0x20000000
80330 +
80331 +#define DBG_EIP                0x40000000
80332 +#define DBG_EIPFAIL    0x80000000
80333 +
80334 +#define DBG_ALL                0xffffffff
80335 +
80336 +/* values to pass as "ctxt" rather than a "ctxt" pointer */
80337 +#define DBG_DEVICE     ((void *) 0)
80338 +#define DBG_KCOMM      ((void *) 1)
80339 +#define DBG_ICS                ((void *) 2)
80340 +#define DBG_USER       ((void *) 3)
80341 +#define DBG_NTYPES     64
80342 +
80343 +#if defined(DEBUG_PRINTF)
80344 +#  define DBG(m,fn)                            ((elan3_debug&(m)) ? (void)(fn) : (void)0)
80345 +#  define PRINTF0(ctxt,m,fmt)                  ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt)             : (void)0)
80346 +#  define PRINTF1(ctxt,m,fmt,a)                        ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a)           : (void)0)
80347 +#  define PRINTF2(ctxt,m,fmt,a,b)              ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b)         : (void)0)
80348 +#  define PRINTF3(ctxt,m,fmt,a,b,c)            ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c)       : (void)0)
80349 +#  define PRINTF4(ctxt,m,fmt,a,b,c,d)          ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d)     : (void)0)
80350 +#  define PRINTF5(ctxt,m,fmt,a,b,c,d,e)                ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d,e)   : (void)0)
80351 +#  define PRINTF6(ctxt,m,fmt,a,b,c,d,e,f)      ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d,e,f) : (void)0)
80352 +#ifdef __GNUC__
80353 +#  define PRINTF(ctxt,m,args...)               ((elan3_debug&(m)) ? elan3_debugf(ctxt,m, ##args)         : (void)0)
80354 +#endif
80355 +
80356 +#else
80357 +
80358 +#  define DBG(m, fn)                           do { ; } while (0)
80359 +#  define PRINTF0(ctxt,m,fmt)                  do { ; } while (0)
80360 +#  define PRINTF1(ctxt,m,fmt,a)                        do { ; } while (0)
80361 +#  define PRINTF2(ctxt,m,fmt,a,b)              do { ; } while (0)
80362 +#  define PRINTF3(ctxt,m,fmt,a,b,c)            do { ; } while (0)
80363 +#  define PRINTF4(ctxt,m,fmt,a,b,c,d)          do { ; } while (0)
80364 +#  define PRINTF5(ctxt,m,fmt,a,b,c,d,e)                do { ; } while (0)
80365 +#  define PRINTF6(ctxt,m,fmt,a,b,c,d,e,f)      do { ; } while (0)
80366 +#ifdef __GNUC__
80367 +#  define PRINTF(ctxt,m,args...)               do { ; } while (0)
80368 +#endif
80369 +
80370 +#endif /* DEBUG_PRINTF */
80371 +
80372 +#ifdef __GNUC__
80373 +extern void       elan3_debugf (void *ctxt, unsigned int mode, char *fmt, ...)
80374 +    __attribute__ ((format (printf,3,4)));
80375 +#else
80376 +extern void       elan3_debugf (void *ctxt, unsigned int mode, char *fmt, ...);
80377 +#endif
80378 +
80379 +
80380 +#endif /* __KERNEL__ */
80381 +#endif /* _ELAN3_ELANDEBUG_H */
80382 +
80383 +/*
80384 + * Local variables:
80385 + * c-file-style: "stroustrup"
80386 + * End:
80387 + */
80388 Index: linux-2.4.21/include/elan3/elandev.h
80389 ===================================================================
80390 --- linux-2.4.21.orig/include/elan3/elandev.h   2004-02-23 16:02:56.000000000 -0500
80391 +++ linux-2.4.21/include/elan3/elandev.h        2005-06-01 23:12:54.721420560 -0400
80392 @@ -0,0 +1,581 @@
80393 +/*
80394 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80395 + *
80396 + *    For licensing information please see the supplied COPYING file
80397 + *
80398 + */
80399 +
80400 +#ifndef __ELAN3_ELANDEV_H
80401 +#define __ELAN3_ELANDEV_H
80402 +
80403 +#ident "$Id: elandev.h,v 1.74.2.2 2004/12/10 11:10:19 mike Exp $"
80404 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandev.h,v $ */
80405 +
80406 +#include <elan/bitmap.h>
80407 +#include <elan/devinfo.h>
80408 +#include <elan/stats.h>
80409 +
80410 +#if defined(DIGITAL_UNIX)
80411 +#  include <elan3/elandev_dunix.h>
80412 +#elif defined(LINUX)
80413 +#  include <elan3/elandev_linux.h>
80414 +#elif defined(SOLARIS)
80415 +#  include <elan3/elandev_solaris.h>
80416 +#endif
80417 +
80418 +#ifndef TRUE
80419 +#  define TRUE 1
80420 +#endif
80421 +#ifndef FALSE
80422 +#  define FALSE 0
80423 +#endif
80424 +
80425 +/*
80426 + * Elan base address registers defined as follows :
80427 + */
80428 +#define ELAN3_BAR_SDRAM                0
80429 +#define ELAN3_BAR_COMMAND_PORT 1
80430 +#define ELAN3_BAR_REGISTERS    2
80431 +#define ELAN3_BAR_EBUS         3
80432 +
80433 +/* Macro to generate 'offset' to mmap "mem" device */
80434 +#define OFF_TO_SPACE(off)      ((off) >> 28)
80435 +#define OFF_TO_OFFSET(off)     ((off) & 0x0FFFFFFF)
80436 +#define GEN_OFF(space,off)     (((space) << 28) | ((off) & 0x0FFFFFFF))
80437 +
80438 +#ifdef __KERNEL__
80439 +
80440 +/*
80441 + * Elan EBUS is configured as follows :
80442 + */
80443 +#define ELAN3_EBUS_ROM_OFFSET          0x000000                /* rom */
80444 +#define ELAN3_EBUS_INTPAL_OFFSET       0x180000                /* interrupt pal (write only) */
80445 +
80446 +#define ELAN3_EBUS_ROM_SIZE            0x100000
80447 +
80448 +/*
80449 + * Elan SDRAM is arranged as follows :
80450 + */
80451 +#define ELAN3_TANDQ_SIZE               0x0020000               /* Trap And Queue Size */
80452 +#define ELAN3_CONTEXT_SIZE             0x0010000               /* Context Table Size */
80453 +#define ELAN3_COMMAND_TRAP_SIZE                0x0010000               /* Command Port Trap Size */
80454 +
80455 +#ifdef MPSAS
80456 +#define ELAN3_LN2_NUM_CONTEXTS 8                               /* Support 256 contexts */
80457 +#else
80458 +#define ELAN3_LN2_NUM_CONTEXTS 12                              /* Support 4096 contexts */
80459 +#endif
80460 +#define ELAN3_NUM_CONTEXTS     (1 << ELAN3_LN2_NUM_CONTEXTS)   /* Entries in context table */
80461 +
80462 +#define ELAN3_SDRAM_NUM_BANKS  4                               /* Elan supports 4 Banks of Sdram */
80463 +#define ELAN3_SDRAM_BANK_SHIFT 26                              /* each of which can be 64 mbytes ? */
80464 +#define ELAN3_SDRAM_BANK_SIZE  (1 << ELAN3_SDRAM_BANK_SHIFT)
80465 +
80466 +#define ELAN3_MAX_CACHE_SIZE   (64 * 1024)                     /* Maximum cache size */
80467 +#define ELAN3_CACHE_SIZE       (64 * 4 * E3_CACHELINE_SIZE)    /* Elan3 has 8K cache */
80468 +
80469 +#ifndef offsetof
80470 +#define offsetof(s, m)         (size_t)(&(((s *)0)->m))
80471 +#endif
80472 +
80473 +/*
80474 + * circular queue and macros to access members.
80475 + */
80476 +typedef struct
80477 +{
80478 +    u_int      q_back;                 /* Next free space */
80479 +    u_int      q_front;                /* First object to remove */
80480 +    u_int      q_size;                 /* Size of queue */
80481 +    u_int      q_count;                /* Current number of entries */
80482 +    u_int      q_slop;                 /* FULL <=> (count+slop) == size */
80483 +} ELAN3_QUEUE;
80484 +
80485 +typedef struct 
80486 +{
80487 +    u_int      q_back;                 /* Next free space */
80488 +    u_int      q_middle;               /* Middle pointer */
80489 +    u_int      q_front;                /* First object to remove */
80490 +    u_int      q_size;                 /* Size of queue */
80491 +    u_int      q_count;                /* Current number of entries */
80492 +    u_int      q_slop;                 /* FULL <=> (count+slop) == size */
80493 +} ELAN3_SPLIT_QUEUE;
80494 +
80495 +#define ELAN3_QUEUE_INIT(q,num,slop)   ((q).q_size = (num), (q).q_slop = (slop)+1, (q).q_front = (q).q_back = 0, (q).q_count = 0)
80496 +#define ELAN3_QUEUE_FULL(q)            ((q).q_count == ((q).q_size - (q).q_slop))
80497 +#define ELAN3_QUEUE_REALLY_FULL(q)     ((q).q_count == (q).q_size - 1)
80498 +#define ELAN3_QUEUE_EMPTY(q)           ((q).q_count == 0)
80499 +#define ELAN3_QUEUE_FRONT_EMPTY(q)     ((q).q_front == (q).q_middle)
80500 +#define ELAN3_QUEUE_BACK_EMPTY(q)      ((q).q_middle == (q).q_back)
80501 +#define ELAN3_QUEUE_ADD(q)             ((q).q_back = ((q).q_back+1) % (q).q_size, (q).q_count++)
80502 +#define ELAN3_QUEUE_REMOVE(q)          ((q).q_front = ((q).q_front+1) % (q).q_size, (q).q_count--)
80503 +#define ELAN3_QUEUE_ADD_FRONT(q)               ((q).q_front = ((q).q_front-1) % (q).q_size, (q).q_count++)
80504 +#define ELAN3_QUEUE_CONSUME(q)         ((q).q_middle = ((q).q_middle+1) % (q).q_size)
80505 +#define ELAN3_QUEUE_FRONT(q,qArea)     (&(qArea)[(q).q_front])
80506 +#define ELAN3_QUEUE_MIDDLE(q,qArea)    (&(qArea)[(q).q_middle])
80507 +#define ELAN3_QUEUE_BACK(q,qArea)      (&(qArea)[(q).q_back])
80508 +
80509 +#define SDRAM_MIN_BLOCK_SHIFT  10
80510 +#define SDRAM_NUM_FREE_LISTS   17                              /* allows max 64Mb block */
80511 +#define SDRAM_MIN_BLOCK_SIZE   (1 << SDRAM_MIN_BLOCK_SHIFT)
80512 +#define SDRAM_MAX_BLOCK_SIZE   (SDRAM_MIN_BLOCK_SIZE << (SDRAM_NUM_FREE_LISTS-1))
80513 +#define SDRAM_FREELIST_TRIGGER 32
80514 +
80515 +typedef struct elan3_sdram_bank
80516 +{
80517 +    u_int              Size;                                   /* Size of bank of memory */
80518 +
80519 +    ioaddr_t           Mapping;                                /* Where mapped in the kernel */
80520 +    DeviceMappingHandle Handle;                                        /* and mapping handle */
80521 +
80522 +    struct elan3_ptbl_gr **PtblGroups;
80523 +    
80524 +    bitmap_t          *Bitmaps[SDRAM_NUM_FREE_LISTS];
80525 +} ELAN3_SDRAM_BANK;
80526 +
80527 +typedef struct elan3_haltop
80528 +{
80529 +    struct elan3_haltop          *Next;                                /* Chain to next in list. */
80530 +    E3_uint32             Mask;                                /* Interrupt mask to see before calling function */
80531 +    
80532 +    void                (*Function)(void *, void *);           /* Function to call */
80533 +    void                 *Arguement;                           /* Argument to pass to function */
80534 +} ELAN3_HALTOP;
80535 +
80536 +#define HALTOP_BATCH   32
80537 +
80538 +#endif /* __KERNEL__ */
80539 +
80540 +typedef struct elan3_stats
80541 +{
80542 +    u_long     Version;                                        /* version field */
80543 +    u_long     Interrupts;                                     /* count of elan interrupts */
80544 +    u_long     TlbFlushes;                                     /* count of tlb flushes */
80545 +    u_long     InvalidContext;                                 /* count of traps with invalid context */
80546 +    u_long     ComQueueHalfFull;                               /* count of interrupts due to com queue being half full */
80547 +
80548 +    u_long     CProcTraps;                                     /* count of cproc traps */
80549 +    u_long     DProcTraps;                                     /* count of dproc traps */
80550 +    u_long     TProcTraps;                                     /* cound of tproc traps */
80551 +    u_long     IProcTraps;                                     /* count of iproc traps */
80552 +    u_long     EventInterrupts;                                /* count of event interrupts */
80553 +
80554 +    u_long     PageFaults;                                     /* count of elan page faults */
80555 +
80556 +    /* inputter related */
80557 +    u_long     EopBadAcks;                                     /* count of EOP_BAD_ACKs */
80558 +    u_long     EopResets;                                      /* count of EOP_ERROR_RESET */
80559 +    u_long      InputterBadLength;                             /* count of BadLength */
80560 +    u_long      InputterCRCDiscards;                           /* count of CRC_STATUS_DISCARD */
80561 +    u_long      InputterCRCErrors;                             /* count of CRC_STATUS_ERROR */
80562 +    u_long      InputterCRCBad;                                        /* count of CRC_STATUS_BAD */
80563 +    u_long     DmaNetworkErrors;                               /* count of errors in dma data */
80564 +    u_long     DmaIdentifyNetworkErrors;                       /* count of errors after dma identify */
80565 +    u_long     ThreadIdentifyNetworkErrors;                    /* count of errors after thread identify */
80566 +
80567 +    /* dma related */
80568 +    u_long     DmaRetries;                                     /* count of dma retries (due to retry fail count) */    
80569 +    u_long     DmaOutputTimeouts;                              /* count of dma output timeouts */
80570 +    u_long     DmaPacketAckErrors;                             /* count of dma packet ack errors */
80571 +
80572 +    /* thread related */
80573 +    u_long     ForcedTProcTraps;                               /* count of forced tproc traps */
80574 +    u_long     TrapForTooManyInsts;                            /* count of too many instruction traps */
80575 +    u_long     ThreadOutputTimeouts;                           /* count of thread output timeouts */
80576 +    u_long       ThreadPacketAckErrors;                                /* count of thread packet ack errors */
80577 +
80578 +    /* link related */
80579 +    u_long     LockError;                                      /* count of RegPtr->Exts.LinkErrorTypes:LS_LockError */
80580 +    u_long     DeskewError;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_DeskewError */
80581 +    u_long     PhaseError;                                     /* count of RegPtr->Exts.LinkErrorTypes:LS_PhaseError */
80582 +    u_long     DataError;                                      /* count of RegPtr->Exts.LinkErrorTypes:LS_DataError */
80583 +    u_long     FifoOvFlow0;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_FifoOvFlow0 */
80584 +    u_long     FifoOvFlow1;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_FifoOvFlow1 */
80585 +    u_long     LinkErrorValue;                                 /* link error value on data error */
80586 +
80587 +    /* memory related */
80588 +    u_long     CorrectableErrors;                              /* count of correctable ecc errors */
80589 +    u_long     UncorrectableErrors;                            /* count of uncorrectable ecc errors */
80590 +    u_long       MultipleErrors;                                       /* count of multiple ecc errors */
80591 +    u_long     SdramBytesFree;                                 /* count of sdram bytes free */
80592 +    
80593 +    /* Interrupt related */
80594 +    u_long     LongestInterrupt;                               /* length of longest interrupt in ticks */
80595 +
80596 +    u_long     EventPunts;                                     /* count of punts of event interrupts to thread */
80597 +    u_long     EventRescheds;                                  /* count of reschedules of event interrupt thread */
80598 +} ELAN3_STATS;
80599 +
80600 +#define ELAN3_STATS_VERSION    (ulong)2
80601 +#define ELAN3_NUM_STATS                (sizeof (ELAN3_STATS)/sizeof (u_long))
80602 +
80603 +#define ELAN3_STATS_DEV_FMT   "elan3_stats_dev_%d"
80604 +
80605 +#ifdef __KERNEL__
80606 +
80607 +#define BumpStat(dev,stat)     ((dev)->Stats.stat++)
80608 +
80609 +typedef struct elan3_level_ptbl_block
80610 +{
80611 +    spinlock_t             PtblLock;                           /* Page table freelist lock */
80612 +    int                            PtblTotal;                          /* Count of level N page tables allocated */
80613 +    int                            PtblFreeCount;                      /* Count of free level N page tables */
80614 +    struct elan3_ptbl     *PtblFreeList;                       /* Free level N page tables */
80615 +    struct elan3_ptbl_gr          *PtblGroupList;                      /* List of Groups of level N page tables */
80616 +} ELAN3_LEVEL_PTBL_BLOCK;
80617
80618 +typedef struct elan3_dev
80619 +{
80620 +    ELAN3_DEV_OSDEP        Osdep;                              /* OS specific entries */
80621 +    int                            Instance;                           /* Device number */
80622 +    ELAN_DEVINFO            Devinfo;                           
80623 +    ELAN_POSITION          Position;                           /* position in switch network (for user code) */
80624 +    ELAN_DEV_IDX           DeviceIdx;                          /* device index registered with elanmod */
80625 +
80626 +    int                            ThreadsShouldStop;                  /* flag that kernel threads should stop */
80627 +
80628 +    spinlock_t             IntrLock;
80629 +    spinlock_t             TlbLock;
80630 +    spinlock_t             CProcLock;
80631 +    kcondvar_t             IntrWait;                           /* place event interrupt thread sleeps */
80632 +    unsigned               EventInterruptThreadStarted:1;      /* event interrupt thread started */
80633 +    unsigned               EventInterruptThreadStopped:1;      /* event interrupt thread stopped */
80634 +    
80635 +    DeviceMappingHandle            RegHandle;                          /* DDI Handle */
80636 +    ioaddr_t               RegPtr;                             /* Elan Registers */
80637 +
80638 +    volatile E3_uint32     InterruptMask;                      /* copy of RegPtr->InterruptMask */
80639 +    volatile E3_uint32     Event_Int_Queue_FPtr;               /* copy of RegPtr->Event_Int_Queue_FPtr */
80640 +    volatile E3_uint32      SchCntReg;                         /* copy of RegPtr->SchCntReg */
80641 +    volatile E3_uint32      Cache_Control_Reg;                 /* true value for RegPtr->Cache_Control_Reg */
80642 +    
80643 +    ELAN3_SDRAM_BANK       SdramBanks[ELAN3_SDRAM_NUM_BANKS];  /* Elan sdram banks */
80644 +    spinlock_t             SdramLock;                          /* Sdram allocator */
80645 +    sdramaddr_t                    SdramFreeLists[SDRAM_NUM_FREE_LISTS];
80646 +    unsigned               SdramFreeCounts[SDRAM_NUM_FREE_LISTS];
80647 +               
80648 +    sdramaddr_t                    TAndQBase;                          /* Trap and Queue area */
80649 +    sdramaddr_t                    ContextTable;                       /* Elan Context Table */
80650 +    u_int                  ContextTableSize;                   /* # entries in context table */
80651 +
80652 +    struct elan3_ctxt      **CtxtTable;                         /* array of ctxt pointers or nulls */
80653 +
80654 +    sdramaddr_t                    CommandPortTraps[2];                /* Command port trap overflow */
80655 +    int                            CurrentCommandPortTrap;             /* Which overflow queue we're using */
80656 +    
80657 +    u_int                  HaltAllCount;                       /* Count of reasons to halt context 0 queues */
80658 +    u_int                  HaltNonContext0Count;               /* Count of reasons to halt non-context 0 queues */
80659 +    u_int                  HaltDmaDequeueCount;                /* Count of reasons to halt dma from dequeuing */
80660 +    u_int                  HaltThreadCount;                    /* Count of reasons to halt the thread processor */
80661 +    u_int                  FlushCommandCount;                  /* Count of reasons to flush command queues */
80662 +    u_int                  DiscardAllCount;                    /* Count of reasons to discard context 0 */
80663 +    u_int                  DiscardNonContext0Count;            /* Count of reasons to discard non context 0 */
80664 +
80665 +    struct thread_trap    *ThreadTrap;                         /* Thread Processor trap space */
80666 +    struct dma_trap       *DmaTrap;                            /* DMA Processor trap space */
80667 +
80668 +    spinlock_t             FreeHaltLock;                       /* Lock for haltop free list */
80669 +    ELAN3_HALTOP                  *FreeHaltOperations;                 /* Free list of haltops */
80670 +    u_int                  NumHaltOperations;                  /* Number of haltops allocated */
80671 +    u_int                  ReservedHaltOperations;             /* Number of haltops reserved */
80672 +
80673 +    ELAN3_HALTOP                  *HaltOperations;                     /* List of operations to call */
80674 +    ELAN3_HALTOP                 **HaltOperationsTailpp;               /* Pointer to last "next" pointer in list */
80675 +    E3_uint32              HaltOperationsMask;                 /* Or of all bits in list of operations */
80676 +
80677 +    physaddr_t             SdramPhysBase;                      /* Physical address of SDRAM */
80678 +    physaddr_t             SdramPhysMask;                      /* and mask of significant bits */ 
80679 +    
80680 +    physaddr_t             PciPhysBase;                        /* physical address of local PCI segment */
80681 +    physaddr_t             PciPhysMask;                        /* and mask of significant bits */
80682 +
80683 +    long                   ErrorTime;                          /* lbolt at last error (link,ecc etc) */
80684 +    long                   ErrorsPerTick;                      /* count of errors for this tick */
80685 +    timer_fn_t             ErrorTimeoutId;                     /* id of timeout when errors masked out */
80686 +    timer_fn_t             DmaPollTimeoutId;                   /* id of timeout to poll for "bad" dmas */
80687 +    int                            FilterHaltQueued;
80688 +
80689 +    /*
80690 +     * HAT layer specific entries.
80691 +     */
80692 +    ELAN3_LEVEL_PTBL_BLOCK   Level[4];
80693 +    spinlock_t             PtblGroupLock;                      /* Lock for Page Table group lists */
80694 +    struct elan3_ptbl_gr    *Level3PtblGroupHand;              /* Hand for ptbl stealing */
80695 +
80696 +    /*
80697 +     * Per-Context Information structures.
80698 +     */
80699 +    struct elan3_info     *Infos;                              /* List of "infos" for this device */
80700 +
80701 +    char                    LinkShutdown;                       /* link forced into reset by panic/shutdown/dump */
80702 +
80703 +    /*
80704 +     * Device statistics.
80705 +     */
80706 +    ELAN3_STATS                    Stats;
80707 +    ELAN_STATS_IDX          StatsIndex;
80708 +
80709 +    struct {
80710 +       E3_Regs            *RegPtr;
80711 +       char               *Sdram[ELAN3_SDRAM_NUM_BANKS];
80712 +    } PanicState;
80713 +} ELAN3_DEV;
80714 +
80715 +#define ELAN3_DEV_CTX_TABLE(dev,ctxtn) ( (dev)->CtxtTable[ (ctxtn) &  MAX_ROOT_CONTEXT_MASK] )
80716 +
80717 +/* macros for accessing dev->RegPtr.Tags/Sets. */
80718 +#define write_cache_tag(dev,what,val)  writeq (val, dev->RegPtr + offsetof (E3_Regs, Tags.what))
80719 +#define read_cache_tag(dev,what)       readq (dev->RegPtr + offsetof (E3_Regs, Tags.what))
80720 +#define write_cache_set(dev,what,val)  writeq (val, dev->RegPtr + offsetof (E3_Regs, Sets.what))
80721 +#define read_cache_set(dev,what)       readq (dev->RegPtr + offsetof (E3_Regs, Sets.what))
80722 +
80723 +/* macros for accessing dev->RegPtr.Regs. */
80724 +#define write_reg64(dev,what,val)      writeq (val, dev->RegPtr + offsetof (E3_Regs, Regs.what))
80725 +#define write_reg32(dev,what,val)      writel (val, dev->RegPtr + offsetof (E3_Regs, Regs.what))
80726 +#define read_reg64(dev,what)           readq (dev->RegPtr + offsetof (E3_Regs, Regs.what))
80727 +#define read_reg32(dev,what)           readl (dev->RegPtr + offsetof (E3_Regs, Regs.what))
80728 +
80729 +/* macros for accessing dev->RegPtr.uRegs. */
80730 +#define write_ureg64(dev,what,val)     writeq (val, dev->RegPtr + offsetof (E3_Regs, URegs.what))
80731 +#define write_ureg32(dev,what,val)     writel (val, dev->RegPtr + offsetof (E3_Regs, URegs.what))
80732 +#define read_ureg64(dev,what)          readq (dev->RegPtr + offsetof (E3_Regs, URegs.what))
80733 +#define read_ureg32(dev,what)          readl (dev->RegPtr + offsetof (E3_Regs, URegs.what))
80734 +
80735 +/* macros for accessing dma descriptor/thread regs */
80736 +#define copy_dma_regs(dev, desc) \
80737 +MACRO_BEGIN \
80738 +    register int i;  \
80739 +    for (i = 0; i < sizeof (E3_DMA)/sizeof(E3_uint64); i++) \
80740 +       ((E3_uint64 *) desc)[i] = readq (dev->RegPtr + offsetof (E3_Regs, Regs.Dma_Desc) + i*sizeof (E3_uint64)); \
80741 +MACRO_END
80742 +
80743 +#define copy_thread_regs(dev, regs) \
80744 +MACRO_BEGIN \
80745 +    register int i;  \
80746 +    for (i = 0; i < (32*sizeof (E3_uint32))/sizeof(E3_uint64); i++) \
80747 +       ((E3_uint64 *) regs)[i] = readq (dev->RegPtr + offsetof (E3_Regs, Regs.Globals[0]) + i*sizeof (E3_uint64)); \
80748 +MACRO_END
80749 +
80750 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock, 
80751 +                         _E3_DataBusMap::Exts _E3_DataBusMap::Input_Context_Fil_Flush
80752 +                         elan3_dev::CurrentCommandPortTrap elan3_dev::HaltAllCount elan3_dev::HaltDmaDequeueCount
80753 +                         elan3_dev::FlushCommandCount elan3_dev::DiscardAllCount elan3_dev::DiscardNonContext0Count
80754 +                         elan3_dev::HaltOperations elan3_dev::HaltOperationsMask))
80755 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::TlbLock, 
80756 +                         _E3_DataBusMap::Cache_Control_Reg))
80757 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::InfoLock, 
80758 +                         elan3_dev::Infos elan3_dev::InfoTable))
80759 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::FreeHaltLock, 
80760 +                         elan3_dev::FreeHaltOperations elan3_dev::NumHaltOperations elan3_dev::ReservedHaltOperations))
80761 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::PageFreeListLock, 
80762 +                         elan3_dev::PageFreeList elan3_dev::PageFreeListSize))
80763 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level1PtblLock,
80764 +                         elan3_dev::Level1PtblTotal elan3_dev::Level1PtblFreeCount elan3_dev::Level1PtblFreeList))
80765 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level2PtblLock,
80766 +                         elan3_dev::Level2PtblTotal elan3_dev::Level2PtblFreeCount elan3_dev::Level2PtblFreeList))
80767 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level3PtblLock,
80768 +                         elan3_dev::Level3PtblTotal elan3_dev::Level3PtblFreeCount elan3_dev::Level3PtblFreeList))
80769 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::PtblGroupLock,
80770 +                         elan3_dev::Level1PtblGroupList elan3_dev::Level2PtblGroupList elan3_dev::Level3PtblGroupList))
80771 +
80772 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_dev::InfoTable elan3_dev::Level1PtblFreeList
80773 +                                elan3_dev::Level2PtblFreeList elan3_dev::Level3PtblFreeList))
80774 +
80775 +_NOTE(LOCK_ORDER(elan3_dev::InfoLock elan3_dev::IntrLock))
80776 +_NOTE(LOCK_ORDER(as::a_lock elan3_dev::InfoLock))
80777 +_NOTE(LOCK_ORDER(as::a_lock elan3_dev::IntrLock))
80778 +
80779 +#define SET_INT_MASK(dev,Mask)         MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev)->InterruptMask = (Mask)));  mmiob(); MACRO_END
80780 +#define ENABLE_INT_MASK(dev, bits)     MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev->InterruptMask |= (bits)))); mmiob(); MACRO_END
80781 +#define DISABLE_INT_MASK(dev, bits)    MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev->InterruptMask &= ~(bits)))); mmiob(); MACRO_END
80782 +
80783 +#define INIT_SCHED_STATUS(dev, val) \
80784 +MACRO_BEGIN \
80785 +       (dev)->SchCntReg = (val); \
80786 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
80787 +       mmiob(); \
80788 +MACRO_END
80789 +
80790 +#define SET_SCHED_STATUS(dev, val) \
80791 +MACRO_BEGIN \
80792 +       ASSERT (((val) & HaltStopAndExtTestMask) == (val)); \
80793 +       (dev)->SchCntReg |= (val); \
80794 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
80795 +        mmiob (); \
80796 +MACRO_END
80797 +
80798 +#define CLEAR_SCHED_STATUS(dev, val) \
80799 +MACRO_BEGIN \
80800 +       ASSERT (((val) & HaltStopAndExtTestMask) == (val)); \
80801 +       (dev)->SchCntReg &= ~(val); \
80802 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
80803 +        mmiob(); \
80804 +MACRO_END
80805 +
80806 +#define MODIFY_SCHED_STATUS(dev, SetBits, ClearBits) \
80807 +MACRO_BEGIN \
80808 +       ASSERT ((((SetBits)|(ClearBits)) & HaltStopAndExtTestMask) == ((SetBits)|(ClearBits))); \
80809 +       (dev)->SchCntReg = (((dev)->SchCntReg | (SetBits)) & ~(ClearBits)); \
80810 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
80811 +       mmiob(); \
80812 +MACRO_END
80813 +
80814 +#define PULSE_SCHED_STATUS(dev, RestartBits) \
80815 +MACRO_BEGIN \
80816 +       ASSERT (((RestartBits) & HaltStopAndExtTestMask) == 0); \
80817 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg | (RestartBits)); \
80818 +       mmiob(); \
80819 +MACRO_END
80820 +
80821 +#define SET_SCHED_LINK_VALUE(dev, enabled, val) \
80822 +MACRO_BEGIN \
80823 +       (dev)->SchCntReg = (((dev)->SchCntReg & HaltAndStopMask) | ((enabled) ? LinkBoundaryScan : 0) | LinkSetValue(val, 0)); \
80824 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
80825 +       mmiob(); \
80826 +MACRO_END
80827 +
80828 +#ifdef DEBUG_ASSERT
80829 +#  define ELAN3_ASSERT(dev, EX)        ((void)((EX) || elan3_assfail(dev, #EX, __FILE__, __LINE__)))
80830 +#else
80831 +#  define ELAN3_ASSERT(dev, EX)
80832 +#endif
80833 +
80834 +/* elandev_generic.c */
80835 +extern int        InitialiseElan (ELAN3_DEV *dev, ioaddr_t CmdPort);
80836 +extern void       FinaliseElan (ELAN3_DEV *dev);
80837 +extern int        InterruptHandler (ELAN3_DEV *dev);
80838 +extern void       PollForDmaHungup (void *arg);
80839 +
80840 +extern int        SetLinkBoundaryScan (ELAN3_DEV *dev);
80841 +extern void       ClearLinkBoundaryScan (ELAN3_DEV *dev);
80842 +extern int        WriteBoundaryScanValue (ELAN3_DEV *dev, int value);
80843 +extern int        ReadBoundaryScanValue(ELAN3_DEV *dev, int link);
80844 +
80845 +extern int        ReadVitalProductData (ELAN3_DEV *dev, int *CasLatency);
80846 +
80847 +extern struct elan3_ptbl_gr *ElanGetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset);
80848 +extern void       ElanSetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset, struct elan3_ptbl_gr *ptg);
80849 +
80850 +extern void       ElanFlushTlb (ELAN3_DEV *dev);
80851 +
80852 +extern void       SetSchedStatusRegister (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp);
80853 +extern void      FreeHaltOperation (ELAN3_DEV *dev, ELAN3_HALTOP *op);
80854 +extern int       ReserveHaltOperations (ELAN3_DEV *dev, int count, int cansleep);
80855 +extern void      ReleaseHaltOperations (ELAN3_DEV *dev, int count);
80856 +extern void      ProcessHaltOperations (ELAN3_DEV *dev, E3_uint32 Pend);
80857 +extern void      QueueHaltOperation (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp,
80858 +                                     E3_uint32 ReqMask, void (*Function)(ELAN3_DEV *, void *), void *Arguement);
80859 +
80860 +extern int        ComputePosition (ELAN_POSITION *pos, unsigned NodeId, unsigned NumNodes, unsigned numDownLinksVal);
80861 +
80862 +extern caddr_t   MiToName (int mi);
80863 +extern void      ElanBusError (ELAN3_DEV *dev);
80864 +
80865 +extern void      TriggerLsa (ELAN3_DEV *dev);
80866 +
80867 +extern ELAN3_DEV  *elan3_device (int instance);
80868 +extern int       DeviceRegisterSize (ELAN3_DEV *dev, int rnumber, int *sizep);
80869 +extern int       MapDeviceRegister (ELAN3_DEV *dev, int rnumber, ioaddr_t *addrp, int offset, 
80870 +                                    int len, DeviceMappingHandle *handlep);
80871 +extern void       UnmapDeviceRegister (ELAN3_DEV *dev, DeviceMappingHandle *handlep);
80872 +
80873 +
80874 +/* sdram.c */
80875 +/* sdram accessing functions - define 4 different types for 8,16,32,64 bit accesses */
80876 +extern unsigned char      elan3_sdram_readb (ELAN3_DEV *dev, sdramaddr_t ptr);
80877 +extern unsigned short     elan3_sdram_readw (ELAN3_DEV *dev, sdramaddr_t ptr);
80878 +extern unsigned int       elan3_sdram_readl (ELAN3_DEV *dev, sdramaddr_t ptr);
80879 +extern unsigned long long elan3_sdram_readq (ELAN3_DEV *dev, sdramaddr_t ptr);
80880 +extern void               elan3_sdram_writeb (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned char val);
80881 +extern void               elan3_sdram_writew (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned short val);
80882 +extern void               elan3_sdram_writel (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned int val);
80883 +extern void               elan3_sdram_writeq (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned long long val);
80884 +
80885 +extern void              elan3_sdram_zerob_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
80886 +extern void              elan3_sdram_zerow_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
80887 +extern void              elan3_sdram_zerol_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
80888 +extern void              elan3_sdram_zeroq_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
80889 +
80890 +extern void               elan3_sdram_copyb_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
80891 +extern void               elan3_sdram_copyw_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
80892 +extern void               elan3_sdram_copyl_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
80893 +extern void               elan3_sdram_copyq_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
80894 +extern void               elan3_sdram_copyb_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
80895 +extern void               elan3_sdram_copyw_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
80896 +extern void               elan3_sdram_copyl_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
80897 +extern void               elan3_sdram_copyq_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
80898 +
80899 +extern void              elan3_sdram_init (ELAN3_DEV *dev);
80900 +extern void               elan3_sdram_fini (ELAN3_DEV *dev);
80901 +extern void              elan3_sdram_add (ELAN3_DEV *dev, sdramaddr_t base, sdramaddr_t top);
80902 +extern sdramaddr_t        elan3_sdram_alloc (ELAN3_DEV *dev, int nbytes);
80903 +extern void               elan3_sdram_free (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
80904 +extern physaddr_t         elan3_sdram_to_phys (ELAN3_DEV *dev, sdramaddr_t addr);
80905 +
80906 +/* cproc.c */
80907 +extern void      HandleCProcTrap (ELAN3_DEV *dev, E3_uint32 Pend, E3_uint32 *Mask);
80908 +
80909 +/* iproc.c */
80910 +extern void      HandleIProcTrap (ELAN3_DEV *dev, int Channel, E3_uint32 Pend, sdramaddr_t FaultSaveOff, 
80911 +                                  sdramaddr_t TransactionsOff, sdramaddr_t DataOff);
80912 +
80913 +/* tproc.c */
80914 +extern int       HandleTProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits);
80915 +extern void      DeliverTProcTrap (ELAN3_DEV *dev, struct thread_trap *threadTrap, E3_uint32 Pend);
80916 +
80917 +/* dproc.c */
80918 +extern int       HandleDProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits);
80919 +extern void      DeliverDProcTrap (ELAN3_DEV *dev, struct dma_trap *dmaTrap, E3_uint32 Pend);
80920 +
80921 +#if defined(LINUX)
80922 +/* procfs_linux.h */
80923 +extern struct proc_dir_entry *elan3_procfs_root;
80924 +extern struct proc_dir_entry *elan3_config_root;
80925 +
80926 +extern void elan3_procfs_init(void);
80927 +extern void elan3_procfs_fini(void);
80928 +extern void elan3_procfs_device_init (ELAN3_DEV *dev);
80929 +extern void elan3_procfs_device_fini (ELAN3_DEV *dev);
80930 +#endif /* defined(LINUX) */
80931 +
80932 +/* elan3_osdep.c */
80933 +extern int        BackToBackMaster;
80934 +extern int        BackToBackSlave;
80935 +
80936 +#define ELAN_REG_REC_MAX (100)
80937 +#define ELAN_REG_REC(REG)  {                                         \
80938 +elan_reg_rec_file [elan_reg_rec_index] = __FILE__;                   \
80939 +elan_reg_rec_line [elan_reg_rec_index] = __LINE__;                   \
80940 +elan_reg_rec_reg  [elan_reg_rec_index] = REG;                        \
80941 +elan_reg_rec_cpu  [elan_reg_rec_index] = smp_processor_id();         \
80942 +elan_reg_rec_lbolt[elan_reg_rec_index] = lbolt;                      \
80943 +elan_reg_rec_index = ((elan_reg_rec_index+1) % ELAN_REG_REC_MAX);}
80944 +
80945 +extern char *    elan_reg_rec_file [ELAN_REG_REC_MAX];
80946 +extern int       elan_reg_rec_line [ELAN_REG_REC_MAX];
80947 +extern long      elan_reg_rec_lbolt[ELAN_REG_REC_MAX];
80948 +extern int       elan_reg_rec_cpu  [ELAN_REG_REC_MAX];
80949 +extern E3_uint32 elan_reg_rec_reg  [ELAN_REG_REC_MAX];
80950 +extern int       elan_reg_rec_index;
80951
80952 +#endif /* __KERNEL__ */
80953 +
80954 +
80955 +#define ELAN3_PROCFS_ROOT          "/proc/qsnet/elan3"
80956 +#define ELAN3_PROCFS_VERSION       "/proc/qsnet/elan3/version"
80957 +#define ELAN3_PROCFS_DEBUG         "/proc/qsnet/elan3/config/elandebug"
80958 +#define ELAN3_PROCFS_DEBUG_CONSOLE "/proc/qsnet/elan3/config/elandebug_console"
80959 +#define ELAN3_PROCFS_DEBUG_BUFFER  "/proc/qsnet/elan3/config/elandebug_buffer"
80960 +#define ELAN3_PROCFS_MMU_DEBUG     "/proc/qsnet/elan3/config/elan3mmu_debug"
80961 +#define ELAN3_PROCFS_PUNT_LOOPS    "/proc/qsnet/elan3/config/eventint_punt_loops"
80962 +
80963 +#define ELAN3_PROCFS_DEVICE_STATS_FMT    "/proc/qsnet/elan3/device%d/stats"
80964 +#define ELAN3_PROCFS_DEVICE_POSITION_FMT "/proc/qsnet/elan3/device%d/position"
80965 +#define ELAN3_PROCFS_DEVICE_NODESET_FMT  "/proc/qsnet/elan3/device%d/nodeset"
80966 +
80967 +#endif /* __ELAN3_ELANDEV_H */
80968 +
80969 +/*
80970 + * Local variables:
80971 + * c-file-style: "stroustrup"
80972 + * End:
80973 + */
80974 Index: linux-2.4.21/include/elan3/elandev_linux.h
80975 ===================================================================
80976 --- linux-2.4.21.orig/include/elan3/elandev_linux.h     2004-02-23 16:02:56.000000000 -0500
80977 +++ linux-2.4.21/include/elan3/elandev_linux.h  2005-06-01 23:12:54.721420560 -0400
80978 @@ -0,0 +1,56 @@
80979 +/*
80980 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80981 + *
80982 + *    For licensing information please see the supplied COPYING file
80983 + *
80984 + */
80985 +
80986 +#ifndef __ELANDEV_LINUX_H
80987 +#define __ELANDEV_LINUX_H
80988 +
80989 +#ident "$Id: elandev_linux.h,v 1.11 2003/09/24 13:57:24 david Exp $"
80990 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandev_linux.h,v $*/
80991 +
80992 +#ifdef __KERNEL__
80993 +#include <linux/mm.h>
80994 +#include <linux/sched.h>
80995 +#include <linux/pci.h>
80996 +#endif
80997 +
80998 +#define ELAN3_MAJOR              60
80999 +#define ELAN3_NAME               "elan3"
81000 +#define ELAN3_MAX_CONTROLLER     16                      /* limited to 4 bits */
81001
81002 +#define ELAN3_MINOR_DEVNUM(m)    ((m) & 0x0f)            /* card number */
81003 +#define ELAN3_MINOR_DEVFUN(m)    (((m) >> 4) & 0x0f)     /* function */
81004 +#define ELAN3_MINOR_CONTROL      0                       /* function values */
81005 +#define ELAN3_MINOR_MEM          1
81006 +#define ELAN3_MINOR_USER               2
81007
81008 +typedef void                   *DeviceMappingHandle;
81009 +
81010 +/* task and ctxt handle types */
81011 +typedef struct mm_struct       *TaskHandle;
81012 +typedef int                    CtxtHandle;
81013
81014 +#define ELAN3_MY_TASK_HANDLE() (current->mm)
81015 +#define KERNEL_TASK_HANDLE()   (get_kern_mm())
81016
81017 +/*
81018 + * OS-dependent component of ELAN3_DEV struct.
81019 + */
81020 +typedef struct elan3_dev_osdep
81021 +{
81022 +       struct pci_dev  *pci;                   /* PCI config data */
81023 +       int             ControlDeviceOpen;      /* flag to indicate control */
81024 +                                               /*   device open */
81025 +       struct proc_dir_entry *procdir;
81026 +} ELAN3_DEV_OSDEP;
81027 +
81028 +#endif /* __ELANDEV_LINUX_H */
81029 +
81030 +/*
81031 + * Local variables:
81032 + * c-file-style: "stroustrup"
81033 + * End:
81034 + */
81035 Index: linux-2.4.21/include/elan3/elanio.h
81036 ===================================================================
81037 --- linux-2.4.21.orig/include/elan3/elanio.h    2004-02-23 16:02:56.000000000 -0500
81038 +++ linux-2.4.21/include/elan3/elanio.h 2005-06-01 23:12:54.722420408 -0400
81039 @@ -0,0 +1,226 @@
81040 +/*
81041 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81042 + *
81043 + *    For licensing information please see the supplied COPYING file
81044 + *
81045 + */
81046 +
81047 +#ifndef __ELAN3_ELAN3IO_H
81048 +#define __ELAN3_ELAN3IO_H
81049 +
81050 +#ident "$Id: elanio.h,v 1.19 2003/12/08 15:40:26 mike Exp $"
81051 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanio.h,v $*/
81052 +
81053 +#define ELAN3IO_CONTROL_PATHNAME       "/dev/elan3/control%d"
81054 +#define ELAN3IO_MEM_PATHNAME   "/dev/elan3/mem%d"
81055 +#define ELAN3IO_USER_PATHNAME  "/dev/elan3/user%d"
81056 +#define ELAN3IO_SDRAM_PATHNAME   "/dev/elan3/sdram%d"
81057 +#define ELAN3IO_MAX_PATHNAMELEN        32
81058 +
81059 +/* ioctls on /dev/elan3/control */
81060 +#define ELAN3IO_CONTROL_BASE           0
81061 +
81062 +#define ELAN3IO_SET_BOUNDARY_SCAN      _IO   ('e', ELAN3IO_CONTROL_BASE + 0)
81063 +#define ELAN3IO_CLEAR_BOUNDARY_SCAN    _IO   ('e', ELAN3IO_CONTROL_BASE + 1)
81064 +#define ELAN3IO_READ_LINKVAL           _IOWR ('e', ELAN3IO_CONTROL_BASE + 2, E3_uint32)
81065 +#define ELAN3IO_WRITE_LINKVAL          _IOWR ('e', ELAN3IO_CONTROL_BASE + 3, E3_uint32)
81066 +
81067 +typedef struct elanio_set_debug_struct
81068 +{
81069 +    char       what[32];
81070 +    u_long     value;
81071 +} ELAN3IO_SET_DEBUG_STRUCT;
81072 +#define ELAN3IO_SET_DEBUG              _IOW  ('e', ELAN3IO_CONTROL_BASE + 4, ELAN3IO_SET_DEBUG_STRUCT)
81073 +
81074 +typedef struct elanio_debug_buffer_struct
81075 +{
81076 +    caddr_t    addr;
81077 +    size_t     len;
81078 +} ELAN3IO_DEBUG_BUFFER_STRUCT;
81079 +#define ELAN3IO_DEBUG_BUFFER           _IOWR ('e', ELAN3IO_CONTROL_BASE + 5, ELAN3IO_DEBUG_BUFFER_STRUCT)
81080 +
81081 +typedef struct elanio_neterr_server_struct
81082 +{
81083 +    u_int              elanid;
81084 +    void              *addr;
81085 +    char              *name;
81086 +} ELAN3IO_NETERR_SERVER_STRUCT;
81087 +#define ELAN3IO_NETERR_SERVER          _IOW  ('e', ELAN3IO_CONTROL_BASE + 6, ELAN3IO_NETERR_SERVER_STRUCT)
81088 +#define ELAN3IO_NETERR_FIXUP           _IOWR ('e', ELAN3IO_CONTROL_BASE + 7, NETERR_MSG)
81089 +
81090 +typedef struct elanio_set_position_struct
81091 +{
81092 +    u_int              device;
81093 +    unsigned short      nodeId;
81094 +    unsigned short      numNodes;
81095 +} ELAN3IO_SET_POSITION_STRUCT;
81096 +#define ELAN3IO_SET_POSITION           _IOW ('e', ELAN3IO_CONTROL_BASE + 8, ELAN3IO_SET_POSITION_STRUCT)
81097 +
81098 +#if defined(LINUX)
81099 +
81100 +/* ioctls on /dev/elan3/sdram */
81101 +#define ELAN3IO_SDRAM_BASE             20
81102 +
81103 +/* ioctls on /dev/elan3/user */
81104 +#define ELAN3IO_USER_BASE              30
81105 +
81106 +#define ELAN3IO_FREE                   _IO  ('e', ELAN3IO_USER_BASE + 0)
81107 +
81108 +#define ELAN3IO_ATTACH                 _IOWR('e', ELAN3IO_USER_BASE + 1, ELAN_CAPABILITY)
81109 +#define ELAN3IO_DETACH                 _IO  ('e', ELAN3IO_USER_BASE + 2)
81110 +
81111 +typedef struct elanio_addvp_struct
81112 +{
81113 +    u_int              process;
81114 +    ELAN_CAPABILITY     capability;
81115 +} ELAN3IO_ADDVP_STRUCT;
81116 +#define ELAN3IO_ADDVP                  _IOWR('e', ELAN3IO_USER_BASE + 3, ELAN3IO_ADDVP_STRUCT)
81117 +#define ELAN3IO_REMOVEVP                       _IOW ('e', ELAN3IO_USER_BASE + 4, int)
81118 +
81119 +typedef struct elanio_bcastvp_struct
81120 +{
81121 +    u_int              process;
81122 +    u_int              lowvp;
81123 +    u_int              highvp;
81124 +} ELAN3IO_BCASTVP_STRUCT;
81125 +#define ELAN3IO_BCASTVP                        _IOW ('e', ELAN3IO_USER_BASE + 5, ELAN3IO_BCASTVP_STRUCT)
81126 +
81127 +typedef struct elanio_loadroute_struct
81128 +{
81129 +    u_int              process;
81130 +    E3_uint16          flits[MAX_FLITS];
81131 +} ELAN3IO_LOAD_ROUTE_STRUCT;
81132 +#define ELAN3IO_LOAD_ROUTE             _IOW ('e', ELAN3IO_USER_BASE + 6, ELAN3IO_LOAD_ROUTE_STRUCT)
81133 +
81134 +#define ELAN3IO_PROCESS                        _IO  ('e', ELAN3IO_USER_BASE + 7)
81135 +
81136 +typedef struct elanio_setperm_struct
81137 +{
81138 +    caddr_t            maddr;
81139 +    E3_Addr            eaddr;
81140 +    size_t             len;
81141 +    int                        perm;
81142 +} ELAN3IO_SETPERM_STRUCT;
81143 +#define ELAN3IO_SETPERM                        _IOW ('e', ELAN3IO_USER_BASE + 8, ELAN3IO_SETPERM_STRUCT)
81144 +
81145 +typedef struct elanio_clearperm_struct
81146 +{
81147 +    E3_Addr            eaddr;
81148 +    size_t             len;
81149 +} ELAN3IO_CLEARPERM_STRUCT;
81150 +#define ELAN3IO_CLEARPERM              _IOW ('e', ELAN3IO_USER_BASE + 9, ELAN3IO_CLEARPERM_STRUCT)
81151 +
81152 +typedef struct elanio_changeperm_struct
81153 +{
81154 +    E3_Addr            eaddr;
81155 +    size_t             len;
81156 +    int                        perm;
81157 +} ELAN3IO_CHANGEPERM_STRUCT;
81158 +#define ELAN3IO_CHANGEPERM             _IOW ('e', ELAN3IO_USER_BASE + 10, ELAN3IO_CHANGEPERM_STRUCT)
81159 +
81160 +
81161 +#define ELAN3IO_HELPER_THREAD          _IO  ('e', ELAN3IO_USER_BASE + 11)
81162 +#define ELAN3IO_WAITCOMMAND            _IO  ('e', ELAN3IO_USER_BASE + 12)
81163 +#define ELAN3IO_BLOCK_INPUTTER         _IOW ('e', ELAN3IO_USER_BASE + 13, int)
81164 +#define ELAN3IO_SET_FLAGS              _IOW ('e', ELAN3IO_USER_BASE + 14, int)
81165 +
81166 +#define ELAN3IO_WAITEVENT              _IOW ('e', ELAN3IO_USER_BASE + 15, E3_Event)
81167 +#define ELAN3IO_ALLOC_EVENTCOOKIE      _IOW ('e', ELAN3IO_USER_BASE + 16, EVENT_COOKIE)
81168 +#define ELAN3IO_FREE_EVENTCOOKIE               _IOW ('e', ELAN3IO_USER_BASE + 17, EVENT_COOKIE)
81169 +#define ELAN3IO_ARM_EVENTCOOKIE                _IOW ('e', ELAN3IO_USER_BASE + 18, EVENT_COOKIE)
81170 +#define ELAN3IO_WAIT_EVENTCOOKIE               _IOW ('e', ELAN3IO_USER_BASE + 19, EVENT_COOKIE)
81171 +
81172 +#define ELAN3IO_SWAPSPACE              _IOW ('e', ELAN3IO_USER_BASE + 20, SYS_SWAP_SPACE)
81173 +#define ELAN3IO_EXCEPTION_SPACE                _IOW ('e', ELAN3IO_USER_BASE + 21, SYS_EXCEPTION_SPACE)
81174 +#define ELAN3IO_GET_EXCEPTION          _IOR ('e', ELAN3IO_USER_BASE + 22, SYS_EXCEPTION)
81175 +
81176 +typedef struct elanio_unload_struct
81177 +{
81178 +    void       *addr;
81179 +    size_t      len;
81180 +} ELAN3IO_UNLOAD_STRUCT;
81181 +#define ELAN3IO_UNLOAD                 _IOW ('e', ELAN3IO_USER_BASE + 23, ELAN3IO_UNLOAD_STRUCT)
81182 +
81183 +
81184 +
81185 +typedef struct elanio_getroute_struct
81186 +{
81187 +    u_int              process;
81188 +    E3_uint16          flits[MAX_FLITS];
81189 +} ELAN3IO_GET_ROUTE_STRUCT;
81190 +#define ELAN3IO_GET_ROUTE              _IOW ('e', ELAN3IO_USER_BASE + 24, ELAN3IO_GET_ROUTE_STRUCT)
81191 +
81192 +typedef struct elanio_resetroute_struct
81193 +{
81194 +    u_int              process;
81195 +} ELAN3IO_RESET_ROUTE_STRUCT;
81196 +#define ELAN3IO_RESET_ROUTE            _IOW ('e', ELAN3IO_USER_BASE + 25, ELAN3IO_RESET_ROUTE_STRUCT)
81197 +
81198 +typedef struct elanio_checkroute_struct
81199 +{
81200 +    u_int              process;
81201 +    E3_uint32           routeError;
81202 +    E3_uint16          flits[MAX_FLITS];
81203 +} ELAN3IO_CHECK_ROUTE_STRUCT;
81204 +#define ELAN3IO_CHECK_ROUTE            _IOW ('e', ELAN3IO_USER_BASE + 26, ELAN3IO_CHECK_ROUTE_STRUCT)
81205 +
81206 +typedef struct elanio_vp2nodeId_struct
81207 +{
81208 +    u_int              process;
81209 +    unsigned short      nodeId;
81210 +    ELAN_CAPABILITY    cap;
81211 +} ELAN3IO_VP2NODEID_STRUCT;
81212 +#define ELAN3IO_VP2NODEID      _IOWR('e', ELAN3IO_USER_BASE + 27, ELAN3IO_VP2NODEID_STRUCT)
81213 +
81214 +#define ELAN3IO_SET_SIGNAL     _IOW ('e', ELAN3IO_USER_BASE + 28, int)
81215 +
81216 +typedef struct elanio_process_2_location_struct
81217 +{
81218 +    u_int              process;
81219 +    ELAN_LOCATION       loc;
81220 +} ELAN3IO_PROCESS_2_LOCATION_STRUCT;
81221 +#define ELAN3IO_PROCESS_2_LOCATION     _IOW ('e', ELAN3IO_USER_BASE + 29, ELAN3IO_PROCESS_2_LOCATION_STRUCT)
81222 +
81223 +
81224 +
81225 +/* ioctls on all device */
81226 +#define ELAN3IO_GENERIC_BASE           100
81227 +typedef struct elanio_get_devinfo_struct
81228 +{
81229 +    ELAN_DEVINFO *devinfo;
81230 +} ELAN3IO_GET_DEVINFO_STRUCT;
81231 +#define ELAN3IO_GET_DEVINFO            _IOR ('e', ELAN3IO_GENERIC_BASE + 0, ELAN_DEVINFO)
81232 +
81233 +typedef struct elanio_get_position_struct
81234 +{
81235 +    ELAN_POSITION *position;
81236 +} ELAN3IO_GET_POSITION_STRUCT;
81237 +#define ELAN3IO_GET_POSITION             _IOR ('e', ELAN3IO_GENERIC_BASE + 1, ELAN_POSITION)
81238 +
81239 +typedef struct elanio_stats_struct
81240 +{
81241 +    int                which;
81242 +    void       *ptr;
81243 +} ELAN3IO_STATS_STRUCT;
81244 +#define ELAN3IO_STATS                  _IOR ('e', ELAN3IO_GENERIC_BASE + 2, ELAN3IO_STATS_STRUCT)
81245 +#  define ELAN3_SYS_STATS_DEVICE       0
81246 +#  define ELAN3_SYS_STATS_MMU          1
81247 +
81248 +/* offsets on /dev/elan3/control */
81249 +
81250 +/* offsets on /dev/elan3/mem */
81251 +
81252 +/* page numbers on /dev/elan3/user */
81253 +#define ELAN3IO_OFF_COMMAND_PAGE               0
81254 +#define ELAN3IO_OFF_FLAG_PAGE          1
81255 +#define ELAN3IO_OFF_UREG_PAGE          2
81256 +
81257 +#endif /* LINUX */
81258 +
81259 +#endif /* __ELAN3_ELAN3IO_H */
81260 +
81261 +/*
81262 + * Local variables:
81263 + * c-file-style: "stroustrup"
81264 + * End:
81265 + */
81266 Index: linux-2.4.21/include/elan3/elanregs.h
81267 ===================================================================
81268 --- linux-2.4.21.orig/include/elan3/elanregs.h  2004-02-23 16:02:56.000000000 -0500
81269 +++ linux-2.4.21/include/elan3/elanregs.h       2005-06-01 23:12:54.724420104 -0400
81270 @@ -0,0 +1,1063 @@
81271 +/*
81272 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81273 + *
81274 + *    For licensing information please see the supplied COPYING file
81275 + *
81276 + */
81277 +
81278 +/*
81279 + * Header file for internal slave mapping of the ELAN3 registers
81280 + */
81281 +
81282 +#ifndef _ELAN3_ELANREGS_H
81283 +#define _ELAN3_ELANREGS_H
81284 +
81285 +#ident "$Id: elanregs.h,v 1.87 2004/04/22 12:27:21 david Exp $"
81286 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanregs.h,v $*/
81287 +
81288 +#include <elan3/e3types.h>
81289 +#include <elan3/dma.h>
81290 +#include <elan3/elanuregs.h>
81291 +
81292 +#define MAX_ROOT_CONTEXT_MASK  0xfff
81293 +#define SYS_CONTEXT_BIT                0x1000
81294 +#define ALL_CONTEXT_BITS       (MAX_ROOT_CONTEXT_MASK | SYS_CONTEXT_BIT)
81295 +#define ROOT_TAB_OFFSET(Cntxt) (((Cntxt) & MAX_ROOT_CONTEXT_MASK) << 4)
81296 +#define CLEAR_SYS_BIT(Cntxt)   ((Cntxt) & ~SYS_CONTEXT_BIT)
81297 +
81298 +#define E3_CACHELINE_SIZE      (32)
81299 +#define E3_CACHE_SIZE          (8192)
81300 +
81301 +typedef volatile struct _E3_CacheSets
81302 +{
81303 +   E3_uint64   Set0[256];      /* 2k bytes per set */
81304 +   E3_uint64   Set1[256];      /* 2k bytes per set */
81305 +   E3_uint64   Set2[256];      /* 2k bytes per set */
81306 +   E3_uint64   Set3[256];      /* 2k bytes per set */
81307 +} E3_CacheSets;
81308 +
81309 +typedef union e3_cache_tag
81310 +{
81311 +   E3_uint64   Value;
81312 +   struct {
81313 +#if defined(__LITTLE_ENDIAN__)
81314 +       E3_uint32 pad2:8;               /* Undefined value when read */
81315 +       E3_uint32 LineError:1;          /* A line error has occurred */
81316 +       E3_uint32 Modified:1;           /* Cache data is modified */
81317 +       E3_uint32 FillPending:1;                /* Pipelined fill occurring*/
81318 +       E3_uint32 AddrTag27to11:17;     /* Tag address bits 27 to 11 */
81319 +       E3_uint32 pad1:4;               /* Undefined value when read */
81320 +       E3_uint32 pad0;                 /* Undefined value when read */
81321 +#else
81322 +       E3_uint32 pad0;                 /* Undefined value when read */
81323 +       E3_uint32 pad1:4;               /* Undefined value when read */
81324 +       E3_uint32 AddrTag27to11:17;     /* Tag address bits 27 to 11 */
81325 +       E3_uint32 FillPending:1;                /* Pipelined fill occurring*/
81326 +       E3_uint32 Modified:1;           /* Cache data is modified */
81327 +       E3_uint32 LineError:1;          /* A line error has occurred */
81328 +       E3_uint32 pad2:8;               /* Undefined value when read */
81329 +#endif
81330 +   } s;
81331 +} E3_CacheTag;
81332 +
81333 +#define E3_NumCacheLines       64
81334 +#define E3_NumCacheSets                4
81335 +
81336 +typedef volatile struct _E3_CacheTags
81337 +{
81338 +   E3_CacheTag Tags[E3_NumCacheLines][E3_NumCacheSets];        /* 2k bytes per set */
81339 +} E3_CacheTags;
81340 +
81341 +typedef union E3_IProcStatus_Reg
81342 +{
81343 +    E3_uint32 Status;
81344 +    struct
81345 +    {
81346 +#if defined(__LITTLE_ENDIAN__)
81347 +       E3_uint32 TrapType:8;           /* iprocs trap ucode address */
81348 +       E3_uint32 SuspendAddr:8;        /* iprocs suspend address */
81349 +       E3_uint32 EopType:2;            /* Type of Eop Received */
81350 +       E3_uint32 QueueingPacket:1;     /* receiving a queueing packet */
81351 +       E3_uint32 AckSent:1;            /* a packet ack has been sent */
81352 +       E3_uint32 Reject:1;             /* a packet nack has been sent */
81353 +       E3_uint32 CrcStatus:2;          /* Crc Status value */
81354 +       E3_uint32 BadLength:1;          /* Eop was received in a bad place */
81355 +       E3_uint32 Chan1:1;              /* This packet received on v chan1 */
81356 +       E3_uint32 First:1;              /* This is the first transaction in the packet */
81357 +       E3_uint32 Last:1;               /* This is the last transaction in the packet */
81358 +       E3_uint32 Unused:2;
81359 +       E3_uint32 WakeupFunction:3;     /* iprocs wakeup function */
81360 +#else
81361 +       E3_uint32 WakeupFunction:3;     /* iprocs wakeup function */
81362 +       E3_uint32 Unused:2;
81363 +       E3_uint32 Last:1;               /* This is the last transaction in the packet */
81364 +       E3_uint32 First:1;              /* This is the first transaction in the packet */
81365 +       E3_uint32 Chan1:1;              /* This packet received on v chan1 */
81366 +       E3_uint32 BadLength:1;          /* Eop was received in a bad place */
81367 +       E3_uint32 CrcStatus:2;          /* Crc Status value */
81368 +       E3_uint32 Reject:1;             /* a packet nack has been sent */
81369 +       E3_uint32 AckSent:1;            /* a packet ack has been sent */
81370 +       E3_uint32 QueueingPacket:1;     /* receiving a queueing packet */
81371 +       E3_uint32 EopType:2;            /* Type of Eop Received */
81372 +       E3_uint32 SuspendAddr:8;        /* iprocs suspend address */
81373 +       E3_uint32 TrapType:8;           /* iprocs trap ucode address */
81374 +#endif
81375 +    } s;
81376 +} E3_IProcStatus_Reg;
81377 +
81378 +#define CRC_STATUS_GOOD    (0 << 21)
81379 +#define CRC_STATUS_DISCARD (1 << 21)
81380 +#define CRC_STATUS_ERROR   (2 << 21)
81381 +#define CRC_STATUS_BAD     (3 << 21)
81382 +
81383 +#define CRC_MASK          (3 << 21)
81384 +
81385 +#define EOP_GOOD          (1 << 16)
81386 +#define EOP_BADACK        (2 << 16)
81387 +#define EOP_ERROR_RESET           (3 << 16)
81388 +
81389 +#define E3_IPS_LastTrans       (1 << 26)
81390 +#define E3_IPS_FirstTrans      (1 << 25)
81391 +#define E3_IPS_VChan1          (1 << 24)
81392 +#define E3_IPS_BadLength       (1 << 23)
81393 +#define E3_IPS_CrcMask         (3 << 21)
81394 +#define E3_IPS_Rejected                (1 << 20)
81395 +#define E3_IPS_AckSent         (1 << 19)
81396 +#define E3_IPS_QueueingPacket  (1 << 18)
81397 +#define E3_IPS_EopType         (3 << 16)
81398 +
81399 +typedef union E3_Status_Reg
81400 +{
81401 +    E3_uint32 Status;
81402 +    struct
81403 +    {
81404 +#if defined(__LITTLE_ENDIAN__)
81405 +       E3_uint32 TrapType:8;           /* procs trap ucode address */
81406 +       E3_uint32 SuspendAddr:8;        /* procs suspend address */
81407 +       E3_uint32 Context:13;           /* procs current context */
81408 +       E3_uint32 WakeupFunction:3;     /* procs wakeup function */
81409 +#else
81410 +       E3_uint32 WakeupFunction:3;     /* procs wakeup function */
81411 +       E3_uint32 Context:13;           /* procs current context */
81412 +       E3_uint32 SuspendAddr:8;        /* procs suspend address */
81413 +       E3_uint32 TrapType:8;           /* procs trap ucode address */
81414 +#endif
81415 +    } s;
81416 +} E3_Status_Reg;
81417 +
81418 +/* values for WakeupFunction */
81419 +#define SleepOneTick                   0
81420 +#define WakeupToSendTransOrEop         1
81421 +#define SleepOneTickThenRunnable       2
81422 +#define WakeupNever                    4
81423 +/* extra dma wakeup functions */
81424 +#define WakupeToSendTransOrEop         1
81425 +#define WakeupForPacketAck             3
81426 +#define WakeupToSendTrans              5
81427 +/* extra thread wakup function */
81428 +#define WakeupStopped                  3
81429 +/* extra cproc wakup function */
81430 +#define WakeupSetEvent                 3
81431 +
81432 +#define GET_STATUS_CONTEXT(Ptr)      ((Ptr.Status >> 16) & 0x1fff)
81433 +#define GET_STATUS_SUSPEND_ADDR(Ptr) ((Ptr.Status >> 8) & 0xff)
81434 +#define GET_STATUS_TRAPTYPE(Ptr)     ((E3_uint32)(Ptr.Status & 0xff))
81435 +
81436 +/*
81437 + * Interrupt register bits
81438 + */
81439 +#define INT_PciMemErr                  (1<<15) /* Pci memory access error */
81440 +#define INT_SDRamInt                   (1<<14) /* SDRam ECC interrupt */
81441 +#define INT_EventInterrupt             (1<<13) /* Event Interrupt */
81442 +#define INT_LinkError                  (1<<12) /* Link Error */
81443 +#define INT_ComQueue                   (1<<11) /* a comm queue half full */
81444 +#define INT_TProcHalted                        (1<<10) /* Tproc Halted */
81445 +#define INT_DProcHalted                        (1<<9) /* Dmas Halted */
81446 +#define INT_DiscardingNonSysCntx       (1<<8) /* Inputters Discarding Non-SysCntx */
81447 +#define INT_DiscardingSysCntx          (1<<7) /* Inputters Discarding SysCntx */
81448 +#define INT_TProc                      (1<<6) /* tproc interrupt */
81449 +#define INT_CProc                      (1<<5) /* cproc interrupt */
81450 +#define INT_DProc                      (1<<4) /* dproc interrupt */
81451 +#define INT_IProcCh1NonSysCntx         (1<<3) /* iproc non-SysCntx interrupt */
81452 +#define INT_IProcCh1SysCntx            (1<<2) /* iproc SysCntx interrupt */
81453 +#define INT_IProcCh0NonSysCntx         (1<<1) /* iproc non-SysCntx interrupt */
81454 +#define INT_IProcCh0SysCntx            (1<<0) /* iproc SysCntx interrupt */
81455 +
81456 +#define INT_Inputters          (INT_IProcCh0SysCntx | INT_IProcCh0NonSysCntx | INT_IProcCh1SysCntx | INT_IProcCh1NonSysCntx)
81457 +#define INT_Discarding         (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)
81458 +#define INT_Halted             (INT_DProcHalted | INT_TProcHalted)
81459 +#define INT_ErrorInterrupts    (INT_PciMemErr | INT_SDRamInt | INT_LinkError)
81460 +
81461 +/*
81462 + * Link state bits.
81463 + */
81464 +#define LS_LinkNotReady        (1 << 0) /* Link is in reset or recovering from an error */
81465 +#define LS_Locked      (1 << 1) /* Linkinput PLL is locked */
81466 +#define LS_LockError   (1 << 2) /* Linkinput PLL was unable to lock onto the input clock. */
81467 +#define LS_DeskewError (1 << 3) /* Linkinput was unable to Deskew all the inputs. (Broken wire?) */
81468 +#define LS_PhaseError  (1 << 4) /* Linkinput Phase alignment error. */
81469 +#define LS_DataError   (1 << 5) /* Received value was neither good data or a token. */
81470 +#define LS_FifoOvFlow0 (1 << 6) /* Channel 0 input fifo overflowed. */
81471 +#define LS_FifoOvFlow1 (1 << 7) /* Channel 1 input fifo overflowed. */
81472 +
81473 +/*
81474 + * Link State Constant defines, used for writing to LinkSetValue
81475 + */
81476 +
81477 +#define LRS_DataDel0           0x0
81478 +#define LRS_DataDel1           0x1
81479 +#define LRS_DataDel2           0x2
81480 +#define LRS_DataDel3           0x3
81481 +#define LRS_DataDel4           0x4
81482 +#define LRS_DataDel5           0x5
81483 +#define LRS_DataDel6           0x6
81484 +#define LRS_DataDel7           0x7
81485 +#define LRS_DataDel8           0x8
81486 +#define LRS_PllDelValue                0x9
81487 +#define LRS_ClockEven          0xA
81488 +#define LRS_ClockOdd           0xB
81489 +#define LRS_ErrorLSW           0xC
81490 +#define LRS_ErrorMSW           0xD
81491 +#define LRS_FinCoarseDeskew    0xE
81492 +#define LRS_LinkInValue                0xF
81493 +#define LRS_NumLinkDels         0x10
81494 +
81495 +#define LRS_Pllfast             0x40
81496 +
81497 +union Sched_Status
81498 +{
81499 +    E3_uint32 Status;
81500 +    struct
81501 +    {
81502 +#if defined(__LITTLE_ENDIAN__)
81503 +       E3_uint32 StopNonSysCntxs:1;
81504 +       E3_uint32 FlushCommandQueues:1;
81505 +       E3_uint32 HaltDmas:1;
81506 +       E3_uint32 HaltDmaDequeue:1;
81507 +       E3_uint32 HaltThread:1;
81508 +       E3_uint32 CProcStop:1;
81509 +       E3_uint32 DiscardSysCntxIn:1;
81510 +       E3_uint32 DiscardNonSysCntxIn:1;
81511 +       E3_uint32 RestartCh0SysCntx:1;
81512 +       E3_uint32 RestartCh0NonSysCntx:1;
81513 +       E3_uint32 RestartCh1SysCntx:1;
81514 +       E3_uint32 RestartCh1NonSysCntx:1;
81515 +       E3_uint32 RestartDProc:1;
81516 +       E3_uint32 RestartTProc:1;
81517 +       E3_uint32 RestartCProc:1;
81518 +       E3_uint32 ClearLinkErrorInt:1;
81519 +       E3_uint32 :3;
81520 +       E3_uint32 LinkSetValue:10; 
81521 +       E3_uint32 FixLinkDelays:1;
81522 +       E3_uint32 LinkBoundaryScan:1;
81523 +#else
81524 +       E3_uint32 LinkBoundaryScan:1;
81525 +       E3_uint32 FixLinkDelays:1;
81526 +       E3_uint32 LinkSetValue:10; 
81527 +       E3_uint32 :3;
81528 +       E3_uint32 ClearLinkErrorInt:1;
81529 +       E3_uint32 RestartCProc:1;
81530 +       E3_uint32 RestartTProc:1;
81531 +       E3_uint32 RestartDProc:1;
81532 +       E3_uint32 RestartCh1NonSysCntx:1;
81533 +       E3_uint32 RestartCh1SysCntx:1;
81534 +       E3_uint32 RestartCh0NonSysCntx:1;
81535 +       E3_uint32 RestartCh0SysCntx:1;
81536 +       E3_uint32 DiscardNonSysCntxIn:1;
81537 +       E3_uint32 DiscardSysCntxIn:1;
81538 +       E3_uint32 CProcStop:1;
81539 +       E3_uint32 HaltThread:1;
81540 +       E3_uint32 HaltDmaDequeue:1;
81541 +       E3_uint32 HaltDmas:1;
81542 +       E3_uint32 FlushCommandQueues:1;
81543 +       E3_uint32 StopNonSysCntxs:1;
81544 +#endif
81545 +    } s;
81546 +};
81547 +
81548 +#define LinkBoundaryScan       ((E3_uint32) 1<<31) /* Clears the link error interrupt */
81549 +#define FixLinkDelays          ((E3_uint32) 1<<30) /* Clears the link error interrupt */
81550 +#define LinkSetValue(Val, OldVal) ((E3_uint32) (((Val) & 0x3ff) << 20) | ((OldVal) & ((~0x3ff) << 20)))
81551 +
81552 +#define ClearLinkErrorInt      ((E3_uint32) 1<<16) /* Clears the link error interrupt */
81553 +#define RestartCProc           ((E3_uint32) 1<<15) /* Clears command proc interrupt */
81554 +#define RestartTProc           ((E3_uint32) 1<<14) /* Clears thread interrupt */
81555 +#define RestartDProc           ((E3_uint32) 1<<13) /* Clears dma0 interrupt */
81556 +#define RestartCh1NonSysCntx   ((E3_uint32) 1<<12) /* Clears interrupt */
81557 +#define RestartCh1SysCntx      ((E3_uint32) 1<<11) /* Clears interrupt */
81558 +#define RestartCh0NonSysCntx   ((E3_uint32) 1<<10) /* Clears interrupt */
81559 +#define RestartCh0SysCntx      ((E3_uint32) 1<<9) /* Clears interrupt */
81560 +#define CProcStopped           ((E3_uint32) 1<<9) /* Read value only */
81561 +
81562 +#define TraceSetEvents         ((E3_uint32) 1<<8)
81563 +#define DiscardNonSysCntxIn    ((E3_uint32) 1<<7)
81564 +#define DiscardSysCntxIn       ((E3_uint32) 1<<6)
81565 +#define CProcStop              ((E3_uint32) 1<<5) /* Will empty all the command port queues. */
81566 +#define HaltThread             ((E3_uint32) 1<<4) /* Will stop the thread proc and clear the tproc command queue */
81567 +#define HaltDmaDequeue         ((E3_uint32) 1<<3) /* Will stop the dmaers starting new dma's. */
81568 +#define HaltDmas               ((E3_uint32) 1<<2) /* Will stop the dmaers and clear the dma command queues */
81569 +#define FlushCommandQueues     ((E3_uint32) 1<<1) /* Causes the command ports to be flushed. */
81570 +#define StopNonSysCntxs                ((E3_uint32) 1<<0) /* Prevents a non-SysCntx from starting. */
81571 +
81572 +/* Initial value of schedule status register */
81573 +#define LinkResetToken         0x00F
81574 +
81575 +#define Sched_Initial_Value    (LinkBoundaryScan | (LinkResetToken << 20) | \
81576 +                                DiscardSysCntxIn | DiscardNonSysCntxIn | HaltThread | HaltDmas)
81577 +
81578 +#define StopDmaQueues       (HaltDmaDequeue | HaltDmas | \
81579 +                             DiscardNonSysCntxIn | DiscardSysCntxIn)
81580 +#define CheckDmaQueueStopped (INT_DiscardingNonSysCntx | INT_DiscardingSysCntx | INT_DProcHalted)
81581 +
81582 +#define HaltStopAndExtTestMask 0xfff001ff
81583 +#define HaltAndStopMask                0x000001ff
81584 +
81585 +
81586 +#define DmaComQueueNotEmpty    (1<<0)
81587 +#define ThreadComQueueNotEmpty (1<<1)
81588 +#define EventComQueueNotEmpty  (1<<2)
81589 +#define DmaComQueueHalfFull    (1<<3)
81590 +#define ThreadComQueueHalfFull (1<<4)
81591 +#define EventComQueueHalfFull  (1<<5)
81592 +#define DmaComQueueError       (1<<6)
81593 +#define ThreadComQueueError    (1<<7)
81594 +#define EventComQueueError     (1<<8)
81595 +
81596 +#define ComQueueNotEmpty       (DmaComQueueNotEmpty | ThreadComQueueNotEmpty | EventComQueueNotEmpty)
81597 +#define ComQueueError          (DmaComQueueError | ThreadComQueueError | EventComQueueError)
81598 +
81599 +typedef union _E3_DmaInfo
81600 +{
81601 +    E3_uint32  Value;
81602 +    struct
81603 +    {
81604 +#if defined(__LITTLE_ENDIAN__)
81605 +       E3_uint32 DmaOutputOpen:1;      /* The packet is currently open */
81606 +       E3_uint32 :7;
81607 +       E3_uint32 TimeSliceCount:2;     /* Time left to timeslice */
81608 +       E3_uint32 UseRemotePriv:1;      /* Set for remote read dmas */
81609 +       E3_uint32 DmaLastPacket:1;      /* Set for the last packet of a dma */
81610 +       E3_uint32 PacketAckValue:2;     /* Packet ack type. Valid if AckBufferValid set. */
81611 +       E3_uint32 PacketTimeout:1;      /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
81612 +       E3_uint32 AckBufferValid:1;     /* Packet ack is valid. */
81613 +       E3_uint32 :16;                  /* read as Zero */
81614 +#else
81615 +       E3_uint32 :16;                  /* read as Zero */
81616 +       E3_uint32 AckBufferValid:1;     /* Packet ack is valid. */
81617 +       E3_uint32 PacketTimeout:1;      /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
81618 +       E3_uint32 PacketAckValue:2;     /* Packet ack type. Valid if AckBufferValid set. */
81619 +       E3_uint32 DmaLastPacket:1;      /* Set for the last packet of a dma */
81620 +       E3_uint32 UseRemotePriv:1;      /* Set for remote read dmas */
81621 +       E3_uint32 TimeSliceCount:2;     /* Time left to timeslice */
81622 +       E3_uint32 :7;
81623 +       E3_uint32 DmaOutputOpen:1;      /* The packet is currently open */
81624 +#endif
81625 +    } s;
81626 +} E3_DmaInfo;
81627 +
81628 +typedef volatile struct _E3_DmaRds
81629 +{
81630 +   E3_uint32           DMA_Source4to0AndTwoReads;
81631 +   E3_uint32           pad13;
81632 +   E3_uint32           DMA_BytesToRead;
81633 +   E3_uint32           pad14;
81634 +   E3_uint32           DMA_MinusPacketSize;
81635 +   E3_uint32           pad15;
81636 +   E3_uint32           DMA_MaxMinusPacketSize;
81637 +   E3_uint32           pad16;
81638 +   E3_uint32           DMA_DmaOutputOpen;
81639 +   E3_uint32           pad16a;
81640 +   E3_DmaInfo          DMA_PacketInfo;
81641 +   E3_uint32           pad17[7];
81642 +   E3_uint32           IProcTrapBase;
81643 +   E3_uint32           pad18;
81644 +   E3_uint32           IProcBlockTrapBase;
81645 +   E3_uint32           pad19[11];
81646 +} E3_DmaRds;
81647 +   
81648 +typedef volatile struct _E3_DmaWrs
81649 +{
81650 +   E3_uint64           pad0;
81651 +   E3_uint64           LdAlignment;
81652 +   E3_uint64           ResetAckNLdBytesToWr;
81653 +   E3_uint64           SetAckNLdBytesToWr;
81654 +   E3_uint64           LdBytesToRd;
81655 +   E3_uint64           LdDmaType;
81656 +   E3_uint64           SendRoutes;
81657 +   E3_uint64           SendEop;
81658 +   E3_uint64           pad1[8];
81659 +} E3_DmaWrs;
81660 +
81661 +typedef volatile struct _E3_Exts
81662 +{
81663 +   E3_uint32           CurrContext;                            /* 0x12a00 */
81664 +   E3_uint32           pad0;
81665 +   E3_Status_Reg       DProcStatus;                            /* 0x12a08 */
81666 +   E3_uint32           pad1;
81667 +   E3_Status_Reg       CProcStatus;                            /* 0x12a10 */
81668 +   E3_uint32           pad2;
81669 +   E3_Status_Reg       TProcStatus;                            /* 0x12a18 */
81670 +   E3_uint32           pad3;
81671 +   E3_IProcStatus_Reg  IProcStatus;                            /* 0x12a20 */
81672 +   E3_uint32           pad4[3];
81673 +
81674 +   E3_uint32           IProcTypeContext;                       /* 0x12a30 */
81675 +   E3_uint32           pad5;
81676 +   E3_uint32           IProcTransAddr;                         /* 0x12a38 */
81677 +   E3_uint32           pad6;
81678 +   E3_uint32           IProcCurrTransData0;                    /* 0x12a40 */
81679 +   E3_uint32           pad7;
81680 +   E3_uint32           IProcCurrTransData1;                    /* 0x12a48 */
81681 +   E3_uint32           pad8;
81682 +
81683 +   E3_uint32           SchCntReg;                              /* 0x12a50 */
81684 +   E3_uint32           pad9;
81685 +   E3_uint32           InterruptReg;                           /* 0x12a58 */
81686 +   E3_uint32           pad10;
81687 +   E3_uint32           InterruptMask;                          /* 0x12a60 */
81688 +   E3_uint32           pad11;
81689 +   E3_uint32           LinkErrorTypes;                         /* 0x12a68 */
81690 +   E3_uint32           pad12[3];
81691 +   E3_uint32           LinkState;      /* a read here returns the DataDel value for the */
81692 +                                       /* link that has just been defined by a write to */
81693 +                                       /* Regs.Exts.SchCntReg.LinkSetValue */
81694 +   E3_uint32           pad13;
81695 +
81696 +   union                                                       /* 0x12a80 */
81697 +   {
81698 +      E3_DmaWrs                DmaWrs;
81699 +      E3_DmaRds                DmaRds;
81700 +   } Dmas;
81701 +} E3_Exts;
81702 +
81703 +typedef union com_port_entry
81704 +{
81705 +    E3_uint64  type;
81706 +    struct
81707 +    {
81708 +       E3_uint32 Address;              /* Command VAddr */
81709 +#if defined(__LITTLE_ENDIAN__)
81710 +       E3_uint32 Context0Issue:1;      /* Issue was for context 0 */
81711 +       E3_uint32 EventNotCommand:1;    /* Issue address bit 3 */
81712 +       E3_uint32 RemoteDesc:1;         /* Issue address bit 5 */
81713 +       E3_uint32 :13;                  /* read as Zero */
81714 +       E3_uint32 Context:12;           /* Command Context */
81715 +       E3_uint32 :4;                   /* read as Zero */
81716 +#else
81717 +       E3_uint32 :4;                   /* read as Zero */
81718 +       E3_uint32 Context:12;           /* Command Context */
81719 +       E3_uint32 :13;                  /* read as Zero */
81720 +       E3_uint32 RemoteDesc:1;         /* Issue address bit 5 */
81721 +       E3_uint32 EventNotCommand:1;    /* Issue address bit 3 */
81722 +       E3_uint32 Context0Issue:1;      /* Issue was for context 0 */
81723 +#endif
81724 +    } s;
81725 +} E3_ComPortEntry;
81726 +
81727 +/* control reg bits */
81728 +#define CONT_MMU_ENABLE                (1 << 0) /* bit 0 enables mmu */
81729 +#define CONT_ENABLE_8K_PAGES   (1 << 1) /* When set smallest page is 8k instead of 4k. */
81730 +#define CONT_EN_ALL_SETS       (1 << 2) /* enable cache */
81731 +#define CONT_CACHE_LEVEL0      (1 << 3) /* cache context table */
81732 +#define CONT_CACHE_LEVEL1      (1 << 4) /* cache up level 1 PTD/PTE */
81733 +#define CONT_CACHE_LEVEL2      (1 << 5) /* cache up level 2 PTD/PTE */
81734 +#define CONT_CACHE_LEVEL3      (1 << 6) /* cache up level 3 PTD/PTE */
81735 +#define CONT_CACHE_TRAPS       (1 << 7) /* cache up traps */
81736 +#define CONT_CACHE_LEV0_ROUTES (1 << 8) /* cache up small routes */
81737 +#define CONT_CACHE_LEV1_ROUTES (1 << 9) /* cache up large routes */
81738 +#define CONT_CACHE_ALL         (CONT_CACHE_LEVEL0 | CONT_CACHE_LEVEL1 | CONT_CACHE_LEVEL2 | \
81739 +                                CONT_CACHE_LEVEL3 | CONT_CACHE_TRAPS | \
81740 +                                CONT_CACHE_LEV0_ROUTES | CONT_CACHE_LEV1_ROUTES)
81741 +
81742 +#define CONT_SYNCHRONOUS       (1 << 10) /* PCI running sync */
81743 +#define CONT_SER               (1 << 11) /* Single bit output (Elan1 SER bit) */
81744 +#define CONT_SIR               (1 << 12) /* Writing 1 resets elan. */
81745 +
81746 +#define CONT_PSYCHO_MODE       (1 << 13) /* Enables all the perversion required by psycho */
81747 +#define CONT_ENABLE_ECC                (1 << 14) /* Enables error detecting on the ECC */
81748 +#define CONT_SDRAM_TESTING     (1 << 15) /* Switches to test mode for checking EEC data bits */
81749 +
81750 +/* defines SDRam CasLatency. Once set will not change again unless reset is reasserted. */
81751 +/* 1 = Cas Latency is 3, 0 = Cas Latency is 2 */
81752 +#define CAS_LATENCY_2          (0 << 16)
81753 +#define CAS_LATENCY_3          (1 << 16)
81754 +#define REFRESH_RATE_2US       (0 << 17) /* defines 2us SDRam Refresh rate. */
81755 +#define REFRESH_RATE_4US       (1 << 17) /* defines 4us SDRam Refresh rate. */
81756 +#define REFRESH_RATE_8US       (2 << 17) /* defines 8us SDRam Refresh rate. */
81757 +#define REFRESH_RATE_16US      (3 << 17) /* defines 16us SDRam Refresh rate. */
81758 +
81759 +#define CONT_PCI_ERR           (1 << 19) /* Read 1 if PCI Error */
81760 +#define CONT_CLEAR_PCI_ERROR   (1 << 19) /* Clears an PCI error. */
81761 +
81762 +/* Will cause the PCI error bit to become set. This is used to force the threads proc
81763 +   and the uProc to start to stall. */
81764 +#define CONT_SET_PCI_ERROR     (1 << 20)
81765 +
81766 +/* Writes SDram control reg when set. Also starts SDram memory system refreshing. */
81767 +#define SETUP_SDRAM            (1 << 21)
81768 +
81769 +/* Flushes the tlb */
81770 +#define MMU_FLUSH              (1 << 22)
81771 +/* and read back when it's finished */
81772 +#define MMU_FLUSHED            (1 << 0)
81773 +
81774 +/* Clears any ECC error detected by SDRam interface */
81775 +#define CLEAR_SDRAM_ERROR      (1 << 23)
81776 +
81777 +#define ECC_ADDR_MASK          0x0ffffff8
81778 +#define ECC_UE_MASK            0x1 
81779 +#define ECC_CE_MASK            0x2
81780 +#define ECC_ME_MASK            0x4 
81781 +#define ECC_SYN_MASK           0xff
81782 +
81783 +/* define page table entry bit fields */
81784 +#define TLB_PageSizeBits       (3 << 0)
81785 +#define TLB_ACCBits            (7 << 2)
81786 +#define TLB_LocalBit           (1 << 5)
81787 +#define TLB_PCI64BitTargetBit  (1 << 6)
81788 +#define TLB_PCIBigEndianBit    (1 << 7)
81789 +
81790 +#define TLB_ModifiedBit                (1 << 55)
81791 +#define TLB_ReferencedBit      (1 << 63)
81792 +
81793 +/* Used to read values from the tlb. */
81794 +#define TLB_TlbReadCntBitsSh   56
81795 +#define TLB_UseSelAddrSh       (1ULL << 60)
81796 +#define TLB_WriteTlbLine       (1ULL << 61)
81797 +
81798 +#define TLB_SEL_LINE(LineNo) (TLB_UseSelAddrSh | \
81799 +                             ((E3_uint64)((LineNo) & 0xf) << TLB_TlbReadCntBitsSh))
81800 +
81801 +typedef union _E3_CacheContReg
81802 +{
81803 +    E3_uint32 ContReg;
81804 +    struct
81805 +    {
81806 +#if defined(__LITTLE_ENDIAN__)
81807 +       E3_uint32 MMU_Enable:1;         /* wr 1 to enable the MMU */
81808 +       E3_uint32 Set8kPages:1;         /* wr 1 smallest page is 8k. */
81809 +       E3_uint32 EnableAllSets:1;      /* wr 1 All the cache sets are enabled */
81810 +       E3_uint32 Cache_Level0:1;       /* wr 1 lev0 page tabs will be cached */
81811 +       E3_uint32 Cache_Level1:1;       /* wr 1 lev1 page tabs will be cached */
81812 +       E3_uint32 Cache_Level2:1;       /* wr 1 lev2 page tabs will be cached */
81813 +       E3_uint32 Cache_Level3:1;       /* wr 1 lev3 page tabs will be cached */
81814 +       E3_uint32 Cache_Traps:1;        /* wr 1 trap info will be cached */
81815 +       E3_uint32 Cache_Lev0_Routes:1;  /* wr 1 small routes will be cached */
81816 +       E3_uint32 Cache_Lev1_Routes:1;  /* wr 1 big routes will be cached */
81817 +       E3_uint32 PCI_Synchronous:1;    /* Pci and sys clocks are running synchronously*/
81818 +       E3_uint32 SER:1;                /* 1 bit output port */
81819 +       E3_uint32 SIR:1;                /* write 1 will reset elan */
81820 +       E3_uint32 PsychoMode:1;         /* Enables psycho perversion mode. */
81821 +       E3_uint32 CasLatency:1;         /* 1=cas latency=3, 1=cas latency=2 */
81822 +       E3_uint32 RefreshRate:2;        /* 0=2us, 1=4us, 2=8us, 3=16us */
81823 +       E3_uint32 Pci_Err:1;            /* pci error. Write 1 clears err */
81824 +       E3_uint32 Set_Pci_Error:1;      /* Will simulate an Pci error */
81825 +       E3_uint32 StartSDRam:1;         /* Starts the sdram subsystem */
81826 +       E3_uint32 FlushTlb:1;           /* Flush the contence of the tlb */
81827 +       E3_uint32 :11;
81828 +#else
81829 +       E3_uint32 :11;
81830 +       E3_uint32 FlushTlb:1;           /* Flush the contence of the tlb */
81831 +       E3_uint32 StartSDRam:1;         /* Starts the sdram subsystem */
81832 +       E3_uint32 Set_Pci_Error:1;      /* Will simulate an Pci error */
81833 +       E3_uint32 Pci_Err:1;            /* pci error. Write 1 clears err */
81834 +       E3_uint32 RefreshRate:2;        /* 0=2us, 1=4us, 2=8us, 3=16us */
81835 +       E3_uint32 CasLatency:1;         /* 1=cas latency=3, 1=cas latency=2 */
81836 +       E3_uint32 PsychoMode:1;         /* Enables psycho perversion mode. */
81837 +       E3_uint32 SIR:1;                /* write 1 will reset elan */
81838 +       E3_uint32 SER:1;                /* 1 bit output port */
81839 +       E3_uint32 PCI_Synchronous:1;    /* Pci and sys clocks are running synchronously*/
81840 +       E3_uint32 Cache_Lev1_Routes:1;  /* wr 1 big routes will be cached */
81841 +       E3_uint32 Cache_Lev0_Routes:1;  /* wr 1 small routes will be cached */
81842 +       E3_uint32 Cache_Traps:1;        /* wr 1 trap info will be cached */
81843 +       E3_uint32 Cache_Level3:1;       /* wr 1 lev3 page tabs will be cached */
81844 +       E3_uint32 Cache_Level2:1;       /* wr 1 lev2 page tabs will be cached */
81845 +       E3_uint32 Cache_Level1:1;       /* wr 1 lev1 page tabs will be cached */
81846 +       E3_uint32 Cache_Level0:1;       /* wr 1 lev0 page tabs will be cached */
81847 +       E3_uint32 EnableAllSets:1;      /* wr 1 All the cache sets are enabled */
81848 +       E3_uint32 Set8kPages:1;         /* wr 1 smallest page is 8k. */
81849 +       E3_uint32 MMU_Enable:1;         /* wr 1 to enable the MMU */
81850 +#endif
81851 +    } s;
81852 +} E3_CacheContReg;
81853 +
81854 +typedef union _E3_TrapBits
81855 +{
81856 +    volatile E3_uint32 Bits;
81857 +    struct
81858 +    {
81859 +#if defined(__LITTLE_ENDIAN__)
81860 +       E3_uint32 ForcedTProcTrap:1;     /* The theads proc has been halted */
81861 +       E3_uint32 InstAccessException:1; /* An instruction access exception */
81862 +       E3_uint32 Unimplemented:1;       /* Unimplemented instruction executed */
81863 +       E3_uint32 DataAccessException:1; /* A data access exception */  
81864 +
81865 +       E3_uint32 ThreadTimeout:1;       /* The threads outputer has timed out */
81866 +       E3_uint32 OpenException:1;       /* Invalid sequence of open, sendtr or close */
81867 +       E3_uint32 OpenRouteFetch:1;      /* Fault while fetching routes for previous open*/
81868 +       E3_uint32 TrapForTooManyInsts:1; /* Thread has been executing for too long */
81869 +       
81870 +       E3_uint32 PacketAckValue:2;      /* Packet ack type. Valid if AckBufferValid set. */
81871 +       E3_uint32 PacketTimeout:1;       /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
81872 +
81873 +       E3_uint32 AckBufferValid:1;      /* The PacketAckValue bits are valid */
81874 +       E3_uint32 OutputWasOpen:1;       /* The output was open when tproc trapped */
81875 +       E3_uint32 TProcDeschedule:2;     /* The reason the tproc stopped running. */
81876 +       E3_uint32 :17;
81877 +#else
81878 +       E3_uint32 :17;
81879 +       E3_uint32 TProcDeschedule:2;     /* The reason the tproc stopped running. */
81880 +       E3_uint32 OutputWasOpen:1;       /* The output was open when tproc trapped */
81881 +       E3_uint32 AckBufferValid:1;      /* The PacketAckValue bits are valid */
81882 +       
81883 +       E3_uint32 PacketTimeout:1;       /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
81884 +       E3_uint32 PacketAckValue:2;      /* Packet ack type. Valid if AckBufferValid set. */
81885 +       
81886 +       E3_uint32 TrapForTooManyInsts:1; /* Thread has been executing for too long */
81887 +       E3_uint32 OpenRouteFetch:1;      /* Fault while fetching routes for previous open*/
81888 +       E3_uint32 OpenException:1;       /* Invalid sequence of open, sendtr or close */
81889 +       E3_uint32 ThreadTimeout:1;       /* The threads outputer has timed out */
81890 +
81891 +       E3_uint32 DataAccessException:1; /* A data access exception */
81892 +       E3_uint32 Unimplemented:1;       /* Unimplemented instruction executed */
81893 +       E3_uint32 InstAccessException:1; /* An instruction access exception */
81894 +       E3_uint32 ForcedTProcTrap:1;     /* The theads proc has been halted */
81895 +#endif
81896 +    } s;
81897 +} E3_TrapBits;
81898 +
81899 +typedef union _E3_DirtyBits
81900 +{
81901 +    volatile E3_uint32 Bits;
81902 +    struct
81903 +    {
81904 +#if defined(__LITTLE_ENDIAN__)
81905 +       E3_uint32 GlobalsDirty:8;
81906 +       E3_uint32 OutsDirty:8;          /* will always read as dirty. */
81907 +       E3_uint32 LocalsDirty:8;
81908 +       E3_uint32 InsDirty:8;
81909 +#else
81910 +       E3_uint32 InsDirty:8;
81911 +       E3_uint32 LocalsDirty:8;
81912 +       E3_uint32 OutsDirty:8;          /* will always read as dirty. */
81913 +       E3_uint32 GlobalsDirty:8;
81914 +#endif
81915 +    } s;
81916 +} E3_DirtyBits;
81917 +
81918 +#define E3_TProcDescheduleMask    0x6000
81919 +#define E3_TProcDescheduleWait    0x2000
81920 +#define E3_TProcDescheduleSuspend 0x4000
81921 +#define E3_TProcDescheduleBreak   0x6000
81922 +
81923 +#define E3_TrapBitsMask          0x7fff
81924 +
81925 +#define ThreadRestartFromTrapBit       1
81926 +#define ThreadReloadAllRegs            2
81927 +
81928 +#define E3_PAckOk      0
81929 +#define E3_PAckTestFail        1
81930 +#define E3_PAckDiscard 2
81931 +#define E3_PAckError   3
81932 +
81933 +typedef volatile struct _E3_DataBusMap
81934 +{
81935 +   E3_uint64            Dma_Alignment_Port[8];         /* 0x00002800 */
81936 +   E3_uint32            pad0[0x30];                    /* 0x00002840 */
81937 +
81938 +   E3_uint32            Input_Trans0_Data[0x10];       /* 0x00002900 */
81939 +   E3_uint32            Input_Trans1_Data[0x10];
81940 +   E3_uint32            Input_Trans2_Data[0x10];
81941 +   E3_uint32            Input_Trans3_Data[0x10];
81942 +
81943 +/* this is the start of the exts directly addressable from the ucode. */
81944 +   E3_Exts              Exts;                          /* 0x00002a00 */
81945 +
81946 +/* this is the start of the registers directly addressable from the ucode. */
81947 +   E3_DMA               Dma_Desc;                      /* 0x00002b00 */
81948 +
81949 +   E3_uint32            Dma_Last_Packet_Size;          /* 0x00002b20 */
81950 +   E3_uint32            Dma_This_Packet_Size;          /* 0x00002b24 */
81951 +   E3_uint32            Dma_Tmp_Source;                /* 0x00002b28 */
81952 +   E3_uint32            Dma_Tmp_Dest;                  /* 0x00002b2c */
81953 +
81954 +   E3_Addr              Thread_SP_Save_Ptr;    /* points to the thread desched save word. */
81955 +   E3_uint32            Dma_Desc_Size_InProg;          /* 0x00002b34 */
81956 +
81957 +   E3_uint32            Thread_Desc_SP;                /* 0x00002b38 */
81958 +   E3_uint32            Thread_Desc_Context;           /* 0x00002b3c */
81959 +
81960 +   E3_uint32            uCode_TMP[0x10];               /* 0x00002b40 */
81961 +
81962 +   E3_uint32            TProc_NonSysCntx_FPtr;         /* 0x00002b80 */
81963 +   E3_uint32            TProc_NonSysCntx_BPtr;         /* 0x00002b84 */
81964 +   E3_uint32            TProc_SysCntx_FPtr;            /* 0x00002b88 */
81965 +   E3_uint32            TProc_SysCntx_BPtr;            /* 0x00002b8c */
81966 +   E3_uint32            DProc_NonSysCntx_FPtr;         /* 0x00002b90 */
81967 +   E3_uint32            DProc_NonSysCntx_BPtr;         /* 0x00002b94 */
81968 +   E3_uint32            DProc_SysCntx_FPtr;            /* 0x00002b98 */
81969 +   E3_uint32            DProc_SysCntx_BPtr;            /* 0x00002b9c */
81970 +
81971 +   E3_uint32            Input_Trap_Base;               /* 0x00002ba0 */
81972 +   E3_uint32            Input_Queue_Offset;            /* 0x00002ba4 */
81973 +   E3_uint32            CProc_TrapSave_Addr;           /* 0x00002ba8 */
81974 +   E3_uint32            Input_Queue_Addr;              /* 0x00002bac */
81975 +   E3_uint32            uCode_TMP10;                   /* 0x00002bb0 */
81976 +   E3_uint32            uCode_TMP11;                   /* 0x00002bb4 */
81977 +   E3_uint32            Event_Trace_Ptr;               /* 0x00002bb8 */
81978 +   E3_uint32            Event_Trace_Mask;              /* 0x00002bbc */
81979 +
81980 +   E3_ComPortEntry      DmaComQueue[3];                /* 0x00002bc0 */
81981 +
81982 +   E3_uint32            Event_Int_Queue_FPtr;          /* 0x00002bd8 */
81983 +   E3_uint32            Event_Int_Queue_BPtr;          /* 0x00002bdc */
81984 +
81985 +   E3_ComPortEntry      ThreadComQueue[2];             /* 0x00002be0 */
81986 +   E3_ComPortEntry      SetEventComQueue[2];           /* 0x00002bf0 */
81987 +
81988 +   E3_uint32            pad1[96];                      /* 0x00002c00 */
81989 +   E3_uint32            ComQueueStatus;                /* 0x00002d80 */
81990 +   E3_uint32            pad2[31];                      /* 0x00002d84 */
81991 +
81992 +/* These are the internal registers of the threads proc. */
81993 +   E3_uint32            Globals[8];                    /* 0x00002e00 */
81994 +   E3_uint32            Outs[8];
81995 +   E3_uint32            Locals[8];
81996 +   E3_uint32            Ins[8];
81997 +
81998 +   E3_uint32            pad3[16];
81999 +
82000 +   E3_uint32            IBufferReg[4];
82001 +
82002 +   E3_uint32            ExecuteNPC;
82003 +   E3_uint32            ExecutePC;
82004 +
82005 +   E3_uint32            StartPC;
82006 +   E3_uint32            pad4;
82007 +
82008 +   E3_uint32            StartnPC;
82009 +   E3_uint32            pad5;
82010 +
82011 +   E3_TrapBits          TrapBits;
82012 +   E3_DirtyBits                 DirtyBits;
82013 +   E3_uint64            LoadDataReg;
82014 +   E3_uint64            StoreDataReg;
82015 +
82016 +   E3_uint32            ECC_STATUS0;
82017 +   E3_uint32            ECC_STATUS1;
82018 +   E3_uint32            pad6[0xe];
82019 +
82020 +/* Pci slave port regs */
82021 +   E3_uint32            PciSlaveReadCache[0x10];
82022 +
82023 +   E3_uint32            Fault_Base_Ptr;
82024 +   E3_uint32            pad7;
82025 +   E3_uint32            Context_Ptr;
82026 +   E3_uint32            pad8;
82027 +   E3_uint32            Input_Context_Filter;      /* write only, No data */
82028 +   E3_uint32            Input_Context_Fil_Flush;   /* write only, No data */
82029 +   E3_CacheContReg      Cache_Control_Reg;
82030 +   E3_uint32            pad9;
82031 +
82032 +   E3_uint64            Tlb_Line_Value;
82033 +   
82034 +   E3_uint32            Walk_Datareg1;
82035 +   E3_uint32            Walk_VAddr_Tab_Base;
82036 +   E3_uint32            Walk_Datareg;
82037 +   E3_uint32            Walk_ContextReg;
82038 +   E3_uint32            Walk_FaultAddr;
82039 +   E3_uint32            Walk_EventAddr;
82040 +
82041 +/* outputers output cont ext registers. */
82042 +   E3_uint64            Dma_Route_012345_Context;
82043 +   E3_uint64            pad10;
82044 +   E3_uint64            Dma_Route_01234567;
82045 +   E3_uint64            Dma_Route_89ABCDEF;
82046 +
82047 +   E3_uint64            Thread_Route_012345_Context;
82048 +   E3_uint64            pad11;
82049 +   E3_uint64            Thread_Route_01234567;
82050 +   E3_uint64            Thread_Route_89ABCDEF;
82051 +} E3_DataBusMap;
82052 +
82053 +typedef volatile struct _E3_Regs
82054 +{
82055 +   E3_CacheSets                  Sets;                         /* 0x00000000 */
82056 +   E3_CacheTags                  Tags;                         /* 0x00002000 */
82057 +   E3_DataBusMap         Regs;                         /* 0x00002800 */
82058 +   E3_uint32             pad1[0x400];
82059 +   E3_User_Regs          URegs;
82060 +} E3_Regs;
82061 +
82062 +#define MAX_TRAPPED_TRANS      16
82063 +#define TRANS_DATA_WORDS       16
82064 +#define TRANS_DATA_BYTES       64
82065 +
82066 +/*
82067 + * Event interrupt
82068 + */
82069 +typedef volatile union _E3_EventInt
82070 +{
82071 +   E3_uint64    ForceAlign;
82072 +   struct {
82073 +       E3_uint32 IntCookie;
82074 +       E3_uint32 EventContext; /* Bits 16 to 28 */
82075 +    } s;
82076 +} E3_EventInt;
82077 +
82078 +#define GET_EVENT_CONTEXT(Ptr) ((Ptr->s.EventContext >> 16) & MAX_ROOT_CONTEXT_MASK)
82079 +
82080 +typedef volatile union _E3_ThreadQueue
82081 +{
82082 +   E3_uint64   ForceAlign;
82083 +   struct
82084 +   {
82085 +       E3_Addr  Thread;
82086 +#if defined(__LITTLE_ENDIAN__)
82087 +       E3_uint32 :16;          /* Bits 0  to 15 */
82088 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
82089 +       E3_uint32 :3;           /* Bits 29 to 31 */
82090 +#else
82091 +       E3_uint32 :3;           /* Bits 29 to 31 */
82092 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
82093 +       E3_uint32 :16;          /* Bits 0  to 15 */
82094 +#endif
82095 +   } s;
82096 +} E3_ThreadQueue;
82097 +
82098 +typedef volatile union _E3_FaultStatusReg
82099 +{
82100 +   E3_uint32 Status;
82101 +   struct
82102 +   {
82103 +#if defined(__LITTLE_ENDIAN__)
82104 +      E3_uint32 AccTypePerm:3; /* Access permission. See below. Bits 0 to 2 */
82105 +      E3_uint32 AccSize:4;     /* Access size. See below for different types. Bits 3 to 6 */
82106 +      E3_uint32 WrAcc:1;       /* Access was a write. Bit 7 */
82107 +      E3_uint32 NonAllocAcc:1; /* Access was a cache non allocate type. Bit 8 */
82108 +      E3_uint32 BlkDataType:2; /* Data size used for endian flips. Bits 9 to 10 */
82109 +      E3_uint32 RdLine:1;      /* Access was a dma read line. Bit 11 */
82110 +      E3_uint32 RdMult:1;      /* Access was a dma read multiple. Bit 12 */
82111 +      E3_uint32 Walking:1;     /* The fault occued when walking. Bit 13 */
82112 +      E3_uint32 Level:2;       /* Page table level when the fault occued. Bits 14 to 15 */
82113 +      E3_uint32 ProtFault:1;   /* A protection fault occured. Bit 16 */
82114 +      E3_uint32 FaultPte:2;    /* Page table type when the fault occured. Bit 17 */
82115 +      E3_uint32 AlignmentErr:1;        /* Address alignment did not match the access size. Bit 19 */
82116 +      E3_uint32 VProcSizeErr:1;        /* VProc number is out of range. Bit 20 */
82117 +      E3_uint32 WalkBadData:1; /* Memory CRC error during a walk. Bit 21 */
82118 +      E3_uint32 :10;           /* Bits 22 to 31 */
82119 +#else
82120 +      E3_uint32 :10;           /* Bits 22 to 31 */
82121 +      E3_uint32 WalkBadData:1; /* Memory CRC error during a walk. Bit 21 */
82122 +      E3_uint32 VProcSizeErr:1;        /* VProc number is out of range. Bit 20 */
82123 +      E3_uint32 AlignmentErr:1;        /* Address alignment did not match the access size. Bit 19 */
82124 +      E3_uint32 FaultPte:2;    /* Page table type when the fault occured. Bit 17 */
82125 +      E3_uint32 ProtFault:1;   /* A protection fault occured. Bit 16 */
82126 +      E3_uint32 Level:2;       /* Page table level when the fault occued. Bits 14 to 15 */
82127 +      E3_uint32 Walking:1;     /* The fault occued when walking. Bit 13 */
82128 +      E3_uint32 RdMult:1;      /* Access was a dma read multiple. Bit 12 */
82129 +      E3_uint32 RdLine:1;      /* Access was a dma read line. Bit 11 */
82130 +      E3_uint32 BlkDataType:2; /* Data size used for endian flips. Bits 9 to 10 */
82131 +      E3_uint32 NonAllocAcc:1; /* Access was a cache non allocate type. Bit 8 */
82132 +      E3_uint32 WrAcc:1;       /* Access was a write. Bit 7 */
82133 +      E3_uint32 AccSize:4;     /* Access size. See below for different types. Bits 3 to 6 */
82134 +      E3_uint32 AccTypePerm:3; /* Access permission. See below. Bits 0 to 2 */
82135 +#endif
82136 +   } s;
82137 +} E3_FaultStatusReg;
82138 +
82139 +typedef union _E3_FaultSave
82140 +{
82141 +   E3_uint64            ForceAlign;
82142 +   struct {
82143 +      E3_FaultStatusReg         FSR;
82144 +      volatile E3_uint32 FaultContext;
82145 +      volatile E3_uint32 FaultAddress;
82146 +      volatile E3_uint32 EventAddress;
82147 +   } s;
82148 +} E3_FaultSave;
82149 +
82150 +/* MMU fault status reg bit positions. */
82151 +#define FSR_WritePermBit       0       /* 1=Write access perm, 0=Read access perm */
82152 +#define FSR_RemotePermBit      1       /* 1=Remote access perm, 0=local access perm */
82153 +#define FSR_EventPermBit       2       /* 1=Event access perm, 0=data access perm */
82154 +#define FSR_Size0Bit           3
82155 +#define FSR_Size1Bit           4
82156 +#define FSR_Size2Bit           5
82157 +#define FSR_Size3Bit           6
82158 +#define FSR_WriteAccBit                7       /* 1=Write access, 0=Read access. */
82159 +#define FSR_NonAllocBit                8       /* 1=Do not fill cache with this data */
82160 +#define FSR_BlkDataTy0Bit      9
82161 +#define FSR_BlkDataTy1Bit      10
82162 +#define FSR_ReadLineBit                11
82163 +#define FSR_ReadMultipleBit    12
82164 +
82165 +#define FSR_PermMask           (0xf << FSR_WritePermBit)
82166 +#define FSR_SizeMask           (0xf << FSR_Size0Bit)
82167 +#define FSR_AccTypeMask                (3 << FSR_WriteAccBit)
82168 +#define FSR_BlkDataTyMask      (3 << FSR_BlkDataTy0Bit)
82169 +#define FSR_PciAccTyMask       (3 << FSR_ReadLineBit)
82170 +#define FSR_Walking            (0x1 << 13)
82171 +#define FSR_Level_Mask         (0x3 << 14)
82172 +#define FSR_ProtFault          (0x1 << 16)
82173 +#define FSR_FaultPTEType       (0x2 << 17)
82174 +#define FSR_AddrSizeError      (0x1 << 19)
82175 +#define FSR_VProcSizeError     (0x1 << 20)
82176 +#define FSR_WalkBadData                (0x1 << 21)
82177 +
82178 +#define FSR_PermRead           0
82179 +#define FSR_PermWrite          1
82180 +#define FSR_PermRemoteRead     2
82181 +#define FSR_PermRemoteWrite    3
82182 +#define FSR_PermEventRd                4
82183 +#define FSR_PermEventWr                5
82184 +#define FSR_PermRemoteEventRd  6
82185 +#define FSR_PermRemoteEventWr  7
82186 +
82187 +/* AT size values for each access type */
82188 +#define FSR_Word               (0x0 << FSR_Size0Bit)
82189 +#define FSR_DWord              (0x1 << FSR_Size0Bit)
82190 +#define FSR_QWord              (0x2 << FSR_Size0Bit)
82191 +#define FSR_Block32            (0x3 << FSR_Size0Bit)
82192 +#define FSR_ReservedBlock      (0x6 << FSR_Size0Bit)
82193 +#define FSR_Block64            (0x7 << FSR_Size0Bit)
82194 +#define FSR_GetCntxFilter      (0x8 << FSR_Size0Bit)
82195 +#define FSR_QueueDWord         (0x9 << FSR_Size0Bit)
82196 +#define FSR_RouteFetch         (0xa << FSR_Size0Bit)
82197 +#define FSR_QueueBlock         (0xb << FSR_Size0Bit)
82198 +#define FSR_Block32PartWrite   (0xe << FSR_Size0Bit)
82199 +#define FSR_Block64PartWrite   (0xf << FSR_Size0Bit)
82200 +
82201 +#define FSR_AllocRead          (0 << FSR_WriteAccBit)
82202 +#define FSR_AllocWrite         (1 << FSR_WriteAccBit)
82203 +#define FSR_NonAllocRd         (2 << FSR_WriteAccBit)
82204 +#define FSR_NonAllocWr         (3 << FSR_WriteAccBit)
82205 +
82206 +#define FSR_TypeByte           (0 << FSR_BlkDataTy0Bit)
82207 +#define FSR_TypeHWord          (1 << FSR_BlkDataTy0Bit)
82208 +#define FSR_TypeWord           (2 << FSR_BlkDataTy0Bit)
82209 +#define FSR_TypeDWord          (3 << FSR_BlkDataTy0Bit)
82210 +
82211 +typedef union E3_TrTypeCntx
82212 +{
82213 +   E3_uint32 TypeContext;
82214 +   struct
82215 +   {
82216 +#if defined(__LITTLE_ENDIAN__)
82217 +      E3_uint32 Type:16;               /* Transaction type field */
82218 +      E3_uint32 Context:13;            /* Transaction context */
82219 +      E3_uint32 TypeCntxInvalid:1;     /* Bit  29 */
82220 +      E3_uint32 StatusRegValid:1;      /* Bit  30 */
82221 +      E3_uint32 LastTrappedTrans:1;    /* Bit  31 */
82222 +#else
82223 +      E3_uint32 LastTrappedTrans:1;    /* Bit  31 */
82224 +      E3_uint32 StatusRegValid:1;      /* Bit  30 */
82225 +      E3_uint32 TypeCntxInvalid:1;     /* Bit  29 */
82226 +      E3_uint32 Context:13;            /* Transaction context */
82227 +      E3_uint32 Type:16;               /* Transaction type field */
82228 +#endif
82229 +   } s;
82230 +} E3_TrTypeCntx;
82231 +
82232 +#define GET_TRAP_TYPE(Ptr)    (Ptr.TypeContext & 0xfff)
82233 +#define GET_TRAP_CONTEXT(Ptr) ((Ptr.TypeContext >> 16) & 0x1fff)
82234 +
82235 +/* Words have been swapped for big endian access when fetched with dword access from elan.*/
82236 +typedef union _E3_IprocTrapHeader
82237 +{
82238 +   E3_uint64   forceAlign;
82239 +
82240 +   struct
82241 +   {
82242 +      E3_TrTypeCntx     TrTypeCntx;
82243 +      E3_uint32                 TrAddr;
82244 +      E3_uint32                 TrData0;
82245 +      union
82246 +      {
82247 +        E3_IProcStatus_Reg u_IProcStatus;
82248 +        E3_uint32          u_TrData1;
82249 +      } ipsotd;
82250 +   } s;
82251 +} E3_IprocTrapHeader;
82252 +
82253 +#define IProcTrapStatus ipsotd.u_IProcStatus
82254 +#define TrData1                ipsotd.u_TrData1
82255 +
82256 +typedef struct E3_IprocTrapData
82257 +{
82258 +   E3_uint32 TrData[TRANS_DATA_WORDS];
82259 +} E3_IprocTrapData;
82260 +
82261 +/*
82262 + * 64 kbytes of elan local memory. Must be aligned on a 64k boundary
82263 + */
82264 +#define E3_NonSysCntxQueueSize 0x400
82265 +#define E3_SysCntxQueueSize    0x100
82266 +
82267 +typedef struct _E3_TrapAndQueue
82268 +{
82269 +   E3_DMA              NonSysCntxDmaQueue[E3_NonSysCntxQueueSize];                     /* 0x000000 */
82270 +   E3_DMA              SysCntxDmaQueue[E3_SysCntxQueueSize];                           /* 0x008000 */
82271 +   E3_EventInt         EventIntQueue[E3_NonSysCntxQueueSize];                          /* 0x00A000 */
82272 +   E3_ThreadQueue      NonSysCntxThreadQueue[E3_NonSysCntxQueueSize];                  /* 0x00C000 */  
82273 +   E3_ThreadQueue      SysCntxThreadQueue[E3_SysCntxQueueSize];                        /* 0x00E000 */
82274 +   E3_FaultSave                IProcSysCntx;                                                   /* 0x00E800 */
82275 +   E3_Addr             Thread_SP_Save;                                                 /* 0x00E810 */
82276 +   E3_uint32           dummy0[3];                                                      /* 0x00E814 */
82277 +   E3_FaultSave                ThreadProcData;                                                 /* 0x00E820 */
82278 +   E3_FaultSave                ThreadProcInst;                                                 /* 0x00E830 */
82279 +   E3_FaultSave                dummy1[2];                                                      /* 0x00E840 */  
82280 +   E3_FaultSave                ThreadProcOpen;                                                 /* 0x00E860 */
82281 +   E3_FaultSave                dummy2;                                                         /* 0x00E870 */
82282 +   E3_FaultSave                IProcNonSysCntx;                                                /* 0x00E880 */
82283 +   E3_FaultSave                DProc;                                                          /* 0x00E890 */
82284 +   E3_FaultSave                CProc;                                                          /* 0x00E8A0 */
82285 +   E3_FaultSave                TProc;                                                          /* 0x00E8B0 */
82286 +   E3_FaultSave                DProcData0;                                                     /* 0x00E8C0 */
82287 +   E3_FaultSave                DProcData1;                                                     /* 0x00E8D0 */
82288 +   E3_FaultSave                DProcData2;                                                     /* 0x00E8E0 */
82289 +   E3_FaultSave                DProcData3;                                                     /* 0x00E8F0 */
82290 +   E3_uint32           dummy3[0xc0];                                                   /* 0x00E900 */
82291 +   E3_IprocTrapHeader  VCh0_C0_TrHead[MAX_TRAPPED_TRANS];
82292 +   E3_IprocTrapHeader  VCh0_NonC0_TrHead[MAX_TRAPPED_TRANS];
82293 +   E3_IprocTrapHeader  VCh1_C0_TrHead[MAX_TRAPPED_TRANS];
82294 +   E3_IprocTrapHeader  VCh1_NonC0_TrHead[MAX_TRAPPED_TRANS];
82295 +   E3_IprocTrapData    VCh0_C0_TrData[MAX_TRAPPED_TRANS];
82296 +   E3_IprocTrapData    VCh0_NonC0_TrData[MAX_TRAPPED_TRANS];
82297 +   E3_IprocTrapData    VCh1_C0_TrData[MAX_TRAPPED_TRANS];
82298 +   E3_IprocTrapData    VCh1_NonC0_TrData[MAX_TRAPPED_TRANS];
82299 +   E3_uint64           DmaOverflowQueueSpace[0x1000];
82300 +   E3_uint64           ThreadOverflowQueueSpace[0x800];
82301 +   E3_uint64           EventOverflowQueueSpace[0x800];
82302 +} E3_TrapAndQueue;
82303 +
82304 +
82305 +typedef struct _E3_ContextControlBlock 
82306 +{
82307 +   E3_uint32   rootPTP;
82308 +   E3_uint32   filter;
82309 +   E3_uint32   VPT_ptr;
82310 +   E3_uint32   VPT_mask;
82311 +} E3_ContextControlBlock;
82312 +
82313 +#define E3_CCB_CNTX0           (0x20000000)
82314 +#define E3_CCB_DISCARD_ALL     (0x40000000)
82315 +#define E3_CCB_ACKOK_ALL       (0x80000000)
82316 +#define E3_CCB_MASK            (0xc0000000)
82317 +
82318 +#define E3_NUM_CONTEXT_0       (0x20)
82319 +
82320 +/* Macros to manipulate event queue pointers */
82321 +/*     generate index in EventIntQueue */
82322 +#define E3_EVENT_INTQ_INDEX(fptr)      (((fptr) & 0x1fff) >> 3)
82323 +/*     generate next fptr */
82324 +#define E3_EVENT_INTQ_NEXT(fptr)       ((((fptr) + 8) & ~0x4000) | 0x2000)
82325 +
82326 +
82327 +#endif /* notdef _ELAN3_ELANREGS_H */
82328 +
82329 +/*
82330 + * Local variables:
82331 + * c-file-style: "stroustrup"
82332 + * End:
82333 + */
82334 Index: linux-2.4.21/include/elan3/elansyscall.h
82335 ===================================================================
82336 --- linux-2.4.21.orig/include/elan3/elansyscall.h       2004-02-23 16:02:56.000000000 -0500
82337 +++ linux-2.4.21/include/elan3/elansyscall.h    2005-06-01 23:12:54.724420104 -0400
82338 @@ -0,0 +1,124 @@
82339 +/*
82340 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82341 + *
82342 + *    For licensing information please see the supplied COPYING file
82343 + *
82344 + */
82345 +
82346 +#ifndef __ELAN3_ELANSYSCALL_H
82347 +#define __ELAN3_ELANSYSCALL_H
82348 +
82349 +#ident "$Id: elansyscall.h,v 1.34 2004/06/07 13:50:06 mike Exp $"
82350 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elansyscall.h,v $*/
82351 +
82352 +#ifdef __cplusplus
82353 +extern "C" {
82354 +#endif
82355 +
82356 +#ifndef _ASM
82357 +
82358 +typedef struct sys_word_item
82359 +{
82360 +    struct sys_word_item *Next;
82361 +    E3_uint32            Value;
82362 +} SYS_WORD_ITEM;
82363 +
82364 +typedef struct sys_block_item
82365 +{
82366 +    struct sys_block_item *Next;
82367 +    E3_uint32            *Pointer;
82368 +} SYS_BLOCK_ITEM;
82369 +
82370 +typedef struct sys_swap_space
82371 +{
82372 +    int                 Magic;
82373 +    void       *ItemListsHead[MAX_LISTS];
82374 +    void       **ItemListsTailp[MAX_LISTS];
82375 +} SYS_SWAP_SPACE;
82376 +
82377 +typedef struct sys_exception
82378 +{
82379 +    int                        Type;
82380 +    int                        Proc;
82381 +    u_long             Res;
82382 +    u_long             Value;
82383 +    E3_FaultSave_BE    FaultArea;
82384 +    
82385 +    union
82386 +    {
82387 +       DMA_TRAP        Dma;
82388 +       THREAD_TRAP     Thread;
82389 +       COMMAND_TRAP    Command;
82390 +       INPUT_TRAP      Input;
82391 +    }                  Union;
82392 +} SYS_EXCEPTION;
82393 +
82394 +typedef struct sys_exception_space
82395 +{
82396 +    struct sys_exception_space *Next;
82397 +    int                                Magic;
82398 +    int                                Front;
82399 +    int                                Back;
82400 +    int                                Count;
82401 +    int                                Overflow;
82402 +    SYS_EXCEPTION              Exceptions[1];
82403 +} SYS_EXCEPTION_SPACE;
82404 +
82405 +#ifdef __KERNEL__
82406 +
82407 +typedef struct sys_ctxt
82408 +{
82409 +    SYS_SWAP_SPACE      *Swap;
82410 +    SYS_EXCEPTION_SPACE *Exceptions;
82411 +    kmutex_t            Lock;
82412 +
82413 +    spinlock_t          WaitLock;
82414 +    kcondvar_t          NetworkErrorWait;
82415 +
82416 +    int                         Armed;
82417 +    int                         Backoff;
82418 +    long                Time;
82419 +
82420 +    u_long              Flags;
82421 +    int                  signal;
82422 +
82423 +    EVENT_COOKIE_TABLE  *Table;
82424 +} SYS_CTXT;
82425 +
82426 +extern SYS_CTXT *sys_init (ELAN3_CTXT *ctxt);
82427 +extern int       sys_waitevent (ELAN3_CTXT *ctxt, E3_Event *event);
82428 +extern void      sys_addException (SYS_CTXT *sctx, int type, int proc, caddr_t ptr, int size, 
82429 +                                  E3_FaultSave_BE *, u_long res, u_long value);
82430 +extern int       sys_getException (SYS_CTXT *sctx, SYS_EXCEPTION *ex);
82431 +
82432 +/* returns -ve error or ELAN_CAP_OK or ELAN_CAP_RMS */
82433 +/* use = ELAN_USER_ATTACH, ELAN_USER_P2P, ELAN_USER_BROADCAST */
82434 +extern int  elan3_validate_cap (ELAN3_DEV *dev, ELAN_CAPABILITY *cap ,int use);
82435 +
82436 +#endif /* __KERNEL__ */
82437 +
82438 +#endif /* _ASM */
82439 +
82440 +/* values for "Flags" */
82441 +#define ELAN3_SYS_FLAG_DMA_BADVP               1
82442 +#define ELAN3_SYS_FLAG_THREAD_BADVP    2
82443 +#define ELAN3_SYS_FLAG_DMAFAIL         4
82444 +#define ELAN3_SYS_FLAG_NETERR          8
82445 +
82446 +#define SYS_SWAP_MAGIC         0xB23C52DF
82447 +#define SYS_EXCEPTION_MAGIC    0xC34D63E0
82448 +
82449 +#define EXCEPTION_GLOBAL_STRING        "elan3_exceptions"
82450 +#define EXCEPTION_ABORT_STRING  "elan3_abortstring"
82451 +
82452 +#ifdef __cplusplus
82453 +}
82454 +#endif
82455 +
82456 +#endif /* __ELAN3_ELANSYSCALL_H */
82457 +
82458 +/*
82459 + * Local variables:
82460 + * c-file-style: "stroustrup"
82461 + * End:
82462 + */
82463 Index: linux-2.4.21/include/elan3/elanuregs.h
82464 ===================================================================
82465 --- linux-2.4.21.orig/include/elan3/elanuregs.h 2004-02-23 16:02:56.000000000 -0500
82466 +++ linux-2.4.21/include/elan3/elanuregs.h      2005-06-01 23:12:54.725419952 -0400
82467 @@ -0,0 +1,295 @@
82468 +/*
82469 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82470 + *
82471 + *    For licensing information please see the supplied COPYING file
82472 + *
82473 + */
82474 +
82475 +#ifndef __ELAN3_ELANUREGS_H
82476 +#define __ELAN3_ELANUREGS_H
82477 +
82478 +#ident "$Id: elanuregs.h,v 1.10 2003/09/24 13:57:24 david Exp $"
82479 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanuregs.h,v $*/
82480 +
82481 +#ifdef __cplusplus
82482 +extern "C" {
82483 +#endif
82484 +
82485 +/*
82486 + * Statistic control reg values
82487 + * Each 4-bit nibble of the control word specifies what statistic
82488 + * is to be recorded in each of the 8 statistic counters
82489 + */
82490 +
82491 +/* Count reg 0 */
82492 +#define STC_INPUT_TRANSACTIONS         0
82493 +#define STP_DMA_EOP_WAIT_ACK           1
82494 +#define STP_THREAD_RUNNING             2
82495 +#define STP_UCODE_WAIT_MEM             3
82496 +#define STC_CACHE_WRITE_BACKS          4
82497 +#define STC_PCI_SLAVE_READS            5
82498 +#define STC_REG0_UNUSED6               6
82499 +#define STP_REG0_UNUSED7               7
82500 +
82501 +#define STATS_REG0_NAMES {             \
82502 +         "STC_INPUT_TRANSACTIONS",     \
82503 +         "STP_DMA_EOP_WAIT_ACK",       \
82504 +         "STP_THREAD_RUNNING",         \
82505 +         "STP_UCODE_WAIT_MEM",         \
82506 +         "STC_CACHE_WRITE_BACKS",      \
82507 +         "STC_PCI_SLAVE_READS",        \
82508 +         "STC_REG0_UNUSED6",           \
82509 +         "STP_REG0_UNUSED7"            \
82510 +}
82511 +
82512 +/* Count reg 1 */
82513 +#define STC_INPUT_WRITE_BLOCKS         (0 << 4)
82514 +#define STP_DMA_DATA_TRANSMITTING      (1 << 4)
82515 +#define STP_THEAD_WAITING_INST         (2 << 4)
82516 +#define STC_REG1_UNUSED3               (3 << 4)
82517 +#define STP_FETCHING_ROUTES            (4 << 4)
82518 +#define STC_REG1_UNUSED5               (5 << 4)
82519 +#define STC_PCI_SLAVE_WRITES           (6 << 4)
82520 +#define STP_PCI_SLAVE_READ_WAITING     (7 << 4)
82521 +
82522 +#define STATS_REG1_NAMES {             \
82523 +      "STC_INPUT_WRITE_BLOCKS",                \
82524 +         "STP_DMA_DATA_TRANSMITTING",  \
82525 +         "STP_THEAD_WAITING_INST",     \
82526 +         "STC_REG1_UNUSED3",           \
82527 +         "STP_FETCHING_ROUTES",        \
82528 +         "STC_REG1_UNUSED5",           \
82529 +         "STC_PCI_SLAVE_WRITES",       \
82530 +         "STP_PCI_SLAVE_READ_WAITING"  \
82531 +}
82532 +
82533 +/* Count reg 2 */
82534 +#define STC_INPUT_PKTS                 (0 << 8)
82535 +#define STP_DMA_WAITING_MEM            (1 << 8)
82536 +#define STP_THREAD_WAIT_OPEN_PKT       (2 << 8)
82537 +#define STC_REG2_UNUSED3               (3 << 8)
82538 +#define STC_ROUTE_FETCHES              (4 << 8)
82539 +#define STC_CACHE_NON_ALLOC_MISSES     (5 << 8)
82540 +#define STC_REG2_UNUSED6               (6 << 8)
82541 +#define STP_PCI_SLAVE_WRITE_WAITING    (7 << 8)
82542 +
82543 +#define STATS_REG2_NAMES {             \
82544 +      "STC_INPUT_PKTS",                        \
82545 +         "STP_DMA_WAITING_MEM",        \
82546 +         "STP_THREAD_WAIT_OPEN_PKT",   \
82547 +         "STC_REG2_UNUSED3",           \
82548 +         "STC_ROUTE_FETCHES",          \
82549 +         "STC_CACHE_NON_ALLOC_MISSES", \
82550 +         "STC_REG2_UNUSED6",           \
82551 +         "STP_PCI_SLAVE_WRITE_WAITING" \
82552 +}
82553 +
82554 +/* Count reg 3 */
82555 +#define STC_INPUT_PKTS_REJECTED                (0 << 12)
82556 +#define STP_DMA_WAIT_NETWORK_BUSY      (1 << 12)
82557 +#define STP_THREAD_WAIT_PACK           (2 << 12)
82558 +#define STP_UCODE_BLOCKED_UCODE                (3 << 12)
82559 +#define STC_TLB_HITS                   (4 << 12)
82560 +#define STC_REG3_UNUSED5               (5 << 12)
82561 +#define STC_PCI_MASTER_READS           (6 << 12)
82562 +#define STP_PCI_MASTER_WRITE_WAITING   (7 << 12)
82563 +
82564 +#define STATS_REG3_NAMES {             \
82565 +      "STC_INPUT_PKTS_REJECTED",       \
82566 +         "STP_DMA_WAIT_NETWORK_BUSY",  \
82567 +         "STP_THREAD_WAIT_PACK",       \
82568 +         "STP_UCODE_BLOCKED_UCODE",    \
82569 +         "STC_TLB_HITS",               \
82570 +         "STC_REG3_UNUSED5",           \
82571 +         "STC_PCI_MASTER_READS",       \
82572 +         "STP_PCI_MASTER_WRITE_WAITING"\
82573 +}
82574 +
82575 +/* Count reg 4 */
82576 +#define STP_INPUT_DATA_TRANSMITTING    (0 << 16)
82577 +#define STC_DMA_NON_CTX0_PKTS          (1 << 16)
82578 +#define STP_THREAD_EOP_WAIT_ACK                (2 << 16)
82579 +#define STP_UCODE_DPROC_RUNNING                (3 << 16)
82580 +#define STC_TLB_MEM_WALKS              (4 << 16)
82581 +#define STC_REG4_UNUSED5               (5 << 16)
82582 +#define STC_PCI_MASTER_WRITES          (6 << 16)
82583 +#define STP_PCI_MASTER_READ_WAITING    (7 << 16)
82584 +
82585 +#define STATS_REG4_NAMES {             \
82586 +      "STP_INPUT_DATA_TRANSMITTING",   \
82587 +         "STC_DMA_NON_CTX0_PKTS",      \
82588 +         "STP_THREAD_EOP_WAIT_ACK",    \
82589 +         "STP_UCODE_DPROC_RUNNING",    \
82590 +         "STC_TLB_MEM_WALKS",          \
82591 +         "STC_REG4_UNUSED5",           \
82592 +         "STC_PCI_MASTER_WRITES",      \
82593 +         "STP_PCI_MASTER_READ_WAITING" \
82594 +}
82595 +
82596 +/* Count reg 5 */
82597 +#define STP_INPUT_WAITING_NETWORK_DATA (0 << 20)
82598 +#define STC_DMA_NON_CTX0_PKTS_REJECTED (1 << 20)
82599 +#define STP_THREAD_WAITING_DATA                (2 << 20)
82600 +#define STP_UCODE_CPROC_RUNNING                (3 << 20)
82601 +#define STP_THREAD_TRANSMITTING_DATA   (4 << 20)
82602 +#define STP_PCI_WAITING_MAIN           (5 << 20)
82603 +#define STC_REG5_UNUSED6               (6 << 20)
82604 +#define STC_REG5_UNUSED7               (7 << 20)
82605 +
82606 +#define STATS_REG5_NAMES {                     \
82607 +      "STP_INPUT_WAITING_NETWORK_DATA",                \
82608 +         "STC_DMA_NON_CTX0_PKTS_REJECTED",     \
82609 +         "STP_THREAD_WAITING_DATA",            \
82610 +         "STP_UCODE_CPROC_RUNNING",            \
82611 +         "STP_THREAD_TRANSMITTING_DATA",       \
82612 +         "STP_PCI_WAITING_MAIN",               \
82613 +         "STC_REG5_UNUSED6",                   \
82614 +         "STC_REG5_UNUSED7"                    \
82615 +}
82616 +
82617 +/* Count reg 6 */
82618 +#define STP_INPUT_WAITING_MEMORY       (0 << 24)
82619 +#define STC_DMA_CTX0_PKTS              (1 << 24)
82620 +#define STP_THREAD_WAITING_MEMORY      (2 << 24)
82621 +#define STP_UCODE_TPROC_RUNNING                (3 << 24)
82622 +#define STC_CACHE_HITS                 (4 << 24)
82623 +#define STP_PCI_WAITING_ELAN           (5 << 24)
82624 +#define STC_REG6_UNUSED4               (6 << 24)
82625 +#define STC_REG6_UNUSED7               (7 << 24)
82626 +
82627 +#define STATS_REG6_NAMES {             \
82628 +      "STP_INPUT_WAITING_MEMORY",      \
82629 +         "STC_DMA_CTX0_PKTS",          \
82630 +         "STP_THREAD_WAITING_MEMORY",  \
82631 +         "STP_UCODE_TPROC_RUNNING",    \
82632 +         "STC_CACHE_HITS",             \
82633 +         "STP_PCI_WAITING_ELAN",       \
82634 +         "STC_REG6_UNUSED4",           \
82635 +         "STC_REG6_UNUSED7"            \
82636 +}
82637 +
82638 +/* Count reg 7 */
82639 +#define STC_INPUT_CTX_FILTER_FILL      (0 << 28)       
82640 +#define STC_DMA_CTX0_PKTS_REJECTED     (1 << 28)
82641 +#define STP_THREAD_WAIT_NETWORK_BUSY   (2 << 28)
82642 +#define STP_UCODE_IPROC_RUNNING                (3 << 28)
82643 +#define STP_TLB_MEM_WALKING            (4 << 28)
82644 +#define STC_CACHE_ALLOC_MISSES         (5 << 28)
82645 +#define STP_PCI_DATA_TRANSFER          (6 << 28)
82646 +#define STC_REG7_UNUSED7               (7 << 28)
82647 +
82648 +#define STATS_REG7_NAMES {             \
82649 +      "STC_INPUT_CTX_FILTER_FILL",     \
82650 +         "STC_DMA_CTX0_PKTS_REJECTED", \
82651 +         "STP_THREAD_WAIT_NETWORK_BUSY",\
82652 +         "STP_UCODE_IPROC_RUNNING",    \
82653 +         "STP_TLB_MEM_WALKING",        \
82654 +         "STC_CACHE_ALLOC_MISSES",     \
82655 +         "STP_PCI_DATA_TRANSFER",      \
82656 +         "STC_REG7_UNUSED7"            \
82657 +}
82658 +
82659 +#define STATS_REG_NAMES { \
82660 +    STATS_REG0_NAMES, \
82661 +    STATS_REG1_NAMES, \
82662 +    STATS_REG2_NAMES, \
82663 +    STATS_REG3_NAMES, \
82664 +    STATS_REG4_NAMES, \
82665 +    STATS_REG5_NAMES, \
82666 +    STATS_REG6_NAMES, \
82667 +    STATS_REG7_NAMES, \
82668 +}
82669 +
82670 +extern const char *elan3_stats_names[8][8];
82671 +
82672 +#define ELAN3_STATS_NAME(COUNT, CONTROL) (elan3_stats_names[(COUNT)][(CONTROL) & 7])
82673 +
82674 +typedef volatile union e3_StatsControl
82675 +{
82676 +   E3_uint32 StatsControl;
82677 +   struct
82678 +   {
82679 +#if defined(__LITTLE_ENDIAN__)
82680 +      E3_uint32 StatCont0:4;
82681 +      E3_uint32 StatCont1:4;
82682 +      E3_uint32 StatCont2:4;
82683 +      E3_uint32 StatCont3:4;
82684 +      E3_uint32 StatCont4:4;
82685 +      E3_uint32 StatCont5:4;
82686 +      E3_uint32 StatCont6:4;
82687 +      E3_uint32 StatCont7:4;
82688 +#else
82689 +      E3_uint32 StatCont7:4;
82690 +      E3_uint32 StatCont6:4;
82691 +      E3_uint32 StatCont5:4;
82692 +      E3_uint32 StatCont4:4;
82693 +      E3_uint32 StatCont3:4;
82694 +      E3_uint32 StatCont2:4;
82695 +      E3_uint32 StatCont1:4;
82696 +      E3_uint32 StatCont0:4;
82697 +#endif
82698 +   } s;
82699 +} E3_StatsControl;
82700 +
82701 +typedef volatile union e3_StatsCount
82702 +{
82703 +   E3_uint64    ClockStat; 
82704 +   struct
82705 +   {
82706 +       E3_uint32 ClockLSW;     /* read only */
82707 +       E3_uint32 StatsCount;
82708 +   } s;
82709 +} E3_StatsCount;
82710 +
82711 +typedef volatile union e3_clock
82712 +{
82713 +   E3_uint64 NanoSecClock;
82714 +   struct
82715 +   {
82716 +      E3_uint32 ClockLSW;
82717 +      E3_uint32 ClockMSW;
82718 +   } s;
82719 +} E3_Clock;
82720 +#define E3_TIME( X ) ((X).NanoSecClock)
82721 +
82722 +typedef volatile struct _E3_User_Regs
82723 +{
82724 +   E3_StatsCount       StatCounts[8];
82725 +   E3_StatsCount       InstCount;
82726 +   E3_uint32           pad0;
82727 +   E3_StatsControl     StatCont;
82728 +   E3_Clock            Clock;
82729 +   E3_uint32           pad1[0x7ea];
82730 +} E3_User_Regs;
82731 +
82732 +typedef volatile struct _E3_CommandPort 
82733 +{
82734 +   E3_Addr             PutDma;         /* 0x000 */
82735 +   E3_uint32           Pad1;
82736 +   E3_Addr             GetDma;         /* 0x008 */
82737 +   E3_uint32           Pad2;
82738 +   E3_Addr             RunThread;      /* 0x010 */
82739 +   E3_uint32           Pad3[3];
82740 +   E3_Addr             WaitEvent0;     /* 0x020 */
82741 +   E3_uint32           Pad4;
82742 +   E3_Addr             WaitEvent1;     /* 0x028 */
82743 +   E3_uint32           Pad5;
82744 +   E3_Addr             SetEvent;       /* 0x030 */
82745 +   E3_uint32           Pad6[3];
82746 +   E3_uint32           Pad7[0x7f0];    /* Fill out to an 8K page */
82747 +} E3_CommandPort;
82748 +/* Should have the new structures for the top four pages of the elan3 space */
82749 +
82750 +#define E3_COMMANDPORT_SIZE    (sizeof (E3_CommandPort))
82751 +
82752 +#ifdef __cplusplus
82753 +}
82754 +#endif
82755 +
82756 +#endif /* __ELAN3_ELANUREGS_H */
82757 +
82758 +/*
82759 + * Local variables:
82760 + * c-file-style: "stroustrup"
82761 + * End:
82762 + */
82763 Index: linux-2.4.21/include/elan3/elanvp.h
82764 ===================================================================
82765 --- linux-2.4.21.orig/include/elan3/elanvp.h    2004-02-23 16:02:56.000000000 -0500
82766 +++ linux-2.4.21/include/elan3/elanvp.h 2005-06-01 23:12:54.726419800 -0400
82767 @@ -0,0 +1,165 @@
82768 +/*
82769 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82770 + *
82771 + *    For licensing information please see the supplied COPYING file
82772 + *
82773 + */
82774 +
82775 +#ifndef _ELAN3_ELANVP_H
82776 +#define _ELAN3_ELANVP_H
82777 +
82778 +#ident "$Id: elanvp.h,v 1.45 2004/06/18 09:28:06 mike Exp $"
82779 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanvp.h,v $ */
82780 +
82781 +#include <elan3/e3types.h>
82782 +#include <elan/bitmap.h>
82783 +#include <elan/capability.h>
82784 +
82785 +#ifdef __cplusplus
82786 +extern "C" {
82787 +#endif
82788 +
82789 +/*
82790 + * Context number allocation.
82791 + * [0-31]      system contexts
82792 + * [32-63]     hardware test
82793 + * [64-1023]   available
82794 + * [1024-2047] RMS allocatable
82795 + * [2048-4095] kernel comms data contexts
82796 + */
82797 +#define ELAN3_KCOMM_CONTEXT_NUM                0x001                   /* old kernel comms context (system) */
82798 +#define ELAN3_CM_CONTEXT_NUM           0x002                   /* new cluster member ship comms context (system) */
82799 +#define ELAN3_MRF_CONTEXT_NUM          0x003                   /* multi-rail kernel comms context */
82800 +#define ELAN3_DMARING_BASE_CONTEXT_NUM 0x010                   /* 16 contexts for dma ring issue (system) */
82801 +#define ELAN3_DMARING_TOP_CONTEXT_NUM  0x01f
82802 +
82803 +#define ELAN3_HWTEST_BASE_CONTEXT_NUM  0x020                   /* reserved for hardware test */
82804 +#define ELAN3_HWTEST_TOP_CONTEXT_NUM   0x03f
82805 +
82806 +#define ELAN3_KCOMM_BASE_CONTEXT_NUM   0x800                   /* kernel comms data transfer contexts */
82807 +#define ELAN3_KCOMM_TOP_CONTEXT_NUM    0xfff
82808 +
82809 +#define ELAN3_HWTEST_CONTEXT(ctx)      ((ctx) >= ELAN3_HWTEST_BASE_CONTEXT_NUM && \
82810 +                                        (ctx) <= ELAN3_HWTEST_TOP_CONTEXT_NUM)    
82811 +
82812 +#define ELAN3_SYSTEM_CONTEXT(ctx)      (((ctx) & SYS_CONTEXT_BIT) != 0 || \
82813 +                                        (ctx) < E3_NUM_CONTEXT_0 || \
82814 +                                        (ctx) >= ELAN3_KCOMM_BASE_CONTEXT_NUM)
82815 +
82816 +/* Maximum number of virtual processes */
82817 +#define ELAN3_MAX_VPS          (16384)
82818 +
82819 +#define ELAN3_INVALID_PROCESS  (0x7fffffff)            /* A GUARANTEED invalid process # */
82820 +#define ELAN3_INVALID_NODE     (0xFFFF)
82821 +#define ELAN3_INVALID_CONTEXT  (0xFFFF)
82822 +
82823 +
82824 +
82825 +#if defined(__KERNEL__) && !defined(__ELAN3__)
82826 +
82827 +/*
82828 + * Contexts are accessible via Elan capabilities,
82829 + * for each context that can be "attached" to there
82830 + * is a ELAN3_CTXT_INFO structure created by its
82831 + * "owner".  This also "remembers" all remote 
82832 + * segments that have "blazed" a trail to it.
82833 + *
82834 + * If the "owner" goes away the soft info is 
82835 + * destroyed when it is no longer "attached" or 
82836 + * "referenced" by a remote segment.
82837 + *
82838 + * If the owner changes the capability, then 
82839 + * the soft info must be not "referenced" or 
82840 + * "attached" before a new process can "attach"
82841 + * to it.
82842 + */
82843 +
82844 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::InfoLock,
82845 +                         elan3_info::Next elan3_info::Prev elan3_info::Device elan3_info::Owner
82846 +                         elan3_info::Capability elan3_info::AttachedCapability elan3_info::Context))
82847 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
82848 +                         elan3_info::Nacking elan3_info::Disabled))
82849 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_info::Context elan3_info::Device elan3_info::Capability))
82850 +
82851 +#endif /* __KERNEL__ */
82852 +
82853 +#define LOW_ROUTE_PRIORITY     0
82854 +#define HIGH_ROUTE_PRIORITY    1
82855 +
82856 +#define DEFAULT_ROUTE_TIMEOUT  3
82857 +#define DEFAULT_ROUTE_PRIORITY LOW_ROUTE_PRIORITY
82858 +
82859 +
82860 +/* a small route is 4 flits (8 bytes), a big route  */
82861 +/* is 8 flits (16 bytes) - each packed route is 4 bits */
82862 +/* so giving us a maximum of 28 as flit0 does not contain */
82863 +/* packed routes */
82864 +#define MAX_FLITS              8
82865 +#define MAX_PACKED             28
82866 +
82867 +/* bit definitions for 64 bit route pointer */
82868 +#define ROUTE_VALID            (1ULL << 63)
82869 +#define ROUTE_PTR              (1ULL << 62)
82870 +#define ROUTE_CTXT_SHIFT       48
82871 +#define ROUTE_PTR_MASK         ((1ull << ROUTE_CTXT_SHIFT)-1)
82872 +#define ROUTE_GET_CTXT          ((VAL >> ROUTE_CTXT_SHIFT) & 0x3fff )
82873 +
82874 +#define SMALL_ROUTE(flits, context)    (((E3_uint64) (flits)[0] <<  0) | ((E3_uint64) (flits)[1] << 16) | \
82875 +                                        ((E3_uint64) (flits)[2] << 32) | ((E3_uint64) (context) << ROUTE_CTXT_SHIFT) | \
82876 +                                        ROUTE_VALID)
82877 +
82878 +#define BIG_ROUTE_PTR(paddr, context)  ((E3_uint64) (paddr) | ((E3_uint64) context << ROUTE_CTXT_SHIFT) | ROUTE_VALID | ROUTE_PTR)
82879 +
82880 +#define BIG_ROUTE0(flits)              (((E3_uint64) (flits)[0] <<  0) | ((E3_uint64) (flits)[1] << 16) | \
82881 +                                        ((E3_uint64) (flits)[2] << 32) | ((E3_uint64) (flits)[3] << 48))
82882 +#define BIG_ROUTE1(flits)              (((E3_uint64) (flits)[4] <<  0) | ((E3_uint64) (flits)[5] << 16) | \
82883 +                                        ((E3_uint64) (flits)[6] << 32) | ((E3_uint64) (flits)[7] << 48))
82884 +
82885 +
82886 +/* defines for first flit of a route */
82887 +#define FIRST_HIGH_PRI         (1 << 15)
82888 +#define FIRST_AGE(Val)         ((Val) << 11)
82889 +#define FIRST_TIMEOUT(Val)     ((Val) << 9)
82890 +#define FIRST_PACKED(X)                ((X) << 7)
82891 +#define FIRST_ROUTE(Val)       (Val)
82892 +#define FIRST_ADAPTIVE         (0x30)
82893 +#define FIRST_BCAST_TREE       (0x20)
82894 +#define FIRST_MYLINK           (0x10)
82895 +#define FIRST_BCAST(Top, Bot)  (0x40 | ((Top) << 3) | (Bot))
82896 +
82897 +/* defines for 3 bit packed entries for subsequent flits */
82898 +#define PACKED_ROUTE(Val)      (8 | (Val))
82899 +#define PACKED_ADAPTIVE                (3)
82900 +#define PACKED_BCAST_TREE      (2)
82901 +#define PACKED_MYLINK          (1)
82902 +#define PACKED_BCAST0(Top,Bot) (4 | (Bot & 3))
82903 +#define PACKED_BCAST1(Top,Bot) ((Top << 1) | (Bot >> 2))
82904 +
82905 +/* ---------------------------------------------------------- 
82906 + * elan3_route functions 
82907 + * return ELAN3_ROUTE_xxx codes
82908 + * ---------------------------------------------------------- */
82909 +
82910 +#define ELAN3_ROUTE_SUCCESS        (0x00)
82911 +#define ELAN3_ROUTE_SYSCALL_FAILED (0x01)
82912 +#define ELAN3_ROUTE_INVALID        (0x02)
82913 +#define ELAN3_ROUTE_TOO_LONG       (0x04)
82914 +#define ELAN3_ROUTE_LOAD_FAILED    (0x08)
82915 +#define ELAN3_ROUTE_PROC_RANGE     (0x0f)
82916 +#define ELAN3_ROUTE_INVALID_LEVEL  (0x10)
82917 +#define ELAN3_ROUTE_OCILATES       (0x20)
82918 +#define ELAN3_ROUTE_WRONG_DEST     (0x40)
82919 +#define ELAN3_ROUTE_TURN_LEVEL     (0x80)
82920 +#define ELAN3_ROUTE_NODEID_UNKNOWN (0xf0)
82921 +
82922 +#ifdef __cplusplus
82923 +}
82924 +#endif
82925 +
82926 +#endif /* _ELAN3_ELANVP_H */
82927 +
82928 +/*
82929 + * Local variables:
82930 + * c-file-style: "stroustrup"
82931 + * End:
82932 + */
82933 Index: linux-2.4.21/include/elan3/events.h
82934 ===================================================================
82935 --- linux-2.4.21.orig/include/elan3/events.h    2004-02-23 16:02:56.000000000 -0500
82936 +++ linux-2.4.21/include/elan3/events.h 2005-06-01 23:12:54.726419800 -0400
82937 @@ -0,0 +1,183 @@
82938 +/*
82939 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82940 + *
82941 + *    For licensing information please see the supplied COPYING file
82942 + *
82943 + */
82944 +
82945 +#ifndef _ELAN3_EVENTS_H
82946 +#define _ELAN3_EVENTS_H
82947 +
82948 +#ident "$Id: events.h,v 1.45 2003/09/24 13:57:24 david Exp $"
82949 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/events.h,v $*/
82950 +
82951 +/*
82952 + * Alignments for events, event queues and blockcopy blocks.
82953 + */
82954 +#define E3_EVENT_ALIGN                 (8)
82955 +#define E3_QUEUE_ALIGN         (32)
82956 +#define E3_BLK_ALIGN           (64)
82957 +#define E3_BLK_SIZE            (64)
82958 +#define E3_BLK_PATTERN                 (0xfeedface)
82959 +
82960 +#define E3_EVENT_FREE          ((0 << 4) | EV_WCOPY)
82961 +#define E3_EVENT_PENDING       ((1 << 4) | EV_WCOPY)
82962 +#define E3_EVENT_ACTIVE                ((2 << 4) | EV_WCOPY)
82963 +#define E3_EVENT_FIRED         ((3 << 4) | EV_WCOPY)
82964 +#define E3_EVENT_FAILED                ((4 << 4) | EV_WCOPY)
82965 +#define E3_EVENT_DONE          ((5 << 4) | EV_WCOPY)
82966 +#define E3_EVENT_PRIVATE       ((6 << 4) | EV_WCOPY)
82967 +
82968 +/*
82969 + * Event values and masks
82970 + *
82971 + * Block Copy event    xxxxxxxxxxxxxxxx1
82972 + * Chained event       30 bit ptr ....0x
82973 + * Event interrupt     29 bit cookie 01x
82974 + * Dma event           28 bit ptr   011x
82975 + * thread event                28 bit ptr   111x
82976 + */
82977 +#define EV_CLEAR               (0x00000000)
82978 +#define EV_TYPE_BCOPY          (0x00000001)
82979 +#define EV_TYPE_CHAIN          (0x00000000)
82980 +#define EV_TYPE_EVIRQ          (0x00000002)
82981 +#define EV_TYPE_DMA            (0x00000006)
82982 +#define EV_TYPE_THREAD         (0x0000000e)
82983 +
82984 +#define EV_TYPE_BCOPY_BYTE     (0)
82985 +#define EV_TYPE_BCOPY_HWORD    (1)
82986 +#define EV_TYPE_BCOPY_WORD     (2)
82987 +#define EV_TYPE_BCOPY_DWORD    (3)
82988 +
82989 +/*
82990 + * Data type is in the lowest two bits of the Dest pointer.
82991 + */
82992 +#define EV_BCOPY_DTYPE_MASK    (3)
82993 +#define EV_WCOPY               (1)     /* [DestWord] = Source */
82994 +#define EV_BCOPY               (0)     /* [DestBlock] = [SourceBlock] */
82995 +
82996 +#define EV_TYPE_MASK           (0x0000000e)
82997 +#define EV_TYPE_MASK_BCOPY     (0x00000001)
82998 +#define EV_TYPE_MASK_CHAIN     (0x00000002)
82999 +#define EV_TYPE_MASK_EVIRQ     (0x00000006)
83000 +#define EV_TYPE_MASK_DMA       (0x0000000e)
83001 +#define EV_TYPE_MASK_THREAD    (0x0000000e)
83002 +#define EV_TYPE_MASK2          (0x0000000f)
83003 +
83004 +/*
83005 + * Min/Max size for Elan queue entries 
83006 + */
83007 +#define E3_QUEUE_MIN   E3_BLK_SIZE
83008 +#define E3_QUEUE_MAX   (E3_BLK_SIZE * 5)
83009 +
83010 +/*
83011 + * Elan queue state bits
83012 + */
83013 +#define E3_QUEUE_FULL  (1<<0)
83014 +#define E3_QUEUE_LOCKED        (1<<8)
83015 +
83016 +#ifndef _ASM
83017 +
83018 +typedef union _E3_Event
83019 +{
83020 +   E3_uint64   ev_Int64;
83021 +   struct {
83022 +      volatile E3_int32        u_Count;
83023 +      E3_uint32                u_Type;
83024 +   } ev_u;
83025 +} E3_Event;
83026 +
83027 +typedef union _E3_BlockCopyEvent
83028 +{
83029 +   E3_uint64 ev_ForceAlign;
83030 +   struct E3_BlockCopyEvent_u {
83031 +      volatile E3_int32        u_Count;
83032 +      E3_uint32                u_Type;
83033 +      E3_Addr          u_Source;
83034 +      E3_Addr          u_Dest;   /* lowest bits are the data type for endian conversion */
83035 +   } ev_u;
83036 +} E3_BlockCopyEvent;
83037 +
83038 +#define ev_Type   ev_u.u_Type
83039 +#define ev_Count  ev_u.u_Count
83040 +#define ev_Source ev_u.u_Source
83041 +#define ev_Dest   ev_u.u_Dest
83042 +
83043 +typedef union _E3_WaitEvent0
83044 +{
83045 +   E3_uint64            we_ForceAlign;
83046 +   struct {
83047 +       E3_Addr         u_EventLoc;
83048 +       E3_int32        u_WaitCount;
83049 +   } we_u;
83050 +} E3_WaitEvent0;
83051 +#define we_EventLoc we_u.u_EventLoc
83052 +#define we_WaitCount we_u.u_WaitCount
83053 +
83054 +typedef union _E3_Event_Blk
83055 +{
83056 +    E3_uint8  eb_Bytes[E3_BLK_SIZE];
83057 +    E3_uint32 eb_Int32[E3_BLK_SIZE/sizeof (E3_uint32)];
83058 +    E3_uint64 eb_Int64[E3_BLK_SIZE/sizeof (E3_uint64)];
83059 +} E3_Event_Blk;
83060 +
83061 +/* We make eb_done the last word of the blk
83062 + * so that we can guarantee the rest of the blk is
83063 + * correct when this value is set.
83064 + * However, when the TPORT code copies the envelope
83065 + * info into the blk, it uses a dword endian type.
83066 + * Thus we must correct for this when initialising
83067 + * the pattern in the Elan SDRAM blk (eeb_done)
83068 + */
83069 +#define eb_done eb_Int32[15]
83070 +#define eeb_done eb_Int32[15^WordEndianFlip]
83071 +
83072 +#define EVENT_WORD_READY(WORD) (*((volatile E3_uint32 *) WORD) != 0)
83073 +#define EVENT_BLK_READY(BLK) (((volatile E3_Event_Blk *) (BLK))->eb_done != 0)
83074 +#define EVENT_READY(EVENT)   (((volatile E3_Event *) (EVENT))->ev_Count <= 0)
83075 +
83076 +#define ELAN3_WAIT_EVENT (0)
83077 +#define ELAN3_POLL_EVENT (-1)
83078 +
83079 +#define SETUP_EVENT_TYPE(ptr,typeval) (((unsigned long)(ptr)) | (typeval))
83080 +
83081 +#define E3_RESET_BCOPY_BLOCK(BLK)                                                      \
83082 +       do {                                                                            \
83083 +               (BLK)->eb_done = 0;                                                     \
83084 +       } while (0)
83085 +
83086 +typedef struct e3_queue
83087 +{
83088 +   volatile E3_uint32  q_state;        /* queue is full=bit0, queue is locked=bit8 */
83089 +   volatile E3_Addr    q_bptr;         /* block aligned ptr to current back item */
83090 +   E3_uint32           q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
83091 +   E3_Addr             q_top;          /* block aligned ptr to last queue item */
83092 +   E3_Addr             q_base;         /* block aligned ptr to first queue item */
83093 +   volatile E3_Addr    q_fptr;         /* block aligned ptr to current front item */
83094 +   E3_Event            q_event;        /* queue event */
83095 +} E3_Queue;
83096 +
83097 +typedef struct e3_blockcopy_queue
83098 +{
83099 +   volatile E3_uint32  q_state;        /* queue is full=bit0, queue is locked=bit8 */
83100 +   volatile E3_Addr    q_bptr;         /* block aligned ptr to current back item */
83101 +   E3_uint32           q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
83102 +   E3_Addr             q_top;          /* block aligned ptr to last queue item */
83103 +   E3_Addr             q_base;         /* block aligned ptr to first queue item */
83104 +   volatile E3_Addr    q_fptr;         /* block aligned ptr to current front item */
83105 +   E3_BlockCopyEvent   q_event;        /* queue event */
83106 +   E3_uint32           q_pad[6];
83107 +} E3_BlockCopyQueue;
83108 +
83109 +#define E3_QUEUE_EVENT_OFFSET  24
83110 +#define QUEUE_FULL(Q)          ((Q)->q_state & E3_QUEUE_FULL)          
83111 +
83112 +#endif /* ! _ASM */
83113 +
83114 +#endif /* _ELAN3_EVENTS_H */
83115 +
83116 +/*
83117 + * Local variables:
83118 + * c-file-style: "stroustrup"
83119 + * End:
83120 + */
83121 Index: linux-2.4.21/include/elan3/intrinsics.h
83122 ===================================================================
83123 --- linux-2.4.21.orig/include/elan3/intrinsics.h        2004-02-23 16:02:56.000000000 -0500
83124 +++ linux-2.4.21/include/elan3/intrinsics.h     2005-06-01 23:12:54.727419648 -0400
83125 @@ -0,0 +1,320 @@
83126 +/*
83127 + *    Copyright (c) 2003 by Quadrics Limited.
83128 + * 
83129 + *    For licensing information please see the supplied COPYING file
83130 + *
83131 + */
83132 +
83133 +#ifndef _ELAN3_INTRINSICS_H
83134 +#define _ELAN3_INTRINSICS_H
83135 +
83136 +#ident "$Id: intrinsics.h,v 1.35 2003/09/24 13:57:24 david Exp $"
83137 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/intrinsics.h,v $ */
83138 +
83139 +#include <elan3/e3types.h>
83140 +#include <elan3/events.h>
83141 +
83142 +/* 
83143 + * This file contains definitions of the macros for accessing the QSW
83144 + * specific instructions, as if they were functions.
83145 + * The results from the function 
83146 + */
83147 +
83148 +#define C_ACK_OK       0                       /* return from c_close() */
83149 +#define C_ACK_TESTFAIL 1                       /* return from c_close() */
83150 +#define C_ACK_DISCARD  2                       /* return from c_close() */
83151 +#define C_ACK_ERROR    3                       /* return from c_close() */
83152 +
83153 +/*
83154 + * Elan asi's for tproc block accesses
83155 + */
83156 +#define EASI_BYTE      0
83157 +#define EASI_HALF      1
83158 +#define EASI_WORD      2
83159 +#define EASI_DOUBLE    3
83160 +
83161 +#if defined(__ELAN3__) && !defined (_ASM)
83162 +
83163 +extern inline void c_abort(void) 
83164 +{
83165 +    asm volatile (".word 0x0000                ! die you thread you " : : );
83166 +}
83167 +
83168 +extern inline void c_suspend(void) 
83169 +{
83170 +    asm volatile (
83171 +       "set 1f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
83172 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
83173 +       "bne 1f                         ! RevB bug fix. jump to other alignment\n"
83174 +       "nop                            ! RevB bug fix. delay slot\n"
83175 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
83176 +       "suspend                        ! do the real suspend\n"
83177 +       "1: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
83178 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
83179 +       "suspend                        ! do the real suspend\n" : : );
83180 +}
83181 +
83182 +extern inline int c_close(void) 
83183 +{
83184 +    register int rc asm("o0");
83185 +
83186 +    asm volatile ("close %0" : "=r" (rc) : );
83187 +
83188 +    return (rc);
83189 +}
83190 +
83191 +extern inline int c_close_cookie(volatile E3_uint32 *cookiep, E3_uint32 next)
83192 +{
83193 +    register int rc asm("o0");
83194 +
83195 +    asm volatile ("close       %0              ! close the packet\n"
83196 +                 "bz,a         1f              ! ack received\n"
83197 +                 "st           %1, [%2]        ! update cookie on ack\n"
83198 +                 "1:                           ! label for not-ack\n"
83199 +                 : "=r" (rc) : "r" (next), "r" (cookiep));
83200 +
83201 +    return (rc);
83202 +}
83203 +
83204 +extern inline void c_break_busywait(void)
83205 +{
83206 +    asm volatile (
83207 +       "breaktest                      ! test to see if break necessary\n"
83208 +       "bpos 1f                        ! no other thread ready\n"
83209 +       "nop                            ! delay slot\n"
83210 +       "sub     %%sp,3*8*4,%%sp        ! Space to save the registers\n"
83211 +       "stblock %%g0,[%%sp+0]          ! save the globals\n"
83212 +       "stblock %%i0,[%%sp+8*4]        ! save the ins\n"
83213 +       "stblock %%l0,[%%sp+16*4]       ! save the locals\n"
83214 +       "set 2f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
83215 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
83216 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
83217 +       "nop                            ! RevB bug fix. delay slot\n"
83218 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
83219 +       "break                          ! do the real break\n"
83220 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
83221 +       " ldblock [%%sp+16*4],%%l0      ! RevB bug fix. restore locals in delay slot\n"
83222 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
83223 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
83224 +       "break                          ! do the real break\n"
83225 +       "ldblock [%%sp+16*4],%%l0       ! restore locals\n"
83226 +       "4: ldblock [%%sp+8*4], %%i0    ! restore ins\n"
83227 +       "ldblock [%%sp+0],%%g0          ! restore globals\n"
83228 +       "add     %%sp,3*8*4,%%sp        ! restore stack pointer\n"
83229 +       "1: " : : );
83230 +}
83231 +
83232 +extern inline void c_break(void)
83233 +{
83234 +    asm volatile (
83235 +       "breaktest                      ! test to see if break necessary\n"
83236 +       "bne 1f                         ! haven't exceeded our inst count yet\n"
83237 +       "nop                            ! delay slot\n"
83238 +       "sub     %%sp,3*8*4,%%sp        ! Space to save the registers\n"
83239 +       "stblock %%g0,[%%sp+0]          ! save the globals\n"
83240 +       "stblock %%i0,[%%sp+8*4]        ! save the ins\n"
83241 +       "stblock %%l0,[%%sp+16*4]       ! save the locals\n"
83242 +       "set 2f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
83243 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
83244 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
83245 +       "nop                            ! RevB bug fix. delay slot\n"
83246 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
83247 +       "break                          ! do the real break\n"
83248 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
83249 +       " ldblock [%%sp+16*4],%%l0      ! RevB bug fix. restore locals in delay slot\n"
83250 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
83251 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
83252 +       "break                          ! do the real break\n"
83253 +       "ldblock [%%sp+16*4],%%l0       ! restore locals\n"
83254 +       "4: ldblock [%%sp+8*4], %%i0    ! restore ins\n"
83255 +       "ldblock [%%sp+0],%%g0          ! restore globals\n"
83256 +       "add     %%sp,3*8*4,%%sp        ! restore stack pointer\n"
83257 +       "1: " : : );
83258 +}
83259 +
83260 +extern inline void c_open( const int arg ) 
83261 +{
83262 +    asm volatile ("open %0" : : "r" (arg) );
83263 +    asm volatile ("nop; nop; nop; nop");
83264 +    asm volatile ("nop; nop; nop; nop");
83265 +    asm volatile ("nop; nop; nop; nop");
83266 +    asm volatile ("nop; nop; nop; nop");
83267 +    asm volatile ("nop; nop; nop; nop");
83268 +    asm volatile ("nop; nop; nop; nop");
83269 +}
83270 +
83271 +extern inline void c_waitevent( volatile E3_Event *const ptr,
83272 +                               const int count) 
83273 +{
83274 +    register volatile E3_Event *a_unlikely asm("o0") = ptr;
83275 +    register int a_very_unlikely asm("o1") = count;
83276 +
83277 +    asm volatile (
83278 +        "sub     %%sp,1*8*4,%%sp       ! Space to save the registers\n"
83279 +        "stblock %%i0,[%%sp+0]         ! save the ins\n"
83280 +       "set    2f, %%i7                ! RevB bug fix. get address of the wakeup inst\n"
83281 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
83282 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
83283 +       "nop                            ! RevB bug fix. delay slot\n"
83284 +       "ldd [%%i7],%%i4                ! RevB bug fix. data fetch of instructions\n"
83285 +        "waitevent                     ! do the business\n"
83286 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
83287 +        "  ldblock [%%sp+0],%%i0       ! RevB bug fix. restore ins in delay slot\n"
83288 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
83289 +       "ldd [%%i7],%%i4                ! RevB bug fix. data fetch of instructions\n"
83290 +        "waitevent                     ! do the business\n"
83291 +        "ldblock [%%sp+0],%%i0         ! restore ins\n"
83292 +        "4: add     %%sp,1*8*4,%%sp    ! restore stack pointer\n"
83293 +        : /* no outputs */
83294 +        : /* inputs */ "r" (a_unlikely), "r" (a_very_unlikely)
83295 +        : /* clobbered */ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
83296 +                         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7" );
83297 +
83298 +}
83299 +
83300 +#define c_sendtrans0(type,dest)                        \
83301 +       asm volatile ("sendtrans %0, %%g0, %1" : : "i" (type), "r" (dest))
83302 +
83303 +#define c_sendtrans1(type,dest,arg)            \
83304 +       asm volatile ("sendtrans %0, %2, %1" : : "i" (type), "r" (dest), "r" (arg))
83305 +
83306 +#define c_sendtrans2(type,dest,arg1,arg2)      \
83307 +       do {                                    \
83308 +            register const unsigned long a_unlikely_1 asm("o4") = arg1;                        \
83309 +            register const unsigned long a_unlikely_2 asm("o5") = arg2;                        \
83310 +            asm volatile ("sendtrans %0, %2, %1"                                       \
83311 +                : : "i" (type), "r" (dest), "r" (a_unlikely_1), "r" (a_unlikely_2));   \
83312 +       } while(0)
83313 +
83314 +#define c_sendmem(type,dest,ptr)               \
83315 +       asm volatile ("sendtrans %0, [%2], %1" : : "i" (type), "r" (dest), "r" (ptr))
83316 +
83317 +/* Copy a single 64-byte block (src blk is read using a BYTE endian type) */
83318 +extern inline void elan3_copy64b(void *src, void *dst)
83319 +{
83320 +    /* Copy 64 bytes using ldblock/stblock
83321 +     * We save and restore the locals/ins because if we don't gcc
83322 +     * really makes a bad job of optimising the rest of the thread code!
83323 +     *
83324 +     * We force the parameters in g5, g6 so that they aren't
83325 +     * trashed by the loadblk32 into the locals/ins
83326 +     */
83327 +    register void *tmp1 asm("g5") = src;
83328 +    register void *tmp2 asm("g6") = dst;
83329 +
83330 +    asm volatile (
83331 +       "and     %%sp,63,%%g7           ! Calculate stack alignment\n"
83332 +       "sub     %%sp,2*8*4,%%sp        ! Space to save the registers\n"
83333 +       "sub     %%sp,%%g7,%%sp         ! align stack\n" 
83334 +       "stblock64 %%l0,[%%sp]          ! save the locals and ins\n"
83335 +       "ldblock64a [%0]%2,%%l0         ! load 64-byte block into locals/ins\n"
83336 +       "stblock64a %%l0,[%1]%2         ! store 64-byte block from local/ins\n"
83337 +       "ldblock64 [%%sp],%%l0          ! restore locals and ins\n"
83338 +       "add     %%sp,%%g7, %%sp        ! undo alignment\n"
83339 +       "add     %%sp,2*8*4,%%sp        ! restore stack pointer\n"
83340 +       : /* outputs */
83341 +       : /* inputs */ "r" (tmp1), "r" (tmp2), "n" (EASI_BYTE)
83342 +       : /* clobbered */ "g5", "g6", "g7" );
83343 +}
83344 +
83345 +/* Copy a single 64-byte block (src blk is read using a WORD endian type) */
83346 +extern inline void elan3_copy64w(void *src, void *dst)
83347 +{
83348 +    /* Copy 64 bytes using ldblock/stblock
83349 +     * We save and restore the locals/ins because if we don't gcc
83350 +     * really makes a bad job of optimising the rest of the thread code!
83351 +     *
83352 +     * We force the parameters in g5, g6 so that they aren't
83353 +     * trashed by the loadblk32 into the locals/ins
83354 +     */
83355 +    register void *tmp1 asm("g5") = src;
83356 +    register void *tmp2 asm("g6") = dst;
83357 +
83358 +    asm volatile (
83359 +       "and     %%sp,63,%%g7           ! Calculate stack alignment\n"
83360 +       "sub     %%sp,2*8*4,%%sp        ! Space to save the registers\n"
83361 +       "sub     %%sp,%%g7,%%sp         ! align stack\n" 
83362 +       "stblock64 %%l0,[%%sp]          ! save the locals and ins\n"
83363 +       "ldblock64a [%0]%2,%%l0         ! load 64-byte block into locals/ins\n"
83364 +       "stblock64a %%l0,[%1]%2         ! store 64-byte block from local/ins\n"
83365 +       "ldblock64 [%%sp],%%l0          ! restore locals and ins\n"
83366 +       "add     %%sp,%%g7, %%sp        ! undo alignment\n"
83367 +       "add     %%sp,2*8*4,%%sp        ! restore stack pointer\n"
83368 +       : /* outputs */
83369 +       : /* inputs */ "r" (tmp1), "r" (tmp2), "n" (EASI_WORD)
83370 +       : /* clobbered */ "g5", "g6", "g7" );
83371 +}
83372 +
83373 +/* Read a 64-bit value with a WORD (32-bit) endian type */
83374 +extern inline E3_uint64 elan3_read64w( volatile E3_uint64 *const ptr )
83375 +{
83376 +    E3_uint64 result;
83377 +
83378 +    asm volatile (
83379 +       "ldblock8a [%1]%2, %0\n"
83380 +       : /* outputs */ "=r" (result)
83381 +       : /* inputs */ "r" (ptr), "n" (EASI_WORD) );
83382 +
83383 +    return( result );
83384 +}
83385 +
83386 +/* Read a 64-bit value with a DOUBLEWORD (64-bit) endian type */
83387 +extern inline E3_uint64 elan3_read64dw( volatile E3_uint64 *const ptr )
83388 +{
83389 +    E3_uint64 result;
83390 +
83391 +    asm volatile (
83392 +       "ldblock8a [%1]%2, %0\n"
83393 +       : /* outputs */ "=r" (result)
83394 +       : /* inputs */ "r" (ptr), "n" (EASI_DOUBLE) );
83395 +
83396 +    return( result );
83397 +}
83398 +
83399 +/* Write a 64-bit value with a WORD (32-bit) endian type */
83400 +extern inline void elan3_write64w( volatile E3_uint64 *const ptr, E3_uint64 value )
83401 +{
83402 +    asm volatile (
83403 +       "stblock8a %1, [%0]%2\n"
83404 +       : /* no outputs */
83405 +       : /* inputs */ "r" (ptr), "r" (value), "n" (EASI_WORD) );
83406 +}
83407 +
83408 +/* Write a 64-bit value with a DOUBLEWORD (64-bit) endian type */
83409 +extern inline void elan3_write64dw( volatile E3_uint64 *const ptr, E3_uint64 value )
83410 +{
83411 +    asm volatile (
83412 +       "stblock8a %1, [%0]%2\n"
83413 +       : /* no outputs */
83414 +       : /* inputs */ "r" (ptr), "r" (value), "n" (EASI_DOUBLE) );
83415 +}
83416 +
83417 +extern inline E3_uint32 c_swap(volatile E3_uint32 *source, E3_uint32 result)
83418 +{
83419 +   asm volatile("swap [%1],%0\n"
83420 +               : "=r" (result)
83421 +               : "r" (source) ,"0" (result)
83422 +               : "memory");
83423 +   return result;
83424 +}
83425 +
83426 +extern inline E3_uint32 c_swap_save(volatile E3_uint32 *source, const E3_uint32 result)
83427 +{
83428 +   register E3_uint32 a_unlikely;
83429 +   asm volatile("" : "=r" (a_unlikely) : );
83430 +
83431 +   asm volatile("mov %2,%0; swap [%1],%0\n"
83432 +               : "=r" (a_unlikely)
83433 +               : "r" (source) ,"r" (result), "0" (a_unlikely)
83434 +               : "memory");
83435 +   return a_unlikely;
83436 +}
83437 +#endif /* (__ELAN3__) && !(_ASM) */
83438 +
83439 +#endif /* _ELAN3_INTRINSICS_H */
83440 +
83441 +/*
83442 + * Local variables:
83443 + * c-file-style: "stroustrup"
83444 + * End:
83445 + */
83446 Index: linux-2.4.21/include/elan3/minames.h
83447 ===================================================================
83448 --- linux-2.4.21.orig/include/elan3/minames.h   2004-02-23 16:02:56.000000000 -0500
83449 +++ linux-2.4.21/include/elan3/minames.h        2005-06-01 23:12:54.728419496 -0400
83450 @@ -0,0 +1,256 @@
83451 +{MI_WaitForRemoteDescRead,     "MI_WaitForRemoteDescRead"},
83452 +{MI_WaitForRemoteDescRead2,    "MI_WaitForRemoteDescRead2"},
83453 +{MI_WaitForRemoteDescRead2_seq1,       "MI_WaitForRemoteDescRead2_seq1"},
83454 +{MI_SendRemoteDmaRoutes,       "MI_SendRemoteDmaRoutes"},
83455 +{MI_IProcTrapped,      "MI_IProcTrapped"},
83456 +{MI_DProcTrapped,      "MI_DProcTrapped"},
83457 +{MI_CProcTrapped,      "MI_CProcTrapped"},
83458 +{MI_TProcTrapped,      "MI_TProcTrapped"},
83459 +{MI_TestWhichDmaQueue, "MI_TestWhichDmaQueue"},
83460 +{MI_TestWhichDmaQueue_seq1,    "MI_TestWhichDmaQueue_seq1"},
83461 +{MI_InputRemoteDmaUpdateBPtr,  "MI_InputRemoteDmaUpdateBPtr"},
83462 +{MI_FixupQueueContextAndRemoteBit,     "MI_FixupQueueContextAndRemoteBit"},
83463 +{MI_FixupQueueContextAndRemoteBit_seq1,        "MI_FixupQueueContextAndRemoteBit_seq1"},
83464 +{MI_FixupQueueContextAndRemoteBit_seq2,        "MI_FixupQueueContextAndRemoteBit_seq2"},
83465 +{MI_FixupQueueContextAndRemoteBit_seq3,        "MI_FixupQueueContextAndRemoteBit_seq3"},
83466 +{MI_FixupQueueContextAndRemoteBit_seq4,        "MI_FixupQueueContextAndRemoteBit_seq4"},
83467 +{MI_RunDmaCommand,     "MI_RunDmaCommand"},
83468 +{MI_DoSendRemoteDmaDesc,       "MI_DoSendRemoteDmaDesc"},
83469 +{MI_DequeueNonSysCntxDma,      "MI_DequeueNonSysCntxDma"},
83470 +{MI_WaitForRemoteDescRead1,    "MI_WaitForRemoteDescRead1"},
83471 +{MI_RemoteDmaCommand,  "MI_RemoteDmaCommand"},
83472 +{MI_WaitForRemoteRoutes,       "MI_WaitForRemoteRoutes"},
83473 +{MI_DequeueSysCntxDma, "MI_DequeueSysCntxDma"},
83474 +{MI_ExecuteDmaDescriptorForQueue,      "MI_ExecuteDmaDescriptorForQueue"},
83475 +{MI_ExecuteDmaDescriptor1,     "MI_ExecuteDmaDescriptor1"},
83476 +{MI_ExecuteDmaDescriptor1_seq1,        "MI_ExecuteDmaDescriptor1_seq1"},
83477 +{MI_ExecuteDmaDescriptor1_seq2,        "MI_ExecuteDmaDescriptor1_seq2"},
83478 +{MI_ExecuteDmaDescriptor1_seq3,        "MI_ExecuteDmaDescriptor1_seq3"},
83479 +{MI_GetNewSizeInProg,  "MI_GetNewSizeInProg"},
83480 +{MI_GetNewSizeInProg_seq1,     "MI_GetNewSizeInProg_seq1"},
83481 +{MI_FirstBlockRead,    "MI_FirstBlockRead"},
83482 +{MI_ExtraFirstBlockRead,       "MI_ExtraFirstBlockRead"},
83483 +{MI_UnimplementedError,        "MI_UnimplementedError"},
83484 +{MI_UpdateDescriptor,  "MI_UpdateDescriptor"},
83485 +{MI_UpdateDescriptor_seq1,     "MI_UpdateDescriptor_seq1"},
83486 +{MI_UpdateDescriptor_seq2,     "MI_UpdateDescriptor_seq2"},
83487 +{MI_UpdateDescriptor_seq3,     "MI_UpdateDescriptor_seq3"},
83488 +{MI_UpdateDescriptor_seq4,     "MI_UpdateDescriptor_seq4"},
83489 +{MI_UpdateDescriptor_seq5,     "MI_UpdateDescriptor_seq5"},
83490 +{MI_GetNextSizeInProg, "MI_GetNextSizeInProg"},
83491 +{MI_DoStopThisDma,     "MI_DoStopThisDma"},
83492 +{MI_DoStopThisDma_seq1,        "MI_DoStopThisDma_seq1"},
83493 +{MI_GenNewBytesToRead, "MI_GenNewBytesToRead"},
83494 +{MI_WaitForEventReadTy1,       "MI_WaitForEventReadTy1"},
83495 +{MI_WaitUpdateEvent,   "MI_WaitUpdateEvent"},
83496 +{MI_WaitUpdateEvent_seq1,      "MI_WaitUpdateEvent_seq1"},
83497 +{MI_DoSleepOneTickThenRunable, "MI_DoSleepOneTickThenRunable"},
83498 +{MI_RunEvent,  "MI_RunEvent"},
83499 +{MI_EnqueueThread,     "MI_EnqueueThread"},
83500 +{MI_CheckContext0,     "MI_CheckContext0"},
83501 +{MI_EnqueueDma,        "MI_EnqueueDma"},
83502 +{MI_CprocTrapping,     "MI_CprocTrapping"},
83503 +{MI_CprocTrapping_seq1,        "MI_CprocTrapping_seq1"},
83504 +{MI_WaitForRemoteRoutes1,      "MI_WaitForRemoteRoutes1"},
83505 +{MI_SetEventCommand,   "MI_SetEventCommand"},
83506 +{MI_DoSetEvent,        "MI_DoSetEvent"},
83507 +{MI_DoRemoteSetEventNowOrTrapQueueingDma,      "MI_DoRemoteSetEventNowOrTrapQueueingDma"},
83508 +{MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1, "MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1"},
83509 +{MI_SendRemoteDmaRoutes2,      "MI_SendRemoteDmaRoutes2"},
83510 +{MI_WaitForRemoteRoutes2,      "MI_WaitForRemoteRoutes2"},
83511 +{MI_WaitEventCommandTy0,       "MI_WaitEventCommandTy0"},
83512 +{MI_DequeueNonSysCntxDma2,     "MI_DequeueNonSysCntxDma2"},
83513 +{MI_WaitEventCommandTy1,       "MI_WaitEventCommandTy1"},
83514 +{MI_WaitEventCommandTy1_seq1,  "MI_WaitEventCommandTy1_seq1"},
83515 +{MI_DequeueNonSysCntxThread,   "MI_DequeueNonSysCntxThread"},
83516 +{MI_DequeueSysCntxDma1,        "MI_DequeueSysCntxDma1"},
83517 +{MI_DequeueSysCntxThread,      "MI_DequeueSysCntxThread"},
83518 +{MI_TestNonSysCntxDmaQueueEmpty,       "MI_TestNonSysCntxDmaQueueEmpty"},
83519 +{MI_TestNonSysCntxDmaQueueEmpty_seq1,  "MI_TestNonSysCntxDmaQueueEmpty_seq1"},
83520 +{MI_TestNonSysCntxDmaQueueEmpty_seq2,  "MI_TestNonSysCntxDmaQueueEmpty_seq2"},
83521 +{MI_RunThreadCommand,  "MI_RunThreadCommand"},
83522 +{MI_SetEventWaitForLastAcess,  "MI_SetEventWaitForLastAcess"},
83523 +{MI_SetEventReadWait,  "MI_SetEventReadWait"},
83524 +{MI_SetEventReadWait_seq1,     "MI_SetEventReadWait_seq1"},
83525 +{MI_TestEventType,     "MI_TestEventType"},
83526 +{MI_TestEventType_seq1,        "MI_TestEventType_seq1"},
83527 +{MI_TestEventBit2,     "MI_TestEventBit2"},
83528 +{MI_DmaDescOrBlockCopyOrChainedEvent,  "MI_DmaDescOrBlockCopyOrChainedEvent"},
83529 +{MI_RunThread, "MI_RunThread"},
83530 +{MI_RunThread1,        "MI_RunThread1"},
83531 +{MI_RunThread1_seq1,   "MI_RunThread1_seq1"},
83532 +{MI_IncDmaSysCntxBPtr, "MI_IncDmaSysCntxBPtr"},
83533 +{MI_IncDmaSysCntxBPtr_seq1,    "MI_IncDmaSysCntxBPtr_seq1"},
83534 +{MI_IncDmaSysCntxBPtr_seq2,    "MI_IncDmaSysCntxBPtr_seq2"},
83535 +{MI_WaitForCntxDmaDescRead,    "MI_WaitForCntxDmaDescRead"},
83536 +{MI_FillInContext,     "MI_FillInContext"},
83537 +{MI_FillInContext_seq1,        "MI_FillInContext_seq1"},
83538 +{MI_WriteNewDescToQueue,       "MI_WriteNewDescToQueue"},
83539 +{MI_WriteNewDescToQueue_seq1,  "MI_WriteNewDescToQueue_seq1"},
83540 +{MI_TestForQueueWrap,  "MI_TestForQueueWrap"},
83541 +{MI_TestForQueueWrap_seq1,     "MI_TestForQueueWrap_seq1"},
83542 +{MI_TestQueueIsFull,   "MI_TestQueueIsFull"},
83543 +{MI_TestQueueIsFull_seq1,      "MI_TestQueueIsFull_seq1"},
83544 +{MI_TestQueueIsFull_seq2,      "MI_TestQueueIsFull_seq2"},
83545 +{MI_CheckPsychoShitFixup,      "MI_CheckPsychoShitFixup"},
83546 +{MI_PsychoShitFixupForcedRead, "MI_PsychoShitFixupForcedRead"},
83547 +{MI_PrepareDMATimeSlice,       "MI_PrepareDMATimeSlice"},
83548 +{MI_PrepareDMATimeSlice_seq1,  "MI_PrepareDMATimeSlice_seq1"},
83549 +{MI_TProcRestartFromTrapOrTestEventBit2,       "MI_TProcRestartFromTrapOrTestEventBit2"},
83550 +{MI_TProcRestartFromTrapOrTestEventBit2_seq1,  "MI_TProcRestartFromTrapOrTestEventBit2_seq1"},
83551 +{MI_WaitForGlobalsRead,        "MI_WaitForGlobalsRead"},
83552 +{MI_WaitForNPCRead,    "MI_WaitForNPCRead"},
83553 +{MI_EventInterrupt,    "MI_EventInterrupt"},
83554 +{MI_EventInterrupt_seq1,       "MI_EventInterrupt_seq1"},
83555 +{MI_EventInterrupt_seq2,       "MI_EventInterrupt_seq2"},
83556 +{MI_EventInterrupt_seq3,       "MI_EventInterrupt_seq3"},
83557 +{MI_TestSysCntxDmaQueueEmpty,  "MI_TestSysCntxDmaQueueEmpty"},
83558 +{MI_TestSysCntxDmaQueueEmpty_seq1,     "MI_TestSysCntxDmaQueueEmpty_seq1"},
83559 +{MI_TestIfRemoteDesc,  "MI_TestIfRemoteDesc"},
83560 +{MI_DoDmaLocalSetEvent,        "MI_DoDmaLocalSetEvent"},
83561 +{MI_DoDmaLocalSetEvent_seq1,   "MI_DoDmaLocalSetEvent_seq1"},
83562 +{MI_DoDmaLocalSetEvent_seq2,   "MI_DoDmaLocalSetEvent_seq2"},
83563 +{MI_DmaLoop1,  "MI_DmaLoop1"},
83564 +{MI_ExitDmaLoop,       "MI_ExitDmaLoop"},
83565 +{MI_ExitDmaLoop_seq1,  "MI_ExitDmaLoop_seq1"},
83566 +{MI_RemoteDmaTestPAckType,     "MI_RemoteDmaTestPAckType"},
83567 +{MI_PacketDiscardOrTestFailRecIfCCis0, "MI_PacketDiscardOrTestFailRecIfCCis0"},
83568 +{MI_PacketDiscardOrTestFailRecIfCCis0_seq1,    "MI_PacketDiscardOrTestFailRecIfCCis0_seq1"},
83569 +{MI_TestNackFailIsZero2,       "MI_TestNackFailIsZero2"},
83570 +{MI_TestNackFailIsZero3,       "MI_TestNackFailIsZero3"},
83571 +{MI_DmaFailCountError, "MI_DmaFailCountError"},
83572 +{MI_TestDmaForSysCntx, "MI_TestDmaForSysCntx"},
83573 +{MI_TestDmaForSysCntx_seq1,    "MI_TestDmaForSysCntx_seq1"},
83574 +{MI_TestDmaForSysCntx_seq2,    "MI_TestDmaForSysCntx_seq2"},
83575 +{MI_TestAeqB2, "MI_TestAeqB2"},
83576 +{MI_TestAeqB2_seq1,    "MI_TestAeqB2_seq1"},
83577 +{MI_GetNextDmaDescriptor,      "MI_GetNextDmaDescriptor"},
83578 +{MI_DequeueSysCntxDma2,        "MI_DequeueSysCntxDma2"},
83579 +{MI_InputSetEvent,     "MI_InputSetEvent"},
83580 +{MI_PutBackSysCntxDma, "MI_PutBackSysCntxDma"},
83581 +{MI_PutBackSysCntxDma_seq1,    "MI_PutBackSysCntxDma_seq1"},
83582 +{MI_PutBackSysCntxDma_seq2,    "MI_PutBackSysCntxDma_seq2"},
83583 +{MI_InputRemoteDma,    "MI_InputRemoteDma"},
83584 +{MI_InputRemoteDma_seq1,       "MI_InputRemoteDma_seq1"},
83585 +{MI_WaitOneTickForWakeup1,     "MI_WaitOneTickForWakeup1"},
83586 +{MI_SendRemoteDmaDesc, "MI_SendRemoteDmaDesc"},
83587 +{MI_InputLockQueue,    "MI_InputLockQueue"},
83588 +{MI_CloseTheTrappedPacketIfCCis1,      "MI_CloseTheTrappedPacketIfCCis1"},
83589 +{MI_CloseTheTrappedPacketIfCCis1_seq1, "MI_CloseTheTrappedPacketIfCCis1_seq1"},
83590 +{MI_PostDmaInterrupt,  "MI_PostDmaInterrupt"},
83591 +{MI_InputUnLockQueue,  "MI_InputUnLockQueue"},
83592 +{MI_WaitForUnLockDescRead,     "MI_WaitForUnLockDescRead"},
83593 +{MI_SendEOPforRemoteDma,       "MI_SendEOPforRemoteDma"},
83594 +{MI_LookAtRemoteAck,   "MI_LookAtRemoteAck"},
83595 +{MI_InputWriteBlockQueue,      "MI_InputWriteBlockQueue"},
83596 +{MI_WaitForSpStore,    "MI_WaitForSpStore"},
83597 +{MI_TProcNext, "MI_TProcNext"},
83598 +{MI_TProcStoppedRunning,       "MI_TProcStoppedRunning"},
83599 +{MI_InputWriteBlock,   "MI_InputWriteBlock"},
83600 +{MI_RunDmaOrDeqNonSysCntxDma,  "MI_RunDmaOrDeqNonSysCntxDma"},
83601 +{MI_ExecuteDmaDescriptorForRun,        "MI_ExecuteDmaDescriptorForRun"},
83602 +{MI_ConfirmQueueLock,  "MI_ConfirmQueueLock"},
83603 +{MI_DmaInputIdentify,  "MI_DmaInputIdentify"},
83604 +{MI_TProcStoppedRunning2,      "MI_TProcStoppedRunning2"},
83605 +{MI_TProcStoppedRunning2_seq1, "MI_TProcStoppedRunning2_seq1"},
83606 +{MI_TProcStoppedRunning2_seq2, "MI_TProcStoppedRunning2_seq2"},
83607 +{MI_ThreadInputIdentify,       "MI_ThreadInputIdentify"},
83608 +{MI_InputIdWriteAddrAndType3,  "MI_InputIdWriteAddrAndType3"},
83609 +{MI_IProcTrappedWriteStatus,   "MI_IProcTrappedWriteStatus"},
83610 +{MI_FinishTrappingEop, "MI_FinishTrappingEop"},
83611 +{MI_InputTestTrans,    "MI_InputTestTrans"},
83612 +{MI_TestAeqB3, "MI_TestAeqB3"},
83613 +{MI_ThreadUpdateNonSysCntxBack,        "MI_ThreadUpdateNonSysCntxBack"},
83614 +{MI_ThreadQueueOverflow,       "MI_ThreadQueueOverflow"},
83615 +{MI_RunContext0Thread, "MI_RunContext0Thread"},
83616 +{MI_RunContext0Thread_seq1,    "MI_RunContext0Thread_seq1"},
83617 +{MI_RunContext0Thread_seq2,    "MI_RunContext0Thread_seq2"},
83618 +{MI_RunDmaDesc,        "MI_RunDmaDesc"},
83619 +{MI_RunDmaDesc_seq1,   "MI_RunDmaDesc_seq1"},
83620 +{MI_RunDmaDesc_seq2,   "MI_RunDmaDesc_seq2"},
83621 +{MI_TestAeqB,  "MI_TestAeqB"},
83622 +{MI_WaitForNonCntxDmaDescRead, "MI_WaitForNonCntxDmaDescRead"},
83623 +{MI_DmaQueueOverflow,  "MI_DmaQueueOverflow"},
83624 +{MI_BlockCopyEvent,    "MI_BlockCopyEvent"},
83625 +{MI_BlockCopyEventReadBlock,   "MI_BlockCopyEventReadBlock"},
83626 +{MI_BlockCopyWaitForReadData,  "MI_BlockCopyWaitForReadData"},
83627 +{MI_InputWriteWord,    "MI_InputWriteWord"},
83628 +{MI_TraceSetEvents,    "MI_TraceSetEvents"},
83629 +{MI_TraceSetEvents_seq1,       "MI_TraceSetEvents_seq1"},
83630 +{MI_TraceSetEvents_seq2,       "MI_TraceSetEvents_seq2"},
83631 +{MI_InputWriteDoubleWd,        "MI_InputWriteDoubleWd"},
83632 +{MI_SendLockTransIfCCis1,      "MI_SendLockTransIfCCis1"},
83633 +{MI_WaitForDmaRoutes1, "MI_WaitForDmaRoutes1"},
83634 +{MI_LoadDmaContext,    "MI_LoadDmaContext"},
83635 +{MI_InputTestAndSetWord,       "MI_InputTestAndSetWord"},
83636 +{MI_InputTestAndSetWord_seq1,  "MI_InputTestAndSetWord_seq1"},
83637 +{MI_GetDestEventValue, "MI_GetDestEventValue"},
83638 +{MI_SendDmaIdentify,   "MI_SendDmaIdentify"},
83639 +{MI_InputAtomicAddWord,        "MI_InputAtomicAddWord"},
83640 +{MI_LoadBFromTransD0,  "MI_LoadBFromTransD0"},
83641 +{MI_ConditionalWriteBackCCTrue,        "MI_ConditionalWriteBackCCTrue"},
83642 +{MI_WaitOneTickForWakeup,      "MI_WaitOneTickForWakeup"},
83643 +{MI_SendFinalUnlockTrans,      "MI_SendFinalUnlockTrans"},
83644 +{MI_SendDmaEOP,        "MI_SendDmaEOP"},
83645 +{MI_GenLastAddrForPsycho,      "MI_GenLastAddrForPsycho"},
83646 +{MI_FailedAckIfCCis0,  "MI_FailedAckIfCCis0"},
83647 +{MI_FailedAckIfCCis0_seq1,     "MI_FailedAckIfCCis0_seq1"},
83648 +{MI_WriteDmaSysCntxDesc,       "MI_WriteDmaSysCntxDesc"},
83649 +{MI_TimesliceDmaQueueOverflow, "MI_TimesliceDmaQueueOverflow"},
83650 +{MI_DequeueNonSysCntxThread1,  "MI_DequeueNonSysCntxThread1"},
83651 +{MI_DequeueNonSysCntxThread1_seq1,     "MI_DequeueNonSysCntxThread1_seq1"},
83652 +{MI_TestThreadQueueEmpty,      "MI_TestThreadQueueEmpty"},
83653 +{MI_ClearThreadQueueIfCC,      "MI_ClearThreadQueueIfCC"},
83654 +{MI_DequeueSysCntxThread1,     "MI_DequeueSysCntxThread1"},
83655 +{MI_DequeueSysCntxThread1_seq1,        "MI_DequeueSysCntxThread1_seq1"},
83656 +{MI_TProcStartUpGeneric,       "MI_TProcStartUpGeneric"},
83657 +{MI_WaitForPCload2,    "MI_WaitForPCload2"},
83658 +{MI_WaitForNPCWrite,   "MI_WaitForNPCWrite"},
83659 +{MI_WaitForEventWaitAddr,      "MI_WaitForEventWaitAddr"},
83660 +{MI_WaitForWaitEventAccess,    "MI_WaitForWaitEventAccess"},
83661 +{MI_WaitForWaitEventAccess_seq1,       "MI_WaitForWaitEventAccess_seq1"},
83662 +{MI_WaitForWaitEventDesc,      "MI_WaitForWaitEventDesc"},
83663 +{MI_WaitForEventReadTy0,       "MI_WaitForEventReadTy0"},
83664 +{MI_SendCondTestFail,  "MI_SendCondTestFail"},
83665 +{MI_InputMoveToNextTrans,      "MI_InputMoveToNextTrans"},
83666 +{MI_ThreadUpdateSysCntxBack,   "MI_ThreadUpdateSysCntxBack"},
83667 +{MI_FinishedSetEvent,  "MI_FinishedSetEvent"},
83668 +{MI_EventIntUpdateBPtr,        "MI_EventIntUpdateBPtr"},
83669 +{MI_EventQueueOverflow,        "MI_EventQueueOverflow"},
83670 +{MI_MaskLowerSource,   "MI_MaskLowerSource"},
83671 +{MI_DmaLoop,   "MI_DmaLoop"},
83672 +{MI_SendNullSetEvent,  "MI_SendNullSetEvent"},
83673 +{MI_SendFinalSetEvent, "MI_SendFinalSetEvent"},
83674 +{MI_TestNackFailIsZero1,       "MI_TestNackFailIsZero1"},
83675 +{MI_DmaPacketTimedOutOrPacketError,    "MI_DmaPacketTimedOutOrPacketError"},
83676 +{MI_NextPacketIsLast,  "MI_NextPacketIsLast"},
83677 +{MI_TestForZeroLengthDma,      "MI_TestForZeroLengthDma"},
83678 +{MI_WaitForPCload,     "MI_WaitForPCload"},
83679 +{MI_ReadInIns, "MI_ReadInIns"},
83680 +{MI_WaitForInsRead,    "MI_WaitForInsRead"},
83681 +{MI_WaitForLocals,     "MI_WaitForLocals"},
83682 +{MI_WaitForOutsWrite,  "MI_WaitForOutsWrite"},
83683 +{MI_WaitForWaitEvWrBack,       "MI_WaitForWaitEvWrBack"},
83684 +{MI_WaitForLockRead,   "MI_WaitForLockRead"},
83685 +{MI_TestQueueLock,     "MI_TestQueueLock"},
83686 +{MI_InputIdWriteAddrAndType,   "MI_InputIdWriteAddrAndType"},
83687 +{MI_InputIdWriteAddrAndType2,  "MI_InputIdWriteAddrAndType2"},
83688 +{MI_ThreadInputIdentify2,      "MI_ThreadInputIdentify2"},
83689 +{MI_WriteIntoTrapArea0,        "MI_WriteIntoTrapArea0"},
83690 +{MI_GenQueueBlockWrAddr,       "MI_GenQueueBlockWrAddr"},
83691 +{MI_InputDiscardFreeLock,      "MI_InputDiscardFreeLock"},
83692 +{MI_WriteIntoTrapArea1,        "MI_WriteIntoTrapArea1"},
83693 +{MI_WriteIntoTrapArea2,        "MI_WriteIntoTrapArea2"},
83694 +{MI_ResetBPtrToBase,   "MI_ResetBPtrToBase"},
83695 +{MI_InputDoTrap,       "MI_InputDoTrap"},
83696 +{MI_RemoteDmaCntxt0Update,     "MI_RemoteDmaCntxt0Update"},
83697 +{MI_ClearQueueLock,    "MI_ClearQueueLock"},
83698 +{MI_IProcTrappedBlockWriteData,        "MI_IProcTrappedBlockWriteData"},
83699 +{MI_FillContextFilter, "MI_FillContextFilter"},
83700 +{MI_IProcTrapped4,     "MI_IProcTrapped4"},
83701 +{MI_RunSysCntxDma,     "MI_RunSysCntxDma"},
83702 +{MI_ChainedEventError, "MI_ChainedEventError"},
83703 +{MI_InputTrappingEOP,  "MI_InputTrappingEOP"},
83704 +{MI_CheckForRunIfZero, "MI_CheckForRunIfZero"},
83705 +{MI_TestForBreakOrSuspend,     "MI_TestForBreakOrSuspend"},
83706 +{MI_SwapForRunable,    "MI_SwapForRunable"},
83707 Index: linux-2.4.21/include/elan3/neterr_rpc.h
83708 ===================================================================
83709 --- linux-2.4.21.orig/include/elan3/neterr_rpc.h        2004-02-23 16:02:56.000000000 -0500
83710 +++ linux-2.4.21/include/elan3/neterr_rpc.h     2005-06-01 23:12:54.728419496 -0400
83711 @@ -0,0 +1,68 @@
83712 +/*
83713 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83714 + *
83715 + *    For licensing information please see the supplied COPYING file
83716 + *
83717 + */
83718 +
83719 +#ifndef __ELAN3_NETERR_RPC_H
83720 +#define __ELAN3_NETERR_RPC_H
83721 +
83722 +#ident "$Id: neterr_rpc.h,v 1.20 2003/06/26 16:05:22 fabien Exp $"
83723 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/neterr_rpc.h,v $*/
83724 +
83725 +#define NETERR_SERVICE "neterr-srv"
83726 +#define NETERR_PROGRAM ((u_long) 170002)
83727 +#define NETERR_VERSION ((u_long) 1)
83728 +
83729 +#define NETERR_NULL_RPC                0
83730 +#define NETERR_FIXUP_RPC       1
83731 +
83732 +/* network error rpc timeout */
83733 +#define NETERR_RPC_TIMEOUT     5
83734 +
83735 +/*
83736 + * XDR functions for Tru64 and Linux in userspace. 
83737 + *  NB Linux kernelspace xdr routines are in network_error.
83738 + *     and *must* be kept consistent.
83739 + */
83740 +#if defined(DIGITAL_UNIX) || !defined(__KERNEL__)
83741 +bool_t
83742 +xdr_capability (XDR *xdrs, void *arg)
83743 +{
83744 +    ELAN_CAPABILITY *cap = (ELAN_CAPABILITY *) arg;
83745 +
83746 +    return (xdr_opaque (xdrs, (caddr_t) &cap->cap_userkey, sizeof (cap->cap_userkey)) &&
83747 +           xdr_int (xdrs, &cap->cap_version) &&
83748 +           xdr_u_short (xdrs, &cap->cap_type) &&
83749 +           xdr_int (xdrs, &cap->cap_lowcontext) &&
83750 +           xdr_int (xdrs, &cap->cap_highcontext) &&
83751 +           xdr_int (xdrs, &cap->cap_mycontext) &&
83752 +           xdr_int (xdrs, &cap->cap_lownode) &&
83753 +           xdr_int (xdrs, &cap->cap_highnode) &&
83754 +           xdr_u_int (xdrs, &cap->cap_railmask) &&
83755 +           xdr_opaque (xdrs, (caddr_t) &cap->cap_bitmap[0], sizeof (cap->cap_bitmap)));
83756 +}
83757 +
83758 +bool_t
83759 +xdr_neterr_msg (XDR *xdrs, void *req)
83760 +{
83761 +    NETERR_MSG *msg = (NETERR_MSG *) req;
83762 +
83763 +    return (xdr_u_int (xdrs, &msg->Rail) &&
83764 +           xdr_capability (xdrs, &msg->SrcCapability) &&
83765 +           xdr_capability (xdrs, &msg->DstCapability) &&
83766 +           xdr_u_int (xdrs, &msg->DstProcess) &&
83767 +           xdr_u_int (xdrs, &msg->CookieAddr) &&
83768 +           xdr_u_int (xdrs, &msg->CookieVProc) &&
83769 +           xdr_u_int (xdrs, &msg->NextCookie) &&
83770 +           xdr_u_int (xdrs, &msg->WaitForEop));
83771 +}
83772 +#endif /* DIGITAL_UNIX || !__KERNEL__ */
83773 +
83774 +/*
83775 + * Local variables:
83776 + * c-file-style: "stroustrup"
83777 + * End:
83778 + */
83779 +#endif /* __ELAN3_NETERR_RPC_H */
83780 Index: linux-2.4.21/include/elan3/perm.h
83781 ===================================================================
83782 --- linux-2.4.21.orig/include/elan3/perm.h      2004-02-23 16:02:56.000000000 -0500
83783 +++ linux-2.4.21/include/elan3/perm.h   2005-06-01 23:12:54.728419496 -0400
83784 @@ -0,0 +1,29 @@
83785 +/*
83786 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83787 + *
83788 + *    For licensing information please see the supplied COPYING file
83789 + *
83790 + */
83791 +
83792 +#ifndef __ELAN3_PERM_H
83793 +#define __ELAN3_PERM_H
83794 +
83795 +#ident "$Id: perm.h,v 1.7 2003/09/24 13:57:24 david Exp $"
83796 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/perm.h,v $*/
83797 +
83798 +#define ELAN3_PERM_NULL                0x00
83799 +#define ELAN3_PERM_LOCAL_READ  0x04
83800 +#define ELAN3_PERM_READ                0x08
83801 +#define ELAN3_PERM_NOREMOTE    0x0c
83802 +#define ELAN3_PERM_REMOTEREAD  0x10
83803 +#define ELAN3_PERM_REMOTEWRITE 0x14
83804 +#define ELAN3_PERM_REMOTEEVENT 0x18
83805 +#define ELAN3_PERM_REMOTEALL   0x1c
83806 +
83807 +#endif /* __ELAN3_PERM_H */
83808 +
83809 +/*
83810 + * Local variables:
83811 + * c-file-style: "stroustrup"
83812 + * End:
83813 + */
83814 Index: linux-2.4.21/include/elan3/pte.h
83815 ===================================================================
83816 --- linux-2.4.21.orig/include/elan3/pte.h       2004-02-23 16:02:56.000000000 -0500
83817 +++ linux-2.4.21/include/elan3/pte.h    2005-06-01 23:12:54.729419344 -0400
83818 @@ -0,0 +1,139 @@
83819 +/*
83820 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83821 + *
83822 + *    For licensing information please see the supplied COPYING file
83823 + *
83824 + */
83825 +
83826 +#ifndef __ELAN3_PTE_H
83827 +#define __ELAN3_PTE_H
83828 +
83829 +#ident "$Id: pte.h,v 1.26 2003/09/24 13:57:24 david Exp $"
83830 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/pte.h,v $*/
83831 +
83832 +#ifdef __cplusplus
83833 +extern          "C"
83834 +{
83835 +#endif
83836 +
83837 +#include <elan3/e3types.h>
83838 +#include <elan3/perm.h>
83839 +
83840 +typedef E3_uint64 ELAN3_PTE;
83841 +typedef E3_uint32 ELAN3_PTP;
83842 +
83843 +#define ELAN3_PTE_SIZE         (8)
83844 +#define ELAN3_PTP_SIZE         (4)
83845 +
83846 +#define ELAN3_PTE_REF          ((E3_uint64) 1 << 63)           /* 63      - referenced bit */
83847 +#define ELAN3_PTE_MOD          ((E3_uint64) 1 << 55)           /* 55      - modified bit */
83848 +#define ELAN3_RM_MASK          (ELAN3_PTE_REF | ELAN3_PTE_MOD)
83849 +
83850 +#define ELAN3_PTE_PFN_MASK     0x0000fffffffff000ull           /* [12:48] - Physical address */
83851 +
83852 +#define ELAN3_PTE_BIG_ENDIAN   0x80                            /* 7       - big endian */
83853 +#define ELAN3_PTE_64_BIT               0x40                            /* 6       - 64 bit pci address */
83854 +#define ELAN3_PTE_LOCAL                0x20                            /* 5       - local sdram */
83855 +
83856 +#define ELAN3_PTE_PERM_MASK    0x1c                            /* [2:4]   - Permissions */
83857 +#define ELAN3_PTE_PERM_SHIFT      2
83858 +
83859 +#define ELAN3_ET_MASK          0x3
83860 +#define ELAN3_ET_INVALID               0x0                                     /* [0:1] */
83861 +#define ELAN3_ET_PTP           0x1
83862 +#define ELAN3_ET_PTE           0x2
83863 +
83864 +#define ELAN3_INVALID_PTP      ((ELAN3_PTP) 0)
83865 +#define ELAN3_INVALID_PTE      ((ELAN3_PTE) 0)
83866 +
83867 +#define ELAN3_PTP_TYPE(ptp)    ((ptp) & ELAN3_ET_MASK)
83868 +#define ELAN3_PTE_TYPE(pte)    ((pte) & ELAN3_ET_MASK)
83869 +#define ELAN3_PTE_PERM(pte)    ((pte) & ELAN3_PTE_PERM_MASK)
83870 +#define ELAN3_PTE_VALID(pte)   (((pte) & ELAN3_ET_MASK) == ELAN3_ET_PTE)
83871 +#define ELAN3_PTE_ISREF(pte)   ((pte) & ELAN3_PTE_REF)
83872 +#define ELAN3_PTE_ISMOD(pte)   ((pte) & ELAN3_PTE_MOD)
83873 +#define ELAN3_PTE_WRITEABLE(pte)       (ELAN3_PERM_WRITEABLE(ELAN3_PTE_PERM(pte)))
83874 +
83875 +#define ELAN3_PERM_WRITEABLE(perm)     ((perm) == ELAN3_PERM_NOREMOTE || (perm) > ELAN3_PERM_REMOTEREAD)
83876 +#define ELAN3_PERM_REMOTE(perm)                ((perm) > ELAN3_PERM_NOREMOTE)
83877 +
83878 +#define ELAN3_PERM_READONLY(perm)      ((perm) == ELAN3_PERM_NOREMOTE ? ELAN3_PERM_LOCAL_READ : \
83879 +                                        (perm) > ELAN3_PERM_REMOTEREAD ? ELAN3_PERM_READ : (perm))
83880 +#if PAGE_SHIFT == 12
83881 +#  define ELAN3_PAGE_SHIFT     12
83882 +#else
83883 +#  define ELAN3_PAGE_SHIFT     13
83884 +#endif
83885 +
83886 +#define ELAN3_PAGE_SIZE                (1 << ELAN3_PAGE_SHIFT)
83887 +#define ELAN3_PAGE_OFFSET      (ELAN3_PAGE_SIZE-1)
83888 +#define ELAN3_PAGE_MASK                (~ELAN3_PAGE_OFFSET)
83889 +
83890 +#if ELAN3_PAGE_SHIFT == 13
83891 +#  define ELAN3_L3_SHIFT               5
83892 +#else
83893 +#  define ELAN3_L3_SHIFT               6
83894 +#endif
83895 +#define ELAN3_L2_SHIFT         6
83896 +#define ELAN3_L1_SHIFT         8
83897 +
83898 +/* Number of entries in a given level ptbl */
83899 +#define ELAN3_L3_ENTRIES               (1 << ELAN3_L3_SHIFT)
83900 +#define ELAN3_L2_ENTRIES               (1 << ELAN3_L2_SHIFT)
83901 +#define ELAN3_L1_ENTRIES               (1 << ELAN3_L1_SHIFT)
83902 +
83903 +/* Virtual address spanned by each entry */
83904 +#define ELAN3_L3_SIZE          (1 << (ELAN3_PAGE_SHIFT))
83905 +#define ELAN3_L2_SIZE          (1 << (ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
83906 +#define ELAN3_L1_SIZE          (1 << (ELAN3_L3_SHIFT+ELAN3_L2_SHIFT+ELAN3_PAGE_SHIFT))
83907 +
83908 +/* Virtual address size of page table */
83909 +#define ELAN3_L1_PTSIZE          (ELAN3_L1_ENTRIES * ELAN3_L1_SIZE)
83910 +#define ELAN3_L3_PTSIZE                (ELAN3_L3_ENTRIES * ELAN3_L3_SIZE)
83911 +#define ELAN3_L2_PTSIZE                (ELAN3_L2_ENTRIES * ELAN3_L2_SIZE)
83912 +
83913 +/* Mask for offset into page table */
83914 +#define ELAN3_L1_PTOFFSET        ((ELAN3_L1_SIZE*ELAN3_L1_ENTRIES)-1)
83915 +#define ELAN3_L3_PTOFFSET      ((ELAN3_L3_SIZE*ELAN3_L3_ENTRIES)-1)
83916 +#define ELAN3_L2_PTOFFSET      ((ELAN3_L2_SIZE*ELAN3_L2_ENTRIES)-1)
83917 +
83918 +#define ELAN3_L1_INDEX(addr)   (((E3_Addr) (addr) & 0xFF000000) >> (ELAN3_L2_SHIFT+ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
83919 +#define ELAN3_L2_INDEX(addr)   (((E3_Addr) (addr) & 0x00FC0000) >> (ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
83920 +#define ELAN3_L3_INDEX(addr)   (((E3_Addr) (addr) & 0x0003F000) >> ELAN3_PAGE_SHIFT)
83921 +
83922 +#define        ELAN3_L1_BASE(addr)     (((E3_Addr)(addr)) & 0x00000000)
83923 +#define        ELAN3_L2_BASE(addr)     (((E3_Addr)(addr)) & 0xFF000000)
83924 +#define        ELAN3_L3_BASE(addr)     (((E3_Addr)(addr)) & 0xFFFC0000)
83925 +
83926 +/* Convert a page table pointer entry to the PT */
83927 +#define PTP_TO_PT_PADDR(ptp)   ((E3_Addr)(ptp & 0xFFFFFFFC))
83928 +
83929 +#ifdef __KERNEL__
83930 +/*
83931 + * incompatible access for permission macro.
83932 + */
83933 +extern  u_char  elan3mmu_permissionTable[8];
83934 +#define ELAN3_INCOMPAT_ACCESS(perm,access) (! (elan3mmu_permissionTable[(perm)>>ELAN3_PTE_PERM_SHIFT] & (1 << (access))))
83935 +
83936 +#define elan3_readptp(dev, ptp)                (elan3_sdram_readl (dev, ptp))
83937 +#define elan3_writeptp(dev, ptp, value)        (elan3_sdram_writel (dev, ptp, value))
83938 +#define elan3_readpte(dev, pte)                (elan3_sdram_readq (dev, pte))
83939 +#define elan3_writepte(dev,pte, value) (elan3_sdram_writeq (dev, pte, value))
83940 +
83941 +#define elan3_invalidatepte(dev, pte)  (elan3_sdram_writel (dev, pte, 0))
83942 +#define elan3_modifypte(dev,pte,new)   (elan3_sdram_writel (dev, pte, (int) (new)))
83943 +#define elan3_clrref(dev,pte)          (elan3_sdram_writeb (dev, pte + 7, 0))
83944 +
83945 +#endif /* __KERNEL__ */
83946 +
83947 +#ifdef __cplusplus
83948 +}
83949 +#endif
83950 +
83951 +#endif /* __ELAN3_PTE_H */
83952 +
83953 +/*
83954 + * Local variables:
83955 + * c-file-style: "stroustrup"
83956 + * End:
83957 + */
83958 Index: linux-2.4.21/include/elan3/spinlock.h
83959 ===================================================================
83960 --- linux-2.4.21.orig/include/elan3/spinlock.h  2004-02-23 16:02:56.000000000 -0500
83961 +++ linux-2.4.21/include/elan3/spinlock.h       2005-06-01 23:12:54.729419344 -0400
83962 @@ -0,0 +1,195 @@
83963 +/*
83964 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83965 + *
83966 + *    For licensing information please see the supplied COPYING file
83967 + *
83968 + */
83969 +
83970 +#ifndef _ELAN3_SPINLOCK_
83971 +#define _ELAN3_SPINLOCK_
83972 +
83973 +#ident "$Id: spinlock.h,v 1.31 2003/09/24 13:57:24 david Exp $"
83974 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/spinlock.h,v $*/
83975 +
83976 +/*
83977 + * This spinlock is designed for main/elan processor interactions.
83978 + * The lock is split over Elan/Main memory in such a way that
83979 + * we don't end up busy-polling over the PCI.
83980 + * In the Elan memory we have two words; one is a sequence number
83981 + * and the other is a lock word for main.
83982 + * In main memory we have a copy of the sequence number which main polls when it is
83983 + * waiting for the Elan to drop the lock. Main polls this word until it becomes
83984 + * equal to the sequence number it sampled.
83985 + * The Elan drops the lock by writing the current sequence number to main memory.
83986 + * It is coded to always give priority to the Elan thread, and so when both go for the
83987 + * lock, main will back off first.
83988 + *
83989 + * 18/3/98
83990 + * This has been extended to avoid a starvation case where both the main and thread claim the
83991 + * lock and so both backoff (thread does a break). So now, main attempts to claim the
83992 + * lock by writing 'mainLock' then samples the 'sl_seq' and if it has the lock
83993 + * it sets 'mainGotLock'. The thread will now see the 'sl_mainLock' set, but will only
83994 + * backoff with a c_break_busywait() if 'mainGotLock' is set too.
83995 + */
83996 +typedef struct elan3_spinlock_elan {
83997 +    union {
83998 +       volatile E3_uint64      mainLocks;              /* main writes this dble word */
83999 +       struct {
84000 +           volatile E3_uint32  mainLock;               /* main wants a lock */
84001 +           volatile E3_uint32  mainGotLock;            /* main has the lock */
84002 +       } s;
84003 +    } sl_u;
84004 +    volatile E3_uint32         sl_seq;                 /* thread owns this word */
84005 +    volatile E3_uint32         sl_mainWait;            /* performance counter */
84006 +    volatile E3_uint32         sl_elanWait;            /* performance counter */
84007 +    volatile E3_uint32         sl_elanBusyWait;        /* performance counter */
84008 +    /* NOTE: The lock/seq words must be within the same 32-byte Elan cache-line */
84009 +    E3_uint64                   sl_pad[5];             /* pad to 64-bytes */
84010 +} ELAN3_SPINLOCK_ELAN;
84011 +
84012 +#define sl_mainLocks sl_u.mainLocks
84013 +#define sl_mainLock  sl_u.s.mainLock
84014 +#define sl_mainGotLock sl_u.s.mainGotLock
84015 +
84016 +#define SL_MAIN_RECESSIVE      1
84017 +#define SL_MAIN_DOMINANT       2
84018 +
84019 +/* Declare this as a main memory cache block for efficiency */
84020 +typedef union elan3_spinlock_main {
84021 +    volatile E3_uint32         sl_seq;                 /* copy of seq number updated by Elan */
84022 +    volatile E3_uint32         sl_Int32[E3_BLK_SIZE/sizeof (E3_uint32)];
84023 +} ELAN3_SPINLOCK_MAIN;
84024 +
84025 +/* Main/Main or Elan/Elan lock word */
84026 +typedef volatile int   ELAN3_SPINLOCK;
84027 +
84028 +#ifdef __ELAN3__
84029 +
84030 +/* Main/Elan interlock */
84031 +
84032 +#define ELAN3_ME_SPINENTER(SLE,SL) do {\
84033 +                       asm volatile ("! elan3_spinlock store barrier");\
84034 +                       (SLE)->sl_seq++; \
84035 +                       if ((SLE)->sl_mainLock) \
84036 +                         elan3_me_spinblock(SLE, SL);\
84037 +                       asm volatile ("! elan3_spinlock store barrier");\
84038 +               } while (0)
84039 +#define ELAN3_ME_SPINEXIT(SLE,SL) do {\
84040 +                       asm volatile ("! elan3_spinlock store barrier");\
84041 +                       (SL)->sl_seq = (SLE)->sl_seq;\
84042 +                       asm volatile ("! elan3_spinlock store barrier");\
84043 +               } while (0)
84044 +
84045 +
84046 +/* Elan/Elan interlock */
84047 +#define ELAN3_SPINENTER(L)     do {\
84048 +                          asm volatile ("! store barrier");\
84049 +                          if (c_swap ((L), 1)) elan3_spinenter(L);\
84050 +                          asm volatile ("! store barrier");\
84051 +                       } while (0)
84052 +#define ELAN3_SPINEXIT(L)      do {\
84053 +                          asm volatile ("! store barrier");\
84054 +                          c_swap((L), 0);\
84055 +                          asm volatile ("! store barrier");\
84056 +                       } while (0)
84057 +
84058 +extern void elan3_me_spinblock (ELAN3_SPINLOCK_ELAN *sle, ELAN3_SPINLOCK_MAIN *sl);
84059 +extern void elan3_spinenter (ELAN3_SPINLOCK *l);
84060 +
84061 +#else                     
84062 +
84063 +/* Main/Elan interlock */
84064 +#ifdef DEBUG
84065 +#define ELAN3_ME_SPINENTER(SDRAM,SLE,SL) do {\
84066 +                       register E3_int32 maxLoops = 0x7fffffff;        \
84067 +                       register E3_uint32 seq;\
84068 +                       elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84069 +                       MEMBAR_STORELOAD(); \
84070 +                       seq = elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84071 +                       while (seq != (SL)->sl_seq) {\
84072 +                           elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), 0); \
84073 +                           while ((SL)->sl_seq == (seq-1) && maxLoops--) ; \
84074 +                           if (maxLoops < 0) { \
84075 +                               printf("Failed to get ME lock %lx/%lx seq %d sle_seq %d sl_seq %d\n", \
84076 +                                      SL, SLE, seq, \
84077 +                                      elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)), \
84078 +                                      (SL)->sl_seq); \
84079 +                           } \
84080 +                           elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84081 +                           MEMBAR_STORELOAD(); \
84082 +                           seq = elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84083 +                       }\
84084 +                       elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
84085 +                       MEMBAR_LOADLOAD();\
84086 +               } while (0)
84087 +#else
84088 +#define ELAN3_ME_SPINENTER(SDRAM,SLE,SL) do {\
84089 +                       register E3_uint32 seq;\
84090 +                       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84091 +                       MEMBAR_STORELOAD(); \
84092 +                       seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84093 +                       while (seq != (SL)->sl_seq) {\
84094 +                           elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), 0); \
84095 +                           while ((SL)->sl_seq == (seq-1)) ; \
84096 +                           elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84097 +                           MEMBAR_STORELOAD(); \
84098 +                           seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84099 +                       }\
84100 +                       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
84101 +                       MEMBAR_LOADLOAD();\
84102 +               } while (0)
84103 +#endif
84104 +#define ELAN3_ME_FORCEENTER(SDRAM,SLE,SL) do { \
84105 +       register E3_uint32 seq; \
84106 +       MEMBAR_STORELOAD(); \
84107 +       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_DOMINANT); \
84108 +       MEMBAR_STORELOAD(); \
84109 +       seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84110 +       while (seq != (SL)->sl_seq) \
84111 +       { \
84112 +               /* NOTE: we MUST call elan3_usecspin here for kernel comms */\
84113 +               while ((SL)->sl_seq == (seq)-1) \
84114 +                       elan3_usecspin (1); \
84115 +               seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84116 +       } \
84117 +       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
84118 +       MEMBAR_LOADLOAD(); \
84119 +} while (0)
84120 +
84121 +#define ELAN3_ME_TRYENTER(SDRAM,SLE,SL,SEQ) do { \
84122 +    elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84123 +    MEMBAR_STORELOAD(); \
84124 +    SEQ = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84125 +} while (0)
84126 +
84127 +#define ELAN3_ME_CHECKENTER(SDRAM,SLE,SL,SEQ) do { \
84128 +    if ((SEQ) == ((SL)->sl_seq)) { \
84129 +        elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
84130 +        MEMBAR_LOADLOAD();\
84131 +    } \
84132 +    else ELAN3_ME_SPINENTER(SDRAM,SLE,SL); \
84133 +} while (0)
84134 +       
84135 +#define ELAN3_ME_SPINEXIT(SDRAM,SLE,SL) do {\
84136 +                       MEMBAR_STORESTORE(); \
84137 +                       elan3_write64_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLocks), 0); \
84138 +                       MEMBAR_STORESTORE(); \
84139 +               } while (0)
84140 +
84141 +
84142 +/* Main/Main */
84143 +#define ELAN3_SPINENTER(L)     do {\
84144 +                          while (c_swap ((L), 1)) ; \
84145 +                       } while (0)
84146 +#define ELAN3_SPINEXIT(L)      do {\
84147 +                          c_swap((L), 0);\
84148 +                       } while (0)
84149 +#endif /* __ELAN3__ */
84150 +
84151 +#endif /* _ELAN3_SPINLOCK_ */
84152 +
84153 +/*
84154 + * Local variables:
84155 + * c-file-style: "stroustrup"
84156 + * End:
84157 + */
84158 Index: linux-2.4.21/include/elan3/thread.h
84159 ===================================================================
84160 --- linux-2.4.21.orig/include/elan3/thread.h    2004-02-23 16:02:56.000000000 -0500
84161 +++ linux-2.4.21/include/elan3/thread.h 2005-06-01 23:12:54.730419192 -0400
84162 @@ -0,0 +1,137 @@
84163 +/*
84164 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84165 + *
84166 + *    For licensing information please see the supplied COPYING file
84167 + *
84168 + */
84169 +
84170 +#ifndef _ELAN3_THREAD_H
84171 +#define _ELAN3_THREAD_H
84172 +
84173 +#ident "$Id: thread.h,v 1.17 2002/08/09 11:23:34 addy Exp $"
84174 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/thread.h,v $*/
84175 +
84176 +/* Alignment for a stack frame */
84177 +#define E3_STACK_ALIGN         (64)
84178 +
84179 +typedef struct _E3_Frame {
84180 +    E3_uint32         fr_local[8];             /* saved locals (not used) */
84181 +    E3_uint32         fr_arg[6];               /* saved arguements o0 -> o5 */
84182 +    E3_Addr           fr_savefp;               /* saved frame pointer o6 */
84183 +    E3_Addr           fr_savepc;               /* saved program counter o7 */
84184 +    E3_Addr           fr_stret;                /* stuct return addr */
84185 +    E3_uint32         fr_argd[6];              /* arg dump area */
84186 +    E3_uint32         fr_argx[1];              /* array of args past the sixth */
84187 +} E3_Frame;
84188 +
84189 +typedef struct _E3_Stack {
84190 +    E3_uint32          Locals[8];
84191 +    E3_uint32          Ins[8];
84192 +    E3_uint32          Globals[8];
84193 +    E3_uint32          Outs[8];
84194 +} E3_Stack;
84195 +
84196 +typedef struct _E3_OutsRegs {
84197 +   E3_uint32   o[8];                           /* o6 == pc, o7 == fptr */
84198 +} E3_OutsRegs;
84199 +
84200 +/*
84201 + * "Magic" value for stack pointer to be ignored.
84202 + */
84203 +#define VanishingStackPointer  0x42
84204 +
84205 +
84206 +/*
84207 + * When the Elan traps the N & Z CC bits are held in the NPC
84208 + * and the V & C bits are in the PC
84209 + */
84210 +#define PSR_C_BIT      (1)
84211 +#define PSR_V_BIT      (2)
84212 +#define PSR_Z_BIT      (1)
84213 +#define PSR_N_BIT      (2)
84214 +#define CC_MASK                (3)
84215 +#define PC_MASK        (~3)
84216 +#define SP_MASK                (~3)
84217 +
84218 +/*
84219 + * Threads processor Opcodes.
84220 + */
84221 +#define OPCODE_MASK            (0xC1F80000)
84222 +#define OPCODE_IMM             (1 << 13)
84223 +
84224 +#define OPCODE_CLASS(instr)    ((instr) & 0xC0000000)
84225 +#define OPCODE_CLASS_0         0x00000000
84226 +#define OPCODE_CLASS_1         0x40000000
84227 +#define OPCODE_CLASS_2         0x80000000
84228 +#define OPCODE_CLASS_3         0xC0000000
84229 +
84230 +#define OPCODE_CPOP            0x81B00000
84231 +#define OPCODE_Ticc            0x81D00000
84232 +
84233 +#define OPCODE_FCODE_SHIFT     19
84234 +#define OPCODE_FCODE_MASK      0x1f
84235 +#define OPCODE_NOT_ALUOP       0x01000000
84236 +
84237 +#define OPCODE_SLL             0x81280000
84238 +#define OPCODE_SRL             0x81300000
84239 +#define OPCODE_SRA             0x81380000
84240 +
84241 +#define OPCODE_OPEN            0x81600000
84242 +#define OPCODE_CLOSE           0x81680000
84243 +#define OPCODE_BREAKTEST       0x81700000
84244 +
84245 +#define OPCODE_BREAK           0x81a00000
84246 +#define OPCODE_SUSPEND         0x81a80000
84247 +#define OPCODE_WAIT            0x81b00000
84248 +
84249 +#define OPCODE_JMPL            0x81c00000
84250 +
84251 +#define OPCODE_LD              0xC0000000
84252 +#define OPCODE_LDD             0xC0180000
84253 +
84254 +#define OPCODE_LDBLOCK16       0xC0900000
84255 +#define OPCODE_LDBLOCK32       0xC0800000
84256 +#define OPCODE_LDBLOCK64       0xC0980000
84257 +
84258 +#define OPCODE_ST              0xC0200000
84259 +#define OPCODE_STD             0xC0380000
84260 +
84261 +#define OPCODE_SWAP            0xC0780000
84262 +
84263 +#define OPCODE_STBLOCK16       0xC0b00000
84264 +#define OPCODE_STBLOCK32       0xC0a00000
84265 +#define OPCODE_STBLOCK64       0xC0b80000
84266 +
84267 +#define OPCODE_CLASS0_MASK     0xC1C00000
84268 +#define OPCODE_SETHI           0x01000000
84269 +#define OPCODE_BICC            0x00800000
84270 +#define OPCODE_SENDREG         0x01800000
84271 +#define OPCODE_SENDMEM         0x01c00000
84272 +
84273 +#define OPCODE_BICC_BN         0x00000000
84274 +#define OPCODE_BICC_BE         0x02000000
84275 +#define OPCODE_BICC_BLE                0x04000000
84276 +#define OPCODE_BICC_BL         0x06000000
84277 +#define OPCODE_BICC_BLEU       0x08000000
84278 +#define OPCODE_BICC_BCS                0x0A000000
84279 +#define OPCODE_BICC_BNEG       0x0C000000
84280 +#define OPCODE_BICC_BVS                0x0E000000
84281 +
84282 +#define OPCODE_BICC_MASK       0x0E000000
84283 +#define OPCODE_BICC_ANNUL      0x20000000
84284 +
84285 +#define INSTR_RS2(instr)       (((instr) >>  0) & 0x1F)
84286 +#define INSTR_RS1(instr)       (((instr) >> 14) & 0x1F)
84287 +#define INSTR_RD(instr)                (((instr) >> 25) & 0x1F)
84288 +#define INSTR_IMM(instr)       (((instr) & 0x1000) ? ((instr) & 0xFFF) | 0xFFFFF000 : (instr) & 0xFFF)
84289 +
84290 +#define Ticc_COND(instr)       INSTR_RD(instr)
84291 +#define Ticc_TA                        8
84292 +
84293 +#endif /* _ELAN3_THREAD_H */
84294 +
84295 +/*
84296 + * Local variables:
84297 + * c-file-style: "stroustrup"
84298 + * End:
84299 + */
84300 Index: linux-2.4.21/include/elan3/threadlinkage.h
84301 ===================================================================
84302 --- linux-2.4.21.orig/include/elan3/threadlinkage.h     2004-02-23 16:02:56.000000000 -0500
84303 +++ linux-2.4.21/include/elan3/threadlinkage.h  2005-06-01 23:12:54.730419192 -0400
84304 @@ -0,0 +1,103 @@
84305 +/*
84306 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84307 + *
84308 + *    For licensing information please see the supplied COPYING file
84309 + *
84310 + */
84311 +
84312 +#ifndef __ELAN3_THREADLINKAGE_H
84313 +#define        __ELAN3_THREADLINKAGE_H
84314 +
84315 +#ident "$Id: threadlinkage.h,v 1.6 2002/08/09 11:23:34 addy Exp $"
84316 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/threadlinkage.h,v $*/
84317 +
84318 +#ifdef __cplusplus
84319 +extern "C" {
84320 +#endif
84321 +
84322 +#if defined(_ASM) || defined(__LANGUAGE_ASSEMBLY__)
84323 +
84324 +/*
84325 + * Macro to define weak symbol aliases. These are similar to the ANSI-C
84326 + *     #pragma weak name = _name
84327 + * except a compiler can determine type. The assembler must be told. Hence,
84328 + * the second parameter must be the type of the symbol (i.e.: function,...)
84329 + */
84330 +#define        ANSI_PRAGMA_WEAK(sym, stype)    \
84331 +       .weak   sym; \
84332 +       .type sym, #stype; \
84333 +/* CSTYLED */ \
84334 +sym    = _/**/sym
84335 +
84336 +/*
84337 + * ENTRY provides the standard procedure entry code
84338 + */
84339 +#define        ENTRY(x) \
84340 +       .section        ".text"; \
84341 +       .align  4; \
84342 +       .global x; \
84343 +x:
84344 +
84345 +/*
84346 + * ENTRY2 is identical to ENTRY but provides two labels for the entry point.
84347 + */
84348 +#define        ENTRY2(x, y) \
84349 +       .section        ".text"; \
84350 +       .align  4; \
84351 +       .global x, y; \
84352 +/* CSTYLED */ \
84353 +x:     ; \
84354 +y:
84355 +
84356 +
84357 +/*
84358 + * ALTENTRY provides for additional entry points.
84359 + */
84360 +#define        ALTENTRY(x) \
84361 +       .global x; \
84362 +x:
84363 +
84364 +/*
84365 + * DGDEF and DGDEF2 provide global data declarations.
84366 + *
84367 + * DGDEF provides a word aligned word of storage.
84368 + *
84369 + * DGDEF2 allocates "sz" bytes of storage with **NO** alignment.  This
84370 + * implies this macro is best used for byte arrays.
84371 + *
84372 + * DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
84373 + */
84374 +#define        DGDEF2(name, sz) \
84375 +       .section        ".data"; \
84376 +       .global name; \
84377 +       .size   name, sz; \
84378 +name:
84379 +
84380 +#define        DGDEF3(name, sz, algn) \
84381 +       .section        ".data"; \
84382 +       .align  algn; \
84383 +       .global name; \
84384 +       .size   name, sz; \
84385 +name:
84386 +
84387 +#define        DGDEF(name)     DGDEF3(name, 4, 4)
84388 +
84389 +/*
84390 + * SET_SIZE trails a function and set the size for the ELF symbol table.
84391 + */
84392 +#define        SET_SIZE(x) \
84393 +       .size   x, (.-x)
84394 +
84395 +#endif /* _ASM || __LANGUAGE_ASSEMBLY__ */
84396 +
84397 +#ifdef __cplusplus
84398 +}
84399 +#endif
84400 +
84401 +#endif /* __ELAN3_THREADLINKAGE_H */
84402 +
84403 +/*
84404 + * Local variables:
84405 + * c-file-style: "stroustrup"
84406 + * End:
84407 + */
84408 Index: linux-2.4.21/include/elan3/threadsyscall.h
84409 ===================================================================
84410 --- linux-2.4.21.orig/include/elan3/threadsyscall.h     2004-02-23 16:02:56.000000000 -0500
84411 +++ linux-2.4.21/include/elan3/threadsyscall.h  2005-06-01 23:12:54.730419192 -0400
84412 @@ -0,0 +1,64 @@
84413 +/*
84414 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84415 + *
84416 + *    For licensing information please see the supplied COPYING file
84417 + *
84418 + */
84419 +
84420 +#ifndef __ELAN3_SYSCALL_H
84421 +#define __ELAN3_SYSCALL_H
84422 +
84423 +#ident "$Id: threadsyscall.h,v 1.12 2003/09/24 13:57:24 david Exp $"
84424 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/threadsyscall.h,v $*/
84425 +
84426 +/* 
84427 + * This file contains the system calls supported from the Elan.
84428 + */
84429 +#define ELAN3_DEBUG_TRAPNUM    5       /* thread debugging trap */
84430 +#define ELAN3_ABORT_TRAPNUM    6       /* bad abort trap */
84431 +#define ELAN3_ELANCALL_TRAPNUM 7       /* elansyscall trap */
84432 +#define ELAN3_SYSCALL_TRAPNUM  8       /* new syscall trap */
84433 +
84434 +#define ELAN3_T_SYSCALL_CODE   0       /* offsets in struct elan3_t_syscall */
84435 +#define ELAN3_T_SYSCALL_ERRNO  4
84436 +
84437 +#define ELAN3_SYS_open         1
84438 +#define ELAN3_SYS_close                2
84439 +#define ELAN3_SYS_write                3
84440 +#define ELAN3_SYS_read         4
84441 +#define ELAN3_SYS_poll         5
84442 +#define ELAN3_SYS_ioctl                6
84443 +#define ELAN3_SYS_lseek                7
84444 +#define ELAN3_SYS_mmap         8
84445 +#define ELAN3_SYS_munmap       9
84446 +#define ELAN3_SYS_kill         10
84447 +#define ELAN3_SYS_getpid       11
84448 +
84449 +#if !defined(SYS_getpid) && defined(__NR_getxpid) 
84450 +#define SYS_getpid __NR_getxpid                /* for linux */
84451 +#endif
84452 +
84453 +#if !defined(_ASM) && !defined(__LANGUAGE_ASSEMBLY__)
84454 +
84455 +extern int     elan3_t_open (const char *, int, ...);
84456 +extern ssize_t elan3_t_write (int, const void *, unsigned);
84457 +extern ssize_t elan3_t_read(int, void *, unsigned);
84458 +extern int     elan3_t_ioctl(int, int, ...);
84459 +extern int     elan3_t_close(int);
84460 +extern off_t   elan3_t_lseek(int filedes, off_t offset, int whence);
84461 +
84462 +extern caddr_t elan3_t_mmap(caddr_t, size_t, int, int, int, off_t);
84463 +extern int     elan3_t_munmap(caddr_t, size_t);
84464 +
84465 +extern int     elan3_t_getpid(void);
84466 +extern void    elan3_t_abort(char *str);
84467 +
84468 +#endif /* !_ASM && ! __LANGUAGE_ASSEMBLY__ */
84469 +
84470 +#endif /* __ELAN3_SYSCALL_H */
84471 +
84472 +/*
84473 + * Local variables:
84474 + * c-file-style: "stroustrup"
84475 + * End:
84476 + */
84477 Index: linux-2.4.21/include/elan3/trtype.h
84478 ===================================================================
84479 --- linux-2.4.21.orig/include/elan3/trtype.h    2004-02-23 16:02:56.000000000 -0500
84480 +++ linux-2.4.21/include/elan3/trtype.h 2005-06-01 23:12:54.731419040 -0400
84481 @@ -0,0 +1,116 @@
84482 +/*
84483 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84484 + *
84485 + *    For licensing information please see the supplied COPYING file
84486 + *
84487 + */
84488 +
84489 +#ifndef _ELAN3_TRTYPE_H
84490 +#define _ELAN3_TRTYPE_H
84491 +
84492 +#ident "$Id: trtype.h,v 1.13 2002/08/09 11:23:34 addy Exp $"
84493 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/trtype.h,v $ */
84494 +
84495 +/*<15> ackNow  */
84496 +#define TR_SENDACK     (1 << 15)
84497 +
84498 +#define TR_SIZE_SHIFT  12
84499 +#define TR_SIZE_MASK   7
84500 +
84501 +/*<14:12> Size 0, 1, 2, 4, 8, 16, 32, 64  Double Words
84502 +          Bit 14 is forced to zero currently so that only size 0, 1, 2, 4 are
84503 +         allowed    */
84504 +
84505 +#define TR_SIZE0       (0 << TR_SIZE_SHIFT)
84506 +#define TR_SIZE1       (1 << TR_SIZE_SHIFT)
84507 +#define TR_SIZE2       (2 << TR_SIZE_SHIFT)
84508 +#define TR_SIZE4       (3 << TR_SIZE_SHIFT)
84509 +#define TR_SIZE8       (4 << TR_SIZE_SHIFT)
84510 +
84511 +#define TR_64_BIT_ADDR (1 << 11)
84512 +#define TR_LAST_TRANS  (1 << 10)
84513 +
84514 +#define TR_WRITEBLOCK_BIT      (1 << 9)
84515 +#define TR_WRITEBLOCK          (TR_WRITEBLOCK_BIT | TR_SIZE8)
84516 +
84517 +
84518 +#define TR_WRITEBLOCK_SIZE     64
84519 +
84520 +/*
84521 + * write-block
84522 + */
84523 +/*     WriteBlock      <8:7>   Data type
84524 +                       <6:0>   Part write size */
84525 +#define TR_TYPE_SHIFT  7
84526 +#define TR_TYPE_MASK   ((1 << 2) - 1)
84527 +
84528 +#define TR_TYPE_BYTE   0
84529 +#define TR_TYPE_SHORT  1
84530 +#define TR_TYPE_WORD   2
84531 +#define TR_TYPE_DWORD  3
84532 +
84533 +#define TR_PARTSIZE_MASK ((1 << 7) -1)
84534 +
84535 +#define TR_WAIT_FOR_EOP        (1 << 8)
84536 +
84537 +/*
84538 + * trace-route format 
84539 + */
84540 +#define TR_TRACEROUTE0_CHANID(val)             ((val) & 1)                     /* 0     Chan Id */
84541 +#define TR_TRACEROUTE0_LINKID(val)             (((val) >> 1) & 7)              /* 1:3   Link Id */
84542 +#define TR_TRACEROUTE0_REVID(val)              (((val) >> 4) & 7)              /* 4:6   Revision ID */
84543 +#define TR_TRACEROUTE0_BCAST_TOP_PIN(val)      (((val) >> 7) & 1)              /* 7     Broadcast Top Pin (REV B) */
84544 +#define TR_TRACEROUTE0_LNR(val)                        ((val) >> 8)                    /* 8:15  Global Link Not Ready */
84545 +
84546 +#define TR_TRACEROUTE1_PRIO(val)               ((val & 0xF))                   /* 0:3   Arrival Priority (REV A) */
84547 +#define TR_TRACEROUTE1_AGE(val)                        (((val) >> 4) & 0xF)            /* 4:7   Priority Held(Age) (REV A) */
84548 +#define TR_TRACEROUTE1_ROUTE_SELECTED(val)     ((val) & 0xFF)                  /* 0:7   Arrival age (REV B) */
84549 +#define TR_TRACEROUTE1_BCAST_TOP(val)          (((val) >> 8) & 7)              /* 8:10  Broadcast Top */
84550 +#define TR_TRACEROUTE1_ADAPT(val)              (((val) >> 12) & 3)             /* 12:13 This Adaptive Value (REV A) */
84551 +#define TR_TRACEROUTE1_BCAST_BOT(val)          (((val) >> 12) & 7)             /* 12:14 Broadcast Bottom (REV B) */
84552 +
84553 +#define TR_TRACEROUTE2_ARRIVAL_AGE(val)                ((val) & 0xF)                   /* 0:3   Arrival Age (REV B) */
84554 +#define TR_TRACEROUTE2_CURR_AGE(val)           (((val) >> 4) & 0xF)            /* 4:7   Current Age (REV B) */
84555 +#define TR_TRACEROUTE2_BUSY(val)               (((val) >> 8) & 0xFF)           /* 8:15  Busy (REV B) */
84556 +
84557 +#define TR_TRACEROUTE_SIZE     32
84558 +#define TR_TRACEROUTE_ENTRIES  (TR_TRACEROUTE_SIZE/2)
84559 +
84560 +/*
84561 + * non-write block
84562 + */
84563 +#define TR_OPCODE_MASK         (((1 << 8) - 1) |                       \
84564 +                                (TR_SIZE_MASK << TR_SIZE_SHIFT) |      \
84565 +                                TR_WRITEBLOCK_BIT)
84566 +
84567 +#define TR_NOP_TRANS           (0x0 | TR_SIZE0)
84568 +#define TR_SETEVENT            (0x0 | TR_SIZE0 | TR_SENDACK | TR_LAST_TRANS)
84569 +#define TR_REMOTEDMA           (0x1 | TR_SIZE4 | TR_SENDACK | TR_LAST_TRANS)
84570 +#define TR_LOCKQUEUE           (0x2 | TR_SIZE0)
84571 +#define TR_UNLOCKQUEUE         (0x3 | TR_SIZE0 | TR_SENDACK | TR_LAST_TRANS)
84572 +
84573 +#define TR_SENDDISCARD         (0x4 | TR_SIZE0)
84574 +#define TR_TRACEROUTE          (0x5 | TR_SIZE4)
84575 +
84576 +#define TR_DMAIDENTIFY         (0x6 | TR_SIZE0)
84577 +#define TR_THREADIDENTIFY      (0x7 | TR_SIZE1)
84578 +
84579 +#define TR_GTE                 (0x8 | TR_SIZE1)
84580 +#define TR_LT                  (0x9 | TR_SIZE1)
84581 +#define TR_EQ                  (0xA | TR_SIZE1)
84582 +#define TR_NEQ                 (0xB | TR_SIZE1)
84583 +
84584 +#define TR_WRITEWORD           (0xC | TR_SIZE1)
84585 +#define TR_WRITEDOUBLEWORD     (0xD | TR_SIZE1)
84586 +#define TR_TESTANDWRITE        (0xE | TR_SIZE1)
84587 +#define TR_ATOMICADDWORD       (0xF | TR_SIZE1 | TR_SENDACK | TR_LAST_TRANS)
84588 +#define TR_OPCODE_TYPE_MASK    0xff
84589 +
84590 +
84591 +#endif /* notdef _ELAN3_TRTYPE_H */
84592 +
84593 +/*
84594 + * Local variables:
84595 + * c-file-style: "stroustrup"
84596 + * End:
84597 + */
84598 Index: linux-2.4.21/include/elan3/urom_addrs.h
84599 ===================================================================
84600 --- linux-2.4.21.orig/include/elan3/urom_addrs.h        2004-02-23 16:02:56.000000000 -0500
84601 +++ linux-2.4.21/include/elan3/urom_addrs.h     2005-06-01 23:12:54.731419040 -0400
84602 @@ -0,0 +1,262 @@
84603 +#define MI_WaitForRemoteDescRead 0x0
84604 +#define MI_WaitForRemoteDescRead2 0x1
84605 +#define MI_WaitForRemoteDescRead2_seq1 0x2
84606 +#define MI_SendRemoteDmaRoutes 0x3
84607 +#define MI_IProcTrapped 0x4
84608 +#define MI_DProcTrapped 0x5
84609 +#define MI_CProcTrapped 0x6
84610 +#define MI_TProcTrapped 0x7
84611 +#define MI_TestWhichDmaQueue 0x8
84612 +#define MI_TestWhichDmaQueue_seq1 0x9
84613 +#define MI_InputRemoteDmaUpdateBPtr 0xa
84614 +#define MI_FixupQueueContextAndRemoteBit 0xb
84615 +#define MI_FixupQueueContextAndRemoteBit_seq1 0xc
84616 +#define MI_FixupQueueContextAndRemoteBit_seq2 0xd
84617 +#define MI_FixupQueueContextAndRemoteBit_seq3 0xe
84618 +#define MI_FixupQueueContextAndRemoteBit_seq4 0xf
84619 +#define MI_RunDmaCommand 0x10
84620 +#define MI_DoSendRemoteDmaDesc 0x11
84621 +#define MI_DequeueNonSysCntxDma 0x12
84622 +#define MI_WaitForRemoteDescRead1 0x13
84623 +#define MI_RemoteDmaCommand 0x14
84624 +#define MI_WaitForRemoteRoutes 0x15
84625 +#define MI_DequeueSysCntxDma 0x16
84626 +#define MI_ExecuteDmaDescriptorForQueue 0x17
84627 +#define MI_ExecuteDmaDescriptor1 0x18
84628 +#define MI_ExecuteDmaDescriptor1_seq1 0x19
84629 +#define MI_ExecuteDmaDescriptor1_seq2 0x1a
84630 +#define MI_ExecuteDmaDescriptor1_seq3 0x1b
84631 +#define MI_GetNewSizeInProg 0x1c
84632 +#define MI_GetNewSizeInProg_seq1 0x1d
84633 +#define MI_FirstBlockRead 0x1e
84634 +#define MI_ExtraFirstBlockRead 0x1f
84635 +#define MI_UnimplementedError 0x20
84636 +#define MI_UpdateDescriptor 0x21
84637 +#define MI_UpdateDescriptor_seq1 0x22
84638 +#define MI_UpdateDescriptor_seq2 0x23
84639 +#define MI_UpdateDescriptor_seq3 0x24
84640 +#define MI_UpdateDescriptor_seq4 0x25
84641 +#define MI_UpdateDescriptor_seq5 0x26
84642 +#define MI_GetNextSizeInProg 0x27
84643 +#define MI_DoStopThisDma 0x28
84644 +#define MI_DoStopThisDma_seq1 0x29
84645 +#define MI_GenNewBytesToRead 0x2a
84646 +#define MI_WaitForEventReadTy1 0x2b
84647 +#define MI_WaitUpdateEvent 0x2c
84648 +#define MI_WaitUpdateEvent_seq1 0x2d
84649 +#define MI_DoSleepOneTickThenRunable 0x2e
84650 +#define MI_RunEvent 0x2f
84651 +#define MI_EnqueueThread 0x30
84652 +#define MI_CheckContext0 0x31
84653 +#define MI_EnqueueDma 0x32
84654 +#define MI_CprocTrapping 0x33
84655 +#define MI_CprocTrapping_seq1 0x34
84656 +#define MI_WaitForRemoteRoutes1 0x35
84657 +#define MI_SetEventCommand 0x36
84658 +#define MI_DoSetEvent 0x37
84659 +#define MI_DoRemoteSetEventNowOrTrapQueueingDma 0x38
84660 +#define MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1 0x39
84661 +#define MI_SendRemoteDmaRoutes2 0x3a
84662 +#define MI_WaitForRemoteRoutes2 0x3b
84663 +#define MI_WaitEventCommandTy0 0x3c
84664 +#define MI_DequeueNonSysCntxDma2 0x3d
84665 +#define MI_WaitEventCommandTy1 0x3e
84666 +#define MI_WaitEventCommandTy1_seq1 0x3f
84667 +#define MI_DequeueNonSysCntxThread 0x40
84668 +#define MI_DequeueSysCntxDma1 0x41
84669 +#define MI_DequeueSysCntxThread 0x42
84670 +#define MI_TestNonSysCntxDmaQueueEmpty 0x43
84671 +#define MI_TestNonSysCntxDmaQueueEmpty_seq1 0x44
84672 +#define MI_TestNonSysCntxDmaQueueEmpty_seq2 0x45
84673 +#define MI_RunThreadCommand 0x46
84674 +#define MI_SetEventWaitForLastAcess 0x47
84675 +#define MI_SetEventReadWait 0x48
84676 +#define MI_SetEventReadWait_seq1 0x49
84677 +#define MI_TestEventType 0x4a
84678 +#define MI_TestEventType_seq1 0x4b
84679 +#define MI_TestEventBit2 0x4c
84680 +#define MI_DmaDescOrBlockCopyOrChainedEvent 0x4d
84681 +#define MI_RunThread 0x4e
84682 +#define MI_RunThread1 0x4f
84683 +#define MI_RunThread1_seq1 0x50
84684 +#define MI_IncDmaSysCntxBPtr 0x51
84685 +#define MI_IncDmaSysCntxBPtr_seq1 0x52
84686 +#define MI_IncDmaSysCntxBPtr_seq2 0x53
84687 +#define MI_WaitForCntxDmaDescRead 0x54
84688 +#define MI_FillInContext 0x55
84689 +#define MI_FillInContext_seq1 0x56
84690 +#define MI_WriteNewDescToQueue 0x57
84691 +#define MI_WriteNewDescToQueue_seq1 0x58
84692 +#define MI_TestForQueueWrap 0x59
84693 +#define MI_TestForQueueWrap_seq1 0x5a
84694 +#define MI_TestQueueIsFull 0x5b
84695 +#define MI_TestQueueIsFull_seq1 0x5c
84696 +#define MI_TestQueueIsFull_seq2 0x5d
84697 +#define MI_CheckPsychoShitFixup 0x5e
84698 +#define MI_PsychoShitFixupForcedRead 0x5f
84699 +#define MI_PrepareDMATimeSlice 0x60
84700 +#define MI_PrepareDMATimeSlice_seq1 0x61
84701 +#define MI_TProcRestartFromTrapOrTestEventBit2 0x62
84702 +#define MI_TProcRestartFromTrapOrTestEventBit2_seq1 0x63
84703 +#define MI_WaitForGlobalsRead 0x64
84704 +#define MI_WaitForNPCRead 0x65
84705 +#define MI_EventInterrupt 0x66
84706 +#define MI_EventInterrupt_seq1 0x67
84707 +#define MI_EventInterrupt_seq2 0x68
84708 +#define MI_EventInterrupt_seq3 0x69
84709 +#define MI_TestSysCntxDmaQueueEmpty 0x6a
84710 +#define MI_TestSysCntxDmaQueueEmpty_seq1 0x6b
84711 +#define MI_TestIfRemoteDesc 0x6c
84712 +#define MI_DoDmaLocalSetEvent 0x6d
84713 +#define MI_DoDmaLocalSetEvent_seq1 0x6e
84714 +#define MI_DoDmaLocalSetEvent_seq2 0x6f
84715 +#define MI_DmaLoop1 0x70
84716 +#define MI_ExitDmaLoop 0x71
84717 +#define MI_ExitDmaLoop_seq1 0x72
84718 +#define MI_RemoteDmaTestPAckType 0x73
84719 +#define MI_PacketDiscardOrTestFailRecIfCCis0 0x74
84720 +#define MI_PacketDiscardOrTestFailRecIfCCis0_seq1 0x75
84721 +#define MI_TestNackFailIsZero2 0x76
84722 +#define MI_TestNackFailIsZero3 0x77
84723 +#define MI_DmaFailCountError 0x78
84724 +#define MI_TestDmaForSysCntx 0x79
84725 +#define MI_TestDmaForSysCntx_seq1 0x7a
84726 +#define MI_TestDmaForSysCntx_seq2 0x7b
84727 +#define MI_TestAeqB2 0x7c
84728 +#define MI_TestAeqB2_seq1 0x7d
84729 +#define MI_GetNextDmaDescriptor 0x7e
84730 +#define MI_DequeueSysCntxDma2 0x7f
84731 +#define MI_InputSetEvent 0x80
84732 +#define MI_PutBackSysCntxDma 0x81
84733 +#define MI_PutBackSysCntxDma_seq1 0x82
84734 +#define MI_PutBackSysCntxDma_seq2 0x83
84735 +#define MI_InputRemoteDma 0x84
84736 +#define MI_InputRemoteDma_seq1 0x85
84737 +#define MI_WaitOneTickForWakeup1 0x86
84738 +#define MI_SendRemoteDmaDesc 0x87
84739 +#define MI_InputLockQueue 0x88
84740 +#define MI_CloseTheTrappedPacketIfCCis1 0x89
84741 +#define MI_CloseTheTrappedPacketIfCCis1_seq1 0x8a
84742 +#define MI_PostDmaInterrupt 0x8b
84743 +#define MI_InputUnLockQueue 0x8c
84744 +#define MI_WaitForUnLockDescRead 0x8d
84745 +#define MI_SendEOPforRemoteDma 0x8e
84746 +#define MI_LookAtRemoteAck 0x8f
84747 +#define MI_InputWriteBlockQueue 0x90
84748 +#define MI_WaitForSpStore 0x91
84749 +#define MI_TProcNext 0x92
84750 +#define MI_TProcStoppedRunning 0x93
84751 +#define MI_InputWriteBlock 0x94
84752 +#define MI_RunDmaOrDeqNonSysCntxDma 0x95
84753 +#define MI_ExecuteDmaDescriptorForRun 0x96
84754 +#define MI_ConfirmQueueLock 0x97
84755 +#define MI_DmaInputIdentify 0x98
84756 +#define MI_TProcStoppedRunning2 0x99
84757 +#define MI_TProcStoppedRunning2_seq1 0x9a
84758 +#define MI_TProcStoppedRunning2_seq2 0x9b
84759 +#define MI_ThreadInputIdentify 0x9c
84760 +#define MI_InputIdWriteAddrAndType3 0x9d
84761 +#define MI_IProcTrappedWriteStatus 0x9e
84762 +#define MI_FinishTrappingEop 0x9f
84763 +#define MI_InputTestTrans 0xa0
84764 +#define MI_TestAeqB3 0xa1
84765 +#define MI_ThreadUpdateNonSysCntxBack 0xa2
84766 +#define MI_ThreadQueueOverflow 0xa3
84767 +#define MI_RunContext0Thread 0xa4
84768 +#define MI_RunContext0Thread_seq1 0xa5
84769 +#define MI_RunContext0Thread_seq2 0xa6
84770 +#define MI_RunDmaDesc 0xa7
84771 +#define MI_RunDmaDesc_seq1 0xa8
84772 +#define MI_RunDmaDesc_seq2 0xa9
84773 +#define MI_TestAeqB 0xaa
84774 +#define MI_WaitForNonCntxDmaDescRead 0xab
84775 +#define MI_DmaQueueOverflow 0xac
84776 +#define MI_BlockCopyEvent 0xad
84777 +#define MI_BlockCopyEventReadBlock 0xae
84778 +#define MI_BlockCopyWaitForReadData 0xaf
84779 +#define MI_InputWriteWord 0xb0
84780 +#define MI_TraceSetEvents 0xb1
84781 +#define MI_TraceSetEvents_seq1 0xb2
84782 +#define MI_TraceSetEvents_seq2 0xb3
84783 +#define MI_InputWriteDoubleWd 0xb4
84784 +#define MI_SendLockTransIfCCis1 0xb5
84785 +#define MI_WaitForDmaRoutes1 0xb6
84786 +#define MI_LoadDmaContext 0xb7
84787 +#define MI_InputTestAndSetWord 0xb8
84788 +#define MI_InputTestAndSetWord_seq1 0xb9
84789 +#define MI_GetDestEventValue 0xba
84790 +#define MI_SendDmaIdentify 0xbb
84791 +#define MI_InputAtomicAddWord 0xbc
84792 +#define MI_LoadBFromTransD0 0xbd
84793 +#define MI_ConditionalWriteBackCCTrue 0xbe
84794 +#define MI_WaitOneTickForWakeup 0xbf
84795 +#define MI_SendFinalUnlockTrans 0xc0
84796 +#define MI_SendDmaEOP 0xc1
84797 +#define MI_GenLastAddrForPsycho 0xc2
84798 +#define MI_FailedAckIfCCis0 0xc3
84799 +#define MI_FailedAckIfCCis0_seq1 0xc4
84800 +#define MI_WriteDmaSysCntxDesc 0xc5
84801 +#define MI_TimesliceDmaQueueOverflow 0xc6
84802 +#define MI_DequeueNonSysCntxThread1 0xc7
84803 +#define MI_DequeueNonSysCntxThread1_seq1 0xc8
84804 +#define MI_TestThreadQueueEmpty 0xc9
84805 +#define MI_ClearThreadQueueIfCC 0xca
84806 +#define MI_DequeueSysCntxThread1 0xcb
84807 +#define MI_DequeueSysCntxThread1_seq1 0xcc
84808 +#define MI_TProcStartUpGeneric 0xcd
84809 +#define MI_WaitForPCload2 0xce
84810 +#define MI_WaitForNPCWrite 0xcf
84811 +#define MI_WaitForEventWaitAddr 0xd0
84812 +#define MI_WaitForWaitEventAccess 0xd1
84813 +#define MI_WaitForWaitEventAccess_seq1 0xd2
84814 +#define MI_WaitForWaitEventDesc 0xd3
84815 +#define MI_WaitForEventReadTy0 0xd4
84816 +#define MI_SendCondTestFail 0xd5
84817 +#define MI_InputMoveToNextTrans 0xd6
84818 +#define MI_ThreadUpdateSysCntxBack 0xd7
84819 +#define MI_FinishedSetEvent 0xd8
84820 +#define MI_EventIntUpdateBPtr 0xd9
84821 +#define MI_EventQueueOverflow 0xda
84822 +#define MI_MaskLowerSource 0xdb
84823 +#define MI_DmaLoop 0xdc
84824 +#define MI_SendNullSetEvent 0xdd
84825 +#define MI_SendFinalSetEvent 0xde
84826 +#define MI_TestNackFailIsZero1 0xdf
84827 +#define MI_DmaPacketTimedOutOrPacketError 0xe0
84828 +#define MI_NextPacketIsLast 0xe1
84829 +#define MI_TestForZeroLengthDma 0xe2
84830 +#define MI_WaitForPCload 0xe3
84831 +#define MI_ReadInIns 0xe4
84832 +#define MI_WaitForInsRead 0xe5
84833 +#define MI_WaitForLocals 0xe6
84834 +#define MI_WaitForOutsWrite 0xe7
84835 +#define MI_WaitForWaitEvWrBack 0xe8
84836 +#define MI_WaitForLockRead 0xe9
84837 +#define MI_TestQueueLock 0xea
84838 +#define MI_InputIdWriteAddrAndType 0xeb
84839 +#define MI_InputIdWriteAddrAndType2 0xec
84840 +#define MI_ThreadInputIdentify2 0xed
84841 +#define MI_WriteIntoTrapArea0 0xee
84842 +#define MI_GenQueueBlockWrAddr 0xef
84843 +#define MI_InputDiscardFreeLock 0xf0
84844 +#define MI_WriteIntoTrapArea1 0xf1
84845 +#define MI_WriteIntoTrapArea2 0xf2
84846 +#define MI_ResetBPtrToBase 0xf3
84847 +#define MI_InputDoTrap 0xf4
84848 +#define MI_RemoteDmaCntxt0Update 0xf5
84849 +#define MI_ClearQueueLock 0xf6
84850 +#define MI_IProcTrappedBlockWriteData 0xf7
84851 +#define MI_FillContextFilter 0xf8
84852 +#define MI_IProcTrapped4 0xf9
84853 +#define MI_RunSysCntxDma 0xfa
84854 +#define MI_ChainedEventError 0xfb
84855 +#define MI_InputTrappingEOP 0xfc
84856 +#define MI_CheckForRunIfZero 0xfd
84857 +#define MI_TestForBreakOrSuspend 0xfe
84858 +#define MI_SwapForRunable 0xff
84859 +
84860 +/*
84861 + * Local variables:
84862 + * c-file-style: "stroustrup"
84863 + * End:
84864 + */
84865 Index: linux-2.4.21/include/elan3/vmseg.h
84866 ===================================================================
84867 --- linux-2.4.21.orig/include/elan3/vmseg.h     2004-02-23 16:02:56.000000000 -0500
84868 +++ linux-2.4.21/include/elan3/vmseg.h  2005-06-01 23:12:54.732418888 -0400
84869 @@ -0,0 +1,75 @@
84870 +/*
84871 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84872 + *
84873 + *    For licensing information please see the supplied COPYING file
84874 + *
84875 + */
84876 +
84877 +#ifndef _VM_SEG_ELAN3_H
84878 +#define _VM_SEG_ELAN3_H
84879 +
84880 +#ident "$Id: vmseg.h,v 1.20 2003/09/24 13:57:24 david Exp $"
84881 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/vmseg.h,v $*/
84882 +
84883 +#include <elan3/elanuregs.h>
84884 +
84885 +/*
84886 + * This segment maps Elan registers,  it is fixed size and has 8K 
84887 + * pages split up as follows
84888 + *
84889 + *    ----------------------------------------
84890 + *    |    Performance Counters (read-only)  |
84891 + *    ----------------------------------------
84892 + *    |    Flag Page (read-only)            |
84893 + *    ----------------------------------------
84894 + *    |    Command Port                             |
84895 + *    ----------------------------------------
84896 + */
84897 +typedef volatile struct elan3_flagstats 
84898 +{
84899 +    u_int      CommandFlag;
84900 +    u_int      PageFaults;
84901 +    u_int      CProcTraps;
84902 +    u_int      DProcTraps;
84903 +    u_int      TProcTraps;
84904 +    u_int      IProcTraps;
84905 +    u_int      EopBadAcks;
84906 +    u_int      EopResets;
84907 +    u_int      DmaNetworkErrors;
84908 +    u_int      DmaIdentifyNetworkErrors;
84909 +    u_int      ThreadIdentifyNetworkErrors;
84910 +    u_int      DmaRetries;
84911 +    u_int      ThreadSystemCalls;
84912 +    u_int      ThreadElanCalls;
84913 +    u_int      LoadVirtualProcess;
84914 +} ELAN3_FLAGSTATS;
84915 +
84916 +#ifdef DIGITAL_UNIX
84917 +typedef volatile union elan3_flagpage
84918 +{
84919 +    u_char        Padding[8192];
84920 +    ELAN3_FLAGSTATS Stats;
84921 +} ELAN3_FLAGPAGE;
84922 +
84923 +typedef volatile struct elan3_vmseg
84924 +{
84925 +    E3_CommandPort CommandPort;
84926 +    ELAN3_FLAGPAGE  FlagPage;
84927 +    E3_User_Regs   UserRegs;
84928 +} ELAN3_VMSEG;
84929 +
84930 +#define SEGELAN3_SIZE   (sizeof (ELAN3_VMSEG))
84931 +
84932 +#define SEGELAN3_COMMAND_PORT  0
84933 +#define SEGELAN3_FLAG_PAGE     1
84934 +#define SEGELAN3_PERF_COUNTERS 2
84935 +
84936 +#endif /* DIGITAL_UNIX */
84937 +
84938 +#endif /* _VM_SEG_ELAN3_H */
84939 +
84940 +/*
84941 + * Local variables:
84942 + * c-file-style: "stroustrup"
84943 + * End:
84944 + */
84945 Index: linux-2.4.21/include/elan3/vpd.h
84946 ===================================================================
84947 --- linux-2.4.21.orig/include/elan3/vpd.h       2004-02-23 16:02:56.000000000 -0500
84948 +++ linux-2.4.21/include/elan3/vpd.h    2005-06-01 23:12:54.732418888 -0400
84949 @@ -0,0 +1,47 @@
84950 +/*
84951 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84952 + *
84953 + *    For licensing information please see the supplied COPYING file
84954 + *
84955 + */
84956 +
84957 +#ident "$Id: vpd.h,v 1.5 2002/08/09 11:23:34 addy Exp $"
84958 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/vpd.h,v $*/
84959 +
84960 +#ifndef __ELAN3_VPD_H
84961 +#define __ELAN3_VPD_H
84962 +
84963 +#define LARGE_RESOURCE_BIT                     0x80
84964 +
84965 +#define SMALL_RESOURCE_COMPATIBLE_DEVICE_ID    0x3
84966 +#define SMALL_RESOURCE_VENDOR_DEFINED          0xE
84967 +#define SMALL_RESOURCE_END_TAG                 0xF
84968 +
84969 +#define LARGE_RESOURCE_STRING                  0x2
84970 +#define LARGE_RESOURCE_VENDOR_DEFINED          0x4
84971 +#define LARGE_RESOURCE_VITAL_PRODUCT_DATA      0x10
84972 +
84973 +#define VPD_PART_NUMBER                        "PN"
84974 +#define VPD_FRU_PART_NUMBER            "FN"
84975 +#define VPD_EC_LEVEL                   "EC"
84976 +#define VPD_MANUFACTURE_ID             "MN"
84977 +#define VPD_SERIAL_NUMBER              "SN"
84978 +
84979 +#define VPD_LOAD_ID                    "LI"
84980 +#define VPD_ROM_LEVEL                  "RL"
84981 +#define VPD_ALTERABLE_ROM_LEVEL                "RM"
84982 +#define VPD_NETWORK_ADDRESS            "NA"
84983 +#define VPD_DEVICE_DRIVER_LEVEL                "DD"
84984 +#define VPD_DIAGNOSTIC_LEVEL           "DG"
84985 +#define VPD_LOADABLE_MICROCODE_LEVEL   "LL"
84986 +#define VPD_VENDOR_ID                  "VI"
84987 +#define VPD_FUNCTION_NUMBER            "FU"
84988 +#define VPD_SUBSYSTEM_VENDOR_ID                "SI"
84989 +
84990 +#endif /* __ELAN3_VPD_H */
84991 +
84992 +/*
84993 + * Local variables:
84994 + * c-file-style: "stroustrup"
84995 + * End:
84996 + */
84997 Index: linux-2.4.21/include/elan4/commands.h
84998 ===================================================================
84999 --- linux-2.4.21.orig/include/elan4/commands.h  2004-02-23 16:02:56.000000000 -0500
85000 +++ linux-2.4.21/include/elan4/commands.h       2005-06-01 23:12:54.733418736 -0400
85001 @@ -0,0 +1,247 @@
85002 +/*
85003 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
85004 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
85005 + *
85006 + *    For licensing information please see the supplied COPYING file
85007 + *
85008 + */
85009 +
85010 +#ifndef __ELAN4_COMMANDS_H
85011 +#define __ELAN4_COMMANDS_H
85012 +
85013 +#ident "$Id: commands.h,v 1.29 2004/06/16 15:45:02 addy Exp $"
85014 +/*      $Source: /cvs/master/quadrics/elan4hdr/commands.h,v $*/
85015 +
85016 +/*
85017 + * This header file describes the command format for the Elan 4
85018 + *    See CommandFormat.doc
85019 + */
85020 +
85021 +/*
85022 + * Number of channels in traced elanlib_trace.c
85023 + */
85024 +#define TRACE_MAX_CHANNELS     2
85025 +
85026 +/*
85027 + * Define encoding for the commands issued into the command queues
85028 + */
85029 +#define RUN_THREAD_CMD       0x00
85030 +#define OPEN_STEN_PKT_CMD    0x01
85031 +#define WRITE_DWORD_CMD      0x02
85032 +#define ADD_DWORD_CMD        0x03
85033 +#define COPY64_CMD           0x05
85034 +#define GUARD_CMD            0x06
85035 +#define SET_EVENT_CMD        0x07
85036 +#define SEND_TRANS_CMD       0x09
85037 +#define INTERRUPT_CMD        0x0d
85038 +#define RUN_DMA_CMD          0x0e
85039 +#define SET_EVENTN_CMD       0x0f
85040 +#define NOP_CMD                     0x17
85041 +#define MAKE_EXT_CLEAN_CMD   0x37
85042 +#define WAIT_EVENT_CMD       0x1f
85043 +
85044 +/*
85045 + * Define the portion of the data word the user is NOT
85046 + * allowed to use. This varies with Commmand type
85047 + */
85048 +#define RUN_THREAD_CMD_MASK       0x03
85049 +#define OPEN_STEN_PKT_CMD_MASK    0x0f
85050 +#define WRITE_DWORD_CMD_MASK      0x07
85051 +#define ADD_DWORD_CMD_MASK        0x07
85052 +#define COPY64_CMD_MASK           0x0f
85053 +#define GUARD_CMD_MASK            0x0f
85054 +#define SET_EVENT_CMD_MASK        0x1f
85055 +#define SEND_TRANS_CMD_MASK       0x1f
85056 +#define INTERRUPT_CMD_MASK        0x0f
85057 +#define RUN_DMA_CMD_MASK          0x0f
85058 +#define SET_EVENTN_CMD_MASK       0x1f
85059 +#define NOP_CMD_MASK             0x3f
85060 +#define MAKE_EXT_CLEAN_MASK      0x3f
85061 +#define WAIT_EVENT_CMD_MASK       0x1f
85062 +
85063 +#define COPY64_DATA_TYPE_SHIFT    0x4
85064 +#define COPY64_DTYPE_BYTE        (0 << COPY64_DATA_TYPE_SHIFT)
85065 +#define COPY64_DTYPE_SHORT       (1 << COPY64_DATA_TYPE_SHIFT)
85066 +#define COPY64_DTYPE_WORD        (2 << COPY64_DATA_TYPE_SHIFT)
85067 +#define COPY64_DTYPE_LONG        (3 << COPY64_DATA_TYPE_SHIFT)
85068 +
85069 +/*
85070 + * SET_EVENTN - word 1 has following form
85071 + * [63:5]      Event Address
85072 + * [4:0]       Part Set Value.
85073 + */
85074 +#define SET_EVENT_PART_SET_MASK      0x1f
85075 +
85076 +/* OPEN_STEN_PKT_CMD 
85077 + *   [63:32]   Vproc
85078 + *   [31]      Use Test
85079 + *   [30:28]   unused
85080 + *   [27:21]   Test Acceptable PAck code
85081 + *   [20:16]   Test Ack Channel Number
85082 + *   [15:9]    Acceptable PAck code
85083 + *   [8:4]     Ack Channel Number (1 bit on Elan4)
85084 + *   [3:0]     Command type
85085 + */
85086 +/* Acceptable PAck code */
85087 +#define PACK_OK                        (1 << 0)
85088 +#define PACK_TESTFAIL          (1 << 1)
85089 +#define PACK_DISCARD           (1 << 2)
85090 +#define RESTART_COUNT_ZERO     (1 << 3)
85091 +#define PACK_ERROR             (1 << 7)
85092 +#define PACK_TIMEOUT           (1 << 8)
85093 +
85094 +/*
85095 + *#ifndef USE_DIRTY_COMMANDS
85096 + *#define USE_DIRTY_COMMANDS
85097 + *#endif
85098 + */
85099 +#ifdef USE_DIRTY_COMMANDS
85100 +#define OPEN_PACKET_USED_MASK    0x00000000780f00e0ULL
85101 +#define SEND_TRANS_USED_MASK     0xffffffff0000fff0ULL
85102 +#define COPY64_WRITE_USED_MASK   0x000000000000000fULL
85103 +#define MAIN_INT_USED_MASK       0x0000000000003ff0ULL
85104 +#define GUARD_USED_MASK          0xfffffe007000fde0ULL
85105 +#define DMA_TYPESIZE_USED_MASK   0x000000000000fff0ULL
85106 +#define SETEVENTN_USED_MASK      0xffffffffffffffe0ULL
85107 +#define NOP_USED_MASK            0xffffffffffffffc0ULL
85108 +#define EXT_CLEAN_USED_MASK      0xffffffffffffffc0ULL
85109 +#define WAIT_CNT_TYPE_USED_MASK  0x00000000fffff800ULL
85110 +#else
85111 +#define OPEN_PACKET_USED_MASK    0x0ULL
85112 +#define SEND_TRANS_USED_MASK     0x0ULL
85113 +#define COPY64_WRITE_USED_MASK   0x0ULL
85114 +#define MAIN_INT_USED_MASK       0x0ULL
85115 +#define GUARD_USED_MASK          0x0ULL
85116 +#define DMA_TYPESIZE_USED_MASK   0x0ULL
85117 +#define SETEVENTN_USED_MASK      0x0ULL
85118 +#define NOP_USED_MASK            0x0ULL
85119 +#define EXT_CLEAN_USED_MASK      0x0ULL
85120 +#define WAIT_CNT_TYPE_USED_MASK  0x0ULL
85121 +#endif
85122 +
85123 +#define OPEN_PACKET(chan, code, vproc) \
85124 +       ((((chan) & 1) << 4) | (((code) & 0x7f) << 9) | ((E4_uint64)(vproc) << 32) | OPEN_STEN_PKT_CMD)
85125 +
85126 +#define OPEN_PACKET_TEST(chan, code, vproc, tchan, tcode) \
85127 +       ((((chan) & 1) << 4) | (((code) & 0x7f) << 9) | ((E4_uint64)(vproc) << 32) | \
85128 +        (((tchan) & 1) << 16) | (((tcode) & 0x7f) << 21) | (((E4_uint64) 1) << 31) | OPEN_STEN_PKT_CMD)
85129 +
85130 +/*
85131 + * GUARD_CMD
85132 + *   [63:41]   unused
85133 + *   [40]      Reset Restart Fail Count        // only performed if the Guard executes the next command.
85134 + *   [39:32]   New Restart Fail Count value
85135 + *   [31]      Use Test
85136 + *   [30:28]   unused
85137 + *   [27:21]   Test Acceptable PAck code
85138 + *   [20:16]   Test Ack Channel Number
85139 + *   [15:9]    unused
85140 + *   [8:4]     Ack Channel Number
85141 + *   [3:0]     Command type
85142 + */
85143 +/* GUARD_CHANNEL(chan)
85144 + */
85145 +#define GUARD_ALL_CHANNELS     ((1 << 9) | GUARD_CMD)
85146 +#define GUARD_CHANNEL(chan)    ((((chan) & 1) << 4) | GUARD_CMD)
85147 +#define GUARD_TEST(chan,code)  ((1ull << 31) | (((code) & 0x7f) << 21) | (((chan) & 1) << 16))
85148 +#define GUARD_RESET(count)     ((1ull << 40) | ((((E4_uint64) count) & 0xff) << 32))
85149 +
85150 +#define GUARD_CHANNEL_TEST(chan,tchan,tcode) \
85151 +       ((((chan) & 1) << 4) | (((tchan) & 1) << 16) | (((tcode) & 0x7f) << 21) | \
85152 +        (((E4_uint64) 1) << 31) | GUARD_CMD)
85153 +
85154 +/*
85155 + * SEND_TRANS_CMD
85156 + * [63:32]     unused
85157 + * [31:16]     transaction type
85158 + * [15:4]      unused
85159 + * [3:0]       Command type
85160 + */
85161 +#define SEND_TRANS(TransType)  (((TransType) << 16) | SEND_TRANS_CMD)
85162 +
85163 +/*
85164 + * Command port trace debug levels
85165 + */
85166 +#define TRACE_CMD_BUFFER       0x01
85167 +#define TRACE_CMD_TYPE         0x02
85168 +#define TRACE_CHANNEL_OPENS    0x04
85169 +#define TRACE_GUARDED_ATOMICS  0x08
85170 +#define TRACE_CMD_TIMEOUT      0x10
85171 +
85172 +/*
85173 + * Commands that should be preceeded by a GUARD_CMD.
85174 + */
85175 +#define IS_ATOMIC_CMD(cmd)                                                             \
85176 +   ((cmd) == RUN_THREAD_CMD || (cmd) == ADD_DWORD_CMD || (cmd) == INTERRUPT_CMD ||     \
85177 +    (cmd) == RUN_DMA_CMD    || (cmd) == SET_EVENT_CMD || (cmd) == SET_EVENTN_CMD ||    \
85178 +    (cmd) == WAIT_EVENT_CMD)
85179 +
85180 +#ifndef _ASM
85181 +
85182 +/*
85183 + * These structures are used to build event copy command streams. They are intended to be included
85184 + * in a larger structure to form a self documenting command sequence that can be easily coped and manipulated.
85185 + */
85186 +
85187 +typedef struct e4_runthreadcmd
85188 +{
85189 +   E4_Addr     PC;
85190 +   E4_uint64   r[6];
85191 +} E4_RunThreadCmd;
85192 +
85193 +typedef E4_uint64 E4_OpenCmd;
85194 +
85195 +typedef struct e4_writecmd
85196 +{
85197 +   E4_Addr     WriteAddr;
85198 +   E4_uint64   WriteValue;
85199 +} E4_WriteCmd;
85200 +
85201 +typedef struct e4_addcmd
85202 +{
85203 +   E4_Addr     AddAddr;
85204 +   E4_uint64   AddValue;
85205 +} E4_AddCmd;
85206 +
85207 +typedef struct e4_copycmd
85208 +{
85209 +   E4_Addr     SrcAddr;
85210 +   E4_Addr     DstAddr;
85211 +} E4_CopyCmd;
85212 +
85213 +typedef E4_uint64 E4_GaurdCmd;
85214 +typedef E4_uint64 E4_SetEventCmd;
85215 +
85216 +/*
85217 + * The data to this command must be declared as a vector after the use of this.
85218 + */
85219 +typedef struct e4_sendtranscmd
85220 +{
85221 +   E4_Addr     Type;
85222 +   E4_Addr     Addr;
85223 +} E4_SendTransCmd;
85224 +
85225 +typedef E4_uint64 E4_IntCmd;
85226 +
85227 +/* The normal Dma struc can be used here. */
85228 +
85229 +typedef struct e4_seteventncmd
85230 +{
85231 +   E4_Addr     Event;
85232 +   E4_Addr     SetCount;
85233 +} E4_SetEventNCmd;
85234 +
85235 +typedef E4_uint64 E4_NopCmd;
85236 +typedef E4_uint64 E4_MakeExtCleanCmd;
85237 +
85238 +typedef struct e4_waitcmd
85239 +{
85240 +   E4_Addr     ev_Event;
85241 +   E4_Addr     ev_CountType;
85242 +   E4_Addr     ev_Params[2];
85243 +} E4_WaitCmd;
85244 +
85245 +#endif /* _ASM */
85246 +
85247 +#endif /* __ELAN4_COMMANDS_H  */
85248 +
85249 Index: linux-2.4.21/include/elan4/debug.h
85250 ===================================================================
85251 --- linux-2.4.21.orig/include/elan4/debug.h     2004-02-23 16:02:56.000000000 -0500
85252 +++ linux-2.4.21/include/elan4/debug.h  2005-06-01 23:12:54.733418736 -0400
85253 @@ -0,0 +1,113 @@
85254 +/*
85255 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
85256 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
85257 + * 
85258 + *    For licensing information please see the supplied COPYING file
85259 + *
85260 + */
85261 +
85262 +#ifndef _ELAN4_ELANDEBUG_H
85263 +#define _ELAN4_ELANDEBUG_H
85264 +
85265 +#ident "$Id: debug.h,v 1.19.6.1 2005/01/18 14:36:10 david Exp $"
85266 +/*      $Source: /cvs/master/quadrics/elan4mod/debug.h,v $ */
85267 +
85268 +/* values for "type" field - note a "ctxt" is permissible */
85269 +/* and BUFFER/CONSOLE are for explict calls to elan4_debugf() */
85270 +#define DBG_DEVICE     ((void *) 0)
85271 +#define DBG_USER       ((void *) 1)
85272 +
85273 +#define DBG_BUFFER     ((void *) 62)
85274 +#define DBG_CONSOLE    ((void *) 63)
85275 +#define DBG_NTYPES     64
85276 +
85277 +/* values for "mode" field */
85278 +#define DBG_CONFIG     0x00000001
85279 +#define DBG_INTR       0x00000002
85280 +#define DBG_MAININT    0x00000004
85281 +#define DBG_SDRAM      0x00000008
85282 +#define DBG_MMU                0x00000010
85283 +#define DBG_REGISTER   0x00000020
85284 +#define DBG_CQ         0x00000040
85285 +#define DBG_NETWORK_CTX        0x00000080
85286 +
85287 +#define DBG_FLUSH      0x00000100
85288 +#define DBG_FILE       0x00000200
85289 +#define DBG_CONTROL    0x00000400
85290 +#define DBG_MEM                0x00000800
85291 +
85292 +#define DBG_PERM       0x00001000
85293 +#define DBG_FAULT      0x00002000
85294 +#define DBG_SWAP       0x00004000
85295 +#define DBG_TRAP       0x00008000
85296 +#define DBG_DDCQ       0x00010000
85297 +#define DBG_VP         0x00020000
85298 +#define DBG_RESTART    0x00040000
85299 +#define DBG_RESUME     0x00080000
85300 +#define DBG_CPROC      0x00100000
85301 +#define DBG_DPROC      0x00200000
85302 +#define DBG_EPROC      0x00400000
85303 +#define DBG_IPROC      0x00800000
85304 +#define DBG_TPROC      0x01000000
85305 +#define DBG_IOPROC     0x02000000
85306 +#define DBG_ROUTE      0x04000000
85307 +#define DBG_NETERR     0x08000000
85308 +
85309 +#define DBG_ALL                0x7FFFFFFF
85310 +
85311 +
85312 +#ifdef DEBUG_PRINTF
85313 +
85314 +#  define PRINTF0(type,m,fmt)                  ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt)                  : (void)0)
85315 +#  define PRINTF1(type,m,fmt,a)                        ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a)                : (void)0)
85316 +#  define PRINTF2(type,m,fmt,a,b)              ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b)              : (void)0)
85317 +#  define PRINTF3(type,m,fmt,a,b,c)            ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c)            : (void)0)
85318 +#  define PRINTF4(type,m,fmt,a,b,c,d)          ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d)          : (void)0)
85319 +#  define PRINTF5(type,m,fmt,a,b,c,d,e)                ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e)        : (void)0)
85320 +#  define PRINTF6(type,m,fmt,a,b,c,d,e,f)      ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f)      : (void)0)
85321 +#  define PRINTF7(type,m,fmt,a,b,c,d,e,f,g)    ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g)    : (void)0)
85322 +#  define PRINTF8(type,m,fmt,a,b,c,d,e,f,g,h)  ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g,h)  : (void)0)
85323 +#  define PRINTF9(type,m,fmt,a,b,c,d,e,f,g,h,i)        ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g,h,i): (void)0)
85324 +#ifdef __GNUC__
85325 +#  define PRINTF(type,m,args...)               ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m, ##args)              : (void)0)
85326 +#endif
85327 +#  define DBGCMD(type,m,cmd)                   ((elan4_debug&(m) || (type) == DBG_CONSOLE)  ? (void) (cmd) : (void) 0)
85328 +
85329 +#else
85330 +
85331 +#  define PRINTF0(type,m,fmt)                  (0)
85332 +#  define PRINTF1(type,m,fmt,a)                        (0)
85333 +#  define PRINTF2(type,m,fmt,a,b)              (0)
85334 +#  define PRINTF3(type,m,fmt,a,b,c)            (0)
85335 +#  define PRINTF4(type,m,fmt,a,b,c,d)          (0)
85336 +#  define PRINTF5(type,m,fmt,a,b,c,d,e)                (0)
85337 +#  define PRINTF6(type,m,fmt,a,b,c,d,e,f)      (0)
85338 +#  define PRINTF7(type,m,fmt,a,b,c,d,e,f,g)    (0)
85339 +#  define PRINTF8(type,m,fmt,a,b,c,d,e,f,g,h)  (0)
85340 +#  define PRINTF9(type,m,fmt,a,b,c,d,e,f,g,h,i)        (0)
85341 +#ifdef __GNUC__
85342 +#  define PRINTF(type,m,args...)
85343 +#endif
85344 +#  define DBGCMD(type,m,cmd)                   ((void) 0)
85345 +
85346 +#endif /* DEBUG_PRINTF */
85347 +
85348 +extern unsigned   elan4_debug;
85349 +extern unsigned   elan4_debug_toconsole;
85350 +extern unsigned   elan4_debug_tobuffer;
85351 +extern unsigned   elan4_debug_display_ctxt;
85352 +extern unsigned   elan4_debug_ignore_ctxt;
85353 +extern unsigned   elan4_debug_ignore_type;
85354 +
85355 +extern void      elan4_debug_init(void);
85356 +extern void      elan4_debug_fini(void);
85357 +extern void       elan4_debugf (void *type, int mode, char *fmt, ...);
85358 +extern int        elan4_debug_snapshot (caddr_t ubuffer, int len);
85359 +extern int       elan4_debug_display (void);
85360 +
85361 +/*
85362 + * Local variables:
85363 + * c-file-style: "stroustrup"
85364 + * End:
85365 + */
85366 +#endif /* _ELAN4_ELANDEBUG_H */
85367 Index: linux-2.4.21/include/elan4/device.h
85368 ===================================================================
85369 --- linux-2.4.21.orig/include/elan4/device.h    2004-02-23 16:02:56.000000000 -0500
85370 +++ linux-2.4.21/include/elan4/device.h 2005-06-01 23:12:54.735418432 -0400
85371 @@ -0,0 +1,781 @@
85372 +/*
85373 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
85374 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
85375 + * 
85376 + *    For licensing information please see the supplied COPYING file
85377 + *
85378 + */
85379 +
85380 +#ifndef __ELAN4_ELANDEV_H
85381 +#define __ELAN4_ELANDEV_H
85382 +
85383 +#ident "$Id: device.h,v 1.68.2.1 2004/11/03 14:24:32 duncant Exp $"
85384 +/*      $Source: /cvs/master/quadrics/elan4mod/device.h,v $ */
85385 +
85386 +#include <elan/devinfo.h>
85387 +#include <elan/capability.h>
85388 +
85389 +#include <elan4/pci.h>
85390 +#include <elan4/sdram.h>
85391 +#include <elan4/dma.h>
85392 +#include <elan4/events.h>
85393 +#include <elan4/registers.h>
85394 +
85395 +#include <elan4/mmu.h>
85396 +#include <elan4/trap.h>
85397 +#include <elan4/stats.h>
85398 +#include <elan4/neterr.h>
85399 +
85400 +#ifdef CONFIG_MPSAS
85401 +#include <elan4/mpsas.h>
85402 +#endif
85403 +
85404 +#if defined(LINUX)
85405 +#include <elan4/device_Linux.h>
85406 +#elif defined(TRU64UNIX)
85407 +#include <elan4/device_OSF1.h>
85408 +#elif defined(SOLARIS)
85409 +#include <elan4/device_SunOS.h>
85410 +#endif
85411 +
85412 +/*
85413 + * Network context number allocation.
85414 + * [0]          neterr fixup system context
85415 + * [1]          kernel comms system context
85416 + * [2048-4095] kernel comms data contexts
85417 + */
85418 +#define ELAN4_NETERR_CONTEXT_NUM       0x00                    /* network error fixup context number */
85419 +#define ELAN4_KCOMM_CONTEXT_NUM                0x01                    /* kernel comms context number */
85420 +#define ELAN4_KCOMM_BASE_CONTEXT_NUM   0x800                   /* kernel comms data transfer contexts */
85421 +#define ELAN4_KCOMM_TOP_CONTEXT_NUM    0xfff
85422 +
85423 +#define ELAN4_SYSTEM_CONTEXT(ctx)  ((ctx) >= ELAN4_KCOMM_BASE_CONTEXT_NUM)
85424 +
85425 +typedef void (ELAN4_HALTFN)(struct elan4_dev *dev, void *arg);
85426 +
85427 +typedef struct elan4_haltop
85428 +{
85429 +    struct list_head    op_link;                               /* chain on a list */
85430 +    E4_uint32          op_mask;                                /* Interrupt mask to see before calling function */
85431 +    
85432 +    ELAN4_HALTFN       *op_function;                           /* function to call */
85433 +    void              *op_arg;                                 /* argument to pass to function */
85434 +} ELAN4_HALTOP;
85435 +
85436 +typedef void (ELAN4_DMA_FLUSHFN)(struct elan4_dev *dev, void *arg, int qfull);
85437 +
85438 +typedef struct elan4_dma_flushop
85439 +{
85440 +    struct list_head    op_link;                               /* chain on a list */
85441 +    ELAN4_DMA_FLUSHFN  *op_function;                           /* function to call */
85442 +    void              *op_arg;                                 /* argument to pass to function */
85443 +} ELAN4_DMA_FLUSHOP;
85444 +
85445 +typedef void (ELAN4_INTFN)(struct elan4_dev *dev, void *arg);
85446 +
85447 +typedef struct elan4_intop
85448 +{
85449 +    struct list_head    op_link;                               /* chain on a list */
85450 +    ELAN4_INTFN        *op_function;                           /* function to call */
85451 +    void              *op_arg;                                 /* argument to pass to function */
85452 +    E4_uint64          op_cookie;                              /* and main interrupt cookie */
85453 +} ELAN4_INTOP;
85454 +
85455 +#define SDRAM_MIN_BLOCK_SHIFT  10
85456 +#define SDRAM_NUM_FREE_LISTS   19                                      /* allows max 256 Mb block */
85457 +#define SDRAM_MIN_BLOCK_SIZE   (1 << SDRAM_MIN_BLOCK_SHIFT)
85458 +#define SDRAM_MAX_BLOCK_SIZE   (SDRAM_MIN_BLOCK_SIZE << (SDRAM_NUM_FREE_LISTS-1))
85459 +
85460 +#if PAGE_SHIFT < 13
85461 +#define SDRAM_PAGE_SIZE                8192
85462 +#define SDRAM_PGOFF_OFFSET     1
85463 +#define SDRAM_PGOFF_MASK       (~SDRAM_PGOFF_OFFSET)
85464 +#else
85465 +#define SDRAM_PAGE_SIZE                PAGE_SIZE
85466 +#define SDRAM_PGOFF_OFFSET     0
85467 +#define SDRAM_PGOFF_MASK       (~SDRAM_PGOFF_OFFSET)
85468 +#endif
85469 +
85470 +typedef struct elan4_sdram
85471 +{
85472 +    sdramaddr_t        b_base;                                 /* offset in sdram bar */
85473 +    unsigned           b_size;                                 /* size of bank */
85474 +    ioaddr_t           b_ioaddr;                               /* ioaddr where mapped into the kernel */
85475 +    ELAN4_MAP_HANDLE   b_handle;                               /*    and mapping handle */
85476 +    bitmap_t          *b_bitmaps[SDRAM_NUM_FREE_LISTS];        /* buddy allocator bitmaps */
85477 +} ELAN4_SDRAM_BANK;
85478 +
85479 +/* command queue */
85480 +typedef struct elan4_cq 
85481 +{
85482 +    struct elan4_cqa    *cq_cqa;                                       /* command queue allocator this belongs to */
85483 +    unsigned            cq_idx;                                        /*  and which command queue this is */
85484 +
85485 +    sdramaddr_t                 cq_space;                                      /* sdram backing up command queue */
85486 +    unsigned            cq_size;                                       /* size value */
85487 +    unsigned            cq_perm;                                       /* permissions */
85488 +    ioaddr_t            cq_mapping;                                    /* mapping of command queue page */
85489 +    ELAN4_MAP_HANDLE    cq_handle;                                     /*    and mapping handle */
85490 +} ELAN4_CQ;
85491 +
85492 +/* cqtype flags to elan4_alloccq() */
85493 +#define CQ_Priority    (1 << 0)
85494 +#define CQ_Reorder     (1 << 1)
85495 +
85496 +/* command queues are allocated in chunks, so that all the
85497 + * command ports are in a single system page */
85498 +#define ELAN4_CQ_PER_CQA       MAX(1, (PAGESIZE/CQ_CommandMappingSize))
85499 +
85500 +/* maximum number of command queues per context */
85501 +#define ELAN4_MAX_CQA          (256 / ELAN4_CQ_PER_CQA)
85502 +
85503 +typedef struct elan4_cqa
85504 +{
85505 +    struct list_head   cqa_link;                                       /* linked together */
85506 +    bitmap_t           cqa_bitmap[BT_BITOUL(ELAN4_CQ_PER_CQA)];        /* bitmap of which are free */
85507 +    unsigned int        cqa_type;                                      /* allocation type */
85508 +    unsigned int       cqa_cqnum;                                      /* base cq number */
85509 +    unsigned int       cqa_ref;                                        /* "mappings" to a queue */
85510 +    unsigned int       cqa_idx;                                        /* index number */
85511 +    ELAN4_CQ           cqa_cq[ELAN4_CQ_PER_CQA];                       /* command queue entries */
85512 +} ELAN4_CQA;
85513 +
85514 +#define elan4_cq2num(cq)       ((cq)->cq_cqa->cqa_cqnum + (cq)->cq_idx)
85515 +#define elan4_cq2idx(cq)       ((cq)->cq_cqa->cqa_idx * ELAN4_CQ_PER_CQA + (cq)->cq_idx)
85516 +
85517 +typedef struct elan4_ctxt
85518 +{
85519 +    struct elan4_dev      *ctxt_dev;                                   /* device we're associated with */
85520 +    struct list_head       ctxt_link;                                  /* chained on device */
85521 +    
85522 +    struct elan4_trap_ops *ctxt_ops;                                   /* client specific operations */
85523 +
85524 +    signed                ctxt_num;                                    /* local context number */
85525 +
85526 +    struct list_head      ctxt_cqalist;                                /* link list of command queue allocators */
85527 +    bitmap_t              ctxt_cqamap[BT_BITOUL(ELAN4_MAX_CQA)];       /*   bitmap for allocating cqa_idx */
85528 +
85529 +    ELAN4_HASH_ENTRY     **ctxt_mmuhash[2];                            /* software hash tables */
85530 +    spinlock_t            ctxt_mmulock;                                /*   and spinlock. */
85531 +} ELAN4_CTXT;
85532 +
85533 +typedef struct elan4_trap_ops
85534 +{
85535 +    void       (*op_eproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status);
85536 +    void       (*op_cproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum);
85537 +    void       (*op_dproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit);
85538 +    void       (*op_tproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status);
85539 +    void       (*op_iproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit);
85540 +    void       (*op_interrupt)  (ELAN4_CTXT *ctxt, E4_uint64 cookie);
85541 +    void       (*op_neterrmsg)  (ELAN4_CTXT *ctxt, ELAN4_NETERR_MSG *msg);
85542 +} ELAN4_TRAP_OPS;
85543 +
85544 +typedef struct elan4_route_table
85545 +{
85546 +    spinlock_t  tbl_lock;
85547 +    unsigned   tbl_size;
85548 +    sdramaddr_t tbl_entries;
85549 +} ELAN4_ROUTE_TABLE;
85550 +
85551 +#ifdef ELAN4_LARGE_PAGE_SUPPORT
85552 +#define NUM_HASH_TABLES                2
85553 +#else
85554 +#define NUM_HASH_TABLES                1
85555 +#endif
85556 +
85557 +#define DEV_STASH_ROUTE_COUNT 20
85558 +
85559 +typedef struct elan4_route_ringbuf {
85560 +    int start;
85561 +    int end;
85562 +    E4_VirtualProcessEntry routes[DEV_STASH_ROUTE_COUNT]; 
85563 +} ELAN4_ROUTE_RINGBUF;
85564 +
85565 +#define elan4_ringbuf_init(ringbuf) memset(&ringbuf, 0, sizeof(ELAN4_ROUTE_RINGBUF));
85566 +
85567 +typedef struct elan4_dev
85568 +{
85569 +    ELAN4_CTXT          dev_ctxt;                                      /* context for device operations */
85570 +
85571 +    ELAN4_DEV_OSDEP     dev_osdep;                                     /* OS specific entries */
85572 +
85573 +    int                         dev_instance;                                  /* device number */
85574 +    ELAN_DEVINFO        dev_devinfo;                                   /* device information (revision etc */
85575 +    ELAN_POSITION       dev_position;                                  /* position connected to switch */
85576 +    ELAN_DEV_IDX        dev_idx;                                       /* device idx registered with elanmod */
85577 +
85578 +    kmutex_t            dev_lock;                                      /* lock for device state/references */
85579 +    unsigned            dev_state;                                     /* device state */
85580 +    unsigned            dev_references;                                /*  # references */
85581 +    unsigned            dev_features;                                  /* features supported */
85582 +
85583 +    ioaddr_t            dev_regs;                                      /* Mapping of device registers */
85584 +    ELAN4_MAP_HANDLE    dev_regs_handle;
85585 +    ioaddr_t            dev_rom;                                       /* Mapping of rom */
85586 +    ELAN4_MAP_HANDLE    dev_rom_handle;
85587 +    ioaddr_t            dev_i2c;                                       /* Mapping of I2C registers */
85588 +    ELAN4_MAP_HANDLE    dev_i2c_handle;
85589 +    
85590 +    E4_uint64           dev_sdram_cfg;                                 /* SDRAM config value (from ROM) */
85591 +    E4_uint64           dev_sdram_initial_ecc_val;                     /* power on ECC register value */
85592 +    int                         dev_sdram_numbanks;                            /* # banks of sdram */
85593 +    ELAN4_SDRAM_BANK    dev_sdram_banks[SDRAM_MAX_BANKS];              /* Mapping of sdram banks */
85594 +    spinlock_t          dev_sdram_lock;                                /* spinlock for buddy allocator */
85595 +    sdramaddr_t                 dev_sdram_freelists[SDRAM_NUM_FREE_LISTS];
85596 +    unsigned            dev_sdram_freecounts[SDRAM_NUM_FREE_LISTS];
85597 +
85598 +    sdramaddr_t                 dev_cacheflush_space;                          /* sdram reserved for cache flush operation */
85599 +
85600 +    sdramaddr_t                 dev_faultarea;                                 /* fault areas for each unit */
85601 +    sdramaddr_t                 dev_inputtraparea;                             /* trap area for trapped transactions */
85602 +    sdramaddr_t                 dev_ctxtable;                                  /* context table (E4_ContextControlBlock) */
85603 +    int                         dev_ctxtableshift;                             /* and size (in bits) */
85604 +
85605 +    E4_uint32           dev_syscontrol;                                /* copy of system control register */
85606 +    spinlock_t          dev_syscontrol_lock;                           /*   spinlock to sequentialise modifications */
85607 +    unsigned            dev_direct_map_pci_writes;                     /*   # counts for CONT_DIRECT_MAP_PCI_WRITES */
85608 +
85609 +    volatile E4_uint32  dev_intmask;                                   /* copy of interrupt mask register */
85610 +    spinlock_t          dev_intmask_lock;                              /*   spinlock to sequentialise modifications */
85611 +
85612 +    /* i2c section */
85613 +    spinlock_t          dev_i2c_lock;                                  /* spinlock for i2c operations */
85614 +    unsigned int         dev_i2c_led_disabled;                         /* count of reasons led auto update disabled */
85615 +
85616 +    /* mmu section */
85617 +    unsigned            dev_pagesizeval[NUM_HASH_TABLES];              /* page size value */
85618 +    unsigned            dev_pageshift[NUM_HASH_TABLES];                        /* pageshift in bits. */
85619 +    unsigned            dev_hashsize[NUM_HASH_TABLES];                 /* # entries in mmu hash table */
85620 +    sdramaddr_t                 dev_hashtable[NUM_HASH_TABLES];                /* mmu hash table */
85621 +    ELAN4_HASH_ENTRY   *dev_mmuhash[NUM_HASH_TABLES];                  /*   and software shadow */
85622 +    ELAN4_HASH_ENTRY   **dev_mmufree[NUM_HASH_TABLES];                 /*   and partially free blocks */
85623 +    ELAN4_HASH_ENTRY    *dev_mmufreelist;                              /*   and free blocks */
85624 +    spinlock_t           dev_mmulock;
85625 +    E4_uint16           dev_topaddr[4];                                /* top address values */
85626 +    unsigned char       dev_topaddrvalid;
85627 +    unsigned char       dev_topaddrmode;
85628 +    unsigned char       dev_pteval;                                    /* allow setting of relaxed order/dont snoop attributes */
85629 +
85630 +    unsigned            dev_rsvd_hashmask[NUM_HASH_TABLES];
85631 +    unsigned            dev_rsvd_hashval[NUM_HASH_TABLES];
85632 +
85633 +    /* run queues */
85634 +    sdramaddr_t                 dev_comqlowpri;                                /* CProc low & high pri run queues */
85635 +    sdramaddr_t                 dev_comqhighpri;
85636 +
85637 +    sdramaddr_t                 dev_dmaqlowpri;                                /* DProc,TProc,Interrupt queues */
85638 +    sdramaddr_t                 dev_dmaqhighpri;
85639 +    sdramaddr_t                 dev_threadqlowpri;
85640 +    sdramaddr_t                 dev_threadqhighpri;
85641 +    sdramaddr_t                 dev_interruptq;
85642 +
85643 +    E4_uint32           dev_interruptq_nfptr;                          /* cache next main interrupt fptr */
85644 +    struct list_head     dev_interruptq_list;                          /*   list of operations to call when space in interruptq*/
85645 +
85646 +    /* command queue section */
85647 +    sdramaddr_t                 dev_cqaddr;                                    /* SDRAM address of command queues */
85648 +    unsigned            dev_cqoffset;                                  /* offset for command queue alignment constraints */
85649 +    unsigned            dev_cqcount;                                   /* number of command queue descriptors */
85650 +    bitmap_t           *dev_cqamap;                                    /* bitmap for allocation */
85651 +    spinlock_t          dev_cqlock;                                    /* spinlock to protect bitmap */
85652 +#ifdef CONFIG_MTRR
85653 +    unsigned            dev_cqreorder;                                 /* offset for first re-ordering queue on revb */
85654 +#endif
85655 +
85656 +    /* halt operation section */
85657 +    struct list_head     dev_haltop_list;                              /* list of operations to call when units halted */
85658 +    E4_uint32           dev_haltop_mask;                               /* mask of which ones to halt */
85659 +    E4_uint32           dev_haltop_active;                             /* mask of which haltops are executing */
85660 +    spinlock_t          dev_haltop_lock;                               /*    and their spinlock */
85661 +
85662 +    struct {
85663 +       struct list_head list;                                          /* list of halt operations for DMAs */
85664 +       ELAN4_CQ        *cq;                                            /*   and command queue's */
85665 +       ELAN4_INTOP      intop;                                         /*   and main interrupt op */
85666 +       E4_uint64        status;                                        /*   status register (when waiting for intop)*/
85667 +    }                   dev_dma_flushop[2];
85668 +
85669 +    unsigned            dev_halt_all_count;                            /* count of reasons to halt all units */
85670 +    unsigned            dev_halt_lowpri_count;                         /* count of reasons to halt lowpri queues */
85671 +    unsigned            dev_halt_cproc_count;                          /* count of reasons to halt command processor */
85672 +    unsigned            dev_halt_dproc_count;                          /* count of reasons to halt dma processor */
85673 +    unsigned            dev_halt_tproc_count;                          /* count of reasons to halt thread processor */
85674 +    unsigned            dev_discard_all_count;                         /* count of reasons to discard all packets */
85675 +    unsigned            dev_discard_lowpri_count;                      /* count of reasons to discard non-system packets */
85676 +    unsigned            dev_discard_highpri_count;                     /* count of reasons to discard system packets */
85677 +
85678 +    E4_uint32           dev_schedstatus;                               /* copy of schedule status register */
85679 +
85680 +    /* local context allocation section */
85681 +    spinlock_t          dev_ctxlock;                                   /* spinlock to protect bitmap */
85682 +    bitmap_t           *dev_ctxmap;                                    /* bitmap for local context allocation */
85683 +
85684 +    spinlock_t          dev_ctxt_lock;                                 /* spinlock to protect context list */
85685 +    struct list_head     dev_ctxt_list;                                        /* linked list of contexts */
85686 +
85687 +    /* locks to sequentialise interrupt handling */
85688 +    spinlock_t          dev_trap_lock;                                 /* spinlock while handling a trap */
85689 +    spinlock_t          dev_requeue_lock;                              /* spinlock sequentialising cproc requeue */
85690 +
85691 +    /* error rate interrupt section */
85692 +    long                dev_error_time;                                /* lbolt at start of sampling period */
85693 +    unsigned            dev_errors_per_period;                         /* errors so far this sampling period */
85694 +    timer_fn_t          dev_error_timeoutid;                           /* timeout to re-enable error interrupts */
85695 +    timer_fn_t          dev_linkerr_timeoutid;                         /* timeout to clear link error led */
85696 +
85697 +    /* kernel threads */
85698 +    unsigned            dev_stop_threads:1;                            /* kernel threads should exit */
85699 +
85700 +    /* main interrupt thread */
85701 +    kcondvar_t          dev_mainint_wait;                              /* place for mainevent interrupt thread to sleep */
85702 +    spinlock_t          dev_mainint_lock;                              /*   and its spinlock */
85703 +    unsigned            dev_mainint_started:1;
85704 +    unsigned            dev_mainint_stopped:1;
85705 +
85706 +    /* device context - this is used to flush insert cache/instruction cache/dmas & threads */
85707 +    ELAN4_CPROC_TRAP     dev_cproc_trap;                               /* space to extract cproc trap into */
85708 +
85709 +    struct list_head     dev_intop_list;                               /* list of main interrupt operations */
85710 +    spinlock_t          dev_intop_lock;                                /*   and spinlock */
85711 +    E4_uint64           dev_intop_cookie;                              /*   and next cookie to use */
85712 +
85713 +    spinlock_t          dev_flush_lock;                                /* spinlock for flushing */
85714 +    kcondvar_t          dev_flush_wait;                                /*  and place to sleep */
85715 +
85716 +    ELAN4_CQ           *dev_flush_cq[COMMAND_INSERTER_CACHE_ENTRIES];  /* command queues to flush the insert cache */
85717 +    ELAN4_INTOP          dev_flush_op[COMMAND_INSERTER_CACHE_ENTRIES]; /* and a main interrupt operation for each one */
85718 +    unsigned            dev_flush_finished;                            /* flush command finished */
85719 +
85720 +    ELAN4_HALTOP        dev_iflush_haltop;                             /* halt operation for icache flush */
85721 +    unsigned            dev_iflush_queued:1;                           /* icache haltop queued */
85722 +
85723 +    ELAN4_ROUTE_TABLE   *dev_routetable;                               /* virtual process table (for dma queue flush)*/
85724 +    sdramaddr_t          dev_sdrampages[2];                            /* pages of sdram to hold suspend code sequence */
85725 +    E4_Addr             dev_tproc_suspend;                             /*  st8suspend instruction */
85726 +    E4_Addr             dev_tproc_space;                               /*     and target memory */
85727 +
85728 +    sdramaddr_t                 dev_neterr_inputq;                             /* network error input queue descriptor & event */
85729 +    sdramaddr_t                 dev_neterr_slots;                              /* network error message slots */
85730 +    ELAN4_CQ           *dev_neterr_msgcq;                              /* command queue for sending messages */
85731 +    ELAN4_CQ           *dev_neterr_intcq;                              /* command queue for message received interrupt */
85732 +    ELAN4_INTOP                 dev_neterr_intop;                              /*   and its main interrupt operation */
85733 +    E4_uint64           dev_neterr_queued;                             /* # message queued in msgcq */
85734 +    spinlock_t           dev_neterr_lock;                              /*   and spinlock .... */
85735 +
85736 +    ELAN4_DEV_STATS     dev_stats;                                     /* device statistics */
85737 +    E4_uint64           dev_sdramerrs[30];                             /* last few sdram errors for procfs */
85738 +
85739 +    spinlock_t          dev_error_routes_lock;
85740 +    unsigned int       *dev_ack_errors;                                /* Map of source of dproc ack errors */
85741 +    ELAN4_ROUTE_RINGBUF  dev_ack_error_routes;
85742 +    unsigned int        *dev_dproc_timeout;                             /* Ditto dproc timeout errors */
85743 +    ELAN4_ROUTE_RINGBUF  dev_dproc_timeout_routes;
85744 +    unsigned int        *dev_cproc_timeout;                             /* Ditto cproc timeout errors */
85745 +    ELAN4_ROUTE_RINGBUF  dev_cproc_timeout_routes;
85746 +
85747 +    struct list_head     dev_hc_list;                                   /* list of the allocated hash_chunks */
85748 +
85749 +    ELAN4_IPROC_TRAP     dev_iproc_trap;                               /* space for iproc trap */
85750 +} ELAN4_DEV;
85751 +
85752 +/* values for dev_state */
85753 +#define ELAN4_STATE_STOPPED            (1 << 0)                        /* device initialised but not started */
85754 +#define ELAN4_STATE_STARTING           (1 << 1)                        /* device in process of starting */
85755 +#define ELAN4_STATE_STARTED            (1 << 2)                        /* device started */
85756 +#define ELAN4_STATE_STOPPING           (1 << 3)                        /* device in process of stopping */
85757 +
85758 +/* values for dev_features */
85759 +#define ELAN4_FEATURE_NO_WRITE_COMBINE (1 << 0)                        /* don't allow write combining at all */
85760 +#define ELAN4_FEATURE_PCI_MAP          (1 << 1)                        /* must use pci mapping functions */
85761 +#define ELAN4_FEATURE_NO_DWORD_READ    (1 << 2)                        /* must perform 64 bit PIO reads */
85762 +
85763 +extern __inline__ unsigned int
85764 +__elan4_readb (ELAN4_DEV *dev, ioaddr_t addr)
85765 +{
85766 +    if (dev->dev_features & ELAN4_FEATURE_NO_DWORD_READ)
85767 +    {
85768 +       uint64_t val = readq ((void *) ((unsigned long) addr & ~7));
85769 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xff);
85770 +    }
85771 +    return readb (addr);
85772 +}
85773 +
85774 +extern __inline__ unsigned int
85775 +__elan4_readw (ELAN4_DEV *dev, ioaddr_t addr)
85776 +{
85777 +    if (dev->dev_features & ELAN4_FEATURE_NO_DWORD_READ)
85778 +    {
85779 +       uint64_t val = readq ((void *) ((unsigned long) addr & ~7));
85780 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xffff);
85781 +    }
85782 +    return readw (addr);
85783 +}
85784 +
85785 +extern __inline__ unsigned int
85786 +__elan4_readl (ELAN4_DEV *dev, ioaddr_t addr)
85787 +{
85788 +    if (dev->dev_features & ELAN4_FEATURE_NO_DWORD_READ)
85789 +    {
85790 +       uint64_t val = readq ((void *) ((unsigned long) addr & ~7));
85791 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xffffffff);
85792 +    }
85793 +    return readl (addr);
85794 +}
85795 +
85796 +/* macros for accessing dev->dev_regs.Tags. */
85797 +#define write_tag(dev,what,val)                writeq (val, dev->dev_regs + offsetof (E4_Registers, Tags.what))
85798 +#define read_tag(dev,what)             readq (dev->dev_regs + offsetof (E4_Registers, Tags.what))
85799 +
85800 +/* macros for accessing dev->dev_regs.Regs. */
85801 +#define write_reg64(dev,what,val)      writeq (val, dev->dev_regs + offsetof (E4_Registers, Regs.what))
85802 +#define write_reg32(dev,what,val)      writel (val, dev->dev_regs + offsetof (E4_Registers, Regs.what))
85803 +#define read_reg64(dev,what)           readq (dev->dev_regs + offsetof (E4_Registers, Regs.what))
85804 +#define read_reg32(dev,what)           __elan4_readl (dev, dev->dev_regs + offsetof (E4_Registers, Regs.what))
85805 +
85806 +/* macros for accessing dev->dev_regs.uRegs. */
85807 +#define write_ureg64(dev,what,val)     writeq (val, dev->dev_regs + offsetof (E4_Registers, uRegs.what))
85808 +#define write_ureg32(dev,what,val)     writel (val, dev->dev_regs + offsetof (E4_Registers, uRegs.what))
85809 +#define read_ureg64(dev,what)          readq (dev->dev_regs + offsetof (E4_Registers, uRegs.what))
85810 +#define read_ureg32(dev,what)          __elan4_readl (dev, dev->dev_regs + offsetof (E4_Registers, uRegs.what))
85811 +
85812 +/* macros for accessing dev->dev_i2c */
85813 +#define write_i2c(dev,what,val)                writeb (val, dev->dev_i2c + offsetof (E4_I2C, what))
85814 +#define read_i2c(dev,what)             __elan4_readb (dev, dev->dev_i2c + offsetof (E4_I2C, what))
85815 +
85816 +/* macros for accessing dev->dev_rom */
85817 +#define read_ebus_rom(dev,off)         __elan4_readb (dev, dev->dev_rom + off)
85818 +
85819 +/* PIO flush operations - ensure writes to registers/sdram are ordered */
85820 +#ifdef CONFIG_IA64_SGI_SN2
85821 +#define pioflush_reg(dev)              read_reg32(dev,InterruptReg)
85822 +#define pioflush_sdram(dev)            elan4_sdram_readl(dev, 0)
85823 +#else
85824 +#define pioflush_reg(dev)              mb()
85825 +#define pioflush_sdram(dev)            mb()
85826 +#endif
85827 +
85828 +/* macros for manipulating the interrupt mask register */
85829 +#define SET_INT_MASK(dev,value)        \
85830 +do { \
85831 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask = (value)); \
85832 +    pioflush_reg(dev);\
85833 +} while (0)
85834 +
85835 +#define CHANGE_INT_MASK(dev, value) \
85836 +do { \
85837 +    if ((dev)->dev_intmask != (value)) \
85838 +    {\
85839 +       write_reg32 (dev, InterruptMask, (dev)->dev_intmask = (value));\
85840 +       pioflush_reg(dev);\
85841 +    }\
85842 +} while (0)
85843 +
85844 +#define ENABLE_INT_MASK(dev,value) \
85845 +do { \
85846 +    unsigned long flags; \
85847 + \
85848 +    spin_lock_irqsave (&(dev)->dev_intmask_lock, flags); \
85849 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask |= (value)); \
85850 +    pioflush_reg(dev);\
85851 +    spin_unlock_irqrestore (&(dev)->dev_intmask_lock, flags); \
85852 +} while (0)
85853 +
85854 +#define DISABLE_INT_MASK(dev,value) \
85855 +do { \
85856 +    unsigned long flags; \
85857 + \
85858 +    spin_lock_irqsave (&(dev)->dev_intmask_lock, flags); \
85859 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask &= ~(value)); \
85860 +    pioflush_reg(dev);\
85861 +    spin_unlock_irqrestore (&(dev)->dev_intmask_lock, flags); \
85862 +} while (0)
85863 +
85864 +#define SET_SYSCONTROL(dev,what,value) \
85865 +do { \
85866 +    unsigned long flags; \
85867 +\
85868 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
85869 +    if ((dev)->what++ == 0) \
85870 +        write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol |= (value)); \
85871 +    pioflush_reg(dev);\
85872 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
85873 +} while (0)
85874 +
85875 +#define CLEAR_SYSCONTROL(dev,what,value) \
85876 +do { \
85877 +    unsigned long flags; \
85878 +\
85879 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
85880 +    if (--(dev)->what == 0)\
85881 +       write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol &= ~(value)); \
85882 +    pioflush_reg (dev); \
85883 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
85884 +} while (0)
85885 +
85886 +#define PULSE_SYSCONTROL(dev,value) \
85887 +do { \
85888 +    unsigned long flags; \
85889 +\
85890 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
85891 +    write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol | (value)); \
85892 +    pioflush_reg (dev); \
85893 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
85894 +} while (0)
85895 +
85896 +#define CHANGE_SYSCONTROL(dev,add,sub) \
85897 +do { \
85898 +    unsigned long flags; \
85899 +\
85900 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
85901 +    dev->dev_syscontrol |= (add);\
85902 +    dev->dev_syscontrol &= ~(sub);\
85903 +    write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol);\
85904 +    pioflush_reg (dev); \
85905 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
85906 +} while (0)
85907 +
85908 +#define SET_SCHED_STATUS(dev, value)\
85909 +do {\
85910 +    write_reg32 (dev, SchedStatus.Status, (dev)->dev_schedstatus = (value));\
85911 +    pioflush_reg (dev);\
85912 +} while (0)
85913 +
85914 +#define CHANGE_SCHED_STATUS(dev, value)\
85915 +do {\
85916 +    if ((dev)->dev_schedstatus != (value))\
85917 +    {\
85918 +       write_reg32 (dev, SchedStatus.Status, (dev)->dev_schedstatus = (value));\
85919 +       pioflush_reg (dev);\
85920 +    }\
85921 +} while (0)
85922 +
85923 +#define PULSE_SCHED_RESTART(dev,value)\
85924 +do {\
85925 +    write_reg32 (dev, SchedStatus.Restart, value);\
85926 +    pioflush_reg (dev);\
85927 +} while (0)
85928 +
85929 +/* device context elan address space */
85930 +#define DEVICE_TPROC_SUSPEND_ADDR              (0x1000000000000000ull)
85931 +#define DEVICE_TPROC_SPACE_ADDR                        (0x1000000000000000ull + SDRAM_PAGE_SIZE)
85932 +#if defined(__LITTLE_ENDIAN__)
85933 +#  define DEVICE_TPROC_SUSPEND_INSTR           0xd3f040c0 /* st64suspend %r16, [%r1] */
85934 +#else
85935 +#  define DEVICE_TPROC_SUSPEND_INSTR           0xc040f0d3 /* st64suspend %r16, [%r1] */
85936 +#endif
85937 +
85938 +#define DEVICE_NETERR_INPUTQ_ADDR              (0x2000000000000000ull)
85939 +#define DEVICE_NETERR_INTCQ_ADDR               (0x2000000000000000ull + SDRAM_PAGE_SIZE)
85940 +#define DEVICE_NETERR_SLOTS_ADDR               (0x2000000000000000ull + SDRAM_PAGE_SIZE*2)
85941 +
85942 +/*
85943 + * Interrupt operation cookie space
85944 + * [50:48]     type
85945 + * [47:0]      value
85946 + */
85947 +#define INTOP_PERSISTENT                       (0x1000000000000ull)
85948 +#define INTOP_ONESHOT                          (0x2000000000000ull)
85949 +#define INTOP_TYPE_MASK                                (0x3000000000000ull)
85950 +#define INTOP_VALUE_MASK                       (0x0ffffffffffffull)
85951 +
85952 +/* functions for accessing sdram - sdram.c */
85953 +extern unsigned char      elan4_sdram_readb (ELAN4_DEV *dev, sdramaddr_t ptr);
85954 +extern unsigned short     elan4_sdram_readw (ELAN4_DEV *dev, sdramaddr_t ptr);
85955 +extern unsigned int       elan4_sdram_readl (ELAN4_DEV *dev, sdramaddr_t ptr);
85956 +extern unsigned long long elan4_sdram_readq (ELAN4_DEV *dev, sdramaddr_t ptr);
85957 +extern void               elan4_sdram_writeb (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned char val);
85958 +extern void               elan4_sdram_writew (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned short val);
85959 +extern void               elan4_sdram_writel (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned int val);
85960 +extern void               elan4_sdram_writeq (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned long long val);
85961 +
85962 +extern void              elan4_sdram_zerob_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
85963 +extern void              elan4_sdram_zerow_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
85964 +extern void              elan4_sdram_zerol_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
85965 +extern void              elan4_sdram_zeroq_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
85966 +
85967 +extern void               elan4_sdram_copyb_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
85968 +extern void               elan4_sdram_copyw_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
85969 +extern void               elan4_sdram_copyl_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
85970 +extern void               elan4_sdram_copyq_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
85971 +extern void               elan4_sdram_copyb_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
85972 +extern void               elan4_sdram_copyw_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
85973 +extern void               elan4_sdram_copyl_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
85974 +extern void               elan4_sdram_copyq_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
85975 +
85976 +/* device.c - configuration */
85977 +extern unsigned int elan4_hash_0_size_val;
85978 +extern unsigned int elan4_hash_1_size_val;
85979 +extern unsigned int elan4_ctxt_table_shift;
85980 +extern unsigned int elan4_ln2_max_cqs;
85981 +extern unsigned int elan4_dmaq_highpri_size;
85982 +extern unsigned int elan4_threadq_highpri_size;
85983 +extern unsigned int elan4_dmaq_lowpri_size;
85984 +extern unsigned int elan4_threadq_lowpri_size;
85985 +extern unsigned int elan4_interruptq_size;
85986 +
85987 +extern unsigned int elan4_mainint_punt_loops;
85988 +extern unsigned int elan4_mainint_resched_ticks;
85989 +
85990 +
85991 +/* device.c */
85992 +extern void               elan4_set_schedstatus (ELAN4_DEV *dev, E4_uint32 intreg);
85993 +extern void               elan4_queue_haltop (ELAN4_DEV *dev, ELAN4_HALTOP *op);
85994 +extern void              elan4_queue_intop (ELAN4_DEV *dev, ELAN4_CQ *cq, ELAN4_INTOP *op);
85995 +extern void              elan4_register_intop (ELAN4_DEV *dev, ELAN4_INTOP *op);
85996 +extern void              elan4_deregister_intop (ELAN4_DEV *dev, ELAN4_INTOP *op);
85997 +extern void              elan4_queue_dma_flushop (ELAN4_DEV *dev, ELAN4_DMA_FLUSHOP *op, int hipri);
85998 +extern void              elan4_queue_mainintop (ELAN4_DEV *dev, ELAN4_INTOP *op);
85999 +
86000 +extern int                elan4_1msi0 (ELAN4_DEV *dev);
86001 +
86002 +extern int                elan4_insertctxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt, ELAN4_TRAP_OPS *ops);
86003 +extern void               elan4_removectxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt);
86004 +extern ELAN4_CTXT        *elan4_localctxt (ELAN4_DEV *dev, unsigned num);
86005 +extern ELAN4_CTXT        *elan4_networkctxt (ELAN4_DEV *dev, unsigned num);
86006 +
86007 +extern int                elan4_attach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum);
86008 +extern void               elan4_detach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum);
86009 +extern void              elan4_set_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum, E4_uint32 state);
86010 +extern void              elan4_set_routetable (ELAN4_CTXT *ctxt, ELAN4_ROUTE_TABLE *tbl);
86011 +
86012 +extern ELAN4_CQA *        elan4_getcqa (ELAN4_CTXT *ctxt, unsigned int idx);
86013 +extern void               elan4_putcqa (ELAN4_CTXT *ctxt, unsigned int idx);
86014 +extern ELAN4_CQ          *elan4_alloccq (ELAN4_CTXT *ctxt, unsigned cqsize, unsigned cqperm, unsigned cqtype);
86015 +extern void               elan4_freecq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq);
86016 +extern void               elan4_restartcq (ELAN4_DEV *dev, ELAN4_CQ *cq);
86017 +extern void               elan4_flushcq (ELAN4_DEV *dev, ELAN4_CQ *cq);
86018 +extern void               elan4_updatecq (ELAN4_DEV *dev, ELAN4_CQ *cq, unsigned perm, unsigned restart);
86019 +
86020 +extern void              elan4_flush_icache (ELAN4_CTXT *ctxt);
86021 +extern void              elan4_flush_icache_halted (ELAN4_CTXT *ctxt);
86022 +
86023 +extern int                elan4_initialise_device (ELAN4_DEV *dev);
86024 +extern void               elan4_finalise_device (ELAN4_DEV *dev);
86025 +extern int                elan4_start_device (ELAN4_DEV *dev);
86026 +extern void               elan4_stop_device (ELAN4_DEV *dev);
86027 +
86028 +extern int               elan4_compute_position (ELAN_POSITION *pos, unsigned nodeid, unsigned numnodes, unsigned aritiyval);
86029 +extern int               elan4_get_position (ELAN4_DEV *dev, ELAN_POSITION *pos);
86030 +extern int               elan4_set_position (ELAN4_DEV *dev, ELAN_POSITION *pos);
86031 +extern void              elan4_get_params   (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short *mask);
86032 +extern void              elan4_set_params   (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short mask);
86033 +
86034 +
86035 +extern int                elan4_read_vpd(ELAN4_DEV *dev, unsigned char *tag, unsigned char *result) ;
86036 +
86037 +
86038 +/* device_osdep.c */
86039 +extern unsigned int      elan4_pll_cfg;
86040 +extern int               elan4_pll_div;
86041 +extern int               elan4_mod45disable;
86042 +
86043 +extern int                elan4_pciinit (ELAN4_DEV *dev);
86044 +extern void               elan4_pcifini (ELAN4_DEV *dev);
86045 +extern void               elan4_pcierror (ELAN4_DEV *dev);
86046 +
86047 +extern ELAN4_DEV        *elan4_reference_device (int instance, int state);
86048 +extern void              elan4_dereference_device (ELAN4_DEV *dev);
86049 +
86050 +extern ioaddr_t           elan4_map_device (ELAN4_DEV *dev, unsigned bar, unsigned off, unsigned size, ELAN4_MAP_HANDLE *handlep);
86051 +extern void               elan4_unmap_device (ELAN4_DEV *dev, ioaddr_t ptr, unsigned size, ELAN4_MAP_HANDLE *handlep);
86052 +extern unsigned long      elan4_resource_len (ELAN4_DEV *dev, unsigned bar);
86053 +
86054 +extern void               elan4_configure_mtrr (ELAN4_DEV *dev);
86055 +extern void              elan4_unconfigure_mtrr (ELAN4_DEV *dev);
86056 +
86057 +/* i2c.c */
86058 +extern int               i2c_disable_auto_led_update (ELAN4_DEV *dev);
86059 +extern void              i2c_enable_auto_led_update (ELAN4_DEV *dev);
86060 +extern int               i2c_write (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
86061 +extern int               i2c_read (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
86062 +extern int               i2c_writereg (ELAN4_DEV *dev, unsigned int addr, unsigned int reg, unsigned int count, unsigned char *data);
86063 +extern int               i2c_readreg (ELAN4_DEV *dev, unsigned int addr, unsigned int reg, unsigned int count, unsigned char *data);
86064 +extern int               i2c_read_rom (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
86065 +
86066 +#if defined(__linux__)
86067 +/* procfs_Linux.c */
86068 +extern void              elan4_procfs_device_init (ELAN4_DEV *dev);
86069 +extern void              elan4_procfs_device_fini (ELAN4_DEV *dev);
86070 +extern void              elan4_procfs_init(void);
86071 +extern void              elan4_procfs_fini(void);
86072 +
86073 +extern struct proc_dir_entry *elan4_procfs_root;
86074 +extern struct proc_dir_entry *elan4_config_root;
86075 +#endif
86076 +
86077 +/* sdram.c */
86078 +extern void              elan4_sdram_init (ELAN4_DEV *dev);
86079 +extern void               elan4_sdram_fini (ELAN4_DEV *dev);
86080 +extern void               elan4_sdram_setup_delay_lines (ELAN4_DEV *dev);
86081 +extern int                elan4_sdram_init_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
86082 +extern void               elan4_sdram_fini_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
86083 +extern void              elan4_sdram_add_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
86084 +extern sdramaddr_t        elan4_sdram_alloc (ELAN4_DEV *dev, int nbytes);
86085 +extern void               elan4_sdram_free (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
86086 +extern void               elan4_sdram_flushcache (ELAN4_DEV *dev, sdramaddr_t base, int nbytes);
86087 +extern char              *elan4_sdramerr2str (ELAN4_DEV *dev, E4_uint64 status, char *str);
86088 +
86089 +/* traps.c */
86090 +extern void               elan4_display_eproc_trap (void *type, int mode, char *str, ELAN4_EPROC_TRAP *trap);
86091 +extern void               elan4_display_cproc_trap (void *type, int mode, char *str, ELAN4_CPROC_TRAP *trap);
86092 +extern void               elan4_display_dproc_trap (void *type, int mode, char *str, ELAN4_DPROC_TRAP *trap);
86093 +extern void               elan4_display_tproc_trap (void *type, int mode, char *str, ELAN4_TPROC_TRAP *trap);
86094 +extern void               elan4_display_iproc_trap (void *type, int mode, char *str, ELAN4_IPROC_TRAP *trap);
86095 +
86096 +
86097 +extern void               elan4_extract_eproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_EPROC_TRAP *trap, int iswaitevent);
86098 +extern void               elan4_extract_cproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_CPROC_TRAP *trap, unsigned cqnum);
86099 +extern void               elan4_extract_dproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_DPROC_TRAP *trap, unsigned unit);
86100 +extern void               elan4_extract_tproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_TPROC_TRAP *trap);
86101 +extern void               elan4_extract_iproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_IPROC_TRAP *trap, unsigned unit);
86102 +extern void elan4_ringbuf_store(ELAN4_ROUTE_RINGBUF *ringbuf, E4_VirtualProcessEntry *route, ELAN4_DEV *dev);
86103 +extern int                cproc_open_extract_vp (ELAN4_DEV *dev, ELAN4_CQ *cq);
86104 +
86105 +extern void               elan4_inspect_iproc_trap (ELAN4_IPROC_TRAP *trap);
86106 +extern E4_uint64          elan4_trapped_open_command (ELAN4_DEV *dev, ELAN4_CQ *cq);
86107 +
86108 +/* mmu.c */
86109 +extern void               elan4mmu_flush_tlb (ELAN4_DEV *dev);
86110 +extern ELAN4_HASH_ENTRY  *elan4mmu_ptealloc (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, unsigned int *tagidxp);
86111 +extern int                elan4mmu_pteload (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, E4_uint64 pte);
86112 +extern void               elan4mmu_unload_range (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned long len);
86113 +extern void               elan4mmu_invalidate_ctxt (ELAN4_CTXT *ctxt);
86114 +
86115 +extern ELAN4_HASH_CACHE  *elan4mmu_reserve (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned int npages, int cansleep);
86116 +extern void               elan4mmu_release (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc);
86117 +extern void               elan4mmu_set_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx, E4_uint64 newpte);
86118 +extern E4_uint64          elan4mmu_get_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx);
86119 +extern void               elan4mmu_clear_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx);
86120 +
86121 +/* mmu_osdep.c */
86122 +extern int               elan4mmu_categorise_paddr (ELAN4_DEV *dev, physaddr_t *physp);
86123 +extern int                elan4mmu_alloc_topaddr (ELAN4_DEV *dev, physaddr_t paddr, unsigned type);
86124 +extern E4_uint64          elan4mmu_phys2pte (ELAN4_DEV *dev, physaddr_t paddr, unsigned perm);
86125 +extern physaddr_t        elan4mmu_pte2phys (ELAN4_DEV *dev, E4_uint64 pte);
86126 +
86127 +/* neterr.c */
86128 +extern int                elan4_neterr_init (ELAN4_DEV *dev);
86129 +extern void               elan4_neterr_destroy (ELAN4_DEV *dev);
86130 +extern int                elan4_neterr_sendmsg (ELAN4_DEV *dev, unsigned int nodeid, unsigned int retries, ELAN4_NETERR_MSG *msg);
86131 +extern int                elan4_neterr_iproc_trap (ELAN4_DEV *dev, ELAN4_IPROC_TRAP *trap);
86132 +
86133 +/* routetable.c */
86134 +extern ELAN4_ROUTE_TABLE *elan4_alloc_routetable (ELAN4_DEV *dev, unsigned size);
86135 +extern void               elan4_free_routetable (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl);
86136 +extern void               elan4_write_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry);
86137 +extern void               elan4_read_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry);
86138 +extern void               elan4_invalidate_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp);
86139 +extern int                elan4_generate_route (ELAN_POSITION *pos, E4_VirtualProcessEntry *route, unsigned ctxnum,
86140 +                                               unsigned lowid, unsigned highid, unsigned options);
86141 +extern int               elan4_check_route (ELAN_POSITION *pos, ELAN_LOCATION location, E4_VirtualProcessEntry *route, unsigned flags);
86142 +
86143 +/* user.c */
86144 +extern int        __categorise_command (E4_uint64 command, int *cmdSize);
86145 +extern int        __whole_command (sdramaddr_t *commandPtr, sdramaddr_t insertPtr, unsigned int cqSize, unsigned int cmdSize);
86146 +
86147 +/*
86148 + * Local variables:
86149 + * c-file-style: "stroustrup"
86150 + * End:
86151 + */
86152 +#endif /* __ELAN4_ELANDEV_H */
86153 Index: linux-2.4.21/include/elan4/device_Linux.h
86154 ===================================================================
86155 --- linux-2.4.21.orig/include/elan4/device_Linux.h      2004-02-23 16:02:56.000000000 -0500
86156 +++ linux-2.4.21/include/elan4/device_Linux.h   2005-06-01 23:12:54.735418432 -0400
86157 @@ -0,0 +1,97 @@
86158 +/*
86159 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
86160 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86161 + * 
86162 + *    For licensing information please see the supplied COPYING file
86163 + *
86164 + */
86165 +
86166 +#ifndef __ELAN4_ELANDEV_LINUX_H
86167 +#define __ELAN4_ELANDEV_LINUX_H
86168 +
86169 +#ident "$Id: device_Linux.h,v 1.19 2004/08/09 14:02:37 daniel Exp $"
86170 +/*      $Source: /cvs/master/quadrics/elan4mod/device_Linux.h,v $*/
86171 +
86172 +#include <linux/coproc.h>
86173 +
86174 +#if defined(MPSAS)
86175 +#include <elan4/mpsas.h>
86176 +#endif
86177 +
86178 +#if defined(CONFIG_DEVFS_FS)
86179 +#include <linux/devfs_fs_kernel.h>
86180 +#endif
86181 +
86182 +#define ELAN4_MAJOR              61
86183 +#define ELAN4_NAME               "elan4"
86184 +#define ELAN4_MAX_CONTROLLER     16           /* limited to 4 bits */
86185
86186 +/* OS dependant component of ELAN4_DEV struct */
86187 +typedef struct elan4_dev_osdep
86188 +{
86189 +    struct pci_dev       *pdev;                        /* PCI config data */
86190 +
86191 +    struct proc_dir_entry *procdir;
86192 +    struct proc_dir_entry *configdir;
86193 +    struct proc_dir_entry *statsdir;
86194 +
86195 +#if defined(CONFIG_DEVFS_FS)
86196 +    devfs_handle_t devfs_control;
86197 +    devfs_handle_t devfs_sdram;
86198 +    devfs_handle_t devfs_user;
86199 +#endif
86200 +
86201 +#if defined(CONFIG_MTRR)
86202 +    int                           sdram_mtrr;
86203 +    int                           regs_mtrr;
86204 +#endif
86205 +} ELAN4_DEV_OSDEP;
86206 +
86207 +/* /dev/elan/rmsX */
86208 +
86209 +/* /dev/elan4/controlX */
86210 +typedef struct control_private
86211 +{
86212 +    struct elan4_dev   *pr_dev;
86213 +    unsigned           pr_boundary_scan;
86214 +} CONTROL_PRIVATE;
86215 +
86216 +/* /dev/elan4/sdramX */
86217 +typedef struct mem_page
86218 +{
86219 +    struct mem_page *pg_next;
86220 +    sdramaddr_t      pg_addr;
86221 +    unsigned long    pg_pgoff;
86222 +    unsigned        pg_ref;
86223 +} MEM_PAGE;
86224 +
86225 +#define MEM_HASH_SIZE  32
86226 +#define MEM_HASH(pgoff)        ((pgoff) & (MEM_HASH_SIZE-1))
86227 +
86228 +typedef struct mem_private
86229 +{
86230 +    struct elan4_dev *pr_dev;
86231 +    MEM_PAGE         *pr_pages[MEM_HASH_SIZE];
86232 +    spinlock_t        pr_lock;
86233 +} MEM_PRIVATE;
86234 +
86235 +/* /dev/elan4/userX */
86236 +typedef struct user_private
86237 +{
86238 +    atomic_t         pr_ref;
86239 +    struct user_ctxt *pr_uctx;
86240 +    struct mm_struct *pr_mm;
86241 +    coproc_ops_t      pr_coproc;
86242 +} USER_PRIVATE;
86243 +
86244 +/* No mapping handles on linux */
86245 +typedef void *ELAN4_MAP_HANDLE;
86246 +
86247 +#define ELAN4_TASK_HANDLE()    ((unsigned long) current->mm)
86248 +
86249 +/*
86250 + * Local variables:
86251 + * c-file-style: "stroustrup"
86252 + * End:
86253 + */
86254 +#endif /* __ELAN4_ELANDEV_LINUX_H */
86255 Index: linux-2.4.21/include/elan4/dma.h
86256 ===================================================================
86257 --- linux-2.4.21.orig/include/elan4/dma.h       2004-02-23 16:02:56.000000000 -0500
86258 +++ linux-2.4.21/include/elan4/dma.h    2005-06-01 23:12:54.736418280 -0400
86259 @@ -0,0 +1,82 @@
86260 +/*
86261 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
86262 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86263 + *
86264 + *    For licensing information please see the supplied COPYING file
86265 + *
86266 + */
86267 +
86268 +#ifndef __ELAN4_DMA_H
86269 +#define __ELAN4_DMA_H
86270 +
86271 +#ident "$Id: dma.h,v 1.16 2003/09/04 12:39:17 david Exp $"
86272 +/*      $Source: /cvs/master/quadrics/elan4hdr/dma.h,v $*/
86273 +
86274 +#include <elan4/types.h>
86275 +
86276 +/* Alignment for a DMA descriptor */
86277 +#define E4_DMA_ALIGN           (64)
86278 +
86279 +/* Maximum size of a single DMA ((1 << 31)-1) */
86280 +#define E4_MAX_DMA_SIZE                (0x7fffffff)
86281 +
86282 +/* 
86283 + * dma_typeSize
86284 + *
86285 + * [63:32]     Size
86286 + * [31]                unused
86287 + * [30]                IsRemote
86288 + * [29]                QueueWrite
86289 + * [28]                ShmemWrite
86290 + * [27:26]     DataType
86291 + * [25]                Broadcast
86292 + * [24]                AlignPackets
86293 + * [23:16]     FailCount
86294 + * [15:14]     unused
86295 + * [13:0]      Context
86296 + */
86297 +
86298 +#define DMA_FailCount(val)     (((val) & 0xff) << 16)
86299 +#define DMA_AlignPackets       (1 << 24)
86300 +#define DMA_Broadcast          (1 << 25)
86301 +#define DMA_ShMemWrite         (1 << 28)
86302 +#define DMA_QueueWrite         (1 << 29)
86303 +#define DMA_IsRemote           (1 << 30)
86304 +#define DMA_Context(val)       ((unsigned) (val) & 0x3ff)
86305 +#define DMA_ContextMask                0x3fffull
86306 +#define Dma_TypeSizeMask       0xfffffffffff00000ull
86307 +
86308 +#define DMA_DataTypeByte       (E4_DATATYPE_BYTE  << 26)
86309 +#define DMA_DataTypeShort      (E4_DATATYPE_SHORT << 26)
86310 +#define DMA_DataTypeWord       (E4_DATATYPE_WORD  << 26)
86311 +#define DMA_DataTypeLong       (E4_DATATYPE_DWORD << 26)
86312 +
86313 +#define E4_DMA_TYPE_SIZE(size, dataType, flags, failCount)     \
86314 +    ((((E4_uint64)(size)) << 32) |  ((dataType) & DMA_DataTypeLong) | \
86315 +     (flags) | DMA_FailCount(failCount))
86316 +
86317 +typedef volatile struct e4_dma
86318 +{
86319 +    E4_uint64          dma_typeSize;
86320 +    E4_uint64          dma_cookie;
86321 +    E4_uint64          dma_vproc;
86322 +    E4_Addr            dma_srcAddr;
86323 +    E4_Addr            dma_dstAddr;
86324 +    E4_Addr            dma_srcEvent;
86325 +    E4_Addr            dma_dstEvent;
86326 +} E4_DMA;
86327 +
86328 +/* Same as above but padded to 64-bytes */
86329 +typedef volatile struct e4_dma64
86330 +{
86331 +    E4_uint64          dma_typeSize;
86332 +    E4_uint64          dma_cookie;
86333 +    E4_uint64          dma_vproc;
86334 +    E4_Addr            dma_srcAddr;
86335 +    E4_Addr            dma_dstAddr;
86336 +    E4_Addr            dma_srcEvent;
86337 +    E4_Addr            dma_dstEvent;
86338 +    E4_Addr            dma_pad;
86339 +} E4_DMA64;
86340 +
86341 +#endif /* __ELAN4_DMA_H */
86342 Index: linux-2.4.21/include/elan4/events.h
86343 ===================================================================
86344 --- linux-2.4.21.orig/include/elan4/events.h    2004-02-23 16:02:56.000000000 -0500
86345 +++ linux-2.4.21/include/elan4/events.h 2005-06-01 23:12:54.736418280 -0400
86346 @@ -0,0 +1,179 @@
86347 +/*
86348 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
86349 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86350 + *
86351 + *    For licensing information please see the supplied COPYING file
86352 + *
86353 + */
86354 +
86355 +#ifndef __ELAN4_EVENTS_H
86356 +#define __ELAN4_EVENTS_H
86357 +
86358 +#ident "$Id: events.h,v 1.22 2004/06/23 11:07:18 addy Exp $"
86359 +/*      $Source: /cvs/master/quadrics/elan4hdr/events.h,v $*/
86360 +
86361 +#define E4_EVENT_ALIGN         32
86362 +#define E4_EVENTBLOCK_SIZE     64
86363 +
86364 +#ifndef _ASM
86365 +/*
86366 + * Event locations must be aligned to a 32 byte boundary. It is very much more efficent to place
86367 + * them in elan local memory but is not essential.
86368 + */
86369 +typedef struct _E4_Event
86370 +{
86371 +    volatile E4_uint64 ev_CountAndType;
86372 +    E4_uint64          ev_Params[2];
86373 +} E4_Event;
86374 +
86375 +/* Same as above but padded to correct Event alignment */
86376 +typedef struct _E4_Event32
86377 +{
86378 +    volatile E4_uint64 ev_CountAndType;
86379 +    E4_uint64          ev_Params[2];
86380 +    E4_uint64          ev_pad;
86381 +} E4_Event32;
86382 +
86383 +/*
86384 + * An E4_EVENTBLOCK_SIZE aligned block of Main or Elan memory
86385 + */
86386 +typedef union _E4_Event_Blk
86387 +{
86388 +    /* Padded to 64-bytes in case a cache-line write is more efficient */
86389 +    volatile E4_uint8  eb_unit8[E4_EVENTBLOCK_SIZE];
86390 +    volatile E4_uint32 eb_uint32[E4_EVENTBLOCK_SIZE/sizeof(E4_uint32)];
86391 +    volatile E4_uint64 eb_uint64[E4_EVENTBLOCK_SIZE/sizeof(E4_uint64)];
86392 +} E4_Event_Blk;
86393 +#define eb_done       eb_uint32[14]
86394 +#define eb_done_dword eb_uint64[7]
86395 +
86396 +#endif /* ! _ASM */
86397 +
86398 +/*
86399 + * ev_CountAndType
86400 + *  [63:31]   Count
86401 + *  [10]      CopyType
86402 + *  [9:8]     DataType
86403 + *  [7:0]     CopySize
86404 + */
86405 +#define E4_EVENT_TYPE_MASK     0x00000000ffffffffull
86406 +#define E4_EVENT_COUNT_MASK    0xffffffff00000000ull
86407 +#define E4_EVENT_COUNT_SHIFT   32
86408 +#define E4_EVENT_COPY_TYPE_MASK        (1 << 10)
86409 +#define E4_EVENT_DATA_TYPE_MASK        (3 << 8)
86410 +#define E4_EVENT_COPY_SIZE_MASK        (0xff)
86411 +
86412 +/* CopyType */
86413 +#define E4_EVENT_COPY          (0 << 10)
86414 +#define E4_EVENT_WRITE         (1 << 10)
86415 +
86416 +/* DataType */
86417 +#define E4_EVENT_DTYPE_BYTE    (0 << 8)
86418 +#define E4_EVENT_DTYPE_SHORT   (1 << 8)
86419 +#define E4_EVENT_DTYPE_WORD    (2 << 8)
86420 +#define E4_EVENT_DTYPE_LONG    (3 << 8)
86421 +
86422 +#define EVENT_COUNT(EventPtr)  ((E4_int32)(elan4_load64 (&(EventPtr)->ev_CountAndType) >> E4_EVENT_COUNT_SHIFT))
86423 +#define EVENT_TYPE(EventPtr)   ((E4_uint32)(elan4_load64 (&(EventPtr)->ev_CountAndType) & E4_EVENT_TYPE_MASK))
86424 +
86425 +#define E4_WAITEVENT_COUNT_TYPE_VALUE(Count, EventType, DataType, CopySize) \
86426 +       (((E4_uint64)(Count) << E4_EVENT_COUNT_SHIFT) | (EventType) | (DataType) | (CopySize))
86427 +
86428 +#define E4_EVENT_TYPE_VALUE(EventType, DataType, CopySize)     \
86429 +       ((EventType) | (DataType) | (CopySize))
86430 +
86431 +#define E4_EVENT_INIT_VALUE(InitialCount, EventType, DataType, CopySize)       \
86432 +       (((E4_uint64)(InitialCount) << E4_EVENT_COUNT_SHIFT) | E4_EVENT_TYPE_VALUE(EventType, DataType, CopySize))
86433 +
86434 +#define ev_CopySource  ev_Params[0]
86435 +#define ev_CopyDest    ev_Params[1]
86436 +#define ev_WritePtr    ev_Params[0]
86437 +#define ev_WriteValue  ev_Params[1]
86438 +
86439 +#define EVENT_BLK_READY(BLK) ((BLK)->eb_done != 0)
86440 +#define EVENT_READY(EVENT)   ((E4_uint32)((((volatile E4_Event *) (EVENT))->ev_CountAndType) >> E4_EVENT_COUNT_SHIFT) >= 0)
86441 +
86442 +#define ELAN_WAIT_EVENT (0)
86443 +#define ELAN_POLL_EVENT (-1)
86444 +
86445 +#define E4_BLK_PATTERN ((E4_uint32)0xfeedface)
86446 +
86447 +#define E4_INIT_COPY_EVENT(EVENT, BLK_ELAN, BLK, SIZE)                                                         \
86448 +       do {                                                                                            \
86449 +          elan4_store64 (E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, SIZE), &(EVENT)->ev_CountAndType); \
86450 +           elan4_store64 ((BLK_ELAN), &(EVENT)->ev_CopySource); \
86451 +          elan4_store64 ((BLK), &(EVENT)->ev_CopyDest); \
86452 +       } while (0)
86453 +
86454 +#define E4_INIT_WRITE_EVENT(EVENT, DWORD)                                                              \
86455 +       do {                                                                                            \
86456 +           elan4_store64 (E4_EVENT_INIT_VALUE(0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), &(EVENT)->ev_CountAndType);  \
86457 +           elan4_store64 ((DWORD), &(EVENT)->ev_WritePtr); \
86458 +           elan4_store64 ((E4_Addr) (E4_BLK_PATTERN), &(EVENT)->ev_WriteValue); \
86459 +       } while (0)
86460 +
86461 +#define E4_RESET_BLK_EVENT(BLK)                                        \
86462 +       do {                                                            \
86463 +               (BLK)->eb_done = (0);                                   \
86464 +       } while (0)
86465 +
86466 +#define E4_PRIME_BLK_EVENT(EVENT, COUNT)                               \
86467 +       do {                                                            \
86468 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), &(EVENT)->ev_CountAndType);\
86469 +       } while (0)
86470 +
86471 +#define E4_PRIME_COPY_EVENT(EVENT, SIZE, COUNT)                                \
86472 +       do {                                                            \
86473 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, (SIZE >> 3)), &(EVENT)->ev_CountAndType);\
86474 +       } while (0)
86475 +
86476 +#define E4_PRIME_WRITE_EVENT(EVENT, COUNT)                                     \
86477 +       do {                                                                    \
86478 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), &(EVENT)->ev_CountAndType);\
86479 +       } while (0)
86480 +
86481 +#ifndef _ASM
86482 +
86483 +#define E4_INPUTQ_ALIGN                        32      /* Descriptor must be 32-byte aligned */
86484 +
86485 +typedef struct _E4_InputQueue
86486 +{
86487 +   volatile E4_Addr    q_bptr;         /* 64 bit aligned ptr to current back item */
86488 +   E4_Addr             q_fptr;         /* 64 bit aligned ptr to current front item */
86489 +   E4_uint64           q_control;      /* this defines the last item, item size, and offset back to the first item. */
86490 +   E4_Addr             q_event;        /* queue event */
86491 +} E4_InputQueue;
86492 +
86493 +#define E4_INPUTQ_LASTITEM_MASK        0x00000000ffffffffULL
86494 +#define E4_INPUTQ_ITEMSIZE_MASK                0x000000ff00000000ULL
86495 +#define E4_INPUTQ_LASTITEM_OFFSET_MASK 0xffffff0000000000ULL
86496 +#define E4_INPUTQ_LASTITEM_SHIFT       0
86497 +#define E4_INPUTQ_ITEMSIZE_SHIFT       32
86498 +#define E4_INPUTQ_LASTITEM_OFFSET_SHIFT        40
86499 +
86500 +/*
86501 + * Macro to initialise the InputQueue control word given the FirstItem, LastItem & ItemSize
86502 + * FirstItem and LastItem are 64 bit double word aligned elan addresses.
86503 + */
86504 +#define E4_InputQueueControl(FirstItem, LastItem, ItemSizeInBytes)\
86505 +   (((((E4_uint64)(LastItem)))                                                      & E4_INPUTQ_LASTITEM_MASK) |\
86506 +    ((((E4_uint64)(ItemSizeInBytes))        << (E4_INPUTQ_ITEMSIZE_SHIFT-3))        & E4_INPUTQ_ITEMSIZE_MASK)  |\
86507 +    ((((E4_uint64)((FirstItem)-(LastItem))) << (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3)) & E4_INPUTQ_LASTITEM_OFFSET_MASK))    
86508 +
86509 +/* 
86510 + * LastItemOffset is a sign extended -ve quantity with LastItemOffset[26:3] == q_control[63:40]
86511 + * we sign extend this by setting LastItemOffset[63:27] to be all ones.
86512 + */
86513 +#define E4_InputQueueLastItemOffset(control)  ((((E4_int64) -1) << (64 - (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3))) | \
86514 +                                              ((E4_int64) (((control) & E4_INPUTQ_LASTITEM_OFFSET_MASK) >> (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3))))
86515 +#define E4_InputQueueItemSize(control)       (((control) & E4_INPUTQ_ITEMSIZE_MASK) >> (E4_INPUTQ_ITEMSIZE_SHIFT-3))
86516 +
86517 +/*
86518 + * Macro to increment the InputQ front pointer taking into account wrap 
86519 + */
86520 +#define E4_InputQueueFptrIncrement(Q, FirstItem, LastItem, ItemSizeInBytes) \
86521 +       ((Q)->q_fptr = ( ((Q)->q_fptr == (LastItem)) ? (FirstItem) : ((Q)->q_fptr + (ItemSizeInBytes))) )
86522 +
86523 +#endif /* _ASM */
86524 +
86525 +#endif /* __ELAN4_EVENTS_H */
86526 Index: linux-2.4.21/include/elan4/i2c.h
86527 ===================================================================
86528 --- linux-2.4.21.orig/include/elan4/i2c.h       2004-02-23 16:02:56.000000000 -0500
86529 +++ linux-2.4.21/include/elan4/i2c.h    2005-06-01 23:12:54.736418280 -0400
86530 @@ -0,0 +1,47 @@
86531 +/*
86532 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
86533 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86534 + *
86535 + *    For licensing information please see the supplied COPYING file
86536 + *
86537 + */
86538 +
86539 +#ifndef _ELAN4_I2C_H
86540 +#define _ELAN4_I2C_H
86541 +
86542 +#ident "@(#)$Id: i2c.h,v 1.10 2003/12/02 16:11:22 lee Exp $ $Name: QSNETMODULES-4-30_20050128 $"
86543 +/*      $Source: /cvs/master/quadrics/elan4hdr/i2c.h,v $*/
86544 +
86545 +/* I2C address space - bits[7:1] */
86546 +#define I2C_LED_I2C_ADDR                       0x20
86547 +#define I2C_TEMP_ADDR                          0x48
86548 +#define I2C_EEPROM_ADDR                                0x50
86549 +
86550 +#define I2C_WRITE_ADDR(addr)                   ((addr) << 1 | 0)
86551 +#define I2C_READ_ADDR(addr)                    ((addr) << 1 | 1)
86552 +
86553 +/* I2C EEPROM appears as 8 I2C 256 byte devices */
86554 +#define I2C_24LC16B_BLOCKSIZE                  (256)
86555 +#define I2C_24LC16B_BLOCKADDR(addr)            ((addr) >> 8)
86556 +#define I2C_24LC16B_BLOCKOFFSET(addr)          ((addr) & 0xff)
86557 +
86558 +#define I2C_ELAN_EEPROM_PCI_BASEADDR           0       /* PCI config starts at addr 0 in the EEPROM */
86559 +#define I2C_ELAN_EEPROM_VPD_BASEADDR           256     /* VPD data start                            */
86560 +#define I2C_ELAN_EEPROM_PCI_SIZE               256     /* PCI data max size                         */
86561 +#define I2C_ELAN_EEPROM_VPD_SIZE               256     /* VPD data max size                         */
86562 +
86563 +#define I2C_ELAN_EEPROM_SIZE                   2048
86564 +
86565 +#define I2C_ELAN_EEPROM_DEVICE_ID              0xA0
86566 +#define I2C_ELAN_EEPROM_FAIL_LIMIT              8
86567 +
86568 +#define I2C_ELAN_EEPROM_ADDR_BLOCKSIZE_SHIFT   0x8
86569 +#define I2C_ELAN_EEPROM_ADDR_BLOCK_MASK                0x7
86570 +#define I2C_ELAN_EEPROM_ADDR_BLOCK_SHIFT       0x1
86571 +
86572 +/*
86573 + * Local variables:
86574 + * c-file-style: "stroustrup"
86575 + * End:
86576 + */
86577 +#endif /* _ELAN4_I2C_H */
86578 Index: linux-2.4.21/include/elan4/intcookie.h
86579 ===================================================================
86580 --- linux-2.4.21.orig/include/elan4/intcookie.h 2004-02-23 16:02:56.000000000 -0500
86581 +++ linux-2.4.21/include/elan4/intcookie.h      2005-06-01 23:12:54.737418128 -0400
86582 @@ -0,0 +1,62 @@
86583 +/*
86584 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
86585 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86586 + * 
86587 + *    For licensing information please see the supplied COPYING file
86588 + *
86589 + */
86590 +
86591 +#ident "@(#)$Id: intcookie.h,v 1.10 2004/08/09 14:02:37 daniel Exp $"
86592 +/*      $Source: /cvs/master/quadrics/elan4mod/intcookie.h,v $*/
86593 +
86594 +#ifndef __ELAN4_INTCOOKIE_H
86595 +#define __ELAN4_INTCOOKIE_H
86596 +
86597 +typedef E4_uint64 ELAN4_INTCOOKIE;
86598 +
86599 +#ifdef __KERNEL__
86600 +
86601 +typedef struct intcookie_entry
86602 +{
86603 +    struct intcookie_entry    *ent_next;
86604 +    struct intcookie_entry    *ent_prev;
86605 +
86606 +    spinlock_t                ent_lock;
86607 +    unsigned                  ent_ref;
86608 +
86609 +    ELAN4_INTCOOKIE           ent_cookie;
86610 +    ELAN4_INTCOOKIE           ent_fired;
86611 +    kcondvar_t                ent_wait;
86612 +} INTCOOKIE_ENTRY;
86613 +
86614 +typedef struct intcookie_table
86615 +{
86616 +    struct intcookie_table    *tbl_next;
86617 +    struct intcookie_table    *tbl_prev;
86618 +
86619 +    ELAN_CAPABILITY           *tbl_cap;
86620 +
86621 +    spinlock_t                tbl_lock;
86622 +    unsigned                  tbl_ref;
86623 +    INTCOOKIE_ENTRY           *tbl_entries;
86624 +} INTCOOKIE_TABLE;
86625 +
86626 +extern void                intcookie_init(void);
86627 +extern void                intcookie_fini(void);
86628 +extern INTCOOKIE_TABLE    *intcookie_alloc_table (ELAN_CAPABILITY *cap);
86629 +extern void                intcookie_free_table (INTCOOKIE_TABLE *tbl);
86630 +extern int                 intcookie_alloc (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
86631 +extern int                 intcookie_free (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
86632 +extern int                 intcookie_fire (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
86633 +extern int                 intcookie_fire_cap (ELAN_CAPABILITY *cap, ELAN4_INTCOOKIE cookie);
86634 +extern int                 intcookie_wait (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
86635 +extern int                 intcookie_arm (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
86636 +
86637 +#endif /* __KERNEL */
86638 +
86639 +/*
86640 + * Local variables:
86641 + * c-file-style: "stroustrup"
86642 + * End:
86643 + */
86644 +#endif /* __ELAN4_INTCOOKIE_H */
86645 Index: linux-2.4.21/include/elan4/ioctl.h
86646 ===================================================================
86647 --- linux-2.4.21.orig/include/elan4/ioctl.h     2004-02-23 16:02:56.000000000 -0500
86648 +++ linux-2.4.21/include/elan4/ioctl.h  2005-06-01 23:12:54.738417976 -0400
86649 @@ -0,0 +1,320 @@
86650 +/*
86651 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
86652 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86653 + * 
86654 + *    For licensing information please see the supplied COPYING file
86655 + *
86656 + */
86657 +
86658 +#ifndef __ELAN4_IOCTL_H
86659 +#define __ELAN4_IOCTL_H
86660 +
86661 +#ident "@(#)$Id: ioctl.h,v 1.27.6.2 2005/01/11 12:15:39 duncant Exp $"
86662 +/*      $Source: /cvs/master/quadrics/elan4mod/ioctl.h,v $*/
86663 +
86664 +#include <elan/devinfo.h>
86665 +#include <elan/capability.h>
86666 +
86667 +#include <elan4/dma.h>
86668 +#include <elan4/neterr.h>
86669 +#include <elan4/registers.h>
86670 +#include <elan4/intcookie.h>
86671 +
86672 +#define ELAN4IO_CONTROL_PATHNAME       "/dev/elan4/control%d"
86673 +#define ELAN4IO_USER_PATHNAME          "/dev/elan4/user%d"
86674 +#define ELAN4IO_SDRAM_PATHNAME         "/dev/elan4/sdram%d"
86675 +#define ELAN4IO_MAX_PATHNAMELEN        32
86676 +
86677 +/*
86678 + * NOTE - ioctl values 0->0x1f are defined for 
86679 + *        generic/control usage.
86680 + */
86681 +
86682 +/* Macro to generate 'offset' to mmap "control" device */
86683 +#define OFF_TO_BAR(off)                (((off) >> 28) & 0xF)
86684 +#define OFF_TO_OFFSET(off)     ((off) & 0x0FFFFFFF)
86685 +#define GEN_OFF(bar,off)       (((bar) << 28) | ((off) & 0x0FFFFFFF))
86686 +
86687 +/* Definitions for generic ioctls */
86688 +#define ELAN4IO_GENERIC_BASE           0x00
86689 +
86690 +typedef struct elan4io_stats_struct
86691 +{
86692 +    int                       which;
86693 +    unsigned long long ptr;                                    /* always pass pointer as 64 bit */
86694 +} ELAN4IO_STATS_STRUCT;
86695 +
86696 +#define ELAN4IO_STATS                  _IOR ('e', ELAN4IO_GENERIC_BASE + 0, ELAN4IO_STATS_STRUCT)
86697 +#define ELAN4IO_DEVINFO                        _IOR ('e', ELAN4IO_GENERIC_BASE + 1, ELAN_DEVINFO)
86698 +#define ELAN4IO_POSITION               _IOR ('e', ELAN4IO_GENERIC_BASE + 2, ELAN_POSITION)
86699 +
86700 +
86701 +/* 
86702 + * Definitions for /dev/elan4/controlX
86703 + */
86704 +#define ELAN4IO_CONTROL_BASE           0x20
86705 +
86706 +#define ELAN4IO_GET_POSITION           _IOR ('e', ELAN4IO_CONTROL_BASE + 0, ELAN_POSITION)
86707 +#define ELAN4IO_SET_POSITION           _IOW ('e', ELAN4IO_CONTROL_BASE + 1, ELAN_POSITION)
86708 +#define ELAN4IO_DEBUG_SNAPSHOT         _IOW ('e', ELAN4IO_CONTROL_BASE + 2, )
86709 +
86710 +typedef struct elan4io_params_mask_struct
86711 +{
86712 +    unsigned short     p_mask;
86713 +    ELAN_PARAMS                p_params;
86714 +} ELAN4IO_PARAMS_STRUCT;
86715 +#define ELAN4IO_GET_PARAMS             _IOR ('e', ELAN4IO_CONTROL_BASE + 3, ELAN4IO_PARAMS_STRUCT)
86716 +#define ELAN4IO_SET_PARAMS             _IOW ('e', ELAN4IO_CONTROL_BASE + 4, ELAN4IO_PARAMS_STRUCT)
86717 +
86718 +/* old versions - implicit p_mask == 3 */
86719 +#define ELAN4IO_OLD_GET_PARAMS         _IOR ('e', ELAN4IO_CONTROL_BASE + 3, ELAN_PARAMS)
86720 +#define ELAN4IO_OLD_SET_PARAMS         _IOW ('e', ELAN4IO_CONTROL_BASE + 4, ELAN_PARAMS)
86721 +
86722 +/*
86723 + * Definitions for /dev/elan4/userX
86724 + */
86725 +#define ELAN4IO_USER_BASE              0x40
86726 +
86727 +#define ELAN4IO_FREE                   _IO   ('e', ELAN4IO_USER_BASE + 0)
86728 +#define ELAN4IO_ATTACH                 _IOWR ('e', ELAN4IO_USER_BASE + 1, ELAN_CAPABILITY)
86729 +#define ELAN4IO_DETACH                 _IOWR ('e', ELAN4IO_USER_BASE + 2, ELAN_CAPABILITY)
86730 +#define ELAN4IO_BLOCK_INPUTTER         _IO   ('e', ELAN4IO_USER_BASE + 3)
86731 +
86732 +typedef struct elan4io_add_p2pvp_struct 
86733 +{
86734 +    unsigned        vp_process;
86735 +    ELAN_CAPABILITY vp_capability;
86736 +} ELAN4IO_ADD_P2PVP_STRUCT;
86737 +
86738 +#define ELAN4IO_ADD_P2PVP              _IOW  ('e', ELAN4IO_USER_BASE + 4, ELAN4IO_ADD_P2PVP_STRUCT)
86739 +
86740 +typedef struct elan4io_add_bcastvp_struct
86741 +{
86742 +    unsigned int       vp_process;
86743 +    unsigned int       vp_lowvp;
86744 +    unsigned int       vp_highvp;
86745 +} ELAN4IO_ADD_BCASTVP_STRUCT;
86746 +
86747 +#define ELAN4IO_ADD_BCASTVP            _IOW  ('e', ELAN4IO_USER_BASE + 5, ELAN4IO_ADD_BCASTVP_STRUCT)
86748 +
86749 +#define ELAN4IO_REMOVEVP               _IO   ('e', ELAN4IO_USER_BASE + 6)
86750 +
86751 +typedef struct elan4io_route_struct
86752 +{
86753 +    unsigned int          rt_process;
86754 +    unsigned int          rt_error;
86755 +    E4_VirtualProcessEntry rt_route;
86756 +} ELAN4IO_ROUTE_STRUCT;
86757 +
86758 +#define ELAN4IO_SET_ROUTE              _IOW  ('e', ELAN4IO_USER_BASE + 7, ELAN4IO_ROUTE_STRUCT)
86759 +#define ELAN4IO_RESET_ROUTE            _IOW  ('e', ELAN4IO_USER_BASE + 9, ELAN4IO_ROUTE_STRUCT)
86760 +#define ELAN4IO_GET_ROUTE              _IOWR ('e', ELAN4IO_USER_BASE + 8, ELAN4IO_ROUTE_STRUCT)
86761 +#define ELAN4IO_CHECK_ROUTE            _IOWR ('e', ELAN4IO_USER_BASE + 10, ELAN4IO_ROUTE_STRUCT)
86762 +
86763 +typedef struct elan4io_alloc_cq_struct
86764 +{
86765 +    unsigned int cq_size;                                      /* input: size of queue */
86766 +    unsigned int cq_perm;                                      /* input: requested permissions */
86767 +    unsigned int cq_type;                                      /* input: queue type */
86768 +    unsigned int cq_indx;                                      /* output: queue number */
86769 +} ELAN4IO_ALLOCCQ_STRUCT;
86770 +
86771 +#define ELAN4IO_ALLOCCQ                        _IOWR ('e', ELAN4IO_USER_BASE + 11, ELAN4IO_ALLOCCQ_STRUCT)
86772 +#define ELAN4IO_FREECQ                 _IOWR ('e', ELAN4IO_USER_BASE + 12, unsigned)
86773 +
86774 +#define ELAN4IO_CQ_TYPE_REORDER                1                       /* revb reordering command queue */
86775 +
86776 +typedef struct elan4io_perm_struct
86777 +{
86778 +    E4_Addr            ps_eaddr;
86779 +    E4_uint64          ps_len;
86780 +    unsigned long      ps_maddr;
86781 +    unsigned int       ps_perm;
86782 +} ELAN4IO_PERM_STRUCT;
86783 +
86784 +typedef struct elan4io_perm_struct32
86785 +{
86786 +    E4_Addr            ps_eaddr;
86787 +    E4_uint64          ps_len;
86788 +    unsigned int       ps_maddr;
86789 +    unsigned int       ps_perm;
86790 +} ELAN4IO_PERM_STRUCT32;
86791 +
86792 +#define ELAN4IO_SETPERM                        _IOWR ('e', ELAN4IO_USER_BASE + 13, ELAN4IO_PERM_STRUCT)
86793 +#define ELAN4IO_SETPERM32              _IOWR ('e', ELAN4IO_USER_BASE + 13, ELAN4IO_PERM_STRUCT32)
86794 +#define ELAN4IO_CLRPERM                        _IOWR ('e', ELAN4IO_USER_BASE + 14, ELAN4IO_PERM_STRUCT)
86795 +#define ELAN4IO_CLRPERM32              _IOWR ('e', ELAN4IO_USER_BASE + 14, ELAN4IO_PERM_STRUCT32)
86796 +
86797 +typedef struct elan4io_trapsig_struct
86798 +{
86799 +    int                ts_signo;
86800 +} ELAN4IO_TRAPSIG_STRUCT;
86801 +#define ELAN4IO_TRAPSIG                        _IOW  ('e', ELAN4IO_USER_BASE + 15, ELAN4IO_TRAPSIG_STRUCT)
86802 +
86803 +typedef struct elan4io_traphandler_struct
86804 +{
86805 +    unsigned int       th_nticks;                              /* number of ticks to sleep for next trap */
86806 +    unsigned int       th_proc;                                        /* elan processor involved */
86807 +    unsigned long      th_trapp;                               /* space to store trap */
86808 +} ELAN4IO_TRAPHANDLER_STRUCT;
86809 +
86810 +typedef struct elan4io_traphandler_struct32
86811 +{
86812 +    unsigned int       th_nticks;                              /* number of ticks to sleep for next trap */
86813 +    unsigned int       th_proc;                                        /* elan processor involved */
86814 +    unsigned int       th_trapp;                               /* space to store trap */
86815 +} ELAN4IO_TRAPHANDLER_STRUCT32;
86816 +
86817 +#define ELAN4IO_TRAPHANDLER            _IOW  ('e', ELAN4IO_USER_BASE + 16, ELAN4IO_TRAPHANDLER_STRUCT)
86818 +#define ELAN4IO_TRAPHANDLER32          _IOW  ('e', ELAN4IO_USER_BASE + 16, ELAN4IO_TRAPHANDLER_STRUCT32)
86819 +
86820 +typedef struct elan4io_required_mappings_struct
86821 +{
86822 +    E4_Addr    rm_upage_addr;                                  /* elan address of user page */
86823 +    E4_Addr    rm_trestart_addr;                               /* elan address of tproc restart trampoline */
86824 +} ELAN4IO_REQUIRED_MAPPINGS_STRUCT;
86825 +#define ELAN4IO_REQUIRED_MAPPINGS      _IOW  ('e', ELAN4IO_USER_BASE + 17, ELAN4IO_REQUIRED_MAPPINGS_STRUCT)
86826 +
86827 +typedef struct elan4io_resume_eproc_trap_struct
86828 +{
86829 +    E4_Addr             rs_addr;
86830 +} ELAN4IO_RESUME_EPROC_TRAP_STRUCT;
86831 +#define ELAN4IO_RESUME_EPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 18, ELAN4IO_RESUME_EPROC_TRAP_STRUCT)
86832 +
86833 +typedef struct elan4io_resume_cproc_trap_struct
86834 +{
86835 +    unsigned int       rs_indx;
86836 +} ELAN4IO_RESUME_CPROC_TRAP_STRUCT;
86837 +#define ELAN4IO_RESUME_CPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 19, ELAN4IO_RESUME_CPROC_TRAP_STRUCT)
86838 +
86839 +typedef struct elan4io_resume_dproc_trap_struct
86840 +{
86841 +    E4_DMA             rs_desc;
86842 +} ELAN4IO_RESUME_DPROC_TRAP_STRUCT;
86843 +#define ELAN4IO_RESUME_DPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 20, ELAN4IO_RESUME_DPROC_TRAP_STRUCT)
86844 +
86845 +typedef struct elan4io_resume_tproc_trap_struct
86846 +{
86847 +    E4_ThreadRegs      rs_regs;
86848 +} ELAN4IO_RESUME_TPROC_TRAP_STRUCT;
86849 +#define ELAN4IO_RESUME_TPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 21, ELAN4IO_RESUME_TPROC_TRAP_STRUCT)
86850 +
86851 +typedef struct elan4io_resume_iproc_trap_struct
86852 +{
86853 +    unsigned int       rs_channel;
86854 +    unsigned int       rs_trans;
86855 +    E4_IprocTrapHeader  rs_header;
86856 +    E4_IprocTrapData    rs_data;
86857 +} ELAN4IO_RESUME_IPROC_TRAP_STRUCT;
86858 +#define ELAN4IO_RESUME_IPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 22, ELAN4IO_RESUME_IPROC_TRAP_STRUCT)
86859 +
86860 +#define ELAN4IO_FLUSH_ICACHE           _IO   ('e', ELAN4IO_USER_BASE + 23)
86861 +#define ELAN4IO_STOP_CTXT              _IO   ('e', ELAN4IO_USER_BASE + 24)
86862 +
86863 +#define ELAN4IO_ALLOC_INTCOOKIE                _IOW  ('e', ELAN4IO_USER_BASE + 25, ELAN4_INTCOOKIE)
86864 +#define ELAN4IO_FREE_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 26, ELAN4_INTCOOKIE)
86865 +#define ELAN4IO_ARM_INTCOOKIE          _IOW  ('e', ELAN4IO_USER_BASE + 27, ELAN4_INTCOOKIE)
86866 +#define ELAN4IO_WAIT_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 28, ELAN4_INTCOOKIE)
86867 +
86868 +typedef struct elan4io_alloc_trap_queues_struct
86869 +{
86870 +    unsigned int       tq_ndproc_traps;
86871 +    unsigned int       tq_neproc_traps;
86872 +    unsigned int       tq_ntproc_traps;
86873 +    unsigned int       tq_nthreads;
86874 +    unsigned int       tq_ndmas;
86875 +} ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT;
86876 +#define ELAN4IO_ALLOC_TRAP_QUEUES      _IOW  ('e', ELAN4IO_USER_BASE + 29, ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT)
86877 +
86878 +typedef struct elan4io_neterr_msg_struct
86879 +{
86880 +    unsigned int       nm_vp;
86881 +    unsigned int       nm_nctx;
86882 +    unsigned int       nm_retries;
86883 +    unsigned int        nm_pad;
86884 +    ELAN4_NETERR_MSG    nm_msg;
86885 +} ELAN4IO_NETERR_MSG_STRUCT;
86886 +#define ELAN4IO_NETERR_MSG             _IOW ('e', ELAN4IO_USER_BASE + 30, ELAN4IO_NETERR_MSG_STRUCT)
86887 +
86888 +typedef struct elan4io_neterr_timer_struct 
86889 +{
86890 +    unsigned int       nt_usecs;
86891 +} ELAN4IO_NETERR_TIMER_STUCT;
86892 +
86893 +#define ELAN4IO_NETERR_TIMER           _IO  ('e', ELAN4IO_USER_BASE + 31)
86894 +
86895 +typedef struct elan4io_neterr_fixup_struct
86896 +{
86897 +    E4_uint64          nf_cookie;
86898 +    unsigned int       nf_waitforeop;
86899 +    unsigned int       nf_sten;
86900 +    unsigned int       nf_vp;
86901 +    unsigned int       nf_pad;
86902 +} ELAN4IO_NETERR_FIXUP_STRUCT;
86903 +
86904 +#define ELAN4IO_NETERR_FIXUP           _IOW ('e', ELAN4IO_USER_BASE + 32, ELAN4IO_NETERR_FIXUP_STRUCT)
86905 +
86906 +typedef struct elan4io_firecap_struct 
86907 +{
86908 +    ELAN_CAPABILITY     fc_capability;
86909 +    ELAN4_INTCOOKIE     fc_cookie;
86910 +} ELAN4IO_FIRECAP_STRUCT;
86911 +
86912 +#define ELAN4IO_FIRE_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 33, ELAN4IO_FIRECAP_STRUCT)
86913 +
86914 +#define ELAN4IO_ALLOC_INTCOOKIE_TABLE  _IOW  ('e', ELAN4IO_USER_BASE + 34, ELAN_CAPABILITY)
86915 +#define ELAN4IO_FREE_INTCOOKIE_TABLE   _IO   ('e', ELAN4IO_USER_BASE + 35)
86916 +
86917 +typedef struct elan4io_translation
86918 +{
86919 +    E4_Addr            tr_addr;
86920 +    unsigned long      tr_len;
86921 +    unsigned int       tr_access;
86922 +} ELAN4IO_TRANSLATION_STRUCT;
86923 +
86924 +#define ELAN4IO_LOAD_TRANSLATION       _IOW  ('e', ELAN4IO_USER_BASE + 36, ELAN4IO_TRANSLATION_STRUCT)
86925 +#define ELAN4IO_UNLOAD_TRANSLATION     _IOW  ('e', ELAN4IO_USER_BASE + 37, ELAN4IO_TRANSLATION_STRUCT)
86926 +
86927 +typedef struct elan4io_dumpcq_struct32
86928 +{
86929 +    E4_uint64 cq_space;      /* output: sdram addr of q, used to decode ptrs */
86930 +    E4_uint32 cq_size;       /* output: The real size of the command queue */
86931 +    E4_uint32 bufsize;       /* input: The size of the buffer to dump to */
86932 +    E4_uint32 cq_indx;       /* input: index of cq to dump */
86933 +    unsigned int buffer;     /* input: user address of rgs->buffer to dump to */
86934 +} ELAN4IO_DUMPCQ_STRUCT32;
86935 +
86936 +typedef struct elan4io_dumpcq_struct
86937 +{
86938 +    E4_uint64 cq_space;      /* output: sdram addr of q, used to decode ptrs */
86939 +    E4_uint32 cq_size;       /* output: The real size of the command queue */
86940 +    E4_uint32 bufsize;       /* input: The size of the buffer to dump to */
86941 +    E4_uint32 cq_indx;       /* input: index of cq to dump */
86942 +    unsigned long buffer;    /* input: user address of rgs->buffer to dump to */
86943 +} ELAN4IO_DUMPCQ_STRUCT;
86944 +
86945 +#define ELAN4IO_DUMPCQ                 _IOWR ('e', ELAN4IO_USER_BASE + 38, ELAN4IO_DUMPCQ_STRUCT)
86946 +#define ELAN4IO_DUMPCQ32                       _IOWR ('e', ELAN4IO_USER_BASE + 38, ELAN4IO_DUMPCQ_STRUCT32)
86947 +
86948 +/* mmap offsets - - we define the file offset space as follows:
86949 + *
86950 + * page 0 - 4095 - command queues
86951 + * page 4096    - device user registers
86952 + * page 4097    - flag page/user stats
86953 + * page 4098    - device stats
86954 + * page 4099     - tproc trampoline
86955 + */
86956 +
86957 +#define ELAN4_OFF_COMMAND_QUEUES       0
86958 +#define ELAN4_OFF_USER_REGS            4096
86959 +#define ELAN4_OFF_USER_PAGE            4097
86960 +#define ELAN4_OFF_DEVICE_STATS         4098
86961 +#define ELAN4_OFF_TPROC_TRAMPOLINE     4099
86962 +
86963 +
86964 +/*
86965 + * Local variables:
86966 + * c-file-style: "stroustrup"
86967 + * End:
86968 + */
86969 +#endif /* __ELAN4_IOCTL_H */
86970 Index: linux-2.4.21/include/elan4/mmu.h
86971 ===================================================================
86972 --- linux-2.4.21.orig/include/elan4/mmu.h       2004-02-23 16:02:56.000000000 -0500
86973 +++ linux-2.4.21/include/elan4/mmu.h    2005-06-01 23:12:54.738417976 -0400
86974 @@ -0,0 +1,94 @@
86975 +/*
86976 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
86977 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86978 + * 
86979 + *    For licensing information please see the supplied COPYING file
86980 + *
86981 + */
86982 +
86983 +#ident "@(#)$Id: mmu.h,v 1.11 2004/04/21 12:04:24 david Exp $"
86984 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu.h,v $*/
86985 +
86986 +
86987 +#ifndef __ELAN4_MMU_H
86988 +#define __ELAN4_MMU_H
86989 +
86990 +typedef struct elan4_hash_entry
86991 +{
86992 +    struct elan4_hash_entry    *he_next;
86993 +    struct elan4_hash_entry    *he_prev;
86994 +
86995 +    sdramaddr_t                         he_entry;
86996 +    
86997 +    struct elan4_hash_entry    *he_chain[2];
86998 +    E4_uint64                   he_tag[2];
86999 +    E4_uint32                   he_pte[2];
87000 +} ELAN4_HASH_ENTRY;
87001 +
87002 +#define ELAN4_HENT_CHUNKS      16              /* SDRAM_MIN_BLOCK_SIZE/sizeof (E4_HashTableEntry) */
87003 +
87004 +typedef struct elan4_hash_chunk
87005 +{
87006 +    struct list_head            hc_link;
87007 +    ELAN4_HASH_ENTRY           hc_hents[ELAN4_HENT_CHUNKS];
87008 +} ELAN4_HASH_CHUNK;
87009 +
87010 +typedef struct elan4_hash_cache
87011 +{
87012 +    E4_Addr           hc_start;
87013 +    E4_Addr           hc_end;
87014 +    int                      hc_tbl;
87015 +
87016 +    ELAN4_HASH_ENTRY *hc_hes[1];
87017 +} ELAN4_HASH_CACHE;
87018 +
87019 +/* 
87020 + * he_pte is really 4 bytes of pte "type" one for each pte
87021 + * entry - however we declare it as an "int" so we can
87022 + * easily determine that all 4 entries are invalid 
87023 + */
87024 +#define HE_SET_PTE(he,tagidx,pteidx,val)       (((E4_uint8 *) &(he->he_pte[tagidx]))[pteidx] = (val))
87025 +#define HE_GET_PTE(he,tagidx,pteidx)           (((E4_uint8 *) &(he->he_pte[tagidx]))[pteidx])
87026 +
87027 +/*
87028 + * he_tag has the following form :
87029 + *     [63:27] tag
87030 + *     [20:17]  pte valid
87031 + *     [16]     locked
87032 + *     [15]     copy
87033 + *     [14]     valid
87034 + *     [13:0]  context
87035 + */
87036 +
87037 +#define HE_TAG_VALID           (1 << 14)
87038 +#define HE_TAG_COPY            (1 << 15)
87039 +#define HE_TAG_LOCKED          (1 << 16)
87040 +
87041 +#define INVALID_CONTEXT                0
87042 +
87043 +extern u_char elan4_permtable[];
87044 +#define ELAN4_INCOMPAT_ACCESS(perm,access) ((elan4_permtable[(perm)] & (1 << (access))) == 0)
87045 +extern u_char elan4_permreadonly[];
87046 +#define ELAN4_PERM_READONLY(perm)        (elan4_permreadonly[(perm)])
87047 +
87048 +/* return code from elan4mmu_categorise_paddr */
87049 +#define ELAN4MMU_PADDR_SDRAM           0
87050 +#define ELAN4MMU_PADDR_COMMAND         1
87051 +#define ELAN4MMU_PADDR_LOCALPCI                2
87052 +#define ELAN4MMU_PADDR_PAGE            3
87053 +#define ELAN4MMU_PADDR_OTHER           4
87054 +
87055 +extern int elan4_debug_mmu;
87056 +
87057 +#ifdef DEBUG_PRINTF
87058 +#  define MPRINTF(ctxt,lvl,args...)    (elan4_debug_mmu > (lvl) ? elan4_debugf(ctxt,DBG_MMU, ##args) : (void)0)
87059 +#else
87060 +#  define MPRINTF(ctxt,lvl,args...)    ((void) 0)
87061 +#endif
87062 +
87063 +/*
87064 + * Local variables:
87065 + * c-file-style: "stroustrup"
87066 + * End:
87067 + */
87068 +#endif /* __ELAN4_MMU_H */
87069 Index: linux-2.4.21/include/elan4/neterr.h
87070 ===================================================================
87071 --- linux-2.4.21.orig/include/elan4/neterr.h    2004-02-23 16:02:56.000000000 -0500
87072 +++ linux-2.4.21/include/elan4/neterr.h 2005-06-01 23:12:54.738417976 -0400
87073 @@ -0,0 +1,40 @@
87074 +/*
87075 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87076 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
87077 + *
87078 + *    For licensing information please see the supplied COPYING file
87079 + *
87080 + */
87081 +
87082 +#ifndef __ELAN4_NETERR_H
87083 +#define __ELAN4_NETERR_H
87084 +
87085 +#ident "@(#)$Id: neterr.h,v 1.1 2004/01/19 14:38:34 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
87086 +/*      $Source: /cvs/master/quadrics/elan4mod/neterr.h,v $*/
87087 +
87088 +typedef struct elan4_neterr_msg
87089 +{
87090 +    E4_uint8           msg_type;
87091 +    E4_uint8           msg_waitforeop;
87092 +    E4_uint16          msg_context;                            /* network context # message sent to */
87093 +    E4_int16           msg_found;                              /* # cookie found (response) */
87094 +
87095 +    ELAN_LOCATION      msg_sender;                             /* nodeid/context # message sent from */
87096 +    E4_uint32          msg_pad;
87097 +
87098 +    E4_uint64          msg_cookies[6];                         /* 64 bit cookies from identify packets */
87099 +} ELAN4_NETERR_MSG;
87100 +
87101 +#define ELAN4_NETERR_MSG_SIZE          sizeof (ELAN4_NETERR_MSG)
87102 +#define ELAN4_NETERR_MSG_REQUEST       1
87103 +#define ELAN4_NETERR_MSG_RESPONSE      2
87104 +
87105 +#define ELAN4_NETERR_MAX_COOKIES       (sizeof (((ELAN4_NETERR_MSG *) 0)->msg_cookies) / \
87106 +                                        sizeof (((ELAN4_NETERR_MSG *) 0)->msg_cookies[0]))
87107 +
87108 +/*
87109 + * Local variables:
87110 + * c-file-style: "stroustrup"
87111 + * End:
87112 + */
87113 +#endif /* __ELAN4_NETERR_H */
87114 Index: linux-2.4.21/include/elan4/pci.h
87115 ===================================================================
87116 --- linux-2.4.21.orig/include/elan4/pci.h       2004-02-23 16:02:56.000000000 -0500
87117 +++ linux-2.4.21/include/elan4/pci.h    2005-06-01 23:12:54.739417824 -0400
87118 @@ -0,0 +1,227 @@
87119 +/*
87120 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87121 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87122 + *
87123 + *    For licensing information please see the supplied COPYING file
87124 + *
87125 + */
87126 +
87127 +#ifndef __ELAN4_PCI_H 
87128 +#define __ELAN4_PCI_H
87129 +
87130 +#ident "$Id: pci.h,v 1.32 2003/09/04 12:39:17 david Exp $"
87131 +/*      $Source: /cvs/master/quadrics/elan4hdr/pci.h,v $*/
87132 +
87133 +/* Elan has 2 64 bit bars */
87134 +#define ELAN4_BAR_SDRAM                        0
87135 +#define ELAN4_BAR_REGISTERS            2
87136 +
87137 +#define PCI_VENDOR_ID_QUADRICS         0x14fc
87138 +#define PCI_DEVICE_ID_ELAN3            0x0000
87139 +#define   PCI_REVISION_ID_ELAN3_REVA   0x0000
87140 +#define   PCI_REVISION_ID_ELAN3_REVB   0x0001
87141 +#define PCI_DEVICE_ID_ELAN4            0x0001
87142 +#define   PCI_REVISION_ID_ELAN4_REVA   0x0000
87143 +#define   PCI_REVISION_ID_ELAN4_REVB   0x0001
87144 +
87145 +/* support standard pseudo bars */
87146 +#define ELAN4_PSEUDO_BAR_ROM           8
87147 +
87148 +/* Elan PCI control
87149 + configuration space register. ElanControlRegister */
87150 +#define PCI_ELAN_PARITY_ADDR_LO                0x40
87151 +#define PCI_ELAN_PARITY_ADDR_HI                0x44
87152 +#define PCI_ELAN_PARITY_TYPE           0x48
87153 +#define PCI_ELAN_CONTROL               0x4c
87154 +#define PCI_ELAN_PLL_CONTROL           0x50
87155 +#define PCI_ELAN_SPLIT_MESSAGE_ATTR    0x54
87156 +#define PCI_ELAN_SPLIT_MESSAGE_VALUE   0x54
87157 +#define PCI_ELAN_RAMBIST_FAILED                0x54
87158 +#define PCI_ELAN_TOPPHYSADDR(i)                (0x58 + ((i)<<1))
87159 +
87160 +/*
87161 + * [31]           PciM66EN             This is set it the bus is running in PCI2.3 - 66MHz mode.
87162 + * [30:28] InitPattern         This gives the PCI-X startup mode. See "Pci intialisation patterns" below.
87163 + * [27]           notBusIs64Bits       If set the bus is running 32 bits wide. If Clear it is a 64 bit bus.
86164 + * [26:24] RamBistCntl         Used to control the Elan4 RAM BIST. Not active if zero.
87165 + * [23]           RamBistFinished      Only used when performing the RAM BIST test.
87166 + * [22]           SelectSplitMessAttr  See ECTRL_SELECT_SPLIT_MESS_ATTR below.
87167 + * [21]           ReceivedSplitCompError See ECTRL_REC_SPLIT_COMP_MESSAGE below
87168 + * [20:16] WriteHighPriTime    Used with ReadHighPriTime to control the ratio of PCI master write to PCI master
87169 + *                             read bandwidth under heavy load. The high the value of WriteHighPriTime the longer
87170 + *                             the PCI write bursts will be allowed without interruption from a read transfer.
87171 + * [15]    DisableCouplingTest This is only used as part of the RAM BIST test. It effects the testing of the main
87172 + *                             cache tag RAMS.
87173 + * [14:13] Not used            Will read as zero.
87174 + * [12:8]  ReadHighPriTime     Used with WriteHighPriTime to control the ratio of PCI master write to PCI master
87175 + *                             read bandwidth under heavy load. The high the value of ReadHighPriTime the longer
87176 + *                             the PCI read bursts will be allowed without interruption from a write transfer.
87177 + * [7] EnableLatencyCountReset  This bit effect the behaviour of disconnects due to the removal of GNT# after the latency
87178 + *                             counter has expired. If set it will allow the latency counter to be reset each time the
87179 + *                             GNT# is reasserted. If asserted it should provided improved bandwidth on the PCI bus
87180 + *                             without increasing the maximum latency another device would have for access to the bus.
87181 + *                             It will increase the average latency of other devices.
87182 + * [6] ExtraMasterAddrBits     This bit used to control the physical PCI addresses generated by the MMU.
87183 + * [5] ReducedPciDecode                If set the PCI local memory BAR will decode 256Mbytes of PCI address space. If clear it
87184 + *                             will decode 2Gbyte of PCI address space.
87185 + * [4] ConfigInEBusRom         If set the constant values of the Elan4 PCI configuration space will be taken from the
87186 + *                             EEPROM. If clear the internal values will be used.
87187 + * [3] EnableRd2_2Bursts       This bit only effects the behaviour of burst reads when the PCI bus is operating in
87188 + *                             PCI-2.2 mode. It allows adjacent reads to be merged into longer bursts for higher
87189 + *                             performance.
87190 + * [2] SoftIntReset            If set this bit will cause the Elan4 to reset itself with the exception of the PCI
87191 + *                             configuration space. All internal state machines will be put into the reset state.
87192 + * [1] EnableWrBursts          This bit allows much longer PCI-X write bursts. If set it will stop the Elan4 from
87193 + *                             being completely PCI-X compliant as the Elan4 may request a long PCI-X write burst that
87194 + *                             it does not complete. However it should significantly increase the maximum PCI-X write
87195 + *                             bandwidth and is unlikely to cause problems with many PCI-X bridge chips.
87196 + * [0] InvertMSIPriority       This bit affects the way MSI interrupts are generated. It provides flexibility to generate
87197 + *                             the MSI interrupts in a different way to allow for different implementations of MSI
87198 + *                             logic and still give the correct priority of Elan4 interrupts.
87199 + *
87200 + *     {PciM66EN, InitPattern, notBusIs64Bits, RamBistCntl, RamBistFinished,
87201 + *      SelectSplitMessAttr, ReceivedSplitCompError, WriteHighPriTime,
87202 + *      DisableCouplingTest, 2'h0, ReadHighPriTime,
87203 + *      EnableLatencyCountReset, ExtraMasterAddrBits, ReducedPciDecode, ConfigInEBusRom,
87204 + *      EnableRd2_2Bursts, SoftIntReset, EnableWrBursts, InvertMSIPriority}
87205 + */
87206 +
87207 +#define ECTRL_INVERT_MSI_PRIO          (1 << 0)
87208 +#define ECTRL_ENABLE_WRITEBURSTS       (1 << 1)
87209 +#define ECTRL_SOFTWARE_INTERNAL_RESET  (1 << 2)
87210 +#define ECTRL_ENABLE_2_2READBURSTS     (1 << 3)
87211 +#define ECTRL_CONFIG_IN_EBUS_ROM       (1 << 4)
87212 +#define ECTRL_28_NOT_30_BIT_LOCAL_BAR  (1 << 5)
87213 +#define ECTRL_ExtraMasterAddrBits      (1 << 6)
87214 +#define ECTRL_ENABLE_LATENCY_RESET      (1 << 7)
87215 +#define ECTRL_DISABLE_COUPLING_TEST    (1 << 15)
87216 +
87217 +/*
87218 + * Ratio of the following two registers sets the relative bandwidth given to inputter data
87219 + * versus other PCI traffic when scheduling new PCI master accesses.
87220 + */
87221 +#define ECTRL_OTHER_HIGH_PRI_TIME_SHIFT        (8)     /* Sets top 4 bits of 8 bit counter */
87222 +#define ECTRL_OTHER_HIGH_PRI_TIME_MASK (0x1f)
87223 +
87224 +
87225 +#define ECTRL_IPROC_HIGH_PRI_TIME_SHIFT        (16)    /* Sets top 4 bits of 8 bit counter */
87226 +#define ECTRL_IPROC_HIGH_PRI_TIME_MASK (0x1f)
87227 +
87228 +/*
87229 + * This is set if a split completion message is received.
87230 + * This will cause a PCI error interrupt.
87231 + * This error is cleared by writing a 1 to this bit.
87232 + */
87233 +#define ECTRL_REC_SPLIT_COMP_MESSAGE   (1 << 21)
87234 +/*
87235 + * This bit is used to select reading of either the Split message attribute value when
87236 + * set or the split completion message data value from 0x54 in the config space
87237 + * if the ECTRL_REC_SPLIT_COMP_MESSAGE bit is set. 0x54 returns the BistFailed flags
87238 + * if any of the BIST control bits are set (bits 26 to 24)
87239 + */
87240 +#define ECTRL_SELECT_SPLIT_MESS_ATTR   (1 << 22)
87241 +
87242 +// Internal RAM bist control bits.
87243 +// Three bits of state control the RAM BIST (Built in self test).
87244 +//
87245 +// These bits must not be set unless the ECTRL_SOFTWARE_INTERNAL_RESET bit has also been set!
87246 +//
87247 +// For a normal fast ram test assert ECTRL_BIST_FAST_TEST. 
87248 +// For a data retention test first write ECTRL_START_RETENTION_TEST then wait the retention period of
87249 +// at least 1ms and preferably much longer then write ECTRL_CONTINUE_RETENTION_TEST then wait
87250 +// again and finally write ECTRL_FINISH_RETENTION_TEST.
87251 +// 
87252 +// The read only bit ECTRL_BIST_FINISHED_TEST can be polled to check that the test has completed.
87253 +#define ECTRL_BIST_CTRL_SHIFT          (24)
87254 +#define ECTRL_BIST_CTRL_MASK           (7 << 24)
87255 +
87256 +#define ECTRL_BIST_FAST_TEST           ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)     // old scheme
87257 +#define ECTRL_START_RETENTION_TEST     ((1 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
87258 +#define ECTRL_CONTINUE_RETENTION_TEST  ((3 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
87259 +#define ECTRL_FINISH_RETENTION_TEST    ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
87260 +
87261 +#define ECTRL_BIST_KICK_OFF            ((1 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)     // new scheme
87262 +#define ECTRL_BIST_MOVE_ON_ODD         ((3 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
87263 +#define ECTRL_BIST_MOVE_ON_EVEN                ((5 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
87264 +#define ECTRL_BIST_SCREAM_THROUGH      ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
87265 +
87266 +#define ECTRL_CLEAR_BIST_TEST          (0 << 24)
87267 +#define ECTRL_BIST_FINISHED_TEST       (1 << 23)
87268 +
87269 +// Read only current PCI bus type.
87270 +#define ECTRL_RUNNING_32BIT_MODE       (1 << 27)
87271 +#define ECTRL_INITIALISATION_MODE      (7 << 28)
87272 +#define ECTRL_RUNNING_M66EN_MODE       (1 << 31)
87273 +
87274 +#define ECTRL_INIT_PATTERN_SHIFT       (28)
87275 +#define ECTRL_INIT_PATTERN_MASK                (0x7)
87276 +
87277 +// Pci initialisation patterns
87278 +#define Pci2_2                         (0 << 28)
87279 +#define PciX50To66MHz                  (1 << 28)
87280 +#define PciX66to100MHz                 (2 << 28)
87281 +#define PciX100to133MHz                        (3 << 28)
87282 +#define PciXReserved1                  (4 << 28)
87283 +#define PciXReserved2                  (5 << 28)
87284 +#define PciXReserved3                  (6 << 28)
87285 +#define PciXReserved4                  (7 << 28)
87286 +
87287 +/* Elan PCI pll and pad control configuration space register. ElanPllControlReg */
87288 +// This overrides the default PCI pll control settings.
87289 +#define PciPll_FeedForwardISel0                (1 << 0)        // Lsi name Z0
87290 +#define PciPll_FeedForwardISel1                (1 << 1)        // Lsi name Z1
87291 +#define PciPll_ChargePumpISel0         (1 << 2)        // Lsi name P0
87292 +#define PciPll_ChargePumpISel1         (1 << 3)        // Lsi name P1
87293 +#define PciPll_EnableAutoReset         (1 << 4)        // Lsi name ENARST
87294 +#define PciPll_RSEL200500              (1 << 5)        // Lsi name Range Select, 0: 100 - 250MHz, 1: 200 - 500MHz
87295 +#define PciPll_DivideFeedback          (1 << 6)        // Just used for test - This divides the shortcut feedback to the PCI PLL so that it can lock to the tester clock.
87296 +#define PciPll_CutFeedback             (1 << 7)        // Just used for test - This disables the shortcut feedback.
87297 +
87298 +// This overrides the default PCI BZ controller settings.
87299 +#define PciBZ_UPDI                     (0xf << 8)
87300 +#define PciBZ_WAIT_INT                 (0xf << 12)
87301 +
87302 +// This overrides the default Sys and SDRam pll control settings.
87303 +#define SysPll_FeedForwardISel0                (1 << 16)       // Lsi name P0     
87304 +#define SysPll_FeedForwardISel1                (1 << 17)       // Lsi name P1     
87305 +#define SysPll_ChargePumpISel0         (1 << 18)       // Lsi name Z0    
87306 +#define SysPll_ChargePumpISel1         (1 << 19)       // Lsi name Z1    
87307 +#define SysPll_EnableAutoReset         (1 << 20)       // Lsi name ENARST
87308 +#define SysPll_DivPhaseCompInBy2       (1 << 21)       // Lsi name NODIV (Should be DIV)
87309 +#define SysPll_PllTestClkSel           (1 << 22)       // If asserted the master clock source is not taken from the pll.
87310 +
87311 +#define Pll_ForceEBusADTristate                (1 << 23)       // Required to enable the testing of EnableAutoReset. Enables use of EBusAD[7] (rev A)
87312 +#define Pll_LinkErrDirectToSDA         (1 << 23)       // Access to link error flag for triggering (rev B)
87313 +
87314 +
87315 +#define ECTRL_SYS_CLOCK_RATIO_SHIFT    (24)
87316 +// Config: with 800MHz         Speeds are 266 200 160 133.
87317 +//         0 = 133/133 (1:1)   6:6     1
87318 +//        1 = 160/133 (6:5)    5:6     1.2
87319 +//         2 = 200/133 (3:2)   4:6     1.5
87320 +//        3 = 266/133 (2:1)    3:6     2
87321 +//        4 = 200/200 (1:1)    4:4     1
87322 +//        5 = 266/200 (4:3)    3:4     1.33
87323 +
87324 +// Config: with 600MHz         Speeds are 200 150 120 100
87325 +//         0 = 100/100 (1:1)   6:6     1
87326 +//        1 = 120/100 (6:5)    5:6     1.2
87327 +//         2 = 150/100 (3:2)   4:6     1.5
87328 +//        3 = 200/100 (2:1)    3:6     2
87329 +//        4 = 150/150 (1:1)    4:4     1
87330 +//        5 = 200/150 (4:3)    3:4     1.33
87331 +
87332 +#define ECTRL_SYS_CLOCK_RATIO_SHIFT    (24)
87333 +#define ECTRL_SYS_CLOCK_RATIO_1_1Slow  (0 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87334 +#define ECTRL_SYS_CLOCK_RATIO_6_5      (1 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87335 +#define ECTRL_SYS_CLOCK_RATIO_3_2      (2 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87336 +#define ECTRL_SYS_CLOCK_RATIO_2_1      (3 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87337 +#define ECTRL_SYS_CLOCK_RATIO_1_1Fast  (4 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87338 +#define ECTRL_SYS_CLOCK_RATIO_4_3      (5 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87339 +#define ECTRL_SYS_CLOCK_MAX_NORMAL     (6)                                     /* used to generate a valid random value */
87340 +#define GET_RANDOM_CLOCK_RATIO         (Random(ECTRL_SYS_CLOCK_MAX_NORMAL) << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87341 +#define ECTRL_SYS_CLOCK_RATIO_PLL_TEST (6 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87342 +#define ECTRL_SYS_CLOCK_RATIO_TEST     (7 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87343 +#define ECTRL_SYS_CLOCK_RATIO_MASK     (7 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
87344 +
87345 +#endif /* __ELAN4_PCI_H */
87346 Index: linux-2.4.21/include/elan4/registers.h
87347 ===================================================================
87348 --- linux-2.4.21.orig/include/elan4/registers.h 2004-02-23 16:02:56.000000000 -0500
87349 +++ linux-2.4.21/include/elan4/registers.h      2005-06-01 23:12:54.742417368 -0400
87350 @@ -0,0 +1,1588 @@
87351 +/*
87352 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87353 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87354 + *
87355 + *    For licensing information please see the supplied COPYING file
87356 + *
87357 + */
87358 +
87359 +#ifndef _ELAN4_REGISTERS_H
87360 +#define _ELAN4_REGISTERS_H
87361 +
87362 +#ident "$Id: registers.h,v 1.117.2.1 2004/10/04 14:26:18 david Exp $"
87363 +/*      $Source: /cvs/master/quadrics/elan4hdr/registers.h,v $*/
87364 +
87365 +/*
87366 + * Header file for internal slave mapping of the ELAN4 registers
87367 + */
87368 +
87369 +#define E4_CACHELINE_SIZE      (64)
87370 +#define E4_STACK_ALIGN         (64)
87371 +
87372 +#ifndef _ASM
87373 +
87374 +#include <elan4/types.h>
87375 +#include <elan4/dma.h>
87376 +#include <elan4/userregs.h>
87377 +
87378 +typedef volatile struct _E4_CacheSets
87379 +{
87380 +   E4_uint64   Set0[1024];     /* 8k bytes per set */
87381 +   E4_uint64   Set1[1024];     /* 8k bytes per set */
87382 +   E4_uint64   Set2[1024];     /* 8k bytes per set */
87383 +   E4_uint64   Set3[1024];     /* 8k bytes per set */
87384 +} E4_CacheSets;
87385 +
87386 +typedef union e4_cache_tag
87387 +{
87388 +   struct {
87389 +       E4_uint32 pad0;                 /* Undefined value when read */
87390 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
87391 +       E4_uint32 :10;                                          /* 0-9   - reserved */
87392 +       E4_uint32 LineError:1;                                  /* 10    - line error */
87393 +       E4_uint32 Modified:1;                                   /* 11    - modified */
87394 +       E4_uint32 FillPending:1;                                        /* 12    - fill pending */
87395 +       E4_uint32 AddrTag30to13:18;                             /* 30-13 - tag */
87396 +       E4_uint32 :1;                                           /* 31    -  */
87397 +#else
87398 +       E4_uint32 :1;                                           /* 31    -  */
87399 +       E4_uint32 AddrTag30to13:18;                             /* 30-13 - tag */
87400 +       E4_uint32 FillPending:1;                                        /* 12    - fill pending */
87401 +       E4_uint32 Modified:1;                                   /* 11    - modified */
87402 +       E4_uint32 LineError:1;                                  /* 10    - line error */
87403 +       E4_uint32 :10;                                          /* 0-9   - reserved */
87404 +#endif
87405 +   } s;
87406 +   E4_uint64   Value;
87407 +} E4_CacheTag;
87408 +
87409 +typedef volatile struct _E4_CacheTags
87410 +{
87411 +   E4_CacheTag Tags[4][128];   /* 8k bytes per set, 64 byte cache line */
87412 +} E4_CacheTags;
87413 +
87414 +#define E4_NumCacheSets                4
87415 +#define E4_NumCacheLines       128
87416 +#define E4_CacheLineSize       64
87417 +#define E4_CacheSize           (E4_NumCacheSets * E4_NumCacheLines * E4_CacheLineSize)
87418 +#define E4_CacheSetSize        (E4_NumCacheLines * E4_CacheLineSize)
87419 +
87420 +/*
87421 + * Run Queue pointers 
87422 + *
87423 + * [62:35]     FrontPointer[30:3]
87424 + * [33:32]     Size Value
87425 + * [30:3]      BackPointer[30:3]
87426 + */
87427 +#define E4_QueuePtrMask                (0x7ffffff8ULL)
87428 +#define E4_QueueSizeMask       3
87429 +#define E4_QueueEntrySize       sizeof (E4_uint64)
87430 +
87431 +#define E4_Queue8KBytes                0
87432 +#define E4_Queue64KBytes       1
87433 +#define E4_Queue512KBytes      2
87434 +#define E4_Queue4MBytes                3
87435 +
87436 +#define E4_QueueFrontValue(val,size)   ((val) | (size))
87437 +#define E4_QueueValue(queue,size)      (((E4_uint64) E4_QueueFrontValue(queue,size)) << 32 | ((E4_uint64) (queue)))
87438 +
87439 +#define E4_QueueFrontPointer(val)      /* extract queue front pointer from register */\
87440 +       (((val) >> 32) & E4_QueuePtrMask)
87441 +#define E4_QueueBackPointer(val)       /* extract queue back pointer from register */ \
87442 +       ((val) & E4_QueuePtrMask)
87443 +#define E4_QueueSizeValue(val)         /* extract queue size value from register */ \
87444 +       (((val) >> 32) & E4_QueueSizeMask)
87445 +#define E4_QueueSize(value)            /* queue size in bytes from size value */ \
87446 +       (1 << (((value)*3) + 13))
87447 +#define E4_QueueOffsetMask(fptr)\
87448 +        ((8192 << (((fptr) & E4_QueueSizeMask) << 3)) - 1)
87449 +#define E4_QueueOffset(fptr)\
87450 +        ((fptr) & E4_QueueOffsetMask(fptr))
87451 +#define E4_QueueFrontPointerInc(fptr)   \
87452 +        ( ((fptr) & ~E4_QueueOffsetMask(fptr)) | ((E4_QueueOffset(fptr) + 8) & E4_QueueOffsetMask(fptr)) )
87453 +
87454 +typedef union _E4_QueuePtr
87455 +{
87456 +   E4_uint64   Value;
87457 +   struct {
87458 +       E4_uint32 Back;
87459 +       E4_uint32 Front;
87460 +   } s;
87461 +} E4_QueuePtr;
87462 +
87463 +/*
87464 + * DMA processor status register.
87465 + *
87466 + * [48]                FirstSendTrans          Set for the first packet of a dma.
87467 + * [47:46]     TimeSliceCount          Time left to timeslice.
87468 + * [45]                DmaLastPacket           Set for the last packet of a dma.
87469 + * [44]                CurrPrefetchDma         Dma descriptor the prefetcher is valid for.
87470 + * [43:39]     PrefetcherState         Dma prefetcher's state machines value.
87471 + * [38:33]     PacketAssemblyState     Packet assembler's state machines value.
87472 + * [32:31]     PrefetcherWakeupFnt     Dma prefetcher's wakeup function.
87473 + * [30:28]     PacketAssWakeupFnt      Packet assembler's wakeup function.
87474 + * [27]                AckBufferValid          Packet ack is valid.
87475 + * [26]                PrefetchedDataProblem   Had either a data read fault or data error. Valid if AckBufferValid.
87476 + * [25]                PrefetcherHalting       Prefetch data about to stop for halt. Valid if AckBufferValid.
87477 + * [24]                PacketTimeout           Packet timeout. Sent an EopError. Valid if AckBufferValid set.
87478 + * [23:22]     PacketAckValue          Packet ack type. Valid if AckBufferValid set.
87479 + * [21:20]     FaultUnitNo             Set if the dma prefetcher has faulted.
87480 + * [19:17]     TrapType                Packet assembler's trap type.
87481 + * [16]                PrefetcherFault         Set if the dma prefetcher has faulted for this DMA unit.
87482 + * [15]                Remote                  The Dma had been issued remotely
87483 + * [14]                Priority                Running at high priority.
87484 + * [13:0]      Context                 procs current context.
87485 + */
87486 +
87487 +#define DPROC_FirstSendTrans(s)                ((unsigned)((s) >> 48) & 1)
87488 +#define DPROC_TimeSliceCount(s)                ((unsigned)((s) >> 46) & 3)
87489 +#define DPROC_DmaLastPacket(s)         ((unsigned)((s) >> 45) & 1)
87490 +#define DPROC_CurrPrefetchDma(s)       ((unsigned)((s) >> 44) & 1)
87491 +#define DPROC_PrefetcerState(s)                ((unsigned)((s) >> 39) & 0x1f)
87492 +#define DPROC_PacketAssemblerState(s)  ((unsigned)((s) >> 33) & 0x1f)
87493 +#define DPROC_PrefetcherWakeupFn(s)    ((unsigned)((s) >> 31) & 3)
87494 +#define DPROC_PacketAssemblerWakeupFn(s)((unsigned)((s) >> 28) & 3)
87495 +#define DPROC_AckBufferValid(s)                ((unsigned)((s) >> 27) & 1)
87496 +#define DPROC_PrefetcherDataProblem(s) ((unsigned)((s) >> 26) & 1)
87497 +#define DPROC_PrefetcherHalting(s)     ((unsigned)((s) >> 25) & 1)
87498 +#define DPROC_PacketTimeout(s)         ((unsigned)((s) >> 24) & 1)
87499 +#define DPROC_PacketAckValue(s)                ((unsigned)((s) >> 22) & 3)
87500 +#define DPROC_FaultUnitNo(s)           ((unsigned)((s) >> 20) & 3)
87501 +#define DPROC_TrapType(s)              ((unsigned)((s) >> 17) & 7)
87502 +#define DPROC_PrefetcherFault(s)       ((unsigned)((s) >> 16) & 1)
87503 +#define DPROC_Remote(s)                        ((unsigned)((s) >> 15) & 1)
87504 +#define DPROC_Priority(s)              ((unsigned)((s) >> 14) & 1)
87505 +#define DPROC_Context(s)               ((unsigned)(s) & 0x3fff)
87506 +
87507 +/*
87508 + * Command processor status register.
87509 + *
87510 + * [26:21]     CPState         procs current state.
87511 + * [20]                WakeupFnt       procs wakeup function.
87512 + * [19:16]     TrapValue       procs trap value.
87513 + * [15]                Remote          Issued remotely.
87514 + * [14]                Priority        Running at high priority.
87515 + * [13:0]      Context         procs current context.
87516 + */
87517 +
87518 +#define CPROC_TrapType(s)              ((unsigned)((s) >> 16) & 0xf)
87519 +#define CPROC_Remote(s)                        ((unsigned)((s) >> 15) & 0x1)
87520 +#define CPROC_Priority(s)              ((unsigned)((s) >> 14) & 0x1)
87521 +#define CPROC_Context(s)               ((unsigned)(s) & 0x3fff)
87522 +
87523 +/*
87524 + * Event processor status register.
87525 + *
87526 + * [34:30]     CPState         event procs current state.
87527 + * [29:28]     WakeupFnt       event procs wakeup function.
87528 + * [27:20]     EventCopySize   This is the number of DWords to still be copied on a copy dword event.
87529 + * [19]                EProcPort1Fault CUN_EventProc1 has taken a translation fault.
87530 + * [18]                EProcPort0Fault CUN_EventProc0 has taken a translation fault.
87531 + * [17:16]     TrapValue       event proc's trap value.
87532 + * [15]                Remote          Issued remotely.
87533 + * [14]                Priority        Running at high priority.
87534 + * [13:0]      Context         procs current context.
87535 + */
87536 +
87537 +#define EPROC_CPState(s)               ((unsigned)((s) >> 30) & 0x1f)
87538 +#define EPROC_WakeupFunction(s)                ((unsigned)((s) >> 28) & 3)
87539 +#define EPROC_CopySize(s)              ((unsigned)((s) >> 20) & 0xFF)
87540 +#define EPROC_Port1Fault(s)            ((unsigned)((s) >> 19) & 1)
87541 +#define EPROC_Port0Fault(s)            ((unsigned)((s) >> 18) & 1)
87542 +#define EPROC_TrapType(s)              ((unsigned)((s) >> 16) & 3)
87543 +#define EPROC_Remote(s)                        ((unsigned)((s) >> 15) & 1)
87544 +#define EPROC_Priority(s)              ((unsigned)((s) >> 14) & 1)
87545 +#define EPROC_Context(s)               ((unsigned)(s) & 0x3fff)
87546 +
87547 +/*
87548 + * Thread processor status register.
87549 + *
87550 + * [39:24]     MemPortBusy             16 bits of port busy flags for all FFU memory ports.
87551 + * [23:21]     Reads as zero
87552 + * [20:18]     TQState                 State vector for thread queuing proc.
87553 + * [17]                HighRunQueueFull        High priority run queue is full
87554 + * [16]                LowRunQueueFull         Low priority run queue is full
87555 + * [15]                ReadyHigh               More runable threads at high priority
87556 + * [14]                ReadyLow                More runable threads at low priority
87557 + * [13:0]      Context                 procs current context.
87558 + */
87559 +#define TPROC_HighRunQueueFull(s)      ((unsigned)((s) >> 17) & 1)
87560 +#define TPROC_LowRunQueueFull(s)       ((unsigned)((s) >> 16) & 1)
87561 +#define TPROC_ReadyHigh(s)             ((unsigned)((s) >> 15) & 1)
87562 +#define TPROC_ReadyLow(s)              ((unsigned)((s) >> 14) & 1)
87563 +#define TPROC_Context(s)               ((unsigned)((s) & 0x3fff))
87564 +
87565 +/*
87566 + * Input processor status register
87567 + *
87568 + * [55]                Last Trans (~EOP)
87569 + * [54]                First Trans (~EOP)
87570 + * [53]                Channel (~EOP) 
87571 + * [52]                Bad Length (~EOP)
87572 + * [51:50]     Trans CRC Status (~EOP)
87573 + * [49:48]     EOP type
87574 + * [47]                EOP trap
87575 + * [46]                Trapping priority
87576 + * [45]                Trapping Channel
87577 + * [44:43]     Bad ack sent
87578 + * [42:41]     Good ack sent
87579 + * [40]                Queueing Packet (~EOP)
87580 + * [39:36]     Channel trapped bits
87581 + * [35:32]     IProc Trap Value
87582 + * [31:16]     Network Context (~EOP)
87583 + * [15:0]      Transaction Type (~EOP)
87584 + */
87585 +#define IPROC_LastTrans(s)             ((unsigned)((s) >> 55) & 0x1)
87586 +#define IPROC_FirstTrans(s)            ((unsigned)((s) >> 54) & 0x1)
87587 +#define IPROC_Channel(s)               ((unsigned)((s) >> 53) & 0x1)
87588 +#define IPROC_BadLength(s)             ((unsigned)((s) >> 52) & 0x1)
87589 +#define IPROC_TransCRCStatus(s)                ((unsigned)((s) >> 50) & 0x3)
87590 +#define IPROC_EOPType(s)               ((unsigned)((s) >> 48) & 0x3)
87591 +#define IPROC_EOPTrap(s)               ((unsigned)((s) >> 47) & 0x1)
87592 +#define IPROC_InputterPri(s)           ((unsigned)((s) >> 46) & 0x1)
87593 +#define IPROC_InputterChan(s)          ((unsigned)((s) >> 45) & 0x1)
87594 +#define IPROC_BadAckSent(s)            ((unsigned)((s) >> 43) & 0x3)
87595 +#define IPROC_GoodAckSent(s)           ((unsigned)((s) >> 41) & 0x3)
87596 +#define IPROC_QueueingPacket(s)                ((unsigned)((s) >> 40) & 0x1)
87597 +#define IPROC_ChannelTrapped(s)                ((unsigned)((s) >> 36) & 0xF)
87598 +#define IPROC_TrapValue(s)             ((unsigned)((s) >> 32) & 0xF)
87599 +#define IPROC_NetworkContext(s)                ((unsigned)((s) >> 16) & 0xFFFF)
87600 +#define IPROC_TransactionType(s)       ((unsigned)(s) & 0xFFFF)
87601 +
87602 +/* values for IPROC_TransCRCStatus */
87603 +#define CRC_STATUS_GOOD    (0)
87604 +#define CRC_STATUS_DISCARD (1)
87605 +#define CRC_STATUS_ERROR   (2)
87606 +#define CRC_STATUS_BAD     (3)
87607 +
87608 +/* values for IPROC_EOPType */
87609 +#define EOP_GOOD          (1)
87610 +#define EOP_BADACK        (2)
87611 +#define EOP_ERROR_RESET           (3)
87612 +
87613 +/*
87614 + * Interrupt register bits
87615 + *
87616 + * There are up to four sources of interrupt for the MSI port.
87617 + * The Elan will request 4 ports but may only get either 2 or 1 port. The Interrupts are assigned
87618 + * as shown below:
87619 + * No Of MSI ints      Low Priority                                                   High Priority
87620 + *     4               Event Ints      OtherInts               Inputer Ints            Hard Error ints.
87621 + *                i.e.                 Dproc, Tproc, Sten.     HighPri and LowPri      Link errs, ECC errs,
87622 + *
87623 + *     2               Event Ints      All other interrupts.
87624 + *     1               All together.
87625 + * 
87626 + * It is not safe to change the number of sources of interrupt while there may be outstanding,
87627 + * unserviced interrupts pending.
87628 + * There are two forms of encoding. This has been provided in case an MSI implementation assumes either
87629 + * a high value to have a high priority or a low value to have a high priority. This is controlled
87630 + * by a bit in the Elan Pci Control register.
87631 + */
87632 +#define INT_LinkPortKeyFail            (1<<18)
87633 +#define INT_PciMemErr                  (1<<17)
87634 +#define INT_SDRamInt                   (1<<16)
87635 +#define INT_LinkError                  (1<<15)
87636 +#define INT_IProcCh1HighPri            (1<<14)
87637 +#define INT_IProcCh0HighPri            (1<<13)
87638 +#define INT_IProcCh1LowPri             (1<<12)
87639 +#define INT_IProcCh0LowPri             (1<<11)
87640 +#define INT_DiscardingHighPri          (1<<10)
87641 +#define INT_DiscardingLowPri           (1<<9)
87642 +#define INT_CProcHalted                        (1<<8)
87643 +#define INT_TProcHalted                        (1<<7)
87644 +#define INT_DProcHalted                        (1<<6)
87645 +#define INT_EProc                      (1<<5)
87646 +#define INT_TProc                      (1<<4)
87647 +#define INT_CProc                      (1<<3)
87648 +#define INT_Dma1Proc                   (1<<2)
87649 +#define INT_Dma0Proc                   (1<<1)
87650 +#define INT_MainInterrupt              (1<<0)
87651 +
87652 +#define INT_Units              (INT_EProc | INT_TProc | INT_CProc | INT_Dma1Proc | INT_Dma0Proc)
87653 +#define INT_Inputters          (INT_IProcCh1HighPri | INT_IProcCh0HighPri | INT_IProcCh1LowPri | INT_IProcCh0LowPri)
87654 +#define INT_Discarding         (INT_DiscardingHighPri | INT_DiscardingLowPri)
87655 +#define INT_Halted             (INT_CProcHalted | INT_TProcHalted | INT_DProcHalted)
87656 +#define INT_ErrorInterrupts    (INT_LinkPortKeyFail | INT_PciMemErr | INT_SDRamInt | INT_LinkError)
87657 +
87658 +#define INT_MSI0               INT_MainInterrupt
87659 +#define INT_MSI1               (INT_Units | INT_Discarding | INT_Halted)
87660 +#define INT_MSI2               (INT_Inputters)
87661 +#define INT_MSI3               (INT_ErrorInterrupts)
87662 +
87663 +#define E4_INTERRUPT_REG_SHIFT 32
87664 +#define E4_INTERRUPT_MASK_MASK (0xffffffffULL)
87665 +
87666 +/*
87667 + * Trap type values - see trapvalues.v
87668 + */
87669 +
87670 +#define CommandProcInserterError               0x1
87671 +#define CommandProcPermissionTrap              0x2
87672 +#define CommandProcSendTransInvalid            0x3
87673 +#define CommandProcSendTransExpected           0x4
87674 +#define CommandProcDmaQueueOverflow            0x5
87675 +#define CommandProcInterruptQueueOverflow      0x6
87676 +#define CommandProcMemoryFault                 0x7
87677 +#define CommandProcRouteFetchFault             0x8
87678 +#define CommandProcFailCountZero               0x9
87679 +#define CommandProcAddressAlignment            0xa
87680 +#define CommandProcWaitTrap                    0xb
87681 +#define CommandProcMultipleGuards              0xc
87682 +#define CommandProcOpenOnGuardedChan           0xd
87683 +#define CommandProcThreadQueueOverflow         0xe
87684 +#define CommandProcBadData                      0xf
87685 +
87686 +#define DmaProcNoFault                         0x0
87687 +#define DmaProcRouteFetchFault                 0x1
87688 +#define DmaProcFailCountError                  0x2
87689 +#define DmaProcPacketAckError                  0x3
87690 +#define DmaProcRunQueueReadFault               0x4
87691 +#define DmaProcQueueOverflow                   0x5
87692 +
87693 +#define EventProcNoFault                       0x0
87694 +#define EventProcAddressAlignment              0x1
87695 +#define EventProcMemoryFault                   0x2
87696 +#define EventProcCountWrapError                        0x3
87697 +
87698 +#define InputNoFault                           0x0
87699 +#define InputAddressAlignment                  0x1
87700 +#define InputMemoryFault                       0x2
87701 +#define InputInvalidTransType                  0x3
87702 +#define InputDmaQueueOverflow                  0x4
87703 +#define InputEventEngineTrapped                        0x5
87704 +#define InputCrcErrorAfterPAckOk               0x6
87705 +#define InputEopErrorOnWaitForEop              0x7
87706 +#define InputEopErrorTrap                      0x8
87707 +#define InputDiscardAfterAckOk                 0x9
87708
87709 +typedef struct _E4_Sched_Status
87710 +{
87711 +    E4_uint32 Status;
87712 +    E4_uint32 Restart;
87713 +} E4_Sched_Status;
87714
87715 +typedef struct _E4_Input_Ptrs
87716 +{
87717 +    E4_uint32 ContextFilterTable;
87718 +    E4_uint32 TrapBasePtr;
87719 +} E4_Input_Ptrs;
87720 +
87721 +#define SCH_StopLowPriQueues           (1 << 0)
87722 +#define SCH_DProcHalt                  (1 << 1)
87723 +#define SCH_TProcHalt                  (1 << 2)
87724 +#define SCH_CProcHalt                  (1 << 3)
87725 +
87726 +#define SCH_CProcTimeout600ns          (1 << 4)
87727 +#define SCH_CProcTimeout1p4us          (2 << 4)
87728 +#define SCH_CProcTimeout3p0us          (3 << 4)
87729 +#define SCH_CProcTimeout6p2us          (4 << 4)
87730 +#define SCH_CProcTimeout12p6us         (5 << 4)
87731 +#define SCH_CProcTimeout25p4us         (6 << 4)
87732 +#define SCH_CProcTimeout51p0us         (7 << 4)
87733 +#define SCH_DiscardLowPriInput         (1 << 7)
87734 +#define SCH_DiscardHighPriInput                (1 << 8)
87735 +
87736 +#define SCH_DProcTimeslice64us         (0 << 9)
87737 +#define SCH_DProcTimeslice128us                (1 << 9)
87738 +#define SCH_DProcTimeslice256us                (2 << 9)
87739 +#define SCH_DProcTimeslice512us                (3 << 9)
87740 +
87741 +#define SCH_Halt                       (SCH_StopLowPriQueues | SCH_DProcHalt | SCH_TProcHalt | SCH_CProcHalt)
87742 +#define SCH_Discard                    (SCH_DiscardLowPriInput | SCH_DiscardHighPriInput)
87743 +
87744 +#define SCH_RestartCProc               (1 << 0)
87745 +#define SCH_RestartTProc               (1 << 1)
87746 +#define SCH_RestartEProc               (1 << 2)
87747 +#define SCH_RestartDma0Proc            (1 << 3)
87748 +#define SCH_RestartDma1Proc            (1 << 4)
87749 +#define SCH_RestartDmaPrefetchProc     (1 << 5)
87750 +#define SCH_RestartCh0LowPriInput      (1 << 6)
87751 +#define SCH_RestartCh1LowPriInput      (1 << 7)
87752 +#define SCH_RestartCh0HighPriInput     (1 << 8)
87753 +#define SCH_RestartCh1HighPriInput     (1 << 9)
87754 +#define SCH_ClearLinkErrorInt          (1 << 10)
87755 +#define SCH_ContextFilterFlush         (1 << 11)
87756 +
87757 +/*
87758 + * Link state bits.
87759 + */
87760 +#define LS_LinkNotReady                (1 << 0) /* Link is in reset or recovering from an error */
87761 +#define LS_Locked              (1 << 1) /* Linkinput PLL is locked */
87762 +#define LS_LockError           (1 << 2) /* Linkinput PLL was unable to lock onto the input clock. */
87763 +#define LS_DeskewError         (1 << 3) /* Linkinput was unable to Deskew all the inputs. (Broken wire?) */
87764 +#define LS_PhaseError          (1 << 4) /* Linkinput Phase alignment error. */
87765 +#define LS_DataError           (1 << 5) /* Received value was neither good data or a token. */
87766 +#define LS_FifoOvFlow0         (1 << 6) /* Channel 0 input fifo overflowed. */
87767 +#define LS_FifoOvFlow1         (1 << 7) /* Channel 1 input fifo overflowed. */
87768 +#define LS_Mod45Changed                (1 << 8) /* Mod45 bit has changed. Error setr to force reset. */
87769 +#define LS_PAckNotSeenError    (1 << 9) /* PAck value not returned for this packet. */
87770 +
87771 +/*
87772 + * Link State Constant defines, used for writing to LinkSetValue
87773 + */
87774 +
87775 +#define LRS_DataDel0           0x0
87776 +#define LRS_DataDel1           0x1
87777 +#define LRS_DataDel2           0x2
87778 +#define LRS_DataDel3           0x3
87779 +#define LRS_DataDel4           0x4
87780 +#define LRS_DataDel5           0x5
87781 +#define LRS_DataDel6           0x6
87782 +#define LRS_DataDel7           0x7
87783 +#define LRS_DataDel8           0x8
87784 +#define LRS_LinkInValue                0x9
87785 +#define LRS_PllDelValue                0xA
87786 +#define LRS_ClockEven          0xB
87787 +#define LRS_ErrorVal8to0       0xC
87788 +#define LRS_ErrorVal17to9      0xD
87789 +#define LRS_ErrorVal26to18     0xE
87790 +#define LRS_ErrorVal35to27     0xF
87791 +#define LRS_NumLinkDels         0x10
87792 +
87793 +#define LRS_Pllfast             0x40
87794 +
87795 +typedef struct _E4_CommandControl
87796 +{
87797 +    volatile E4_uint32 CommandQueueDescsBase;
87798 +    volatile E4_uint32 CommandRequeuePtr;
87799 +} E4_CommandControl;
87800 +
87801 +#define E4_CommandRequeueBusy          0x80000000      /* Test against read value of CommandRequeuePtr */
87802 +#define E4_CommandRequeueHighPri       0x1             /* Will requeue onto the high pri queue */
87803 +#define E4_QueueDescPtrMask            0x7fffffe0
87804 +
87805 +typedef struct _E4_CommandQueueDesc
87806 +{
87807 +    E4_uint64 CQ_QueuePtrs;
87808 +    E4_uint64 CQ_HoldingValue;         /* 32 bit value for 32 bit accesses or OutOfOrderMask*/
87809 +    E4_uint64 CQ_AckBuffers;           /* Space for 32 4 bit ack buffer values. */
87810 +    E4_uint64 CQ_Control;
87811 +} E4_CommandQueueDesc;
87812 +
87813 +/*
87814 + * Rev A - CQ_QueuePtrs
87815 + * [63]                Unused          Should be set to zero.
87816 + * [62:51]     Unused          (reads as top of InsertPtr)
87817 + * [50:35]     CompletedPtr    Completed pointer. This is alligned to a byte address.
87818 + * [34]                Trapped         Will be set if the command has trapped.
87819 + * [33:32]     Size            Size of queue.
87820 + * [31]                Used            Will be set if the descriptor has been changed and written back by the elan.
87821 + * [30:3]      InsertPtr       Insert pointer. This is alligned to a byte address.
87822 + * [2]         TimedOut        Will be set if the queue timedout executing a command.
87823 + * [1]         Priority        When set the queue runs at high priority.
87824 + * [0]         Error           If this becomes set all new data written to the queue is * discarded.
87825 + *
87826 + * Rev B - CQ_QueuePtrs
87827 + * [63]                TimedOut        Will be set if the queue timedout executing a command.
87828 + * [62]                Priority        When set the queue runs at high priority.
87829 + * [61]                QueueType       1=will accept unordered 64 bit PCI writes. 0=will accept ordered 32 or 64 bit PCI writes.
87830 + * [60:51]     Unused          (reads as top of InsertPtr)
87831 + * [50:35]     CompletedPtr    Completed pointer. This is alligned to a byte address.
87832 + * [34]                Trapped         Will be set if the command has trapped.
87833 + * [33:32]     Size            Size of queue.
87834 + * [31]                Used            Will be set if the descriptor has been changed and written back by the elan.
87835 + * [30:3]      InsertPtr       Insert pointer. This is alligned to a byte address.
87836 + * [2]         OrderControl    Holds bit 8 of last PCI accesses. Used by a reordering queue.
87837 + * [1:0]       ErrorType       This field has the current error status of the queue.
87838 + */
87839 +
87840 +/* Common between revA and RevB */
87841 +#define CQ_PtrMask             (0x7ffffff8)                    /* 31 bit sdram address */
87842 +#define CQ_PtrOffsetMask       (0x7fff8)
87843 +#define CQ_PtrBaseMask         (0x7ff80000)
87844 +
87845 +#define CQ_InsertPtrShift      (3 - 3)                         /* InsertPtr is 64 bit aligned */
87846 +#define CQ_SizeShift           (32)
87847 +#  define CQ_Size1K            0
87848 +#  define CQ_Size8K            1
87849 +#  define CQ_Size64K           2
87850 +#  define CQ_Size512K          3
87851 +#  define CQ_SizeMask          3
87852 +
87853 +#define CQ_CompletedPtrShift   (35 - 3)                        /* CompletedPtr is 64 but aligned */
87854 +
87855 +#define CQ_Used                        (1ull << 31)
87856 +#define CQ_Trapped             (1ull << 34)
87857 +
87858 +#define CQ_QueuePtrsValue(Size,Inserter,Completer) \
87859 +       (((E4_uint64) (Size) << CQ_SizeShift) | \
87860 +        ((E4_uint64) (Inserter) << CQ_InsertPtrShift) | \
87861 +        ((E4_uint64) (Completer) << CQ_CompletedPtrShift))
87862 +
87863 +#define CQ_InsertPtr(QueuePtrs) \
87864 +       (((E4_uint64) QueuePtrs) & CQ_PtrMask)
87865 +
87866 +#define CQ_CompletedPtr(QueuePtrs) \
87867 +       (((E4_uint32)((QueuePtrs) >> CQ_CompletedPtrShift) & CQ_PtrOffsetMask) | \
87868 +        (CQ_InsertPtr(QueuePtrs) & CQ_PtrBaseMask))
87869 +
87870 +#define CQ_Size(SizeVal)               (1024 * (1 << ((SizeVal)*3)))
87871 +
87872 +/* Rev A specific */
87873 +#define CQ_RevA_Error                  (1 << 0)
87874 +#define CQ_RevA_Priority               (1 << 1)
87875 +#define CQ_RevA_TimedOut               (1 << 2)
87876 +
87877 +/* Rev B specific */
87878 +#define CQ_RevB_ErrorType(QueuePtr)    ((QueuePtr) & (3 << 0))
87879 +#  define CQ_RevB_NoError              (0ull << 0)
87880 +#  define CQ_RevB_Overflowed           (1ull << 0)
87881 +#  define CQ_RevB_InvalidWriteSize     (2ull << 0)
87882 +#  define CQ_RevB_InvalidWriteOrder    (3ull << 0)
87883 +#define CQ_RevB_OrderControl           (1ull << 2)
87884 +
87885 +#define CQ_RevB_QueueType(QueuePtr)    ((QueuePtr) & (1ull << 61))
87886 +#  define CQ_RevB_ReorderingQueue      (1ull << 61)
87887 +#  define CQ_RevB_32bitWriteQueue      (0ull << 61)
87888 +
87889 +#define CQ_RevB_Priority               (1ull << 62)
87890 +#define CQ_RevB_TimedOut               (1ull << 62)
87891 +
87892 +/* 
87893 + * CQ_AckBuffers - Packet Ack Values
87894 + */
87895 +#define PackOk                 (0x0)
87896 +#define PackTestFail           (0x1)
87897 +#define PackDiscard            (0x2)
87898 +#define PackError              (0x7)
87899 +#define PackTimeout            (0x8)
87900 +#define PackWaiting            (0xF)
87901 +#define PackValue(val,chan)    (((val) >> ((chan) * 4)) & 0xf)
87902 +
87903 +/*
87904 + * CQ_Control
87905 + * [63:35]     ExtractPtr
87906 + * [34]                Unused
87907 + * [33:32]     ChannelNotCompleted
87908 + * [31:24]     Permissions
87909 + * [23:16]     RestartCount            Decremented after each restart. Will trap when zero
87910 + * [15:14]     Unused                  Should be set to zero
87911 + * [13:0]      Context
87912 + */
87913 +#define CQ_Context(Control)            ((E4_uint32) ((Control) >>  0) & 0x3fff)
87914 +#define CQ_RestartCount(Control)       ((E4_uint32) ((Control) >> 16) & 0x7f)
87915 +#define CQ_ChannelNotCompleted(Control)        ((E4_uint32) ((Control) >> 32) & 3)
87916 +#define CQ_ExtractPtr(Control)         ((E4_uint32) ((Control) >> 32) & 0xFFFFFFF8)
87917 +
87918 +#define CQ_RestartCountShift           16
87919 +
87920 +#define CQ_SetEventEnableBit   (1 << 24)
87921 +#define CQ_WaitEventEnableBit  (1 << 25)
87922 +#define CQ_ModifyEnableBit     (1 << 26)
87923 +#define CQ_WriteEnableBit      (1 << 27)
87924 +#define CQ_ThreadStartEnableBit        (1 << 28)
87925 +#define CQ_DmaStartEnableBit   (1 << 29)
87926 +#define CQ_STENEnableBit       (1 << 30)
87927 +#define CQ_InterruptEnableBit  (1 << 31)
87928 +#define CQ_EnableAllBits        (0xFF000000)
87929 +#define CQ_PermissionMask      (0xFF000000)
87930 +
87931 +#define CQ_ControlValue(Cntx, RestartCount, Permissions) \
87932 +       (((Cntx) & 0x3fff) | (((RestartCount) & 0xff) << 16) | ((Permissions) & CQ_PermissionMask))
87933 +
87934 +/*
87935 + * This file describes the slave address map of Elan4.
87936 + *
87937 + * Elan4 has two PCI 64 bit base address registers. One is setup for elan
87938 + * local memory and the other is for the command port, elan registers and ebus.
87939 + *
87940 + * This file describes the command port, elan registers and ebus BAR. This is a
87941 + * 26 bit base address register and is split up as follows:
87942 + * 1 The ebus requires 21 bits of address. 26'h3e00000 to 26'h3ffffff
87943 + * 2 The control regsiters requires 16 bits of address. 26'h3df0000 to 26'h3dfffff
87944 + * 3 The command port has the rest. This give just under 8k command ports or about 123 per
87945 + *   processor of a 64 node SMP.
87946 + */
87947 +
87948 +/* BAR1 contains the command queues followed by the registers and the Ebus - and is 26 bits */
87949 +/* each command queue has an 8K page associated with it */
87950 +#define CQ_CommandMappingSize          (1 << 13)
87951 +#define CQ_NumCommandDescs             ((1 << (26 - 13)))
87952 +#define CQ_CommandDescsAlignment       ((1 << (26 - 13)) * sizeof (E4_CommandQueueDesc))
87953 +
87954 +/* control reg bits i.e. E4_DataBusMap.SysControlReg */
87955 +#define CONT_EN_ALL_SETS               (1ULL << 0) /* enable cache */
87956 +#define CONT_MMU_ENABLE                        (1ULL << 1) /* bit 0 enables mmu */
87957 +#define CONT_CACHE_HASH_TABLE          (1ULL << 2) /* cache up hash table entries */
87958 +#define CONT_CACHE_CHAINS              (1ULL << 3) /* cache up chain entries */
87959 +#define CONT_CACHE_ROOT_CNTX           (1ULL << 4) /* cache root context table for routes and filters. */
87960 +#define CONT_CACHE_STEN_ROUTES         (1ULL << 5) /* cache up sten packet routes */
87961 +#define CONT_CACHE_DMA_ROUTES          (1ULL << 6) /* cache up dma packet routes */
87962 +
87963 +#define CONT_CACHE_NONE                0ULL
87964 +#define CONT_CACHE_ALL         (CONT_CACHE_HASH_TABLE | CONT_CACHE_CHAINS | CONT_CACHE_ROOT_CNTX | \
87965 +                                CONT_CACHE_STEN_ROUTES | CONT_CACHE_DMA_ROUTES)
87966 +
87967 +/* This controls the format size and position of the MMU hash tables. */
87968 +#define CONT_INHIBIT_MAX_CHAIN_ITEMS   (1ULL << 7)     /* Prevents the MaxChainItems value of 1024 from forcing a translation miss */
87969 +#define CONT_TABLE0_MASK_SIZE_SHIFT    8               /* Defines the size of hash table 0 */
87970 +#define CONT_TABLE0_PAGE_SIZE_SHIFT    13              /* Set the page size for hash table 0 */
87971 +#define CONT_TABLE1_MASK_SIZE_SHIFT    16              /* Defines the size of hash table 1 */
87972 +#define CONT_TABLE1_PAGE_SIZE_SHIFT    21              /* Set the page size for hash table 1 */
87973 +#define CONT_TWO_HASH_TABLES           (1ULL << 24)    /* Sets the MMU to use two hash tables. If not set only 0 used. */
87974 +#define CONT_2K_NOT_1K_DMA_PACKETS     (1ULL << 25)    /* Used to select the default DMA packet size. */
87975 +#define CONT_ALIGN_ALL_DMA_PACKETS     (1ULL << 26)    /* Will force all dma packets to be aligned to a page.*/
87976 +#define CONT_DIRECT_MAP_PCI_WRITES     (1ULL << 27)    /* Will force pci writes to write and flush the dcache.*/
87977 +#define CONT_TLB_FLUSH                 (1ULL << 28)    /* Invalidates the TLB and indicates when flushed */
87978 +#define CONT_CLEAR_WALK_WROTE_TABLES   (1ULL << 29)    /* Used to guarantee that the elan is using new PTE values. */
87979 +#define CONT_ROUTE_FLUSH               (1ULL << 30)    /* Invalidates all route cache entries. */
87980 +#define CONT_CLEAR_LINKPORT_INT                (1ULL << 31)    /* Clears the Linkport key fail interrupt. Reads as 0. */
87981 +#define CONT_CLEAR_SDRAM_ERROR         (1ULL << 32)    /* Clears an EEC error interrupt. Reads as 0. */
87982 +
87983 +/*
87984 + * These are extra control bits used for testing the DLLs of the SDRAM interface. Most of the Sdram
87985 + * control bits are defined in xsdram.h
87986 + */
87987 +#define SDRAM_FIXED_DLL_DELAY_SHIFT    47
87988 +#define SDRAM_FIXED_DLL_DELAY_BITS     5
87989 +#define SDRAM_FIXED_DLL_DELAY_MASK     ((1ULL << SDRAM_FIXED_DLL_DELAY_BITS) - 1ULL)
87990 +#define SDRAM_FIXED_DLL_DELAY(Value)   ((SDRAM_FIXED_DLL_DELAY_MASK & (Value)) << SDRAM_FIXED_DLL_DELAY_SHIFT)
87991 +#define SDRAM_FIXED_DELAY_ENABLE       (1ULL << 52)
87992 +#define SDRAM_GET_DLL_DELAY(Value)     (((Value) >> SDRAM_FIXED_DLL_DELAY_SHIFT) & SDRAM_FIXED_DLL_DELAY_MASK)
87993 +
87994 +#define SDRAM_DLL_CORRECTION_FACTOR    3       /* This is to allow for SSO and ringing on the DQ lines */
87995 +
87996 +#define PAGE_SIZE_4K   0x0
87997 +#define PAGE_SIZE_8K   0x1
87998 +#define PAGE_SIZE_64K  0x2
87999 +#define PAGE_SIZE_512K 0x3
88000 +#define PAGE_SIZE_2M   0x4
88001 +#define PAGE_SIZE_4M   0x5
88002 +#define PAGE_SIZE_64M  0x6
88003 +#define PAGE_SIZE_512M 0x7
88004 +
88005 +#define PAGE_SIZE_MASK 0x7
88006 +#define PAGE_MASK_MASK 0x1f
88007 +
88008 +/* control reg bits i.e. E4_DataBusMap.LinkControlReg */
88009 +#define LCONT_REVA_GREEN_LED           (1 << 0)
88010 +#define LCONT_REVA_YELLOW_LED          (1 << 1)
88011 +#define LCONT_REVA_RED_LED             (1 << 2)
88012 +#define LCONT_REVA_ENABLE_LED_DRIVE    (1 << 3) /* Enable manual setting of the Leds to the bits set above. */
88013 +
88014 +#define LCONT_REVB_DISABLE_TLB_PREFETCH        (1 << 0)
88015 +#define LCONT_REVB_DISABLE_CRC_ERROR_CHECKING  (1 << 1)
88016 +
88017 +
88018 +#define LCONT_EN_SYS_WRITES            (1 << 4) /* Enable linkport writes to sys registers. i.e. all of E4_DataBusMap. */
88019 +#define LCONT_EN_SYS_READS             (1 << 5) /* Enable linkport reads from sys registers. i.e. all of E4_DataBusMap. */
88020 +#define LCONT_EN_USER_WRITES           (1 << 6) /* Enable linkport writes to user registers. i.e. all of E4_User_Regs. */
88021 +#define LCONT_EN_USER_READS            (1 << 7) /* Enable linkport reads from user registers. i.e. all of E4_User_Regs. */
88022 +
88023 +#define LCONT_TEST_VALUE_MASK          0x3ff    /* Value used for test writes and link boundary scan. */
88024 +#define LCONT_TEST_VALUE_SHIFT         8
88025 +#define LCONT_TEST_VALUE(Value)                ((LCONT_LINK_STATE_MASK & (Value)) << LCONT_TEST_VALUE_SHIFT)
88026 +
88027 +/*
88028 + * State read from LINK_STATE when TEST_VALUE is set to the following values.
88029 + * TEST_VALUE    LINK_STATE read       TEST_VALUE        LINK_STATE read
88030 + *    000     -   Data delay count 0      008       -  Data delay count 8
88031 + *    001     -   Data delay count 1      009       -  Link in value
88032 + *    002     -   Data delay count 2      00a       -  PLL delay
88033 + *    003     -   Data delay count 3      00b       -  Clock Delay
88034 + *    004     -   Data delay count 4      00c       ?  ErrorVal8to0
88035 + *    005     -   Data delay count 5      00d       ?  ErrorVal17to9
88036 + *    006     -   Data delay count 6      00e       ?  ErrorVal26to18
88037 + *    007     -   Data delay count 7      00f       ?  ErrorVal35to27
88038 + */
88039 +
88040 +#define LCONT_TEST_CONTROL_MASK                0x3     /* Selects and controls the action of the LINK_STATE value. */
88041 +#define LCONT_TEST_CONTROL_SHIFT       18
88042 +
88043 +#define LCONT_READ_ERRORS              0       /* {Mod45RequestChanged, FifoOverflowError, DataError, PhaseError,
88044 +                                                *      DeskewError, LockError, Locked, LinkNotReady} */
88045 +#define LCONT_READ_STATE               1       /* Read valus addressed by TEST_CONTROL value */
88046 +#define LCONT_FIX_LINK_DELAYS          2       /* Sets delays to TEST_CONTROL value */
88047 +#define LCONT_BOUNDARY_SCAN            3       /* Puts link into boundary scan. Outputs TEST_CONTROL value to link,
88048 +                                                * reads LINK_STATE from link. */ 
88049 +
88050 +#define LCONT_LINK_STATE_MASK          0x3ff   /* Read only */
88051 +#define LCONT_LINK_STATE_SHIFT         20      /* Read only */
88052 +#define LCONT_LINK_STATE(ControlRegValue)      (LCONT_LINK_STATE_MASK & ((ControlRegValue) >> LCONT_LINK_STATE_SHIFT))
88053 +
88054 +/* control reg bits i.e. E4_DataBusMap.LinkContSettings */
88055 +#define LCONT_MOD45_DISABLE            (1 << 0) /* is set the link will try to run in TNB mode. */
88056 +#define LCONT_CONFIG_PHASE_MASK                0x7     /* This set the delay through the phase alignment buffer. */
88057 +#define LCONT_CONFIG_PHASE_SHIFT       1
88058 +
88059 +#define LCONT_PLL_REF_VAL_BITS_MASK    0x7f    /* This is the divide value on the LinkIn clock to form the comms PLL */
88060 +#define LCONT_PLL_REF_VAL_BITS_SHIFT   4       /* reference clock. Div value is (n - 2). e.g. to Divide by 7 set to 5. */
88061 +
88062 +#define LCONT_FORCE_COMMSCLK_LOCAL     (1 << 11) /* This must be set at one end of a back to back Elan configuration. */
88063 +#define LCONT_LVDS_VOLTAGE_BITS_MASK   0x3     /* This is used to set the voltage swing on the LVDS link output pads. */
88064 +#define LCONT_LVDS_VOLTAGE_BITS_SHIFT  12      /* reference clock. Div value is (n - 2). e.g. to Divide by 7 set to 5. */
88065 +
88066 +#define LCONT_VOD_170                  0       /* Approximate differential voltage swing in mV of link outputs into */
88067 +#define LCONT_VOD_360                  1       /* a 100 ohm diferential load. */
88068 +#define LCONT_VOD_460                  2
88069 +#define LCONT_VOD_550                  3
88070 +
88071 +#define LCONT_LVDS_TERMINATION_MASK    0x3     /* This set the resistor values of the internal single ended termation */
88072 +#define LCONT_LVDS_TERMINATION_SHIFT   14      /* resistors of the link input and comms input clcok. */
88073 +
88074 +#define LCONT_TERM_55_OHM              0       /* Resistor values for internal termination of LVDS pads. */
88075 +#define LCONT_TERM_50_OHM              1
88076 +#define LCONT_TERM_AUTO_OHM            2       /* Should normally be set to auto. */
88077 +#define LCONT_TERM_45_OHM              3
88078 +
88079 +#define LCONT_LVDS_EN_TERM_UPDATE      (1 << 47) /* This should be asserted and deasserted if LCONT_LVDS_TERMINATION is changed. */
88080 +
88081 +/* Macros used to access and construct MMU hash table and chain entries. */
88082 +/*
88083 + * Each hash entry is made up of a 64 byte block. Each entry hash two tags where each
88084 + * tag has 4 PTE's. PTE's 0 to 2 use the bottom 48 bits of a 64 bit word and PTE 3
88085 + * uses the top 16 bits of 3 64 bit words.
88086 + *
88087 + * These macros can be used to build a single PTE. PTE3 needs to be built into a 48 bit
88088 + * object before they can be used.
88089 + */
88090 +#define PTE_ENTRY_MASK         0x0000ffffffffffffULL
88091 +#define PTE_TYPE_MASK          0x000000000000000fULL   
88092 +#define PTE_PERM_MASK          0x00000000000000f0ULL
88093 +#define PTE_PERM_TYPE_MASK     0x00000000000000ffULL
88094 +#define PTE_REF_MASK           0x0000000000000100ULL
88095 +#define PTE_PPN_MASK           0x00007ffffffffe00ULL
88096 +#define PTE_MOD_MASK           0x0000800000000000ULL
88097 +#define PTE_TOPADDR_MASK       0x0000600000000000ULL
88098 +
88099 +#define PTE_MOD_SHIFT          47
88100 +#define PTE_PPN_SHIFT          9
88101 +#define PTE_REF_SHIFT          8
88102 +#define PTE_PERM_SHIFT         4
88103 +#define PTE_TYPE_SHIFT         0
88104 +
88105 +#define PTE_PADDR_SHIFT                (12 - 9)                /* Physical addresses are shifted down 3 this to go into the PTE */
88106 +
88107 +
88108 +/* Values required for tag 3 */
88109 +#define PTE_REF_3                      0x0100000000000000ULL
88110 +#define PTE_MOD_3                      0x8000000000000000ULL
88111 +#define PTE_ENTRY_MASK_3               0xffff000000000000ULL
88112 +#define PTE_PERM_TYPE_MASK_3           0x00ff000000000000ULL
88113 +#define PTE_ENTRY_3_FOR_0(NewPte)      ((NewPte << (48)) & PTE_ENTRY_MASK_3)
88114 +#define PTE_ENTRY_3_FOR_1(NewPte)      ((NewPte << (32)) & PTE_ENTRY_MASK_3)
88115 +#define PTE_ENTRY_3_FOR_2(NewPte)      ((NewPte << (16)) & PTE_ENTRY_MASK_3)
88116 +
88117 +/* Values required for the tags */
88118 +#define TAG_CONTEXT_MASK               0x0000000000003fffULL
88119 +#define TAG_ADDRESS_MASK               0xfffffffff8000000ULL
88120 +#define TAG_CHAINPTR_18TO6_MASK                0x0000000007ffc000ULL
88121 +#define TAG_CHAINPTR_LOW_SHIFT         (14 - 6)
88122 +#define TAG_CHAINPTR_30TO19_MASK       0x0000000003ffc000ULL
88123 +#define TAG_CHAINPTR_HIGH_SHIFT                (19 - 14)
88124 +#define TAG_COPY_BIT                   0x0000000004000000ULL
88125 +
88126 +/*
88127 + * This takes number loaded into the control register and returns the page size as a power of two.
88128 + */
88129 +
88130 +#define E4_PAGE_SIZE_TABLE             E4_uint32 const PageSizeTable[] = {12, 13, 16, 19, 21, 22, 26, 29}
88131 +#define E4_PAGE_SIZE_TABLE_SIZE                (sizeof(PageSizeTable)/sizeof(PageSizeTable[0]))
88132 +
88133 +/*
88134 + * This macro generates a hash block index.
88135 + *
88136 + * Cntx                 This is the 14 bit context. It should not be larger than 14 bits.
88137 + * VAddr        This is the 64 bit virtual address. It does not require any masking and can be a byte address.
88138 + * PageSize     This is the value loaded into the control register for this hash table.
88139 + * HashTableMask This should be set mask out upper bits past the end of the hash table.
88140 + */
88141 +#define E4MMU_SHIFT_ADDR(VAddr, Shift) \
88142 +    ((((E4_uint32)(VAddr)) >> (Shift)) | (((E4_uint32)((VAddr) >> 32)) << (32 - (Shift))))
88143 +
88144 +#define E4MMU_CONTEXT_SCRAMBLE(Cntx) \
88145 +             ((((Cntx) << 8) | ((Cntx) >> 6)) ^ (((Cntx) << 15) | ((Cntx) << 1)))
88146 +
88147 +#define E4MMU_HASH_INDEX(Cntx, VAddr, PageShift, HashTableMask)                \
88148 +           ((E4MMU_SHIFT_ADDR(VAddr, (PageShift) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(Cntx)) & (HashTableMask))
88149 +
88150 +#define E4MMU_TAG(vaddr,ctx)   (((vaddr) & TAG_ADDRESS_MASK) | ((ctx) & TAG_CONTEXT_MASK))
88151 +
88152 +#define E4MMU_TAG2VADDR(tag,hashidx,PageShift,HashTableMask)   \
88153 +               (((tag) & TAG_ADDRESS_MASK) | ((((hashidx) ^ E4MMU_CONTEXT_SCRAMBLE((tag) & TAG_CONTEXT_MASK)) & (HashTableMask)) << ((PageShift + 2))))
88154 +
88155 +/*
88156 + * Detailed bit descriptions for the tags and PTE's are better done with the macros
88157 + * defined above.
88158 + */
88159 +typedef struct _E4_HashTableEntry
88160 +{
88161 +   E4_uint64   Tag[2];
88162 +   E4_uint64   TagPTE[2][3];
88163 +} E4_HashTableEntry;
88164 +
88165 +#define E4MMU_TAG_OFFSET(tag)          ((tag) << 3)
88166 +#define E4MMU_PTE_LOW_OFFSET(tag,pte)  ((((tag)*3 + (pte) + 2) << 3))
88167 +#define E4MMU_PTE_HIGH_OFFSET(tag,pte) ((((tag)*3 + (pte) + 2) << 3) + 4)
88168 +#define E4MMU_PTE3_WORD0_OFFSET(tag)   ((((tag)*3 + 2) << 3) + 6)
88169 +#define E4MMU_PTE3_WORD1_OFFSET(tag)   ((((tag)*3 + 3) << 3) + 6)
88170 +#define E4MMU_PTE3_WORD2_OFFSET(tag)   ((((tag)*3 + 4) << 3) + 6)
88171 +
88172 +
88173 +/*
88174 + * Hash0AddrBits is the size of the hash table in bytes as a power of 2.
88175 + * e.g. 11 would give 32 hash entries where each entry is 64 bytes.
88176 + */
88177 +#define SETUP_HASH_TABLES(Hash0PageSize, Hash0AddrBits, Hash1PageSize, Hash1AddrBits)  \
88178 +                         (((Hash0PageSize) << CONT_TABLE0_PAGE_SIZE_SHIFT) |   \
88179 +                          ((Hash0AddrBits) << CONT_TABLE0_MASK_SIZE_SHIFT) |   \
88180 +                          ((Hash1PageSize) << CONT_TABLE1_PAGE_SIZE_SHIFT) |   \
88181 +                          ((Hash1AddrBits) << CONT_TABLE1_MASK_SIZE_SHIFT))
88182 +
88183 +/* ECC status register */
88184 +#define ECC_Addr(s)                    ((s) & 0x7ffffff8ULL)
88185 +#define ECC_Syndrome(s)                        (((s) >> 32) & 0xffffULL)
88186 +#define ECC_RisingDQSSyndrome(s)       (((s) >> 32) & 0xffULL)
88187 +#define ECC_FallingDQSSyndrome(s)      (((s) >> 40) & 0xffULL)
88188 +#define ECC_UncorrectableErr(s)        (((s) >> 48) & 1ULL)
88189 +#define ECC_MultUncorrectErrs(s)       (((s) >> 49) & 1ULL)
88190 +#define ECC_CorrectableErr(s)          (((s) >> 50) & 1ULL)
88191 +#define ECC_MultCorrectErrs(s)         (((s) >> 51) & 1ULL)
88192 +
88193 +/* Permission type saved in a PTE. This is a four bit field */
88194 +#define PERM_Disabled          0x0
88195 +#define PERM_Unused            0x1
88196 +#define PERM_LocDataRead       0x2
88197 +#define PERM_LocDataWrite      0x3
88198 +#define PERM_LocRead           0x4
88199 +#define PERM_LocExecute                0x5
88200 +#define PERM_ReadOnly          0x6
88201 +#define PERM_LocWrite          0x7
88202 +#define PERM_LocEventOnly      0x8
88203 +#define PERM_LocEventWrite     0x9
88204 +#define PERM_RemoteEvent       0xa
88205 +#define PERM_RemoteAll         0xb
88206 +#define PERM_RemoteReadOnly    0xc
88207 +#define PERM_RemoteWriteLocRead        0xd
88208 +#define PERM_DataReadWrite     0xe
88209 +#define PERM_NoFault           0xf
88210 +
88211 +#define PERM_Mask              0xf
88212 +
88213 +/* Permission type hints to device driver */
88214 +#define PERM_Preload           0x10
88215 +
88216 +#define PTE_SetPerm(Perm)      (((Perm) & PERM_Mask) << 4)
88217 +
88218 +/* Control info saved in the lookup field of the TLB */
88219 +#define PTE_PciNotLocal                (1ULL << 0)             /* Directs the access to the PCI interface */
88220 +#define PTE_BigEndian          (1ULL << 1)             /* Valid for PCI entries only */
88221 +#define PTE_RelaxedOrder       (1ULL << 2)             /* Valid for PCI entries only */
88222 +#define PTE_DontSnoop          (1ULL << 3)             /* Valid for PCI entries only */
88223 +
88224 +#define PTE_UseFixedSet                (1ULL << 1)             /* Value for non PCI entries only */
88225 +#define PTE_CommandQueue       (1ULL << 2)             /* Value for non PCI entries only */
88226 +#define PTE_SetFixedSetNo(Set) ((((Set) & 3) << 2) | PTE_UseFixedSet)
88227 +
88228 +#define PTE_TypeBitsMask       (0xfULL)
88229 +#define PTE_PermissionTypeMask (0xfULL << 4)
88230 +#define PTE_Referenced         (1ULL << 8)
88231 +#define PTE_PhysicalPageNoMask (0x7ffffffffe00ULL)
88232 +#define PTE_Modified           (1ULL << 47)
88233 +
88234 +#define PTE_PhysicalAddrShiftIntoPTE   (12 - 9)
88235 +
88236 +/* define page table entry bit fields */
88237 +#define TLB_PageSizeBits       (3 << 0)
88238 +#define TLB_ACCBits            (7 << 2)
88239 +#define TLB_LocalBit           (1 << 5)
88240 +#define TLB_PCI64BitTargetBit  (1 << 6)
88241 +#define TLB_PCIBigEndianBit    (1 << 7)
88242 +
88243 +#define TLB_ModifiedBit                (1 << 55)
88244 +#define TLB_ReferencedBit      (1 << 63)
88245 +
88246 +/* Used to read values from the tlb. */
88247 +#define TLB_TlbReadCntBitsSh   56
88248 +#define TLB_UseSelAddrSh       (1ULL << 60)
88249 +#define TLB_WriteTlbLine       (1ULL << 61)
88250 +
88251 +#define TLB_SEL_LINE(LineNo) (TLB_UseSelAddrSh | \
88252 +                             ((E4_uint64)((LineNo) & 0xf) << TLB_TlbReadCntBitsSh))
88253 +
88254 +#define TLB_NUM_ENTRIES                16
88255 +/*
88256 + * The following macros are used with the test access port (TlbLineValue) for the TLBs.
88257 + */
88258 +#define TLV_DoPciAccess                        (1ULL << 0)
88259 +#define TLV_CommandAccess              (1ULL << 1)
88260 +#define TLV_DoCacheAccess              (1ULL << 2)
88261 +#define TLV_notStartTLBWalk            (1ULL << 3)
88262 +#define TLV_UseFixedSet                        (1ULL << 4)
88263 +#define TLV_BigEndian                  (1ULL << 4)
88264 +#define TLV_RelaxedOrder               (1ULL << 5)
88265 +#define TLV_DontSnoop                  (1ULL << 6)
88266 +#define TLV_FixedSetNo_MASK            (3ULL << 5)
88267 +#define TLV_PciTypeBits_MASK           (7ULL << 4)
88268 +#define TLV_LookupBits_MASK            (0x7fULL)
88269 +#define TLV_MissErr                    (1ULL << 7)
88270 +#define TLV_TypeBits                   (0xffULL)
88271 +
88272 +#define TLV_PhysicalAddr_MASK          (0x3fffffffff000ULL)
88273 +
88274 +#define TLV_TlbTesting                 (1ULL << 51)
88275 +#define TLV_SelectUnitsTlbRead         (1ULL << 52)
88276 +#define TLV_SelectTProcTlbRead         (1ULL << 53)
88277 +
88278 +#define TLV_TlbLineSelect_MASK         (0xf)
88279 +#define TLV_UnitsTlbLineSelect_SHIFT   (54)
88280 +#define TLV_TProcTlbLineSelect_SHIFT   (59)
88281 +#define TLV_EnableUnitsTlbRead         (1ULL << 58)
88282 +#define TLV_EnableTProcTlbRead         (1ULL << 63)
88283 +
88284 +/*
88285 + * Use this macro to enable direct testing of the Units TLB.
88286 + * When Line is in the range 0 to 15 a TLB line is selected for reading or writing.
88287 + * When Line is set to -1 the tlb will be activated to perform a match.
88288 + */
88289 +#define TLV_UnitsTlbLineSel(Line) (((Line) == -1) ? 0ULL : \
88290 +    (TLV_EnableUnitsTlbRead | ((E4_uint64)((Line) & TLV_TlbLineSelect_MASK) << TLV_UnitsTlbLineSelect_SHIFT)))
88291 +#define TLV_TProcTlbLineSel(Line) (((Line) == -1) ? 0ULL : \
88292 +    (TLV_EnableTProcTlbRead | ((E4_uint64)((Line) & TLV_TlbLineSelect_MASK) << TLV_TProcTlbLineSelect_SHIFT)))
88293
88294 +/* 
88295 + * Thread_Trap_State
88296 + *  see f_RegFileControl.v TProcStatus
88297 + */
88298 +#define TS_HaltThread                (1 << 0)
88299 +#define TS_TrapForTooManyInstructions (1 << 1)
88300 +#define TS_InstAccessException       (1 << 2)
88301 +#define TS_Unimplemented             (1 << 3)
88302 +#define TS_DataAccessException       (1 << 4)
88303 +#define TS_DataAlignmentError        (1 << 5)
88304 +#define TS_TrapForUsingBadData       (1 << 6)
88305 +#define TS_TrapTypeMask                      (0x7f)
88306 +#define TS_DataPortNo(ts)            (((ts) >> 7) & 7)
88307 +#define TS_TrappedFlag               (1 << 10)
88308 +#define TS_MemLock                   (1 << 11)
88309 +#define TS_XCCshift                  12
88310 +#define TS_XCCmask                   0xff
88311 +#define TS_ICC(ts)                   (((ts) >> 12) & 15)
88312 +#define TS_XCC(ts)                   (((ts) >> 16) & 15)
88313 +#define TS_InstValid_F               (1 << 20)
88314 +#define TS_InstValid_R               (1 << 21)
88315 +#define TS_InstValid_E               (1 << 22)
88316 +#define TS_InstValid_W               (1 << 23)
88317 +#define TS_HighPriority                      (1 << 24)
88318 +#define TS_RemoteThread                      (1 << 25)
88319 +#define TS_TProcTranslationInProgress (1 << 26)
88320 +#define TS_MemLock_E                 (1 << 27)
88321 +
88322 +/* Thread run queue entries */
88323 +typedef struct E4_ThreadRegs
88324 +{
88325 +    E4_uint64 Registers[7];
88326 +} E4_ThreadRegs;
88327 +
88328 +typedef struct E4_TProcQueueEntry
88329 +{
88330 +    E4_ThreadRegs      Regs;                   /* XXXX: jon check this */
88331 +    E4_uint64          Context;                /* XXXX: jon check this */
88332 +} E4_TProcQueueEntry;
88333 +
88334 +typedef struct E4_DProcQueueEntry
88335 +{
88336 +    E4_DMA             Desc;
88337 +    E4_uint64          Pad;
88338 +} E4_DProcQueueEntry;
88339 +
88340 +/*
88341 + * Packet acknowledge values.
88342 + */
88343 +#define E4_PAckOk      0
88344 +#define E4_PAckTestFail        1
88345 +#define E4_PAckDiscard 2
88346 +#define E4_PAckError   3
88347 +
88348 +/*
88349 + * return values from breaktest instruction.
88350 + */
88351 +#define ICC_CARRY_BIT           (0x1ULL << 0)  /* Breaktest: Load pending         */
88352 +#define ICC_ZERO_BIT            (0x1ULL << 1)  /* Breaktest: Time to break        */
88353 +#define ICC_SIGNED_BIT          (0x1ULL << 2)  /* Breaktest: Another thread ready */
88354 +#define ICC_TPROC_RDY_LOW_PRI   (0x1ULL << 3)
88355 +#define ICC_TPROC_RDY_HIGH_PRI  (0x1ULL << 4)
88356 +#define ICC_RUNNING_HIGH_PRI    (0x1ULL << 5)
88357 +#define ICC_RUNNING_AS_REMOTE   (0x1ULL << 6)
88358 +#define ICC_TIME_TO_BREAK       (0x1ULL << 7)
88359 +#define ICC_RS1LOAD_PENDING     (0x1ULL << 8)
88360 +#define ICC_TPROC_HALT          (0x1ULL << 9)
88361 +
88362 +/*
88363 + * Main Interrupt cookies
88364 + * [63:14]     user cookie
88365 + * [13:0]      context
88366 + */
88367 +#define E4_MAIN_INT_SHIFT              14
88368 +#define E4_MAIN_INT_COOKIE(cookie)     ((cookie) >> E4_MAIN_INT_SHIFT)
88369 +#define E4_MAIN_INT_CTX(cookie)                ((cookie) & 0x3FFF)
88370 +
88371 +typedef E4_uint64 E4_MainIntEntry;
88372 +
88373 +#define E4_MainIntEntrySize    sizeof (E4_MainIntEntry)
88374 +
88375 +/*
88376 + * The internal databus is 64 bits wide.
88377 + * All writes to the internal registers MUST be made with 64 bit write operations.
88378 + * These can be made up of pairs 32 bit writes on the PCI bus. The writes will be
88379 + * treated as nops if they are performed with two separate 32 bit writes.
88380 + */
88381 +typedef volatile struct _E4_DataBusMap
88382 +{
88383 +   E4_uint64           InputTrans[4][16];                                                                      /* 0x000 */
88384 +
88385 +   E4_uint64           Dma0TransAddr;                                                                          /* 0x200 */
88386 +   E4_DMA              Dma0Desc;       /* Current Dma0 registers */                                            /* 0x208 */
88387 +
88388 +   E4_uint64           Dma1TransAddr;                                                                          /* 0x240 */
88389 +   E4_DMA              Dma1Desc;       /* Current Dma1 registers */                                            /* 0x248 */
88390 +  
88391 +   E4_uint64           Dma0LastPacketSize;                                                                     /* 0x280 */
88392 +   E4_uint64           Dma0ThisPacketSize;                                                                     /* 0x288 */
88393 +   E4_uint64           Dma0DescSizeInProg;                                                                     /* 0x290 */
88394 +   E4_uint64           Dma0BytesToPrefetch;                                                                    /* 0x298 */
88395 +   E4_uint64           Dma0PrefetchAddr;                                                                       /* 0x2a0 */
88396 +   E4_uint64           EventCountAndType;                                                                      /* 0x2a8 */
88397 +   E4_uint64           EventParameters[2];                                                                     /* 0x2b0 */
88398 +  
88399 +   E4_uint64           Dma1LastPacketSize;                                                                     /* 0x2c0 */
88400 +   E4_uint64           Dma1ThisPacketSize;                                                                     /* 0x2c8 */
88401 +   E4_uint64           Dma1DescSizeInProg;                                                                     /* 0x2d0 */
88402 +   E4_uint64           Dma1BytesToPrefetch;                                                                    /* 0x2d8 */
88403 +   E4_uint64           Dma1PrefetchAddr;                                                                       /* 0x2e0 */
88404 +   E4_Input_Ptrs       InputTrapAndFilter;                                                                     /* 0x2e8 */
88405 +   E4_uint64           EventAddress;                                                                           /* 0x2f0 */
88406 +   E4_QueuePtr         MainIntQueuePtrs;                                                                       /* 0x2f8 */
88407 +   
88408 +   E4_uint64           Event_Copy[16];                                                                         /* 0x300 */
88409 +
88410 +   E4_uint64           CommandCopy[7];                                                                         /* 0x380 */
88411 +   E4_uint64           CommandHold;                                                                            /* 0x3b8 */
88412 +
88413 +   E4_uint64           InputQueueDesc[4];                                                                      /* 0x3c0 */
88414 +
88415 +   /* Run queue Pointers */
88416 +   E4_uint64           DProcLowPriPtrs;                                                                        /* 0x3e0 */
88417 +   E4_uint64           DProcHighPriPtrs;                                                                       /* 0x3e8 */
88418 +   E4_uint64           TProcLowPriPtrs;                                                                        /* 0x3f0 */
88419 +   E4_uint64           TProcHighPriPtrs;                                                                       /* 0x3f8 */
88420 +
88421 +   E4_uint64           CProcStatus;                                                                            /* 0x400 */
88422 +   E4_uint64           TProcStatus;                                                                            /* 0x408 */
88423 +   E4_uint64           IProcStatus;                                                                            /* 0x410 */
88424 +   E4_uint64           EProcStatus;                                                                            /* 0x418 */
88425 +   E4_uint64           DProc0Status;                                                                           /* 0x420 */
88426 +   E4_uint64           DProc1Status;                                                                           /* 0x428 */
88427 +   E4_Sched_Status     SchedStatus;                                                                            /* 0x430 */
88428 +
88429 +   E4_uint64           LoadIProcCntxFilter;    /* Will load one of 4 cntx filter regs. Write only */           /* 0x438 */
88430 +
88431 +   E4_CommandControl   CommandControl;                                                                         /* 0x440 */
88432 +   E4_uint64           CommandCacheTestPort;                                                                   /* 0x448 */
88433 +   E4_uint64           CommandLowPriRunPtrs;                                                                   /* 0x450 */
88434 +   E4_uint64           CommandHighPriRunPtrs;                                                                  /* 0x458 */
88435 +   E4_uint64           CommandSchedDataPort[4];                                                                /* 0x460 */
88436 +
88437 +   E4_uint64           DmaRouteBuffer[2][2];   /* Write only. Should not be written to. */                     /* 0x480 */
88438 +   E4_uint64           StenRouteBuffer[2];     /* Write only. Should not be written to. */                     /* 0x4a0 */
88439 +   E4_uint64           pad4[0x098 - 0x096];                                                                    /* 0x4b0 */
88440 +
88441 +   E4_uint64           DmaAlignmentPort[8];    /* Write only. Should only be written to clear the prev reg. */ /* 0x4c0 */
88442 +
88443 +   E4_uint64           MmuBlockEntry[8];       /* Used for hash table and chain fetches */                     /* 0x500 */
88444 +   E4_uint64           WriteUnitsTlbLine[3];                                                                   /* 0x550 */
88445 +   E4_uint64           pad5;                                                                                   /* 0x540 */
88446 +   E4_uint64           WriteTProcTlbLine[3];                                                                   /* 0x568 */
88447 +   E4_uint64           pad6;                                                                                   /* 0x540 */
88448 +
88449 +   E4_uint64           MmuTableBasePtrs;       /* Both tables packed into a single 64 bit value */             /* 0x580 */
88450 +   E4_uint64           MmuFaultAndRootCntxPtr; /* Both packed into a single 64 bit value */                    /* 0x588 */
88451 +   E4_uint64           UnitsVAddr;                                                                             /* 0x590 */
88452 +   E4_uint64           TProcVAddr;                                                                             /* 0x598 */
88453 +   E4_uint64           UnitsCntx;                                                                              /* 0x5a0 */
88454 +   E4_uint64           TProcCntx;              /* Read only. Writes access VProcCacheWritePort */              /* 0x5a8 */
88455 +   E4_uint64           FaultAddrReg;                                                                           /* 0x5b0 */
88456 +   E4_uint64           FaultTypeAndContextReg;                                                                 /* 0x5b8 */
88457 +
88458 +   E4_uint32           SysControlReg;                                                                          /* 0x5c0 */
88459 +   E4_uint32           CacheTagValue;                                                                          /* 0x5c4 */
88460 +   E4_uint64           TlbLineValue;                                                                           /* 0x5c8 */
88461 +   E4_uint64           SDRamConfigReg;                                                                         /* 0x5d0 */
88462 +   E4_uint32           InterruptMask;                                                                          /* 0x5d8 */
88463 +   E4_uint32           InterruptReg;                                                                           /* 0x5dc */
88464 +   E4_uint64           SDRamECCStatus;                                                                         /* 0x5e0 */
88465 +   E4_uint32           LinkControlReg;                                                                         /* 0x5e8 */
88466 +   E4_uint32           LinkContSettings;                                                                       /* 0x5ec */
88467 +   E4_uint64           LinkPortKey;                                                                            /* 0x5f0 */
88468 +   E4_uint64           LinkPortLock;                                                                           /* 0x5f8 */
88469 +
88470 +   E4_uint64           SDRamWriteBuffer[4][8];                                                                 /* 0x600 */
88471 +   E4_uint64           SDRamReadBuffer[4][8];                                                                  /* 0x700 */
88472 +
88473 +   E4_uint64           TProcRegs[64];                                                                          /* 0x800 */
88474 +   E4_uint64           TProcStartUp[8];        /* Not to be used except by the elan itself */                  /* 0xa00 */
88475 +
88476 +   E4_uint64           LoadPending;                                                                            /* 0xa40 */
88477 +   E4_uint64           StortPending;                                                                           /* 0xa48 */
88478 +   E4_uint64           DirtyBits;                                                                              /* 0xa50 */
88479 +   E4_uint64           BadBits;                                                                                /* 0xa58 */
88480 +
88481 +   E4_uint64           ICachePort_Cntl_Addr;                                                                   /* 0xa60 */
88482 +   E4_uint64           Thread_Trap_State;                                                                      /* 0xa68 */
88483 +
88484 +/* Instruction buffer (4 * 32 bit words) */
88485 +   E4_uint64           nPC_W;                                                                                  /* 0xa70 */
88486 +   E4_uint64           PC_W;                                                                                   /* 0xa78 */
88487 +
88488 +   E4_uint64           ICacheFillData[8];                                                                      /* 0xa80 */
88489 +   E4_uint64           ICachePort[8];                                                                          /* 0xac0 */
88490 +
88491 +   E4_uint64           PciDataBufs[4][8];                                                                      /* 0xb00 */
88492 +
88493 +   E4_uint64           CommandQueueBuffer[128];                                                                /* 0xc00 */
88494 +} E4_DataBusMap;
88495 +
88496 +#define LINK_PORT_LOCK_VALUE   0x123456789abcdef0ULL
88497 +
88498 +/*
88499 + * These macros are used to setup the thread processor's ICache.
88500 + */
88501 +#define E4_ICacheTagAddrShift          6
88502 +#define E4_AccessICacheRams            1
88503 +#define E4_InvalidTagValue             0xffffffffffffffffULL
88504 +#define E4_ICacheSizeInBytes           (1024*16)
88505 +#define E4_ICacheLineSizeInBytes       (64)
88506 +#define E4_ICacheLines                 (E4_ICacheSizeInBytes/E4_ICacheLineSizeInBytes)
88507 +#define E4_ICachePortSize              ( (sizeof((E4_DataBusMap *) 0)->ICachePort) /   \
88508 +                                         (sizeof((E4_DataBusMap *) 0)->ICachePort[0]))
88509 +
88510 +#define E4_ICacheFixupInsn             0xc0b02f95ull           /* st1 [%r0 +  0xf95] */
88511 +#define E4_ICacheFixupAddr             0xf95ull
88512 +#define E4_ICacheFixupOffset           0xfc0
88513 +
88514 +/*
88515 + * Event interrupt
88516 + */
88517 +typedef volatile union _E4_EventInt
88518 +{
88519 +   E4_uint64    ForceAlign;
88520 +   struct {
88521 +       E4_uint32 IntCookie;
88522 +       E4_uint32 EventContext; /* Bits 16 to 28 */
88523 +    } s;
88524 +} E4_EventInt;
88525 +
88526 +/*
88527 + * The following are used to interpret a fault status register.
88528 + */
88529 +
88530 +/*
88531 + * FSR[14:0] - AccessType
88532 + *
88533 + * T = Type bit
88534 + * S = size bit. Size is in units of 64 bits or 8 bytes.
88535 + * E = Byte end pointer. Used to define the last written byte of the last 64 bits written.
88536 + * D = Data type bit. Used for endian conversion in the PCI interface.
88537 + * C = Used by the cache to decide if this access should allocate a cache line.
88538 + * d = Set if dma read or write data. This is used to guarantee order at the PCI interface.
88539 + * A = Access type used to check permissions by the MMU in a virtual access.
88540 + * P = Part Write. If set some byte enables may be used. Effects the action of a cache miss.
88541 + */
88542 +
88543 +/* FSR[7:0] */
88544 +/* bit 7 => virtual write */
88545 +#define AT_VirtualWriteAccBit          (1 << 7)                /* AAADDdC1EEESSSS = Virtual Write */
88546 +#define AT_VirtualWriteSizeMask                0xf                     /* size of write access (0 => 128 bytes) */
88547 +#define AT_VirtualWriteEndPtrShift     4                       /* end byte pointer for part write block */
88548 +#define AT_VirtualWriteEndPtrMask      0x7
88549 +
88550 +/* else bit 6 => virtual read */
88551 +#define AT_VirtualReadAccBit           (1 << 6)                /* AAADDdC01SSSSSS = Virtual Read */
88552 +#define AT_VirtualReadSizeMask         0x3f                    /* size of read access (0 => 512 bytes) */
88553 +
88554 +/* else => special access */
88555 +#define AT_SelBitsMask                 0xf                     /* Bits to select the type of access from */
88556 +#define AT_SelBitsShift                        0x4
88557 +#define AT_SpecialRd                   (0x0 << 4)              /* AAADDdC0000TTTT = Special read Access */
88558 +#define AT_SpecialWr                   (0x1 << 4)              /* AAADDdC0001TTTT = Special write Access */
88559 +#define AT_PhysicalRd                  (0x2 << 4)              /* AAADDdC00100SSS = Physical Read */
88560 +#define AT_PhysicalWr                  (0x3 << 4)              /* AAADDdC0011PSSS = Physical write */
88561 +
88562 +#define AT_OtherSizeMask               0xf                     /* Size bits used by all other accesses. 0=128 bytes */
88563 +#define AT_SpecialBitsMask             0xf                     /* Bits used to define the special access types */
88564 +#define AT_CacheSizeBitsMask           0x7                     /* Size bits used for local accesses. 0=64 */
88565 +#define AT_CachePhysPartWriteBit       0x8                     /* This bit is set if the access is a part write to the cache */
88566 +
88567 +/* Special memory access operations */
88568 +#define AT_RegAccess                   0x0
88569 +#define AT_GetCntxFilter               0xe                     /* Only used by special reads */
88570 +#define AT_RouteFetch                  0xf                     /* Only used by special reads */
88571 +
88572 +/* FSR[9:8] */
88573 +#define AT_NonAlloc                    (1 << 8)                /* 1=Do not fill cache with this data */
88574 +#define AT_DmaData                     (1 << 9)                /* This is a DMA read access. Required to guarantee dma read order. */
88575 +
88576 +/* FSR[11:10] - Data Type - defines data type for endian conversion in PCI interface*/
88577 +#define AT_BlkDataTyMask               0x3
88578 +#define AT_BlkDataTyShift              10
88579 +
88580 +#define AT_BlkDataType(FSR)            (((FSR) >> AT_BlkDataTyShift) & AT_BlkDataTyMask)
88581 +#define AT_TypeByte                    0x0
88582 +#define AT_TypeHWord                   0x1
88583 +#define AT_TypeWord                    0x2
88584 +#define AT_TypeDWord                   0x3
88585 +
88586 +/* FSR[14:12] - Access Permissions */
88587 +#define AT_PermBitsMask                        0x7
88588 +#define AT_PermBitsShift               12
88589 +
88590 +#define AT_Perm(FSR)                   (((FSR) >> AT_PermBitsShift) & AT_PermBitsMask)
88591 +#define AT_PermLocalDataRead           0x0
88592 +#define AT_PermLocalDataWrite          0x1
88593 +#define AT_PermRemoteRead              0x2
88594 +#define AT_PermRemoteWrite             0x3
88595 +#define AT_PermExecute                 0x4
88596 +#define AT_PermLocalEvent              0x5
88597 +#define AT_PermRemoteEvent             0x7
88598 +
88599 +/* FSR[22:15] - reason for fault */
88600 +
88601 +#define FSR_WalkForThread              (1 << 15) /* The thread processor caused the fault */
88602 +#define FSR_Walking                    (1 << 16) /* The fault was caused during a hash table access */
88603 +#define FSR_NoTranslationsFound                (1 << 17) /* The hash table did not contain a matching tag */
88604 +#define FSR_WalkingProtectionFault     (1 << 18) /* A protection fault was detected while walking */
88605 +#define FSR_HashTable1                 (1 << 19) /* Was accessing hash table 1 not 0 */
88606 +#define FSR_RouteVProcErr              (1 << 20) /* This is an invalid vproc for a route fetch */
88607 +#define FSR_FaultForBadData            (1 << 21) /* Bad data (double bit ECC error) while performing a walk access */
88608 +#define FSR_FaultForMaxChainCount      (1 << 22) /* The Elan4 has walked a chain of 1024 items. */
88609 +
88610 +typedef volatile struct _E4_FaultSave
88611 +{
88612 +    E4_uint64 FSRAndFaultContext;                 /* Bits 0-31 : FaultContext. Bits 32-63 : FaultStatus Register */
88613 +    E4_uint64 FaultAddress;
88614 +} E4_FaultSave;
88615 +
88616 +#define FaultSaveContext(FSRAndFaultContext)   ((E4_uint32) ((FSRAndFaultContext) & 0xFFFFFFFF))
88617 +#define FaultSaveFSR(FSRAndFaultContext)       ((E4_uint32) ((FSRAndFaultContext) >> 32))
88618 +
88619 +typedef union E4_TrTypeCntx
88620 +{
88621 +   E4_uint32 TypeContext;
88622 +   struct
88623 +   {
88624 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
88625 +      E4_uint32 Type:16;               /* Transaction type field */
88626 +      E4_uint32 Context:13;            /* Transaction context */
88627 +      E4_uint32 TypeCntxInvalid:1;     /* Bit  29 */
88628 +      E4_uint32 StatusRegValid:1;      /* Bit  30 */
88629 +      E4_uint32 LastTrappedTrans:1;    /* Bit  31 */
88630 +#else
88631 +      E4_uint32 LastTrappedTrans:1;    /* Bit  31 */
88632 +      E4_uint32 StatusRegValid:1;      /* Bit  30 */
88633 +      E4_uint32 TypeCntxInvalid:1;     /* Bit  29 */
88634 +      E4_uint32 Context:13;            /* Transaction context */
88635 +      E4_uint32 Type:16;               /* Transaction type field */
88636 +#endif
88637 +   } s;
88638 +} E4_TrTypeCntx;
88639 +
88640 +#define MAX_TRAPPED_TRANS      28
88641 +#define TRANS_DATA_DWORDS      16
88642 +#define TRANS_DATA_BYTES       128
88643 +#define NO_OF_INPUT_CHANNELS   4
88644 +
88645 +#define CH0_LOW_PRI_CHAN       0
88646 +#define CH1_LOW_PRI_CHAN       1
88647 +#define CH0_HIGH_PRI_CHAN      2
88648 +#define CH1_HIGH_PRI_CHAN      3
88649 +
88650 +/* Words have been swapped for big endian access when fetched with dword access from elan.*/
88651 +typedef struct _E4_IprocTrapHeader
88652 +{
88653 +   E4_uint64   TrAddr;
88654 +   E4_uint64   IProcStatusCntxAndTrType;
88655 +} E4_IprocTrapHeader;
88656 +
88657 +typedef struct _E4_IprocTrapData
88658 +{
88659 +   E4_uint64 Data[TRANS_DATA_DWORDS];
88660 +} E4_IprocTrapData;
88661 +
88662 +/*
88663 + * This struct defines the trap state for the inputers. It requires a contiguous 16K byte block of local memory.
88664 + * The channel bits have been grouped to the low end of the address to force all Identify cookies to use the
88665 + * same cache line.
88666 + */
88667 +typedef struct _E4_IprocTrapState
88668 +{
88669 +   E4_IprocTrapData   TrData[MAX_TRAPPED_TRANS][NO_OF_INPUT_CHANNELS];
88670 +   E4_IprocTrapHeader TrHeader[MAX_TRAPPED_TRANS][NO_OF_INPUT_CHANNELS];
88671 +   E4_uint64         pad[8*NO_OF_INPUT_CHANNELS];
88672 +} E4_IprocTrapState;
88673 +
88674 +/*
88675 + * 64 kbytes of elan local memory. Must be aligned on a 64k boundary
88676 + */
88677 +#define E4_LowPriQueueSize     0x400
88678 +#define E4_HighPriQueueSize    0x100
88679 +
88680 +typedef struct _E4_FaultSaveArea
88681 +{
88682 +   E4_FaultSave                TProcData[8];
88683 +   E4_FaultSave                TProcInst;
88684 +   E4_FaultSave                Dummy[7];
88685 +   E4_FaultSave                SchedProc;
88686 +   E4_FaultSave                DProc;
88687 +   E4_FaultSave                EventProc;
88688 +   E4_FaultSave                IProc;
88689 +   E4_FaultSave                DProcData[4];
88690 +   E4_FaultSave                QReadData[8];
88691 +} E4_FaultSaveArea;
88692 +
88693 +/* Macros to manipulate event queue pointers */
88694 +/*     generate index in EventIntQueue */
88695 +#define E4_EVENT_INTQ_INDEX(fptr)      (((fptr) & 0x1fff) >> 3)
88696 +/*     generate next fptr */
88697 +#define E4_EVENT_INTQ_NEXT(fptr)       ((((fptr) + 8) & ~0x4000) | 0x2000)
88698 +
88699 +typedef struct _E4_CommandPort
88700 +{
88701 +   volatile E4_uint64 Command[1024];   /* a whole 8k page */
88702 +} E4_CommandPort;
88703 +
88704 +/*
88705 + * This is the allocation of unit numbers within the ELAN. It is used to extract the fault address
88706 + * and fault type after a unit has trapped on a memory fetch. Only units that can generate traps
88707 + * have been included.
88708 + */
88709 +#define CUN_TProcData0         0x00
88710 +#define CUN_TProcData1         0x01
88711 +#define CUN_TProcData2         0x02
88712 +#define CUN_TProcData3         0x03
88713 +#define CUN_TProcData4         0x04
88714 +#define CUN_TProcData5         0x05
88715 +#define CUN_TProcData6         0x06
88716 +#define CUN_TProcData7         0x07
88717 +#define CUN_TProcInst          0x08
88718 +
88719 +/* memory current unit numbers
88720 + * TProc data bus */
88721 +#define CUN_DProcPA0           0x10
88722 +#define CUN_DProcPA1           0x11
88723 +#define CUN_DProcPrefetch      0x12
88724 +#define CUN_CommandProc                0x13
88725 +#define CUN_DProcData0         0x14    /* Dma prefetch reads. */
88726 +#define CUN_DProcData1         0x15    /* Dma prefetch reads. */
88727 +#define CUN_DProcData2         0x16    /* Dma prefetch reads. */
88728 +#define CUN_DProcData3         0x17    /* Dma prefetch reads. */
88729 +
88730 +#define CUN_IProcLowPri                0x18
88731 +#define CUN_IProcHighPri       0x19
88732 +#define CUN_Spare0             0x1A
88733 +#define CUN_Spare1             0x1B
88734 +#define CUN_Spare2             0x1C
88735 +#define CUN_ThreadQueue                0x1D
88736 +#define CUN_EventProc0         0x1e
88737 +#define CUN_EventProc1         0x1f
88738 +
88739 +#define CUN_Entries            0x20
88740 +
88741 +typedef struct E4_Registers
88742 +{
88743 +   E4_CacheTags                Tags;                           /* 4k bytes  c000 -> cfff */
88744 +   E4_DataBusMap       Regs;                           /* 4k bytes  d000 -> dfff */
88745 +   E4_User_Regs                uRegs;                          /* 8k bytes  e000 -> ffff */
88746 +} E4_Registers;
88747 +
88748 +#define I2cCntl_I2cPortWrite           (0 << 0)
88749 +#define I2cCntl_I2cPortRead            (1 << 0)
88750 +#define I2cCntl_I2cPortGenStopBit      (1 << 1)
88751 +#define I2cCntl_I2cPortGenRestartBit   (1 << 2)
88752 +#define I2cCntl_I2cPortAccFailed       (1 << 3)
88753 +#define I2cCntl_I2cStopped             (1 << 4)
88754 +#define I2cCntl_I2cWakeupFailed                (1 << 5)
88755 +#define I2cCntl_I2cFastMode            (1 << 6)
88756 +#define I2cCntl_I2cPortBusy            (1 << 7)
88757 +
88758 +#define I2cCntl_LedI2cRegBase_Mask     0x7f
88759 +#define I2cCntl_I2cUpdatingLedReg      (1 << 7)
88760 +
88761 +#define I2cCntl_InvertLedValues                (1 << 0)                /* read/write */
88762 +#define I2cCntl_LedRegWriteFailed      (1 << 1)                /* read only */
88763 +#define I2cCntl_EEPromLoadFailed       (1 << 2)                /* read only */
88764 +#define I2cCntl_InhibitI2CRom          (1 << 3)                /* read only */
88765 +#define I2cCntl_BadRomCrc              (1 << 4)                /* read only */
88766 +#define I2cCntl_MapInI2cConfigData     (1 << 5)                /* read/write */
88767 +#define I2cCntl_SampleNewLedValues     (1 << 6)                /* read/write */
88768 +#define I2cCntl_ClearLinkError         (1 << 7)                /* write only */
88769 +
88770 +typedef struct E4_I2C
88771 +{
88772 +   volatile E4_uint8    I2cWrData;
88773 +   volatile E4_uint8    I2cRdData;
88774 +   volatile E4_uint8    I2cPortControl;
88775 +   volatile E4_uint8   I2cLedBase;
88776 +   volatile E4_uint8    I2cStatus;
88777 +   volatile E4_uint8    I2cLedsValue;
88778 +   volatile E4_uint16  I2cPad;
88779
88780 +   E4_uint8            pad[256 - sizeof(E4_uint64)];
88781 +
88782 +   E4_uint8            UnchangedElan4ConfigRegs[256];
88783 +   E4_uint8            I2cRomConfigShadowValues[256];
88784 +   E4_uint8            ChangedElan4ConfigRegs[256];
88785 +} E4_I2C;
88786 +
88787 +typedef struct _E4_ContextControlBlock 
88788 +{
88789 +    E4_uint32 Filter;                  /* Use a Network context to index for this value */
88790 +    E4_uint32 VirtualProcessTable;     /* Use a local context to index for this value */
88791 +} E4_ContextControlBlock;
88792 +
88793 +/*
88794 + * Filter
88795 + *   [13:0]    Context
88796 + *   [14]      DiscardAll
88797 + *   [15]      AckAll
88798 + *   [16]      HighPri
88799 + *   [17]      CountStats
88800 + *   [31:18]   Unused
88801 + */
88802 +#define E4_FILTER_STATS                (1 << 17)
88803 +#define E4_FILTER_HIGH_PRI     (1 << 16)
88804 +#define E4_FILTER_ACKOK_ALL    (1 << 15)
88805 +#define E4_FILTER_DISCARD_ALL  (1 << 14)
88806 +#define E4_FILTER_CONTEXT_MASK (0x3FFF)
88807 +
88808 +/*
88809 + * VirtualProcessTable
88810 + *   [8:0]     Unused  
88811 + *   [12:9]    Size       num vp entries = 512 << Size
88812 + *   [30:13]   Pointer
88813 + *   [31]      Valid
88814 + */
88815 +#define E4_VPT_MIN_ENTRIES      512
88816 +#define E4_VPT_VALID           ((unsigned)1 << 31)
88817 +#define E4_VPT_PTR_SHIFT       0
88818 +#define E4_VPT_SIZE_SHIFT      9
88819 +#define E4_VPT_SIZE_MASK        0xf
88820 +#define E4_VPT_NUM_VP(vpt_val)  (E4_VPT_MIN_ENTRIES << (((vpt_val) >> E4_VPT_SIZE_SHIFT) & E4_VPT_SIZE_MASK))
88821 +#define E4_VPT_VALUE(ptr,size) (((ptr) << E4_VPT_PTR_SHIFT) | ((size) << E4_VPT_SIZE_SHIFT))
88822 +
88823 +
88824 +/* Virtual Process Table */
88825 +typedef struct _E4_VirtualProcessEntry
88826 +{
88827 +    E4_uint64  Values[2];
88828 +} E4_VirtualProcessEntry;
88829 +
88830 +/*
88831 + * Entries have the following format - rtX is a packed route 
88832 + *
88833 + * |rt11|rt10|rt9 |rt8 |rt7 |rt6 |rt5 |rt4 |rt3 |rt2 |rt2 |rt0 |PAAADD       RRRRRR|
88834 + * |output context     |rt23|rt22|rt21|rt20|rt19|rt18|rt17|rt16|rt15|rt14|rt13|rt12|
88835 + */
88836 +
88837 +#define ROUTE_CTXT_SHIFT       48
88838 +#define ROUTE_CTXT_MASK                (~((1ull << ROUTE_CTXT_SHIFT)-1))
88839 +#define ROUTE_CTXT_VALUE(ctx)  (((E4_uint64) ctx) << ROUTE_CTXT_SHIFT)
88840 +
88841 +#define ROUTE_PACKED_OFFSET    16
88842 +#define ROUTE_NUM_PACKED       24
88843 +
88844 +/* defines for first flit of a route */
88845 +#define FIRST_TIMEOUT(Val)     ((Val) << 14)                   /* [15:14]  */
88846 +#define FIRST_SYSTEM_PACKET     (1 << 13)                       /* [13]     */
88847 +#define FIRST_FLOOD_PACKET      (1 << 12)                       /* [12]     */
88848 +#define FIRST_HIGH_PRI         (1 << 11)                       /* [11]    */
88849 +#define FIRST_AGE(Val)         ((Val) << 7)                    /* [10:7] */
88850 +#define FIRST_OPTIONS_MASK     (0xFF80)
88851 +
88852 +/* [6:0] unpacked 1st route value */
88853 +#define FIRST_INVALID          (0)
88854 +#define FIRST_ROUTE(Val)       (0x08 | (Val))
88855 +#define FIRST_ADAPTIVE         (0x30)
88856 +#define FIRST_BCAST_TREE       (0x20)
88857 +#define FIRST_MYLINK           (0x10)
88858 +#define FIRST_BCAST(Top, Bot)  (0x40 | ((Top) << 3) | (Bot))
88859 +
88860 +/* defines for 3 bit packed entries for subsequent flits */
88861 +#define PACKED_INVALID         (0)
88862 +#define PACKED_ROUTE(Val)      (8 | (Val))
88863 +#define PACKED_ADAPTIVE                (3)
88864 +#define PACKED_BCAST_TREE      (2)
88865 +#define PACKED_MYLINK          (1)
88866 +#define PACKED_BCAST0(Top,Bot) (4 | (Bot & 3))
88867 +#define PACKED_BCAST1(Top,Bot) ((Top << 1) | (Bot >> 2))
88868 +
88869 +#endif /* _ASM */
88870 +/* The MMU root context pointer has a mask to bounds check 
88871 + * it - this is computed as follows.
88872 + */
88873 +#define E4_CONTEXT_MASK(num)   (((num) >= 0x2000) ? 0x00 :     \
88874 +                                ((num) >= 0x1000) ? 0x80 :     \
88875 +                                ((num) >= 0x0800) ? 0xc0 :     \
88876 +                                ((num) >= 0x0400) ? 0xe0 :     \
88877 +                                ((num) >= 0x0200) ? 0xf0 :     \
88878 +                                ((num) >= 0x0100) ? 0xf8 :     \
88879 +                                ((num) >= 0x0080) ? 0xfc :     \
88880 +                                ((num) >= 0x0040) ? 0xfe : 0xff)
88881 +/*
88882 + * This generates the size field for a virtual process table.
88883 + * Size defined as 2^n no of 8K pages.
88884 + * Single cycle route fetches are possible if the minimum vproc table size is 8k.
88885 + */
88886 +#define E4_GEN_VPT_SIZE(Size)  (((Size) & E4_VPT_SIZE_MASK) << E4_VPT_SIZE_SHIFT)
88887 +
88888 +#define COMMAND_RUN_QUEUE_BITS         (13 + 2) /* 8K entries of 4 bytes. This is fixed in hardware. */
88889 +#define COMMAND_DESCS_SPACE_BITS       (13 + 5) /* 8K entries of 32 bytes. This is fixed in hardware. */
88890 +#define COMMAND_INSERTER_CACHE_ENTRIES 16
88891 +
88892 +#define COM_TEST_PORT_ADDR_MASK                0xfULL
88893 +#define COM_TEST_PORT_ADDR_SH          0
88894 +
88895 +/*
88896 + * The flush register is accessed through the CommandControl register.
88897 + * The address is naturally alligned. It also positions the command descriptors in memory.
88898 + * When no command queues need flushing it should be or with COM_FLUSH_INVALID. This sets
88899 + * it to the top command queue descriptor. This cannot be accessed from the PCI.
88900 + */
88901 +#define COM_ENABLE_DEQUEUE             (1 << 4)
88902 +#define COM_FLUSH_DESCRIPTOR_MASK      0x7fffffe0ULL
88903 +#define COM_FLUSH_INVALID              0x0003ffe0ULL
88904 +
88905 +
88906 +/*
88907 + * Elan4 BAR1 is split up as follows :
88908 + *
88909 + * RevA
88910 + *     0x3f00000 EBUS other
88911 + *     0x3e00000 EBUS ROM
88912 + *     0x3dfc000 registers
88913 + *     0x0000000 command ports
88914 + *
88915 + * RevB
88916 + *     0x3ffc000 registers
88917 + *     0x3ff8000 padding
88918 + *     0x3ff6000 i2c registers
88919 + *     0x0000000 command ports
88920 + */
88921 +#define ELAN4_BAR1_SIZE                        (1 << 26)       /* 64M */
88922 +#define ELAN4_REG_SIZE                 (1 << 14)       /* 16K */
88923 +
88924 +#define ELAN4_REVA_EBUS_SIZE           (1 << 21)       /* 2M */
88925 +#define ELAN4_REVA_EBUS_OFFSET         (ELAN4_BAR1_SIZE - ELAN4_REVA_EBUS_SIZE)
88926 +#define ELAN4_REVA_REG_OFFSET          (ELAN4_REVA_EBUS_OFFSET - ELAN4_REG_SIZE)
88927 +#define ELAN4_REVA_NUM_COMMAND_QUEUES  (ELAN4_REVA_REG_OFFSET >> 13)
88928 +
88929 +#define ELAN4_REVA_EBUS_ROM_SIZE       (1 << 20)       /* 1M */
88930 +#define ELAN4_REVA_EBUS_ROM_OFFSET     0
88931 +
88932 +#define ELAN4_REVB_I2C_PADDING         (1 << 14)       /* 16K */
88933 +#define ELAN4_REVB_I2C_SIZE            (1 << 13)       /* 8k */
88934 +#define ELAN4_REVB_REG_OFFSET          (ELAN4_BAR1_SIZE - ELAN4_REG_SIZE)
88935 +#define ELAN4_REVB_I2C_OFFSET          (ELAN4_REVB_REG_OFFSET - ELAN4_REVB_I2C_PADDING - ELAN4_REVB_I2C_SIZE)
88936 +#define ELAN4_REVB_NUM_COMMAND_QUEUES  (ELAN4_REVB_I2C_OFFSET >> 13)
88937 +
88938 +#endif /* notdef _ELAN4_REGISTERS_H */
88939 Index: linux-2.4.21/include/elan4/sdram.h
88940 ===================================================================
88941 --- linux-2.4.21.orig/include/elan4/sdram.h     2004-02-23 16:02:56.000000000 -0500
88942 +++ linux-2.4.21/include/elan4/sdram.h  2005-06-01 23:12:54.743417216 -0400
88943 @@ -0,0 +1,41 @@
88944 +/*
88945 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
88946 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
88947 + *
88948 + *    For licensing information please see the supplied COPYING file
88949 + *
88950 + */
88951 +
88952 +#ifndef __ELAN4_SDRAM_H
88953 +#define __ELAN4_SDRAM_H
88954 +
88955 +#ident "$Id: sdram.h,v 1.8 2003/09/24 13:55:55 david Exp $"
88956 +/*      $Source: /cvs/master/quadrics/elan4hdr/sdram.h,v $*/
88957 +
88958 +/* Include header file generated by sdram configuration program */
88959 +#include <elan4/xsdram.h> 
88960 +
88961 +/* SDRAM bank shift definitions */
88962 +#define SDRAM_0_CS_SHIFT       25
88963 +#define SDRAM_1_CS_SHIFT       27
88964 +#define SDRAM_2_CS_SHIFT       28
88965 +#define SDRAM_3_CS_SHIFT       29
88966 +
88967 +#define SDRAM_BANK_SHIFT(cfg) \
88968 +       (((cfg >> SDRAM_RamSize_SH) & 3) == 0 ? SDRAM_0_CS_SHIFT : \
88969 +        ((cfg >> SDRAM_RamSize_SH) & 3) == 1 ? SDRAM_1_CS_SHIFT : \
88970 +        ((cfg >> SDRAM_RamSize_SH) & 3) == 2 ? SDRAM_2_CS_SHIFT : SDRAM_3_CS_SHIFT)
88971 +
88972 +#define SDRAM_BANK_SIZE(cfg)           (1ULL << SDRAM_BANK_SHIFT(cfg))
88973 +#define SDRAM_BANK_OFFSET(cfg,bank)    ((unsigned long long)(bank) << SDRAM_BANK_SHIFT(cfg))
88974 +#define SDRAM_NUM_BANKS(cfg)           (4)
88975 +#define SDRAM_MAX_BANKS                        4
88976 +
88977 +/* When the elan access sdram it passes eaddr[12] as sdramaddr[12] when
88978 + * running with a 4k page size, however PCI accesses pass paddr[12], so
88979 + * we must ensure that sdram pages are allocated such that eaddr[12] is the
88980 + * same as paddr[12] - the easiest way is to allocate sdram in 8k chunks and
88981 + * ensure that maddr[12] == eaddr[12] == pgoff[0] */
88982 +#define SDRAM_MIN_PAGE_SIZE            (8192)
88983 +
88984 +#endif /* __ELAN4_SDRAM_H */
88985 Index: linux-2.4.21/include/elan4/stats.h
88986 ===================================================================
88987 --- linux-2.4.21.orig/include/elan4/stats.h     2004-02-23 16:02:56.000000000 -0500
88988 +++ linux-2.4.21/include/elan4/stats.h  2005-06-01 23:12:54.743417216 -0400
88989 @@ -0,0 +1,83 @@
88990 +/*
88991 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
88992 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
88993 + * 
88994 + *    For licensing information please see the supplied COPYING file
88995 + *
88996 + */
88997 +
88998 +#ident "@(#)$Id: stats.h,v 1.10.12.1 2004/10/06 11:09:12 david Exp $"
88999 +/*      $Source: /cvs/master/quadrics/elan4mod/stats.h,v $*/
89000 +
89001 +#ifndef __ELAN4_STATS_H
89002 +#define __ELAN4_STATS_H
89003 +
89004 +#define ELAN4_DEV_STATS_BUCKETS                8
89005 +
89006 +
89007 +typedef struct elan4_dev_stats
89008 +{
89009 +    unsigned long      s_interrupts;
89010 +    
89011 +    unsigned long       s_mainints[ELAN4_DEV_STATS_BUCKETS];
89012 +    unsigned long      s_mainint_punts;
89013 +    unsigned long      s_mainint_rescheds;
89014 +
89015 +    unsigned long       s_haltints;
89016 +
89017 +    unsigned long      s_cproc_traps;
89018 +    unsigned long      s_dproc_traps;
89019 +    unsigned long      s_eproc_traps;
89020 +    unsigned long      s_iproc_traps;
89021 +    unsigned long      s_tproc_traps;
89022 +
89023 +    unsigned long       s_cproc_trap_types[0x10];
89024 +    unsigned long       s_dproc_trap_types[6];
89025 +    unsigned long       s_eproc_trap_types[4];
89026 +    unsigned long       s_iproc_trap_types[0xa];
89027 +    unsigned long       s_tproc_trap_types[7];
89028 +
89029 +    unsigned long       s_correctable_errors;
89030 +    unsigned long       s_multiple_errors;
89031 +    
89032 +    unsigned long       s_link_errors;
89033 +    unsigned long       s_lock_errors;
89034 +    unsigned long       s_deskew_errors;
89035 +    unsigned long       s_phase_errors;
89036 +    unsigned long      s_data_errors;
89037 +    unsigned long      s_fifo_overflow0;
89038 +    unsigned long      s_fifo_overflow1;
89039 +    unsigned long       s_mod45changed;
89040 +    unsigned long       s_pack_not_seen;
89041 +    unsigned long       s_linkport_keyfail;
89042 +
89043 +    unsigned long      s_eop_reset;
89044 +    unsigned long       s_bad_length;
89045 +    unsigned long       s_crc_bad;
89046 +    unsigned long       s_crc_error;
89047 +
89048 +    unsigned long      s_cproc_timeout;
89049 +    unsigned long      s_dproc_timeout;
89050 +
89051 +    unsigned long      s_sdram_bytes_free;
89052 +} ELAN4_DEV_STATS;
89053 +
89054 +#define MainIntBuckets         ((int[ELAN4_DEV_STATS_BUCKETS-1]) {1, 2, 3, 4, 8, 16, 32})
89055 +
89056 +#define BumpDevStat(dev,stat)  ((dev)->dev_stats.stat++)
89057 +#define BucketDevStat(dev,stat,n,bucket)       ((n) <= (bucket)[0] ? (dev)->dev_stats.stat[0]++ : \
89058 +                                                (n) <= (bucket)[1] ? (dev)->dev_stats.stat[1]++ : \
89059 +                                                (n) <= (bucket)[2] ? (dev)->dev_stats.stat[2]++ : \
89060 +                                                (n) <= (bucket)[3] ? (dev)->dev_stats.stat[3]++ : \
89061 +                                                (n) <= (bucket)[4] ? (dev)->dev_stats.stat[4]++ : \
89062 +                                                (n) <= (bucket)[5] ? (dev)->dev_stats.stat[5]++ : \
89063 +                                                (n) <= (bucket)[6] ? (dev)->dev_stats.stat[6]++ : \
89064 +                                                                     (dev)->dev_stats.stat[7]++)
89065 +
89066 +
89067 +/*
89068 + * Local variables:
89069 + * c-file-style: "stroustrup"
89070 + * End:
89071 + */
89072 +#endif /*__ELAN4_STATS_H */
89073 Index: linux-2.4.21/include/elan4/tprintf.h
89074 ===================================================================
89075 --- linux-2.4.21.orig/include/elan4/tprintf.h   2004-02-23 16:02:56.000000000 -0500
89076 +++ linux-2.4.21/include/elan4/tprintf.h        2005-06-01 23:12:54.743417216 -0400
89077 @@ -0,0 +1,24 @@
89078 +/*
89079 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
89080 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89081 + *
89082 + *    For licensing information please see the supplied COPYING file
89083 + *
89084 + */
89085 +
89086 +#ifndef __ELAN4_TPRINTF_H
89087 +#define __ELAN4_TPRINTF_H
89088 +
89089 +#ident "$Id: tprintf.h,v 1.6 2003/09/04 12:39:17 david Exp $"
89090 +/*      $Source: /cvs/master/quadrics/elan4hdr/tprintf.h,v $*/
89091 +
89092 +
89093 +#ifdef _ASM
89094 +#define TPRINTF0(string)           add %r0, __LINE__, %r0
89095 +#define TPRINTF1(string,reg)       add reg, __LINE__, %r0
89096 +#else
89097 +#define TPRINTF0(string)           asm volatile ("add %%r0, %0, %%r0" : : "i" (__LINE__))
89098 +#define TPRINTF1(string, value)            asm volatile ("add %0,   %1, %%r0" : : "r" (value), "i" (__LINE__))
89099 +#endif /* _ASM */
89100 +
89101 +#endif /* __ELAN4_TPRINTF_H */
89102 Index: linux-2.4.21/include/elan4/trap.h
89103 ===================================================================
89104 --- linux-2.4.21.orig/include/elan4/trap.h      2004-02-23 16:02:56.000000000 -0500
89105 +++ linux-2.4.21/include/elan4/trap.h   2005-06-01 23:12:54.743417216 -0400
89106 @@ -0,0 +1,95 @@
89107 +/*
89108 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
89109 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89110 + * 
89111 + *    For licensing information please see the supplied COPYING file
89112 + *
89113 + */
89114 +
89115 +#ident "@(#)$Id: trap.h,v 1.10 2003/10/07 12:11:10 david Exp $"
89116 +/*      $Source: /cvs/master/quadrics/elan4mod/trap.h,v $*/
89117 +
89118 +#ifndef __ELAN4_TRAP_H
89119 +#define __ELAN4_TRAP_H
89120 +
89121 +/*
89122 + * If the EProc Faults whilst performing an action (e.g. Read/Write on the data src or dest Addr)
89123 + *  the Eproc increments the Addr(s) by a block size (64 bytes):
89124 + *  1: Fault on Read: 
89125 + *                     Src EventAddr = Read Addr + block
89126 + *  2: Fault on Write:
89127 + *                     Src EventAddr = Read Addr + block
89128 + *                     Dst EventAddr = Read Addr + block
89129 + *                     Size          = Size - block ndwords
89130 + *  We must rewind the addr correctly to completely the transfer successfully
89131 + */
89132 +#define EVENT_COPY_NDWORDS     0x8
89133 +#define EVENT_COPY_BLOCK_SIZE  0x40
89134 +
89135 +typedef struct elan4_eproc_trap
89136 +{
89137 +    E4_uint64          tr_status;
89138 +    E4_FaultSave       tr_faultarea;
89139 +    E4_Event           tr_event;
89140 +    E4_Addr            tr_eventaddr;
89141 +} ELAN4_EPROC_TRAP;
89142 +
89143 +typedef struct elan4_cproc_trap
89144 +{
89145 +    E4_uint64          tr_status;                                      /* cproc status register */
89146 +    E4_uint64          tr_command;                                     /* cproc command */
89147 +    E4_CommandQueueDesc tr_qdesc;                                      /* copy of command queue descriptor */
89148 +    E4_FaultSave       tr_faultarea;                                   /* fault area for mmu traps */
89149 +    ELAN4_EPROC_TRAP   tr_eventtrap;                                   /* associated event trap (waitevent) */
89150 +} ELAN4_CPROC_TRAP;
89151 +
89152 +typedef struct elan4_dproc_trap
89153 +{
89154 +    E4_DMA             tr_desc;
89155 +    E4_FaultSave       tr_packAssemFault;
89156 +    E4_FaultSave       tr_prefetchFault;
89157 +    E4_uint64          tr_status;
89158 +} ELAN4_DPROC_TRAP;
89159 +
89160 +typedef struct elan4_tproc_trap
89161 +{
89162 +    E4_uint64          tr_regs[64];
89163 +    E4_FaultSave       tr_dataFault;
89164 +    E4_FaultSave       tr_instFault;
89165 +    E4_uint64          tr_status;
89166 +    E4_uint64          tr_state;
89167 +    E4_Addr            tr_pc;
89168 +    E4_Addr            tr_npc;
89169 +    E4_uint64          tr_dirty;
89170 +    E4_uint64          tr_bad;
89171 +} ELAN4_TPROC_TRAP;
89172 +
89173 +typedef struct elan4_iproc_trap
89174 +{
89175 +    E4_uint32            tr_numTransactions;
89176 +    E4_uint32            tr_flags;
89177 +    E4_uint32            tr_trappedTrans;
89178 +    E4_uint32            tr_waitForEopTrans;
89179 +    E4_uint32            tr_identifyTrans;
89180 +    E4_uint32            tr_pad;
89181 +
89182 +    E4_FaultSave          tr_faultarea;
89183 +    E4_IprocTrapHeader    tr_transactions[MAX_TRAPPED_TRANS];
89184 +    E4_IprocTrapData      tr_dataBuffers[MAX_TRAPPED_TRANS];
89185 +} ELAN4_IPROC_TRAP;
89186 +
89187 +#define TR_FLAG_ACK_SENT       (1 << 0)
89188 +#define TR_FLAG_EOP_ERROR      (1 << 1)
89189 +#define TR_FLAG_BAD_TRANS      (1 << 2)
89190 +#define TR_FLAG_DMA_PACKET     (1 << 3)
89191 +#define TR_FLAG_EOP_BAD                (1 << 4)
89192 +#define TR_FLAG_TOOMANY_TRANS  (1 << 5)
89193 +
89194 +#define TR_TRANS_INVALID       (0xffffffff)
89195 +
89196 +/*
89197 + * Local variables:
89198 + * c-file-style: "stroustrup"
89199 + * End:
89200 + */
89201 +#endif /* __ELAN4_TRAP_H */
89202 Index: linux-2.4.21/include/elan4/trtype.h
89203 ===================================================================
89204 --- linux-2.4.21.orig/include/elan4/trtype.h    2004-02-23 16:02:56.000000000 -0500
89205 +++ linux-2.4.21/include/elan4/trtype.h 2005-06-01 23:12:54.744417064 -0400
89206 @@ -0,0 +1,112 @@
89207 +/*
89208 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
89209 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89210 + *
89211 + *    For licensing information please see the supplied COPYING file
89212 + *
89213 + */
89214 +
89215 +#ifndef _ELAN4_TRTYPE_H
89216 +#define _ELAN4_TRTYPE_H
89217 +
89218 +#ident "$Id: trtype.h,v 1.20 2004/02/06 10:38:21 mike Exp $"
89219 +/*      $Source: /cvs/master/quadrics/elan4hdr/trtype.h,v $*/
89220 +
89221 +/*<15:11> Size field is used to give the number of additional 64 bit data values.
89222 +         A value from 0 to 16 inclusive is valid. */
89223 +
89224 +#include <elan4/types.h>
89225 +
89226 +#define TR_SIZE_SHIFT          (11)
89227 +#define TR_SIZE_MASK           (0x1f << TR_SIZE_SHIFT)
89228 +#define SET_TR_SIZE(Size)      (((Size) << TR_SIZE_SHIFT) & TR_SIZE_MASK)
89229 +
89230 +/* <10:9> Last Transaction and AckNow bits, marks the last transaction and
89231 +          enables a PACK_OK to be sent. */
89232 +#define TR_LAST_AND_SEND_ACK   (3 << 9)
89233 +
89234 +
89235 +/* <8>  Only valid on the last transaction. Delays execution until an EOP_GOOD is received.
89236 + *      Any other EOP type will abort execution of this transaction. */
89237 +#define TR_WAIT_FOR_EOP                (1 << 8)
89238 +
89239 +/*
89240 + * Data type. This is used by transactions of variable data type. It controls any endian
89241 + * converion required if the destiantion host processor has a big endian memory format.
89242 + */
89243 +/*     WriteBlock      <8:7>   Data type
89244 +                       <6:0>   Part write size */
89245 +#define TR_DATATYPE_SHIFT      (6)
89246 +#define TR_DATATYPE_MASK       ((1 << 2) - 1)
89247 +
89248 +#define TR_DATATYPE_BYTE       E4_DATATYPE_BYTE        
89249 +#define TR_DATATYPE_SHORT      E4_DATATYPE_SHORT
89250 +#define TR_DATATYPE_WORD       E4_DATATYPE_WORD        
89251 +#define TR_DATATYPE_DWORD      E4_DATATYPE_DWORD
89252 +
89253 +/* <5:0> Transaction Type
89254 + *       For Writeblock <5:3> 000 => Write, 0001 => Read
89255 + *                      <2:0> End Byte Addr */
89256 +#define TR_OPCODE_MASK         0x3F
89257 +#define TR_BLOCK_OPCODE_MASK   0x38
89258 +
89259 +#define TR_WRITEBLOCK          0x0
89260 +#define TR_ENDBYTE_MASK                0x7
89261 +#define TR_WRITE(Size, EndByte, DataType)                                              \
89262 +                       (0x0 | SET_TR_SIZE(Size) | ((EndByte) & TR_ENDBYTE_MASK) |      \
89263 +                        (((DataType) & TR_DATATYPE_MASK) << TR_DATATYPE_SHIFT))
89264 +
89265 +#define TR_NOP_TRANS           (0x10 | SET_TR_SIZE(0))
89266 +#define TR_SETEVENT            0x10
89267 +#define TR_SETEVENT_NOIDENT    (TR_SETEVENT | SET_TR_SIZE(0) | TR_LAST_AND_SEND_ACK)
89268 +#define TR_SETEVENT_IDENTIFY   (TR_SETEVENT | SET_TR_SIZE(1) | TR_LAST_AND_SEND_ACK)
89269 +#define TR_REMOTEDMA           (0x11 | SET_TR_SIZE(7) | TR_LAST_AND_SEND_ACK)
89270 +#define TR_SENDDISCARD         (0x12 | SET_TR_SIZE(0))
89271 +
89272 +/*
89273 + * Conditional transactions that might return PAckTestFail.
89274 + * All will allow further exection of the packet if ([Address] operator DataValue) is true.
89275 + * e.g. for TR_GTE further execution if ([Address] >= DataValue) is true.
89276 + * These should be used where a definite TRUE/FALSE answer is required.
89277 + */
89278 +#define TR_GTE                 (0x14 | SET_TR_SIZE(1))
89279 +#define TR_LT                  (0x15 | SET_TR_SIZE(1))
89280 +#define TR_EQ                  (0x16 | SET_TR_SIZE(1))
89281 +#define TR_NEQ                 (0x17 | SET_TR_SIZE(1))
89282 +
89283 +/*
89284 + * Conditional transactions that might return PAckDiscard.
89285 + * All will allow further exection of the packet if ([Address] operator DataValue) is true.
89286 + * e.g. for TR_GTE further execution if ([Address] >= DataValue) is true.
89287 + * These should be used where eventually a TRUE answer is expected but the node might not be ready yet.
89288 + * These can be mixed with the normal conditionals to allow a single packet to test for readyness and
89289 + * a TRUE/FALSE answer.
89290 + */
89291 +#define TR_GTE_DISCARD         (0x34 | SET_TR_SIZE(1))
89292 +#define TR_LT_DISCARD          (0x35 | SET_TR_SIZE(1))
89293 +#define TR_EQ_DISCARD          (0x36 | SET_TR_SIZE(1))
89294 +#define TR_NEQ_DISCARD         (0x37 | SET_TR_SIZE(1))
89295 +
89296 +#define TR_TRACEROUTE_TRANS    0x18
89297 +#define TR_TRACEROUTE(Size)    (TR_TRACEROUTE_TRANS | (TR_DATATYPE_WORD << TR_DATATYPE_SHIFT) |SET_TR_SIZE(Size))
89298 +#define TR_IDENTIFY            (0x19 | SET_TR_SIZE(0))
89299 +
89300 +#define TR_ADDWORD             (0x1c | SET_TR_SIZE(2) | TR_LAST_AND_SEND_ACK)
89301 +#define TR_INPUT_Q_COMMIT      (0x1d | SET_TR_SIZE(1) | TR_LAST_AND_SEND_ACK)
89302 +#define TR_TESTANDWRITE        (0x1e | SET_TR_SIZE(3) | TR_LAST_AND_SEND_ACK)
89303 +#define TR_INPUT_Q_GETINDEX    (0x1f | SET_TR_SIZE(0))
89304 +
89305 +
89306 +
89307 +/* TraceRoute formate */
89308 +#define TR_TRACEROUTE0_CHANID(val)             ((val) & 1)                     /* 0     Chan Id */
89309 +#define TR_TRACEROUTE0_LINKID(val)             (((val) >> 1) & 7)              /* 1:3   Link Id */
89310 +#define TR_TRACEROUTE0_REVID(val)              (((val) >> 4) & 7)              /* 4:6   Revision Id */
89311 +#define TR_TRACEROUTE0_BCAST_PIN(val)          (((val) >> 7) & 1)              /* 7     Bcast Top Pin */
89312 +#define TR_TRACEROUTE0_LNR(val)                        (((val) >> 8) & 0xFF)           /* 8:15  Global Link Not Ready */
89313 +
89314 +#define TR_TRACEROUTE1_ROUTES_SELECTED(val)    ((val & 0xFF))                  /* 0:7   Routes Selected */
89315 +#define TR_TRACEROUTE1_BCAST_TOP(val)          (((val) >> 8) & 7)              /* 8:10  Broadcast Top */
89316 +#define TR_TRACEROUTE1_BCAST_BOTTOM(val)       (((val) >> 12) & 7)             /* 12:14 Broadcast Bottom */
89317 +
89318 +#endif /* _ELAN4_TRANSACTIONTYPE_H */
89319 Index: linux-2.4.21/include/elan4/types.h
89320 ===================================================================
89321 --- linux-2.4.21.orig/include/elan4/types.h     2004-02-23 16:02:56.000000000 -0500
89322 +++ linux-2.4.21/include/elan4/types.h  2005-06-01 23:12:54.744417064 -0400
89323 @@ -0,0 +1,69 @@
89324 +/*
89325 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
89326 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89327 + *
89328 + *    For licensing information please see the supplied COPYING file
89329 + *
89330 + */
89331 +
89332 +#ifndef __ELAN4_TYPES_H
89333 +#define __ELAN4_TYPES_H
89334 +
89335 +#ident "@(#)$Id: types.h,v 1.9 2003/09/04 12:39:17 david Exp $"
89336 +/*      $Source: /cvs/master/quadrics/elan4hdr/types.h,v $*/
89337 +
89338 +#include <qsnet/config.h>
89339 +/*
89340 + * "flip" values for correctly indexing into
89341 + * block data which was copied from the Elan
89342 + * using 64 bit accesses.
89343 + */
89344 +#if defined(__LITTLE_ENDIAN__)
89345 +#  define ByteEndianFlip  0
89346 +#  define ShortEndianFlip 0
89347 +#  define WordEndianFlip  0
89348 +#else
89349 +#  define ByteEndianFlip  7
89350 +#  define ShortEndianFlip 3
89351 +#  define WordEndianFlip  1
89352 +#endif
89353 +
89354 +
89355 +#ifndef _ASM
89356 +
89357 +typedef signed int        E4_int;
89358 +typedef unsigned int              E4_uint;
89359 +
89360 +typedef signed char       E4_int8;
89361 +typedef unsigned char     E4_uint8;
89362 +
89363 +typedef signed short      E4_int16;
89364 +typedef unsigned short            E4_uint16;
89365 +
89366 +typedef signed int        E4_int32;
89367 +typedef unsigned int              E4_uint32;
89368 +
89369 +#ifdef _LP64
89370 +typedef signed long        E4_int64;
89371 +typedef unsigned long      E4_uint64;
89372 +#else
89373 +typedef signed long long   E4_int64;
89374 +typedef unsigned long long E4_uint64;
89375 +#endif
89376 +
89377 +/* 64-bit Elan4 */
89378 +typedef E4_uint64         E4_Addr;
89379 +typedef E4_uint32         E4_LocPhysAddr;      /* Really 31 bits */
89380 +
89381 +#define OneK   (1024)
89382 +#define EightK (8*OneK)
89383 +
89384 +#define E4_DATATYPE_BYTE       0
89385 +#define E4_DATATYPE_SHORT      1
89386 +#define E4_DATATYPE_WORD       2
89387 +#define E4_DATATYPE_DWORD      3
89388 +
89389 +#endif /* _ASM */
89390 +
89391 +#endif /* __ELAN4_TYPES_H */
89392 +
89393 Index: linux-2.4.21/include/elan4/user.h
89394 ===================================================================
89395 --- linux-2.4.21.orig/include/elan4/user.h      2004-02-23 16:02:56.000000000 -0500
89396 +++ linux-2.4.21/include/elan4/user.h   2005-06-01 23:12:54.745416912 -0400
89397 @@ -0,0 +1,344 @@
89398 +/*
89399 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
89400 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89401 + * 
89402 + *    For licensing information please see the supplied COPYING file
89403 + *
89404 + */
89405 +
89406 +#ident "@(#)$Id: user.h,v 1.37.2.2 2004/11/18 17:54:17 duncant Exp $"
89407 +/*      $Source: /cvs/master/quadrics/elan4mod/user.h,v $*/
89408 +
89409 +#ifndef __ELAN4_USER_H
89410 +#define __ELAN4_USER_H
89411 +
89412 +#include <elan/capability.h>
89413 +#include <elan4/usertrap.h>
89414 +#include <elan4/intcookie.h>
89415 +
89416 +typedef struct trap_queue
89417 +{
89418 +    unsigned   q_back;                 /* Next free space */
89419 +    unsigned   q_front;                /* First object to remove */
89420 +    unsigned   q_size;                 /* Size of queue */
89421 +    unsigned   q_count;                /* Current number of entries */
89422 +    unsigned   q_slop;                 /* FULL <=> (count+slop) == size */
89423 +} RING_QUEUE;
89424 +
89425 +#define RING_QUEUE_INIT(q,num,slop)    ((q).q_size = (num), (q).q_slop = (slop), (q).q_front = (q).q_back = 0, (q).q_count = 0)
89426 +#define RING_QUEUE_FULL(q)             ((q).q_count >= ((q).q_size - (q).q_slop))
89427 +#define RING_QUEUE_REALLY_FULL(q)      ((q).q_count == (q).q_size)
89428 +#define RING_QUEUE_EMPTY(q)            ((q).q_count == 0)
89429 +#define RING_QUEUE_NEXT(q,indx)                ((indx) = (((indx)+1) % (q).q_size))
89430 +#define RING_QUEUE_PREV(q,indx)                ((indx) = (((indx)+(q).q_size-1) % (q).q_size))
89431 +#define RING_QUEUE_ADD(q)              (RING_QUEUE_NEXT(q ,(q).q_back),  (++(q).q_count) >= ((q).q_size - (q).q_slop))
89432 +#define RING_QUEUE_REMOVE(q)           (RING_QUEUE_NEXT(q, (q).q_front), (--(q).q_count) == 0)
89433 +#define RING_QUEUE_ADD_FRONT(q)                (RING_QUEUE_PREV(q, (q).q_front), (++(q).q_count) >= ((q).q_size - (q).q_slop))
89434 +#define RING_QUEUE_ENTRY(qArea,indx)   (&(qArea)[(indx)])
89435 +#define RING_QUEUE_FRONT(q,qArea)      RING_QUEUE_ENTRY(qArea, (q).q_front)
89436 +#define RING_QUEUE_BACK(q,qArea)       RING_QUEUE_ENTRY(qArea, (q).q_back)
89437 +#define RING_QUEUE_ITERATE(q,idx)      for (idx = (q).q_front; idx != (q).q_back; idx = (((idx) + 1) % (q).q_size))
89438 +
89439 +typedef struct user_rgn
89440 +{
89441 +    struct user_rgn *rgn_mnext;                                        /* Doubly linked list of regions */
89442 +    struct user_rgn *rgn_mprev;                                        /*   sorted on main address */ 
89443 +    virtaddr_t       rgn_mbase;                                        /* main address of base of region */
89444 +
89445 +    struct user_rgn *rgn_enext;                                        /* Doubly linked list of regions */
89446 +    struct user_rgn *rgn_eprev;                                        /*   sorted on elan address */
89447 +    E4_Addr         rgn_ebase;                                 /* elan address of base of region */
89448 +
89449 +    unsigned long    rgn_len;                                  /* length of region */
89450 +    unsigned        rgn_perm;                                  /* elan access permission */
89451 +} USER_RGN;
89452 +
89453 +typedef struct user_vpseg
89454 +{ 
89455 +    struct list_head   vps_link;
89456 +
89457 +    unsigned short     vps_process;                            /* virtual process number */
89458 +    unsigned short     vps_entries;                            /*   and # virtual processes */
89459 +
89460 +    unsigned           vps_type;
89461 +    union
89462 +    {
89463 +       struct {
89464 +           ELAN_CAPABILITY        *cap;
89465 +           E4_VirtualProcessEntry *routes;
89466 +       } p2p;
89467 +#define vps_p2p_cap    vps_u.p2p.cap
89468 +#define vps_p2p_routes  vps_u.p2p.routes
89469 +
89470 +       struct {
89471 +           unsigned short lowvp;
89472 +           unsigned short highvp;
89473 +       } bcast;
89474 +#define vps_bcast_lowvp                vps_u.bcast.lowvp
89475 +#define vps_bcast_highvp       vps_u.bcast.highvp
89476 +    } vps_u;
89477 +} USER_VPSEG;
89478 +
89479 +/* values for vps_type */
89480 +#define USER_VPSEG_P2P         0
89481 +#define USER_VPSEG_BCAST       1
89482 +
89483 +typedef struct user_cq
89484 +{
89485 +    struct list_head ucq_link;
89486 +
89487 +    ELAN4_CQ       *ucq_cq;                                    /* the real command queue */
89488 +
89489 +    unsigned char    ucq_state;                                        /* command queue state */
89490 +    unsigned char    ucq_errored;                              /* command queue has errored */
89491 +    unsigned char    ucq_flags;                                        /* flags */
89492 +    ELAN4_CPROC_TRAP ucq_trap;                                 /* trap state */
89493 +
89494 +    atomic_t        ucq_ref;                                   /* # references to this cq (mmaps) */
89495 +} USER_CQ;
89496 +
89497 +/* values for ucq_state */
89498 +#define UCQ_RUNNING                     0                      /* command queue is running */
89499 +#define UCQ_TRAPPED                     1                      /* command queue has trapped */
89500 +#define UCQ_NEEDS_RESTART                2                     /* command queue has trapped, and needs restarting */
89501 +#define UCQ_STOPPED                     3                      /* command queue has trapped, and delivered to user */
89502 +
89503 +/* values for ucq_flags */
89504 +#define UCQ_SYSTEM             (1 << 0)
89505 +#define UCQ_REORDER            (1 << 1)
89506 +
89507 +extern int num_fault_save;
89508 +extern int min_fault_pages;
89509 +extern int max_fault_pages;
89510 +
89511 +typedef struct fault_save
89512 +{
89513 +    struct fault_save           *next;
89514 +    E4_Addr                      addr;
89515 +    E4_uint32                    count;
89516 +} FAULT_SAVE;
89517 +
89518 +typedef struct user_iproc_trap
89519 +{
89520 +    unsigned char     ut_state;
89521 +    ELAN4_IPROC_TRAP  ut_trap;
89522 +} USER_IPROC_TRAP;
89523 +
89524 +/* values for ut_state */
89525 +#define UTS_IPROC_RUNNING                      0
89526 +#define UTS_IPROC_TRAPPED                      1
89527 +#define UTS_IPROC_RESOLVING                    2
89528 +#define UTS_IPROC_EXECUTE_PACKET               3
89529 +#define UTS_IPROC_EXECUTING                    4
89530 +#define UTS_IPROC_NETWORK_ERROR                        5
89531 +#define UTS_IPROC_STOPPED                      6
89532 +
89533 +typedef struct user_ctxt_entry
89534 +{
89535 +    struct list_head    cent_link;                                     /* entry chained on context */
89536 +    ELAN_CAPABILITY    *cent_cap;                                      /* capability we attached with */
89537 +} USER_CTXT_ENTRY;
89538 +
89539 +typedef struct user_ctxt
89540 +{
89541 +    ELAN4_CTXT         uctx_ctxt;                              /* is also an elan context */
89542 +
89543 +    spinlock_t        uctx_spinlock;                           /* spinlock for items used with interrupt handler */
89544 +    kcondvar_t        uctx_wait;                               /* place to sleep (traphandler/swapout/swapin/neterr fixup) */
89545 +
89546 +    unsigned          uctx_status;                             /* status                               (uctx_spinlock) */
89547 +
89548 +    pid_t             uctx_trap_pid;                           /* pid to deliver signals to on trap */
89549 +    int                       uctx_trap_signo;                         /* signal number to deliver */
89550 +    unsigned          uctx_trap_state;                         /* state of trap handling code */
89551 +    unsigned          uctx_trap_count;                         /* count of "thread" in user_trap_handler() */
89552 +
89553 +    unsigned          uctx_int_count;                          /* # interrupts since last zeroed */
89554 +    unsigned long      uctx_int_start;                         /* tick when int_count last zeroed */
89555 +    unsigned long      uctx_int_delay;                         /* # ticks to delay next wakeup */
89556 +    struct timer_list  uctx_int_timer;                         /* and timer to use to delay signal */
89557 +
89558 +    struct timer_list  uctx_neterr_timer;                      /* network error timer */
89559 +
89560 +    struct list_head   uctx_vpseg_list;                                /* list of vp segments we've got */
89561 +    kmutex_t           uctx_vpseg_lock;                                /*   and lock to protect it. */
89562 +    ELAN4_ROUTE_TABLE *uctx_routetable;                                /* our virtual process table */
89563 +    ELAN_POSITION      uctx_position;                          /* position in network */
89564 +
89565 +    struct list_head   uctx_cent_list;                                 /* list of attached network contexts */
89566 +
89567 +    USER_CQ          *uctx_ddcq;                               /* command queue for re-issueing traps */
89568 +    E4_uint64         uctx_ddcq_insertcnt;                     /* # dwords inserted into command queue */
89569 +    E4_uint64          uctx_ddcq_completed;                    /* last "completed" write was here */
89570 +    int                       uctx_ddcq_intr;                          /* count of outstanding ddcq interrupts */
89571 +
89572 +    ELAN4_HALTOP       uctx_haltop;                            /* halt operation for flushing */
89573 +    ELAN4_DMA_FLUSHOP  uctx_dma_flushop;                       /* flush operation for flushing dma runqueue */
89574 +
89575 +    INTCOOKIE_TABLE   *uctx_intcookie_table;                   /* table of interrupt cookies (shared with other uctxs for this task) */
89576 +
89577 +    kmutex_t          uctx_cqlock;                             /* lock for create/destory cqs */
89578 +    struct list_head   uctx_cqlist;                            /* list of command queues               (uctx_cqlock,uctx_spinlock) */
89579 +
89580 +    ELAN4_DPROC_TRAP  *uctx_dprocTraps;                                /* queue of dproc traps to resolve/reissue */
89581 +    RING_QUEUE        uctx_dprocTrapQ;
89582 +
89583 +    ELAN4_TPROC_TRAP  *uctx_tprocTraps;                                /* queue of tproc traps to resolve/reissue */
89584 +    RING_QUEUE         uctx_tprocTrapQ;
89585 +
89586 +    ELAN4_EPROC_TRAP  *uctx_eprocTraps;                                /* queue of eproc traps to resolve */
89587 +    RING_QUEUE        uctx_eprocTrapQ;
89588 +
89589 +    USER_IPROC_TRAP    uctx_iprocTrap[2];                      /* input trap state, 1 per virtual channel */
89590 +
89591 +    E4_DMA           *uctx_dmas;                               /* queue of dmas to restart */
89592 +    RING_QUEUE         uctx_dmaQ;
89593 +    
89594 +    E4_ThreadRegs     *uctx_threads;                           /* queue of threads to restart */
89595 +    RING_QUEUE         uctx_threadQ;
89596 +
89597 +    ELAN4_NETERR_MSG  *uctx_msgs;                              /* queue of neterr messages */
89598 +    RING_QUEUE        uctx_msgQ;
89599 +    kmutex_t          uctx_rgnmutex;                           /* lock for create/destroy regions */
89600 +    spinlock_t        uctx_rgnlock;                            /* spinlock to protect linked lists */
89601 +    USER_RGN         *uctx_mrgns;                              /* Doubly linked list of memory regions (uctx_rgnlock) */
89602 +    USER_RGN         *uctx_mtail;                              /* Last memory region on list           (uctx_rgnlock) */
89603 +    USER_RGN         *uctx_mrgnlast;                           /* Last region 'hit'                    (uctx_rgnlock) */
89604 +
89605 +    USER_RGN         *uctx_ergns;                              /* Doubly linked list of memory regions (uctx_rgnlock) */
89606 +    USER_RGN         *uctx_etail;                              /* Last memory region on list           (uctx_rgnlock) */
89607 +    USER_RGN         *uctx_ergnlast;                           /* Last region 'hit'                    (uctx_rgnlock) */
89608 +
89609 +    ELAN4_USER_PAGE   *uctx_upage;                             /* kernel page shared with user */
89610 +    sdramaddr_t               uctx_trampoline;                         /* sdram page for tproc trampoline */
89611 +
89612 +    E4_Addr           uctx_upage_addr;                         /*   elan addr page mapped into */
89613 +    E4_Addr           uctx_trestart_addr;                      /* address of thread restart code */
89614 +    FAULT_SAVE         *uctx_faults;
89615 +    FAULT_SAVE         *uctx_fault_list;
89616 +    int                 uctx_num_fault_save;
89617 +    spinlock_t          uctx_fault_lock;
89618 +} USER_CTXT;
89619 +
89620 +/* bit values for uctx_status */
89621 +#define UCTX_EXITING                           (1 << 0)                /* context is exiting. */
89622 +#define UCTX_USER_FILTERING                    (1 << 1)                /* user requested context filter */
89623 +#define UCTX_USER_STOPPED                      (1 << 2)                /* user requested stop */
89624 +
89625 +#define UCTX_SWAPPING                          (1 << 3)                /* context is swapping out */
89626 +#define UCTX_SWAPPED                           (1 << 4)                /* context is swapped out */
89627 +
89628 +#define UCTX_STOPPING                          (1 << 5)                /* stopping elan from running this context */
89629 +#define UCTX_STOPPED                           (1 << 6)                /* elan no longer running this context */
89630 +
89631 +#define UCTX_EPROC_QUEUE_FULL                  (1 << 7)                /* reasons for stopping running */
89632 +#define UCTX_DPROC_QUEUE_FULL                  (1 << 8)
89633 +#define UCTX_TPROC_QUEUE_FULL                  (1 << 9)
89634 +#define UCTX_IPROC_CH0_TRAPPED                 (1 << 10)
89635 +#define UCTX_IPROC_CH1_TRAPPED                 (1 << 11)
89636 +
89637 +#define UCTX_NETERR_TIMER                      (1 << 12)
89638 +#define UCTX_NETERR_FIXUP                      (1 << 13)
89639 +
89640 +#define UCTX_EPROC_QUEUE_OVERFLOW              (1 << 14)
89641 +#define UCTX_DPROC_QUEUE_OVERFLOW              (1 << 15)
89642 +#define UCTX_TPROC_QUEUE_OVERFLOW              (1 << 16)
89643 +
89644 +#define UCTX_EPROC_QUEUE_ERROR                 (1 << 17)
89645 +#define UCTX_DPROC_QUEUE_ERROR                 (1 << 18)
89646 +#define UCTX_TPROC_QUEUE_ERROR                 (1 << 19)
89647 +
89648 +#define UCTX_STOPPED_REASONS                   (UCTX_EPROC_QUEUE_FULL | UCTX_DPROC_QUEUE_FULL | UCTX_TPROC_QUEUE_FULL)
89649 +#define UCTX_SWAPPED_REASONS                   (UCTX_EXITING | UCTX_USER_STOPPED | UCTX_NETERR_FIXUP)
89650 +#define UCTX_NACKING_REASONS                   (UCTX_USER_FILTERING | UCTX_IPROC_CH0_TRAPPED | UCTX_IPROC_CH1_TRAPPED)
89651 +
89652 +#define UCTX_OVERFLOW_REASONS                  (UCTX_EPROC_QUEUE_OVERFLOW | UCTX_DPROC_QUEUE_OVERFLOW | UCTX_TPROC_QUEUE_OVERFLOW)
89653 +#define UCTX_ERROR_REASONS                     (UCTX_EPROC_QUEUE_ERROR | UCTX_DPROC_QUEUE_ERROR | UCTX_TPROC_QUEUE_ERROR)
89654 +
89655 +#define UCTX_RUNNABLE(uctx)                    (((uctx)->uctx_status & (UCTX_SWAPPED_REASONS | UCTX_STOPPED_REASONS)) == 0)
89656 +#define UCTX_NACKING(uctx)                     (((uctx)->uctx_status & (UCTX_SWAPPED_REASONS | UCTX_STOPPED_REASONS | UCTX_NACKING_REASONS)) != 0)
89657 +
89658 +/* values for uctx_trap_signalled */
89659 +#define UCTX_TRAP_IDLE                         0
89660 +#define UCTX_TRAP_SLEEPING                     1
89661 +#define UCTX_TRAP_SIGNALLED                    2
89662 +#define UCTX_TRAP_ACTIVE                       3
89663 +
89664 +extern int        user_p2p_route_options;
89665 +extern int        user_bcast_route_options;
89666 +extern int       user_dproc_retry_count;
89667 +extern int       user_cproc_retry_count;
89668 +
89669 +extern USER_CTXT *user_alloc (ELAN4_DEV *dev);
89670 +extern void       user_free (USER_CTXT *uctx);
89671 +extern void       user_swapout (USER_CTXT *uctx, unsigned reason);
89672 +extern void       user_swapin (USER_CTXT *uctx, unsigned reason);
89673 +extern int        user_attach (USER_CTXT *uctx, ELAN_CAPABILITY *cap);
89674 +extern void       user_detach (USER_CTXT *uctx, ELAN_CAPABILITY *cap);
89675 +extern void       user_block_inputter (USER_CTXT *uctx, unsigned blocked);
89676 +extern int        user_alloc_trap_queues (USER_CTXT *uctx, unsigned ndproc_traps, unsigned neproc_traps, 
89677 +                                         unsigned ntproc_traps, unsigned nthreads, unsigned ndmas);
89678 +
89679 +extern int        user_add_p2pvp (USER_CTXT *uctx, unsigned process, ELAN_CAPABILITY *cap);
89680 +extern int        user_add_bcastvp (USER_CTXT *uctx, unsigned process, unsigned lowvp, unsigned highvp);
89681 +extern int        user_removevp (USER_CTXT *uctx, unsigned process);
89682 +
89683 +extern int        user_set_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route);
89684 +extern int        user_reset_route (USER_CTXT *uctx, unsigned process);
89685 +extern int        user_get_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route);
89686 +extern int        user_check_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route, unsigned *error);
89687 +extern int       user_send_neterr_msg (USER_CTXT *uctx, unsigned int vp, unsigned int nctx, unsigned int retries, ELAN4_NETERR_MSG *msg);
89688 +extern int        user_neterr_sten (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop);
89689 +extern int        user_neterr_dma (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop);
89690 +
89691 +extern int        user_resume_eproc_trap (USER_CTXT *uctx, E4_Addr addr);
89692 +extern int        user_resume_cproc_trap (USER_CTXT *uctx, unsigned indx);
89693 +extern int        user_resume_dproc_trap (USER_CTXT *uctx, E4_DMA *dma);
89694 +extern int        user_resume_tproc_trap (USER_CTXT *uctx, E4_ThreadRegs *regs);
89695 +extern int        user_resume_iproc_trap (USER_CTXT *uctx, unsigned channel, unsigned trans,
89696 +                                         E4_IprocTrapHeader *hdrp, E4_IprocTrapData *datap);
89697 +
89698 +extern int        user_trap_handler (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int nticks);
89699 +extern USER_CQ   *user_findcq (USER_CTXT *uctx, unsigned num);
89700 +extern USER_CQ   *user_alloccq (USER_CTXT *uctx, unsigned size, unsigned perm, unsigned flags);
89701 +extern void       user_freecq (USER_CTXT *uctx, USER_CQ *cq);
89702 +extern void       user_dropcq (USER_CTXT *uctx, USER_CQ *cq);
89703 +
89704 +/* user_osdep.c */
89705 +extern int        user_load_range (USER_CTXT *uctx, E4_Addr addr, unsigned long nbytes, E4_uint32 fsr);
89706 +extern void       user_update_main (USER_CTXT *uctx, struct mm_struct *mm, unsigned long start, unsigned long len);
89707 +extern void       user_unload_main (USER_CTXT *uctx, unsigned long start, unsigned long len);
89708 +
89709 +
89710 +/* regions.c */
89711 +extern USER_RGN  *user_findrgn_elan (USER_CTXT *uctx, E4_Addr addr, int tail);
89712 +extern USER_RGN  *user_findrgn_main (USER_CTXT *uctx, virtaddr_t addr, int tail);
89713 +extern USER_RGN  *user_rgnat_elan (USER_CTXT *uctx, E4_Addr addr);
89714 +extern USER_RGN  *user_rgnat_main (USER_CTXT *uctx, virtaddr_t addr);
89715 +extern int        user_setperm (USER_CTXT *uctx, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, unsigned perm);
89716 +extern void       user_clrperm (USER_CTXT *uctx, E4_Addr addr, unsigned long len);
89717 +extern int        user_checkperm (USER_CTXT *uctx, E4_Addr raddr, unsigned long rsize, unsigned access);
89718 +extern virtaddr_t user_elan2main (USER_CTXT *uctx, E4_Addr addr);
89719 +extern E4_Addr    user_main2elan (USER_CTXT *uctx, virtaddr_t addr);
89720 +extern void       user_preload_main (USER_CTXT *uctx, virtaddr_t addr, unsigned long len);
89721 +extern void       user_freergns (USER_CTXT *uctx);
89722 +
89723 +/* user_ddcq.c */
89724 +extern int        user_ddcq_check (USER_CTXT *uctx, unsigned num);
89725 +extern int        user_ddcq_flush (USER_CTXT *uctx);
89726 +extern void       user_ddcq_intr (USER_CTXT *uctx);
89727 +extern void       user_ddcq_write_dword (USER_CTXT *uctx, E4_Addr addr, E4_uint64 value);
89728 +extern void       user_ddcq_interrupt (USER_CTXT *uctx, E4_uint64 cookie);
89729 +extern void       user_ddcq_run_dma (USER_CTXT *uctx, E4_DMA *dma);
89730 +extern void       user_ddcq_run_thread (USER_CTXT *uctx, E4_ThreadRegs *regs);
89731 +extern void       user_ddcq_setevent (USER_CTXT *uctx, E4_Addr addr);
89732 +extern void       user_ddcq_seteventn (USER_CTXT *uctx, E4_Addr addr, E4_uint32 count);
89733 +extern void       user_ddcq_waitevent (USER_CTXT *uctx, E4_Addr addr, E4_uint64 CountAndType, E4_uint64 Param0, E4_uint64 Param1);
89734 +
89735 +
89736 +/*
89737 + * Local variables:
89738 + * c-file-style: "stroustrup"
89739 + * End:
89740 + */
89741 +#endif /* __ELAN4_USER_H */
89742 Index: linux-2.4.21/include/elan4/userregs.h
89743 ===================================================================
89744 --- linux-2.4.21.orig/include/elan4/userregs.h  2004-02-23 16:02:56.000000000 -0500
89745 +++ linux-2.4.21/include/elan4/userregs.h       2005-06-01 23:12:54.746416760 -0400
89746 @@ -0,0 +1,383 @@
89747 +/*
89748 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
89749 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89750 + *
89751 + *    For licensing information please see the supplied COPYING file
89752 + *
89753 + */
89754 +
89755 +#ifndef __ELAN4_USERREGS_H
89756 +#define __ELAN4_USERREGS_H
89757 +
89758 +#ident "$Id: userregs.h,v 1.14.2.1 2004/10/07 10:57:40 addy Exp $"
89759 +/*      $Source: /cvs/master/quadrics/elan4hdr/userregs.h,v $*/
89760 +
89761 +#ifdef __cplusplus
89762 +extern "C" {
89763 +#endif
89764 +
89765 +/*
89766 + * Statistic control reg values
89767 + * Each 4-bit nibble of the control word specifies what statistic
89768 + * is to be recorded in each of the 8 statistic counters
89769 + */
89770 +#define COUNT_REG0_SHIFT   32ull
89771 +#define COUNT_REG1_SHIFT   36ull
89772 +#define COUNT_REG2_SHIFT   40ull
89773 +#define COUNT_REG3_SHIFT   44ull
89774 +#define COUNT_REG4_SHIFT   48ull
89775 +#define COUNT_REG5_SHIFT   52ull
89776 +#define COUNT_REG6_SHIFT   56ull
89777 +#define COUNT_REG7_SHIFT   60ull
89778 +
89779 +
89780 +/* Count reg 0 */
89781 +#define STC_INPUT_NON_WRITE_BLOCKS     (0x0ull << COUNT_REG0_SHIFT)
89782 +#define STP_DMA_EOP_WAIT_ACK           (0x1ull << COUNT_REG0_SHIFT)
89783 +#define STP_TPROC_RUNNING              (0x2ull << COUNT_REG0_SHIFT)
89784 +#define STC_STEN_PKTS_OPEN              (0x3ull << COUNT_REG0_SHIFT)
89785 +#define STP_CPROC_HOLDS_FFU_DP         (0x4ull << COUNT_REG0_SHIFT)
89786 +#define STC_TLB_TABLE_WALKS             (0x5ull << COUNT_REG0_SHIFT)
89787 +#define STC_CACHE_HITS                  (0x6ull << COUNT_REG0_SHIFT)
89788 +#define STC_PCI_SLAVE_READS             (0x7ull << COUNT_REG0_SHIFT)
89789 +#define STP_PCI_WAITING_FOR_GNT         (0x8ull << COUNT_REG0_SHIFT)
89790 +#define STP_SYS_CLOCK_RATE0            (0xfull << COUNT_REG0_SHIFT)
89791 +
89792 +#define STATS_REG0_NAMES {                     \
89793 +          "STC_INPUT_NON_WRITE_BLOCKS",        \
89794 +          "STP_DMA_EOP_WAIT_ACK",              \
89795 +          "STP_TPROC_RUNNING",                 \
89796 +          "STC_STEN_PKTS_OPEN",                \
89797 +          "STP_CPROC_HOLDS_FFU_DP",            \
89798 +          "STC_TLB_TABLE_WALKS",               \
89799 +          "STC_CACHE_HITS",                    \
89800 +          "STC_PCI_SLAVE_READS",               \
89801 +          "STP_PCI_WAITING_FOR_GNT",           \
89802 +          "STP_SYS_CLOCK_RATE0"                \
89803 +}
89804 +
89805 +/* Count reg 1 */
89806 +#define STC_INPUT_WRITE_BLOCKS         (0x0ull << COUNT_REG1_SHIFT)
89807 +#define STP_DMA_DATA_TRANSMITTING      (0x1ull << COUNT_REG1_SHIFT)
89808 +#define STC_CPROC_VALUES_EXE           (0x2ull << COUNT_REG1_SHIFT)
89809 +#define STC_STEN_TRANS_SENT            (0x3ull << COUNT_REG1_SHIFT)
89810 +#define STP_TPROC_DQ_HOLDS_FFU_DP      (0x4ull << COUNT_REG1_SHIFT)
89811 +#define STC_TPROC_TLB_HITS             (0x5ull << COUNT_REG1_SHIFT)
89812 +#define STC_CACHE_ALLOC_MISSES         (0x6ull << COUNT_REG1_SHIFT)
89813 +#define STP_PCI_MASTER_READ_WAITING    (0x7ull << COUNT_REG1_SHIFT)
89814 +#define STP_PCI_WAITING_FOR_DEVSEL      (0x8ull << COUNT_REG1_SHIFT)
89815 +#define STP_SYS_CLOCK_RATE1            (0xfull << COUNT_REG1_SHIFT)
89816 +
89817 +#define STATS_REG1_NAMES {                    \
89818 +          "STC_INPUT_WRITE_BLOCKS",            \
89819 +          "STP_DMA_DATA_TRANSMITTING",         \
89820 +          "STC_CPROC_VALUES_EXE",              \
89821 +          "STC_STEN_TRANS_SENT",               \
89822 +          "STP_TPROC_DQ_HOLDS_FFU_DP",         \
89823 +          "STC_TPROC_TLB_HITS",                \
89824 +          "STC_CACHE_ALLOC_MISSES",            \
89825 +          "STP_PCI_MASTER_READ_WAITING",       \
89826 +          "STP_PCI_WAITING_FOR_DEVSEL",        \
89827 +          "STP_SYS_CLOCK_RATE1"                \
89828 +}
89829 +
89830 +/* Count reg 2 */
89831 +#define STC_INPUT_PKTS                 (0x0ull << COUNT_REG2_SHIFT)
89832 +#define STP_DMA_WAITING_MEM            (0x1ull << COUNT_REG2_SHIFT)
89833 +#define STC_CPROC_TRANSFERS             (0x2ull << COUNT_REG2_SHIFT)
89834 +#define STP_STEN_WAIT_NETWORK_BUSY     (0x3ull << COUNT_REG2_SHIFT)
89835 +#define STP_IPROC_HOLDS_FFU_DP         (0x4ull << COUNT_REG2_SHIFT)
89836 +#define STC_UNITS_TLB_HITS             (0x5ull << COUNT_REG2_SHIFT)
89837 +#define STC_CACHE_NON_ALLOC_MISSES      (0x6ull << COUNT_REG2_SHIFT)
89838 +#define STP_PCI_MASTER_WRITE_WAITING   (0x7ull << COUNT_REG2_SHIFT)
89839 +#define STC_PCI_OUT_OF_ORDER_SPLIT_COMP (0x8ull << COUNT_REG2_SHIFT)
89840 +#define STP_SYS_CLOCK_RATE2            (0xfull << COUNT_REG2_SHIFT)
89841 +
89842 +#define STATS_REG2_NAMES {                    \
89843 +          "STC_INPUT_PKTS",                    \
89844 +          "STP_DMA_WAITING_MEM",               \
89845 +          "STC_CPROC_TRANSFERS",               \
89846 +          "STP_STEN_WAIT_NETWORK_BUSY",        \
89847 +          "STP_IPROC_HOLDS_FFU_DP",            \
89848 +          "STC_UNITS_TLB_HITS",                \
89849 +          "STC_CACHE_NON_ALLOC_MISSES",        \
89850 +          "STP_PCI_MASTER_WRITE_WAITING",      \
89851 +          "STC_PCI_OUT_OF_ORDER_SPLIT_COMP",   \
89852 +          "STP_SYS_CLOCK_RATE2"                \
89853 +}
89854 +
89855 +/* Count reg 3 */
89856 +#define STC_INPUT_PKTS_REJECTED         (0x0ull << COUNT_REG3_SHIFT)
89857 +#define STP_DMA_WAIT_NETWORK_BUSY       (0x1ull << COUNT_REG3_SHIFT)
89858 +#define STC_CPROC_PREFETCH_SDRAM        (0x2ull << COUNT_REG3_SHIFT)
89859 +#define STP_STEN_BLOCKED_ACKS_OR_VC     (0x3ull << COUNT_REG3_SHIFT)
89860 +#define STP_EPROC_HOLDS_FFU_DP          (0x4ull << COUNT_REG3_SHIFT)
89861 +#define STP_TPROC_BLOCKED_MEMSYS        (0x5ull << COUNT_REG3_SHIFT)
89862 +#define STC_CACHE_WRITE_BACKS           (0x6ull << COUNT_REG3_SHIFT)
89863 +#define STP_PCI_SLAVE_READ_WAITING      (0x7ull << COUNT_REG3_SHIFT)
89864 +#define STP_PCI_IDLE_CYCLES            (0x8ull << COUNT_REG3_SHIFT)
89865 +#define STP_SYS_CLOCK_RATE3            (0xfull << COUNT_REG3_SHIFT)
89866 +
89867 +#define STATS_REG3_NAMES {                    \
89868 +          "STC_INPUT_PKTS_REJECTED",           \
89869 +          "STP_DMA_WAIT_NETWORK_BUSY",         \
89870 +          "STC_CPROC_PREFETCH_SDRAM",          \
89871 +          "STP_STEN_BLOCKED_ACKS_OR_VC",       \
89872 +          "STP_EPROC_HOLDS_FFU_DP",            \
89873 +          "STP_TPROC_BLOCKED_MEMSYS",          \
89874 +          "STC_CACHE_WRITE_BACKS",             \
89875 +          "STP_PCI_SLAVE_READ_WAITING",        \
89876 +          "STP_PCI_IDLE_CYCLES",               \
89877 +          "STP_SYS_CLOCK_RATE3"                \
89878 +}
89879 +
89880 +/* Count reg 4 */
89881 +#define STP_INPUT_DATA_TRANSMITTING    (0x0ull << COUNT_REG4_SHIFT)
89882 +#define STC_DMA_PKTS_ACCEPTED          (0x1ull << COUNT_REG4_SHIFT)
89883 +#define STC_CPROC_FLUSH_REQ_SDRAM      (0x2ull << COUNT_REG4_SHIFT)
89884 +#define STP_STEN_EOP_WAIT_ACK          (0x3ull << COUNT_REG4_SHIFT)
89885 +#define STP_DMA_HOLDS_FFU_DP           (0x4ull << COUNT_REG4_SHIFT)
89886 +#define STP_UNIT_BLOCKED_MEMSYS        (0x5ull << COUNT_REG4_SHIFT)
89887 +#define STC_PCI_MASTER_READS           (0x6ull << COUNT_REG4_SHIFT)
89888 +#define STP_PCI_SLAVE_WRITE_WAITING    (0x7ull << COUNT_REG4_SHIFT)
89889 +#define STC_INPUT_PACKETS_DISCARDED    (0x8ull << COUNT_REG4_SHIFT)
89890 +#define STP_SYS_CLOCK_RATE4            (0xfull << COUNT_REG4_SHIFT)
89891 +
89892 +#define STATS_REG4_NAMES {                    \
89893 +          "STP_INPUT_DATA_TRANSMITTING",       \
89894 +          "STC_DMA_PKTS_ACCEPTED",             \
89895 +          "STC_CPROC_FLUSH_REQ_SDRAM",         \
89896 +          "STP_STEN_EOP_WAIT_ACK",             \
89897 +          "STP_DMA_HOLDS_FFU_DP",              \
89898 +          "STP_UNIT_BLOCKED_MEMSYS",           \
89899 +          "STC_PCI_MASTER_READS",              \
89900 +          "STP_PCI_SLAVE_WRITE_WAITING",       \
89901 +          "STC_INPUT_PACKETS_DISCARDED",       \
89902 +          "STP_SYS_CLOCK_RATE4"                \
89903 +}
89904 +
89905 +/* Count reg 5 */
89906 +#define STP_INPUT_WAITING_NETWORK_DATA  (0x0ull << COUNT_REG5_SHIFT)
89907 +#define STC_DMA_PKTS_REJECTED           (0x1ull << COUNT_REG5_SHIFT)
89908 +#define STC_CPROC_INSERT_CACHE_MISSES   (0x2ull << COUNT_REG5_SHIFT)
89909 +#define STP_STEN_TRANSMITTING_DATA      (0x3ull << COUNT_REG5_SHIFT)
89910 +#define FFU_BLOCKED_DIFF_FFU_PROC       (0x4ull << COUNT_REG5_SHIFT)
89911 +#define STP_TABLE_WALKS_BLOCKED_MEMSYS  (0x5ull << COUNT_REG5_SHIFT)
89912 +#define STC_PCI_MASTER_WRITES           (0x6ull << COUNT_REG5_SHIFT)
89913 +#define STP_PCI_MASTER_HOLDS_BUS        (0x7ull << COUNT_REG5_SHIFT)
89914 +#define STC_PCI_NO_SPLIT_COMPS         (0x8ull << COUNT_REG5_SHIFT)
89915 +#define STP_SYS_CLOCK_RATE5            (0xfull << COUNT_REG5_SHIFT)
89916 +
89917 +#define STATS_REG5_NAMES {                    \
89918 +          "STP_INPUT_WAITING_NETWORK_DATA",    \
89919 +          "STC_DMA_PKTS_REJECTED",             \
89920 +          "STC_CPROC_INSERT_CACHE_MISSES",     \
89921 +          "STP_STEN_TRANSMITTING_DATA",        \
89922 +          "FFU_BLOCKED_DIFF_FFU_PROC",         \
89923 +          "STP_TABLE_WALKS_BLOCKED_MEMSYS",    \
89924 +          "STC_PCI_MASTER_WRITES",             \
89925 +          "STP_PCI_MASTER_HOLDS_BUS",          \
89926 +          "STC_PCI_NO_SPLIT_COMPS",            \
89927 +          "STP_SYS_CLOCK_RATE5"                \
89928 +}
89929 +
89930 +/* Count reg 6 */
89931 +#define STP_INPUT_BLOCKED_WAITING_TRANS (0x0ull << COUNT_REG6_SHIFT)
89932 +#define STP_TPROC_INST_STALL           (0x1ull << COUNT_REG6_SHIFT)
89933 +#define STP_CPROC_WAITING_DESCHED      (0x2ull << COUNT_REG6_SHIFT)
89934 +#define STP_STEN_PKT_OPEN_WAITING_DATA (0x3ull << COUNT_REG6_SHIFT)
89935 +#define STP_TLB_HASH_TABLE_ACCESSES    (0x4ull << COUNT_REG6_SHIFT)
89936 +#define STP_PCI_SLAVE_BLOCKED_MEMSYS   (0x5ull << COUNT_REG6_SHIFT)
89937 +#define STP_PCI_TRANSFERRING_DATA       (0x6ull << COUNT_REG6_SHIFT)
89938 +#define STP_PCI_MASTER_WAITING_BUS      (0x7ull << COUNT_REG6_SHIFT)
89939 +#define STP_PCI_READ_LATENCY           (0x8ull << COUNT_REG6_SHIFT)
89940 +#define STP_SYS_CLOCK_RATE6            (0xfull << COUNT_REG6_SHIFT)
89941 +
89942 +#define STATS_REG6_NAMES {                    \
89943 +          "STP_INPUT_BLOCKED_WAITING_TRANS",   \
89944 +          "STP_TPROC_INST_STALL",              \
89945 +          "STP_CPROC_WAITING_DESCHED",         \
89946 +          "STP_STEN_PKT_OPEN_WAITING_DATA",    \
89947 +          "STP_TLB_HASH_TABLE_ACCESSES",       \
89948 +          "STP_PCI_SLAVE_BLOCKED_MEMSYS",      \
89949 +          "STP_PCI_TRANSFERRING_DATA",         \
89950 +          "STP_PCI_MASTER_WAITING_BUS",        \
89951 +          "STP_PCI_READ_LATENCY",              \
89952 +          "STP_SYS_CLOCK_RATE6"                \
89953 +}
89954 +
89955 +/* Count reg 7 */
89956 +#define STC_INPUT_CTX_FILTER_FILL       (0x0ull << COUNT_REG7_SHIFT)   
89957 +#define STP_TPROC_LOAD_STORE_STALL      (0x1ull << COUNT_REG7_SHIFT)
89958 +#define STC_CPROC_TIMEOUTS              (0x2ull << COUNT_REG7_SHIFT)
89959 +#define STP_STEN_BLOCKED_NETWORK        (0x3ull << COUNT_REG7_SHIFT)
89960 +#define STP_TLB_CHAIN_ACCESSES          (0x4ull << COUNT_REG7_SHIFT)
89961 +#define STP_CPROC_SCHED_BLOCKED_MEMSYS  (0x5ull << COUNT_REG7_SHIFT)
89962 +#define STC_PCI_SLAVE_WRITES            (0x6ull << COUNT_REG7_SHIFT)
89963 +#define STC_PCI_DISCONNECTS_RETRIES     (0x7ull << COUNT_REG7_SHIFT)
89964 +#define STC_RING_OSCILLATOR            (0x8ull << COUNT_REG7_SHIFT)
89965 +#define STP_SYS_CLOCK_RATE7            (0xfull << COUNT_REG7_SHIFT)
89966 +
89967 +#define STATS_REG7_NAMES {                    \
89968 +          "STC_INPUT_CTX_FILTER_FILL",         \
89969 +          "STP_TPROC_LOAD_STORE_STALL",        \
89970 +          "STC_CPROC_TIMEOUTS",                \
89971 +          "STP_STEN_BLOCKED_NETWORK",          \
89972 +          "STP_TLB_CHAIN_ACCESSES",            \
89973 +          "STP_CPROC_SCHED_BLOCKED_MEMSYS",    \
89974 +          "STC_PCI_SLAVE_WRITES",              \
89975 +          "STC_PCI_DISCONNECTS_RETRIES",       \
89976 +          "STC_RING_OSCILLATOR",               \
89977 +          "STP_SYS_CLOCK_RATE7"                \
89978 +}
89979 +
89980 +#define STATS_REG_NAMES { \
89981 +    STATS_REG0_NAMES, \
89982 +    STATS_REG1_NAMES, \
89983 +    STATS_REG2_NAMES, \
89984 +    STATS_REG3_NAMES, \
89985 +    STATS_REG4_NAMES, \
89986 +    STATS_REG5_NAMES, \
89987 +    STATS_REG6_NAMES, \
89988 +    STATS_REG7_NAMES, \
89989 +}
89990 +
89991 +
89992 +#define INPUT_PERF_STATS        (STC_INPUT_NON_WRITE_BLOCKS | STC_INPUT_WRITE_BLOCKS |              \
89993 +                                STC_INPUT_PKTS | STC_INPUT_PKTS_REJECTED |                         \
89994 +                                 STC_INPUT_CTX_FILTER_FILL | STP_INPUT_DATA_TRANSMITTING |           \
89995 +                                STP_INPUT_WAITING_NETWORK_DATA | STP_INPUT_BLOCKED_WAITING_TRANS | STC_INPUT_PACKETS_DISCARDED) 
89996 +
89997 +#define DMA_PERF_STATS          (STC_DMA_PKTS_ACCEPTED | STC_DMA_PKTS_REJECTED |                    \
89998 +                                 STP_DMA_EOP_WAIT_ACK | STP_DMA_DATA_TRANSMITTING |                 \
89999 +                                STP_DMA_WAITING_MEM | STP_DMA_WAIT_NETWORK_BUSY)                 
90000 +
90001 +
90002 +#define TPROC_PERF_STATS        (STP_TPROC_RUNNING | STP_TPROC_INST_STALL |                         \
90003 +                                 STP_TPROC_LOAD_STORE_STALL)
90004 +
90005 +#define CPROC_PERF_STATS        (STC_CPROC_VALUES_EXE | STC_CPROC_TRANSFERS |                       \
90006 +                                STC_CPROC_PREFETCH_SDRAM | STC_CPROC_FLUSH_REQ_SDRAM |             \
90007 +                                STC_CPROC_INSERT_CACHE_MISSES | STP_CPROC_WAITING_DESCHED |        \
90008 +                                STC_CPROC_TIMEOUTS)
90009 +
90010 +#define STEN_PERF_STATS         (STC_STEN_PKTS_OPEN | STC_STEN_TRANS_SENT |                         \
90011 +                                STP_STEN_WAIT_NETWORK_BUSY | STP_STEN_BLOCKED_ACKS_OR_VC |         \
90012 +                                STP_STEN_EOP_WAIT_ACK | STP_STEN_TRANSMITTING_DATA |               \
90013 +                                STP_STEN_PKT_OPEN_WAITING_DATA | STP_STEN_BLOCKED_NETWORK)
90014 +
90015 +#define FFU_PREF_STATS          (STP_CPROC_HOLDS_FFU_DP | STP_TPROC_DQ_HOLDS_FFU_DP |               \
90016 +                                STP_IPROC_HOLDS_FFU_DP | STP_EPROC_HOLDS_FFU_DP |                  \
90017 +                                STP_DMA_HOLDS_FFU_DP | FFU_BLOCKED_DIFF_FFU_PROC)
90018 +
90019 +#define TABLE_WALK_PERF_STATS   (STC_TPROC_TLB_HITS | STC_UNITS_TLB_HITS |                          \
90020 +                                STP_TLB_HASH_TABLE_ACCESSES | STP_TLB_CHAIN_ACCESSES |             \
90021 +                                STC_TLB_TABLE_WALKS)
90022 +
90023 +#define ADDRESS_ARB_PERF_STATS  (STP_UNIT_BLOCKED_MEMSYS | STP_TPROC_BLOCKED_MEMSYS |               \
90024 +                                STP_TABLE_WALKS_BLOCKED_MEMSYS | STP_CPROC_SCHED_BLOCKED_MEMSYS |  \
90025 +                                STP_PCI_SLAVE_BLOCKED_MEMSYS)
90026 +
90027 +#define CACHE_PERF_STATS        (STC_CACHE_HITS | STC_CACHE_ALLOC_MISSES |                          \
90028 +                                STC_CACHE_NON_ALLOC_MISSES | STC_CACHE_WRITE_BACKS)
90029 +
90030 +
90031 +#define PCI_PERF_STATS          (STC_PCI_SLAVE_READS | STP_PCI_MASTER_READ_WAITING |                \
90032 +                                 STP_PCI_MASTER_WRITE_WAITING | STP_PCI_SLAVE_READ_WAITING |        \
90033 +                                 STP_PCI_SLAVE_WRITE_WAITING | STC_PCI_MASTER_WRITES |              \
90034 +                                 STP_PCI_TRANSFERRING_DATA | STC_PCI_SLAVE_WRITES)
90035 +
90036 +#define PCIBUS_PERF_STATS       (STP_PCI_WAITING_FOR_GNT | STP_PCI_WAITING_FOR_DEVSEL |                    \
90037 +                                STC_PCI_OUT_OF_ORDER_SPLIT_COMP | STP_PCI_IDLE_CYCLES |            \
90038 +                                STC_PCI_MASTER_READS | STP_PCI_MASTER_HOLDS_BUS |                  \
90039 +                                STP_PCI_MASTER_WAITING_BUS | STC_PCI_DISCONNECTS_RETRIES)
90040 +
90041 +                                
90042 +    extern const char *elan_stats_names[8][10];
90043 +
90044 +#define ELAN_STATS_NAME(COUNT, CONTROL) (elan_stats_names[(COUNT)][(CONTROL) & 7])
90045 +
90046 +    typedef volatile union e4_StatsControl
90047 +    {
90048 +       E4_uint64 StatsControl;
90049 +       struct
90050 +       {
90051 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
90052 +           E4_uint32 StatCont0:4;
90053 +           E4_uint32 StatCont1:4;
90054 +           E4_uint32 StatCont2:4;
90055 +           E4_uint32 StatCont3:4;
90056 +           E4_uint32 StatCont4:4;
90057 +           E4_uint32 StatCont5:4;
90058 +           E4_uint32 StatCont6:4;
90059 +           E4_uint32 StatCont7:4;
90060 +#else
90061 +           E4_uint32 StatCont7:4;
90062 +           E4_uint32 StatCont6:4;
90063 +           E4_uint32 StatCont5:4;
90064 +
90065 +           E4_uint32 StatCont4:4;
90066 +           E4_uint32 StatCont3:4;
90067 +           E4_uint32 StatCont2:4;
90068 +           E4_uint32 StatCont1:4;
90069 +           E4_uint32 StatCont0:4;
90070 +#endif
90071 +           E4_uint32 pad;
90072 +       } s;
90073 +    } E4_StatsControl;
90074 +
90075 +typedef volatile union e4_StatsCount
90076 +{
90077 +   E4_uint64    ClockStat; 
90078 +   struct
90079 +   {
90080 +       E4_uint32 ClockLSW;     /* read only */
90081 +       E4_uint32 StatsCount;
90082 +   } s;
90083 +} E4_StatsCount;
90084 +
90085 +typedef volatile union e4_clock
90086 +{
90087 +   E4_uint64 NanoSecClock;
90088 +   struct
90089 +   {
90090 +      E4_uint32 ClockLSW;
90091 +      E4_uint32 ClockMSW;
90092 +   } s;
90093 +} E4_Clock;
90094 +#define E4_TIME( X ) ((X).NanoSecClock)
90095 +
90096 +#define ELAN4_COMMS_CLOCK_FREQUENCY    660             /* In Mhz. This is half the bit rate. */
90097 +#define ELAN4_CLOCK_ADD_VALUE          200             /* For 200ns increment rate */
90098 +#define ELAN4_CLOCK_COMMS_DIV_VALUE    (((ELAN4_COMMS_CLOCK_FREQUENCY * ELAN4_CLOCK_ADD_VALUE) / (1000 * 4)) - 1)
90099 +#define ELAN4_CLOCK_TICK_RATE          ((ELAN4_CLOCK_ADD_VALUE << 8) + ELAN4_CLOCK_COMMS_DIV_VALUE)
90100 +
90101 +typedef volatile union e4_clocktickrate
90102 +{
90103 +   E4_uint64 NanoSecClock;
90104 +   struct
90105 +   {
90106 +      E4_uint32 pad1;
90107 +      E4_uint32 TickRates;
90108 +   } s;
90109 +} E4_ClockTickRate;
90110 +
90111 +/*
90112 + * This is made into an 8k byte object.
90113 + */
90114 +typedef volatile struct _E4_User_Regs
90115 +{
90116 +   E4_StatsCount       StatCounts[8];
90117 +   E4_StatsCount       InstCount;
90118 +   E4_Clock            Clock;
90119 +   E4_StatsControl     StatCont;
90120 +   E4_ClockTickRate    ClockTickRate;
90121 +   E4_uint8            pad1[EightK - ((sizeof(E4_StatsCount)*9)+sizeof(E4_StatsControl)+
90122 +                                       sizeof(E4_Clock)+sizeof(E4_ClockTickRate))];
90123 +} E4_User_Regs;
90124 +
90125 +#ifdef __cplusplus
90126 +}
90127 +#endif
90128 +
90129 +#endif /* __ELAN4_USERREGS_H */
90130 Index: linux-2.4.21/include/elan4/usertrap.h
90131 ===================================================================
90132 --- linux-2.4.21.orig/include/elan4/usertrap.h  2004-02-23 16:02:56.000000000 -0500
90133 +++ linux-2.4.21/include/elan4/usertrap.h       2005-06-01 23:12:54.746416760 -0400
90134 @@ -0,0 +1,114 @@
90135 +/*
90136 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
90137 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90138 + * 
90139 + *    For licensing information please see the supplied COPYING file
90140 + *
90141 + */
90142 +
90143 +#ident "@(#)$Id: usertrap.h,v 1.17 2004/05/05 09:08:35 david Exp $"
90144 +/*      $Source: /cvs/master/quadrics/elan4mod/usertrap.h,v $*/
90145 +
90146 +#ifndef __ELAN4_USERTRAP_H
90147 +#define __ELAN4_USERTRAP_H
90148 +
90149 +#ifndef _ASM
90150 +typedef struct elan4_user_page
90151 +{
90152 +    E4_uint64          upage_ddcq_completed;
90153 +} ELAN4_USER_PAGE;
90154 +
90155 +typedef struct elan4_user_trap
90156 +{
90157 +    int                                ut_type;
90158 +    unsigned                   ut_proc;
90159 +    unsigned                   ut_args[4];
90160 +
90161 +    union {
90162 +       ELAN4_EPROC_TRAP        eproc;
90163 +       ELAN4_CPROC_TRAP        cproc;
90164 +       ELAN4_DPROC_TRAP        dproc;
90165 +       ELAN4_IPROC_TRAP        iproc;
90166 +       ELAN4_TPROC_TRAP        tproc;
90167 +       ELAN4_NETERR_MSG        msg;
90168 +    }                  ut_trap;
90169 +} ELAN4_USER_TRAP;
90170 +
90171 +#endif /* _ASM */
90172 +
90173 +
90174 +/* value for ut_type */
90175 +#define UTS_FINISHED           0                               /* all pending traps have been handled */
90176 +#define UTS_RESCHEDULE         1                               /* must return to user mode and re-enter */
90177 +#define UTS_UNIMP_INSTR                2                               /* unimplemented thread instruction */
90178 +#define UTS_EXECUTE_PACKET     3                               /* iproc trap needs packet executing */
90179 +#define UTS_NETWORK_ERROR_TRAP 4                               /* network error on this trap */
90180 +#define UTS_NETWORK_ERROR_MSG  5                               /* network error message  */
90181 +#define UTS_NETWORK_ERROR_TIMER        6                               /* network error timer expired */
90182 +
90183 +#define UTS_EFAULT             -1                              /* failed to copyout trap */
90184 +#define UTS_INVALID_ADDR       -2                              /* all -ve codes mean trap could not be resolved. */
90185 +#define UTS_INVALID_VPROC      -3
90186 +#define UTS_INVALID_COMMAND    -4
90187 +#define UTS_BAD_TRAP           -5
90188 +#define UTS_ALIGNMENT_ERROR    -6
90189 +#define UTS_QUEUE_OVERFLOW     -7
90190 +#define UTS_QUEUE_ERROR                -8
90191 +#define UTS_INVALID_TRANS      -9
90192 +#define UTS_PERMISSION_DENIED  -10
90193 +#define UTS_CPROC_ERROR                -11
90194 +#define UTS_INVALID_COOKIE     -12
90195 +#define UTS_NETERR_ERROR       -13
90196 +
90197 +/* "special" values for registering handlers */
90198 +#define UTS_ALL_TRAPS          -9999
90199 +
90200 +/* value for ut_proc */
90201 +#define UTS_NOPROC             0
90202 +#define UTS_EPROC              1
90203 +#define UTS_CPROC              2
90204 +#define UTS_DPROC              3
90205 +#define UTS_TPROC              4
90206 +#define UTS_IPROC              5
90207 +#define UTS_NETERR_MSG         6
90208 +
90209 +/* unimplemented trap numbers for thread processor */
90210 +#define ELAN4_T_TRAP_INSTR(t)  (0x80202000 | ((t) & 0xFF))
90211 +
90212 +#define ELAN4_T_SYSCALL_TRAP   1
90213 +#  define ELAN4_T_OPEN         0
90214 +#  define ELAN4_T_WRITE                1
90215 +#  define ELAN4_T_READ         2
90216 +#  define ELAN4_T_IOCTL                3
90217 +#  define ELAN4_T_LSEEK                4
90218 +#  define ELAN4_T_POLL         5
90219 +#  define ELAN4_T_CLOSE                6
90220 +#  define ELAN4_T_KILL         7
90221 +#  define ELAN4_T_MMAP         8
90222 +#  define ELAN4_T_MUNMAP       9
90223 +#  define ELAN4_T_ABORT                100
90224 +#  define ELAN4_T_DEBUG                101
90225 +#  define ELAN4_T_REGDUMP      102
90226 +
90227 +#define ELAN4_T_REGDUMP_TRAP   2
90228 +
90229 +#define ELAN4_T_LIBELAN_TRAP   3
90230 +#  define ELAN4_T_TPORT_NEWBUF 0
90231 +#  define ELAN4_T_TPORT_GC     1
90232 +#  define ELAN4_T_TPORT_DEBUG  2
90233 +
90234 +#define ELAN4_T_ALLOC_TRAP     4
90235 +#  define ELAN4_T_ALLOC_ELAN   0
90236 +#  define ELAN4_T_ALLOC_MAIN   1
90237 +#  define ELAN4_T_FREE_ELAN    2
90238 +#  define ELAN4_T_FREE_MAIN    3
90239 +
90240 +/* reserved main interrupt cookies */
90241 +#define ELAN4_INT_COOKIE_DDCQ  0
90242 +
90243 +/*
90244 + * Local variables:
90245 + * c-file-style: "stroustrup"
90246 + * End:
90247 + */
90248 +#endif /* __ELAN4_USERTRAP_H */
90249 Index: linux-2.4.21/include/elan4/xsdram.h
90250 ===================================================================
90251 --- linux-2.4.21.orig/include/elan4/xsdram.h    2004-02-23 16:02:56.000000000 -0500
90252 +++ linux-2.4.21/include/elan4/xsdram.h 2005-06-01 23:12:54.747416608 -0400
90253 @@ -0,0 +1,59 @@
90254 +/*
90255 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90256 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90257 + *
90258 + *    For licensing information please see the supplied COPYING file
90259 + *
90260 + */
90261 +
90262 +#ifndef __ELAN4_XSDRAM_H
90263 +#define __ELAN4_XSDRAM_H
90264 +
90265 +#ident "@(#)$Id: xsdram.h,v 1.13 2004/03/05 12:32:04 jon Exp $ $Name: QSNETMODULES-4-30_20050128 $"
90266 +/*      $Source: /cvs/master/quadrics/elan4hdr/xsdram.h,v $*/
90267 +
90268 +/* SAMSUNG K4H281638D-TCB3 */
90269 +
90270 +#define SDRAM_tRCF_1_SH         0
90271 +#define SDRAM_tRP_1_SH          4
90272 +#define SDRAM_tRCD_SH           8
90273 +#define SDRAM_tRRD_SH           12
90274 +#define SDRAM_tEndWr_SH         16
90275 +#define SDRAM_tEndRd_SH         20
90276 +#define SDRAM_Burst_SH          24
90277 +#define SDRAM_CL_SH             28
90278 +#define SDRAM_DsblBypass       (1ULL << 31)
90279 +#define SDRAM_RefreshRate_SH    32
90280 +#define SDRAM_RamSize_SH        34
90281 +#define SDRAM_ReadLtncy_1_SH    36
90282 +#define SDRAM_RdOffset_SH       40
90283 +#define SDRAM_FlightDelay_SH    42
90284 +
90285 +#define SDRAM_ENABLE_ECC       (1ULL << 44) // Enables error detecting on the ECC.
90286 +#define SDRAM_SDRAM_TESTING    (1ULL << 45) // Switches to test mode for checking ECC data bits
90287 +#define SDRAM_SETUP            (1ULL << 46) // Writes SDram control reg when set. Also starts
90288 +
90289 +#define SDRAM_CS_MODE0          0ULL         // 64Mbit, 128Mbit, 256Mbit, 512Mbit or 1Gbit (16-bit output)
90290 +#define SDRAM_CS_MODE1          1ULL         // 64Mbit, 128Mbit, 256Mbit or 512Mbit (8-bit output)
90291 +#define SDRAM_CS_MODE2          2ULL         // 2Gbit (16-bit output) or 1Gbit (8-bit output)
90292 +#define SDRAM_CS_MODE3          3ULL         // 4Gbit (16-bit output) or 2Gbit (8-bit output)
90293 +
90294 +#if defined(LINUX) && !defined(CONFIG_MPSAS)
90295 +#define SDRAM_STARTUP_VALUE   ((0xbULL << SDRAM_tRCF_1_SH)      | (0x2ULL << SDRAM_tRP_1_SH)       | \
90296 +                               (0x3ULL << SDRAM_tRCD_SH)        | (0x2ULL << SDRAM_tRRD_SH)        | \
90297 +                               (0xaULL << SDRAM_tEndWr_SH)      | (0x6ULL << SDRAM_tEndRd_SH)      | \
90298 +                               (0x8ULL << SDRAM_Burst_SH)       | (0x6ULL << SDRAM_CL_SH)          | \
90299 +                               (0x2ULL << SDRAM_RefreshRate_SH) | (0x3ULL << SDRAM_RamSize_SH)     | \
90300 +                               (0x1ULL << SDRAM_RdOffset_SH)    | (0x1ULL << SDRAM_FlightDelay_SH) | \
90301 +                               (0x4ULL << SDRAM_ReadLtncy_1_SH))
90302 +#else
90303 +#define SDRAM_STARTUP_VALUE   ((0xbULL << SDRAM_tRCF_1_SH)      | (0x2ULL << SDRAM_tRP_1_SH)       | \
90304 +                               (0x3ULL << SDRAM_tRCD_SH)        | (0x2ULL << SDRAM_tRRD_SH)        | \
90305 +                               (0xaULL << SDRAM_tEndWr_SH)      | (0x6ULL << SDRAM_tEndRd_SH)      | \
90306 +                               (0x8ULL << SDRAM_Burst_SH)       | (0x6ULL << SDRAM_CL_SH)          | \
90307 +                               (0x0ULL << SDRAM_RefreshRate_SH) | (0x0ULL << SDRAM_RamSize_SH)     | \
90308 +                               (0x1ULL << SDRAM_RdOffset_SH)    | (0x1ULL << SDRAM_FlightDelay_SH) | \
90309 +                               (0x4ULL << SDRAM_ReadLtncy_1_SH) | SDRAM_ENABLE_ECC | SDRAM_SETUP)
90310 +#endif
90311 +
90312 +#endif /* __ELAN4_XSDRAM_H */
90313 Index: linux-2.4.21/include/jtag/jtagio.h
90314 ===================================================================
90315 --- linux-2.4.21.orig/include/jtag/jtagio.h     2004-02-23 16:02:56.000000000 -0500
90316 +++ linux-2.4.21/include/jtag/jtagio.h  2005-06-01 23:12:54.747416608 -0400
90317 @@ -0,0 +1,106 @@
90318 +/*
90319 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90320 + *
90321 + *    For licensing information please see the supplied COPYING file
90322 + *
90323 + */
90324 +
90325 +#ident "$Id: jtagio.h,v 1.7.8.1 2005/01/27 15:21:47 lee Exp $"
90326 +/*             $Source: /cvs/master/quadrics/jtagmod/jtagio.h,v $*/
90327 +
90328 +
90329 +#ifndef __SYS_JTAGMOD_H
90330 +#define __SYS_JTAGMOD_H
90331 +
90332 +#ifdef __cplusplus
90333 +extern "C" {
90334 +#endif
90335 +
90336 +#define JTAG_MAX_CHIPS         8
90337 +#define JTAG_MAX_INSTR_LEN     8
90338 +#define JTAG_MAX_BITS           (JTAG_MAX_CHIPS * JTAG_MAX_INSTR_LEN)
90339 +#define JTAG_MAX_DATA_LEN      1024
90340 +
90341 +#define JTAG_BYPASS            0xFF
90342 +
90343 +#define I2C_ADDR_LEN           7                               /* 7 bits of address */
90344 +#define I2C_DATA_LEN           8                               /* 8 bits of data */
90345 +#define I2C_MAX_DATA_LEN       9                               /* and up to 9 bytes worth */
90346 +
90347 +#define BITS_PER_BYTE          8
90348 +#define JTAG_NBYTES(nbits)     (((nbits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
90349 +#define JTAG_BIT(v, num)       (((v)[(num) / BITS_PER_BYTE] >> ((num) % BITS_PER_BYTE)) & 1)
90350 +#define JTAG_SET_BIT(v, num)   ((v)[(num) / BITS_PER_BYTE] |= (1 << ((num) % BITS_PER_BYTE)))
90351 +#define JTAG_CLR_BIT(v, num)   ((v)[(num) / BITS_PER_BYTE] &= ~(1 << ((num) % BITS_PER_BYTE)))
90352 +
90353 +#define RING_CLOCK_CARD                (0x3D)
90354 +#define RING_CLOCK_SHIFT       (0x3E)
90355 +#define RING_JTAG_LOOPBACK     (0x3F)
90356 +#define RING_MAX               (0x40)
90357 +
90358 +#define RING_QUAD_BIT          (0x40)
90359 +#define RING_I2C_BIT           (0x80)
90360 +
90361 +#define VALID_JTAG_RING(ring)  ((ring) < 0x20 || (ring) == RING_JTAG_LOOPBACK)
90362 +#define VALID_I2C_RING(ring)   ((ring) < 0x20 || (ring) == RING_CLOCK_CARD)
90363 +
90364 +
90365 +typedef struct jtag_value
90366 +{
90367 +    u_char     bytes[JTAG_NBYTES(JTAG_MAX_DATA_LEN)];
90368 +} JTAG_VALUE;
90369 +
90370 +/* arguments to JTAG_SHIFT_IR/JTAG_SHIFT_DR */
90371 +typedef struct jtag_reset_args
90372 +{
90373 +    u_int      ring;
90374 +} JTAG_RESET_ARGS;
90375 +
90376 +typedef struct jtag_shift_args
90377 +{
90378 +    u_int      ring;
90379 +    u_int      nbits;
90380 +    u_char     *value;
90381 +} JTAG_SHIFT_ARGS;
90382 +
90383 +typedef struct i2c_args
90384 +{
90385 +    u_int      ring;
90386 +    u_int      device;
90387 +    u_int      reg;
90388 +    u_int      count;
90389 +    u_int      ok;
90390 +    u_char     data[I2C_MAX_DATA_LEN];
90391 +} I2C_ARGS;
90392 +
90393 +/* values for 'ok' - the return value from i2c_xx functions */
90394 +#define I2C_OP_SUCCESS         0
90395 +#define I2C_OP_ERROR           1
90396 +#define I2C_OP_NOT_IDLE                2
90397 +#define I2C_OP_NO_DEVICE       3
90398 +#define I2C_OP_WRITE_TO_BIG    4
90399 +#define I2C_OP_BAD_RESOURCE    5
90400 +
90401 +typedef struct i2c_clock_shift_args
90402 +{
90403 +    u_int      t;
90404 +    u_int      n;
90405 +    u_int      m;
90406 +} I2C_CLOCK_SHIFT_ARGS;
90407 +
90408 +#define JTAG_RESET             _IOWR('j', '0', JTAG_RESET_ARGS)
90409 +#define JTAG_SHIFT_IR          _IOWR('j', '1', JTAG_SHIFT_ARGS)
90410 +#define JTAG_SHIFT_DR          _IOWR('j', '2', JTAG_SHIFT_ARGS)
90411 +
90412 +#define I2C_CLOCK_SHIFT                _IOWR('j', '4', I2C_CLOCK_SHIFT_ARGS)
90413 +#define I2C_WRITE              _IOWR('j', '5', I2C_ARGS)
90414 +#define I2C_READ               _IOWR('j', '6', I2C_ARGS)
90415 +#define I2C_WRITEREG           _IOWR('j', '7', I2C_ARGS)
90416 +#define I2C_READREG            _IOWR('j', '8', I2C_ARGS)
90417 +
90418 +
90419 +#ifdef __cplusplus
90420 +}
90421 +#endif
90422 +
90423 +#endif /* __SYS_JTAGMOD_H */
90424 Index: linux-2.4.21/include/linux/coproc.h
90425 ===================================================================
90426 --- linux-2.4.21.orig/include/linux/coproc.h    2004-02-23 16:02:56.000000000 -0500
90427 +++ linux-2.4.21/include/linux/coproc.h 2005-06-01 23:12:54.748416456 -0400
90428 @@ -0,0 +1,206 @@
90429 +/*
90430 + *    Copyright (C) 2002, 2003 Quadrics Ltd.
90431 + *
90432 + *    This program is free software; you can redistribute it and/or modify
90433 + *    it under the terms of the GNU General Public License as published by
90434 + *    the Free Software Foundation; either version 2 of the License, or
90435 + *    (at your option) any later version.
90436 + *
90437 + *    This program is distributed in the hope that it will be useful,
90438 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
90439 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
90440 + *    GNU General Public License for more details.
90441 + *
90442 + *    You should have received a copy of the GNU General Public License
90443 + *    along with this program; if not, write to the Free Software
90444 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
90445 + *
90446 + *
90447 + */
90448 +
90449 +/*
90450 + * Callbacks for coprocessor page table updates.
90451 + */
90452 +
90453 +#ifndef __LINUX_COPROC_H__
90454 +#define __LINUX_COPROC_H__
90455 +
90456 +#include <linux/sched.h>
90457 +#include <linux/mm.h>
90458 +#include <linux/list.h>
90459 +#include <linux/slab.h> /* kmalloc */
90460 +
90461 +typedef struct coproc_ops_struct {
90462 +       struct list_head list;
90463 +       void *arg;
90464 +
90465 +       void (*release)(void *arg, struct mm_struct *mm);
90466 +       void (*sync_range)(void *arg, struct mm_struct *mm, unsigned long start, unsigned long end);
90467 +       void (*invalidate_range)(void *arg, struct mm_struct *mm, unsigned long start, unsigned long end);
90468 +       void (*update_range)(void *arg, struct mm_struct *mm, unsigned long start, unsigned long end);
90469 +
90470 +       void (*change_protection)(void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot);
90471 +
90472 +       void (*sync_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
90473 +       void (*invalidate_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
90474 +       void (*update_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
90475 +
90476 +} coproc_ops_t;
90477 +
90478 +extern __inline__ void 
90479 +register_coproc_ops(struct mm_struct *mm, coproc_ops_t *cp)
90480 +{
90481 +       if (mm->coproc_ops == NULL) {
90482 +               mm->coproc_ops = (struct list_head *)
90483 +                   kmalloc(sizeof(struct list_head), GFP_KERNEL);
90484 +               INIT_LIST_HEAD(mm->coproc_ops);
90485 +       }
90486 +       list_add(&cp->list, mm->coproc_ops);
90487 +}
90488 +
90489 +extern __inline__ void 
90490 +unregister_coproc_ops(struct mm_struct *mm, coproc_ops_t *cp)
90491 +{
90492 +       list_del(&cp->list);
90493 +       if (list_empty(mm->coproc_ops)) {
90494 +               kfree(mm->coproc_ops);
90495 +               mm->coproc_ops = NULL;
90496 +       }
90497 +}
90498 +
90499 +extern __inline__ void 
90500 +coproc_release(struct mm_struct *mm)
90501 +{
90502 +       struct list_head *head = mm->coproc_ops;
90503 +        struct list_head *lp;
90504 +        coproc_ops_t *cp;
90505 +
90506 +       if (head) {
90507 +               while (! list_empty(head)) {
90508 +                   lp = head->next;
90509 +                   cp = list_entry(lp, coproc_ops_t, list);
90510 +                   
90511 +                   list_del (&cp->list);
90512 +                   
90513 +                   if (cp->release)
90514 +                       cp->release(cp->arg, mm);
90515 +               }
90516 +               kfree(head);
90517 +               mm->coproc_ops = NULL;
90518 +       }
90519 +}
90520 +
90521 +extern __inline__ void 
90522 +coproc_sync_range(struct mm_struct *mm, unsigned long start, unsigned long end)
90523 +{
90524 +       struct list_head *head = mm->coproc_ops;
90525 +        struct list_head *lp;
90526 +        coproc_ops_t *cp;
90527 +
90528 +       if (head) {
90529 +               for (lp = head->next; lp != head; lp = lp->next) {
90530 +                       cp = list_entry(lp, coproc_ops_t, list);
90531 +                       if (cp->sync_range)
90532 +                               cp->sync_range(cp->arg, mm, start, end);
90533 +               }               
90534 +       }
90535 +}
90536 +
90537 +extern __inline__ void 
90538 +coproc_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
90539 +{
90540 +       struct list_head *head = mm->coproc_ops;
90541 +        struct list_head *lp;
90542 +        coproc_ops_t *cp;
90543 +
90544 +       if (head) {
90545 +               for (lp = head->next; lp != head; lp = lp->next) {
90546 +                       cp = list_entry(lp, coproc_ops_t, list);
90547 +                       if (cp->invalidate_range)
90548 +                               cp->invalidate_range(cp->arg, mm, start, end);
90549 +               }               
90550 +       }
90551 +}
90552 +
90553 +extern __inline__ void 
90554 +coproc_update_range(struct mm_struct *mm, unsigned long start, unsigned long end)
90555 +{
90556 +       struct list_head *head = mm->coproc_ops;
90557 +        struct list_head *lp;
90558 +        coproc_ops_t *cp;
90559 +
90560 +       if (head) {
90561 +               for (lp = head->next; lp != head; lp = lp->next) {
90562 +                       cp = list_entry(lp, coproc_ops_t, list);
90563 +                       if (cp->update_range)
90564 +                               cp->update_range(cp->arg, mm, start, end);
90565 +               }               
90566 +       }
90567 +}
90568 +
90569 +extern __inline__ void 
90570 +coproc_change_protection (struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot)
90571 +{
90572 +       struct list_head *head = mm->coproc_ops;
90573 +        struct list_head *lp;
90574 +        coproc_ops_t *cp;
90575 +
90576 +       if (head) {
90577 +               for (lp = head->next; lp != head; lp = lp->next) {
90578 +                       cp = list_entry(lp, coproc_ops_t, list);
90579 +                       if (cp->change_protection)
90580 +                               cp->change_protection(cp->arg, mm, start, end, newprot);
90581 +               }               
90582 +       }
90583 +}
90584 +
90585 +extern __inline__ void 
90586 +coproc_sync_page(struct vm_area_struct *vma, unsigned long addr)
90587 +{
90588 +       struct list_head *head = vma->vm_mm->coproc_ops;
90589 +        struct list_head *lp;
90590 +        coproc_ops_t *cp;
90591 +
90592 +       if (head) {
90593 +               for (lp = head->next; lp != head; lp = lp->next) {
90594 +                       cp = list_entry(lp, coproc_ops_t, list);
90595 +                       if (cp->sync_page)
90596 +                               cp->sync_page(cp->arg, vma, addr);
90597 +               }               
90598 +       }
90599 +}
90600 +
90601 +extern __inline__ void 
90602 +coproc_invalidate_page(struct vm_area_struct *vma, unsigned long addr)
90603 +{
90604 +       struct list_head *head = vma->vm_mm->coproc_ops;
90605 +        struct list_head *lp;
90606 +        coproc_ops_t *cp;
90607 +
90608 +       if (head) {
90609 +               for (lp = head->next; lp != head; lp = lp->next) {
90610 +                       cp = list_entry(lp, coproc_ops_t, list);
90611 +                       if (cp->invalidate_page)
90612 +                               cp->invalidate_page(cp->arg, vma, addr);
90613 +               }               
90614 +       }
90615 +}
90616 +
90617 +extern __inline__ void 
90618 +coproc_update_page(struct vm_area_struct *vma, unsigned long addr)
90619 +{
90620 +       struct list_head *head = vma->vm_mm->coproc_ops;
90621 +        struct list_head *lp;
90622 +        coproc_ops_t *cp;
90623 +
90624 +       if (head) {
90625 +               for (lp = head->next; lp != head; lp = lp->next) {
90626 +                       cp = list_entry(lp, coproc_ops_t, list);
90627 +                       if (cp->update_page)
90628 +                               cp->update_page(cp->arg, vma, addr);
90629 +               }               
90630 +       }
90631 +}
90632 +
90633 +
90634 +#endif /* __LINUX_COPROC_H__ */
90635 Index: linux-2.4.21/include/linux/ptrack.h
90636 ===================================================================
90637 --- linux-2.4.21.orig/include/linux/ptrack.h    2004-02-23 16:02:56.000000000 -0500
90638 +++ linux-2.4.21/include/linux/ptrack.h 2005-06-01 23:12:54.748416456 -0400
90639 @@ -0,0 +1,53 @@
90640 +/*
90641 + *    Copyright (C) 2000  Regents of the University of California
90642 + *
90643 + *    This program is free software; you can redistribute it and/or modify
90644 + *    it under the terms of the GNU General Public License as published by
90645 + *    the Free Software Foundation; either version 2 of the License, or
90646 + *    (at your option) any later version.
90647 + *
90648 + *    This program is distributed in the hope that it will be useful,
90649 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
90650 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
90651 + *    GNU General Public License for more details.
90652 + *
90653 + *    You should have received a copy of the GNU General Public License
90654 + *    along with this program; if not, write to the Free Software
90655 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
90656 + *
90657 + * Derived from exit_actn.c by
90658 + *    Copyright (C) 2003 Quadrics Ltd.
90659 + *
90660 + */
90661 +#ifndef __LINUX_PTRACK_H
90662 +#define __LINUX_PTRACK_H
90663 +
90664 +/* 
90665 + * Process tracking - this allows a module to keep track of processes
90666 + * in order that it can manage all tasks derived from a single process.
90667 + */
90668 +
90669 +#define PTRACK_PHASE_CLONE             1
90670 +#define PTRACK_PHASE_CLONE_FAIL        2
90671 +#define PTRACK_PHASE_EXEC              3
90672 +#define PTRACK_PHASE_EXIT              4
90673 +
90674 +typedef int (*ptrack_callback_t)(void *arg, int phase, struct task_struct *child);
90675 +
90676 +#define PTRACK_FINISHED                0
90677 +#define PTRACK_INNHERIT                1
90678 +#define PTRACK_DENIED          2
90679 +
90680 +struct ptrack_desc {
90681 +       struct list_head        link;
90682 +       ptrack_callback_t       callback;
90683 +       void                   *arg;
90684 +};
90685 +
90686 +extern int     ptrack_register (ptrack_callback_t callback, void *arg);
90687 +extern void    ptrack_deregister (ptrack_callback_t callback, void *arg);
90688 +extern int     ptrack_registered (ptrack_callback_t callback, void *arg);
90689 +
90690 +extern int     ptrack_call_callbacks (int phase, struct task_struct *child);
90691 +
90692 +#endif /* __LINUX_PTRACK_H */
90693 Index: linux-2.4.21/include/linux/sched.h
90694 ===================================================================
90695 --- linux-2.4.21.orig/include/linux/sched.h     2005-06-01 22:52:05.000000000 -0400
90696 +++ linux-2.4.21/include/linux/sched.h  2005-06-01 23:12:54.749416304 -0400
90697 @@ -30,6 +30,8 @@
90698  #include <linux/pid.h>
90699  #include <linux/kernel_stat.h>
90700  
90701 +#include <linux/list.h>
90702 +
90703  struct exec_domain;
90704  extern int exec_shield;
90705  extern int exec_shield_randomize;
90706 @@ -322,6 +324,9 @@
90707  #endif
90708         /* Architecture-specific MM context */
90709         mm_context_t context;
90710 +       
90711 +       /* Support page table updates on adapter cards with on-board MMU */
90712 +       struct list_head *coproc_ops;
90713  
90714         /* coredumping support */
90715         int core_waiters;
90716 @@ -342,6 +347,7 @@
90717         mmap_sem:       __RWSEM_INITIALIZER(name.mmap_sem), \
90718         page_table_lock: SPIN_LOCK_UNLOCKED,            \
90719         mmlist:         LIST_HEAD_INIT(name.mmlist),    \
90720 +       coproc_ops:     NULL,                           \
90721         rlimit_rss:     RLIM_INFINITY,                  \
90722  }
90723  
90724 @@ -572,6 +578,9 @@
90725  /* context-switch lock */
90726         spinlock_t switch_lock;
90727  
90728 +/* process tracking callbacks */
90729 +       struct list_head ptrack_list;
90730 +
90731  /* journalling filesystem info */
90732         void *journal_info;
90733  
90734 @@ -740,6 +749,7 @@
90735      blocked:           {{0}},                                          \
90736      alloc_lock:                SPIN_LOCK_UNLOCKED,                             \
90737      switch_lock:       SPIN_LOCK_UNLOCKED,                             \
90738 +    ptrack_list:       LIST_HEAD_INIT(tsk.ptrack_list),                \
90739      journal_info:      NULL,                                           \
90740      real_stack:                &tsk,                                           \
90741  }
90742 Index: linux-2.4.21/include/qsnet/autoconf.h
90743 ===================================================================
90744 --- linux-2.4.21.orig/include/qsnet/autoconf.h  2004-02-23 16:02:56.000000000 -0500
90745 +++ linux-2.4.21/include/qsnet/autoconf.h       2005-06-01 23:12:54.750416152 -0400
90746 @@ -0,0 +1,38 @@
90747 +/*
90748 + *    Copyright (c) 2004 by Quadrics Ltd.
90749 + *
90750 + *    For licensing information please see the supplied COPYING file
90751 + *
90752 + * NOTE: This file has been automatically generated:
90753 + *       node   : milano
90754 + *       kernel : /src/linux/qsnet/linux-2.4.21
90755 + *       date   : Wed May  4 18:24:23 EDT 2005
90756 + *
90757 + */
90758 +
90759 +#include <linux/version.h>
90760 +#undef NO_RMAP
90761 +#define        AC
90762 +#undef NO_O1_SCHED
90763 +#undef NO_NPTL
90764 +#define        NO_ABI
90765 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
90766 +#define        PROCESS_ACCT
90767 +#endif
90768 +#undef RSS_ATOMIC
90769 +#undef NO_COPROC
90770 +#define        NO_IOPROC
90771 +#undef NO_PTRACK
90772 +#define        NO_PANIC_NOTIFIER
90773 +#undef NO_SHM_CLEANUP
90774 +#undef NO_PDE
90775 +
90776 +
90777 +#define        CONFIG_EIP
90778 +#define        CONFIG_ELAN
90779 +#define        CONFIG_ELAN3
90780 +#define        CONFIG_ELAN4
90781 +#define        CONFIG_EP
90782 +#define        CONFIG_JTAG
90783 +#define        CONFIG_QSNET
90784 +#define        CONFIG_RMS
90785 Index: linux-2.4.21/include/qsnet/condvar.h
90786 ===================================================================
90787 --- linux-2.4.21.orig/include/qsnet/condvar.h   2004-02-23 16:02:56.000000000 -0500
90788 +++ linux-2.4.21/include/qsnet/condvar.h        2005-06-01 23:12:54.750416152 -0400
90789 @@ -0,0 +1,140 @@
90790 +/*
90791 + *    Copyright (C) 2000  Regents of the University of California
90792 + *
90793 + *    This program is free software; you can redistribute it and/or modify
90794 + *    it under the terms of the GNU General Public License as published by
90795 + *    the Free Software Foundation; either version 2 of the License, or
90796 + *    (at your option) any later version.
90797 + *
90798 + *    This program is distributed in the hope that it will be useful,
90799 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
90800 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
90801 + *    GNU General Public License for more details.
90802 + *
90803 + *    You should have received a copy of the GNU General Public License
90804 + *    along with this program; if not, write to the Free Software
90805 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
90806 + *
90807 + */
90808 +
90809 +#if    !defined(_LINUX_CONDVAR_H)
90810 +#define        _LINUX_CONDVAR_H
90811 +
90812 +#if    defined(__KERNEL__)
90813 +
90814 +#include <linux/list.h>
90815 +#include <qsnet/debug.h>
90816 +
90817 +#define CV_RET_SIGPENDING      0
90818 +#define CV_RET_TIMEOUT         (-1)
90819 +#define CV_RET_NORMAL          1
90820 +
90821 +struct kcondvar_task {
90822 +       struct task_struct      *task;          /* need to wrap task in this */
90823 +       struct list_head        list;           /*   to thread as a list */
90824 +       int                     blocked;
90825 +};
90826 +
90827 +typedef struct {
90828 +       struct list_head        task_list;      /* list of kcondvar_task's */
90829 +} kcondvar_t;
90830 +
90831 +#define kcondvar_wait(c,l,fl)                  debug_kcondvar_wait(c, l, fl, 0,  TASK_UNINTERRUPTIBLE)
90832 +#define kcondvar_waitsig(c,l,fl)               debug_kcondvar_wait(c, l, fl, 0,  TASK_INTERRUPTIBLE)
90833 +#define kcondvar_timedwait(c,l,fl,to)          debug_kcondvar_wait(c, l, fl, to, TASK_UNINTERRUPTIBLE)
90834 +#define kcondvar_timedwaitsig(c,l,fl,to)       debug_kcondvar_wait(c, l, fl, to, TASK_INTERRUPTIBLE)
90835 +#define kcondvar_wakeupone(c,l)                        kcondvar_wakeup(c, l, 0)
90836 +#define kcondvar_wakeupall(c,l)                        kcondvar_wakeup(c, l, 1)
90837
90838 +extern __inline__ void
90839 +kcondvar_init(kcondvar_t *c)
90840 +{
90841 +       INIT_LIST_HEAD(&c->task_list);
90842 +}
90843 +
90844 +extern __inline__ void
90845 +kcondvar_destroy(kcondvar_t *c)
90846 +{
90847 +       ASSERT(list_empty(&c->task_list));
90848 +}
90849 +
90850 +/*
90851 + * We thread a struct kcondvar_task, allocated on the stack, onto the kcondvar_t's
90852 + * task_list, and take it off again when we wake up.
90853 + */
90854 +extern __inline__ int
90855 +debug_kcondvar_wait(kcondvar_t *c, spinlock_t *l, unsigned long *fl, long tmo, int state)
90856 +{
90857 +       struct kcondvar_task cvt;
90858 +       int ret = CV_RET_NORMAL;
90859 +
90860 +       ASSERT(!in_interrupt());                /* we can block */
90861 +       ASSERT(SPINLOCK_HELD(l));               /* enter holding lock */
90862 +
90863 +       cvt.task = current;
90864 +       cvt.blocked = 1;
90865 +       list_add(&cvt.list, &c->task_list);
90866 +       do {
90867 +              /* Note: we avoid using TASK_UNINTERRUPTIBLE here because avenrun()
90868 +               * (linux/kernel/timer.c:calc_load())
90869 +               * computation treats it like TASK_RUNNABLE hence creates false high
90870 +               * load averages when we create kernel threads.
90871 +               * The cvt.blocked flag distinguishes a signal wakeup from a kcondvar_wakeup.
90872 +               *
90873 +               * However, if we do take a signal we could end up busily spinning here, if
90874 +               * we ignore it (state == TASK_UNINTERRUPTIBLE) so once we see a signal
90875 +               * pending we do sleep TASK_UNINTERRUPTIBLE to stop a busy spin.
90876 +               * I have now blocked all signals for kernel threads to prevent this
90877 +               * happening but other users of kcondvar_wait may still hit this spin.
90878 +               */
90879 +               set_current_state (signal_pending(current) ? state : TASK_INTERRUPTIBLE);
90880 +
90881 +               if (fl)
90882 +                   spin_unlock_irqrestore(l, *fl);
90883 +               else
90884 +                   spin_unlock(l);
90885 +               if (tmo) {
90886 +                       if (tmo <= jiffies || !schedule_timeout(tmo - jiffies))
90887 +                               ret = CV_RET_TIMEOUT;
90888 +               } else
90889 +                       schedule();
90890 +               if (fl)
90891 +                   spin_lock_irqsave (l, *fl);
90892 +               else
90893 +                   spin_lock(l);
90894 +               
90895 +               /* signal_pending - Only exit the loop if the user was waiting TASK_INTERRUPTIBLE */
90896 +               if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
90897 +                       ret = CV_RET_SIGPENDING;
90898 +
90899 +       } while (cvt.blocked && ret == CV_RET_NORMAL);
90900 +       list_del(&cvt.list);
90901 +
90902 +       /* Reset task state in case we didn't sleep above */
90903 +       set_current_state (TASK_RUNNING);
90904 +
90905 +       return ret;                             /* return holding lock */
90906 +}
90907 +
90908 +extern __inline__ void
90909 +kcondvar_wakeup(kcondvar_t *c, spinlock_t *l, int wakeall)
90910 +{
90911 +       struct list_head *lp;
90912 +       struct kcondvar_task *cvtp;
90913 +
90914 +       ASSERT(SPINLOCK_HELD(l));                       /* already holding lock */
90915 +       for (lp = c->task_list.next; lp != &c->task_list; lp = lp->next) {
90916 +               cvtp = list_entry(lp, struct kcondvar_task, list);
90917 +               if (cvtp->blocked) {
90918 +                       cvtp->blocked = 0;
90919 +                       /* wake_up_process added to kernel/ksyms.c */
90920 +                       wake_up_process(cvtp->task); 
90921 +                       if (!wakeall)
90922 +                               break;
90923 +               }
90924 +       }
90925 +}                                              /* return still holding lock */
90926 +
90927 +
90928 +#endif /* __KERNEL__ */
90929 +#endif /* _LINUX_CONDVAR_H */
90930 Index: linux-2.4.21/include/qsnet/config.h
90931 ===================================================================
90932 --- linux-2.4.21.orig/include/qsnet/config.h    2004-02-23 16:02:56.000000000 -0500
90933 +++ linux-2.4.21/include/qsnet/config.h 2005-06-01 23:12:54.751416000 -0400
90934 @@ -0,0 +1,195 @@
90935 +/*
90936 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90937 + *
90938 + *    For licensing information please see the supplied COPYING file
90939 + *
90940 + */
90941 +
90942 +#ifndef _QSNET_CONFIG_H
90943 +#define _QSNET_CONFIG_H
90944 +
90945 +#ident "$Id: config.h,v 1.23 2003/07/24 21:31:19 robin Exp $"
90946 +/*      $Source: /cvs/master/quadrics/qsnet/config.h,v $*/
90947 +
90948 +
90949 +/*
90950 + * QSNET standard defines :
90951 + *
90952 + *   Target operating system defines
90953 + *             SOLARIS
90954 + *             TRU64UNIX/DIGITAL_UNIX
90955 + *             LINUX
90956 + *
90957 + *   Target processor defines
90958 + *             SPARC
90959 + *             ALPHA
90960 + *             I386
90961 + *             IA64
90962 + *             X86_64
90963 + *
90964 + *   Byte order defines
90965 + *             __LITTLE_ENDIAN__
90966 + *             __BIG_ENDIAN__
90967 + *
90968 + *   Data size defines
90969 + *             _LP64                   - LP64 - long/pointer is 64 bits
90970 + *             _ILP32                  - LP32 - long/pointer is 32 bits
90971 + *
90972 + *   Elan defines for main processor
90973 + *             __MAIN_LITTLE_ENDIAN__  - main byte order (for thread code)
90974 + *             __MAIN_BIG_ENDIAN__
90975 + *             _MAIN_LP64              - main long size (for thread code)
90976 + *             _MAIN_ILP32
90977 + *
90978 + *   Compiling for kernel (defined in makefile)
90979 + *             _KERNEL
90980 + *
90981 + */
90982 +
90983 +#if defined(__LP64__) && !defined(_LP64)
90984 +#  define _LP64
90985 +#endif
90986 +
90987 +#if defined(__arch64__) && !defined(_LP64) && !defined(_ILP32)
90988 +#  define _LP64
90989 +#endif
90990 +
90991 +#if defined(__alpha__) && !defined(_LP64) && !defined(_ILP32)
90992 +#  define _LP64
90993 +#endif
90994 +
90995 +#if !defined(__arch64__) && !defined(_ILP32) && !defined(_LP64)
90996 +#  define _ILP32
90997 +#endif
90998 +
90999 +#if defined(__ELAN__) || defined(__ELAN3__)
91000 +
91001 +#define __LITTLE_ENDIAN__
91002 +
91003 +#if defined(__host_solaris) && defined(__host_sparc)
91004 +#define SOLARIS
91005 +#define SPARC
91006 +#define SOLARIS_SPARC
91007 +#define _MAIN_ILP32
91008 +#define __MAIN_BIG_ENDIAN__
91009 +
91010 +#elif defined(__host_osf)
91011 +#define TRU64UNIX
91012 +#define DIGITAL_UNIX
91013 +#define ALPHA
91014 +#define _MAIN_LP64
91015 +#define __MAIN_LITTLE_ENDIAN__
91016 +
91017 +#elif defined(__host_linux) && defined(__host_alpha)
91018 +#define LINUX
91019 +#define ALPHA
91020 +#define LINUX_ALPHA
91021 +#define _MAIN_LP64
91022 +#define __MAIN_LITTLE_ENDIAN__
91023 +
91024 +#elif defined(__host_linux) && defined(__host_sparc)
91025 +#define LINUX
91026 +#define SPARC
91027 +#define LINUX_SPARC
91028 +#define __MAIN_BIG_ENDIAN__
91029 +#ifdef __KERNEL__
91030 +#  define _MAIN_LP64
91031 +#else
91032 +#  define _MAIN_ILP32
91033 +#endif
91034 +
91035 +#elif defined(__host_linux) && defined(__host_i386)
91036 +#define LINUX
91037 +#define I386
91038 +#define LINUX_I386
91039 +#define _MAIN_ILP32
91040 +#define __MAIN_LITTLE_ENDIAN__
91041 +
91042 +#elif defined(__host_linux) && defined(__host_ia64)
91043 +#define LINUX
91044 +#define IA64
91045 +#define LINUX_IA64
91046 +#define _MAIN_LP64
91047 +#define __MAIN_LITTLE_ENDIAN__
91048 +
91049 +#elif defined(__host_linux) && defined(__host_x86_64)
91050 +#define LINUX
91051 +#define X86_64
91052 +#define LINUX_X86_64
91053 +#define _MAIN_LP64
91054 +#define __MAIN_LITTLE_ENDIAN__
91055 +
91056 +#else
91057 +#error Cannot determine operating system/processor architecture.
91058 +#endif
91059 +
91060 +#else /* !defined(__ELAN3__) */
91061 +
91062 +#if (defined(sun) || defined(__sun)) && defined(sparc) && !defined(__sparcv9)  /* Sun Solaris 5.6 */
91063 +#define SOLARIS
91064 +#define SPARC
91065 +#define SOLARIS_SPARC
91066 +#ifndef __BIG_ENDIAN__
91067 +#define __BIG_ENDIAN__
91068 +#endif 
91069 +
91070 +#elif (defined(sun) || defined(__sun)) && defined(sparc) && defined(__sparcv9) /* Sun Solaris 5.7 */
91071 +#define SOLARIS
91072 +#define SPARC
91073 +#define SOLARIS_SPARC
91074 +#define __BIG_ENDIAN__
91075 +
91076 +#elif defined(__osf__) && defined(__alpha)                                     /* Digital Unix */
91077 +#define TRU64UNIX
91078 +#define DIGITAL_UNIX
91079 +#define ALPHA
91080 +#define __LITTLE_ENDIAN__
91081 +
91082 +#elif (defined(linux) || defined(__linux__)) && defined(__alpha)               /* Linux Alpha */
91083 +
91084 +#define LINUX
91085 +#define ALPHA
91086 +#define LINUX_ALPHA
91087 +#define __LITTLE_ENDIAN__
91088 +
91089 +#elif (defined(linux) || defined(__linux__)) && defined(__sparc)               /* Linux Sparc */
91090 +
91091 +#define LINUX
91092 +#define SPARC
91093 +#define LINUX_SPARC
91094 +#define __BIG_ENDIAN__
91095 +
91096 +#elif (defined(linux) || defined(__linux__)) && defined(__i386)                        /* Linux i386 */
91097 +
91098 +#define LINUX
91099 +#define I386
91100 +#define LINUX_I386
91101 +#define __LITTLE_ENDIAN__
91102 +
91103 +#elif (defined(linux) || defined(__linux__)) && defined(__ia64)                        /* Linux ia64 */
91104 +
91105 +#define LINUX
91106 +#define IA64
91107 +#define LINUX_IA64
91108 +#define __LITTLE_ENDIAN__
91109 +
91110 +#elif (defined(linux) || defined(__linux__)) && defined(__x86_64)                      /* Linux x86_64 */
91111 +
91112 +#define LINUX
91113 +#define X86_64
91114 +#define LINUX_X86_64
91115 +#define __LITTLE_ENDIAN__
91116 +
91117 +#elif defined(__QNXNTO__)
91118 +#define QNX
91119 +#define I386
91120 +#define __LITTLE_ENDIAN__
91121 +#else
91122 +#error Cannot determine operating system/processor architecture.
91123 +#endif
91124 +
91125 +#endif
91126 +
91127 +#include <qsnet/workarounds.h>
91128 +
91129 +#endif /* _QSNET_CONFIG_H */
91130 Index: linux-2.4.21/include/qsnet/crwlock.h
91131 ===================================================================
91132 --- linux-2.4.21.orig/include/qsnet/crwlock.h   2004-02-23 16:02:56.000000000 -0500
91133 +++ linux-2.4.21/include/qsnet/crwlock.h        2005-06-01 23:12:54.751416000 -0400
91134 @@ -0,0 +1,207 @@
91135 +/* 
91136 + *    Copyright (C) 2000  Regents of the University of California
91137 + *
91138 + *    This program is free software; you can redistribute it and/or modify
91139 + *    it under the terms of the GNU General Public License as published by
91140 + *    the Free Software Foundation; either version 2 of the License, or
91141 + *    (at your option) any later version.
91142 + *
91143 + *    This program is distributed in the hope that it will be useful,
91144 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
91145 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
91146 + *    GNU General Public License for more details.
91147 + *
91148 + *    You should have received a copy of the GNU General Public License
91149 + *    along with this program; if not, write to the Free Software
91150 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
91151 + *
91152 + */
91153 +
91154 +/*
91155 + *    Complex - Reader/Writer locks
91156 + *    Ref: "UNIX Systems for Modern Architectures", by Curt Schimmel, 
91157 + *    sec 11.6.3. 
91158 + *
91159 + *    This implementation is based on semaphores and may not be called from 
91160 + *    interrupt handlers.
91161 + *
91162 + */
91163 +
91164 +#if    !defined(_LINUX_RWLOCK_H)
91165 +#define        _LINUX_RWLOCK_H
91166 +
91167 +#if    defined(__KERNEL__)
91168 +
91169 +typedef enum { RD, WRT, ANY } crwlock_type_t;
91170 +
91171 +#define crwlock_write_held(l) debug_crwlock_held(l, WRT, __BASE_FILE__,__LINE__)
91172 +#define crwlock_read_held(l) debug_crwlock_held(l, RD, __BASE_FILE__, __LINE__)
91173 +#define crwlock_held(l)      debug_crwlock_held(l, ANY, __BASE_FILE__, __LINE__)
91174 +
91175 +#define crwlock_read(l)             debug_crwlock_read(l, __BASE_FILE__, __LINE__)
91176 +#define crwlock_write(l)     debug_crwlock_write(l, __BASE_FILE__, __LINE__)
91177 +#define crwlock_done(l)      debug_crwlock_done(l, __BASE_FILE__, __LINE__)
91178 +
91179 +#if     defined(DEBUG_RWLOCK) && defined(__alpha__) && !defined(DEBUG_SPINLOCK)
91180 +#define DEBUG_SPINLOCK
91181 +#endif
91182 +
91183 +#include <linux/spinlock.h>
91184 +#include <asm/semaphore.h>
91185 +#include <qsnet/debug.h>
91186 +#include <qsnet/mutex.h>
91187 +#include <linux/version.h>
91188 +
91189 +#if    !defined(DEBUG_SPINLOCK)
91190 +#define debug_spin_lock(lock, file, line)       spin_lock(lock)
91191 +#endif
91192 +
91193 +typedef struct {
91194 +        spinlock_t             m_lock;         /* protects cnt fields below */
91195 +        int                     m_rdcnt;        /* # of rdrs in crit section */
91196 +        int                     m_wrcnt;        /* # of wrtrs in crit section */
91197 +        int                     m_rdwcnt;       /* # of waiting readers */
91198 +        int                     m_wrwcnt;       /* # of waiting writers */
91199 +        struct semaphore        m_rdwait;       /* sema where readers wait */
91200 +        struct semaphore        m_wrwait;       /* sema where writers wait */
91201 +        pid_t                  m_wrholder;     /* task holding write lock */
91202 +} crwlock_t;
91203
91204 +extern __inline__ void 
91205 +crwlock_init(crwlock_t *l)
91206 +{
91207 +       l->m_lock = SPIN_LOCK_UNLOCKED;
91208 +#if    LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
91209 +       l->m_rdwait = MUTEX_LOCKED;
91210 +       l->m_wrwait = MUTEX_LOCKED;
91211 +#else
91212 +       sema_init(&l->m_rdwait,0);
91213 +       sema_init(&l->m_wrwait,0);
91214 +#endif
91215 +       l->m_rdcnt = l->m_wrcnt = l->m_rdwcnt = l->m_wrwcnt = 0;
91216 +       l->m_wrholder = PID_NONE;
91217 +}
91218 +
91219 +extern __inline__ void 
91220 +crwlock_destroy(crwlock_t *l)
91221 +{
91222 +       ASSERT(l->m_rdcnt == 0 && l->m_wrcnt == 0);
91223 +}
91224 +
91225 +/*
91226 + * If a writer has the lock presently or there are writers waiting,
91227 + * then we have to wait.
91228 + */
91229 +extern __inline__ void 
91230 +debug_crwlock_read(crwlock_t *l, char *file, int line)
91231 +{
91232 +       ASSERT(!in_interrupt());
91233 +       spin_lock(&l->m_lock);
91234 +       if (l->m_wrcnt || l->m_wrwcnt) {
91235 +               l->m_rdwcnt++;
91236 +               spin_unlock(&l->m_lock);
91237 +               down(&l->m_rdwait); /* P */
91238 +       } else {
91239 +               l->m_rdcnt++;
91240 +               spin_unlock(&l->m_lock);
91241 +       }
91242 +}
91243 +
91244 +/*
91245 + * If we're the last reader, and a writer is waiting,
91246 + * then let the writer go now.
91247 + */
91248 +/* private */
91249 +extern __inline__ void 
91250 +debug_crwlock_read_done(crwlock_t *l, char *file, int line)
91251 +{
91252 +       spin_lock(&l->m_lock);
91253 +       l->m_rdcnt--;
91254 +       if (l->m_wrwcnt && l->m_rdcnt == 0) {
91255 +               l->m_wrcnt = 1;
91256 +               l->m_wrwcnt--;
91257 +               spin_unlock(&l->m_lock);
91258 +               up(&l->m_wrwait); /* V */       
91259 +               return;
91260 +       }
91261 +       spin_unlock(&l->m_lock);
91262 +}
91263 +
91264 +extern __inline__ void 
91265 +debug_crwlock_write(crwlock_t *l, char *file, int line)
91266 +{
91267 +       ASSERT(!in_interrupt());
91268 +       spin_lock(&l->m_lock);
91269 +       if (l->m_wrcnt || l->m_rdcnt) {         /* block if lock is in use */
91270 +               l->m_wrwcnt++;
91271 +               spin_unlock(&l->m_lock);
91272 +               down(&l->m_wrwait); /* P */
91273 +       } else {                                /* lock is not in use */
91274 +               l->m_wrcnt = 1;
91275 +               spin_unlock(&l->m_lock);
91276 +       }
91277 +       l->m_wrholder = current->pid;
91278 +}
91279 +
91280 +/* private */
91281 +extern __inline__ void
91282 +debug_crwlock_write_done(crwlock_t *l, char *file, int line)
91283 +{
91284 +       int rdrs;
91285 +
91286 +       spin_lock(&l->m_lock);
91287 +       l->m_wrholder = PID_NONE;
91288 +       if (l->m_rdwcnt) {                      /* let any readers go first */
91289 +               l->m_wrcnt = 0;
91290 +               rdrs = l->m_rdwcnt;
91291 +               l->m_rdcnt = rdrs;
91292 +               l->m_rdwcnt = 0;
91293 +               spin_unlock(&l->m_lock);
91294 +               while (rdrs--)
91295 +                       up(&l->m_rdwait); /* V */
91296 +       } else if (l->m_wrwcnt) {               /* or let any writer go */
91297 +               l->m_wrwcnt--;
91298 +               spin_unlock(&l->m_lock);
91299 +               up(&l->m_wrwait); /* V */
91300 +       } else {                                /* nobody waiting, unlock */
91301 +               l->m_wrcnt = 0;
91302 +               spin_unlock(&l->m_lock);
91303 +       }
91304 +}
91305 +
91306 +extern __inline__ void
91307 +debug_crwlock_done(crwlock_t *l, char *file, int line)
91308 +{
91309 +       if (l->m_wrholder == current->pid)
91310 +               debug_crwlock_write_done(l, file, line);
91311 +       else
91312 +               debug_crwlock_read_done(l, file, line);
91313 +}
91314 +
91315 +/*
91316 + * Return nonzero if lock is held
91317 + */
91318 +extern __inline__ int  
91319 +debug_crwlock_held(crwlock_t *l, crwlock_type_t t, char *file, int line)
91320 +{
91321 +       int res;
91322 +
91323 +       spin_lock(&l->m_lock);
91324 +       switch(t) {
91325 +               case RD:
91326 +                       res = l->m_rdcnt;
91327 +                       break;
91328 +               case WRT:
91329 +                       res = l->m_wrcnt;
91330 +                       break;
91331 +               case ANY:
91332 +                       res = l->m_wrcnt + l->m_rdcnt;
91333 +                       break;
91334 +       }
91335 +       spin_unlock(&l->m_lock);
91336 +
91337 +       return res;
91338 +}
91339 +
91340 +#endif /* __KERNEL__ */
91341 +#endif /* _LINUX_RWLOCK_H */
91342 Index: linux-2.4.21/include/qsnet/ctrl_linux.h
91343 ===================================================================
91344 --- linux-2.4.21.orig/include/qsnet/ctrl_linux.h        2004-02-23 16:02:56.000000000 -0500
91345 +++ linux-2.4.21/include/qsnet/ctrl_linux.h     2005-06-01 23:12:54.751416000 -0400
91346 @@ -0,0 +1,37 @@
91347 +/*
91348 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
91349 + *
91350 + *    For licensing information please see the supplied COPYING file
91351 + *
91352 + */
91353 +
91354 +#ifndef __QSNET_CTRL_LINUX_H
91355 +#define __QSNET_CTRL_LINUX_H
91356 +
91357 +#ident "$Id: ctrl_linux.h,v 1.3 2003/03/26 09:32:03 mike Exp $"
91358 +/*      $Source: /cvs/master/quadrics/qsnet/ctrl_linux.h,v $*/
91359 +
91360 +#define QSNETIO_USER_BASE              0x40
91361 +
91362 +#define QSNETIO_DEBUG_DUMP             _IO   ('e', QSNETIO_USER_BASE + 0)
91363 +
91364 +typedef struct qsnetio_debug_buffer_struct
91365 +{
91366 +       caddr_t addr; 
91367 +       size_t  len;
91368 +} QSNETIO_DEBUG_BUFFER_STRUCT;
91369 +#define QSNETIO_DEBUG_BUFFER           _IOWR ('e', QSNETIO_USER_BASE + 1, QSNETIO_DEBUG_BUFFER_STRUCT)
91370 +
91371 +typedef struct qsnetio_debug_kmem_struct
91372 +{
91373 +       void *handle;
91374 +} QSNETIO_DEBUG_KMEM_STRUCT;
91375 +#define QSNETIO_DEBUG_KMEM             _IOWR   ('e', QSNETIO_USER_BASE + 2, QSNETIO_DEBUG_KMEM_STRUCT)
91376 +
91377 +#endif /* __QSNET_CTRL_LINUX_H */
91378 +
91379 +/*
91380 + * Local variables:
91381 + * c-file-style: "linux"
91382 + * End:
91383 + */
91384 Index: linux-2.4.21/include/qsnet/debug.h
91385 ===================================================================
91386 --- linux-2.4.21.orig/include/qsnet/debug.h     2004-02-23 16:02:56.000000000 -0500
91387 +++ linux-2.4.21/include/qsnet/debug.h  2005-06-01 23:12:54.752415848 -0400
91388 @@ -0,0 +1,68 @@
91389 +/*
91390 + *    Copyright (C) 2000  Regents of the University of California
91391 + *
91392 + *    This program is free software; you can redistribute it and/or modify
91393 + *    it under the terms of the GNU General Public License as published by
91394 + *    the Free Software Foundation; either version 2 of the License, or
91395 + *    (at your option) any later version.
91396 + *
91397 + *    This program is distributed in the hope that it will be useful,
91398 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
91399 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
91400 + *    GNU General Public License for more details.
91401 + *
91402 + *    You should have received a copy of the GNU General Public License
91403 + *    along with this program; if not, write to the Free Software
91404 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
91405 + *
91406 + */
91407 +#ifndef _QSNET_DEBUG_H
91408 +#define _QSNET_DEBUG_H
91409 +
91410 +#if defined(DIGITAL_UNIX) 
91411 +#include <kern/assert.h>
91412 +#elif defined(LINUX)
91413 +extern int qsnet_assfail (char *ex, const char *func, char *file, int line);
91414 +
91415 +#define ASSERT(EX)     do { \
91416 +       if (!(EX) && qsnet_assfail (#EX, __FUNCTION__, __BASE_FILE__, __LINE__)) { \
91417 +               BUG(); \
91418 +       } \
91419 +} while (0)
91420 +#endif /* DIGITAL_UNIX */
91421 +
91422 +/* debug.c */
91423 +extern void qsnet_debug_init(void);
91424 +extern void qsnet_debug_fini(void);
91425 +extern void qsnet_debug_disable(int);
91426 +extern void qsnet_debug_alloc(void);
91427 +
91428 +#define QSNET_DEBUG_BUFFER  ((unsigned int)(0x01))
91429 +#define QSNET_DEBUG_CONSOLE ((unsigned int)(0x02))
91430 +#define QSNET_DEBUG_BUF_CON ( QSNET_DEBUG_BUFFER | QSNET_DEBUG_CONSOLE )
91431 +
91432 +#ifdef __GNUC__
91433 +extern void qsnet_debugf      (unsigned int mode, char *fmt, ...)
91434 +       __attribute__ ((format (printf,2,3)));
91435 +extern void kqsnet_debugf      (char *fmt, ...)
91436 +       __attribute__ ((format (printf,1,2)));
91437 +#else
91438 +extern void qsnet_debugf      (unsigned int mode, char *fmt, ...);
91439 +extern void kqsnet_debugf     (char *fmt, ...);
91440 +#endif
91441 +extern void qsnet_vdebugf     (unsigned int mode, char * prefix, char *fmt,  va_list ap);
91442 +extern int  qsnet_debug_buffer(caddr_t ubuffer, int len);
91443 +extern int  qsnet_debug_dump  (void);
91444 +extern int  qsnet_debug_kmem  (void *handle);
91445 +
91446 +extern void qsnet_debug_buffer_on(void);
91447 +extern void qsnet_debug_buffer_clear(void);
91448 +extern void qsnet_debug_buffer_mark(char *str);
91449 +
91450 +#endif /* _QSNET_DEBUG_H */
91451 +
91452 +/*
91453 + * Local variables:
91454 + * c-file-style: "linux"
91455 + * End:
91456 + */
91457 Index: linux-2.4.21/include/qsnet/fence.h
91458 ===================================================================
91459 --- linux-2.4.21.orig/include/qsnet/fence.h     2004-02-23 16:02:56.000000000 -0500
91460 +++ linux-2.4.21/include/qsnet/fence.h  2005-06-01 23:12:54.752415848 -0400
91461 @@ -0,0 +1,178 @@
91462 +/*
91463 + *    Copyright (c) 2003 by Quadrics Supercomputers World Ltd.
91464 + *
91465 + *    For licensing information please see the supplied COPYING file
91466 + *
91467 + */
91468 +
91469 +/* $Id: fence.h,v 1.21.6.4 2004/11/23 14:34:45 addy Exp $ */
91470 +/*             $Source: /cvs/master/quadrics/qsnet/fence.h,v $*/
91471 +
91472 +#ifndef _CONFIG_FENCE_H
91473 +#define _CONFIG_FENCE_H
91474 +
91475 +#ident "$Id: fence.h,v 1.21.6.4 2004/11/23 14:34:45 addy Exp $"
91476 +
91477 +#ifdef __cplusplus
91478 +extern "C" {
91479 +#endif
91480 +
91481 +#if defined(__ELAN__) || defined(__ELAN3__)
91482 +
91483 +/* no memory barriers required on elan3/elan4 */
91484 +
91485 +#elif defined QSNET_MEMBARS_ASSERT
91486 +
91487 +#include <assert.h>
91488 +#define MEMBAR_MEMISSUE()       assert(0);
91489 +#define MEMBAR_SYNC()           assert(0);
91490 +#define MEMBAR_STORELOAD()      assert(0);
91491 +#define MEMBAR_LOADSTORE()      assert(0);
91492 +#define MEMBAR_STORESTORE()     assert(0);
91493 +#define MEMBAR_LOADLOAD()       assert(0);
91494 +#define MEMBAR_VISIBLE()        assert(0);
91495 +#define MEMBAR_DRAIN()          assert(0);
91496 +    
91497 +#elif defined(__alpha)
91498 +
91499 +/* Memory barrier instructions */
91500 +#if defined(__DECC) || defined(__DECXX)
91501 +long   asm( const char *,...);
91502 +#pragma intrinsic( asm )
91503 +#define MEMBAR_MEMISSUE()      asm("mb")
91504 +#define MEMBAR_SYNC()          asm("mb")
91505 +#define MEMBAR_STORELOAD()     asm("wmb")
91506 +#define MEMBAR_LOADSTORE()     asm("mb")
91507 +#define MEMBAR_STORESTORE()    asm("wmb")
91508 +#define MEMBAR_LOADLOAD()      asm("mb")
91509 +#define MEMBAR_VISIBLE()       asm("")
91510 +#define MEMBAR_DRAIN()                 asm("wmb")
91511 +
91512 +#else
91513 +/* Assume gcc */
91514 +#define MEMBAR_MEMISSUE()      asm volatile ("mb"::)
91515 +#define MEMBAR_SYNC()          asm volatile ("mb"::)
91516 +#define MEMBAR_STORELOAD()     asm volatile ("wmb"::)
91517 +#define MEMBAR_LOADSTORE()     asm volatile ("mb"::)
91518 +#define MEMBAR_STORESTORE()    asm volatile ("wmb"::)
91519 +#define MEMBAR_LOADLOAD()      asm volatile ("mb"::)
91520 +#define MEMBAR_VISIBLE()       asm volatile (""   ::: "memory")
91521 +#define MEMBAR_DRAIN()         asm volatile ("wmb"::: "memory")
91522 +
91523 +#endif /* __DECC */
91524 +
91525 +#elif defined(__sparc)
91526 +
91527 +/* UltraSPARC with WRITE MERGING enabled */
91528 +#define MEMBAR_MEMISSUE()      asm volatile ("membar #MemIssue");
91529 +#define MEMBAR_SYNC()          asm volatile ("membar #Sync");
91530 +#define MEMBAR_STORELOAD()     asm volatile ("membar #StoreLoad");
91531 +#define MEMBAR_LOADSTORE()     asm volatile ("membar #LoadStore");
91532 +#define MEMBAR_STORESTORE()    asm volatile ("membar #StoreStore");
91533 +#define MEMBAR_LOADLOAD()      asm volatile ("membar #LoadLoad");
91534 +#define MEMBAR_VISIBLE()       asm volatile (""::: "memory")
91535 +#define MEMBAR_DRAIN()         asm volatile (""::: "memory")
91536 +
91537 +#elif defined(__linux__)
91538 +
91539 +#if defined(__INTEL_COMPILER)
91540 +
91541 +/* NB: Intel compiler version 8.0 now also defines __GNUC__ unless you set the -no-gcc cmdline option
91542 + * I've moved the check for __INTEL_COMPILER to be first to get around this
91543 + */
91544 +#ifdef __ECC
91545 +
91546 +#include <ia64intrin.h>
91547 +
91548 +#define MEMBAR_MEMISSUE()       __mf()
91549 +#define MEMBAR_SYNC()           __mf()
91550 +#define MEMBAR_STORELOAD()      __mf()
91551 +#define MEMBAR_LOADSTORE()      __mf()
91552 +#define MEMBAR_STORESTORE()     __mf()
91553 +#define MEMBAR_LOADLOAD()       __mf()
91554 +#define MEMBAR_VISIBLE()       __mf()
91555 +#define MEMBAR_DRAIN()         __mf()
91556 +
91557 +#else
91558 +
91559 +#warning Membars not implemented with this compiler.
91560 +#define MEMBAR_MEMISSUE()       ;
91561 +#define MEMBAR_SYNC()           ;
91562 +#define MEMBAR_STORELOAD()      ;
91563 +#define MEMBAR_LOADSTORE()      ;
91564 +#define MEMBAR_STORESTORE()     ;
91565 +#define MEMBAR_LOADLOAD()       ;
91566 +#define MEMBAR_VISIBLE()        ;
91567 +#define MEMBAR_DRAIN()          ;
91568 +
91569 +#endif /* __ECC */
91570 +
91571 +#elif defined(__GNUC__)
91572 +
91573 +#ifndef __ia64
91574 +
91575 +/* These are needed by <asm/system.h> on AMD64 */
91576 +#include <asm/types.h>
91577 +#include <asm/bitops.h>
91578 +
91579 +#ifndef __cplusplus
91580 +/* this header file has a parameter called "new" - great huh */
91581 +#include <asm/system.h>
91582 +#endif
91583 +
91584 +#else
91585 +#  define mb()        __asm__ __volatile__ ("mf" ::: "memory")
91586 +#  define rmb()       mb()
91587 +#  define wmb()       mb()
91588 +#endif /* !__ia64 */
91589 +
91590 +#if defined(__x86_64) || defined(__i386)
91591 +/* For some reason the AMD64 definition (glibc-devel 2.3.X) of this 
91592 + * is not useful (compiler only directive) so we overload it here
91593 + */
91594 +/* I don't trust the IA32 header files either as with mtrr enabled
91595 + * we really need a membar and not a compiler directive
91596 + * NB: sfence is only available with X86_FEATURE_XMM CPUs
91597 + */
91598 +#undef wmb
91599 +#define wmb()    asm volatile("sfence":::"memory");
91600 +#endif /* __x86_64 */
91601 +
91602 +#define MEMBAR_MEMISSUE()      mb()
91603 +#define MEMBAR_SYNC()          mb()
91604 +#define MEMBAR_STORELOAD()     wmb()
91605 +#define MEMBAR_LOADSTORE()     mb()
91606 +#define MEMBAR_STORESTORE()    wmb()
91607 +#define MEMBAR_LOADLOAD()      mb()
91608 +
91609 +#ifdef __ia64
91610 +#define MEMBAR_VISIBLE()       asm volatile ("mf.a;;mf;;"::: "memory")
91611 +#define MEMBAR_DRAIN()         asm volatile ("mf;"::: "memory")
91612 +#else
91613 +#define MEMBAR_VISIBLE()       asm volatile (""::: "memory")
91614 +#define MEMBAR_DRAIN()         wmb()
91615 +#endif
91616 +
91617 +#else /* elif __GNUC__ */
91618 +
91619 +#error Membars not implemented for this architecture/compiler.
91620 +
91621 +#endif /* __INTEL_COMPILER */
91622 +
91623 +#else /* elif __linux__ */
91624 +
91625 +#error Membars not implemented for this architecture/compiler.
91626 +
91627 +#endif
91628 +
91629 +#ifdef __cplusplus
91630 +}
91631 +#endif
91632 +
91633 +#endif /* _CONFIG_FENCE_H */
91634 +
91635 +/*
91636 + * Local variables:
91637 + * c-file-style: "stroustrup"
91638 + * End:
91639 + */
91640 Index: linux-2.4.21/include/qsnet/kernel.h
91641 ===================================================================
91642 --- linux-2.4.21.orig/include/qsnet/kernel.h    2004-02-23 16:02:56.000000000 -0500
91643 +++ linux-2.4.21/include/qsnet/kernel.h 2005-06-01 23:12:54.752415848 -0400
91644 @@ -0,0 +1,38 @@
91645 +/*
91646 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
91647 + *
91648 + *    For licensing information please see the supplied COPYING file
91649 + *
91650 + */
91651 +
91652 +#ifndef __QSNET_KERNEL_H
91653 +#define __QSNET_KERNEL_H
91654 +
91655 +#ident "$Id: kernel.h,v 1.8 2003/03/14 10:18:22 mike Exp $"
91656 +/*      $Source: /cvs/master/quadrics/qsnet/kernel.h,v $*/
91657 +
91658 +#include <qsnet/config.h>
91659 +#include <qsnet/types.h>
91660 +
91661 +#if defined(SOLARIS)
91662 +#include <qsnet/kernel_solaris.h>
91663 +#endif
91664 +
91665 +#if defined(DIGITAL_UNIX)
91666 +#include <qsnet/kernel_dunix.h>
91667 +#endif
91668 +
91669 +#if defined(LINUX)
91670 +#include <qsnet/kernel_linux.h>
91671 +#endif
91672 +
91673 +#include <qsnet/debug.h>
91674 +
91675 +#endif /* __QSNET_KERNEL_H */
91676 +
91677 +
91678 +
91679 +
91680 +
91681 +
91682 +
91683 Index: linux-2.4.21/include/qsnet/kernel_linux.h
91684 ===================================================================
91685 --- linux-2.4.21.orig/include/qsnet/kernel_linux.h      2004-02-23 16:02:56.000000000 -0500
91686 +++ linux-2.4.21/include/qsnet/kernel_linux.h   2005-06-01 23:12:54.753415696 -0400
91687 @@ -0,0 +1,354 @@
91688 +/*
91689 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
91690 + *
91691 + *    For licensing information please see the supplied COPYING file
91692 + *
91693 + */
91694 +
91695 +#ifndef __QSNET_KERNEL_LINUX_H
91696 +#define __QSNET_KERNEL_LINUX_H
91697 +
91698 +#ident "$Id: kernel_linux.h,v 1.62.6.5 2005/01/18 14:37:22 david Exp $"
91699 +/*      $Source: /cvs/master/quadrics/qsnet/kernel_linux.h,v $*/
91700 +
91701 +#if defined(MODVERSIONS)
91702 +#include <linux/modversions.h>
91703 +#endif
91704 +
91705 +#include <linux/autoconf.h>
91706 +#include <linux/module.h>
91707 +
91708 +
91709 +/* ASSERT(spin_is_locked(l)) would always fail on UP kernels */
91710 +#if defined(CONFIG_SMP)
91711 +#define SPINLOCK_HELD(l)       spin_is_locked(l)
91712 +#else
91713 +#define SPINLOCK_HELD(l)       (1) 
91714 +#endif
91715 +
91716 +#include <asm/io.h>
91717 +#include <asm/uaccess.h>
91718 +
91719 +#include <linux/types.h>
91720 +#include <linux/time.h>
91721 +
91722 +#include <linux/delay.h>
91723 +#include <linux/smp_lock.h>
91724 +#include <linux/spinlock.h>
91725 +#include <linux/module.h>
91726 +
91727 +#include <linux/coproc.h>      /* Quadrics added */
91728 +
91729 +#include <linux/highmem.h>
91730 +
91731 +#include <qsnet/mutex.h>
91732 +#include <qsnet/condvar.h>
91733 +#include <qsnet/crwlock.h>
91734 +
91735 +#if defined(LINUX_ALPHA)
91736 +#  include <asm/core_tsunami.h>        /* for TSUNAMI_MEM */
91737 +#endif
91738 +
91739 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
91740 +#      undef   MOD_INC_USE_COUNT
91741 +#      undef   MOD_DEC_USE_COUNT
91742 +#      define  MOD_INC_USE_COUNT
91743 +#      define  MOD_DEC_USE_COUNT
91744 +#endif
91745 +
91746 +#define MIN(a,b)       ((a) > (b) ? (b) : (a))
91747 +#define MAX(a,b)       ((a) > (b) ? (a) : (b))
91748 +
91749 +/* stray types */
91750 +typedef u64               u_longlong_t;
91751 +typedef unsigned long     uintptr_t;
91752 +typedef int               bool_t;
91753 +
91754 +typedef unsigned long     virtaddr_t;                          /* virtual address */
91755 +typedef unsigned long      ioaddr_t;                           /* io address */
91756 +typedef unsigned long      sdramaddr_t;                                /* elan sdram offset */
91757 +
91758 +/* 386 kernel can be compiled with PAE enabled to use a 44 bit physical address */
91759 +#if defined(CONFIG_X86_PAE)
91760 +typedef unsigned long long physaddr_t;
91761 +#else
91762 +typedef unsigned long     physaddr_t;
91763 +#endif
91764 +
91765 +/* ticks since reboot, and tick freq */
91766 +#define lbolt          jiffies 
91767 +#define hz             HZ
91768 +
91769 +/* System page size and friends */
91770 +#define PAGESIZE       PAGE_SIZE
91771 +#define PAGESHIFT      PAGE_SHIFT
91772 +#define PAGEOFFSET     (PAGE_SIZE - 1)
91773 +#define PAGEMASK       PAGE_MASK
91774 +
91775 +#define PAGE_ALIGNED(a)        (((a) & PAGE_MASK) == a)
91776 +
91777 +/* convert between bytes and pages */
91778 +#define btop(b)         ((unsigned long)(b) >> PAGE_SHIFT)     /* rnd down */ 
91779 +#define btopr(b)        btop(PAGE_ALIGN((unsigned long) b))    /* rnd up */
91780 +#define ptob(p)                ((unsigned long)(p) << PAGE_SHIFT)
91781 +
91782 +/* round up sz to the nearest multiple of blk */
91783 +#define roundup(sz,blk) ((blk) * ((sz) / (blk) + ((sz) % (blk) ? 1 : 0)))      
91784 +
91785 +/* send a signal to a process */
91786 +#define psignal(pr,sig)        send_sig(sig,pr,0)
91787 +
91788 +/* microsecond delay */
91789 +#define DELAY(us)      udelay(us)
91790 +
91791 +/* macro macros */
91792 +#define MACRO_BEGIN     do {
91793 +#define MACRO_END       } while (0)
91794 +
91795 +/* D-Unix compatible errno values */
91796 +#define ESUCCESS        0
91797 +#define EFAIL           255
91798 +
91799 +/* ASSERT(NO_LOCKS_HELD) will be a no-op */
91800 +#define NO_LOCKS_HELD  1
91801 +
91802 +/* misc */
91803 +typedef int            label_t;
91804 +#define on_fault(ljp)  ((ljp) == NULL)
91805 +#define _NOTE(X)
91806 +#define no_fault()     ((void) 0)
91807 +#define panicstr       0
91808 +
91809 +/* return from system call is -EXXX on linux */
91810 +#define set_errno(e)   (-(e))
91811 +
91812 +/* 
91813 + * BSD-style byte ops 
91814 + */
91815 +
91816 +#define bcmp(src1,src2,len)            memcmp(src1,src2,len)
91817 +#define bzero(dst,len)                 memset(dst,0,len)
91818 +#define bcopy(src,dst,len)             memcpy(dst,src,len)
91819 +
91820 +#define preemptable_start              do { long must_yield_at = lbolt + (hz/10);
91821 +#define preemptable_end                        } while (0)
91822 +#define preemptable_check()            do {\
91823 +                                            if ((lbolt - must_yield_at) > 0)\
91824 +                                           {\
91825 +                                               preemptable_yield() ; \
91826 +                                               must_yield_at = lbolt + (hz/10);\
91827 +                                           }\
91828 +                                       } while (0)
91829 +
91830 +#define preemptable_yield()            schedule()
91831 +
91832 +#define CURPROC()                       current
91833 +#define CURTHREAD()                     current
91834 +#define SUSER()                                suser()
91835 +
91836 +/* 64 bit IO operations on 32 bit intel cpus using MMX */
91837 +#if defined(LINUX_I386)
91838 +extern u64         qsnet_readq (volatile u64 *ptr);
91839 +extern void        qsnet_writeq (u64 value, volatile u64 *ptr);
91840 +
91841 +#define readq(ptr)             qsnet_readq((void *) ptr)
91842 +#define writeq(val,ptr)                qsnet_writeq(val, (void *)ptr)
91843 +#endif
91844 +
91845 +/*
91846 + * Memory barriers
91847 + */
91848 +#ifndef mmiob
91849 +#  define mmiob()                      mb()
91850 +#endif
91851 +
91852 +/* 
91853 + * Exit handlers
91854 + */
91855 +#define HANDLER_REGISTER(func,arg,flags)   xa_handler_register(func,arg,flags)
91856 +#define HANDLER_UNREGISTER(func,arg,flags) xa_handler_unregister(func,arg,flags)
91857 +
91858 +/* 
91859 + * KMEM_GETPAGES and KMEM_ALLOC both call kmem_alloc, which 
91860 + * translates the call to kmalloc if < PAGE_SIZE, or vmalloc 
91861 + * if >= PAGE_SIZE.  vmalloc will always return a page-aligned 
91862 + * region rounded up to the nearest page, while kmalloc will 
91863 + * return bits and pieces of a page.
91864 + */
91865 +
91866 +#ifdef KMEM_DEBUG
91867 +extern void          *qsnet_kmem_alloc_debug(int len, int sleep, int zerofill, char *file, int line);
91868 +extern void           qsnet_kmem_free_debug(void *ptr, int len, char *file, int line);
91869 +#define KMEM_ALLOC(ptr,type,len,sleep) \
91870 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc_debug(len,sleep,0,__FILE__,__LINE__); }
91871 +#define KMEM_ZALLOC(ptr,type,len,sleep) \
91872 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc_debug(len,sleep,1,__FILE__,__LINE__); }
91873 +
91874 +#define KMEM_FREE(ptr,len)               qsnet_kmem_free_debug((void *)ptr,len,__FILE__,__LINE__)
91875 +
91876 +#else
91877 +
91878 +extern void          *qsnet_kmem_alloc(int len, int sleep, int zerofill);
91879 +extern void           qsnet_kmem_free(void *ptr, int len);
91880 +
91881 +#define KMEM_ALLOC(ptr,type,len,sleep) \
91882 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc(len,sleep,0); }
91883 +#define KMEM_ZALLOC(ptr,type,len,sleep) \
91884 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc(len,sleep,1); }
91885 +
91886 +#define KMEM_FREE(ptr,len)               qsnet_kmem_free((void *)ptr,len)
91887 +
91888 +#endif
91889 +extern void       qsnet_kmem_display(void *handle);
91890 +extern physaddr_t kmem_to_phys(void *ptr);
91891 +
91892 +#define KMEM_ASSERT(sleep)              ASSERT(!(in_interrupt() && sleep))
91893 +
91894 +
91895 +#define KMEM_GETPAGES(ptr,type,pgs,sleep) KMEM_ZALLOC(ptr,type,ptob(pgs),sleep)
91896 +#define KMEM_FREEPAGES(ptr,pgs)          KMEM_FREE(ptr,ptob(pgs));
91897 +
91898 +/*
91899 + * Copying from user space -> kernel space (perms checked)
91900 + */
91901 +#define copyin(up,kp,size)             copy_from_user(kp,up,size)
91902 +#define copyin_noerr(up,kp,size)       copy_from_user(kp,up,size)
91903 +
91904 +/* get_user() gets xfer width right */
91905 +#define fulinux(ret, up)               (get_user(ret, (up)) == 0 ? ret : -1)
91906 +#define fulinuxp(ret, up)              (get_user(ret, (up)) == 0 ? ret : NULL)
91907 +
91908 +extern __inline__ int fubyte    (u8  *up) { u8  ret;   return fulinux(ret, up);}
91909 +extern __inline__ int fusword   (u16 *up) { u16 ret;   return fulinux(ret, up);}
91910 +extern __inline__ int fuword    (u32 *up) { u32 ret;   return fulinux(ret, up);}
91911 +#if BITS_PER_LONG > 32
91912 +extern __inline__ u64 fulonglong(u64 *up) { u64 ret;   return fulinux(ret, up);}
91913 +#else
91914 +extern __inline__ u64 fulonglong(u64 *up) { return ((u64) fuword((u32 *)up) | (((u64) fuword(((u32 *)up)+1))<<32)); }
91915 +#endif
91916 +extern __inline__ void *fuptr (void **up) { void *ret; return fulinuxp(ret,up);}
91917 +
91918 +#define fubyte_noerr(up)               fubyte(up)
91919 +#define fusword_noerr(up)              fusword(up)
91920 +#define fuword_noerr(up)               fuword(up)
91921 +#define fulonglong_noerr(up)           fulonglong(up)
91922 +#define fuptr_noerr(up)                        fuptr(up)
91923 +
91924 +extern __inline__ int copyinstr(char *up, char *kp, int max, int *size)
91925 +{ 
91926 +       for (*size = 1; *size <= max; (*size)++) {
91927 +               if (get_user(*kp, up++) != 0)
91928 +                       return EFAULT;  /* bad user space addr */
91929 +               if (*kp++ == '\0')
91930 +                       return 0;       /* success */
91931 +       }
91932 +       *size = max;
91933 +       return ENAMETOOLONG;            /* runaway string */
91934 +}
91935
91936 +/*
91937 + * Copying from kernel space -> user space (perms checked)
91938 + */
91939 +
91940 +#define copyout(kp,up,size)            copy_to_user(up,kp,size)
91941 +#define copyout_noerr(kp,up,size)      copy_to_user(up,kp,size)
91942 +
91943 +/* put_user() gets xfer width right */
91944 +#define sulinux(val, up)               (put_user(val, (up)) == 0 ? 0 : -1)
91945 +
91946 +extern __inline__ int subyte    (u8  *up, u8  val) { return sulinux(val, up); }
91947 +extern __inline__ int susword   (u16 *up, u16 val) { return sulinux(val, up); }
91948 +extern __inline__ int suword    (u32 *up, u32 val) { return sulinux(val, up); }
91949 +#if BITS_PER_LONG > 32
91950 +extern __inline__ int sulonglong(u64 *up, u64 val) { return sulinux(val, up); }
91951 +#else
91952 +extern __inline__ int sulonglong(u64 *up, u64 val) { return (suword((u32 *) up, (u32) val) == 0 ? 
91953 +                                                            suword(((u32 *) up)+1, (u32) (val >> 32)) : -1); }
91954 +#endif
91955 +extern __inline__ int suptr   (void **up,void *val){ return sulinux(val, up); }
91956 +
91957 +#define subyte_noerr(up,val)           subyte(up,val)  
91958 +#define susword_noerr(up,val)          susword(up,val) 
91959 +#define suword_noerr(up,val)           suword(up,val)  
91960 +#define sulonglong_noerr(up,val)       sulonglong(up,val)      
91961 +#define suptr_noerr(up,val)            suptr(up,val)   
91962 +
91963 +/*
91964 + * /proc/qsnet interface
91965 + */
91966 +extern inline int
91967 +str_append(char *buf, char *add, int size)
91968 +{
91969 +#define TRUNC_MSG       "[Output truncated]\n"
91970 +       int full = 0;
91971 +       int max = size - strlen(TRUNC_MSG) - strlen(add) - 1;
91972 +
91973 +       if (strlen(buf) > max) {
91974 +               strcat(buf, TRUNC_MSG);
91975 +               full = 1;
91976 +       } else
91977 +               strcat(buf, add);
91978 +       return full;
91979 +}
91980 +
91981 +/* Spinlocks */
91982 +#define spin_lock_destroy(l)           ((void) 0)
91983 +
91984 +/* Complex - Reader/Writer locks - we added <linux/crwlock.h> */
91985 +typedef crwlock_t                      krwlock_t;
91986 +#define krwlock_init(l)                        crwlock_init(l)
91987 +#define krwlock_destroy(l)             crwlock_destroy(l)
91988 +#define krwlock_write(l)               crwlock_write(l)
91989 +#define krwlock_read(l)                        crwlock_read(l)
91990 +#define krwlock_done(l)                        crwlock_done(l)
91991 +#define krwlock_is_locked(l)           crwlock_held(l)
91992 +#define krwlock_is_write_locked(l)     crwlock_write_held(l)
91993 +#define krwlock_is_read_locked(l)      crwlock_read_held(l)
91994 +
91995 +/*
91996 + * Timeouts - Solaris style.
91997 + */
91998 +typedef struct timer_list timer_fn_t;
91999 +
92000 +extern inline void
92001 +schedule_timer_fn(timer_fn_t *timer, void (*fun)(void *), void *arg, long hz_delay)
92002 +{
92003 +       init_timer(timer);
92004 +
92005 +       timer->function = (void (*)(unsigned long)) fun;
92006 +       timer->data     = (unsigned long) arg;
92007 +       timer->expires  = jiffies + hz_delay;
92008 +
92009 +       add_timer(timer);
92010 +}
92011 +
92012 +/* returns 1 if timer_fn was cancelled */
92013 +extern inline int
92014 +cancel_timer_fn(timer_fn_t *timer)
92015 +{
92016 +    return (del_timer_sync(timer));
92017 +}
92018 +
92019 +extern inline int
92020 +timer_fn_queued(timer_fn_t *timer)
92021 +{
92022 +    return (timer_pending (timer));
92023 +}
92024 +/*
92025 + * Hold/release CPUs.
92026 + */
92027 +
92028 +extern void    cpu_hold_all(void);
92029 +extern void    cpu_release_all(void);
92030 +#define CAPTURE_CPUS()         cpu_hold_all()
92031 +#define RELEASE_CPUS()         cpu_release_all()
92032 +
92033 +#define IASSERT ASSERT
92034 +
92035 +#endif /* __QSNET_KERNEL_LINUX_H */
92036 +
92037 +/*
92038 + * Local variables:
92039 + * c-file-style: "linux"
92040 + * End:
92041 + */
92042 Index: linux-2.4.21/include/qsnet/kpte.h
92043 ===================================================================
92044 --- linux-2.4.21.orig/include/qsnet/kpte.h      2004-02-23 16:02:56.000000000 -0500
92045 +++ linux-2.4.21/include/qsnet/kpte.h   2005-06-01 23:12:54.753415696 -0400
92046 @@ -0,0 +1,107 @@
92047 +/*
92048 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92049 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
92050 + *
92051 + *    For licensing information please see the supplied COPYING file
92052 + *
92053 + */
92054 +
92055 +#ifndef __QSNET_KPTE_H
92056 +#define __QSNET_KPTE_H
92057 +
92058 +#ident "@(#)$Id: kpte.h,v 1.1.2.1 2004/11/02 10:45:29 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
92059 +/*      $Source: /cvs/master/quadrics/qsnet/kpte.h,v $*/
92060 +
92061 +#include <qsnet/autoconf.h>
92062 +
92063 +#ifdef NO_RMAP
92064 +#      define pte_offset_kernel pte_offset
92065 +#      define pte_offset_map    pte_offset
92066 +#       define pte_unmap(A)      do { ; } while (0)
92067 +#endif
92068 +
92069 +/* 
92070 + * Pte stuff
92071 + */
92072 +static __inline__ struct mm_struct *
92073 +get_kern_mm(void)
92074 +{
92075 +        return &init_mm;
92076 +}
92077 +
92078 +static __inline__ pte_t *
92079 +find_pte_map(struct mm_struct *mm, unsigned long vaddr)
92080 +{
92081 +        pgd_t *pgd;
92082 +        pmd_t *pmd;
92083 +       pte_t *ptep;
92084 +
92085 +/* XXXX - handle huge tlb code */
92086 +       pgd = pgd_offset(mm, vaddr);
92087 +       if (pgd_none(*pgd) || pgd_bad(*pgd))
92088 +               goto out;
92089 +       
92090 +       pmd = pmd_offset(pgd, vaddr);
92091 +       if (pmd_none(*pmd) || pmd_bad (*pmd))
92092 +               goto out;
92093 +
92094 +       ptep = pte_offset_map (pmd, vaddr);
92095 +       if (! ptep)
92096 +               goto out;
92097 +       
92098 +       if (pte_present (*ptep))
92099 +               return ptep;
92100 +
92101 +       pte_unmap (ptep);
92102 +out:
92103 +       return NULL;
92104 +}
92105 +
92106 +static __inline__ pte_t *
92107 +find_pte_kernel(unsigned long vaddr)
92108 +{
92109 +        pgd_t *pgd;
92110 +        pmd_t *pmd;
92111 +       pte_t *pte;
92112 +
92113 +       pgd = pgd_offset_k(vaddr);
92114 +       if (pgd && !pgd_none(*pgd)) {
92115 +           pmd = pmd_offset(pgd, vaddr);
92116 +           if (pmd && pmd_present(*pmd)) {
92117 +               pte = pte_offset_kernel(pmd, vaddr);
92118 +               if (pte && pte_present(*pte))
92119 +                   return (pte);
92120 +           }
92121 +       }
92122 +       return (NULL);
92123 +}
92124 +
92125 +static __inline__ physaddr_t
92126 +pte_phys(pte_t pte)
92127 +{
92128 +#if defined(LINUX_ALPHA)
92129 +       /* RedHat 7.1 2.4.3-12 
92130 +        * They have now enabled Monster windows on Tsunami
92131 +        * and so can use the Main's phys pte value 
92132 +        */
92133 +       return (pte_val(pte) >> (32-PAGE_SHIFT));
92134 +#elif defined(LINUX_I386)
92135 +       return (pte_val(pte) & ~((1 << PAGE_SHIFT)-1));
92136 +#elif defined(LINUX_SPARC)
92137 +       return (pte_val(pte) & _PAGE_PADDR);
92138 +#elif defined(LINUX_IA64)
92139 +       return (pte_val(pte) & _PFN_MASK);
92140 +#elif defined(LINUX_X86_64)
92141 +       return (pte_val(pte) & ~((1 << PAGE_SHIFT)-1) & ~_PAGE_NX);
92142 +#else
92143 +#error Unknown architecture
92144 +#endif
92145 +}
92146 +
92147 +#endif /* __QSNET_KPTE_H */
92148 +
92149 +/*
92150 + * Local variables:
92151 + * c-file-style: "stroustrup"
92152 + * End:
92153 + */
92154 Index: linux-2.4.21/include/qsnet/kthread.h
92155 ===================================================================
92156 --- linux-2.4.21.orig/include/qsnet/kthread.h   2004-02-23 16:02:56.000000000 -0500
92157 +++ linux-2.4.21/include/qsnet/kthread.h        2005-06-01 23:12:54.754415544 -0400
92158 @@ -0,0 +1,71 @@
92159 +/*
92160 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92161 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
92162 + *
92163 + *    For licensing information please see the supplied COPYING file
92164 + *
92165 + */
92166 +
92167 +#ifndef __QSNET_KTHREAD_H
92168 +#define __QSNET_KTHREAD_H
92169 +
92170 +#ident "@(#)$Id: kthread.h,v 1.1 2004/10/28 11:50:29 david Exp $ $Name: QSNETMODULES-4-30_20050128 $"
92171 +/*      $Source: /cvs/master/quadrics/qsnet/kthread.h,v $*/
92172 +
92173 +#include <qsnet/autoconf.h>
92174 +
92175 +/* 
92176 + * kernel threads 
92177 + */
92178 +extern __inline__ void
92179 +kernel_thread_init(char *comm)
92180 +{
92181 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
92182 +#ifndef NO_NPTL
92183 +#      define sigmask_lock                     sighand->siglock
92184 +#endif
92185 +       lock_kernel();
92186 +       daemonize();
92187 +        reparent_to_init();
92188 +
92189 +        /* avoid getting signals */
92190 +        spin_lock_irq(&current->sigmask_lock);
92191 +        flush_signals(current);
92192 +        sigfillset(&current->blocked);
92193 +       
92194 +#ifdef NO_NPTL
92195 +        recalc_sigpending(current);
92196 +#else
92197 +        recalc_sigpending();
92198 +#endif
92199 +
92200 +        spin_unlock_irq(&current->sigmask_lock);
92201 +
92202 +       /* set our name for identification purposes */
92203 +       strncpy(current->comm, comm, sizeof(current->comm));
92204 +
92205 +       unlock_kernel();
92206 +#else
92207 +       daemonize(comm);
92208 +#endif
92209 +}
92210 +
92211 +extern __inline__ void *
92212 +kernel_thread_wrap(caddr_t stk, int stksize, void (*proc)(void *), void *arg)
92213 +{
92214 +        ASSERT(stk == NULL && stksize == 0);
92215 +        kernel_thread((int (*)(void *))proc, arg, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
92216 +        return (void *)1; /* non-null value */
92217 +}
92218 +
92219 +#define kernel_thread_create(proc,arg)  kernel_thread_wrap(NULL,0,(void (*)(void *))proc,arg)
92220 +#define kernel_thread_exit()           ((void) 0)
92221 +#define kernel_thread_become_highpri() ((void) 0)
92222 +
92223 +#endif /* __QSNET_KTHREAD_H */
92224 +
92225 +/*
92226 + * Local variables:
92227 + * c-file-style: "linux"
92228 + * End:
92229 + */
92230 Index: linux-2.4.21/include/qsnet/list.h
92231 ===================================================================
92232 --- linux-2.4.21.orig/include/qsnet/list.h      2004-02-23 16:02:56.000000000 -0500
92233 +++ linux-2.4.21/include/qsnet/list.h   2005-06-01 23:12:54.754415544 -0400
92234 @@ -0,0 +1,80 @@
92235 +/*
92236 + *    Copyright (c) 2003 by Quadrics Limited.
92237 + * 
92238 + *    For licensing information please see the supplied COPYING file
92239 + *
92240 + */
92241 +
92242 +#ident "@(#)$Id: list.h,v 1.5 2003/10/27 13:55:33 david Exp $"
92243 +/*      $Source: /cvs/master/quadrics/qsnet/list.h,v $*/
92244 +
92245 +#ifndef __QSNET_LIST_H
92246 +#define __QSNET_LIST_H
92247 +
92248 +/* Implementation of doubly linked lists - compatible with linux */
92249 +struct list_head 
92250 +{
92251 +    struct list_head *next;
92252 +    struct list_head *prev;
92253 +};
92254 +
92255 +#if !defined(LINUX)
92256 +#if ! defined( offsetof ) 
92257 +#define offsetof(T,F) ((int )&(((T *)0)->F))
92258 +#endif
92259 +
92260 +#define LIST_HEAD_INIT(name) { &(name), &(name) }
92261 +
92262 +#define LIST_HEAD(name) \
92263 +       struct list_head name = LIST_HEAD_INIT(name)
92264 +#endif
92265 +
92266 +#define list_entry(ptr, type, off) \
92267 +       ((type *) ((unsigned long)(ptr) - offsetof (type,off)))
92268 +
92269 +#define INIT_LIST_HEAD(list) \
92270 +MACRO_BEGIN \
92271 +       (list)->next = (list)->prev = (list); \
92272 +MACRO_END
92273 +
92274 +#define list_add(new, list) \
92275 +MACRO_BEGIN \
92276 +       (list)->next->prev = (new); \
92277 +       (new)->next = (list)->next; \
92278 +       (new)->prev = (list); \
92279 +       (list)->next = (new); \
92280 +MACRO_END
92281 +
92282 +#define list_add_tail(new, list) \
92283 +MACRO_BEGIN \
92284 +       (list)->prev->next = new; \
92285 +       (new)->prev = (list)->prev; \
92286 +       (new)->next = (list); \
92287 +       (list)->prev = (new); \
92288 +MACRO_END
92289 +
92290 +#define list_del(entry) \
92291 +MACRO_BEGIN \
92292 +       (entry)->prev->next = (entry)->next; \
92293 +       (entry)->next->prev = (entry)->prev; \
92294 +MACRO_END
92295 +
92296 +#define list_del_init(entry) \
92297 +MACRO_BEGIN \
92298 +       (entry)->prev->next = (entry)->next; \
92299 +       (entry)->next->prev = (entry)->prev; \
92300 +       (entry)->next = (entry)->prev = (entry); \
92301 +MACRO_END
92302 +
92303 +#define list_empty(list) \
92304 +       ((list)->next == (list))
92305 +
92306 +#define list_for_each(pos,list) \
92307 +       for (pos = (list)->next; pos != (list); \
92308 +            pos = (pos)->next)
92309 +
92310 +#define list_for_each_safe(pos,n,list) \
92311 +       for (pos = (list)->next, n = (pos)->next; pos != (list); \
92312 +            pos = n, n = (pos)->next)
92313 +
92314 +#endif /* __QSNET_LIST_H */
92315 Index: linux-2.4.21/include/qsnet/mutex.h
92316 ===================================================================
92317 --- linux-2.4.21.orig/include/qsnet/mutex.h     2004-02-23 16:02:56.000000000 -0500
92318 +++ linux-2.4.21/include/qsnet/mutex.h  2005-06-01 23:12:54.754415544 -0400
92319 @@ -0,0 +1,91 @@
92320 +/*
92321 + *    Copyright (C) 2000  Regents of the University of California
92322 + *
92323 + *    This program is free software; you can redistribute it and/or modify
92324 + *    it under the terms of the GNU General Public License as published by
92325 + *    the Free Software Foundation; either version 2 of the License, or
92326 + *    (at your option) any later version.
92327 + *
92328 + *    This program is distributed in the hope that it will be useful,
92329 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
92330 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
92331 + *    GNU General Public License for more details.
92332 + *
92333 + *    You should have received a copy of the GNU General Public License
92334 + *    along with this program; if not, write to the Free Software
92335 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
92336 + *
92337 + */
92338 +
92339 +#if    !defined(_LINUX_MUTEX_H)
92340 +#define        _LINUX_MUTEX_H
92341 +#if    defined(__KERNEL__)
92342 +
92343 +#include <asm/smp.h>
92344 +#include <linux/spinlock.h>
92345 +#include <asm/semaphore.h>
92346 +#include <qsnet/debug.h>
92347 +#include <linux/interrupt.h>
92348 +#include <linux/version.h>
92349 +
92350 +#define PID_NONE       0
92351 +
92352 +typedef struct
92353 +{
92354 +    struct semaphore sem;
92355 +    pid_t           holder;
92356 +} kmutex_t;
92357 +
92358 +extern __inline__ void
92359 +kmutex_init (kmutex_t *l)
92360 +{
92361 +#if    LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
92362 +    l->sem = MUTEX;
92363 +#else
92364 +    init_MUTEX(&l->sem);
92365 +#endif
92366 +    l->holder = PID_NONE;
92367 +}
92368 +
92369 +extern __inline__ void
92370 +kmutex_destroy (kmutex_t *l) 
92371 +{
92372 +    ASSERT (l->holder == PID_NONE); 
92373 +}
92374 +
92375 +extern __inline__ void
92376 +kmutex_lock (kmutex_t *l) 
92377 +{ 
92378 +    ASSERT(l->holder != current->pid);
92379 +    down (&l->sem);
92380 +    l->holder = current->pid; 
92381 +}
92382 +
92383 +extern __inline__ void
92384 +kmutex_unlock (kmutex_t *l) 
92385 +{
92386 +    ASSERT(l->holder == current->pid);
92387 +
92388 +    l->holder = PID_NONE;
92389 +    up (&l->sem);
92390 +}
92391 +
92392 +extern __inline__ int
92393 +kmutex_trylock (kmutex_t *l) 
92394 +{
92395 +    if (down_trylock (&l->sem) == 0) 
92396 +    {
92397 +       l->holder = current->pid;
92398 +       return (1);
92399 +    }
92400 +    return (0);
92401 +}
92402 +
92403 +extern __inline__ int
92404 +kmutex_is_locked (kmutex_t *l) 
92405 +{
92406 +    return (l->holder == current->pid);
92407 +}
92408 +
92409 +#endif /* __KERNEL__ */
92410 +#endif /* _LINUX_MUTEX_H */
92411 Index: linux-2.4.21/include/qsnet/procfs_linux.h
92412 ===================================================================
92413 --- linux-2.4.21.orig/include/qsnet/procfs_linux.h      2004-02-23 16:02:56.000000000 -0500
92414 +++ linux-2.4.21/include/qsnet/procfs_linux.h   2005-06-01 23:12:54.755415392 -0400
92415 @@ -0,0 +1,234 @@
92416 +/*
92417 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92418 + *
92419 + *    For licensing information please see the supplied COPYING file
92420 + *
92421 + */
92422 +
92423 +#ifndef __PROCFS_LINUX_H
92424 +#define __PROCFS_LINUX_H
92425 +
92426 +#ident "$Id: procfs_linux.h,v 1.6.2.6 2004/12/06 17:36:24 robin Exp $"
92427 +/*      $Source: /cvs/master/quadrics/qsnet/procfs_linux.h,v $ */
92428 +
92429 +#if defined(__KERNEL__)
92430 +
92431 +#include <qsnet/kernel_linux.h>
92432 +#include <qsnet/autoconf.h>
92433 +#include <linux/proc_fs.h>
92434 +
92435 +extern gid_t qsnet_procfs_gid;
92436 +
92437 +/* borrowed from fs/proc/proc_misc - helper for proc_read_int */
92438 +static inline int 
92439 +qsnet_proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len)
92440 +{
92441 +       if (len <= off+count) *eof = 1;
92442 +       *start = page + off;
92443 +       len -= off;
92444 +       if (len>count) len = count;
92445 +       if (len<0) len = 0;
92446 +       return len;
92447 +}
92448 +
92449 +static inline int
92450 +qsnet_proc_write_int(struct file *file, const char *buf, unsigned long count, void *data)
92451 +{
92452 +       char tmpbuf[16];
92453 +       int  res = count;
92454 +       
92455 +       if (count > sizeof(tmpbuf) - 1)
92456 +               return (-EINVAL);
92457 +       
92458 +       MOD_INC_USE_COUNT;
92459 +       if (copy_from_user(tmpbuf, buf, count))
92460 +               res = -EFAULT;
92461 +       else
92462 +       {
92463 +               tmpbuf[count] = '\0'; 
92464 +               *(int *)data = simple_strtoul(tmpbuf, NULL, 0);
92465 +       }
92466 +       MOD_DEC_USE_COUNT;
92467 +       
92468 +       return (res);
92469 +}
92470 +
92471 +static inline int
92472 +qsnet_proc_read_int(char *page, char **start, off_t off, int count, int *eof, void *data)
92473 +{
92474 +       int len, res;
92475 +       
92476 +       MOD_INC_USE_COUNT;
92477 +       
92478 +       len = sprintf(page, "%d\n", *(int *)data);
92479 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
92480 +       
92481 +       MOD_DEC_USE_COUNT;
92482 +       return (res);
92483 +}
92484 +
92485 +static inline struct proc_dir_entry *
92486 +qsnet_proc_register_int(struct proc_dir_entry *dir, char *path, int *var, int read_only)
92487 +{
92488 +       struct proc_dir_entry *p;
92489 +       
92490 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
92491 +       if (p) {
92492 +               if (! read_only) 
92493 +                       p->write_proc = qsnet_proc_write_int;
92494 +               p->read_proc  = qsnet_proc_read_int;
92495 +               p->data       = var;
92496 +               p->owner      = THIS_MODULE;
92497 +               p->gid        = qsnet_procfs_gid;
92498 +       }
92499 +       return p;
92500 +}
92501 +
92502 +static inline int
92503 +qsnet_proc_write_hex(struct file *file, const char *buf, unsigned long count, void *data)
92504 +{
92505 +       char tmpbuf[16];
92506 +       int  res = count;
92507 +       
92508 +       if (count > sizeof(tmpbuf) - 1)
92509 +               return (-EINVAL);
92510 +       
92511 +       MOD_INC_USE_COUNT;
92512 +       if (copy_from_user(tmpbuf, buf, count))
92513 +               res = -EFAULT;
92514 +       else
92515 +       {
92516 +               tmpbuf[count] = '\0'; 
92517 +               *(int *)data = simple_strtoul(tmpbuf, NULL, 0);
92518 +       }
92519 +       MOD_DEC_USE_COUNT;
92520 +       
92521 +       return (res);
92522 +}
92523 +
92524 +static inline int
92525 +qsnet_proc_read_hex(char *page, char **start, off_t off, int count, int *eof, void *data)
92526 +{
92527 +       int len, res;
92528 +       
92529 +       MOD_INC_USE_COUNT;
92530 +       
92531 +       len = sprintf(page, "0x%x\n", *(int *)data);
92532 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
92533 +       
92534 +       MOD_DEC_USE_COUNT;
92535 +       return (res);
92536 +}
92537 +
92538 +static inline struct proc_dir_entry *
92539 +qsnet_proc_register_hex(struct proc_dir_entry *dir, char *path, int *var, int read_only)
92540 +{
92541 +       struct proc_dir_entry *p;
92542 +       
92543 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
92544 +       if (p) {
92545 +               if (! read_only) 
92546 +                       p->write_proc = qsnet_proc_write_hex;
92547 +               p->read_proc  = qsnet_proc_read_hex;
92548 +               p->data       = var;
92549 +               p->owner      = THIS_MODULE;
92550 +               p->gid        = qsnet_procfs_gid;
92551 +       }
92552 +       return p;
92553 +}
92554 +
92555 +#define QSNET_PROC_STR_LEN_MAX ((int)256)
92556 +
92557 +static inline int
92558 +qsnet_proc_write_str(struct file *file, const char *buf, unsigned long count, void *data)
92559 +{
92560 +       int  res = count;
92561 +       
92562 +       if (count > (QSNET_PROC_STR_LEN_MAX - 1))
92563 +               return (-EINVAL);
92564 +       
92565 +       MOD_INC_USE_COUNT;
92566 +       if (copy_from_user((char *)data, buf, count))
92567 +               res = -EFAULT;
92568 +       else
92569 +       {
92570 +               ((char *)data)[count] = '\0'; 
92571 +               /* remove linefeed */
92572 +               if ( (count) && (((char *)data)[count -1] == '\n'))
92573 +                       ((char *)data)[count -1] = '\0';
92574 +       }
92575 +       MOD_DEC_USE_COUNT;
92576 +       
92577 +       return (res);
92578 +}
92579 +
92580 +static inline int
92581 +qsnet_proc_read_str(char *page, char **start, off_t off, int count, int *eof, void *data)
92582 +{
92583 +       int len, res;
92584 +       
92585 +       if ( strlen(data) > (count + 1))
92586 +               return (-EINVAL);       
92587 +
92588 +       MOD_INC_USE_COUNT;
92589 +       
92590 +       /* can't output too much */
92591 +       if ( strlen(data) > (count + 1))
92592 +       {
92593 +               MOD_DEC_USE_COUNT;
92594 +               return (-EINVAL);       
92595 +       }
92596 +
92597 +
92598 +       len = sprintf(page, "%s\n", (char *)data);
92599 +       if (len > count)
92600 +       {
92601 +               MOD_DEC_USE_COUNT;
92602 +               return (-EINVAL);       
92603 +       }
92604 +
92605 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
92606 +       
92607 +       MOD_DEC_USE_COUNT;
92608 +       return (res);
92609 +}
92610 +
92611 +static inline struct proc_dir_entry *
92612 +qsnet_proc_register_str(struct proc_dir_entry *dir, char *path, char *var, int read_only)
92613 +{
92614 +       struct proc_dir_entry *p;
92615 +       
92616 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
92617 +       if (p) {
92618 +               if (! read_only) 
92619 +                       p->write_proc = qsnet_proc_write_str;
92620 +               p->read_proc  = qsnet_proc_read_str;
92621 +               p->data       = var;
92622 +               p->owner      = THIS_MODULE;
92623 +               p->gid        = qsnet_procfs_gid;
92624 +       }
92625 +       return p;
92626 +}
92627 +
92628 +extern struct proc_dir_entry *qsnet_procfs_root; 
92629 +extern struct proc_dir_entry *qsnet_procfs_config;
92630 +
92631 +#ifdef NO_PDE
92632 +static inline struct proc_dir_entry *PDE(const struct inode *inode)
92633 +{
92634 +    return inode->u.generic_ip;
92635 +}
92636 +#endif
92637 +#endif /* __KERNEL__ */
92638 +
92639 +#define QSNET_PROCFS_IOCTL      "/proc/qsnet/ioctl"
92640 +#define QSNET_PROCFS_KMEM_DEBUG "/proc/qsnet/kmem_debug"
92641 +#define QSNET_PROCFS_VERSION    "/proc/qsnet/version"
92642 +
92643 +#endif /* __PROCFS_LINUX_H */
92644 +
92645 +/*
92646 + * Local variables:
92647 + * c-file-style: "linux"
92648 + * End:
92649 + */
92650 Index: linux-2.4.21/include/qsnet/pthread.h
92651 ===================================================================
92652 --- linux-2.4.21.orig/include/qsnet/pthread.h   2004-02-23 16:02:56.000000000 -0500
92653 +++ linux-2.4.21/include/qsnet/pthread.h        2005-06-01 23:12:54.755415392 -0400
92654 @@ -0,0 +1,59 @@
92655 +/*
92656 + *    Copyright (c) 2003 by Quadrics Supercomputers World Ltd.
92657 + *
92658 + *    For licensing information please see the supplied COPYING file
92659 + *
92660 + */
92661 +
92662 +/* $Id: pthread.h,v 1.5 2004/06/07 10:47:06 addy Exp $ */
92663 +/*             $Source: /cvs/master/quadrics/qsnet/pthread.h,v $*/
92664 +
92665 +#ifndef _CONFIG_PTHREAD_H
92666 +#define _CONFIG_PTHREAD_H
92667 +
92668 +#ifdef __cplusplus
92669 +extern "C" {
92670 +#endif
92671 +
92672 +#if defined(__ELAN__)
92673 +
92674 +/* No pthread support on Elan co-processor */
92675 +
92676 +#define MUTEX                   unsigned long long
92677 +#define MUTEX_INIT(X)          ;
92678 +#define        MUTEX_LOCK(X)           ;
92679 +#define        MUTEX_UNLOCK(X)         ;
92680 +
92681 +#else
92682 +#if defined(DIGITAL_UNIX)
92683 +#include <tis.h>
92684 +#define MUTEX                  pthread_mutex_t
92685 +#define MUTEX_INIT(X)          tis_mutex_init(X)
92686 +#define        MUTEX_LOCK(X)           tis_mutex_lock(X)
92687 +#define        MUTEX_UNLOCK(X)         tis_mutex_unlock(X)
92688 +#define        MUTEX_TRYLOCK(X)        (tis_mutex_trylock(X) == 0)
92689 +
92690 +#else /* Linux... */
92691 +
92692 +/* Use standard pthread calls */
92693 +#include <pthread.h>
92694 +#define MUTEX                  pthread_mutex_t
92695 +#define MUTEX_INIT(X)          pthread_mutex_init(X, NULL)
92696 +#define        MUTEX_LOCK(X)           pthread_mutex_lock(X)
92697 +#define        MUTEX_UNLOCK(X)         pthread_mutex_unlock(X)
92698 +#define        MUTEX_TRYLOCK(X)        (pthread_mutex_trylock(X) == 0)
92699 +
92700 +#endif /* DIGITAL_UNIX */
92701 +#endif /* __ELAN__ */
92702 +
92703 +#ifdef __cplusplus
92704 +}
92705 +#endif
92706 +
92707 +#endif /* _CONFIG_PTHREAD_H */
92708 +
92709 +/*
92710 + * Local variables:
92711 + * c-file-style: "stroustrup"
92712 + * End:
92713 + */
92714 Index: linux-2.4.21/include/qsnet/statsformat.h
92715 ===================================================================
92716 --- linux-2.4.21.orig/include/qsnet/statsformat.h       2004-02-23 16:02:56.000000000 -0500
92717 +++ linux-2.4.21/include/qsnet/statsformat.h    2005-06-01 23:12:54.756415240 -0400
92718 @@ -0,0 +1,25 @@
92719 +#ifndef _QSNET_STATSFORMAT_H
92720 +#define _QSNET_STATSFORMAT_H
92721 +
92722 +#ident "$Id: statsformat.h,v 1.2 2003/05/22 19:37:14 addy Exp $"
92723 +/*      $Source: /cvs/master/quadrics/qsnet/statsformat.h,v $*/
92724 +
92725 +#include <qsnet/config.h>
92726 +
92727 +/*
92728 + * format of an Elan stats record
92729 + *
92730 + * type    char(8), type of statistic, e.g. FPAGE, ELAN3, TPORT
92731 + * time    uint64, 10 digits, time in millisecs since counters initialised
92732 + * device  uint, 2 digits, Elan device id
92733 + * name    char(32), name of the statistic
92734 + * value   uint64, current value of statistic
92735 + */
92736 +    
92737 +#ifdef _ILP32
92738 +#define ELAN_STATSFORMAT "%-8s %10llu %2d %-32s %llu\n"
92739 +#else
92740 +#define ELAN_STATSFORMAT "%-8s %10lu %2d %-32s %lu\n"
92741 +#endif
92742 +
92743 +#endif
92744 Index: linux-2.4.21/include/qsnet/types.h
92745 ===================================================================
92746 --- linux-2.4.21.orig/include/qsnet/types.h     2004-02-23 16:02:56.000000000 -0500
92747 +++ linux-2.4.21/include/qsnet/types.h  2005-06-01 23:12:54.756415240 -0400
92748 @@ -0,0 +1,90 @@
92749 +/*
92750 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92751 + *
92752 + *    For licensing information please see the supplied COPYING file
92753 + *
92754 + */
92755 +
92756 +#ifndef __QSNET_TYPES_H
92757 +#define __QSNET_TYPES_H
92758 +
92759 +#ident "$Id: types.h,v 1.16 2003/08/01 16:21:38 addy Exp $"
92760 +/*      $Source: /cvs/master/quadrics/qsnet/types.h,v $*/
92761 +
92762 +/*
92763 + * Include typedefs for ISO/IEC 9899:1990 standard types
92764 + *
92765 + *
92766 + *    The following integer typedefs are used:
92767 + *
92768 + *     int8_t, int16_t, int32_t, int64_t, intptr_t
92769 + *     uint8_t, uint16_t, uint32_t, uint64_t, uintptr_t
92770 + *     uchar_t, ushort_t, uint_t, ulong_t
92771 + *
92772 + *    <sys/types.h> also defines the following:
92773 + *     u_char, u_short, u_int, u_long, caddr_t
92774 + */
92775 +
92776 +#include <qsnet/config.h>
92777 +
92778 +#if defined(SOLARIS) && defined(__KERNEL__)
92779 +#  include <sys/inttypes.h>
92780 +#endif
92781 +
92782 +#if defined(SOLARIS) && !defined(__KERNEL__)
92783 +#  include <inttypes.h>
92784 +#  include <sys/types.h>
92785 +#endif
92786 +
92787 +#if defined(DIGITAL_UNIX) && defined(__KERNEL__)
92788 +#  include <sys/bitypes.h>
92789 +#endif
92790 +
92791 +#if defined(DIGITAL_UNIX) && !defined(__KERNEL__)
92792 +#  include <inttypes.h>
92793 +#  include <sys/types.h>
92794 +#endif
92795 +
92796 +#if defined(LINUX) && defined(__KERNEL__)
92797 +#  include <linux/types.h>
92798 +#endif
92799 +
92800 +#if defined(LINUX) && !defined(__KERNEL__)
92801 +#  include <stdint.h>
92802 +#  include <inttypes.h>
92803 +#  include <sys/types.h>
92804 +
92805 +typedef unsigned char  uchar_t;
92806 +typedef unsigned short ushort_t;
92807 +typedef unsigned int   uint_t;
92808 +typedef unsigned long  ulong_t;
92809 +#endif
92810 +
92811 +#if defined(QNX)
92812 +#  include <inttypes.h>
92813 +#  include <sys/types.h>
92814 +#endif
92815 +
92816 +/* Define a type that will represent a Main CPU pointer
92817 + * on both the Main and the Elan
92818 + */
92819 +#ifdef __ELAN__
92820 +
92821 +#if defined(_MAIN_LP64)
92822 +#define QSNET_MAIN_PTR uint64_t
92823 +#else
92824 +#define QSNET_MAIN_PTR uint32_t
92825 +#endif
92826 +
92827 +#else
92828 +
92829 +#ifdef _LP64
92830 +#define QSNET_MAIN_PTR uint64_t
92831 +#else
92832 +#define QSNET_MAIN_PTR uint32_t
92833 +#endif
92834 +
92835 +#endif
92836 +
92837 +
92838 +#endif /* __QSNET_TYPES_H */
92839 Index: linux-2.4.21/include/qsnet/workarounds.h
92840 ===================================================================
92841 --- linux-2.4.21.orig/include/qsnet/workarounds.h       2004-02-23 16:02:56.000000000 -0500
92842 +++ linux-2.4.21/include/qsnet/workarounds.h    2005-06-01 23:12:54.756415240 -0400
92843 @@ -0,0 +1,24 @@
92844 +/*
92845 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92846 + *
92847 + *    For licensing information please see the supplied COPYING file
92848 + *
92849 + */
92850 +
92851 +#ifndef _QSNET_WORKAROUNDS_H
92852 +#define _QSNET_WORKAROUNDS_H
92853 +
92854 +#ident "$Id: workarounds.h,v 1.11 2002/08/09 11:15:55 addy Exp $"
92855 +/*      $Source: /cvs/master/quadrics/qsnet/workarounds.h,v $ */
92856 +
92857 +/* Elan workarounds */
92858 +#undef  ELAN_REVA_SUPPORTED    /* rev a elans no longer supported. */
92859 +#undef  ELITE_REVA_SUPPORTED   /* removed since RMS disables broadcast on rev A elites. */
92860 +#define ELAN_REVB_BUG_1
92861 +/* WORKAROUND for GNAT hw-elan3/3263 */
92862 +#define ELAN_REVB_BUG_2
92863 +
92864 +/* WORKAROUND for GNATs ic-elan3/3637 & ic-elan3/3550 */
92865 +#define ELAN_REVB_BUG_3
92866 +
92867 +#endif /* _QSNET_WORKAROUNDS_H */
92868 Index: linux-2.4.21/include/rms/rmscall.h
92869 ===================================================================
92870 --- linux-2.4.21.orig/include/rms/rmscall.h     2004-02-23 16:02:56.000000000 -0500
92871 +++ linux-2.4.21/include/rms/rmscall.h  2005-06-01 23:12:54.757415088 -0400
92872 @@ -0,0 +1,144 @@
92873 +/*
92874 + * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92875 + *
92876 + * For licensing information please see the supplied COPYING file
92877 + *
92878 + * rmscall.h:  user interface to rms kernel module
92879 + *
92880 + * $Id: rmscall.h,v 1.25 2004/05/14 08:55:57 duncan Exp $
92881 + * $Source: /cvs/master/quadrics/rmsmod/rmscall.h,v $
92882 + *
92883 + */
92884 +
92885 +#ifndef RMSCALL_H_INCLUDED
92886 +#define RMSCALL_H_INCLUDED 1
92887 +
92888 +#ident "$Id: rmscall.h,v 1.25 2004/05/14 08:55:57 duncan Exp $"
92889 +
92890 +#ifdef __cplusplus
92891 +extern "C" {
92892 +#endif
92893 +
92894 +/*
92895 + * flags for rms_fork_register
92896 + *
92897 + * RMS_IOF is not in a public header file 
92898 + */
92899 +#define RMS_IOF 1              /* inherit on fork */
92900 +
92901 +#ifndef __KERNEL__
92902 +#include <sys/types.h>
92903 +#endif
92904 +
92905 +#include <qsnet/types.h>
92906 +#include <elan/capability.h>
92907 +
92908 +#define MAXCOREPATHLEN 32
92909 +
92910 +#if defined(SOLARIS)
92911 +typedef long long rmstime_t;
92912 +#else  /* DIGITAL_UNIX */
92913 +typedef long rmstime_t;
92914 +#endif
92915 +
92916 +typedef enum {
92917 +    
92918 +    PRG_RUNNING  = 0x01,       /* program is running                  */
92919 +    PRG_ZOMBIE   = 0x02,       /* last process on a node has exited   */
92920 +    PRG_NODE     = 0x04,       /* stats are complete for this node    */
92921 +    PRG_KILLED   = 0x08,       /* program was killed                  */
92922 +    PRG_SUSPEND  = 0x10                /* program is suspended                */
92923 +
92924 +} PRGSTATUS_FLAGS;
92925 +
92926 +/*
92927 + * program time statistics extended in version 5 of the kernel module
92928 + */
92929 +typedef struct {
92930 +    rmstime_t etime;           /* elapsed cpu time (milli-secs)       */
92931 +    rmstime_t atime;           /* allocated cpu time (cpu milli-secs) */
92932 +    rmstime_t utime;           /* user cpu time (cpu milli-secs)      */
92933 +    rmstime_t stime;           /* system cpu time (cpu milli-secs)    */
92934 +    int ncpus;                 /* number of cpus allocated            */
92935 +    int flags;                 /* program status flags                */
92936 +    int mem;                   /* max memory size in MBytes           */
92937 +    int pageflts;              /* number of page faults               */
92938 +    rmstime_t memint;          /* memory integral                     */
92939 +} prgstats_old_t;
92940 +
92941 +typedef struct {
92942 +    uint64_t etime;            /* elapsed cpu time (milli-secs)       */
92943 +    uint64_t atime;            /* allocated cpu time (cpu milli-secs) */
92944 +    uint64_t utime;            /* user cpu time (cpu milli-secs)      */
92945 +    uint64_t stime;            /* system cpu time (cpu milli-secs)    */
92946 +    uint64_t pageflts;         /* number of page faults               */
92947 +    uint64_t memint;           /* memory integral                     */
92948 +    uint64_t ebytes;           /* data transferred by the Elan(s)     */
92949 +    uint64_t exfers;           /* number of Elan data transfers       */
92950 +    uint64_t spare64[4];       /* expansion space                     */
92951 +    int ncpus;                 /* number of cpus allocated            */
92952 +    int flags;                 /* program status flags                */
92953 +    int mem;                   /* max memory size in MBytes           */
92954 +    int spare32[5];             /* expansion space                     */
92955 +} prgstats_t;
92956 +
92957 +int  rmsmod_init(void);
92958 +void rmsmod_fini(void);
92959 +
92960 +int rms_setcorepath(caddr_t path);
92961 +int rms_getcorepath(pid_t pid, caddr_t path, int maxlen);
92962 +int rms_prgcreate(int id, uid_t uid, int cpus);
92963 +int rms_prgdestroy(int id);
92964 +int rms_prgids(int maxids, int *prgids, int *nprgs);
92965 +int rms_prginfo(int id, int maxpids, pid_t *pids, int *nprocs);
92966 +int rms_prgaddcap(int id, int index, ELAN_CAPABILITY *cap);
92967 +
92968 +int rms_prgsuspend(int id);
92969 +int rms_prgresume(int id);
92970 +int rms_prgsignal(int id, int signo);
92971 +
92972 +int rms_getprgid(pid_t pid, int *id);
92973 +int rms_ncaps(int *ncaps);
92974 +int rms_getcap(int index, ELAN_CAPABILITY *cap);
92975 +int rms_mycap(int *index);
92976 +int rms_setcap(int index, int ctx);
92977 +int rms_prefcap(int nprocess, int *index);
92978 +
92979 +int   rms_prggetstats(int id, prgstats_t *stats);
92980 +void  rms_accumulatestats(prgstats_t *total, prgstats_t *stats);
92981 +char *rms_statsreport(prgstats_t *stats, char *buf);
92982 +
92983 +int rms_elaninitdone(int vp);
92984 +int rms_prgelanpids(int id, int maxpids, int *vps, pid_t *pids, int *npids);
92985 +int rms_setelanstats(int id, uint64_t ebytes, uint64_t exfers);
92986 +
92987 +int rms_setpset(int psid);
92988 +int rms_getpset(int id, int *psid);
92989 +int rms_modversion();
92990 +
92991 +#ifdef __cplusplus
92992 +}
92993 +#endif
92994 +
92995 +
92996 +#if defined(__KERNEL__)
92997 +
92998 +int rms_init(void);
92999 +int rms_fini(void);
93000 +int rms_reconfigure(void);
93001 +
93002 +extern int rms_debug;
93003 +
93004 +#if 1
93005 +#define DBG(x) do if (rms_debug) x ; while (0)
93006 +#else
93007 +#define DBG(x)
93008 +#endif
93009 +
93010 +#endif
93011 +
93012 +#endif /* RMSCALL_H_INCLUDED */
93013 +
93014 +
93015 +
93016 +
93017 Index: linux-2.4.21/include/rms/rmsio.h
93018 ===================================================================
93019 --- linux-2.4.21.orig/include/rms/rmsio.h       2004-02-23 16:02:56.000000000 -0500
93020 +++ linux-2.4.21/include/rms/rmsio.h    2005-06-01 23:12:54.757415088 -0400
93021 @@ -0,0 +1,185 @@
93022 +/*
93023 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93024 + *
93025 + *    For licensing information please see the supplied COPYING file
93026 + *
93027 + */
93028 +
93029 +#ident "@(#)$Id: rmsio.h,v 1.6 2004/05/14 08:55:57 duncan Exp $"
93030 +/*      $Source: /cvs/master/quadrics/rmsmod/rmsio.h,v $*/
93031 +
93032 +
93033 +#ifndef __RMSMOD_RMSIO_H
93034 +#define __RMSMOD_RMSIO_H
93035 +
93036 +/* arg is corepath string */
93037 +#define RMSIO_SETCOREPATH      _IOW ('r', 1, char)
93038 +
93039 +typedef struct rmsio_getcorepath_struct
93040 +{
93041 +    pid_t              pid;
93042 +    char               *corepath;
93043 +    int                        maxlen;
93044 +} RMSIO_GETCOREPATH_STRUCT;
93045 +#define RMSIO_GETCOREPATH      _IOW ('r', 2, RMSIO_GETCOREPATH_STRUCT)
93046 +
93047 +typedef struct rmsio_prgcreate_struct
93048 +{
93049 +    int                        id;
93050 +    uid_t              uid;
93051 +    int                        cpus;
93052 +} RMSIO_PRGCREATE_STRUCT;
93053 +#define RMSIO_PRGCREATE                _IOW ('r', 3, RMSIO_PRGCREATE_STRUCT)
93054 +
93055 +typedef struct rmsio_prginfo_struct
93056 +{
93057 +    int                        id;
93058 +    int                        maxpids;
93059 +    pid_t              *pids;
93060 +    int                        *nprocs;
93061 +} RMSIO_PRGINFO_STRUCT;
93062 +#define RMSIO_PRGINFO          _IOW ('r', 4, RMSIO_PRGINFO_STRUCT)
93063 +
93064 +typedef struct rmsio_prgsignal_struct
93065 +{
93066 +    int                        id;
93067 +    int                        signo;
93068 +} RMSIO_PRGSIGNAL_STRUCT;
93069 +#define RMSIO_PRGSIGNAL                _IOW ('r', 5, RMSIO_PRGSIGNAL_STRUCT)
93070 +
93071 +typedef struct rmsio_prgaddcap_struct
93072 +{
93073 +    int                        id;
93074 +    int                        index;
93075 +    ELAN_CAPABILITY    *cap;
93076 +} RMSIO_PRGADDCAP_STRUCT;
93077 +#define RMSIO_PRGADDCAP                _IOW ('r', 6, RMSIO_PRGADDCAP_STRUCT)
93078 +typedef struct rmsio_setcap_struct
93079 +{
93080 +    int                        index;
93081 +    int                        ctx;
93082 +} RMSIO_SETCAP_STRUCT;
93083 +#define RMSIO_SETCAP           _IOW ('r', 7, RMSIO_SETCAP_STRUCT)
93084 +
93085 +typedef struct rmsio_getcap_struct
93086 +{
93087 +    int                        index;
93088 +    ELAN_CAPABILITY     *cap;
93089 +} RMSIO_GETCAP_STRUCT;
93090 +#define RMSIO_GETCAP           _IOW ('r', 8, RMSIO_GETCAP_STRUCT)
93091 +
93092 +typedef struct rmsio_getcap_struct32
93093 +{
93094 +    int                        index;
93095 +    unsigned int        capptr;
93096 +} RMSIO_GETCAP_STRUCT32;
93097 +#define RMSIO_GETCAP32         _IOW ('r', 8, RMSIO_GETCAP_STRUCT32)
93098 +
93099 +/* arg is pointer to ncaps */
93100 +#define RMSIO_NCAPS            _IOW ('r', 9, int)
93101 +
93102 +typedef struct rmsio_prggetstats_struct
93103 +{
93104 +    int                        id;
93105 +    prgstats_old_t     *stats;
93106 +} RMSIO_PRGGETSTATS_STRUCT;
93107 +#define RMSIO_PRGGETSTATS      _IOW ('r', 10, RMSIO_PRGGETSTATS_STRUCT)
93108 +
93109 +/* arg is program id */
93110 +#define RMSIO_PRGSUSPEND       _IOW ('r', 11, int)
93111 +#define RMSIO_PRGRESUME                _IOW ('r', 12, int)
93112 +#define RMSIO_PRGDESTROY       _IOW ('r', 13, int)
93113 +
93114 +typedef struct rmsio_getprgid_struct
93115 +{
93116 +    pid_t              pid;
93117 +    int                        *id;
93118 +} RMSIO_GETPRGID_STRUCT;
93119 +#define RMSIO_GETPRGID         _IOW ('r', 14, RMSIO_GETPRGID_STRUCT)
93120 +
93121 +typedef struct rmsio_getprgid_struct32
93122 +{
93123 +    pid_t              pid;
93124 +    unsigned int       idptr;
93125 +} RMSIO_GETPRGID_STRUCT32;
93126 +#define RMSIO_GETPRGID32       _IOW ('r', 14, RMSIO_GETPRGID_STRUCT32)
93127 +
93128 +/* arg is pointer to index */
93129 +#define RMSIO_GETMYCAP         _IOW ('r', 15, int)
93130 +
93131 +typedef struct rmsio_prgids_struct
93132 +{
93133 +    int                        maxids;
93134 +    int                        *prgids;
93135 +    int                        *nprgs;
93136 +} RMSIO_PRGIDS_STRUCT;
93137 +#define RMSIO_PRGIDS           _IOW ('r', 16, RMSIO_PRGIDS_STRUCT)
93138 +
93139 +/* arg is pointer to vp */
93140 +#define RMSIO_ELANINITDONE     _IOW ('r', 17, int)
93141 +
93142 +typedef struct rmsio_prgelanpids_struct
93143 +{
93144 +    int    id;
93145 +    int    maxpids;
93146 +    int   *vps;
93147 +    int   *pids;
93148 +    int   *npids;
93149 +} RMSIO_PRGELANPIDS_STRUCT;
93150 +#define RMSIO_PRGELANPIDS      _IOW ('r', 18, RMSIO_PRGELANPIDS_STRUCT)
93151 +
93152 +typedef struct rmsio_setpset_struct
93153 +{
93154 +    int    id;
93155 +    int    psid;
93156 +} RMSIO_SETPSET_STRUCT;
93157 +#define RMSIO_SETPSET          _IOW ('r', 19, RMSIO_SETPSET_STRUCT)
93158 +
93159 +typedef struct rmsio_getpset_struct
93160 +{
93161 +    int    id;
93162 +    int   *psid;
93163 +} RMSIO_GETPSET_STRUCT;
93164 +#define RMSIO_GETPSET          _IOW ('r', 20, RMSIO_GETPSET_STRUCT)
93165 +
93166 +/*
93167 + * have to pass a pointer to the stats, the switch
93168 + * statement goes wrong in the module of the size
93169 + * is too large
93170 + */
93171 +typedef struct {
93172 +    uint64_t ebytes;
93173 +    uint64_t exfers;
93174 +} elanstats_t;
93175 +
93176 +typedef struct rmsio_setelanstats_struct
93177 +{
93178 +    int    id;
93179 +    elanstats_t *estats;
93180 +} RMSIO_SETELANSTATS_STRUCT;
93181 +#define RMSIO_SETELANSTATS      _IOW ('r', 21, RMSIO_SETELANSTATS_STRUCT)
93182 +
93183 +typedef struct rmsio_prggetstats2_struct
93184 +{
93185 +    int                        id;
93186 +    prgstats_t         *stats;
93187 +} RMSIO_PRGGETSTATS2_STRUCT;
93188 +#define RMSIO_PRGGETSTATS2     _IOW ('r', 22, RMSIO_PRGGETSTATS2_STRUCT)
93189 +
93190 +typedef struct rmsio_modversion_struct
93191 +{
93192 +    int *version;
93193 +} RMSIO_MODVERSION_STRUCT;
93194 +#define RMSIO_MODVERSION       _IOW ('r', 23, RMSIO_MODVERSION_STRUCT)
93195 +
93196 +
93197 +#endif /* __RMSMOD_RMSIO_H */
93198 +
93199 +
93200 +
93201 +
93202 +
93203 +
93204 +
93205 +
93206 +
93207 Index: linux-2.4.21/ipc/shm.c
93208 ===================================================================
93209 --- linux-2.4.21.orig/ipc/shm.c 2005-06-01 22:51:50.000000000 -0400
93210 +++ linux-2.4.21/ipc/shm.c      2005-06-01 23:12:54.758414936 -0400
93211 @@ -723,6 +723,44 @@
93212         return retval;
93213  }
93214  
93215 +/*
93216 + * Mark all segments created by this process for destruction
93217 + */
93218 +asmlinkage int shm_cleanup ()
93219 +{
93220 +       int i;
93221 +
93222 +       down(&shm_ids.sem);
93223 +
93224 +       for(i = 0; i <= shm_ids.max_id; i++) {
93225 +               struct shmid_kernel* shp;
93226 +
93227 +               shp = shm_lock(i);
93228 +               if(shp!=NULL) {
93229 +
93230 +                   /* Mark this segment for destruction if we created it */
93231 +                   if (current->pid == shp->shm_cprid)
93232 +                   {
93233 +                       /* Copy of IPC_RMID code */
93234 +                       if (shp->shm_nattch){
93235 +                               shp->shm_flags |= SHM_DEST;
93236 +                               /* Do not find it any more */
93237 +                               shp->shm_perm.key = IPC_PRIVATE;
93238 +                       } else {
93239 +                               shm_destroy(shp);
93240 +                               continue;
93241 +                       }
93242 +                   }
93243 +
93244 +                   shm_unlock(i);
93245 +               }
93246 +       }
93247 +
93248 +       up(&shm_ids.sem);
93249 +
93250 +       return 0;
93251 +}
93252 +
93253  #ifdef CONFIG_PROC_FS
93254  static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
93255  {
93256 Index: linux-2.4.21/kernel/exit.c
93257 ===================================================================
93258 --- linux-2.4.21.orig/kernel/exit.c     2005-06-01 22:58:09.055062312 -0400
93259 +++ linux-2.4.21/kernel/exit.c  2005-06-01 23:12:54.759414784 -0400
93260 @@ -19,6 +19,7 @@
93261  #include <linux/file.h>
93262  #include <linux/binfmts.h>
93263  #include <linux/ptrace.h>
93264 +#include <linux/ptrack.h>
93265  #include <linux/mount.h>
93266  #include <linux/process_timing.h>
93267  #include <asm/uaccess.h>
93268 @@ -705,6 +706,10 @@
93269         if (current->tux_info)
93270                 current->tux_exit();
93271         acct_process(code);
93272 +
93273 +       /* Notify any ptrack callbacks of the process exit */
93274 +       ptrack_call_callbacks(PTRACK_PHASE_EXIT, NULL);
93275 +
93276         if (isaudit(tsk))
93277                 audit_exit(tsk, code);
93278         __exit_mm(tsk);
93279 Index: linux-2.4.21/kernel/fork.c
93280 ===================================================================
93281 --- linux-2.4.21.orig/kernel/fork.c     2005-06-01 22:58:09.055062312 -0400
93282 +++ linux-2.4.21/kernel/fork.c  2005-06-01 23:12:54.760414632 -0400
93283 @@ -14,6 +14,7 @@
93284  #include <linux/config.h>
93285  #include <linux/slab.h>
93286  #include <linux/init.h>
93287 +#include <linux/ptrack.h>
93288  #include <linux/unistd.h>
93289  #include <linux/smp_lock.h>
93290  #include <linux/module.h>
93291 @@ -308,6 +309,7 @@
93292         /* unlimited stack is larger than TASK_SIZE */
93293         mm->non_executable_cache = NON_EXECUTABLE_CACHE(current);
93294         mm->pgd = pgd_alloc(mm);
93295 +       mm->coproc_ops = NULL;
93296         mm->def_flags = 0;
93297         if (mm->pgd)
93298                 return mm;
93299 @@ -1110,6 +1112,12 @@
93300                         p->vfork_done = &vfork;
93301                         init_completion(&vfork);
93302                 }
93303 +               
93304 +               if (ptrack_call_callbacks (PTRACK_PHASE_CLONE, p)) {
93305 +                       /* start up with an immediate SIGKILL. */
93306 +                       sigaddset (&p->pending.signal, SIGKILL);
93307 +                       p->sigpending = 1;
93308 +               }
93309  
93310                 if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
93311                         /*
93312 Index: linux-2.4.21/kernel/ksyms.c
93313 ===================================================================
93314 --- linux-2.4.21.orig/kernel/ksyms.c    2005-06-01 23:12:40.911519984 -0400
93315 +++ linux-2.4.21/kernel/ksyms.c 2005-06-01 23:12:54.760414632 -0400
93316 @@ -43,6 +43,7 @@
93317  #include <linux/mmzone.h>
93318  #include <linux/mm.h>
93319  #include <linux/capability.h>
93320 +#include <linux/ptrack.h>
93321  #include <linux/highuid.h>
93322  #include <linux/brlock.h>
93323  #include <linux/fs.h>
93324 @@ -104,6 +105,10 @@
93325  
93326  #endif
93327  
93328 +EXPORT_SYMBOL_GPL(ptrack_register);
93329 +EXPORT_SYMBOL_GPL(ptrack_deregister);
93330 +EXPORT_SYMBOL_GPL(ptrack_registered);
93331 +
93332  /* process memory management */
93333  EXPORT_SYMBOL(do_mmap_pgoff);
93334  EXPORT_SYMBOL(do_munmap);
93335 @@ -113,6 +118,7 @@
93336  EXPORT_SYMBOL(exit_files);
93337  EXPORT_SYMBOL(exit_fs);
93338  EXPORT_SYMBOL(exit_sighand);
93339 +EXPORT_SYMBOL(make_pages_present);
93340  EXPORT_SYMBOL(unshare_files);
93341  EXPORT_SYMBOL(mmput);
93342  
93343 @@ -589,6 +595,10 @@
93344  EXPORT_SYMBOL(kernel_read);
93345  EXPORT_SYMBOL(open_exec);
93346  
93347 +/* QSW Shared-memory cleanup hook for rmsmod */
93348 +extern int shm_cleanup();
93349 +EXPORT_SYMBOL_GPL(shm_cleanup);
93350 +
93351  /* Miscellaneous access points */
93352  EXPORT_SYMBOL(si_meminfo);
93353  
93354 Index: linux-2.4.21/kernel/Makefile
93355 ===================================================================
93356 --- linux-2.4.21.orig/kernel/Makefile   2005-06-01 22:51:53.000000000 -0400
93357 +++ linux-2.4.21/kernel/Makefile        2005-06-01 23:12:54.760414632 -0400
93358 @@ -18,6 +18,10 @@
93359             signal.o sys.o kmod.o context.o \
93360             futex.o pid.o kksymoops.o
93361  
93362 +# Quadrics additions
93363 +export-objs += ptrack.o
93364 +obj-y += ptrack.o
93365 +
93366  obj-$(CONFIG_UID16) += uid16.o
93367  obj-$(CONFIG_MODULES) += ksyms.o
93368  obj-$(CONFIG_COMPAT) += compat.o
93369 Index: linux-2.4.21/kernel/ptrack.c
93370 ===================================================================
93371 --- linux-2.4.21.orig/kernel/ptrack.c   2004-02-23 16:02:56.000000000 -0500
93372 +++ linux-2.4.21/kernel/ptrack.c        2005-06-01 23:12:54.761414480 -0400
93373 @@ -0,0 +1,143 @@
93374 +/*
93375 + *    Copyright (C) 2000  Regents of the University of California
93376 + *
93377 + *    This program is free software; you can redistribute it and/or modify
93378 + *    it under the terms of the GNU General Public License as published by
93379 + *    the Free Software Foundation; either version 2 of the License, or
93380 + *    (at your option) any later version.
93381 + *
93382 + *    This program is distributed in the hope that it will be useful,
93383 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
93384 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
93385 + *    GNU General Public License for more details.
93386 + *
93387 + *    You should have received a copy of the GNU General Public License
93388 + *    along with this program; if not, write to the Free Software
93389 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
93390 + *
93391 + * Derived from exit_actn.c by
93392 + *    Copyright (C) 2003 Quadrics Ltd.
93393 + */
93394 +
93395 +
93396 +#include <linux/spinlock.h>
93397 +#include <linux/sched.h>
93398 +#include <linux/ptrack.h>
93399 +#include <linux/slab.h>
93400 +#include <linux/list.h>
93401 +
93402 +int
93403 +ptrack_register (ptrack_callback_t callback, void *arg)
93404 +{
93405 +       struct ptrack_desc *desc = kmalloc (sizeof (struct ptrack_desc), GFP_KERNEL);
93406 +       
93407 +       if (desc == NULL)
93408 +               return -ENOMEM;
93409 +
93410 +       desc->callback = callback;
93411 +       desc->arg      = arg;
93412 +       
93413 +       list_add_tail (&desc->link, &current->ptrack_list);
93414 +       
93415 +       return 0;
93416 +}
93417 +
93418 +void
93419 +ptrack_deregister (ptrack_callback_t callback, void *arg)
93420 +{      
93421 +       struct list_head *el, *nel;
93422 +       
93423 +       list_for_each_safe (el, nel, &current->ptrack_list) {
93424 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
93425 +               
93426 +               if (desc->callback == callback && desc->arg == arg) {
93427 +                       list_del (&desc->link);
93428 +                       kfree (desc);
93429 +               }
93430 +       }
93431 +}
93432 +
93433 +int
93434 +ptrack_registered (ptrack_callback_t callback, void *arg)
93435 +{
93436 +       struct list_head *el;
93437 +       
93438 +       list_for_each (el, &current->ptrack_list) {
93439 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
93440 +               
93441 +               if (desc->callback == callback && desc->arg == arg)
93442 +                       return 1;
93443 +       }
93444 +       return 0;
93445 +}      
93446 +        
93447 +int
93448 +ptrack_call_callbacks (int phase, struct task_struct *child)
93449 +{
93450 +       struct list_head *el, *nel;
93451 +       struct ptrack_desc *new;
93452 +       int res;
93453 +
93454 +       if (phase == PTRACK_PHASE_CLONE)
93455 +               INIT_LIST_HEAD (&child->ptrack_list);
93456 +
93457 +       /* if init process, ignore */
93458 +       if (current->pid == 0)
93459 +           return 0;
93460 +       
93461 +       list_for_each_safe (el, nel, &current->ptrack_list) {
93462 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
93463 +               
93464 +              res = desc->callback (desc->arg, phase, child);
93465 +               
93466 +               switch (phase)
93467 +               {
93468 +               case PTRACK_PHASE_EXIT:
93469 +                       list_del (&desc->link);
93470 +                       kfree (desc);
93471 +                       break;
93472 +                       
93473 +               case PTRACK_PHASE_CLONE:
93474 +                      switch (res)
93475 +                      {
93476 +                      case PTRACK_FINISHED:
93477 +                              break;
93478 +
93479 +                      case PTRACK_INNHERIT:
93480 +                              if ((new = kmalloc (sizeof (struct ptrack_desc), GFP_ATOMIC)) == NULL)
93481 +                              {
93482 +                                      /* allocation failed - notify that this process is not going
93483 +                                       * to be started by signalling clone failure.
93484 +                                       */
93485 +                                      desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child);
93486 +                                      
93487 +                                      goto failed;
93488 +                              }
93489 +
93490 +                               new->callback = desc->callback;
93491 +                               new->arg      = desc->arg;
93492 +                               
93493 +                               list_add_tail (&new->link, &child->ptrack_list);
93494 +                              break;
93495 +
93496 +                      case PTRACK_DENIED:
93497 +                              goto failed;
93498 +                       }
93499 +                      break;
93500 +               }
93501 +       }
93502 +
93503 +       return 0;
93504 +
93505 + failed:
93506 +       while (! list_empty (&child->ptrack_list))
93507 +       {
93508 +              struct ptrack_desc *desc = list_entry (child->ptrack_list.next, struct ptrack_desc, link);
93509 +              
93510 +              desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child);
93511 +
93512 +              list_del (&desc->link);
93513 +              kfree (desc);
93514 +       }
93515 +       return 1;
93516 +}
93517 Index: linux-2.4.21/mm/filemap.c
93518 ===================================================================
93519 --- linux-2.4.21.orig/mm/filemap.c      2005-06-01 23:12:41.100491256 -0400
93520 +++ linux-2.4.21/mm/filemap.c   2005-06-01 23:12:54.763414176 -0400
93521 @@ -22,6 +22,7 @@
93522  #include <linux/swapctl.h>
93523  #include <linux/init.h>
93524  #include <linux/mm.h>
93525 +#include <linux/coproc.h>
93526  #include <linux/mm_inline.h>
93527  #include <linux/iobuf.h>
93528  #include <linux/bootmem.h>
93529 @@ -2468,6 +2469,7 @@
93530         flush_cache_range(vma, end - size, end);
93531         if (address >= end)
93532                 BUG();
93533 +       coproc_sync_range (vma->vm_mm, address, end);
93534         do {
93535                 error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
93536                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
93537 Index: linux-2.4.21/mm/memory.c
93538 ===================================================================
93539 --- linux-2.4.21.orig/mm/memory.c       2005-06-01 22:52:04.000000000 -0400
93540 +++ linux-2.4.21/mm/memory.c    2005-06-01 23:13:59.371592240 -0400
93541 @@ -42,6 +42,7 @@
93542  #include <linux/smp_lock.h>
93543  #include <linux/swapctl.h>
93544  #include <linux/iobuf.h>
93545 +#include <linux/coproc.h>
93546  #include <linux/highmem.h>
93547  #include <linux/pagemap.h>
93548  #include <linux/module.h>
93549 @@ -632,6 +633,7 @@
93550                 BUG_ON(address >= end);
93551  
93552                 spin_lock(&mm->page_table_lock);
93553 +               coproc_invalidate_range (mm, address, end);
93554                 flush_cache_range(vma, start, end);
93555                 tlb = tlb_gather_mmu(vma);
93556  
93557 @@ -1302,6 +1304,7 @@
93558                 BUG();
93559  
93560         spin_lock(&mm->page_table_lock);
93561 +       coproc_invalidate_range (mm, beg, end);
93562         do {
93563                 pmd_t *pmd = pmd_alloc(mm, dir, address);
93564                 error = -ENOMEM;
93565 @@ -1313,6 +1316,7 @@
93566                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
93567                 dir++;
93568         } while (address && (address < end));
93569 +       coproc_update_range(mm, beg, end);
93570         spin_unlock(&mm->page_table_lock);
93571         flush_tlb_range(vma, beg, end);
93572         return error;
93573 @@ -1391,6 +1395,7 @@
93574                 BUG();
93575  
93576         spin_lock(&mm->page_table_lock);
93577 +       coproc_invalidate_range(mm, beg, end);
93578         do {
93579                 pmd_t *pmd = pmd_alloc(mm, dir, from);
93580                 error = -ENOMEM;
93581 @@ -1402,6 +1407,7 @@
93582                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
93583                 dir++;
93584         } while (from && (from < end));
93585 +       coproc_update_range(mm, beg, end);
93586         spin_unlock(&mm->page_table_lock);
93587         flush_tlb_range(vma, beg, end);
93588         return error;
93589 @@ -1497,8 +1503,10 @@
93590                         unlock_page(old_page);
93591                         flush_cache_page(vma, address);
93592                         entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)), vma);
93593 +                       coproc_invalidate_page(vma, address);
93594                         establish_pte(vma, address, page_table, entry);
93595                         pte_unmap(page_table);
93596 +                       coproc_update_page(vma, address);
93597                         spin_unlock(&mm->page_table_lock);
93598                         return 1;       /* Minor fault */
93599                 }
93600 @@ -1528,6 +1536,7 @@
93601                 if (PageReserved(old_page))
93602                         ++mm->rss;
93603                 page_remove_rmap(old_page, page_table);
93604 +               coproc_invalidate_page(vma, address);
93605                 break_cow(vma, new_page, address, page_table);
93606                 pte_chain = page_add_rmap(new_page, page_table, pte_chain);
93607                 lru_cache_add(new_page);
93608 @@ -1536,6 +1545,7 @@
93609                 new_page = old_page;
93610         }
93611         pte_unmap(page_table);
93612 +       coproc_update_page(vma, address);
93613         spin_unlock(&mm->page_table_lock);
93614         if (old_page_locked)
93615                 unlock_page(old_page);
93616 @@ -1748,6 +1758,7 @@
93617         /* No need to invalidate - it was non-present before */
93618         update_mmu_cache(vma, address, pte);
93619         pte_unmap(page_table);
93620 +       coproc_update_page(vma, address);
93621         spin_unlock(&mm->page_table_lock);
93622         pte_chain_free(pte_chain);
93623         return ret;
93624 @@ -1804,6 +1815,7 @@
93625         /* No need to invalidate - it was non-present before */
93626         update_mmu_cache(vma, addr, entry);
93627         pte_unmap(page_table);
93628 +       coproc_update_page(vma, addr);
93629         spin_unlock(&mm->page_table_lock);
93630         ret = 1;        /* Minor fault */
93631         goto out;
93632 @@ -1902,6 +1914,7 @@
93633  
93634         /* no need to invalidate: a not-present page shouldn't be cached */
93635         update_mmu_cache(vma, address, entry);
93636 +       coproc_update_page(vma, address);
93637         spin_unlock(&mm->page_table_lock);
93638         pte_chain_free(pte_chain);
93639         return 2;       /* Major fault */
93640 @@ -1958,8 +1971,10 @@
93641                 entry = pte_mkdirty(entry);
93642         }
93643         entry = pte_mkyoung(entry);
93644 +       coproc_invalidate_page(vma, address);
93645         establish_pte(vma, address, pte, entry);
93646         pte_unmap(pte);
93647 +       coproc_update_page(vma, address);
93648         spin_unlock(&mm->page_table_lock);
93649         return 1;
93650  }
93651 Index: linux-2.4.21/mm/mmap.c
93652 ===================================================================
93653 --- linux-2.4.21.orig/mm/mmap.c 2005-06-01 22:51:50.000000000 -0400
93654 +++ linux-2.4.21/mm/mmap.c      2005-06-01 23:12:54.767413568 -0400
93655 @@ -30,6 +30,7 @@
93656  #include <linux/init.h>
93657  #include <linux/file.h>
93658  #include <linux/fs.h>
93659 +#include <linux/coproc.h>
93660  #include <linux/personality.h>
93661  #include <linux/compiler.h>
93662  #include <linux/profile.h>
93663 @@ -1450,6 +1451,7 @@
93664         release_segments(mm);
93665   
93666         spin_lock(&mm->page_table_lock);
93667 +       coproc_release(mm);
93668         mpnt = mm->mmap;
93669         mm->mmap = mm->mmap_cache = NULL;
93670         mm->mm_rb = RB_ROOT;
93671 Index: linux-2.4.21/mm/mprotect.c
93672 ===================================================================
93673 --- linux-2.4.21.orig/mm/mprotect.c     2005-06-01 22:51:50.000000000 -0400
93674 +++ linux-2.4.21/mm/mprotect.c  2005-06-01 23:12:54.767413568 -0400
93675 @@ -24,6 +24,7 @@
93676  #include <linux/smp_lock.h>
93677  #include <linux/shm.h>
93678  #include <linux/mman.h>
93679 +#include <linux/coproc.h>
93680  #include <linux/highmem.h>
93681  #include <linux/hugetlb.h>
93682  
93683 @@ -106,6 +107,7 @@
93684         if (start >= end)
93685                 BUG();
93686         spin_lock(&current->mm->page_table_lock);
93687 +       coproc_change_protection (current->mm, start, end, newprot);
93688         do {
93689                 change_pmd_range(vma, dir, start, end - start, newprot);
93690                 start = (start + PGDIR_SIZE) & PGDIR_MASK;
93691 Index: linux-2.4.21/mm/mremap.c
93692 ===================================================================
93693 --- linux-2.4.21.orig/mm/mremap.c       2005-06-01 22:51:50.000000000 -0400
93694 +++ linux-2.4.21/mm/mremap.c    2005-06-01 23:12:54.768413416 -0400
93695 @@ -26,6 +26,7 @@
93696  #include <linux/shm.h>
93697  #include <linux/mman.h>
93698  #include <linux/swap.h>
93699 +#include <linux/coproc.h>
93700  #include <linux/highmem.h>
93701  #include <linux/hugetlb.h>
93702  
93703 @@ -160,7 +161,10 @@
93704         unsigned long new_addr, unsigned long old_addr, unsigned long len)
93705  {
93706         unsigned long offset = len;
93707 +       struct mm_struct *mm = vma->vm_mm;
93708  
93709 +       coproc_invalidate_range(mm, old_addr, old_addr+len);
93710 +       coproc_invalidate_range(mm, new_addr, new_addr+len);
93711         flush_cache_range(vma, old_addr, old_addr + len);
93712  
93713         /*
93714 Index: linux-2.4.21/mm/rmap.c
93715 ===================================================================
93716 --- linux-2.4.21.orig/mm/rmap.c 2005-06-01 22:51:50.000000000 -0400
93717 +++ linux-2.4.21/mm/rmap.c      2005-06-01 23:12:54.768413416 -0400
93718 @@ -26,6 +26,7 @@
93719  #include <linux/slab.h>
93720  #include <linux/init.h>
93721  #include <linux/cache.h>
93722 +#include <linux/coproc.h>
93723  
93724  #include <asm/pgalloc.h>
93725  #include <asm/rmap.h>
93726 @@ -449,6 +450,7 @@
93727         }
93728  
93729         /* Nuke the page table entry. */
93730 +       coproc_invalidate_page(vma, address);
93731         pte = vm_ptep_get_and_clear(vma, address, ptep);
93732         flush_tlb_page(vma, address);
93733         flush_cache_page(vma, address);