land b1_5 onto HEAD
[fs/lustre-release.git] / lustre / kernel_patches / patches / qsnet-rhel4-2.6.patch
1 diff -urN clean/arch/i386/defconfig linux-2.6.9/arch/i386/defconfig
2 --- clean/arch/i386/defconfig   2004-10-18 17:54:38.000000000 -0400
3 +++ linux-2.6.9/arch/i386/defconfig     2005-10-10 17:47:17.000000000 -0400
4 @@ -119,6 +119,8 @@
5  CONFIG_IRQBALANCE=y
6  CONFIG_HAVE_DEC_LOCK=y
7  # CONFIG_REGPARM is not set
8 +CONFIG_IOPROC=y
9 +CONFIG_PTRACK=y
10  
11  #
12  # Power management options (ACPI, APM)
13 diff -urN clean/arch/i386/Kconfig linux-2.6.9/arch/i386/Kconfig
14 --- clean/arch/i386/Kconfig     2005-05-13 13:39:03.000000000 -0400
15 +++ linux-2.6.9/arch/i386/Kconfig       2005-10-10 17:47:17.000000000 -0400
16 @@ -946,6 +946,9 @@
17           support.  As of this writing the exact hardware interface is
18           strongly in flux, so no good recommendation can be made.
19  
20 +source "mm/Kconfig"
21 +source "kernel/Kconfig"
22 +       
23  endmenu
24  
25  
26 diff -urN clean/arch/ia64/defconfig linux-2.6.9/arch/ia64/defconfig
27 --- clean/arch/ia64/defconfig   2004-10-18 17:53:12.000000000 -0400
28 +++ linux-2.6.9/arch/ia64/defconfig     2005-10-10 17:47:17.000000000 -0400
29 @@ -83,6 +83,8 @@
30  CONFIG_COMPAT=y
31  CONFIG_PERFMON=y
32  CONFIG_IA64_PALINFO=y
33 +CONFIG_IOPROC=y
34 +CONFIG_PTRACK=y
35  
36  #
37  # Firmware Drivers
38 diff -urN clean/arch/ia64/Kconfig linux-2.6.9/arch/ia64/Kconfig
39 --- clean/arch/ia64/Kconfig     2005-05-13 13:39:00.000000000 -0400
40 +++ linux-2.6.9/arch/ia64/Kconfig       2005-10-10 17:47:17.000000000 -0400
41 @@ -299,6 +299,9 @@
42           To use this option, you have to ensure that the "/proc file system
43           support" (CONFIG_PROC_FS) is enabled, too.
44  
45 +source "mm/Kconfig"
46 +source "kernel/Kconfig"
47 +
48  source "drivers/firmware/Kconfig"
49  
50  source "fs/Kconfig.binfmt"
51 diff -urN clean/arch/x86_64/defconfig linux-2.6.9/arch/x86_64/defconfig
52 --- clean/arch/x86_64/defconfig 2004-10-18 17:54:39.000000000 -0400
53 +++ linux-2.6.9/arch/x86_64/defconfig   2005-10-10 17:47:17.000000000 -0400
54 @@ -87,6 +87,8 @@
55  CONFIG_GART_IOMMU=y
56  CONFIG_SWIOTLB=y
57  CONFIG_X86_MCE=y
58 +CONFIG_IOPROC=y
59 +CONFIG_PTRACK=y
60  
61  #
62  # Power management options
63 diff -urN clean/arch/x86_64/Kconfig linux-2.6.9/arch/x86_64/Kconfig
64 --- clean/arch/x86_64/Kconfig   2005-05-13 13:39:03.000000000 -0400
65 +++ linux-2.6.9/arch/x86_64/Kconfig     2005-10-10 17:47:17.000000000 -0400
66 @@ -327,6 +327,9 @@
67            machine check error logs. See
68            ftp://ftp.x86-64.org/pub/linux/tools/mcelog
69  
70 +source "mm/Kconfig"
71 +source "kernel/Kconfig"
72 +
73  endmenu
74  
75  
76 diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc.txt
77 --- clean/Documentation/vm/ioproc.txt   1969-12-31 19:00:00.000000000 -0500
78 +++ linux-2.6.9/Documentation/vm/ioproc.txt     2005-10-10 17:47:17.000000000 -0400
79 @@ -0,0 +1,468 @@
80 +Linux IOPROC patch overview
81 +===========================
82 +
83 +The network interface for an HPC network differs significantly from
84 +network interfaces for traditional IP networks. HPC networks tend to
85 +be used directly from user processes and perform large RDMA transfers
86 +between these processes' address spaces. They also have a requirement
87 +for low-latency communication, and typically achieve this by OS-bypass
88 +techniques.  This requires a different model from traditional
89 +interconnects, in that a process may need to expose a large amount of
90 +its address space to the network for RDMA.
91 +
92 +Locking down of memory has been a common mechanism for performing
93 +this, together with a pin-down cache implemented in user
94 +libraries. The disadvantage of this method is that large portions of
95 +the physical memory can be locked down for a single process, even if
96 +its working set changes over the different phases of its
97 +execution. This leads to inefficient memory utilisation - akin to the
98 +disadvantage of swapping compared to paging.
99 +
100 +This model also has problems where memory is being dynamically
102 +allocated and freed, since the pin-down cache is unaware that memory
102 +may have been released by a call to munmap() and so it will still be
103 +locking down the now unused pages.
104 +
105 +Some modern HPC network interfaces implement their own MMU and are
106 +able to handle a translation fault during a network access. The
107 +Quadrics (http://www.quadrics.com) devices (Elan3 and Elan4) have done
108 +this for some time and we expect others to follow the same route in
109 +the relatively near future. These NICs are able to operate in an
110 +environment where paging occurs and do not require memory to be locked
111 +down. The advantage of this is that the user process can expose large
112 +portions of its address space without having to worry about physical
113 +memory constraints.
114 +
115 +However, should the operating system decide to swap a page to disk,
116 +then the NIC must be made aware that it should no longer read from or
117 +write to this memory, but should generate a translation fault instead.
118 +
119 +The ioproc patch has been developed to provide a mechanism whereby the
120 +device driver for a NIC can be aware of when a user process's address
121 +translations change, either by paging or by explicitly mapping or
122 +unmapping memory.
123 +
124 +The patch involves inserting callbacks where translations are being
125 +invalidated to notify the NIC that the memory behind those
126 +translations is no longer visible to the application (and so should
127 +not be visible to the NIC). This callback is then responsible for
128 +ensuring that the NIC will not access the physical memory that was
129 +being mapped.
130 +
131 +An ioproc invalidate callback in the kswapd code could be utilised to
132 +prevent memory from being paged out if the NIC is unable to support
133 +network page faulting.
134 +
135 +For NICs which support network page faulting, there is no requirement
136 +for a user-level pin-down cache, since they are able to page in their
137 +translations on the first communication using a buffer. However, this
138 +is likely to be inefficient, resulting in slow first use of the
139 +buffer. If the communication buffers were continually allocated and
140 +freed using mmap-based malloc() calls, then this would lead to all
141 +communications being slower than desirable.
142 +
143 +To optimise these warm-up cases, the ioproc patch adds calls to
144 +ioproc_update wherever the kernel is creating translations for a user
145 +process. These calls then allow the device driver to preload translations
146 +so that they are already present for the first network communication
147 +from a buffer.
148 +
149 +Linux 2.6 IOPROC implementation details
150 +=======================================
151 +
152 +The Linux IOPROC patch adds hooks to the Linux VM code whenever page
153 +table entries are being created and/or invalidated. IOPROC device
154 +drivers can register their interest in being informed of such changes
155 +by registering an ioproc_ops structure which is defined as follows:
156 +
157 +extern int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip);
158 +extern int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip);
159 +
160 +typedef struct ioproc_ops {
161 +       struct ioproc_ops *next;
162 +       void *arg;
163 +
164 +       void (*release)(void *arg, struct mm_struct *mm);
165 +       void (*sync_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
166 +       void (*invalidate_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
167 +       void (*update_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
168 +
169 +       void (*change_protection)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot);
170 +
171 +       void (*sync_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
172 +       void (*invalidate_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
173 +       void (*update_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
174 +
175 +} ioproc_ops_t;
176 +
177 +ioproc_register_ops
178 +===================
179 +This function should be called by the IOPROC device driver to register
180 +its interest in PTE changes for the process associated with the
181 +passed-in mm_struct.
182 +
183 +The ioproc registration is not inherited across fork() and should be
184 +called once for each process that IOPROC is interested in.
185 +
186 +This function must be called whilst holding the mm->page_table_lock.
187 +
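A minimal registration sketch (not part of the patch itself; the my_nic_* names,
the struct my_nic_ctxt type and the header path are illustrative assumptions):

        #include <linux/mm.h>
        #include <linux/sched.h>
        #include <linux/ioproc.h>

        struct my_nic_ctxt {
                struct ioproc_ops iops;         /* must stay valid while registered */
                /* ... device state ... */
        };

        static void my_nic_invalidate_page(void *arg, struct vm_area_struct *vma,
                                           unsigned long address)
        {
                /* arg is the my_nic_ctxt registered below; tell the NIC to
                 * drop its translation for 'address' before the page goes away */
        }

        static int my_nic_attach(struct my_nic_ctxt *ctxt)
        {
                struct mm_struct *mm = current->mm;

                memset(&ctxt->iops, 0, sizeof(ctxt->iops));
                ctxt->iops.arg             = ctxt;
                ctxt->iops.invalidate_page = my_nic_invalidate_page;
                /* fill in release, update_*, sync_* etc. as required */

                spin_lock(&mm->page_table_lock);        /* required by the API */
                ioproc_register_ops(mm, &ctxt->iops);
                spin_unlock(&mm->page_table_lock);

                return 0;
        }
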
188 +ioproc_unregister_ops
189 +=====================
190 +This function should be called by the IOPROC device driver when it no
191 +longer needs to be informed of PTE changes in the process associated
192 +with the supplied mm_struct.
193 +
194 +This function does not normally need to be called, as the ioproc_ops
195 +struct is unlinked from the associated mm_struct during the
196 +ioproc_release() call.
197 +
198 +This function must be called whilst holding the mm->page_table_lock.
199 +
200 +ioproc_ops struct
201 +=================
202 +A linked list of ioproc_ops structures is hung off the user process
203 +mm_struct (linux/sched.h). At each hook point in the patched kernel
204 +the ioproc patch will call the associated ioproc_ops callback function
205 +pointer in turn for each registered structure.
206 +
207 +The intention of the callbacks is to allow the IOPROC device driver to
208 +inspect the new or modified PTE entry via the Linux kernel
209 +(e.g. find_pte_map()). These callbacks should not modify the Linux
210 +kernel VM state or PTE entries.
211 +
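Conceptually, each hook point expands into a walk of this list; a sketch of the
invalidate_page case is shown below (the mm_struct field name used here is an
assumption for illustration, not quoted from the patch):

        /* Sketch: how a hook point notifies every registered client. */
        static inline void example_ioproc_invalidate_page(struct vm_area_struct *vma,
                                                          unsigned long address)
        {
                struct ioproc_ops *iop;

                for (iop = vma->vm_mm->ioproc_ops; iop != NULL; iop = iop->next)
                        if (iop->invalidate_page != NULL)
                                iop->invalidate_page(iop->arg, vma, address);
        }
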
212 +The ioproc_ops callback function pointers are defined as follows:
213 +
214 +ioproc_release
215 +==============
216 +The release hook is called when a program exits and all its vma areas
217 +are torn down and unmapped, i.e. during exit_mmap(). Before each
218 +release hook is called, the ioproc_ops structure is unlinked from the
219 +mm_struct.
220 +
221 +No locks are required as the process has the only reference to the mm
222 +at this point.
223 +
224 +ioproc_sync_[range|page]
225 +========================
226 +The sync hooks are called when a memory map is synchronised with its
227 +disk image, i.e. when the msync() syscall is invoked. Any future read
228 +or write by the IOPROC device to the associated pages should cause the
229 +page to be marked as referenced or modified.
230 +
231 +Called holding the mm->page_table_lock
232 +
233 +ioproc_invalidate_[range|page]
234 +==============================
235 +The invalidate hooks are called whenever a valid PTE is unloaded,
236 +e.g. when a page is unmapped by the user or paged out by the
237 +kernel. After this call the IOPROC must not access the physical memory
238 +again unless a new translation is loaded.
239 +
240 +Called holding the mm->page_table_lock
241 +
242 +ioproc_update_[range|page]
243 +==========================
244 +The update hooks are called whenever a valid PTE is loaded,
245 +e.g. when mmapping memory, moving the brk up, breaking COW or faulting
246 +in an anonymous page of memory. These give the IOPROC device the
247 +opportunity to load translations speculatively, which can improve
248 +performance by avoiding device translation faults.
249 +
250 +Called holding the mm->page_table_lock
251 +
252 +ioproc_change_protection
253 +========================
254 +This hook is called when the protection on a region of memory is
255 +changed, i.e. when the mprotect() syscall is invoked.
256 +
257 +The IOPROC must not be able to write to a read-only page, so if the
258 +permissions are downgraded then it must honour them. If they are
259 +upgraded, it can treat this in the same way as the
260 +ioproc_update_[range|page]() calls.
261 +
262 +Called holding the mm->page_table_lock
263 +
264 +
265 +Linux 2.6 IOPROC patch details
266 +==============================
267 +
268 +Here are the specific details of each ioproc hook added to the Linux
269 +2.6 VM system and the reasons for doing so:
270 +
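To make the tables below concrete, the general shape of an invalidate hook
insertion is sketched here (a paraphrase of the pattern; the in-kernel hook
signature is assumed, not quoted from the patch):

        #include <linux/mm.h>

        /* Pattern at the invalidate hook points: clear the PTE and flush the
         * TLB first, then tell registered IOPROC clients the translation is
         * gone before the page can be reused. */
        static void example_unmap_one(struct vm_area_struct *vma,
                                      unsigned long address, pte_t *ptep)
        {
                pte_t pte = ptep_clear_flush(vma, address, ptep);

                ioproc_invalidate_page(vma, address);

                if (pte_dirty(pte))
                        set_page_dirty(pte_page(pte));
        }
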
271 +++++ FILE
272 +       mm/fremap.c
273 +
274 +==== FUNCTION
275 +       zap_pte
276 +
277 +CALLED FROM
278 +       install_page
279 +       install_file_pte
280 +
281 +PTE MODIFICATION
282 +       ptep_clear_flush
283 +
284 +ADDED HOOKS
285 +       ioproc_invalidate_page
286 +
287 +==== FUNCTION
288 +       install_page
289 +
290 +CALLED FROM
291 +       filemap_populate, shmem_populate
292 +
293 +PTE MODIFICATION
294 +       set_pte
295 +
296 +ADDED HOOKS
297 +       ioproc_update_page
298 +
299 +==== FUNCTION
300 +       install_file_pte
301 +
302 +CALLED FROM
303 +       filemap_populate, shmem_populate
304 +
305 +PTE MODIFICATION
306 +       set_pte
307 +
308 +ADDED HOOKS
309 +       ioproc_update_page
310 +
311 +
312 +++++ FILE
313 +       mm/memory.c
314 +
315 +==== FUNCTION
316 +       zap_page_range
317 +
318 +CALLED FROM
319 +       read_zero_pagealigned, madvise_dontneed, unmap_mapping_range,
320 +       unmap_mapping_range_list, do_mmap_pgoff
321 +
322 +PTE MODIFICATION
323 +       set_pte (unmap_vmas)
324 +
325 +ADDED HOOKS
326 +       ioproc_invalidate_range
327 +
328 +
329 +==== FUNCTION
330 +       zeromap_page_range
331 +
332 +CALLED FROM
333 +       read_zero_pagealigned, mmap_zero
334 +
335 +PTE MODIFICATION
336 +       set_pte (zeromap_pte_range)
337 +
338 +ADDED HOOKS
339 +       ioproc_invalidate_range
340 +       ioproc_update_range
341 +
342 +
343 +==== FUNCTION
344 +       remap_page_range
345 +
346 +CALLED FROM
347 +       many device drivers
348 +
349 +PTE MODIFICATION
350 +       set_pte (remap_pte_range)
351 +
352 +ADDED HOOKS
353 +       ioproc_invalidate_range
354 +       ioproc_update_range
355 +
356 +
357 +==== FUNCTION
358 +       break_cow
359 +
360 +CALLED FROM
361 +       do_wp_page
362 +
363 +PTE MODIFICATION
364 +       ptep_establish
365 +
366 +ADDED HOOKS
367 +       ioproc_invalidate_page
368 +       ioproc_update_page
369 +
370 +
371 +==== FUNCTION
372 +       do_wp_page
373 +
374 +CALLED FROM
375 +       do_swap_page, handle_pte_fault
376 +
377 +PTE MODIFICATION
378 +       ptep_set_access_flags
379 +
380 +ADDED HOOKS
381 +       ioproc_update_page
382 +
383 +
384 +==== FUNCTION
385 +       do_swap_page
386 +
387 +CALLED FROM
388 +       handle_pte_fault
389 +
390 +PTE MODIFICATION
391 +       set_pte
392 +
393 +ADDED HOOKS
394 +       ioproc_update_page
395 +
396 +
397 +==== FUNCTION
398 +       do_anonymous_page
399 +
400 +CALLED FROM
401 +       do_no_page
402 +
403 +PTE MODIFICATION
404 +       set_pte
405 +
406 +ADDED HOOKS
407 +       ioproc_update_page
408 +
409 +
410 +==== FUNCTION
411 +       do_no_page
412 +
413 +CALLED FROM
414 +       do_file_page, handle_pte_fault
415 +
416 +PTE MODIFICATION
417 +       set_pte
418 +
419 +ADDED HOOKS
420 +       ioproc_update_page
421 +
422 +
423 +++++ FILE
424 +       mm/mmap.c
425 +
426 +==== FUNCTION
427 +       unmap_region
428 +
429 +CALLED FROM
430 +       do_munmap
431 +
432 +PTE MODIFICATION
433 +       set_pte (unmap_vmas)
434 +
435 +ADDED HOOKS
436 +       ioproc_invalidate_range
437 +
438 +
439 +==== FUNCTION
440 +       exit_mmap
441 +
442 +CALLED FROM
443 +       mmput
444 +
445 +PTE MODIFICATION
446 +       set_pte (unmap_vmas)
447 +
448 +ADDED HOOKS
449 +       ioproc_release
450 +
451 +
452 +++++ FILE
453 +       mm/mprotect.c
454 +
455 +==== FUNCTION
456 +       change_protection
457 +
458 +CALLED FROM
459 +       mprotect_fixup
460 +
461 +PTE MODIFICATION
462 +       set_pte (change_pte_range)
463 +
464 +ADDED HOOKS
465 +       ioproc_change_protection
466 +
467 +
468 +++++ FILE
469 +       mm/mremap.c
470 +
471 +==== FUNCTION
472 +       move_page_tables
473 +
474 +CALLED FROM
475 +       move_vma
476 +
477 +PTE MODIFICATION
478 +       ptep_clear_flush (move_one_page)
479 +
480 +ADDED HOOKS
481 +       ioproc_invalidate_range
482 +       ioproc_invalidate_range
483 +
484 +
485 +++++ FILE
486 +       mm/rmap.c
487 +
488 +==== FUNCTION
489 +       try_to_unmap_one
490 +
491 +CALLED FROM
492 +       try_to_unmap_anon, try_to_unmap_file
493 +
494 +PTE MODIFICATION
495 +       ptep_clear_flush
496 +
497 +ADDED HOOKS
498 +       ioproc_invalidate_page
499 +
500 +
501 +==== FUNCTION
502 +       try_to_unmap_cluster
503 +
504 +CALLED FROM
505 +       try_to_unmap_file
506 +
507 +PTE MODIFICATION
508 +       ptep_clear_flush
509 +
510 +ADDED HOOKS
511 +       ioproc_invalidate_page
512 +
513 +
514 +
515 +++++ FILE 
516 +       mm/msync.c
517 +
518 +==== FUNCTION
519 +       filemap_sync
520 +
521 +CALLED FROM
522 +       msync_interval
523 +
524 +PTE MODIFICATION
525 +       ptep_clear_flush_dirty (filemap_sync_pte)
526 +
527 +ADDED HOOKS
528 +       ioproc_sync_range
529 +
530 +
531 +++++ FILE
532 +       mm/hugetlb.c
533 +
534 +==== FUNCTION
535 +       zap_hugepage_range
536 +
537 +CALLED FROM
538 +       hugetlb_vmtruncate_list
539 +
540 +PTE MODIFICATION
541 +       ptep_get_and_clear (unmap_hugepage_range)
542 +
543 +ADDED HOOK
544 +       ioproc_invalidate_range
545 +
546 +
547 +-- Last update DavidAddison - 17 Aug 2004
548 diff -urN clean/drivers/net/qsnet/eip/eip_linux.c linux-2.6.9/drivers/net/qsnet/eip/eip_linux.c
549 --- clean/drivers/net/qsnet/eip/eip_linux.c     1969-12-31 19:00:00.000000000 -0500
550 +++ linux-2.6.9/drivers/net/qsnet/eip/eip_linux.c       2005-09-07 10:34:58.000000000 -0400
551 @@ -0,0 +1,1575 @@
552 +/*
553 + *    Copyright (c) 2003 by Quadrics Ltd.
554 + * 
555 + *    For licensing information please see the supplied COPYING file
556 + *
557 + */
558 +
559 +#ident "@(#)$Id: eip_linux.c,v 1.96.2.3 2005/09/07 14:34:58 mike Exp $"
560 +
561 +#include <qsnet/kernel.h>
562 +#include <qsnet/debug.h>
563 +
564 +#include <qsnet/module.h>
565 +#include <linux/init.h>
566 +#include <linux/list.h>
567 +#include <linux/netdevice.h>
568 +#include <linux/etherdevice.h>
569 +#include <linux/skbuff.h>
570 +#include <linux/kernel.h>
571 +#include <linux/proc_fs.h>
572 +#include <linux/time.h>
573 +#include <linux/version.h>
574 +
575 +#include <asm/uaccess.h>
576 +#include <asm/unaligned.h>
577 +
578 +#undef ASSERT
579 +#include <net/sock.h>
580 +#include <net/ip.h>
581 +
582 +
583 +
584 +#include <elan/epcomms.h>
585 +#include <elan/epsvc.h>
586 +
587 +#include "eip_linux.h"
588 +#include "eip_stats.h"
589 +
590 +#ifdef UNUSED
591 +static void eip_skb_display(struct sk_buff *);
592 +#endif
593 +static void eip_iph_display(struct iphdr *);
594 +#ifdef UNUSED
595 +static void eip_eiph_display(EIP_HEADER *);
596 +static void eip_packet_display(unsigned char *);
597 +#endif
598 +static void eip_tmd_display(EIP_TMD *);
599 +static void eip_tmd_head_display(EIP_TMD_HEAD *);
600 +static void eip_rmd_display(EIP_RMD *);
601 +static void eip_rmd_head_display(EIP_RMD_HEAD *);
602 +
603 +static void eip_rmd_reclaim(EIP_RMD *);
604 +
605 +static inline EP_NMH *eip_dma_reserve(int, int);
606 +static inline void __eip_tmd_load(EIP_TMD *, EP_RAILMASK *);
607 +static inline void __eip_tmd_unload(EIP_TMD *);
608 +static inline unsigned long eip_buff_alloc(int, int);
609 +static inline void eip_buff_free(unsigned long, int);
610 +static struct iphdr *eip_ipfrag_get(char *);
611 +static inline void eip_rmd_free(EIP_RMD *);
612 +static inline void eip_skb_load(EIP_RMD *);
613 +static inline void eip_skb_unload(EIP_RMD *);
614 +static inline void eip_rmd_requeue(EIP_RMD *);
615 +static EIP_RMD *eip_rmd_alloc(int, int);
616 +static int eip_rmd_alloc_replace(EIP_RMD *, int, int);
617 +static int eip_rmd_alloc_queue(int, int, int, int);
618 +static int eip_rmds_alloc(void);
619 +static void eip_rxhandler(EP_RXD *);
620 +static void eip_rx_tasklet(unsigned long);
621 +static inline void eip_tmd_init(EIP_TMD *, unsigned long, EIP_TMD_HEAD *, unsigned long, int);
622 +static inline EIP_TMD *eip_tmd_get(int);
623 +static inline void eip_tmd_put(EIP_TMD *);
624 +static inline void eip_tmd_load(EIP_TMD *);
625 +static inline void eip_tmd_unload(EIP_TMD *);
626 +static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD *, EIP_TMD_HEAD *, int);
627 +static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD *, int);
628 +static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD *, int);
629 +static int eip_tmds_alloc(void);
630 +int eip_hard_start_xmit(struct sk_buff *, struct net_device *);
631 +static inline int eip_do_xmit(EIP_TMD *, EP_NMD *i, EP_PAYLOAD *);
632 +static void eip_txhandler(EP_TXD *, void *, EP_STATUS);
633 +static void eip_tx_tasklet(unsigned long);
634 +void eip_stop_queue(void);
635 +void eip_start_queue(void);
636 +static int eip_open(struct net_device *);
637 +static int eip_close(struct net_device *);
638 +static struct net_device_stats *eip_get_stats(struct net_device *);
639 +static int eip_change_mtu(struct net_device *, int);
640 +
641 +static int eip_rx_dropping = 0;
642 +static int eip_rx_tasklet_locked = 1;
643 +
644 +/* Global */
645 +struct timer_list eip_rx_tasklet_timer;
646 +       
647 +EIP_RX *eip_rx = NULL;
648 +EIP_TX *eip_tx = NULL;
649 +int  eip_checksum_state=CHECKSUM_NONE;
650 +
651 +int tmd_max = EIP_TMD_MAX_NR;
652 +int rmd_max = EIP_RMD_MAX_NR;
653 +int rx_envelope_nr = EIP_RX_ENVELOPE_NR;
654 +int rx_granularity = EIP_RX_GRANULARITY;
655 +int tx_copybreak_max = EIP_TX_COPYBREAK_MAX;
656 +EP_RAILMASK tx_railmask = EP_RAILMASK_ALL;
657 +int eipdebug = 0;
658 +
659 +#ifdef UNUSED
660 +static void eip_skb_display(struct sk_buff *skb)
661 +{
662 +       if (skb) {
663 +               __EIP_DBG_PRINTF("SKB [%p] : len %d truesize %d  proto %x pkt type %x cloned %d users %d summed %d\n", 
664 +                       skb, skb->len, skb->truesize, skb->protocol, skb->pkt_type, skb->cloned, atomic_read(&skb->users), skb->ip_summed);
665 +               __EIP_DBG_PRINTF("SKB [%p] : skb_shinfo dataref %d nr_frags %d frag_list[%p] (device %p)\n", skb,
666 +                        atomic_read(&skb_shinfo(skb)->dataref), skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->frag_list, skb->dev);
667 +               __EIP_DBG_PRINTF("SKB [%p] : head[%p] data[%p] tail [%p] end [%p] data_len [%d]\n", skb, skb->head, skb->data, 
668 +                               skb->tail, skb->end, skb->data_len);
669 +               __EIP_DBG_PRINTF("SKB [%p] : Transport Layer h.(th, uh, icmph, raw)[%p]\n", skb, skb->h.th);
670 +               __EIP_DBG_PRINTF("SKB [%p] : Network Layer      nh.(iph, arph, raw)[%p]\n", skb, skb->nh.iph);
671 +               __EIP_DBG_PRINTF("SKB [%p] : Link Layer         mac.(ethernet, raw)[%p]\n", skb, skb->mac.ethernet);
672 +               return;
673 +       }
674 +       EIP_ERR_PRINTF("SKB IS NULL - NO SKB TO DISPLAY\n");
675 +}
676 +#endif
677 +static void eip_iph_display(struct iphdr *iph)
678 +{
679 +       if (iph) {
680 +               __EIP_DBG_PRINTF("IPH [%p] : version %d header len %d TOS 0x%x Total len %d\n", 
681 +                       iph, iph->version, iph->ihl, htons(iph->tos), htons(iph->tot_len));
682 +               __EIP_DBG_PRINTF("IPH [%p] : id %d frag flags 0x%x offset %d\n",
683 +                               iph, htons(iph->id), (iph->frag_off & htons(IP_CE | IP_DF | IP_MF)) >> 4, 
684 +                               (htons(iph->frag_off) << 3) & IP_OFFSET);
685 +               __EIP_DBG_PRINTF("IPH [%p] : TTL %d proto %d header checksum 0x%x\n", iph, iph->ttl, iph->protocol, iph->check);
686 +               __EIP_DBG_PRINTF("IPH [%p] : IP src %u.%u.%u.%u dest %u.%u.%u.%u\n", iph, 
687 +                                ((unsigned char *)&(iph->saddr))[0],((unsigned char *)&(iph->saddr))[1], ((unsigned char *)&(iph->saddr))[2],((unsigned char *)&(iph->saddr))[3],
688 +                                ((unsigned char *)&(iph->daddr))[0],((unsigned char *)&(iph->daddr))[1], ((unsigned char *)&(iph->daddr))[2],((unsigned char *)&(iph->daddr))[3]);
689 +               return;
690 +       }
691 +       EIP_ERR_PRINTF("IPH IS NULL - NO IPH TO DISPLAY\n");
692 +}
693 +#ifdef UNUSED
694 +static void eip_eiph_display(EIP_HEADER * eiph)
695 +{
696 +       if (eiph) {
697 +               __EIP_DBG_PRINTF("EIPH [%p] : dhost %04x.%04x.%04x sap %x\n", eiph, eiph->h_dhost.ip_bcast, eiph->h_dhost.ip_inst, 
698 +                               eiph->h_dhost.ip_addr, eiph->h_sap);
699 +               __EIP_DBG_PRINTF("EIPH [%p] : shost %04x.%04x.%04x \n", eiph, eiph->h_shost.ip_bcast, eiph->h_shost.ip_inst,
700 +                                eiph->h_shost.ip_addr);
701 +               return;
702 +       }
703 +       EIP_ERR_PRINTF("EIPH IS NULL - NO EIPH TO DISPLAY\n");
704 +}
705 +static void eip_packet_display(unsigned char *data)
706 +{
707 +       eip_eiph_display((EIP_HEADER *) data);
708 +       eip_iph_display((struct iphdr *) (data + EIP_HEADER_PAD + ETH_HLEN));
709 +}
710 +#endif
711 +static void eip_tmd_display(EIP_TMD * tmd)
712 +{
713 +       if (tmd) {
714 +               __EIP_DBG_PRINTF("\t\tTMD [%p] : next[%p] skb[%p] DVMA[%d]\n", tmd, tmd->chain.next, tmd->skb, tmd->dvma_idx);
715 +               if (tmd->dma_base)
716 +                       __EIP_DBG_PRINTF("TMD [%p] : head[%p] *data 0x%lx\n", tmd, tmd->head, *((unsigned long *) tmd->dma_base));
717 +               else
718 +                       __EIP_DBG_PRINTF("TMD [%p] : head[%p] NO DATA !!!\n", tmd, tmd->head);
719 +               __EIP_DBG_PRINTF("TMD [%p] : DMA(%lx,%d,%d) ebase[%x]\n",tmd,  tmd->dma_base, tmd->dma_len, tmd->nmd.nmd_len,
720 +                                tmd->nmd.nmd_addr);
721 +               return;
722 +       }
723 +       EIP_ERR_PRINTF("TMD IS NULL - NO TMD TO DISPLAY\n");
724 +       
725 +}
726 +static void eip_ipf_display(EIP_IPFRAG * ipf)
727 +{
728 +       if (ipf) {
729 +               __EIP_DBG_PRINTF("IPF[%p] : datagram len %d dma correction %d uts %lx frag_nr %d\n", ipf, ipf->datagram_len,
730 +                               ipf->dma_correction, ipf->timestamp.tv_usec, ipf->frag_nr);
731 +               eip_tmd_display((EIP_TMD *) ipf);
732 +               return;
733 +       }
734 +       EIP_ERR_PRINTF("IPF IS NULL - NO IPF TO DISPLAY\n");
735 +}
736 +
737 +static void eip_tmd_head_display(EIP_TMD_HEAD * head)
738 +{
739 +       if (head) {
740 +               __EIP_DBG_PRINTF("TMD HEAD [%p] : handle[%p] tmds[%p] %3.3d/%3.3d/%3.3d\n", head, head->handle, head->tmd, 
741 +                       EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats),
742 +                       eip_tx->tmd_max_nr);
743 +               return;
744 +       }
745 +       EIP_ERR_PRINTF("TMD HEAD IS NULL - NO TMD HEAD TO DISPLAY\n");
746 +}
747 +static void eip_rmd_display(EIP_RMD * rmd)
748 +{
749 +       if (rmd) {
750 +               __EIP_DBG_PRINTF("RMD [%p] : next[%p] rxd[%p] DVMA[%d]\n", rmd, rmd->chain.next, rmd->rxd, rmd->dvma_idx);
751 +               __EIP_DBG_PRINTF("RMD [%p] : head[%p]\n", rmd, rmd->head); 
752 +               __EIP_DBG_PRINTF("RMD [%p] : ebase[%x]\n", rmd,  rmd->nmd.nmd_addr); 
753 +               return;
754 +       }
755 +       EIP_ERR_PRINTF("RMD IS NULL - NO RMD TO DISPLAY\n");
756 +}
757 +static void eip_rmd_head_display(EIP_RMD_HEAD * head)
758 +{
759 +       if (head) {
760 +               __EIP_DBG_PRINTF("RMD HEAD [%p] : rcvr[%p] handle[%p] busy list[%p]\n", head, head->rcvr, head->handle, head->busy_list);
761 +               __EIP_DBG_PRINTF("RMD HEAD [%p] : %3.3d/%3.3d/%3.3d\n", head, 
762 +                               EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats), eip_rx->rmd_max_nr);
763 +               return;
764 +       }
765 +       EIP_ERR_PRINTF("RMD HEAD IS NULL - NO RMD HEAD TO DISPLAY\n");
766 +}
767 +
768 +/* END  - DISPLAY FUNCTIONS */
769 +static inline EP_NMH *eip_dma_reserve(int pages_nr, int perm)
770 +{
771 +       EP_NMH *handle = ep_dvma_reserve(eip_tx->ep_system, pages_nr, perm);
772 +       
773 +       if (handle)
774 +               EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HANDLE [%p] %d pages of elan address space reserved\n", 
775 +                               handle, pages_nr);
776 +       else
777 +               EIP_ERR_PRINTF("cannot reserve %d page(s) of elan address space\n", pages_nr);
778 +
779 +       return handle;
780 +}
781 +
782 +static inline void __eip_tmd_load(EIP_TMD * tmd, EP_RAILMASK *rmask)
783 +{
784 +       EIP_ASSERT(tmd->nmd.nmd_len > 0);
785 +       
786 +       ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) tmd->dma_base, tmd->nmd.nmd_len, tmd->head->handle,
787 +                       tmd->dvma_idx, rmask, &tmd->nmd);
788 +}
789 +
790 +static inline void __eip_tmd_unload(EIP_TMD * tmd)
791 +{
792 +       EIP_ASSERT(tmd->nmd.nmd_addr && tmd->head->handle);
793 +       
794 +       ep_dvma_unload(eip_tx->ep_system, tmd->head->handle, &tmd->nmd);
795 +       tmd->nmd.nmd_addr = 0;
796 +}
797 +static inline unsigned long eip_buff_alloc(int buff_len, int gfp)
798 +{
799 +       unsigned long buff_base = (buff_len < PAGE_SIZE) ? 
800 +                               (unsigned long) kmalloc(buff_len, gfp) :
801 +                               __get_dma_pages(gfp, get_order(buff_len));
802 +       
803 +       if (likely(buff_base))
804 +               return buff_base;
805 +
806 +       EIP_ERR_PRINTF("cannot allocate %db of memory\n", buff_len);
807 +       return 0;
808 +}
809 +static inline void eip_buff_free(unsigned long buff_base, int buff_len)
810 +{
811 +       (buff_len < PAGE_SIZE) ?  kfree((void *) buff_base) :
812 +               free_pages(buff_base, get_order(buff_len));
813 +}
814 +static struct iphdr *eip_ipfrag_get(char *data)
815 +{
816 +       struct ethhdr *eh = (struct ethhdr *) (data);
817 +       struct iphdr *iph;
818 +
819 +       if (eh->h_proto == htons(ETH_P_IP)) {
820 +               iph = (struct iphdr *) ((char *) eh + ETH_HLEN);
821 +
822 +               /* EIP_DBG(eip_iph_display(iph)); */
823 +
824 +               if ((iph->frag_off & htons(IP_MF | IP_OFFSET)))
825 +                       return iph;
826 +       }
827 +       return NULL;
828 +}
829 +
830 +static inline void eip_rmd_free(EIP_RMD * rmd)
831 +{
832 +       EIP_ASSERT2(rmd->nmd.nmd_addr == 0, eip_rmd_display, rmd);
833 +       
834 +       if ( rmd->skb != NULL) 
835 +               kfree_skb (rmd->skb);
836 +       
837 +       kfree(rmd);
838 +
839 +       EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "RMD [%p] : FREED\n", rmd);
840 +}
841 +static inline void eip_skb_load(EIP_RMD * rmd)
842 +{
843 +       EP_RAILMASK rmask = rmd->rxd ? ep_rxd_railmask (rmd->rxd) : 0;
844 +
845 +       EIP_ASSERT(skb_tailroom(rmd->skb) > 0);
846 +
847 +       ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) rmd->skb->data, skb_tailroom(rmd->skb), rmd->head->handle,
848 +                    rmd->dvma_idx, &rmask, &rmd->nmd);
849 +       
850 +       EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : LOADED\n", rmd);
851 +}
852 +static inline void eip_skb_unload(EIP_RMD * rmd)
853 +{
854 +       EIP_ASSERT(rmd->nmd.nmd_addr && rmd->head->handle);
855 +       
856 +       ep_dvma_unload(eip_tx->ep_system, rmd->head->handle, &rmd->nmd);
857 +       rmd->nmd.nmd_addr = 0;
858 +       
859 +       EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : UNLOADED\n", rmd);
860 +}
861 +static inline void eip_rmd_requeue(EIP_RMD * rmd)
862 +{
863 +       EIP_ASSERT(rmd->rxd);
864 +
865 +       rmd->chain.next    = NULL;
866 +
867 +       ep_requeue_receive(rmd->rxd, eip_rxhandler, rmd, &rmd->nmd, EP_NO_ALLOC|EP_NO_SLEEP );
868 +
869 +       atomic_inc(&rmd->head->stats);
870 +       
871 +       EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : REQUEUED\n", rmd);
872 +}
873 +static EIP_RMD * eip_rmd_alloc(int svc, int gfp)
874 +{
875 +       int buff_len = EIP_SVC_SMALLEST_LEN << svc;
876 +       EIP_RMD *rmd;
877 +       struct sk_buff *skb;
878 +
879 +       if (!(skb = alloc_skb((buff_len -  EIP_EXTRA), gfp)))
880 +               return NULL;
881 +       
882 +       skb_reserve(skb, 2);
883 +
884 +       if (!(rmd = (EIP_RMD *) kmalloc(buff_len, gfp))) {
885 +               kfree_skb(skb);
886 +               return NULL;
887 +       }
888 +
889 +       rmd->skb = skb;
890 +
891 +       rmd->chain.next = NULL;
892 +       rmd->rxd = NULL;
893 +       rmd->head = &eip_rx->head[svc];
894 +
895 +       return rmd;
896 +}
897 +
898 +static int eip_rmd_alloc_replace(EIP_RMD *rmd, int svc, int gfp) 
899 +{
900 +       struct sk_buff *skb,*old;
901 +       int buff_len = EIP_SVC_SMALLEST_LEN << svc;
902 +
903 +       if (!(skb = alloc_skb(buff_len, gfp)))
904 +               return 1;
905 +       
906 +       skb_reserve(skb, 2);
907 +
908 +       eip_skb_unload(rmd);
909 +
910 +       old      = rmd->skb;
911 +       rmd->skb = skb;
912 +
913 +       eip_skb_load(rmd);
914 +
915 +       eip_rmd_requeue(rmd);
916 +
917 +       kfree_skb(old);
918 +
919 +       return 0;
920 +}
921 +
922 +static int eip_rmd_alloc_queue(int svc, int dvma_idx, int gfp, int attr)
923 +{
924 +       EIP_RMD * rmd = eip_rmd_alloc(svc, gfp);
925 +
926 +       if (!rmd)
927 +               return 1;
928 +
929 +       EIP_STAT_ALLOC_ADD(&rmd->head->stats, 1);
930 +
931 +       rmd->dvma_idx = dvma_idx;
932 +       eip_skb_load(rmd);
933 +
934 +       EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "RMD [%p] : ALLOCATED for SVC 0x%x\n", rmd, svc);
935 +
936 +       if (ep_queue_receive(rmd->head->rcvr, eip_rxhandler, (void *) rmd, &rmd->nmd, attr) == ESUCCESS) {
937 +               atomic_inc(&rmd->head->stats);
938 +               EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : QUEUED on SVC 0x%x\n", rmd, svc);
939 +               return 0;
940 +       }
941 +       
942 +       EIP_ERR_PRINTF("RMD [%p] : couldn't be QUEUED on SVC 0x%x\n", rmd, svc);
943 +
944 +       EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1);
945 +
946 +       eip_skb_unload(rmd);
947 +       eip_rmd_free(rmd);
948 +
949 +       return 1;
950 +}
951 +
952 +static int eip_rmds_alloc(void)
953 +{
954 +       int idx, svc;
955 +
956 +       eip_rx->irq_list = NULL;
957 +       eip_rx->irq_list_nr = 0;
958 +
959 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
960 +               eip_rx->head[svc].rcvr = ep_alloc_rcvr(eip_tx->ep_system, EIP_SVC_EP(svc), rx_envelope_nr);
961 +               if (!eip_rx->head[svc].rcvr) {
962 +                       EIP_ERR_PRINTF("Cannot install receiver for SVC 0x%x - maybe cable is disconnected\n", svc);
963 +                       return -EAGAIN;
964 +               }
965 +
966 +               eip_rx->head[svc].handle =
967 +                   eip_dma_reserve(EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)) * eip_rx->rmd_max_nr,
968 +                                   EP_PERM_WRITE);
969 +               if (!eip_rx->head[svc].handle)
970 +                       return -ENOMEM;
971 +               
972 +               EIP_DBG(EIP_DBG_RMD_HEAD, eip_rmd_head_display, &eip_rx->head[svc]);
973 +
974 +               for (idx = 0; idx < EIP_RMD_NR; idx++) {
975 +                       if (eip_rmd_alloc_queue(svc, idx * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), 
976 +                                               GFP_KERNEL, EP_NO_SLEEP))
977 +                               return -ENOMEM;
978 +               }
979 +       }
980 +       return 0;
981 +}
982 +static void eip_rmds_free(void)
983 +{
984 +       unsigned long flags;
985 +       EIP_RMD *rmd;
986 +       int svc; 
987 +       
988 +       spin_lock_irqsave(&eip_rx->lock, flags);
989 +       rmd = eip_rx->irq_list;
990 +       eip_rx->irq_list = NULL;
991 +       eip_rx->irq_list_nr = 0;
992 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
993 +
994 +       eip_rmd_reclaim(rmd);
995 +       
996 +       for (svc = 0; svc < EIP_SVC_NR ; svc++) {
997 +               
998 +               while ((rmd = eip_rx->head[svc].busy_list)) {
999 +                       eip_rx->head[svc].busy_list = NULL;
1000 +                       eip_rmd_reclaim(rmd);
1001 +                       if (eip_rx->head[svc].busy_list) {
1002 +                               EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "Still RMD [%p] on BUSY list SVC 0x%d - Scheduling\n", rmd, svc);     
1003 +                               schedule();
1004 +                       }
1005 +               }
1006 +
1007 +               EIP_ASSERT(EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats) == EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats));
1008 +               
1009 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "HEAD[%p] : FREEING RCVR [%p]\n", &eip_rx->head[svc],
1010 +                               eip_rx->head[svc].rcvr);
1011 +               
1012 +               ep_free_rcvr(eip_rx->head[svc].rcvr);
1013 +
1014 +               EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HEAD[%p] : RELEASING DVMA [%p]\n", &eip_rx->head[svc], 
1015 +                               eip_rx->head[svc].handle);
1016 +
1017 +               ep_dvma_release(eip_tx->ep_system, eip_rx->head[svc].handle);
1018 +       }
1019 +
1020 +}
1021 +static int eip_rx_queues_low (void) {
1022 +       int svc;
1023 +       for (svc = 0; svc < EIP_SVC_NR; svc++) 
1024 +               if (EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats)  < EIP_RMD_ALLOC_THRESH) 
1025 +                       return (1);
1026 +       return (0);
1027 +}
1028 +static void eip_rxhandler(EP_RXD * rxd)
1029 +{
1030 +       EIP_RMD *rmd            = (EIP_RMD *) ep_rxd_arg(rxd);
1031 +       EP_STATUS ret           = ep_rxd_status(rxd);
1032 +       EP_PAYLOAD * payload    = ep_rxd_payload(rxd);
1033 +       unsigned long data      = (unsigned long) rmd->skb->data; 
1034 +       int frag_nr             = 0;
1035 +       int len;
1036 +
1037 +       struct sk_buff *skb;
1038 +       static char count = 0;
1039 +
1040 +       atomic_dec(&rmd->head->stats);
1041 +       rmd->rxd = rxd;
1042 +
1043 +       if (likely(ret == EP_SUCCESS)) {
1044 +
1045 +               rmd->head->dma++;
1046 +
1047 +               if ( eip_rx_dropping) {
1048 +                   eip_rmd_requeue(rmd);
1049 +                   return;
1050 +               }
1051 +
1052 +               len = (payload) ? payload->Data[frag_nr++] : ep_rxd_len(rxd);
1053 +
1054 +               EIP_DBG(EIP_DBG_RMD, eip_rmd_display, rmd);
1055 +
1056 +again:
1057 +               if ( (skb = skb_clone(rmd->skb, GFP_ATOMIC)) ) {
1058 +                       unsigned int off = (data - (unsigned long) rmd->skb->data);
1059 +
1060 +                       /* have to set the length before calling
1061 +                        * skb pull as it will not allow you to
1062 +                        * pull past the end */
1063 +
1064 +                       skb_put (skb, off + len);
1065 +                       skb_pull (skb, off);
1066 +
1067 +                       skb->protocol = eth_type_trans(skb, eip_rx->net_device);
1068 +                       skb->ip_summed = eip_checksum_state;
1069 +                       skb->dev = eip_rx->net_device;
1070 +
1071 +                       /* Fabien/David/Mike: this is a hack/fix to allow aggregation of packets to work.
1072 +                        * The problem is ip_frag looks at the truesize to see if it is caching too much space.
1073 +                        * As we are reusing a large skb (cloned) for a number of small fragments, they appear to take up a lot of space,
1074 +                        * so ip_frag dropped them after 4 frags (not good). So we lie and set the truesize to just bigger than the data.
1075 +                        */
1076 +                       if (payload) 
1077 +                               skb->truesize = SKB_DATA_ALIGN(skb->len + EIP_HEADER_PAD) +sizeof(struct sk_buff);
1078 +
1079 +               }
1080 +               if ( (skb) && 
1081 +                    (netif_rx(skb) != NET_RX_DROP)){
1082 +
1083 +                       eip_rx->bytes += len;
1084 +                       
1085 +                       if (payload && payload->Data[frag_nr] ) {
1086 +                               data += EIP_IP_ALIGN(len);
1087 +                               len   = payload->Data[frag_nr++];
1088 +                               goto again;
1089 +                       }
1090 +                       eip_rx->packets += ++frag_nr;
1091 +               } else if ( (eip_rx->dropped++ % 20) == 0)
1092 +                               __EIP_DBG_PRINTK("Packet dropped by the TCP/IP stack - increase /proc/sys/net/core/netdev_max_backlog\n");
1093 +       } else if (ret == EP_SHUTDOWN ) {
1094 +               EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "ABORTING\n");
1095 +                ep_complete_receive(rxd);
1096 +                eip_skb_unload(rmd);
1097 +               EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1);
1098 +                eip_rmd_free(rmd);
1099 +               return;
1100 +       } else {
1101 +               EP_ENVELOPE *env = ep_rxd_envelope(rxd);
1102 +               EP_NMD *nmd ;
1103 +               
1104 +               EIP_ERR_PRINTF("RMD[%p] : RECEIVE ret = %d\n", rmd, ret);
1105 +
1106 +               for (len = 0 ; len < env->nFrags ; len++) {
1107 +                       nmd = &env->Frags[len];
1108 +                       EIP_ERR_PRINTF("RMD[%p] : ep_frag #%d nmd_addr [%x] nmd_len %d\n", rmd, len, 
1109 +                                       (unsigned int) nmd->nmd_addr, nmd->nmd_len);
1110 +               }
1111 +               eip_rx->errors++;
1112 +               EIP_ASSERT2(atomic_read(&skb_shinfo(rmd->skb)->dataref) == 1, eip_rmd_display, rmd);
1113 +       }
1114 +
1115 +       /* data is used to store the irq flags */
1116 +       spin_lock_irqsave(&eip_rx->lock, data);
1117 +       rmd->chain.next = eip_rx->irq_list;
1118 +       eip_rx->irq_list = rmd;
1119 +       eip_rx->irq_list_nr++;
1120 +       spin_unlock_irqrestore(&eip_rx->lock, data);
1121 +
1122 +       if (((count++ % eip_rx->sysctl_granularity) == 0) /* and either we have passed up a number of them */
1123 +           || eip_rx_queues_low())                       /* or we are low                                 */
1124 +               tasklet_schedule(&eip_rx->tasklet);
1125 +       else
1126 +       {
1127 +               if ( !timer_pending (&eip_rx_tasklet_timer)  )    /* the timer not already set  */
1128 +                       mod_timer (&eip_rx_tasklet_timer, lbolt);
1129 +       }
1130 +}
1131 +
1132 +/* dest: if the buffer still has references on it, move the rmd to the dest list */
1133 +static void eip_rmd_reclaim(EIP_RMD *rmd) 
1134 +{
1135 +       EIP_RMD *rmd_next = rmd;
1136 +       int dataref;
1137 +
1138 +       while (rmd_next) {
1139 +               rmd = rmd_next;
1140 +               rmd_next = rmd_next->chain.next;
1141 +
1142 +               dataref = atomic_read(&skb_shinfo(rmd->skb)->dataref);
1143 +               EIP_ASSERT(dataref > 0);
1144 +               
1145 +               if (dataref == 1) {
1146 +                       eip_rmd_requeue(rmd);
1147 +               } else {
1148 +                       rmd->chain.next = rmd->head->busy_list;
1149 +                       rmd->head->busy_list = rmd;
1150 +               }
1151 +       }
1152 +}
1153 +static void eip_rx_tasklet(unsigned long arg)
1154 +{
1155 +       EIP_RMD *rmd, *rmd_next;
1156 +       unsigned long flags;
1157 +       short svc, queued;
1158 +       int   needs_reschedule;
1159 +
1160 +       if (eip_rx_tasklet_locked) /* we don't want the tasklet to do anything when we are finishing */
1161 +           return;
1162 +
1163 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
1164 +               rmd = eip_rx->head[svc].busy_list;
1165 +               eip_rx->head[svc].busy_list = NULL;
1166 +               eip_rmd_reclaim(rmd);
1167 +       }
1168 +
1169 +       spin_lock_irqsave(&eip_rx->lock, flags);
1170 +       rmd = eip_rx->irq_list;
1171 +       eip_rx->irq_list = NULL;
1172 +       eip_rx->irq_list_nr = 0;
1173 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
1174 +       
1175 +       eip_rmd_reclaim(rmd);
1176 +
1177 +       needs_reschedule = 0;
1178 +
1179 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
1180 +               /* the plan is: allocate some more if possible or steal some dvma space from those on the EIP_BUSY_LIST */
1181 +               queued = EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats);
1182 +
1183 +               EIP_ASSERT(queued >= 0 && queued <= EIP_RMD_MAX_NR);    
1184 +               
1185 +               if (queued < EIP_RMD_ALLOC_THRESH) {
1186 +                       short allocated = EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats);
1187 +                       short how_many; 
1188 +
1189 +                       EIP_ASSERT(allocated >= 0 && allocated <= EIP_RMD_MAX_NR);
1190 +                       
1191 +                       if (likely(allocated < eip_rx->rmd_max_nr)) {
1192 +
1193 +                               how_many = (((allocated / EIP_RMD_ALLOC_STEP) + 1) * EIP_RMD_ALLOC_STEP);
1194 +                               if (how_many > eip_rx->rmd_max_nr)
1195 +                                       how_many = eip_rx->rmd_max_nr;
1196 +
1197 +                               for (; allocated < how_many &&  
1198 +                                                       (eip_rmd_alloc_queue(svc, allocated * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), 
1199 +                                                                             GFP_ATOMIC, EP_NO_ALLOC|EP_NO_SLEEP) == 0) ; allocated++);
1200 +                               if ( allocated != how_many ) {
1201 +                                       eip_rx->reschedule++;
1202 +                                       needs_reschedule = 1;
1203 +                               }
1204 +                       } else {
1205 +                               /* steal how_many rmds and put them on the aside list */
1206 +                               how_many = EIP_RMD_ALLOC_THRESH - queued;
1207 +
1208 +                               EIP_ASSERT(how_many >= 0 && how_many <= EIP_RMD_ALLOC_THRESH);
1209 +                               
1210 +                               rmd_next = eip_rx->head[svc].busy_list;
1211 +                               eip_rx->head[svc].busy_list = NULL;
1212 +
1213 +                               while (how_many-- && rmd_next) {
1214 +                                       rmd = rmd_next;
1215 +                                       rmd_next = rmd_next->chain.next;
1216 +
1217 +                                       if (eip_rmd_alloc_replace(rmd, svc, GFP_ATOMIC)) {
1218 +                                               rmd_next = rmd;
1219 +                                               break;
1220 +                                       }
1221 +                               }
1222 +                               eip_rx->head[svc].busy_list = rmd_next;
1223 +                               if ( how_many )
1224 +                                       needs_reschedule = 1;
1225 +                       }
1226 +               }
1227 +       }
1228 +       
1229 +       if (needs_reschedule) 
1230 +       {
1231 +               if ( !timer_pending (&eip_rx_tasklet_timer)) 
1232 +                       mod_timer (&eip_rx_tasklet_timer, lbolt);
1233 +       }
1234 +}
1235 +static void eip_rx_tasklet_resched(unsigned long arg)
1236 +{
1237 +       tasklet_schedule(&eip_rx->tasklet);     
1238 +}
1239 +
1240 +static inline void eip_tmd_init(EIP_TMD * tmd, unsigned long buff_base, EIP_TMD_HEAD * head, unsigned long buff_len,
1241 +                               int dvma_idx)
1242 +{
1243 +       tmd->dvma_idx = dvma_idx;
1244 +       tmd->dma_base = buff_base;
1245 +       tmd->dma_len = -1;
1246 +       tmd->skb = NULL;
1247 +       tmd->head = head;
1248 +       tmd->chain.next = NULL;
1249 +
1250 +       if (tmd->head != &eip_tx->head[EIP_TMD_STD]) {
1251 +               tmd->nmd.nmd_len = buff_len;
1252 +               eip_tmd_load(tmd);
1253 +       } else  {
1254 +               tmd->nmd.nmd_len  = -1;
1255 +               tmd->nmd.nmd_addr = 0;
1256 +       }
1257 +}
1258 +
1259 +static inline EIP_TMD *eip_tmd_get(int id)
1260 +{
1261 +       unsigned long flags;
1262 +       EIP_TMD *tmd = NULL;
1263 +       spin_lock_irqsave(&eip_tx->lock, flags);
1264 +       while ((tmd = eip_tx->head[id].tmd) == NULL) {
1265 +               spin_unlock_irqrestore(&eip_tx->lock, flags);
1266 +               if (ep_enable_txcallbacks(eip_tx->xmtr) == 0) {
1267 +
1268 +                       spin_lock_irqsave (&eip_tx->lock, flags);
1269 +                       if (eip_tx->head[id].tmd == NULL) {
1270 +                               __EIP_DBG_PRINTF("Cannot get a TMD on head %d ... stopping queue\n", id);
1271 +                               
1272 +                               eip_stop_queue ();
1273 +                               
1274 +                               spin_unlock_irqrestore (&eip_tx->lock, flags);
1275 +
1276 +                               return NULL;
1277 +                       }
1278 +                       spin_unlock_irqrestore (&eip_tx->lock, flags);
1279 +               }
1280 +
1281 +               ep_disable_txcallbacks(eip_tx->xmtr);
1282 +               spin_lock_irqsave(&eip_tx->lock, flags);
1283 +       }
1284 +       eip_tx->head[id].tmd = tmd->chain.next;
1285 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
1286 +       atomic_dec(&tmd->head->stats);
1287 +       return tmd;
1288 +}
1289 +
1290 +static inline void eip_tmd_put(EIP_TMD * tmd)
1291 +{
1292 +       unsigned long flags;
1293 +
1294 +       tmd->skb = NULL;
1295 +
1296 +       spin_lock_irqsave(&eip_tx->lock, flags);
1297 +       tmd->chain.next = tmd->head->tmd;
1298 +       tmd->head->tmd = tmd;
1299 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
1300 +       atomic_inc(&tmd->head->stats);
1301 +
1302 +       eip_start_queue();
1303 +
1304 +       EIP_DBG_PRINTF(EIP_DBG_TMD_QUEUE, "TMD [%p] : REQUEUED\n", tmd);
1305 +}
1306 +static inline void eip_tmd_load(EIP_TMD * tmd)
1307 +{
1308 +       EP_RAILMASK rmask = tx_railmask;
1309 +
1310 +       __eip_tmd_load(tmd, &rmask);
1311 +       
1312 +       EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : LOADED\n", tmd);
1313 +}
1314 +static inline void eip_tmd_unload(EIP_TMD * tmd)
1315 +{
1316 +       __eip_tmd_unload(tmd);
1317 +       
1318 +       EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : UNLOADED\n", tmd);
1319 +}
1320 +static inline void eip_tmd_free(EIP_TMD * tmd)
1321 +{
1322 +       eip_buff_free(tmd->dma_base, tmd->nmd.nmd_len);
1323 +       
1324 +       EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "TMD [%p] : FREED\n", tmd);
1325 +       
1326 +       EIP_STAT_ALLOC_SUB(&tmd->head->stats, 1);
1327 +}
1328 +
1329 +/* tmd on a separate block */
1330 +static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD * tmd, EIP_TMD_HEAD * head, int dvma_idx)
1331 +{
1332 +       eip_tmd_init(tmd, 0, head, -1, dvma_idx);
1333 +
1334 +       eip_tmd_put(tmd);
1335 +
1336 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1337 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1338 +       return tmd;
1339 +}
1340 +/* tmd on the buffer */
1341 +static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD * head, int dvma_idx)
1342 +{
1343 +       EIP_TMD *tmd;
1344 +       unsigned long buff_base;
1345 +
1346 +       if (!(buff_base = eip_buff_alloc(tx_copybreak_max + sizeof(EIP_TMD), GFP_KERNEL)))
1347 +               return NULL;
1348 +
1349 +       tmd = (EIP_TMD *) (buff_base + tx_copybreak_max);
1350 +       eip_tmd_init(tmd, buff_base, head, tx_copybreak_max, dvma_idx);
1351 +
1352 +       eip_tmd_put(tmd);
1353 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1354 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1355 +       return tmd;
1356 +}
1357 +
1358 +/* ipf are on the buffer */
1359 +static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD * head, int dvma_idx)
1360 +{
1361 +       EIP_TMD *tmd;
1362 +       unsigned long buff_base;
1363 +
1364 +       if (!(buff_base = eip_buff_alloc(EIP_SVC_BIGGEST_LEN, GFP_KERNEL)))
1365 +               return NULL;
1366 +
1367 +       tmd = (EIP_TMD *) (buff_base + EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG));
1368 +       eip_tmd_init(tmd, buff_base, head, EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG), dvma_idx);
1369 +
1370 +       eip_tmd_put(tmd);
1371 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1372 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1373 +       return tmd;
1374 +}
1375 +
1376 +static int eip_tmds_alloc()
1377 +{
1378 +       int i;
1379 +       int page_nr;
1380 +       EIP_TMD *tmd;
1381 +
1382 +       page_nr = EIP_DVMA_PAGES(tx_copybreak_max);
1383 +
1384 +       eip_tx->head[EIP_TMD_COPYBREAK].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ);
1385 +       
1386 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_COPYBREAK]);
1387 +
1388 +       for (i = 0; i < EIP_TMD_NR; i++) {
1389 +               if (!eip_tmd_alloc_queue_copybreak(&eip_tx->head[EIP_TMD_COPYBREAK], i * page_nr))
1390 +                       return -ENOMEM;
1391 +       }
1392 +
1393 +       eip_tx->head[EIP_TMD_STD].handle =
1394 +           eip_dma_reserve(EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN) * eip_tx->tmd_max_nr, EP_PERM_READ);
1395 +       
1396 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_STD]);
1397 +
1398 +       tmd = kmalloc(sizeof(EIP_TMD) * EIP_TMD_NR, GFP_KERNEL);
1399 +       if (!tmd) {
1400 +               EIP_ERR_PRINTF("Cannot ALLOCATE %d of tmds\n", (int) sizeof(EIP_TMD) * EIP_TMD_NR);
1401 +               return -ENOMEM;
1402 +       }
1403 +       
1404 +       page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN);
1405 +       
1406 +       for (i = 0; i < EIP_TMD_NR; i++, tmd++) {
1407 +               if (!eip_tmd_alloc_queue(tmd, &eip_tx->head[EIP_TMD_STD], i * page_nr))
1408 +                       return -ENOMEM;
1409 +       }
1410 +
1411 +       page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN);
1412 +
1413 +       eip_tx->head[EIP_TMD_AGGREG].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ);
1414 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_AGGREG]);
1415 +
1416 +       for (i = 0; i < EIP_TMD_NR; i++) {
1417 +               if (!eip_tmd_alloc_queue_aggreg(&eip_tx->head[EIP_TMD_AGGREG], i * page_nr))
1418 +                       return -ENOMEM;
1419 +       }
1420 +       return 0;
1421 +}
1422 +
1423 +static void eip_tmds_free(void) 
1424 +{
1425 +       EIP_TMD *tmd;
1426 +       EIP_TMD *tmd_next;
1427 +       int i;
1428 +       
1429 +       ep_poll_transmits(eip_tx->xmtr);
1430 +
1431 +       for (i = 0 ; i < 3 ; i++) {
1432 +again:
1433 +               if (EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats) < EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats)) {
1434 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "Polling XMTR [%p]\n", eip_tx->xmtr);       
1435 +                       ep_poll_transmits(eip_tx->xmtr);
1436 +                       goto again;
1437 +               }
1438 +       }
1439 +       /* everything should be queued */
1440 +        if ((tmd = eip_tx->head[EIP_TMD_COPYBREAK].tmd)) {
1441 +            do {
1442 +                       tmd_next = tmd->chain.next;
1443 +                        eip_tmd_unload(tmd);
1444 +                       
1445 +                       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1446 +                       
1447 +                        eip_tmd_free(tmd);
1448 +            } while (tmd_next && (tmd = tmd_next));
1449 +        }
1450 +       
1451 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_COPYBREAK] release DVMA [%p]\n",
1452 +                       eip_tx->head[EIP_TMD_COPYBREAK].handle);        
1453 +       
1454 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_COPYBREAK].handle);
1455 +       
1456 +       /* these ones have been allocated as a block */
1457 +       if ((tmd = eip_tx->head[EIP_TMD_STD].tmd)) {
1458 +               do {
1459 +                       if (tmd->dvma_idx == 0 ) {
1460 +                               kfree(tmd);
1461 +                               /* eip_tmd_free(tmd); */
1462 +                               EIP_STAT_ALLOC_SUB(&tmd->head->stats, EIP_TMD_NR);
1463 +                               tmd_next = NULL;
1464 +                               EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_STD] BLOCK FREED\n", tmd); 
1465 +                       } else 
1466 +                               tmd_next = tmd->chain.next;
1467 +               } while (tmd_next && (tmd = tmd_next));
1468 +       }
1469 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_STD] release DVMA [%p]\n", 
1470 +                       eip_tx->head[EIP_TMD_STD].handle);      
1471 +       
1472 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_STD].handle);
1473 +       
1474 +       if ((tmd = eip_tx->head[EIP_TMD_AGGREG].tmd)) {
1475 +               do {
1476 +                       tmd_next = tmd->chain.next;
1477 +
1478 +                       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1479 +                       
1480 +                       eip_tmd_unload(tmd);
1481 +                       eip_tmd_free(tmd);
1482 +               } while (tmd_next && (tmd = tmd_next));
1483 +       }
1484 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_AGGREG] release DVMA\n", 
1485 +                       eip_tx->head[EIP_TMD_AGGREG].handle);   
1486 +       
1487 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_AGGREG].handle);
1488 +
1489 +       ep_free_xmtr(eip_tx->xmtr);
1490 +       EIP_DBG_PRINTF(EIP_DBG_TMD, "XMTR[%p] : FREED\n", eip_tx->xmtr);
1491 +}
1492 +
1493 +static inline void eip_ipf_skb_add(EIP_IPFRAG * ipf, struct sk_buff *skb)
1494 +{
1495 +       int align = EIP_IP_ALIGN(skb->len);
1496 +       
1497 +       
1498 +       if (ipf->dma_len == -1) {       /* like a virgin; touched for the very first time */
1499 +               do_gettimeofday(&ipf->timestamp);
1500 +               /* FIXME: put that in the release tmd code */
1501 +               ipf->frag_nr            = 0;
1502 +               ipf->dma_len            = 0;
1503 +               ipf->datagram_len       = -1;
1504 +               ipf->dma_correction     = 0;
1505 +       }
1506 +       
1507 +       memcpy((void *) (ipf->dma_base + ipf->dma_len), skb->data, skb->len);
1508 +       
1509 +       if (ipf->datagram_len == -1) {
1510 +               struct iphdr * iph = skb->nh.iph;
1511 +               int offset = ntohs(iph->frag_off);
1512 +
1513 +               /* last fragment?  offset & ~IP_OFFSET = IP fragment flags */
1514 +               if (((offset & ~IP_OFFSET) & IP_MF) == 0) {
1515 +                       offset &= IP_OFFSET;
1516 +                       offset <<= 3;    
1517 +                       ipf->datagram_len = offset + ntohs(iph->tot_len) - sizeof(struct iphdr);
1518 +               }
1519 +       }
1520 +
1521 +       skb->next                       = ipf->skb;
1522 +       ipf->skb                        = skb;
1523 +       ipf->payload.Data[ipf->frag_nr] = skb->len;
1524 +       ipf->dma_len                   += align;
1525 +       ipf->dma_correction            += align - skb->len  + ETH_HLEN + sizeof(struct iphdr);
1526 +       /* FIXME: count goes wrong if the IP header has options */
1527 +
1528 +       ipf->frag_nr++;
1529 +
1530 +       EIP_DBG2(EIP_DBG_TMD, eip_ipf_display, ipf, "ADDED skb[%p] len %db ALIGNED(%db)\n", skb, skb->len, EIP_IP_ALIGN(skb->len));
1531 +}
1532 +
1533 +#define eip_ipf_hasroom(ipf, skb) ((ipf->dma_len + EIP_IP_ALIGN(skb->len) < eip_tx->sysctl_ipfrag_copybreak))
1534 +int eip_hard_start_xmit(struct sk_buff *skb, struct net_device *devnet) 
1535 +{
1536 +
1537 +       EIP_TMD *tmd;
1538 +       EP_NMD nmd;
1539 +       struct iphdr *iph;
1540 +       int j;
1541 +
1542 +       if (skb->destructor){
1543 +               atomic_inc(&eip_tx->destructor);
1544 +               tasklet_schedule(&eip_tx->tasklet);
1545 +       } 
1546 +
1547 +       if (!(iph = eip_ipfrag_get(skb->data)) || (eip_tx->sysctl_aggregation == 0)) { /* not ip fragment */
1548 +no_aggreg:
1549 +               j = (skb->len < eip_tx->sysctl_copybreak) ? EIP_TMD_COPYBREAK : EIP_TMD_STD; /* j = head id */
1550 +               
1551 +               if (!(tmd = eip_tmd_get(j))) {
1552 +                       if (skb->destructor)
1553 +                               atomic_dec(&eip_tx->destructor);
1554 +                       return 1;
1555 +               }
1556 +               
1557 +               tmd->dma_len    = skb->len;
1558 +               tmd->skb        = skb;
1559 +               tmd->skb->next  = NULL;
1560 +               tmd->chain.next = NULL;
1561 +               
1562 +               if (j == EIP_TMD_COPYBREAK) {
1563 +                       memcpy((void *) tmd->dma_base, skb->data, skb->len);
1564 +                       
1565 +                       ep_nmd_subset(&nmd, &tmd->nmd, 0, skb->len);
1566 +#ifdef EIP_MORE_STATS
1567 +                       eip_tx->sent_copybreak++;
1568 +#endif
1569 +                       return eip_do_xmit(tmd, &nmd, NULL);
1570 +               }
1571 +               tmd->dma_base           = (unsigned long) skb->data;
1572 +               tmd->nmd.nmd_len        = skb->len;
1573 +               eip_tmd_load(tmd);
1574 +
1575 +#ifdef EIP_MORE_STATS
1576 +               eip_tx->sent_std++;
1577 +#endif
1578 +               return eip_do_xmit(tmd, &tmd->nmd, NULL);
1579 +       } else if ( skb->len > EIP_SVC_BIGGEST_LEN/2 ) { 
1580 +               /* don't aggregate when we have a full mtu of data */
1581 +               /* or more than 32k ; in this case it is cheaper   */
1582 +               /* to just map the buffer and send it              */
1583 +               goto no_aggreg;
1584 +       } else {
1585 +               EIP_IPFRAG *ipf = NULL;
1586 +               unsigned long flags;
1587 +               struct list_head *l;
1588 +               struct iphdr *iph2;
1589 +               int i;
1590 +               __u16 id = iph->id;
1591 +               __u32 saddr = iph->saddr;
1592 +               __u32 daddr = iph->daddr;
1593 +               __u8 protocol = iph->protocol;
1594 +
1595 +               EIP_DBG(EIP_DBG_IPH, eip_iph_display, iph);
1596 +
1597 +               j = 0;
1598 +
1599 +               /* here we can't have full mtu size aggregated packet */
1600 +               EIP_ASSERT_RET(skb->len < eip_tx->sysctl_ipfrag_copybreak, 0);
1601 +
1602 +               spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1603 +               list_for_each(l, &eip_tx->ipfrag) {
1604 +                       ipf = list_entry(l, EIP_IPFRAG, list);
1605 +                       iph2 = eip_ipfrag_get((char *) ipf->dma_base);
1606 +                       
1607 +                        EIP_ASSERT(iph2);
1608 +                       
1609 +                       if ((iph2->id == id) && 
1610 +                                       (get_unaligned(&iph2->saddr) == saddr) && 
1611 +                                       (get_unaligned(&iph2->daddr) == daddr) && 
1612 +                                       (iph2->protocol == protocol)) {
1613 +                               /* || timeout */
1614 +                               if (eip_ipf_hasroom(ipf, skb)) {
1615 +                                       
1616 +                                       eip_ipf_skb_add(ipf, skb);
1617 +                                       
1618 +                                       if ((ipf->datagram_len != -1) && 
1619 +                                                       (ipf->dma_len == (ipf->datagram_len + ipf->dma_correction) || 
1620 +                                                        ipf->frag_nr == (128 / sizeof(uint32_t)))) {
1621 +send_aggreg:
1622 +                                               ipf->payload.Data[ipf->frag_nr] = 0;
1623 +                                               list_del(&ipf->list);
1624 +                                               eip_tx->ipfrag_count--;
1625 +                                               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1626 +                                       
1627 +                                               ep_nmd_subset(&nmd, &ipf->nmd, 0, ipf->dma_len);
1628 +                                               
1629 +#ifdef EIP_MORE_STATS
1630 +                                               eip_tx->sent_aggreg++;
1631 +#endif
1632 +                                               if ((i = eip_do_xmit((EIP_TMD *) ipf, &nmd, &ipf->payload)) != EP_SUCCESS)
1633 +                                                       return i;
1634 +                                               if (j)
1635 +                                                       goto new;
1636 +                                               return 0;
1637 +                                       }
1638 +                                       
1639 +                                       spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1640 +                                       tasklet_schedule(&eip_tx->tasklet);
1641 +                                       return 0;
1642 +                               } else {
1643 +                                       EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF[%p] : full (%db) - sending it\n", ipf, ipf->dma_len);
1644 +                                       j = 1;
1645 +                                       goto send_aggreg;
1646 +                               }
1647 +                       }
1648 +               }
1649 +               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1650 +new:
1651 +               if (!(ipf = (EIP_IPFRAG *) eip_tmd_get(EIP_TMD_AGGREG)))
1652 +                       goto no_aggreg;
1653 +
1654 +               eip_ipf_skb_add(ipf, skb);
1655 +               
1656 +               spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1657 +               list_add_tail(&ipf->list, &eip_tx->ipfrag);
1658 +               eip_tx->ipfrag_count++;
1659 +               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1660 +               tasklet_schedule(&eip_tx->tasklet);
1661 +       }
1662 +       return 0;
1663 +}
1664 +static int eip_do_xmit(EIP_TMD * tmd, EP_NMD *nmd, EP_PAYLOAD *payload)
1665 +{
1666 +       EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base;
1667 +       int         attr = EP_SET_DATA((EP_NO_SLEEP | EP_NO_INTERRUPT | EP_NO_FAILOVER), EP_TYPE_SVC_INDICATOR, EP_SVC_EIP);
1668 +       unsigned long flags;
1669 +       int svc, rnum;
1670 +
1671 +       SIZE_TO_SVC(nmd->nmd_len, svc);
1672 +
1673 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1674 +       /* EIP_DBG(eip_eiph_display(eiph)); */
1675 +       
1676 +       if (unlikely (eiph->h_dhost.ip_bcast))
1677 +               rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_availrails(eip_tx->xmtr));
1678 +       else
1679 +               rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_noderails(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr)));
1680 +
1681 +       if (rnum >= 0)
1682 +               attr = EP_SET_PREFRAIL(attr, rnum);
1683 +
1684 +       /* add to inuse list  */
1685 +       spin_lock_irqsave (&eip_tx->lock, flags);
1686 +       list_add_tail (&tmd->chain.link, &eip_tx->inuse);
1687 +       spin_unlock_irqrestore (&eip_tx->lock, flags);
1688 +
1689 +       /* ENOMEM EINVAL ECONNREFUSED ESUCCESS */
1690 +       svc = (unlikely(eiph->h_dhost.ip_bcast)) ? 
1691 +               ep_multicast_message(eip_tx->xmtr, -1, -1, NULL, EIP_SVC_EP(svc), attr | EP_NOT_MYSELF, eip_txhandler, tmd, payload, nmd, 1) :
1692 +
1693 +               ep_transmit_message(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr), EIP_SVC_EP(svc),  attr, eip_txhandler, tmd, payload, nmd, 1);
1694 +               
1695 +       if (likely(svc == EP_SUCCESS))
1696 +               return 0;
1697 +       else if (svc == ENOMEM) {
1698 +               EIP_ERR_PRINTF("%s", "Memory allocation error ...\n");
1699 +               eip_tx->errors++;
1700 +       }
1701 +       else
1702 +       {
1703 +               /* EP_EINVAL occurs when the svc has a bad value or the iovec has too many frags; */
1704 +               /* we don't use the latter option here                                        */
1705 +               __EIP_DBG_PRINTF("TMD [%p] : DROPPED skb[%p] status = %d from ep_?_message\n", tmd, tmd->skb, svc);
1706 +
1707 +               eip_tx->dropped++;
1708 +       }
1709 +
1710 +       eip_txhandler(NULL, tmd, -99);
1711 +
1712 +       /* Quadrics GNAT sw-elan/4397 - since we will "never" be able to send this packet to the */
1713 +       /* destination node, we drop it and feign success - this has the same behaviour as an    */
1714 +       /* ethernet where it sticks the packet on the wire, but no-one receives it.              */
1715 +       return 0;
1716 +}
1717 +
1718 +static void eip_txhandler(EP_TXD * txd, void *arg, EP_STATUS status)
1719 +{
1720 +       EIP_TMD *tmd = (EIP_TMD *) arg;
1721 +       struct sk_buff *skb_next;
1722 +       unsigned long flags;
1723 +       int svc = 0;
1724 +       
1725 +       if (likely(status == EP_SUCCESS)) {
1726 +               SIZE_TO_SVC(tmd->dma_len, svc);
1727 +               eip_tx->dma[svc]++;
1728 +               eip_tx->bytes += tmd->dma_len;
1729 +               
1730 +               if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) {
1731 +                       EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd;
1732 +                       eip_tx->packets += ipf->frag_nr;
1733 +               } else
1734 +                       eip_tx->packets++;
1735 +       } else {
1736 +               if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) {
1737 +                       EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd;
1738 +                       eip_tx->dropped += ipf->frag_nr;
1739 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler aggreg packet dropped status = %d\n", status);
1740 +               } else  {
1741 +                       eip_tx->dropped++;
1742 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler packet dropped status = %d\n", status);
1743 +               }
1744 +       }
1745 +
1746 +       if (tmd->head == &eip_tx->head[EIP_TMD_STD]) {
1747 +               eip_tmd_unload(tmd);
1748 +               tmd->dma_base = 0;
1749 +               tmd->nmd.nmd_len = -1;
1750 +       }
1751 +               
1752 +       tmd->dma_len = -1;
1753 +       
1754 +       svc = 0;
1755 +       while (tmd->skb) {
1756 +               svc++;
1757 +               
1758 +               if (tmd->skb->destructor)
1759 +                       atomic_dec(&eip_tx->destructor);
1760 +
1761 +               skb_next = tmd->skb->next;
1762 +               dev_kfree_skb_any(tmd->skb);
1763 +               tmd->skb = skb_next;
1764 +       }
1765 +       EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF/TMD [%p] : %d skb RELEASE/FREED\n", tmd, svc);
1766 +
1767 +       /* remove from inuse list  */
1768 +       spin_lock_irqsave (&eip_tx->lock, flags);
1769 +       list_del (&tmd->chain.link);
1770 +       spin_unlock_irqrestore (&eip_tx->lock, flags);
1771 +
1772 +       eip_tmd_put(tmd);
1773 +}
1774 +
1775 +static void eip_tx_tasklet(unsigned long arg)
1776 +{
1777 +       struct timeval now;
1778 +       unsigned long flags;
1779 +       EIP_IPFRAG *ipf, *ipfq = NULL;
1780 +       EP_NMD nmd;
1781 +       struct list_head *list;
1782 +       struct list_head *tmp;
1783 +       char resched = 0;
1784 +       char poll = 1;
1785 +       
1786 +       do_gettimeofday(&now);
1787 +       
1788 +       spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1789 +       if (eip_tx->ipfrag_count) {
1790 +               list_for_each_safe(list, tmp, &eip_tx->ipfrag) {
1791 +                       ipf = list_entry(list, EIP_IPFRAG, list);
1792 +                       /* delta = (((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - ipf->timestamp.tv_usec; */
1793 +                       if (((((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - 
1794 +                                       ipf->timestamp.tv_usec) >= (1000UL * eip_tx->sysctl_ipfrag_to)) {
1795 +                               list_del(&ipf->list);
1796 +                               eip_tx->ipfrag_count--;
1797 +                               ipf->chain.next = (EIP_TMD *) ipfq;
1798 +                               ipfq = ipf;
1799 +                       }
1800 +               }
1801 +       }
1802 +       if (eip_tx->ipfrag_count)
1803 +               resched = 1;
1804 +       spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1805 +
1806 +       while (ipfq) {
1807 +               poll = 0;
1808 +
1809 +               ep_nmd_subset(&nmd, &ipfq->nmd, 0, ipfq->dma_len);
1810 +               
1811 +               ipfq->payload.Data[ipfq->frag_nr] = 0;
1812 +               
1813 +#ifdef EIP_MORE_STATS
1814 +               eip_tx->sent_aggreg++;
1815 +#endif
1816 +               ipf = (EIP_IPFRAG *) ipfq->chain.next;
1817 +               eip_do_xmit((EIP_TMD *) ipfq, &nmd, &ipfq->payload);
1818 +               ipfq = ipf;
1819 +       }
1820 +       
1821 +       if (poll)
1822 +               ep_poll_transmits(eip_tx->xmtr);
1823 +
1824 +       if (atomic_read(&eip_tx->destructor) || resched )
1825 +               tasklet_schedule(&eip_tx->tasklet);
1826 +}
1827 +void eip_start_queue()
1828 +{
1829 +       if (netif_queue_stopped(eip_tx->net_device)) {
1830 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Waking up %s queue\n", eip_tx->net_device->name);
1831 +               netif_wake_queue(eip_tx->net_device);
1832 +       }
1833 +}
1834 +void eip_stop_queue()
1835 +{
1836 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "Stopping %s queue\n", eip_tx->net_device->name);
1837 +       netif_stop_queue(eip_tx->net_device);
1838 +}
1839 +
1840 +static int eip_open(struct net_device *devnet)
1841 +{
1842 +       if (devnet->flags & IFF_PROMISC)
1843 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "%s entering in promiscuous mode\n", devnet->name);
1844 +
1845 +       netif_start_queue(devnet);
1846 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x up\n",
1847 +                       devnet->name, (devnet->dev_addr[0]) & 0xff,
1848 +                       (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1849 +                       (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1850 +       return 0;
1851 +}
1852 +
1853 +static int eip_close(struct net_device *devnet)
1854 +{
1855 +       if (devnet->flags & IFF_PROMISC)
1856 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "%s leaving promiscuous mode\n", devnet->name);
1857 +
1858 +       netif_stop_queue(devnet);
1859 +
1860 +       eip_rx_tasklet(0);
1861 +
1862 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x down\n", 
1863 +               devnet->name, (devnet->dev_addr[0]) & 0xff,
1864 +               (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1865 +               (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1866 +       return 0;
1867 +}
1868 +
1869 +static struct net_device_stats *eip_get_stats(struct net_device *devnet)
1870 +{
1871 +       static struct net_device_stats stats;
1872 +
1873 +       stats.rx_packets = eip_rx->packets;
1874 +       stats.rx_bytes = eip_rx->bytes;
1875 +       stats.rx_errors = eip_rx->errors;
1876 +       stats.rx_dropped = eip_rx->dropped;
1877 +
1878 +       stats.tx_packets = eip_tx->packets;
1879 +       stats.tx_bytes = eip_tx->bytes;
1880 +       stats.tx_errors = eip_tx->errors;
1881 +       stats.tx_dropped = eip_tx->dropped;
1882 +       return &stats;
1883 +}
1884 +
1885 +static int eip_change_mtu(struct net_device *devnet, int mtu)
1886 +{
1887 +       if (mtu <= EIP_MTU_MAX) {
1888 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "MTU size changed from %d to %d\n", devnet->mtu, mtu);
1889 +               devnet->mtu = mtu;
1890 +       }
1891 +       return 0;
1892 +}
1893 +
1894 +#ifdef MODULE
1895 +int eip_init(void)
1896 +{
1897 +       struct net_device *devnet;
1898 +       int errno = 0;
1899 +
1900 +       eip_rx_dropping = 0; 
1901 +       eip_rx_tasklet_locked = 1;
1902 +
1903 +       /* timer up but not started */
1904 +       init_timer (&eip_rx_tasklet_timer);
1905 +       eip_rx_tasklet_timer.function = eip_rx_tasklet_resched;
1906 +       eip_rx_tasklet_timer.data     = (unsigned long) 0;
1907 +       eip_rx_tasklet_timer.expires  = lbolt + hz;
1908 +
1909 +       devnet = alloc_etherdev(sizeof(EIP_RX) + sizeof(EIP_TX));
1910 +       if (!devnet) {
1911 +               EIP_ERR_PRINTF("Unable to ALLOCATE etherdev structure\n");
1912 +               return -ENOMEM;
1913 +       }
1914 +       strcpy (devnet->name, "eip0");
1915 +
1916 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "Enabling aggregation code\n");
1917 +       devnet->change_mtu = eip_change_mtu;
1918 +       devnet->mtu = EIP_MTU_MAX;
1919 +       devnet->open = eip_open;
1920 +       devnet->stop = eip_close;
1921 +       devnet->hard_start_xmit = eip_hard_start_xmit;
1922 +       devnet->get_stats = eip_get_stats;
1923 +
1924 +        /* devnet->features |= (NETIF_F_DYNALLOC); */
1925 +        /* devnet->features = (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA); */
1926 +        /* devnet->features |= (NETIF_F_SG|NETIF_F_FRAGLIST|NETIF_F_HIGHDMA|NETIF_F_HW_CSUM); */
1927 +
1928 +       eip_rx = (EIP_RX *) devnet->priv;
1929 +       eip_tx = (EIP_TX *) (eip_rx + 1);
1930 +
1931 +       /* instance 0 */
1932 +       eip_tx->ep_system = ep_system();
1933 +       if (eip_tx->ep_system == NULL) {
1934 +               EIP_ERR_PRINTF("kernel comms for iface %s does not exist\n", devnet->name);
1935 +               errno = -ENXIO;
1936 +               goto out;
1937 +       }
1938 +       if (ep_waitfor_nodeid(eip_tx->ep_system) == ELAN_INVALID_NODE) {
1939 +               EIP_ERR_PRINTF("network position not found\n");
1940 +               errno = -EAGAIN;
1941 +               goto out;
1942 +       }
1943 +       eip_tx->xmtr = ep_alloc_xmtr(eip_tx->ep_system);
1944 +       if (!eip_tx->xmtr) {
1945 +               EIP_ERR_PRINTF("Cannot allocate transmitter - maybe the cable is disconnected\n");
1946 +               errno = -EAGAIN;
1947 +               goto out;
1948 +       }
1949 +       /* assign MAC address */
1950 +       *((int *) &devnet->dev_addr[4]) = htons(ep_nodeid(eip_tx->ep_system));
1951 +       eip_rx->net_device = devnet;
1952 +       eip_tx->net_device = devnet;
1953 +
1954 +       atomic_set(&eip_tx->destructor, 0);
1955 +
1956 +       if ((tmd_max >= EIP_TMD_MIN_NR) && (tmd_max <= EIP_TMD_MAX_NR)) {
1957 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tmd_max_nr to %d\n", tmd_max);
1958 +               eip_tx->tmd_max_nr = tmd_max;
1959 +       } else {
1960 +               EIP_ERR_PRINTF("parameter error : %d <= tmd_max(%d) <= %d using default %d\n", 
1961 +                               EIP_TMD_MIN_NR, tmd_max, EIP_TMD_MAX_NR, EIP_TMD_MAX_NR);
1962 +               eip_tx->tmd_max_nr = EIP_TMD_MAX_NR;
1963 +       }
1964 +
1965 +       if ((rmd_max >= EIP_RMD_MIN_NR) && (rmd_max <= EIP_RMD_MAX_NR)) {
1966 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting rmd_max_nr to %d\n", rmd_max);
1967 +               eip_rx->rmd_max_nr = rmd_max;
1968 +       } else {
1969 +               EIP_ERR_PRINTF("parameter error : %d <= rmd_max(%d) <= %d using default %d\n", EIP_RMD_MIN_NR,
1970 +                          rmd_max, EIP_RMD_MAX_NR, EIP_RMD_MAX_NR);
1971 +               eip_rx->rmd_max_nr = EIP_RMD_MAX_NR;
1972 +       }
1973 +
1974 +       if ((rx_envelope_nr > 0) && (rx_envelope_nr <= 1024)) { /* > 1024 don't be silly */
1975 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting rx_envelope_nr to %d\n", rx_envelope_nr);
1976 +       } else {
1977 +               EIP_ERR_PRINTF("parameter error : 0 < rx_envelope_nr(%d) <= 1024 using default %d\n",
1978 +                          rx_envelope_nr, EIP_RX_ENVELOPE_NR);
1979 +               rx_envelope_nr = EIP_RX_ENVELOPE_NR;
1980 +       }
1981 +
1982 +       if (tx_copybreak_max <= EIP_TX_COPYBREAK_MAX) {
1983 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tx_copybreak_max to %d\n", tx_copybreak_max);
1984 +       } else {
1985 +               EIP_ERR_PRINTF("parameter error : tx_copybreak_max > %d using default %d\n",
1986 +                          EIP_TX_COPYBREAK_MAX, EIP_TX_COPYBREAK_MAX);
1987 +               tx_copybreak_max = EIP_TX_COPYBREAK_MAX;
1988 +       }
1989 +#ifdef EIP_MORE_STATS
1990 +       eip_tx->sent_copybreak = 0;
1991 +       eip_tx->sent_std = 0;
1992 +       eip_tx->sent_aggreg = 0;
1993 +#endif
1994 +
1995 +       eip_tx->ipfrag_count = 0;
1996 +       eip_aggregation_set(1);
1997 +       eip_rx_granularity_set(rx_granularity);
1998 +       eip_tx_copybreak_set(EIP_TX_COPYBREAK);
1999 +       eip_ipfrag_to_set(EIP_IPFRAG_TO);
2000 +       eip_ipfrag_copybreak_set(EIP_IPFRAG_COPYBREAK);
2001 +
2002 +       spin_lock_init(&eip_tx->lock);
2003 +       spin_lock_init(&eip_tx->ipfraglock);
2004 +       spin_lock_init(&eip_rx->lock);
2005 +       tasklet_init(&eip_rx->tasklet, eip_rx_tasklet, 0);
2006 +       tasklet_init(&eip_tx->tasklet, eip_tx_tasklet, 0);
2007 +       INIT_LIST_HEAD(&eip_tx->ipfrag);
2008 +       INIT_LIST_HEAD(&eip_tx->inuse);
2009 +
2010 +       /* if we fail here we cannot do much yet; waiting for rcvr remove code in ep. */
2011 +       errno = eip_tmds_alloc();
2012 +       if (errno)
2013 +               goto out;
2014 +
2015 +       errno = eip_rmds_alloc();
2016 +       if (errno)
2017 +               goto out;
2018 +
2019 +       errno = eip_stats_init();
2020 +       if (errno)
2021 +               goto out;
2022 +
2023 +       if (ep_svc_indicator_set(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
2024 +               EIP_ERR_PRINTF("Cannot set the service indicator\n");
2025 +               errno = -EINVAL;
2026 +               goto out;
2027 +       }
2028 +
2029 +       eip_rx_tasklet_locked = 0;
2030 +       tasklet_schedule(&eip_rx->tasklet);
2031 +
2032 +       SET_MODULE_OWNER(eip_tx->net_device);
2033 +
2034 +       if ((errno = register_netdev(devnet)) != 0) {
2035 +               printk("eip: failed to register netdev\n");
2036 +               goto out;
2037 +       }
2038 +
2039 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x ready\n", 
2040 +               devnet->name, (devnet->dev_addr[0]) & 0xff,
2041 +               (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
2042 +               (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
2043 +
2044 +       return 0;
2045 +      out:
2046 +       unregister_netdev(devnet);
2047 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 25)
2048 +       kfree(devnet);
2049 +#else
2050 +       free_netdev(devnet);
2051 +#endif
2052 +
2053 +       return errno;
2054 +}
2055 +void eip_exit(void)
2056 +{
2057 +       int i;
2058 +
2059 +       eip_rx_dropping = 1;                /* means that new messages won't be sent to the TCP stack */
2060 +       eip_rx_tasklet_locked = 1;
2061 +
2062 +       netif_stop_queue(eip_tx->net_device);
2063 +
2064 +       if (ep_svc_indicator_clear(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
2065 +               EIP_ERR_PRINTF("Cannot unset the service indicator\n");
2066 +       }
2067 +
2068 +       schedule_timeout(10);
2069 +       
2070 +       del_timer_sync (&eip_rx_tasklet_timer);
2071 +
2072 +       tasklet_disable(&eip_rx->tasklet);
2073 +       tasklet_disable(&eip_tx->tasklet);
2074 +
2075 +       tasklet_kill(&eip_tx->tasklet);
2076 +       tasklet_kill(&eip_rx->tasklet);
2077 +
2078 +        eip_rmds_free();
2079 +        eip_tmds_free();
2080 +
2081 +       /* check that everything has been freed */
2082 +       for (i = 0 ; i < EIP_SVC_NR ; i++) {
2083 +               if ( EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats) != 0 )
2084 +                       EIP_ERR_PRINTF("%d RMDs not FREED on SVC[%d]\n", EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), i);
2085 +       }
2086 +       for (i = 0 ; i < 3 ; i++) {
2087 +               if ( EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats) != 0 )
2088 +                       EIP_ERR_PRINTF("%d TMDs not freed on TX HEAD[%d]\n", EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats), i);
2089 +               
2090 +       }
2091 +       unregister_netdev(eip_tx->net_device);
2092 +       free_netdev(eip_tx->net_device);
2093 +       
2094 +       eip_stats_cleanup();
2095 +}
2096 +
2097 +module_init(eip_init);
2098 +module_exit(eip_exit);
2099 +
2100 +module_param(eipdebug, uint, 0);
2101 +MODULE_PARM_DESC(eipdebug, "Set debug flags");
2102 +
2103 +module_param(rx_envelope_nr, uint, 0);
2104 +MODULE_PARM_DESC(rx_envelope_nr, "Number of envelopes allocated on the rx side");
2105 +
2106 +module_param(tx_copybreak_max, uint, 0);
2107 +MODULE_PARM_DESC(tx_copybreak_max, "Maximum size of the tx copybreak limit (default 512)");
2108 +
2109 +module_param(tmd_max, uint, 0);
2110 +module_param(rmd_max, uint, 0);
2111 +MODULE_PARM_DESC(tmd_max, "Maximum number of transmit buffers (default 64)");
2112 +MODULE_PARM_DESC(rmd_max, "Maximum number of receive buffers (default 64)");
2113 +
2114 +module_param(tx_railmask, ushort, 0);
2115 +MODULE_PARM_DESC(tx_railmask, "Mask of which rails transmits can be queued on");
2116 +
2117 +MODULE_AUTHOR("Quadrics Ltd.");
2118 +MODULE_DESCRIPTION("Elan IP driver");
2119 +MODULE_LICENSE("GPL");
2120 +#endif /* MODULE */
2121 +
2122 +/*
2123 + * Local variables:
2124 + * c-file-style: "linux"
2125 + * End:
2126 + */
2127 diff -urN clean/drivers/net/qsnet/eip/eip_linux.h linux-2.6.9/drivers/net/qsnet/eip/eip_linux.h
2128 --- clean/drivers/net/qsnet/eip/eip_linux.h     1969-12-31 19:00:00.000000000 -0500
2129 +++ linux-2.6.9/drivers/net/qsnet/eip/eip_linux.h       2004-10-01 06:49:29.000000000 -0400
2130 @@ -0,0 +1,399 @@
2131 +/*
2132 + *    Copyright (c) 2003 by Quadrics Ltd.
2133 + * 
2134 + *    For licensing information please see the supplied COPYING file
2135 + *
2136 + */
2137 +
2138 +#ident "$Id: eip_linux.h,v 1.47 2004/10/01 10:49:29 mike Exp $"
2139 +
2140 +#ifndef __EIP_LINUX_H
2141 +#define __EIP_LINUX_H
2142 +
2143 +#define EIP_WATERMARK                  (0xfab1e)
2144 +
2145 +#define EIP_PAGES(s)                   (((s - 1) >> PAGE_SHIFT) + 1)
2146 +#define EIP_DVMA_PAGES(s)              ((s < PAGE_SIZE) ? EIP_PAGES(s) + 1 : EIP_PAGES(s))
2147 +
2148 +#define EIP_SVC_SMALLEST_LEN           (1 << 9)        /* 512 */
2149 +#define EIP_SVC_BIGGEST_LEN            (1 << 16)       /* 64k */
2150 +
2151 +#define EIP_SVC_SMALLEST               (0)
2152 +#define EIP_SVC_BIGGEST                        (7)
2153 +
2154 +#define EIP_SVC_NR                     (8)
2155 +#define EIP_SVC_EP(s)                  (s + EP_MSG_SVC_EIP512)
2156 +
2157 +#define EIP_STAT_ALLOC_SHIFT           (8)
2158 +#define EIP_STAT_ALLOC_GET(atomicp)    ((int) atomic_read(atomicp) >> EIP_STAT_ALLOC_SHIFT)
2159 +#define EIP_STAT_ALLOC_ADD(atomicp, v) (atomic_add((v << EIP_STAT_ALLOC_SHIFT), atomicp))
2160 +#define EIP_STAT_ALLOC_SUB(atomicp, v) (atomic_sub((v << EIP_STAT_ALLOC_SHIFT), atomicp))
2161 +
2162 +#define EIP_STAT_QUEUED_MASK           (0xff)
2163 +#define EIP_STAT_QUEUED_GET(atomicp)   ((int) atomic_read(atomicp) & EIP_STAT_QUEUED_MASK)
2164 +
2165 +#define EIP_RMD_NR                     (8)
2166 +#define EIP_RMD_MIN_NR                 (8)
2167 +#define EIP_RMD_MAX_NR                 (64)    /* should be less than (1 << EIP_STAT_ALLOC_SHIFT) */
2168 +
2169 +#define EIP_RMD_ALLOC_STEP             (8)
2170 +#define EIP_RMD_ALLOC_THRESH           (16)
2171 +
2172 +#define EIP_RMD_ALLOC                  (1)
2173 +#define EIP_RMD_REPLACE                        (0)
2174 +
2175 +#define EIP_TMD_NR                     (64)
2176 +#define EIP_TMD_MIN_NR                 (16)
2177 +#define EIP_TMD_MAX_NR                 (64)    /* should be less than (1 << EIP_STAT_ALLOC_SHIFT) */
2178 +
2179 +#define EIP_TMD_TYPE_NR                        (3)
2180 +#define EIP_TMD_COPYBREAK              (0x0)
2181 +#define EIP_TMD_STD                    (0x1)
2182 +#define EIP_TMD_AGGREG                 (0x2)
2183 +
2184 +#define EIP_TX_COPYBREAK               (512)
2185 +#define EIP_TX_COPYBREAK_MAX           (1024)
2186 +
2187 +#define EIP_IPFRAG_TO                  (50)    /* timeout in msec before a frag is sent */
2188 +#define EIP_IPFRAG_COPYBREAK           (EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG) - EIP_HEADER_PAD)
2189 +
2190 +#define EIP_RX_ENVELOPE_NR             ((EIP_RMD_MAX_NR*EIP_SVC_NR)/2)
2191 +#define EIP_RX_GRANULARITY             (1)
2192 +
2193 +#define EIP_IP_ALIGN(X)                        (((X) + (15)) & ~(15))
2194 +#define EIP_EXTRA                      roundup (sizeof(EIP_RMD), 256)
2195 +#define EIP_RCV_DMA_LEN(s)                     (s - EIP_EXTRA - EIP_HEADER_PAD)
2196 +#define EIP_MTU_MAX                    (EIP_RCV_DMA_LEN(EIP_SVC_BIGGEST_LEN) - (ETH_HLEN))
2197 +
2198 +#define SIZE_TO_SVC(s, svc)                                                                    \
2199 +       do {                                                                                    \
2200 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 9)))  {svc = 0;break;}   \
2201 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 10))) {svc = 1;break;}   \
2202 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 11))) {svc = 2;break;}   \
2203 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 12))) {svc = 3;break;}   \
2204 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 13))) {svc = 4;break;}   \
2205 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 14))) {svc = 5;break;}   \
2206 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 15))) {svc = 6;break;}   \
2207 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 16))) {svc = 7;break;}   \
2208 +                                       svc = -666;                                             \
2209 +                                       EIP_ASSERT(1 == 0);                                     \
2210 +       } while (0)
2211 +
2212 +extern int eipdebug;
2213 +#define EIP_ASSERT_ON 
2214 +/* #define NO_DEBUG */
2215 +
2216 +
2217 +/* ######################## */
2218 +#ifdef NO_DEBUG
2219 +#define __EIP_DBG_PRINTF(fmt, args...)
2220 +#define EIP_DBG_PRINTF(flag, fmt, args...)
2221 +#else
2222 +
2223 +#define EIP_DBG_RMD            0x1
2224 +#define EIP_DBG_TMD            0x2
2225 +#define EIP_DBG_RMD_HEAD       0x4
2226 +#define EIP_DBG_TMD_HEAD       0x8
2227 +#define EIP_DBG_EIPH           0x10
2228 +#define EIP_DBG_IPH            0x20
2229 +#define EIP_DBG_RMD_EP_DVMA    0x40
2230 +#define EIP_DBG_TMD_EP_DVMA    0x80
2231 +#define EIP_DBG_EP_DVMA                (EIP_DBG_RMD_EP_DVMA|EIP_DBG_TMD_EP_DVMA)
2232 +#define EIP_DBG_MEMALLOC       0x100
2233 +#define EIP_DBG_MEMFREE                0x200
2234 +#define EIP_DBG_RMD_QUEUE      0x400
2235 +#define EIP_DBG_TMD_QUEUE      0x800
2236 +#define EIP_DBG_GEN            0x1000
2237 +#define EIP_DBG_DEBUG          0x2000
2238 +       
2239 +#define __EIP_DBG_PRINTF(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUFFER, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args))
2240 +#define EIP_DBG_PRINTF(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTF(fmt, ## args):(void)0)
2241 +
2242 +#define __EIP_DBG_PRINTK(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUF_CON, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args))
2243 +#define EIP_DBG_PRINTK(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTK(fmt, ## args):(void)0)
2244 +           
2245 +#define EIP_ERR_PRINTF(fmt, args...)   __EIP_DBG_PRINTK("!!! ERROR !!! - " fmt, ## args)
2246 +
2247 +       
2248 +#define EIP_DBG2(flag, fn, fn_arg, fmt, args...)                                                               \
2249 +    if (unlikely(eipdebug & flag)) {                                                                           \
2250 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args);       \
2251 +            (void)(fn)(fn_arg);                                                                                \
2252 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args);       \
2253 +    }
2254 +
2255 +
2256 +#define EIP_DBG(flag, fn, args...)                                                             \
2257 +    if (unlikely(eipdebug & flag)) {                                                           \
2258 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s\n", smp_processor_id(), __func__);   \
2259 +            (void)(fn)(args);                                                                  \
2260 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s :\n", smp_processor_id(), __func__); \
2261 +    }
2262 +#endif /* NO_DEBUG */
2263 +
2264 +
2265 +#ifdef EIP_ASSERT_ON
2266 +
2267 +#define __EIP_ASSERT_PRINT(exp)                                \
2268 +               eipdebug = 0xffff;                              \
2269 +               EIP_ERR_PRINTF("ASSERT : %s, %s::%d\n",         \
2270 +                      #exp, __BASE_FILE__, __LINE__);          
2271 +
2272 +#define EIP_ASSERT(exp)                                                        \
2273 +               if (!(exp)) {                                           \
2274 +                       __EIP_ASSERT_PRINT(exp);                        \
2275 +                       netif_stop_queue(eip_tx->net_device);           \
2276 +               }
2277 +
2278 +#define EIP_ASSERT2(exp, f, arg)                                       \
2279 +       do {                                                            \
2280 +               if (!(exp)) {                                           \
2281 +                       __EIP_ASSERT_PRINT(exp);                        \
2282 +                       f(arg);                                         \
2283 +               }                                                       \
2284 +       } while (0)
2285 +
2286 +#define EIP_ASSERT_BUG(exp)                                            \
2287 +       do {                                                            \
2288 +               if (!(exp)) {                                           \
2289 +                       __EIP_ASSERT_PRINT(exp);                        \
2290 +                       BUG();                                          \
2291 +               }                                                       \
2292 +       } while (0)
2293 +
2294 +#define EIP_ASSERT_GOTO(exp, label, f, arg)                            \
2295 +       do {                                                            \
2296 +               if (!(exp)) {                                           \
2297 +                       __EIP_ASSERT_PRINT(exp);                        \
2298 +                       f(arg);                                         \
2299 +                       goto label;                                     \
2300 +               }                                                       \
2301 +       } while (0)
2302 +
2303 +#define EIP_ASSERT_RET(exp, ret)                                       \
2304 +       do {                                                            \
2305 +               if (!(exp)) {                                           \
2306 +                       __EIP_ASSERT_PRINT(exp);                        \
2307 +                       return ret;                                     \
2308 +               }                                                       \
2309 +       } while (0)
2310 +
2311 +#define EIP_ASSERT_RETURN(exp, f, arg)                                 \
2312 +       do {                                                            \
2313 +               if (!(exp)) {                                           \
2314 +                       __EIP_ASSERT_PRINT(exp);                        \
2315 +                       f(arg);                                         \
2316 +                       return;                                         \
2317 +               }                                                       \
2318 +       } while (0)
2319 +
2320 +#define EIP_ASSERT_RETNULL(exp, f, arg)                                        \
2321 +       do {                                                            \
2322 +               if (!(exp)) {                                           \
2323 +                       __EIP_ASSERT_PRINT(exp);                        \
2324 +                       f(arg);                                         \
2325 +                       return NULL;                                    \
2326 +               }                                                       \
2327 +       } while (0)
2328 +
2329 +#else
2330 +
2331 +#define EIP_ASSERT(exp)                do {} while(0)
2332 +#define EIP_ASSERT_OUT(exp)            do {} while(0)
2333 +#define EIP_ASSERT_RETURN(exp)                 do {} while(0)
2334 +#define EIP_ASSERT_RETNULL(exp)                do {} while(0)
2335 +#define EIP_ASSERT_BUG(exp)            do {} while(0)
2336 +
2337 +#endif /* EIP_ASSERT */
2338 +
2339 +
2340 +
2341 +typedef struct {
2342 +       u_short ip_bcast;
2343 +       u_short ip_inst;
2344 +       u_short ip_addr;
2345 +} EIP_ADDRESS;
2346 +
2347 +typedef struct {
2348 +       EIP_ADDRESS h_dhost;
2349 +       EIP_ADDRESS h_shost;
2350 +       u_short h_sap;
2351 +} EIP_HEADER;
2352 +#define EIP_HEADER_PAD                 (2)
2353 +
2354 +typedef struct eip_proc_fs {
2355 +       const char *name;
2356 +       struct proc_dir_entry **parent;
2357 +       read_proc_t *read;
2358 +       write_proc_t *write;
2359 +       unsigned char allocated;
2360 +       struct proc_dir_entry *entry;
2361 +} EIP_PROC_FS;
2362 +
2363 +#define EIP_PROC_ROOT_DIR              "eip"
2364 +
2365 +#define EIP_PROC_DEBUG_DIR             "debug"
2366 +#define EIP_PROC_DEBUG_RX_FLUSH                "rx_flush"
2367 +#define EIP_PROC_DEBUG_TX_FLUSH                "tx_flush"
2368 +
2369 +#define EIP_PROC_AGGREG_DIR            "aggregation"
2370 +#define EIP_PROC_AGGREG_ONOFF          "enable"
2371 +#define EIP_PROC_AGGREG_TO             "timeout"
2372 +#define EIP_PROC_AGGREG_COPYBREAK      "copybreak"
2373 +
2374 +#define EIP_PROC_TX_COPYBREAK          "tx_copybreak"
2375 +#define EIP_PROC_STATS                 "stats"
2376 +#define EIP_PROC_RX_GRAN               "rx_granularity"
2377 +#define EIP_PROC_TX_RAILMASK           "tx_railmask"
2378 +#define EIP_PROC_TMD_INUSE             "tmd_inuse"
2379 +#define EIP_PROC_EIPDEBUG              "eipdebug"
2380 +#define EIP_PROC_CHECKSUM               "checksum"
2381 +
2382 +/* RX */
2383 +/* dma_len is used to keep the len of a received packet */
2384 +/* nmd.nmd_len is the max dma that can be received      */
2385 +/*                                                      */
2386 +struct eip_rmd {
2387 +       struct sk_buff *skb;
2388 +
2389 +       EP_NMD nmd;
2390 +       u16 dvma_idx;
2391 +
2392 +       EP_RXD *rxd;
2393 +       struct eip_rmd_head *head;
2394 +       union {
2395 +               struct list_head link;                          /* when on "busy" list */
2396 +               struct eip_rmd  *next;                          /* all other lists */
2397 +       } chain;
2398 +};
2399 +typedef struct eip_rmd EIP_RMD;
2400 +struct eip_rmd_head {
2401 +       EP_NMH *handle;
2402 +
2403 +       EP_RCVR *rcvr;
2404 +       EIP_RMD *busy_list;
2405 +
2406 +       /* stats */
2407 +       atomic_t stats;
2408 +       unsigned long dma;
2409 +};
2410 +
2411 +typedef struct eip_rmd_head EIP_RMD_HEAD;
2412 +typedef struct eip_rx {
2413 +       struct eip_rmd_head head[EIP_SVC_NR];
2414 +
2415 +       EIP_RMD *irq_list;
2416 +       short    irq_list_nr;   
2417 +
2418 +       /* stats */
2419 +       unsigned long packets;
2420 +       unsigned long bytes;
2421 +       unsigned long errors;
2422 +       unsigned long dropped;
2423 +       unsigned long reschedule;
2424 +
2425 +       spinlock_t lock;
2426 +       struct tasklet_struct tasklet;
2427 +       unsigned char rmd_max_nr;
2428 +       unsigned char sysctl_granularity;
2429 +       struct net_device *net_device;
2430 +} EIP_RX;
2431 +
2432 +/* TX */
2433 +/* dma_len_max is the maximum len for a given DMA                      */
2434 +/* where nmd.nmd_len is the len of the packet to send (roughly skb->len)   */
2435 +typedef struct eip_ipfrag_handle {
2436 +       /* common with tmd */
2437 +       unsigned long dma_base;
2438 +       int dma_len;
2439 +       EP_NMD nmd;
2440 +       u16 dvma_idx;
2441 +
2442 +       struct sk_buff *skb;
2443 +       struct eip_tmd_head *head;
2444 +       union {
2445 +               struct list_head link;                          /* when on "busy" list */
2446 +               struct eip_tmd  *next;                          /* all other lists */
2447 +       } chain;
2448 +
2449 +       /* private */
2450 +       struct list_head list;
2451 +       struct timeval timestamp;
2452 +       unsigned int frag_nr;
2453 +       int datagram_len; /* Ip data */
2454 +       int dma_correction;
2455 +       EP_PAYLOAD payload;
2456 +} EIP_IPFRAG;
2457 +
2458 +struct eip_tmd {
2459 +       unsigned long dma_base;
2460 +       int dma_len;
2461 +       EP_NMD nmd;
2462 +       u16 dvma_idx;
2463 +
2464 +       struct sk_buff *skb;
2465 +       struct eip_tmd_head *head;
2466 +       union {
2467 +               struct list_head link;                          /* when on "busy" list */
2468 +               struct eip_tmd  *next;                          /* all other lists */
2469 +       } chain;
2470 +};
2471 +
2472 +struct eip_tmd_head {
2473 +       EP_NMH *handle;
2474 +
2475 +       struct eip_tmd *tmd;
2476 +       atomic_t stats;
2477 +};
2478 +
2479 +typedef struct eip_tmd EIP_TMD;
2480 +typedef struct eip_tmd_head EIP_TMD_HEAD;
2481 +
2482 +/* #define EIP_MORE_STATS */
2483 +
2484 +typedef struct eip_tx {
2485 +       struct net_device *net_device;
2486 +       EP_XMTR *xmtr;
2487 +       EP_SYS *ep_system;
2488 +
2489 +       struct eip_tmd_head head[EIP_TMD_TYPE_NR];
2490 +       struct list_head inuse;
2491 +       atomic_t destructor;
2492 +
2493 +       /* stats */
2494 +       unsigned long packets;
2495 +       unsigned long bytes;
2496 +       unsigned long errors;
2497 +       unsigned long dropped;
2498 +       unsigned long dma[EIP_SVC_NR];
2499 +       
2500 +#ifdef EIP_MORE_STATS
2501 +       unsigned long sent_copybreak;
2502 +       unsigned long sent_std;
2503 +       unsigned long sent_aggreg;
2504 +#endif
2505 +
2506 +       unsigned char tmd_max_nr;
2507 +
2508 +       unsigned short sysctl_copybreak;
2509 +       unsigned short sysctl_ipfrag_to;
2510 +       unsigned short sysctl_ipfrag_copybreak;
2511 +       unsigned short sysctl_aggregation;
2512 +
2513 +       unsigned short ipfrag_count;
2514 +       struct list_head ipfrag;
2515 +       spinlock_t ipfraglock;
2516 +
2517 +       spinlock_t lock;
2518 +       struct tasklet_struct tasklet;
2519 +} EIP_TX;
2520 +
2521 +/* =============================================== */
2522 +    /* unsigned long   multicast; */
2523 +#endif                         /* __EIP_LINUX_H */
2524 +
2525 +/*
2526 + * Local variables:
2527 + * c-file-style: "linux"
2528 + * End:
2529 + */
2530 diff -urN clean/drivers/net/qsnet/eip/eip_stats.c linux-2.6.9/drivers/net/qsnet/eip/eip_stats.c
2531 --- clean/drivers/net/qsnet/eip/eip_stats.c     1969-12-31 19:00:00.000000000 -0500
2532 +++ linux-2.6.9/drivers/net/qsnet/eip/eip_stats.c       2005-09-07 10:34:58.000000000 -0400
2533 @@ -0,0 +1,374 @@
2534 +/*
2535 + *    Copyright (c) 2003 by Quadrics Ltd.
2536 + * 
2537 + *    For licensing information please see the supplied COPYING file
2538 + *
2539 + */
2540 +
2541 +/*
2542 + * $Id: eip_stats.c,v 1.36.2.2 2005/09/07 14:34:58 mike Exp $
2543 + * $Source: /cvs/master/quadrics/eipmod/eip_stats.c,v $
2544 + */
2545 +
2546 +#include <qsnet/kernel.h>
2547 +#include <qsnet/module.h>
2548 +#include <elan/epcomms.h>
2549 +
2550 +#include <linux/netdevice.h>
2551 +
2552 +#include <linux/kernel.h>
2553 +#include <linux/proc_fs.h>
2554 +
2555 +#include <asm/atomic.h>
2556 +
2557 +#include <qsnet/procfs_linux.h>
2558 +
2559 +#include "eip_linux.h"
2560 +#include "eip_stats.h"
2561 +
2562 +extern EIP_RX *eip_rx;
2563 +extern EIP_TX *eip_tx;
2564 +extern int tx_copybreak_max;
2565 +extern EP_RAILMASK tx_railmask;
2566 +extern int  eip_checksum_state;
2567 +extern void eip_stop_queue(void);
2568 +extern void eip_start_queue(void);
2569 +
2570 +static int eip_stats_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2571 +{
2572 +       int i, outlen = 0;
2573 +
2574 +       *buf = '\0';
2575 +       strcat(buf, "\n");
2576 +       strcat(buf, "--------------------------------------------+------------+-----------------+\n");
2577 +       strcat(buf, "    SKB/DMA    |               | Rx         | Tx         |  TMD TYPE       |\n");
2578 +       strcat(buf, "--------------------------------------------+------------+-----------------+\n");
2579 +
2580 +       i = 0;
2581 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #1[%3.3d/%3.3d/%3.3d] |\n",
2582 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2583 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2584 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2585 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2586 +                eip_tx->tmd_max_nr);
2587 +
2588 +       i++;
2589 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #2[%3.3d/%3.3d/%3.3d] |\n",
2590 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2591 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2592 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2593 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2594 +               eip_tx->tmd_max_nr);
2595 +
2596 +       i++;
2597 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #3[%3.3d/%3.3d/%3.3d] |\n",
2598 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2599 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2600 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2601 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2602 +               eip_tx->tmd_max_nr);
2603 +
2604 +       i++;
2605 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld +-----------------+\n",
2606 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2607 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2608 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2609 +
2610 +       i++;
2611 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2612 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2613 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2614 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2615 +
2616 +       i++;
2617 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2618 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2619 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2620 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2621 +
2622 +       i++;
2623 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2624 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2625 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2626 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2627 +
2628 +       i++;
2629 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2630 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2631 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2632 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2633 +
2634 +       strcat(buf, "--------------------------------------------+------------+\n");
2635 +       sprintf(buf + strlen(buf), " RMD IRQ %4.4d                    %10lu | %10lu |\n",
2636 +               eip_rx->irq_list_nr, 
2637 +               eip_rx->packets, eip_tx->packets);
2638 +       strcat(buf, "--------------------------------------------+------------+\n");
2639 +
2640 +#ifdef EIP_MORE_STATS
2641 +       strcat(buf, "\n");
2642 +       sprintf(buf + strlen(buf), " Copybreak %10ld Std %10ld Aggreg %10ld\n",
2643 +                       eip_tx->sent_copybreak, eip_tx->sent_std, eip_tx->sent_aggreg);
2644 +#endif
2645 +
2646 +
2647 +       strcat(buf, "\n");
2648 +       sprintf(buf + strlen(buf), "Rx bytes: %lu (%lu Mb) errors: %lu dropped: %lu reschedule: %lu\n",
2649 +               eip_rx->bytes, eip_rx->bytes / (1024 * 1024), eip_rx->errors, eip_rx->dropped, eip_rx->reschedule);
2650 +       sprintf(buf + strlen(buf), "Tx bytes: %lu (%lu Mb) errors: %lu dropped: %lu\n",
2651 +               eip_tx->bytes, eip_tx->bytes / (1024 * 1024), eip_tx->errors, eip_tx->dropped);
2652 +       strcat(buf, "\n");
2653 +
2654 +       outlen = strlen(buf);
2655 +       ASSERT(outlen < PAGE_SIZE);
2656 +       *eof = 1;
2657 +       return outlen;
2658 +}
2659 +
2660 +void eip_stats_dump(void)
2661 +{
2662 +    int eof;
2663 +
2664 +    char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2665 +
2666 +    if (buf == NULL)
2667 +    {
2668 +       printk("no memory to produce eip_stats\n");
2669 +       return;
2670 +    }
2671 +
2672 +    eip_stats_read(buf, NULL, 0, 0, &eof, NULL);
2673 +
2674 +    printk("%s", buf);
2675 +
2676 +    kfree(buf);
2677 +}
2678 +
2679 +static int eip_stats_write(struct file *file, const char *buf, unsigned long count, void *data)
2680 +{
2681 +       int i;
2682 +       unsigned long flags;
2683 +
2684 +       spin_lock_irqsave(&eip_rx->lock, flags);
2685 +       eip_rx->packets = 0;
2686 +       eip_rx->bytes = 0;
2687 +       eip_rx->errors = 0;
2688 +       eip_rx->dropped = 0;
2689 +       eip_rx->reschedule = 0;
2690 +       for (i = 0; i < EIP_SVC_NR; eip_rx->head[i].dma = 0, i++);
2691 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
2692 +
2693 +       spin_lock_irqsave(&eip_tx->lock, flags);
2694 +       eip_tx->packets = 0;
2695 +       eip_tx->bytes = 0;
2696 +       eip_tx->errors = 0;
2697 +       eip_tx->dropped = 0;
2698 +#ifdef EIP_MORE_STATS
2699 +       eip_tx->sent_copybreak = 0;
2700 +       eip_tx->sent_std = 0;
2701 +       eip_tx->sent_aggreg = 0;
2702 +#endif
2703 +       for (i = 0; i < EIP_SVC_NR; eip_tx->dma[i] = 0, i++);
2704 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
2705 +
2706 +       return count;
2707 +}
2708 +
2709 +#define                eip_stats_var_write(name)                                                                       \
2710 +static int eip_stats_##name##_write(struct file *file, const char *buf, unsigned long count, void *data)       \
2711 +{                                                                                                              \
2712 +       char * b = (char *) buf;                                                                                \
2713 +       *(b + count) = '\0';                                                                                    \
2714 +       eip_##name##_set((int) simple_strtoul(b, NULL, 10));                                                    \
2715 +       return count;                                                                                           \
2716 +}
2717 +
2718 +#define        eip_stats_var_read(name, var)                                                                   \
2719 +static int eip_stats_##name##_read(char *buf, char **start, off_t off, int count, int *eof, void *data)                \
2720 +{                                                                                                              \
2721 +       sprintf(buf, "%d\n", var);                                                                              \
2722 +       *eof = 1;                                                                                               \
2723 +       return strlen(buf);                                                                                     \
2724 +}
2725 +
2726 +
2727 +#define                eip_stats_var_set(name, min, max, default, var)                                                                 \
2728 +void eip_##name##_set(int i)                                                                                                   \
2729 +{                                                                                                                              \
2730 +       if ( (i >= min) && (i <= max)) {                                                                                        \
2731 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting " #name " to %d\n", i);                                                    \
2732 +               var =(unsigned short) i;                                                                                        \
2733 +       }                                                                                                                       \
2734 +       else {                                                                                                                  \
2735 +               EIP_ERR_PRINTF("parameter error : %d <= " #name "(%d) <= %d using default %d\n", min, i, (int) max, (int) default);     \
2736 +       }                                                                                                                       \
2737 +}
2738 +
2739 +eip_stats_var_set(tx_copybreak, 0, tx_copybreak_max, EIP_TX_COPYBREAK, eip_tx->sysctl_copybreak);
2740 +eip_stats_var_set(rx_granularity, 1, EIP_RMD_MIN_NR, EIP_RX_GRANULARITY, eip_rx->sysctl_granularity);
2741 +eip_stats_var_set(tx_railmask, 0, EP_RAILMASK_ALL, EP_RAILMASK_ALL, tx_railmask);
2742 +eip_stats_var_set(ipfrag_to, 0, (1 << 16), EIP_IPFRAG_TO, eip_tx->sysctl_ipfrag_to);
2743 +eip_stats_var_set(aggregation, 0, 1, 1, eip_tx->sysctl_aggregation);
2744 +eip_stats_var_set(ipfrag_copybreak, 0, EIP_IPFRAG_COPYBREAK, EIP_IPFRAG_COPYBREAK, eip_tx->sysctl_ipfrag_copybreak);
2745 +/* eip_stats_var_set(eipdebug, 0, , 0, eipdebug); */
2746 +
2747 +eip_stats_var_read(aggregation, eip_tx->sysctl_aggregation);
2748 +eip_stats_var_read(ipfrag_count, eip_tx->ipfrag_count);
2749 +eip_stats_var_read(ipfrag_to, eip_tx->sysctl_ipfrag_to);
2750 +eip_stats_var_read(ipfrag_copybreak, eip_tx->sysctl_ipfrag_copybreak);
2751 +eip_stats_var_read(tx_copybreak, eip_tx->sysctl_copybreak);
2752 +eip_stats_var_read(rx_granularity, eip_rx->sysctl_granularity);
2753 +eip_stats_var_read(tx_railmask, tx_railmask);
2754 +
2755 +eip_stats_var_write(aggregation);
2756 +eip_stats_var_write(ipfrag_to);
2757 +eip_stats_var_write(ipfrag_copybreak);
2758 +eip_stats_var_write(tx_copybreak);
2759 +eip_stats_var_write(rx_granularity);
2760 +eip_stats_var_write(tx_railmask);
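[Editorial note, not part of the patch] Each eip_stats_var_read()/eip_stats_var_write() invocation above simply stamps out one proc read or write handler from the macros defined earlier. For example, the aggregation pair expands (modulo whitespace) to roughly:

static int eip_stats_aggregation_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
{
	sprintf(buf, "%d\n", eip_tx->sysctl_aggregation);
	*eof = 1;
	return strlen(buf);
}

static int eip_stats_aggregation_write(struct file *file, const char *buf, unsigned long count, void *data)
{
	char * b = (char *) buf;
	*(b + count) = '\0';
	eip_aggregation_set((int) simple_strtoul(b, NULL, 10));
	return count;
}

These generated handler names are what the eip_procs[] table below wires into the proc entries.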
2761 +
2762 +
2763 +static int eip_checksum_write(struct file *file, const char *buf, unsigned long count, void *data)
2764 +{
2765 +       char * b = (char *) buf;
2766 +       int    value;
2767 +
2768 +       *(b + count) = '\0';
2769 +
2770 +       value = (int) simple_strtoul(b, NULL, 10);
2771 +       if  ((value >= CHECKSUM_NONE) && (value <= CHECKSUM_UNNECESSARY)) 
2772 +               eip_checksum_state = value;
2773 +       else 
2774 +               EIP_ERR_PRINTF("%d <= checksum(%d) <= %d using old value %d\n", CHECKSUM_NONE, value, CHECKSUM_UNNECESSARY, eip_checksum_state);
2775 +
2776 +       return count;
2777 +}
2778 +
2779 +static int eip_checksum_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2780 +{
2781 +       switch ( eip_checksum_state ) 
2782 +       {
2783 +       case 0  : sprintf(buf, "0 CHECKSUM_NONE\n");                      break;
2784 +       case 1  : sprintf(buf, "1 CHECKSUM_HW\n");                        break;
2785 +       case 2  : sprintf(buf, "2 CHECKSUM_UNNECESSARY\n");               break;
2786 +       default : sprintf(buf, "%d INVALID VALUE\n", eip_checksum_state); break;
2787 +       }
2788 +       *eof = 1;
2789 +       return strlen(buf);
2790 +}
2791 +
2792 +static int eip_stats_eipdebug_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2793 +{
2794 +       *buf = '\0';
2795 +       sprintf(buf + strlen(buf), "0x%x\n", eipdebug);
2796 +       *eof = 1;
2797 +       return strlen(buf);
2798 +}
2799 +static int eip_stats_eipdebug_write(struct file *file, const char *buf, unsigned long count, void *data)
2800 +{
2801 +       char * p = (char *) buf;
2802 +       *(p + count - 1) = '\0';
2803 +       eipdebug = simple_strtoul(p, NULL, 0);
2804 +       __EIP_DBG_PRINTK("Setting eipdebug to 0x%x\n", eipdebug);
2805 +       return count;
2806 +}
2807 +
2808 +static int eip_stats_tmd_inuse_read(char *page, char **start, off_t off, int count, int *eof, void *data)
2809 +{
2810 +       struct list_head *lp;
2811 +       unsigned long flags;
2812 +       unsigned int len = 0;
2813 +
2814 +       spin_lock_irqsave(&eip_tx->lock, flags);
2815 +       list_for_each (lp, &eip_tx->inuse) {
2816 +               EIP_TMD *tmd = list_entry (lp, EIP_TMD, chain.link);
2817 +               EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base;
2818 +               
2819 +                len += sprintf(page+len, "tmd=%p id=%d len=%d\n",
2820 +                              tmd, eiph ? ntohs(eiph->h_dhost.ip_addr) : -1,
2821 +                              tmd->dma_len);
2822 +
2823 +                if (len + 40 >= count)
2824 +                        break;
2825 +        }
2826 +        spin_unlock_irqrestore(&eip_tx->lock, flags);
2827 +
2828 +       return qsnet_proc_calc_metrics (page, start, off, count, eof, len);
2829 +}
2830 +
2831 +static int eip_stats_debug_rx_flush(struct file *file, const char *buf, unsigned long count, void *data)
2832 +{
2833 +       EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing rx ...\n");
2834 +       tasklet_schedule(&eip_rx->tasklet);
2835 +       return count;
2836 +}
2837 +static int eip_stats_debug_tx_flush(struct file *file, const char *buf, unsigned long count, void *data)
2838 +{
2839 +       EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing tx ... %d tmds reclaimed\n", ep_enable_txcallbacks(eip_tx->xmtr));
2840 +       ep_disable_txcallbacks(eip_tx->xmtr);
2841 +       tasklet_schedule(&eip_tx->tasklet);
2842 +       return count;
2843 +}
2844 +
2845 +#define EIP_PROC_PARENT_NR     (3)
2846 +/* NOTE: the parent directory entries must be declared before their children (see the illustrative row after the table below) */
2847 +static EIP_PROC_FS eip_procs[] = {
2848 +       /* {name, parent, read fn, write fn, allocated, entry}, */
2849 +       {EIP_PROC_ROOT_DIR, &qsnet_procfs_root, NULL, NULL, 0, NULL},
2850 +       {EIP_PROC_DEBUG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL},
2851 +       {EIP_PROC_AGGREG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL},        /* end of parents */
2852 +       {EIP_PROC_STATS, &eip_procs[0].entry, eip_stats_read, eip_stats_write, 0, NULL},
2853 +       {EIP_PROC_TX_COPYBREAK, &eip_procs[0].entry, eip_stats_tx_copybreak_read, eip_stats_tx_copybreak_write, 0, NULL},
2854 +       {EIP_PROC_RX_GRAN, &eip_procs[0].entry, eip_stats_rx_granularity_read, eip_stats_rx_granularity_write, 0, NULL},
2855 +       {EIP_PROC_TX_RAILMASK, &eip_procs[0].entry, eip_stats_tx_railmask_read, eip_stats_tx_railmask_write, 0, NULL},
2856 +       {EIP_PROC_TMD_INUSE, &eip_procs[0].entry, eip_stats_tmd_inuse_read, NULL, 0, NULL},
2857 +       {EIP_PROC_EIPDEBUG, &eip_procs[0].entry, eip_stats_eipdebug_read, eip_stats_eipdebug_write, 0, NULL},
2858 +       {EIP_PROC_CHECKSUM, &eip_procs[0].entry, eip_checksum_read, eip_checksum_write, 0, NULL},
2859 +       {EIP_PROC_DEBUG_RX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_rx_flush, 0, NULL},
2860 +       {EIP_PROC_DEBUG_TX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_tx_flush, 0, NULL},
2861 +       {"ipfrag_count", &eip_procs[2].entry, eip_stats_ipfrag_count_read, NULL, 0, NULL},
2862 +       {EIP_PROC_AGGREG_TO, &eip_procs[2].entry, eip_stats_ipfrag_to_read, eip_stats_ipfrag_to_write, 0, NULL},
2863 +       {EIP_PROC_AGGREG_ONOFF, &eip_procs[2].entry, eip_stats_aggregation_read, eip_stats_aggregation_write, 0, NULL},
2864 +       {EIP_PROC_AGGREG_COPYBREAK, &eip_procs[2].entry, eip_stats_ipfrag_copybreak_read, eip_stats_ipfrag_copybreak_write, 0, NULL},
2865 +       {NULL, NULL, NULL, NULL, 1, NULL},
2866 +};
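[Editorial note, not part of the patch] As an illustration of the table convention above (directories first, children pointing at their parent's entry field, and a terminating row whose allocated flag is set), a hypothetical extra entry under the debug directory would be added just before the terminator; the names mystat, my_stat_read and my_stat_write are invented for this sketch:

	/* hypothetical child of EIP_PROC_DEBUG_DIR (eip_procs[1]) */
	{"mystat", &eip_procs[1].entry, my_stat_read, my_stat_write, 0, NULL},
	{NULL, NULL, NULL, NULL, 1, NULL},	/* terminator: allocated == 1 stops eip_stats_init() */

eip_stats_init() below walks the table until it reaches a row whose allocated flag is already set, so the terminator must stay last.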
2867 +
2868 +int eip_stats_init(void)
2869 +{
2870 +       int p;
2871 +
2872 +       for (p = 0; !eip_procs[p].allocated; p++) {
2873 +               if (p < EIP_PROC_PARENT_NR)
2874 +                       eip_procs[p].entry = proc_mkdir(eip_procs[p].name, *eip_procs[p].parent);
2875 +               else
2876 +                       eip_procs[p].entry = create_proc_entry(eip_procs[p].name, 0, *eip_procs[p].parent);
2877 +
2878 +               if (!eip_procs[p].entry) {
2879 +                       EIP_ERR_PRINTF("%s\n", "Cannot allocate proc entry");
2880 +                       eip_stats_cleanup();
2881 +                       return -ENOMEM;
2882 +               }
2883 +
2884 +               eip_procs[p].entry->owner = THIS_MODULE;
2885 +               eip_procs[p].entry->write_proc = eip_procs[p].write;
2886 +               eip_procs[p].entry->read_proc = eip_procs[p].read;
2887 +               eip_procs[p].allocated = 1;
2888 +       }
2889 +       eip_procs[p].allocated = 0;
2890 +       return 0;
2891 +}
2892 +
2893 +void eip_stats_cleanup(void)
2894 +{
2895 +       int p;
2896 +       for (p = (sizeof (eip_procs)/sizeof (eip_procs[0]))-1; p >= 0; p--)
2897 +               if (eip_procs[p].allocated) {
2898 +                       EIP_DBG_PRINTF(EIP_DBG_GEN, "Removing %s from proc\n", eip_procs[p].name);
2899 +                       remove_proc_entry(eip_procs[p].name, *eip_procs[p].parent);
2900 +               }
2901 +}
2902 +
2903 +/*
2904 + * Local variables:
2905 + * c-file-style: "linux"
2906 + * End:
2907 + */
2908 diff -urN clean/drivers/net/qsnet/eip/eip_stats.h linux-2.6.9/drivers/net/qsnet/eip/eip_stats.h
2909 --- clean/drivers/net/qsnet/eip/eip_stats.h     1969-12-31 19:00:00.000000000 -0500
2910 +++ linux-2.6.9/drivers/net/qsnet/eip/eip_stats.h       2004-05-10 10:47:47.000000000 -0400
2911 @@ -0,0 +1,22 @@
2912 +/*
2913 + *    Copyright (c) 2003 by Quadrics Ltd.
2914 + * 
2915 + *    For licensing information please see the supplied COPYING file
2916 + *
2917 + */
2918 +
2919 +#ident "$Id: eip_stats.h,v 1.14 2004/05/10 14:47:47 daniel Exp $"
2920 +
2921 +#ifndef __EIP_STATS_H
2922 +#define        __EIP_STATS_H
2923 +
2924 +int eip_stats_init(void);
2925 +void eip_stats_cleanup(void);
2926 +void eip_rx_granularity_set(int);
2927 +void eip_tx_copybreak_set(int);
2928 +void eip_ipfrag_to_set(int);
2929 +void eip_aggregation_set(int);
2930 +void eip_ipfrag_copybreak_set(int);
2931 +void eip_stats_dump(void);
2932 +
2933 +#endif                         /* __EIP_STATS_H */
2934 diff -urN clean/drivers/net/qsnet/eip/Makefile linux-2.6.9/drivers/net/qsnet/eip/Makefile
2935 --- clean/drivers/net/qsnet/eip/Makefile        1969-12-31 19:00:00.000000000 -0500
2936 +++ linux-2.6.9/drivers/net/qsnet/eip/Makefile  2005-10-10 17:47:30.000000000 -0400
2937 @@ -0,0 +1,15 @@
2938 +#
2939 +# Makefile for Quadrics QsNet
2940 +#
2941 +# Copyright (c) 2002-2004 Quadrics Ltd
2942 +#
2943 +# File: drivers/net/qsnet/eip/Makefile
2944 +#
2945 +
2946 +
2947 +#
2948 +
2949 +obj-$(CONFIG_EIP)      += eip.o
2950 +eip-objs       := eip_linux.o eip_stats.o
2951 +
2952 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
2953 diff -urN clean/drivers/net/qsnet/eip/Makefile.conf linux-2.6.9/drivers/net/qsnet/eip/Makefile.conf
2954 --- clean/drivers/net/qsnet/eip/Makefile.conf   1969-12-31 19:00:00.000000000 -0500
2955 +++ linux-2.6.9/drivers/net/qsnet/eip/Makefile.conf     2005-09-07 10:39:48.000000000 -0400
2956 @@ -0,0 +1,10 @@
2957 +# Flags for generating QsNet Linux Kernel Makefiles
2958 +MODNAME                =       eip.o
2959 +MODULENAME     =       eip
2960 +KOBJFILES      =       eip_linux.o eip_stats.o
2961 +EXPORT_KOBJS   =       
2962 +CONFIG_NAME    =       CONFIG_EIP
2963 +SGALFC         =       
2964 +# EXTRALINES START
2965 +
2966 +# EXTRALINES END
2967 diff -urN clean/drivers/net/qsnet/eip/quadrics_version.h linux-2.6.9/drivers/net/qsnet/eip/quadrics_version.h
2968 --- clean/drivers/net/qsnet/eip/quadrics_version.h      1969-12-31 19:00:00.000000000 -0500
2969 +++ linux-2.6.9/drivers/net/qsnet/eip/quadrics_version.h        2005-09-07 10:39:49.000000000 -0400
2970 @@ -0,0 +1 @@
2971 +#define QUADRICS_VERSION "5.11.3qsnet"
2972 diff -urN clean/drivers/net/qsnet/elan/bitmap.c linux-2.6.9/drivers/net/qsnet/elan/bitmap.c
2973 --- clean/drivers/net/qsnet/elan/bitmap.c       1969-12-31 19:00:00.000000000 -0500
2974 +++ linux-2.6.9/drivers/net/qsnet/elan/bitmap.c 2004-01-20 12:32:17.000000000 -0500
2975 @@ -0,0 +1,287 @@
2976 +/*
2977 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
2978 + *
2979 + *    For licensing information please see the supplied COPYING file
2980 + *
2981 + */
2982 +
2983 +#ident "@(#)$Id: bitmap.c,v 1.5 2004/01/20 17:32:17 david Exp $"
2984 +/*      $Source: /cvs/master/quadrics/elanmod/shared/bitmap.c,v $*/
2985 +
2986 +#if defined(__KERNEL__)
2987 +#include <qsnet/kernel.h>
2988 +#endif
2989 +#include <qsnet/config.h>
2990 +#include <elan/bitmap.h>
2991 +
2992 +/*
2993 + * Return the index of the first clear (available) bit in the
2994 + * bitmap, or -1 for failure
2995 + */
2996 +int
2997 +bt_freebit (bitmap_t *bitmap, int nbits)
2998 +{
2999 +    int last = (--nbits) >> BT_ULSHIFT;
3000 +    int maxbit;
3001 +    int        i, j;
3002 +
3003 +    /* look for a word with a bit off */
3004 +    for (i = 0; i <= last; i++)
3005 +       if (bitmap[i] != ~((bitmap_t) 0))
3006 +           break;
3007 +
3008 +    if (i <= last)
3009 +    {
3010 +       /* found a word with a bit off, now see which bit it is */
3011 +       maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3012 +       for (j = 0; j <= maxbit; j++)
3013 +           if ((bitmap[i] & (1 << j)) == 0)
3014 +               return ((i << BT_ULSHIFT) | j);
3015 +    }
3016 +    return (-1);
3017 +    
3018 +}
3019 +
3020 +/*
3021 + * bt_lowbit:
3022 + *     Return the index of the lowest set bit in the
3023 + *     bitmap, or -1 for failure.
3024 + */
3025 +int
3026 +bt_lowbit (bitmap_t *bitmap, int nbits)
3027 +{
3028 +    int last = (--nbits) >> BT_ULSHIFT;
3029 +    int maxbit;
3030 +    int i, j;
3031 +    
3032 +    /* look for a word with a bit on */
3033 +    for (i = 0; i <= last; i++)
3034 +       if (bitmap[i] != 0)
3035 +           break;
3036 +    if (i <= last)
3037 +    {
3038 +       /* found a word with a bit on, now see which bit it is */
3039 +       maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3040 +       for (j = 0; j <= maxbit; j++)
3041 +           if (bitmap[i] & (1 << j))
3042 +               return ((i << BT_ULSHIFT) | j);
3043 +    }
3044 +
3045 +    return (-1);
3046 +}
3047 +
3048 +/*
3049 + * Return the index of the next bit after 'last' in the bitmap
3050 + * that is set (isset != 0) or clear (isset == 0), or -1 for failure
3051 + */
3052 +int
3053 +bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset)
3054 +{
3055 +    int first = ((last+1) + BT_NBIPUL-1) >> BT_ULSHIFT;
3056 +    int end   = (--nbits) >> BT_ULSHIFT;
3057 +    int maxbit;
3058 +    int        i, j;
3059 +
3060 +    /* look for bits before the first whole word */
3061 +    if (((last+1) & BT_ULMASK) != 0)
3062 +    {
3063 +       maxbit = ((first-1) == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3064 +       for (j = ((last+1) & BT_ULMASK); j <= maxbit; j++)
3065 +           if ((bitmap[first-1] & (1 << j)) == (isset << j))
3066 +               return (((first-1) << BT_ULSHIFT) | j);
3067 +    }
3068 +
3069 +    /* look for a word with a bit in the requested state */
3070 +    for (i = first; i <= end; i++)
3071 +       if (bitmap[i] != (isset ? 0 : ~((bitmap_t) 0)))
3072 +           break;
3073 +
3074 +    if (i <= end)
3075 +    {
3076 +       /* found a word with a matching bit, now see which bit it is */
3077 +       maxbit = (i == end) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3078 +       for (j = 0; j <= maxbit; j++)
3079 +           if ((bitmap[i] & (1 << j)) == (isset << j))
3080 +               return ((i << BT_ULSHIFT) | j);
3081 +    }
3082 +    return (-1);
3083 +}
3084 +
3085 +void
3086 +bt_copy (bitmap_t *a, bitmap_t *b, int nbits)
3087 +{
3088 +    int i;
3089 +
3090 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3091 +       b[i] = a[i];
3092 +
3093 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3094 +       if (BT_TEST(a, i))
3095 +           BT_SET(b,i);
3096 +       else
3097 +           BT_CLEAR(b,i);
3098 +}
3099 +
3100 +void
3101 +bt_zero (bitmap_t *bitmap, int nbits)
3102 +{
3103 +    int i;
3104 +
3105 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3106 +       bitmap[i] = 0;
3107 +
3108 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3109 +       BT_CLEAR(bitmap,i);
3110 +}
3111 +
3112 +void
3113 +bt_fill (bitmap_t *bitmap, int nbits)
3114 +{
3115 +    int i;
3116 +
3117 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3118 +       bitmap[i] = ~((bitmap_t) 0);
3119 +
3120 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3121 +       BT_SET(bitmap,i);
3122 +}
3123 +
3124 +int
3125 +bt_cmp (bitmap_t *a, bitmap_t *b, int nbits)
3126 +{
3127 +    int i;
3128 +
3129 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3130 +       if (a[i] != b[i])
3131 +           return (1);
3132 +
3133 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3134 +       if (BT_TEST (a, i) != BT_TEST(b, i))
3135 +           return (1);
3136 +    return (0);
3137 +}
3138 +
3139 +void
3140 +bt_intersect (bitmap_t *a, bitmap_t *b, int nbits)
3141 +{
3142 +    int i;
3143 +    
3144 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3145 +       a[i] &= b[i];
3146 +
3147 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3148 +       if (BT_TEST (a, i) && BT_TEST (b, i))
3149 +           BT_SET (a, i);
3150 +       else
3151 +           BT_CLEAR (a, i);
3152 +}
3153 +
3154 +void
3155 +bt_remove (bitmap_t *a, bitmap_t *b, int nbits)
3156 +{
3157 +    int i;
3158 +
3159 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3160 +       a[i] &= ~b[i];
3161 +
3162 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3163 +       if (BT_TEST (b, i))
3164 +           BT_CLEAR (a, i);
3165 +}
3166 +
3167 +void
3168 +bt_add (bitmap_t *a, bitmap_t *b, int nbits)
3169 +{
3170 +    int i;
3171 +
3172 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3173 +       a[i] |= b[i];
3174 +
3175 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3176 +       if (BT_TEST(b, i))
3177 +           BT_SET (a, i);
3178 +}
3179 +
3180 +/*
3181 + * bt_spans: returns 1 if partition 'a' spans partition 'b',
3182 + *    i.e. all bits set in 'b' are also set in 'a'
3183 + */
3184 +int
3185 +bt_spans (bitmap_t *a, bitmap_t *b, int nbits)
3186 +{
3187 +    int i;
3188 +    
3189 +    for (i = 0; i < nbits; i++)
3190 +       if (BT_TEST (b, i) && !BT_TEST (a, i))
3191 +           return (0);
3192 +    return (1);
3193 +}
3194 +
3195 +/*
3196 + * bt_subset: copy [base,base+nbits-1] from 'a' to 'b'
3197 + */
3198 +void
3199 +bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits)
3200 +{
3201 +    int i;
3202 +
3203 +    for (i = 0; i < nbits; i++)
3204 +    {
3205 +       if (BT_TEST (a, base+i))
3206 +           BT_SET(b,i);
3207 +       else
3208 +           BT_CLEAR (b,i);
3209 +    }
3210 +}
3211 +
3212 +void 
3213 +bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits)
3214 +{
3215 +    int i;
3216 +    
3217 +    for (i = 0; i < nbits; i++)
3218 +    {
3219 +       if (!BT_TEST (a, i) && BT_TEST (b, i))
3220 +       {
3221 +           BT_SET (c, i);
3222 +        }
3223 +       else
3224 +       {
3225 +           BT_CLEAR (c, i);
3226 +        }
3227 +    }
3228 +}
3229 +
3230 +void 
3231 +bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits)
3232 +{
3233 +    int i;
3234 +    
3235 +    for (i = 0; i < nbits; i++)
3236 +    {
3237 +       if (BT_TEST (a, i) && !BT_TEST (b, i))
3238 +       {
3239 +           BT_SET (c, i);
3240 +        }
3241 +       else
3242 +       {
3243 +           BT_CLEAR (c, i);
3244 +        }
3245 +    }
3246 +}
3247 +
3248 +int
3249 +bt_nbits (bitmap_t *a, int nbits)
3250 +{
3251 +    int i, c;
3252 +    for (i = 0, c = 0; i < nbits; i++)
3253 +       if (BT_TEST (a, i))
3254 +           c++;
3255 +    return (c);
3256 +}
3257 +
3258 +/*
3259 + * Local variables:
3260 + * c-file-style: "stroustrup"
3261 + * End:
3262 + */
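[Editorial note, not part of the patch] As an illustrative sketch of how these helpers compose, assuming <elan/bitmap.h> provides bitmap_t and the BT_SET/BT_TEST macros used above, a caller could grab a free context bit as follows; example_alloc_ctx is a hypothetical name:

#include <elan/bitmap.h>

static int example_alloc_ctx(bitmap_t *map, int nbits)
{
	int bit = bt_freebit(map, nbits);	/* index of first clear bit, or -1 */

	if (bit >= 0)
		BT_SET(map, bit);		/* mark it in use */

	return bit;
}

The same pattern with bt_nextbit() lets a caller walk every set (or clear) bit without scanning the bitmap word by word.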
3263 diff -urN clean/drivers/net/qsnet/elan/capability.c linux-2.6.9/drivers/net/qsnet/elan/capability.c
3264 --- clean/drivers/net/qsnet/elan/capability.c   1969-12-31 19:00:00.000000000 -0500
3265 +++ linux-2.6.9/drivers/net/qsnet/elan/capability.c     2005-07-21 06:42:36.000000000 -0400
3266 @@ -0,0 +1,796 @@
3267 +/*
3268 + *    Copyright (c) 2003 by Quadrics Ltd.
3269 + * 
3270 + *    For licensing information please see the supplied COPYING file
3271 + *
3272 + */
3273 +
3274 +#ident "@(#)$Id: capability.c,v 1.19.2.2 2005/07/21 10:42:36 addy Exp $"
3275 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/capability.c,v $ */
3276 +
3277 +
3278 +#include <qsnet/kernel.h>
3279 +#include <elan/elanmod.h>
3280 +
3281 +static LIST_HEAD(elan_cap_list); 
3282 +
3283 +typedef struct elan_vp_struct
3284 +{
3285 +       struct list_head list;
3286 +       ELAN_CAPABILITY  vp;
3287 +} ELAN_VP_NODE_STRUCT;
3288 +
3289 +/* There is an array of these structs for each process/context in the CAP.
3290 + * This is then replicated for each rail; the usercopy handle state is
3291 + * only maintained on rail 0, though.
3292 + */
3293 +typedef struct elan_attached_struct
3294 +{
3295 +       void               *cb_args;
3296 +       ELAN_DESTROY_CB  cb_func;
3297 +       struct task_struct *handle;             /* usercopy: attached task handle */
3298 +       struct task_struct *owner;              /* usercopy: attached task handle owner */
3299 +} ELAN_ATTACHED_STRUCT;
3300 +
3301 +typedef struct elan_cap_node_struct
3302 +{
3303 +       struct list_head list;
3304 +       ELAN_CAP_STRUCT     node;
3305 +       ELAN_ATTACHED_STRUCT *attached[ELAN_MAX_RAILS];
3306 +       struct list_head vp_list;
3307 +} ELAN_CAP_NODE_STRUCT;
3308 +
3309 +
3310 +ELAN_CAP_NODE_STRUCT *
3311 +find_cap_node(ELAN_CAPABILITY *cap)
3312 +{
3313 +       struct list_head        *tmp;
3314 +       ELAN_CAP_NODE_STRUCT *ptr=NULL;
3315 +
3316 +       list_for_each(tmp, &elan_cap_list) {
3317 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3318 +               /* is it an exact match (key not checked) */
3319 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) 
3320 +                    && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)) {
3321 +                       return ptr;
3322 +               }
3323 +       }
3324 +       return NULL;            /* no matching capability found */
3325 +}
3326 +
3327 +ELAN_VP_NODE_STRUCT *
3328 +find_vp_node( ELAN_CAP_NODE_STRUCT *cap_node,ELAN_CAPABILITY *map)
3329 +{
3330 +       struct list_head       * tmp;
3331 +       ELAN_VP_NODE_STRUCT * ptr = NULL;
3332 +
3333 +       list_for_each(tmp, &cap_node->vp_list) {
3334 +               ptr = list_entry(tmp, ELAN_VP_NODE_STRUCT , list);
3335 +               /* is it an exact match (key not checked) */
3336 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->vp,map) 
3337 +                    && ELAN_CAP_GEOM_MATCH(&ptr->vp,map)){
3338 +                       return ptr;
3339 +               }
3340 +       }
3341 +       return NULL;            /* no matching map found */
3342 +}
3343 +
3344 +int 
3345 +elan_validate_cap(ELAN_CAPABILITY *cap)
3346 +{
3347 +       char                      space[127];
3348 +
3349 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap %s\n",elan_capability_string(cap,space));
3350 +
3351 +       /* check versions */
3352 +       if (cap->cap_version != ELAN_CAP_VERSION_NUMBER)
3353 +       {
3354 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER);
3355 +               return (EINVAL);
3356 +       }
3357 +
3358 +       /* check it's not HWTEST */
3359 +       if ( cap->cap_type & ELAN_CAP_TYPE_HWTEST )
3360 +       {
3361 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_HWTEST \n");   
3362 +               return (EINVAL);
3363 +       }
3364 +       
3365 +       /* check its type */
3366 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
3367 +       {
3368 +       case ELAN_CAP_TYPE_KERNEL :     
3369 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_KERNEL \n");   
3370 +               return (EINVAL);
3371 +
3372 +               /* check it has a valid type */
3373 +       case ELAN_CAP_TYPE_BLOCK:
3374 +       case ELAN_CAP_TYPE_CYCLIC:
3375 +               break;
3376 +
3377 +               /* all other capability types are rejected as well */
3378 +       default:
3379 +               ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed unknown type = %x \n", (cap->cap_type & ELAN_CAP_TYPE_MASK));       
3380 +               return (EINVAL);
3381 +       }
3382 +       
3383 +       if ((cap->cap_lowcontext == ELAN_CAP_UNINITIALISED) || (cap->cap_highcontext == ELAN_CAP_UNINITIALISED)
3384 +           || (cap->cap_lownode == ELAN_CAP_UNINITIALISED) || (cap->cap_highnode    == ELAN_CAP_UNINITIALISED))
3385 +       {
3386 +               
3387 +               ELAN_DEBUG4 (ELAN_DBG_VP,"elan_validate_cap: ELAN_CAP_UNINITIALISED   LowNode %d   HighNode %d   LowContext %d   highContext %d\n",
3388 +                            cap->cap_lownode , cap->cap_highnode,
3389 +                            cap->cap_lowcontext , cap->cap_highcontext);
3390 +               return (EINVAL);
3391 +       }       
3392 +
3393 +       if (cap->cap_lowcontext > cap->cap_highcontext)
3394 +       {
3395 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext);
3396 +               return (EINVAL);
3397 +       }
3398 +       
3399 +       if (cap->cap_lownode > cap->cap_highnode)
3400 +       {
3401 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lownode > cap->cap_highnode) %d %d\n",cap->cap_lownode, cap->cap_highnode);
3402 +               return (EINVAL);
3403 +       }
3404 +
3405 +       if (cap->cap_mycontext != ELAN_CAP_UNINITIALISED) 
3406 +       {
3407 +               ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed cap->cap_mycontext is set %d  \n", cap->cap_mycontext);
3408 +               return (EINVAL);
3409 +       }
3410 +
3411 +
3412 +       if ((ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)) > ELAN_MAX_VPS)
3413 +       {
3414 +               ELAN_DEBUG6 (ELAN_DBG_VP,"elan_validate_cap: too many vps  LowNode %d   HighNode %d   LowContext %d   highContext %d,  %d > %d\n",
3415 +                            cap->cap_lownode , cap->cap_highnode,
3416 +                            cap->cap_lowcontext , cap->cap_highcontext,
3417 +                            (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)),
3418 +                            ELAN_MAX_VPS);
3419 +               
3420 +               return (EINVAL);
3421 +       }
3422 +
3423 +       return (ESUCCESS);
3424 +}
3425 +
3426 +int
3427 +elan_validate_map(ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3428 +{
3429 +       ELAN_CAP_NODE_STRUCT * ptr  = NULL;
3430 +       ELAN_VP_NODE_STRUCT  * vptr = NULL;
3431 +       char space[256];
3432 +
3433 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
3434 +
3435 +       ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map \n");
3436 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map cap = %s \n",elan_capability_string(cap,space));
3437 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map map = %s \n",elan_capability_string(map,space));
3438 +
3439 +       /* does cap exist    */
3440 +       ptr = find_cap_node(cap);
3441 +       if ( ptr == NULL ) 
3442 +       {
3443 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not found \n");
3444 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3445 +               return EINVAL;
3446 +       }
3447 +       /* is it active */
3448 +       if ( ! ptr->node.active ) 
3449 +       {
3450 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not active \n");
3451 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3452 +               return EINVAL;
3453 +       }
3454 +
3455 +       /* are they the same */
3456 +       if ( ELAN_CAP_TYPE_MATCH(cap,map) 
3457 +            && ELAN_CAP_GEOM_MATCH(cap,map)) 
3458 +       {
3459 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap == map  passed\n");
3460 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3461 +               return ESUCCESS;
3462 +       }
3463 +
3464 +       /* is map in map list */
3465 +       vptr = find_vp_node(ptr, map);
3466 +       if ( vptr == NULL ) 
3467 +       {
3468 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map:  map not found\n");
3469 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3470 +               return EINVAL;
3471 +       }
3472 +       
3473 +       ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map:  map passed\n");
3474 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3475 +       return ESUCCESS;
3476 +}
3477 +
3478 +int
3479 +elan_create_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
3480 +{
3481 +       char                      space[127];
3482 +       struct list_head        * tmp;
3483 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3484 +       int                       i, rail;
3485 +
3486 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3487 +
3488 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_create_cap %s\n",elan_capability_string(cap,space));     
3489 +
3490 +       /* need to check that the cap does not overlap another one
3491 +          or is an exact match with only the userkey changing */
3492 +       list_for_each(tmp, &elan_cap_list) {
3493 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3494 +
3495 +               /* is it an exact match (key not checked) */
3496 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) 
3497 +                    && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)
3498 +                    && (ptr->node.owner == owner)) {
3499 +                       if ( ptr->node.active ) {
3500 +                               /* don't increment the attached count as this is like a create */
3501 +                               ptr->node.cap.cap_userkey = cap->cap_userkey;
3502 +                               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3503 +                               return ESUCCESS;
3504 +                       }
3505 +                       else
3506 +                       {
3507 +                               ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed %s\n",
3508 +                                           elan_capability_string(&ptr->node.cap,space));
3509 +                               ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed ptr %p owner %p attached %d\n",
3510 +                                           ptr, owner, ptr->node.attached);
3511 +                                            
3512 +                               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3513 +                               return EINVAL;
3514 +                       }
3515 +               }
3516 +               
3517 +               /* does it overlap, even with ones being destroyed */