merge b_devel into HEAD, which will become 0.7.3
fs/lustre-release.git: lustre/kernel_patches/patches/tg3_netconsole.patch
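
This patch adds netconsole/netdump receive support to the tg3 gigabit
Ethernet driver in the 2.4.20-rh kernel.  It introduces a polled
receive path, upcall_rx_hook(), which drains the RX return ring and
hands each frame to dev->rx_hook; the driver frees the skb itself
afterwards, so the hook must not keep or free it.  tg3_interrupt()
diverts into that path when a hook is registered and netdump_mode is
set, ahead of NAPI scheduling, and under HAVE_POLL_CONTROLLER the
patch registers Poll_tg3() as dev->poll_controller so the netdump core
can drive the interrupt handler with IRQs disabled.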

--- linux-2.4.20-rh/drivers/net/tg3.c~tg3_netconsole    2003-04-11 14:04:56.000000000 +0800
+++ linux-2.4.20-rh-root/drivers/net/tg3.c      2003-07-01 11:27:46.000000000 +0800
@@ -170,6 +170,10 @@ static void tg3_write_indirect_reg32(str
        }
 }
 
+#ifdef HAVE_POLL_CONTROLLER
+static void Poll_tg3(struct net_device *);
+#endif
+
 #define tw32(reg,val)          tg3_write_indirect_reg32(tp,(reg),(val))
 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
 #define tw16(reg,val)          writew(((val) & 0xffff), tp->regs + (reg))
@@ -1899,7 +1903,138 @@ static int tg3_vlan_rx(struct tg3 *tp, s
        return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
 }
 #endif
+/* for netconsole */
+static int upcall_rx_hook(struct net_device *dev)
+{
+       struct tg3 *tp = dev->priv;
+       u32 work_mask;
+       u32 rx_rcb_ptr = tp->rx_rcb_ptr;
+       u16 hw_idx, sw_idx;
+       int received;
+
+       hw_idx = tp->hw_status->idx[0].rx_producer;
+       sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE;
+       work_mask = 0;
+       received = 0;
+       while (sw_idx != hw_idx) {
+               struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
+               unsigned int len;
+               struct sk_buff *skb;
+               dma_addr_t dma_addr;
+               u32 opaque_key, desc_idx, *post_ptr;
+
+               desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+               opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+               if (opaque_key == RXD_OPAQUE_RING_STD) {
+                       dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
+                                                 mapping);
+                       skb = tp->rx_std_buffers[desc_idx].skb;
+                       post_ptr = &tp->rx_std_ptr;
+               } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+                       dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
+                                                 mapping);
+                       skb = tp->rx_jumbo_buffers[desc_idx].skb;
+                       post_ptr = &tp->rx_jumbo_ptr;
+               }
+               else {
+                       goto next_pkt_nopost;
+               }
+
+               work_mask |= opaque_key;
+
+               if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+                   (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+               drop_it:
+                       tg3_recycle_rx(tp, opaque_key,
+                                      desc_idx, *post_ptr);
+               drop_it_no_recycle:
+                       /* Other statistics kept track of by card. */
+                       tp->net_stats.rx_dropped++;
+                       goto next_pkt;
+               }
+
+               len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
+
+               if (len > RX_COPY_THRESHOLD) {
+                       int skb_size;
+
+                       skb_size = tg3_alloc_rx_skb(tp, opaque_key,
+                                                   desc_idx, *post_ptr);
+                       if (skb_size < 0)
+                               goto drop_it;
+
+                       pci_unmap_single(tp->pdev, dma_addr,
+                                        skb_size - tp->rx_offset,
+                                        PCI_DMA_FROMDEVICE);
+
+                       skb_put(skb, len);
+               } else {
+                       struct sk_buff *copy_skb;
+
+                       tg3_recycle_rx(tp, opaque_key,
+                                      desc_idx, *post_ptr);
+
+                       copy_skb = dev_alloc_skb(len + 2);
+                       if (copy_skb == NULL)
+                               goto drop_it_no_recycle;
+
+                       copy_skb->dev = tp->dev;
+                       skb_reserve(copy_skb, 2);
+                       skb_put(copy_skb, len);
+                       pci_dma_sync_single(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       memcpy(copy_skb->data, skb->data, len);
+
+                       /* We'll reuse the original ring buffer. */
+                       skb = copy_skb;
+               }
+
+               if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
+                   (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+                   (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+                     >> RXD_TCPCSUM_SHIFT) == 0xffff))
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               else
+                       skb->ip_summed = CHECKSUM_NONE;
+
+               skb->protocol = eth_type_trans(skb, tp->dev);
+               /* hand the packet up to the netconsole driver */
+               dev->rx_hook(skb);
+               kfree_skb(skb);
+               tp->dev->last_rx = jiffies;
+               received++;
+next_pkt:
+               (*post_ptr)++;
+next_pkt_nopost:
+               rx_rcb_ptr++;
+               sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE;
+       }
+
+       /* ACK the status ring. */
+       tp->rx_rcb_ptr = rx_rcb_ptr;
+       tw32_mailbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
+                    (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE));
+       if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+               tr32(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW);
 
+       /* Refill RX ring(s). */
+       if (work_mask & RXD_OPAQUE_RING_STD) {
+               sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
+               tw32_mailbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
+                            sw_idx);
+               if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+                       tr32(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW);
+       }
+       if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+               sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
+               tw32_mailbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
+                            sw_idx);
+               if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+                       tr32(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW);
+       }
+
+       return received;
+
+}
 /* The RX ring scheme is composed of multiple rings which post fresh
  * buffers to the chip, and one special ring the chip uses to report
  * status back to the host.
@@ -2006,7 +2141,7 @@ static int tg3_rx(struct tg3 *tp, int bu
                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }
-
+               
                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -2016,6 +2151,8 @@ static int tg3_rx(struct tg3 *tp, int bu
                        skb->ip_summed = CHECKSUM_NONE;
 
                skb->protocol = eth_type_trans(skb, tp->dev);
+
+                                       
 #if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
@@ -2058,7 +2195,6 @@ next_pkt_nopost:
                if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                        tr32(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW);
        }
-
        return received;
 }
 
@@ -2151,7 +2287,6 @@ static void tg3_interrupt(int irq, void 
        unsigned long flags;
 
        spin_lock_irqsave(&tp->lock, flags);
-
        if (sblk->status & SD_STATUS_UPDATED) {
                /*
                 * writing any value to intr-mbox-0 clears PCI INTA# and
@@ -2169,8 +2304,17 @@ static void tg3_interrupt(int irq, void 
                tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
                sblk->status &= ~SD_STATUS_UPDATED;
 
-               if (likely(tg3_has_work(dev, tp)))
-                       netif_rx_schedule(dev);         /* schedule NAPI poll */
+               if (likely(tg3_has_work(dev, tp))) {
+                       if (unlikely(dev->rx_hook != NULL) && netdump_mode) {
+                               int ret;
+
+                               ret = upcall_rx_hook(dev);
+                               if (!ret) {
+                                       goto out;
+                               }
+                       }
+                       netif_rx_schedule(dev);         /* schedule NAPI poll */
+               }
                else {
                        /* no work, shared interrupt perhaps?  re-enable
                         * interrupts, and flush that PCI write
@@ -2180,7 +2324,7 @@ static void tg3_interrupt(int irq, void 
                        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
                }
        }
-
+out:
        spin_unlock_irqrestore(&tp->lock, flags);
 }
 
@@ -6804,7 +6948,10 @@ static int __devinit tg3_init_one(struct
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->change_mtu = tg3_change_mtu;
        dev->irq = pdev->irq;
-
+#ifdef HAVE_POLL_CONTROLLER
+       dev->poll_controller = &Poll_tg3;
+#endif
+
        err = tg3_get_invariants(tp);
        if (err) {
                printk(KERN_ERR PFX "Problem fetching invariants of chip, "
@@ -6882,6 +7029,15 @@ err_out_disable_pdev:
        return err;
 }
 
+#ifdef HAVE_POLL_CONTROLLER
+static void Poll_tg3(struct net_device *dev)
+{
+       if (!netdump_mode) disable_irq(dev->irq);
+       tg3_interrupt(dev->irq, dev, NULL);
+       if (!netdump_mode) enable_irq(dev->irq);
+}
+#endif
+
 static void __devexit tg3_remove_one(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);

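For reference, the consumer side of dev->rx_hook and dev->poll_controller
lives in the Red Hat netdump/netconsole module, which is not part of this
patch.  The following is a minimal sketch of how such a client could
attach to the hooks; it assumes a 2.4-era RH kernel whose struct
net_device carries these netdump fields, and the example_* names and the
void rx_hook signature are illustrative assumptions, not the module's
real API.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

extern int netdump_mode;        /* RH netdump flag tested by the patch */

/*
 * Hypothetical receive hook.  upcall_rx_hook() above calls
 * dev->rx_hook(skb) and then kfree_skb()s the buffer itself, so the
 * hook must inspect the frame synchronously and must not free or
 * queue it.
 */
static void example_rx_hook(struct sk_buff *skb)
{
        /* parse netdump/netconsole control frames here */
}

static void example_attach(struct net_device *dev)
{
        dev->rx_hook = example_rx_hook;
}

/*
 * Poll the NIC without depending on the IRQ path, e.g. from crash-dump
 * context.  Poll_tg3() fakes an interrupt; outside netdump_mode it
 * brackets the call with disable_irq()/enable_irq() so it cannot race
 * the live handler.
 */
static void example_poll(struct net_device *dev)
{
        if (dev->poll_controller)
                dev->poll_controller(dev);
}

In netdump_mode, received frames are delivered to the hook directly from
interrupt context; when upcall_rx_hook() finds nothing to deliver,
tg3_interrupt() bails out through the out: label instead of scheduling
NAPI.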