/*
	drivers/net/tulip/interrupt.c

	Maintained by Valerie Henson <val_henson@linux.intel.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

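/* Both tunables are set from module parameters (rx_copybreak,
   max_interrupt_work) in tulip_core.c: received packets shorter than
   tulip_rx_copybreak are copied into a freshly allocated skb, and
   tulip_max_interrupt_work bounds how much work one invocation of the
   interrupt handler may do. */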
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11 21143 hardware Mitigation Control Interrupt
            We use only RX mitigation; other techniques are used
            for TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */

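        /* As a worked example, 0x80150000 decodes under the layout
           above as: bit 31 = 1 (cycle size), RX timer (23:20) = 1,
           RX pkts (19:17) = 2, CM (16) = 1 -- matching the
           annotation on the entry below. */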
        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//       0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif


int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to running out of buffers,
			 * restart it
			 */
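			/* CSR2 is the receive poll demand register; writing
			 * any value prompts the chip to re-read the Rx
			 * descriptor it stalled on.
			 */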
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}

#ifdef CONFIG_TULIP_NAPI

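/* Fires after an allocation failure in tulip_poll() left polling
   disabled; reschedule polling so the poll can retry the refill. */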
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	netif_rx_schedule(dev);
}

int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* One buffer is needed for mitigation activation; or it might be
	   a bug in the ring buffer code; check later -- JHS */

	if (rx_work_limit >= RX_RING_SIZE)
		rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_poll(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_poll(), entry %d %8.8x.\n",
				       dev->name, entry, status);
			if (--rx_work_limit < 0)
				goto not_done;

			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet.*/
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
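				/* Copying short packets into a fresh skb lets the
				   full-size buffer stay on the ring; longer packets
				   are handed up in place and the ring slot is
				   refilled with a new buffer later. */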
				if (pkt_len < tulip_rx_copybreak
				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
					eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
							 pkt_len, 0);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_poll: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
			received++;

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy... the irq handler no longer acks Rx;
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new poll
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have won
		 * finally: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

done:

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It has been proven by
	   real life installations. We could have IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here:
	   it would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: we only use the min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	dev->quota -= received;
	*budget -= received;

	tulip_refill_rx(dev);

	/* If the RX ring could not be fully refilled, we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */

	netif_rx_complete(dev);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq was raised after the loop began, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return 0;

 not_done:
	if (!received) {
		received = dev->quota; /* Should not happen */
	}
	dev->quota -= received;
	*budget -= received;

	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return 1;


 oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: a timer_pending() check here would be an explicit
	 * signature of a bug. The timer could be pending now, yet have
	 * fired and completed before we did netif_rx_complete().
	 * See? We would lose it. */

	/* remove ourselves from the polling list */
	netif_rx_complete(dev);

	return 0;
}

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (unsigned long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

			pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

			tp->rx_buffers[entry].skb = NULL;
			tp->rx_buffers[entry].mapping = 0;
		}
		skb->protocol = eth_type_trans(skb, dev);

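		/* Non-NAPI path: netif_rx() queues the packet on the CPU
		   backlog from irq context, whereas the NAPI path above
		   hands it to the stack directly via netif_receive_skb(). */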
		netif_rx(skb);

		dev->last_rx = jiffies;
		tp->stats.rx_packets++;
		tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif  /* CONFIG_TULIP_NAPI */

static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt(dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does the Rx and RxNoBuf acking. */

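		/* 0x0001ff3f is 0x0001ffff with RxIntr (bit 6) and
		   RxNoBuf (bit 7) cleared, leaving those two sources
		   for tulip_poll() to acknowledge. */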
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));


		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames are not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
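				/* CSR1 is the transmit poll demand register;
				   writing it kicks the Tx engine immediately
				   after the restart. */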
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
					dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
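		/* Rx work now belongs to the scheduled poll; strip the Rx
		   bits so they do not keep this loop spinning. */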
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError )) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

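	/* CSR8 is the missed-frames counter: bits 15:0 count frames and
	   bit 16 flags overflow, in which case the saturated count of
	   0x10000 is credited. */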
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}
