• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/staging/et131x/
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 *   http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_rx.c - Routines used to perform data reception
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software.  Using this
19 * software indicates your acceptance of these terms and conditions.  If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 *    list of conditions and the following Disclaimer as comments in the code as
30 *    well as in the documentation and/or other materials provided with the
31 *    distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 *    this list of conditions and the following Disclaimer in the documentation
35 *    and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 *    may be used to endorse or promote products derived from this software
39 *    without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/pci.h>
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/types.h>
65#include <linux/kernel.h>
66
67#include <linux/sched.h>
68#include <linux/ptrace.h>
69#include <linux/slab.h>
70#include <linux/ctype.h>
71#include <linux/string.h>
72#include <linux/timer.h>
73#include <linux/interrupt.h>
74#include <linux/in.h>
75#include <linux/delay.h>
76#include <linux/io.h>
77#include <linux/bitops.h>
78#include <asm/system.h>
79
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/if_arp.h>
84#include <linux/ioport.h>
85
86#include "et1310_phy.h"
87#include "et131x_adapter.h"
88#include "et1310_rx.h"
89#include "et131x.h"
90
91void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
92
93/**
94 * et131x_rx_dma_memory_alloc
95 * @adapter: pointer to our private adapter structure
96 *
97 * Returns 0 on success and errno on failure (as defined in errno.h)
98 *
99 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
100 * and the Packet Status Ring.
101 */
102int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
103{
104	u32 i, j;
105	u32 bufsize;
106	u32 pktStatRingSize, FBRChunkSize;
107	struct rx_ring *rx_ring;
108
109	/* Setup some convenience pointers */
110	rx_ring = &adapter->rx_ring;
111
112	/* Alloc memory for the lookup table */
113#ifdef USE_FBR0
114	rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
115#endif
116	rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
117
118	/* The first thing we will do is configure the sizes of the buffer
119	 * rings. These will change based on jumbo packet support.  Larger
120	 * jumbo packets increases the size of each entry in FBR0, and the
121	 * number of entries in FBR0, while at the same time decreasing the
122	 * number of entries in FBR1.
123	 *
124	 * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
125	 * entries are huge in order to accomodate a "jumbo" frame, then it
126	 * will have less entries.  Conversely, FBR1 will now be relied upon
127	 * to carry more "normal" frames, thus it's entry size also increases
128	 * and the number of entries goes up too (since it now carries
129	 * "small" + "regular" packets.
130	 *
131	 * In this scheme, we try to maintain 512 entries between the two
132	 * rings. Also, FBR1 remains a constant size - when it's size doubles
133	 * the number of entries halves.  FBR0 increases in size, however.
134	 */
135
136	if (adapter->RegistryJumboPacket < 2048) {
137#ifdef USE_FBR0
138		rx_ring->Fbr0BufferSize = 256;
139		rx_ring->Fbr0NumEntries = 512;
140#endif
141		rx_ring->Fbr1BufferSize = 2048;
142		rx_ring->Fbr1NumEntries = 512;
143	} else if (adapter->RegistryJumboPacket < 4096) {
144#ifdef USE_FBR0
145		rx_ring->Fbr0BufferSize = 512;
146		rx_ring->Fbr0NumEntries = 1024;
147#endif
148		rx_ring->Fbr1BufferSize = 4096;
149		rx_ring->Fbr1NumEntries = 512;
150	} else {
151#ifdef USE_FBR0
152		rx_ring->Fbr0BufferSize = 1024;
153		rx_ring->Fbr0NumEntries = 768;
154#endif
155		rx_ring->Fbr1BufferSize = 16384;
156		rx_ring->Fbr1NumEntries = 128;
157	}
158
159#ifdef USE_FBR0
160	adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr0NumEntries +
161	    adapter->rx_ring.Fbr1NumEntries;
162#else
163	adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr1NumEntries;
164#endif
165
166	/* Allocate an area of memory for Free Buffer Ring 1 */
167	bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
168	rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
169						    bufsize,
170						    &rx_ring->pFbr1RingPa);
171	if (!rx_ring->pFbr1RingVa) {
172		dev_err(&adapter->pdev->dev,
173			  "Cannot alloc memory for Free Buffer Ring 1\n");
174		return -ENOMEM;
175	}
176
177	/* Save physical address
178	 *
179	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
180	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
181	 * are ever returned, make sure the high part is retrieved here
182	 * before storing the adjusted address.
183	 */
184	rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
185
186	/* Align Free Buffer Ring 1 on a 4K boundary */
187	et131x_align_allocated_memory(adapter,
188				      &rx_ring->Fbr1Realpa,
189				      &rx_ring->Fbr1offset, 0x0FFF);
190
191	rx_ring->pFbr1RingVa = (void *)((u8 *) rx_ring->pFbr1RingVa +
192					rx_ring->Fbr1offset);
193
194#ifdef USE_FBR0
195	/* Allocate an area of memory for Free Buffer Ring 0 */
196	bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
197	rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
198						    bufsize,
199						    &rx_ring->pFbr0RingPa);
200	if (!rx_ring->pFbr0RingVa) {
201		dev_err(&adapter->pdev->dev,
202			  "Cannot alloc memory for Free Buffer Ring 0\n");
203		return -ENOMEM;
204	}
205
206	/* Save physical address
207	 *
208	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
209	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
210	 * are ever returned, make sure the high part is retrieved here before
211	 * storing the adjusted address.
212	 */
213	rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
214
215	/* Align Free Buffer Ring 0 on a 4K boundary */
216	et131x_align_allocated_memory(adapter,
217				      &rx_ring->Fbr0Realpa,
218				      &rx_ring->Fbr0offset, 0x0FFF);
219
220	rx_ring->pFbr0RingVa = (void *)((u8 *) rx_ring->pFbr0RingVa +
221					rx_ring->Fbr0offset);
222#endif
223
224	for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
225	     i++) {
226		u64 Fbr1Offset;
227		u64 Fbr1TempPa;
228		u32 Fbr1Align;
229
230		/* This code allocates an area of memory big enough for N
231		 * free buffers + (buffer_size - 1) so that the buffers can
232		 * be aligned on 4k boundaries.  If each buffer were aligned
233		 * to a buffer_size boundary, the effect would be to double
234		 * the size of FBR0.  By allocating N buffers at once, we
235		 * reduce this overhead.
236		 */
237		if (rx_ring->Fbr1BufferSize > 4096)
238			Fbr1Align = 4096;
239		else
240			Fbr1Align = rx_ring->Fbr1BufferSize;
241
242		FBRChunkSize =
243		    (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
244		rx_ring->Fbr1MemVa[i] =
245		    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
246					 &rx_ring->Fbr1MemPa[i]);
247
248		if (!rx_ring->Fbr1MemVa[i]) {
249		dev_err(&adapter->pdev->dev,
250				"Could not alloc memory\n");
251			return -ENOMEM;
252		}
253
254		/* See NOTE in "Save Physical Address" comment above */
255		Fbr1TempPa = rx_ring->Fbr1MemPa[i];
256
257		et131x_align_allocated_memory(adapter,
258					      &Fbr1TempPa,
259					      &Fbr1Offset, (Fbr1Align - 1));
260
261		for (j = 0; j < FBR_CHUNKS; j++) {
262			u32 index = (i * FBR_CHUNKS) + j;
263
264			/* Save the Virtual address of this index for quick
265			 * access later
266			 */
267			rx_ring->fbr[1]->virt[index] =
268			    (u8 *) rx_ring->Fbr1MemVa[i] +
269			    (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
270
271			/* now store the physical address in the descriptor
272			 * so the device can access it
273			 */
274			rx_ring->fbr[1]->bus_high[index] =
275			    (u32) (Fbr1TempPa >> 32);
276			rx_ring->fbr[1]->bus_low[index] = (u32) Fbr1TempPa;
277
278			Fbr1TempPa += rx_ring->Fbr1BufferSize;
279
280			rx_ring->fbr[1]->buffer1[index] =
281			    rx_ring->fbr[1]->virt[index];
282			rx_ring->fbr[1]->buffer2[index] =
283			    rx_ring->fbr[1]->virt[index] - 4;
284		}
285	}
286
287#ifdef USE_FBR0
288	/* Same for FBR0 (if in use) */
289	for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
290	     i++) {
291		u64 Fbr0Offset;
292		u64 Fbr0TempPa;
293
294		FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
295		rx_ring->Fbr0MemVa[i] =
296		    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
297					 &rx_ring->Fbr0MemPa[i]);
298
299		if (!rx_ring->Fbr0MemVa[i]) {
300			dev_err(&adapter->pdev->dev,
301				"Could not alloc memory\n");
302			return -ENOMEM;
303		}
304
305		/* See NOTE in "Save Physical Address" comment above */
306		Fbr0TempPa = rx_ring->Fbr0MemPa[i];
307
308		et131x_align_allocated_memory(adapter,
309					      &Fbr0TempPa,
310					      &Fbr0Offset,
311					      rx_ring->Fbr0BufferSize - 1);
312
313		for (j = 0; j < FBR_CHUNKS; j++) {
314			u32 index = (i * FBR_CHUNKS) + j;
315
316			rx_ring->fbr[0]->virt[index] =
317			    (u8 *) rx_ring->Fbr0MemVa[i] +
318			    (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
319
320			rx_ring->fbr[0]->bus_high[index] =
321			    (u32) (Fbr0TempPa >> 32);
322			rx_ring->fbr[0]->bus_low[index] = (u32) Fbr0TempPa;
323
324			Fbr0TempPa += rx_ring->Fbr0BufferSize;
325
326			rx_ring->fbr[0]->buffer1[index] =
327			    rx_ring->fbr[0]->virt[index];
328			rx_ring->fbr[0]->buffer2[index] =
329			    rx_ring->fbr[0]->virt[index] - 4;
330		}
331	}
332#endif
333
334	/* Allocate an area of memory for FIFO of Packet Status ring entries */
335	pktStatRingSize =
336	    sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
337
338	rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
339						  pktStatRingSize,
340						  &rx_ring->pPSRingPa);
341
342	if (!rx_ring->pPSRingVa) {
343		dev_err(&adapter->pdev->dev,
344			  "Cannot alloc memory for Packet Status Ring\n");
345		return -ENOMEM;
346	}
347	printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
348
349	/*
350	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
351	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
352	 * are ever returned, make sure the high part is retrieved here before
353	 * storing the adjusted address.
354	 */
355
356	/* Allocate an area of memory for writeback of status information */
357	rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev,
358					    sizeof(struct rx_status_block),
359					    &rx_ring->rx_status_bus);
360	if (!rx_ring->rx_status_block) {
361		dev_err(&adapter->pdev->dev,
362			  "Cannot alloc memory for Status Block\n");
363		return -ENOMEM;
364	}
365	rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
366	printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
367
368	/* Recv
369	 * pci_pool_create initializes a lookaside list. After successful
370	 * creation, nonpaged fixed-size blocks can be allocated from and
371	 * freed to the lookaside list.
372	 * RFDs will be allocated from this pool.
373	 */
374	rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
375						   sizeof(MP_RFD),
376						   0,
377						   SLAB_CACHE_DMA |
378						   SLAB_HWCACHE_ALIGN,
379						   NULL);
380
381	adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
382
383	/* The RFDs are going to be put on lists later on, so initialize the
384	 * lists now.
385	 */
386	INIT_LIST_HEAD(&rx_ring->RecvList);
387	return 0;
388}
389
390/**
391 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
392 * @adapter: pointer to our private adapter structure
393 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktStatRingSize;
	PMP_RFD rfd;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Free RFDs and associated packet descriptors.  All RFDs should be
	 * back on RecvList at teardown; warn if any are still outstanding.
	 */
	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

	while (!list_empty(&rx_ring->RecvList)) {
		rfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
					       MP_RFD, list_node);

		list_del(&rfd->list_node);
		rfd->Packet = NULL;
		kmem_cache_free(adapter->rx_ring.RecvLookaside, rfd);
	}

	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory: the per-chunk DMA buffers.
		 * The bufsize computation must mirror the one used at
		 * allocation time (FBR_CHUNKS buffers + alignment slack).
		 */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				u32 Fbr1Align;

				/* Alignment is capped at 4K, matching the
				 * choice made in et131x_rx_dma_memory_alloc
				 */
				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself: undo the 4K-alignment offset that was
		 * added to the VA at allocation, so we free the original
		 * pointer returned by pci_alloc_consistent()
		 */
		rx_ring->pFbr1RingVa = (void *)((u8 *)
				rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev, bufsize,
				rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself: undo the alignment offset before
		 * freeing, as for FBR1 above
		 */
		rx_ring->pFbr0RingVa = (void *)((u8 *)
				rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		pktStatRingSize =
		    sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;

		pci_free_consistent(adapter->pdev, pktStatRingSize,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		pci_free_consistent(adapter->pdev,
			sizeof(struct rx_status_block),
			rx_ring->rx_status_block, rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool */
	if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table */
#ifdef USE_FBR0
	kfree(rx_ring->fbr[0]);
#endif

	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;
}
530
531/**
532 * et131x_init_recv - Initialize receive data structures.
533 * @adapter: pointer to our private adapter structure
534 *
535 * Returns 0 on success and errno on failure (as defined in errno.h)
536 */
537int et131x_init_recv(struct et131x_adapter *adapter)
538{
539	int status = -ENOMEM;
540	PMP_RFD rfd = NULL;
541	u32 rfdct;
542	u32 numrfd = 0;
543	struct rx_ring *rx_ring;
544
545	/* Setup some convenience pointers */
546	rx_ring = &adapter->rx_ring;
547
548	/* Setup each RFD */
549	for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
550		rfd = kmem_cache_alloc(rx_ring->RecvLookaside,
551						     GFP_ATOMIC | GFP_DMA);
552
553		if (!rfd) {
554			dev_err(&adapter->pdev->dev,
555				  "Couldn't alloc RFD out of kmem_cache\n");
556			status = -ENOMEM;
557			continue;
558		}
559
560		rfd->Packet = NULL;
561
562		/* Add this RFD to the RecvList */
563		list_add_tail(&rfd->list_node, &rx_ring->RecvList);
564
565		/* Increment both the available RFD's, and the total RFD's. */
566		rx_ring->nReadyRecv++;
567		numrfd++;
568	}
569
570	if (numrfd > NIC_MIN_NUM_RFD)
571		status = 0;
572
573	rx_ring->NumRfd = numrfd;
574
575	if (status != 0) {
576		kmem_cache_free(rx_ring->RecvLookaside, rfd);
577		dev_err(&adapter->pdev->dev,
578			  "Allocation problems in et131x_init_recv\n");
579	}
580	return status;
581}
582
583/**
584 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
585 * @etdev: pointer to our adapter structure
586 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure.  */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);

	/* Clear the status block so stale entries aren't mistaken for new
	 * DMA writebacks after the restart
	 */
	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->pPSRingPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
	writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Program the PSR low-water mark as a percentage of the ring size;
	 * read back num_des (12-bit field) rather than trusting the local
	 * value
	 */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&etdev->RcvLock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents: one descriptor
	 * per pre-allocated buffer, carrying its bus address and index
	 */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
	for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
	for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 *complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&etdev->RcvLock, flags);
}
693
694/**
695 * SetRxDmaTimer - Set the heartbeat timer according to line rate.
696 * @etdev: pointer to our adapter structure
697 */
698void SetRxDmaTimer(struct et131x_adapter *etdev)
699{
700	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
701	 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
702	 */
703	if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
704	    (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
705		writel(0, &etdev->regs->rxdma.max_pkt_time);
706		writel(1, &etdev->regs->rxdma.num_pkt_done);
707	}
708}
709
710/**
711 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
712 * @etdev: pointer to our adapter structure
713 */
714void et131x_rx_dma_disable(struct et131x_adapter *etdev)
715{
716	u32 csr;
717	/* Setup the receive dma configuration register */
718	writel(0x00002001, &etdev->regs->rxdma.csr);
719	csr = readl(&etdev->regs->rxdma.csr);
720	if ((csr & 0x00020000) != 1) {	/* Check halt status (bit 17) */
721		udelay(5);
722		csr = readl(&etdev->regs->rxdma.csr);
723		if ((csr & 0x00020000) != 1)
724			dev_err(&etdev->pdev->dev,
725			"RX Dma failed to enter halt state. CSR 0x%08x\n",
726				csr);
727	}
728}
729
730/**
731 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
732 * @etdev: pointer to our adapter structure
733 */
734void et131x_rx_dma_enable(struct et131x_adapter *etdev)
735{
736	/* Setup the receive dma configuration register for normal operation */
737	u32 csr =  0x2000;	/* FBR1 enable */
738
739	if (etdev->rx_ring.Fbr1BufferSize == 4096)
740		csr |= 0x0800;
741	else if (etdev->rx_ring.Fbr1BufferSize == 8192)
742		csr |= 0x1000;
743	else if (etdev->rx_ring.Fbr1BufferSize == 16384)
744		csr |= 0x1800;
745#ifdef USE_FBR0
746	csr |= 0x0400;		/* FBR0 enable */
747	if (etdev->rx_ring.Fbr0BufferSize == 256)
748		csr |= 0x0100;
749	else if (etdev->rx_ring.Fbr0BufferSize == 512)
750		csr |= 0x0200;
751	else if (etdev->rx_ring.Fbr0BufferSize == 1024)
752		csr |= 0x0300;
753#endif
754	writel(csr, &etdev->regs->rxdma.csr);
755
756	csr = readl(&etdev->regs->rxdma.csr);
757	if ((csr & 0x00020000) != 0) {
758		udelay(5);
759		csr = readl(&etdev->regs->rxdma.csr);
760		if ((csr & 0x00020000) != 0) {
761			dev_err(&etdev->pdev->dev,
762			    "RX Dma failed to exit halt state.  CSR 0x%08x\n",
763				csr);
764		}
765	}
766}
767
768/**
769 * nic_rx_pkts - Checks the hardware for available packets
770 * @etdev: pointer to our adapter
771 *
772 * Returns rfd, a pointer to our MPRFD.
773 *
774 * Checks the hardware for available packets, using completion ring
775 * If packets are available, it gets an RFD from the RecvList, attaches
776 * the packet to it, puts the RFD in the RecvPendList, and also returns
777 * the pointer to the RFD.
778 */
779PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
780{
781	struct rx_ring *rx_local = &etdev->rx_ring;
782	struct rx_status_block *status;
783	struct pkt_stat_desc *psr;
784	PMP_RFD rfd;
785	u32 i;
786	u8 *buf;
787	unsigned long flags;
788	struct list_head *element;
789	u8 rindex;
790	u16 bindex;
791	u32 len;
792	u32 word0;
793	u32 word1;
794
795	/* RX Status block is written by the DMA engine prior to every
796	 * interrupt. It contains the next to be used entry in the Packet
797	 * Status Ring, and also the two Free Buffer rings.
798	 */
799	status = rx_local->rx_status_block;
800	word1 = status->Word1 >> 16;	/* Get the useful bits */
801
802	/* Check the PSR and wrap bits do not match */
803	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
804		/* Looks like this ring is not updated yet */
805		return NULL;
806
807	/* The packet status ring indicates that data is available. */
808	psr = (struct pkt_stat_desc *) (rx_local->pPSRingVa) +
809			(rx_local->local_psr_full & 0xFFF);
810
811	/* Grab any information that is required once the PSR is
812	 * advanced, since we can no longer rely on the memory being
813	 * accurate
814	 */
815	len = psr->word1 & 0xFFFF;
816	rindex = (psr->word1 >> 26) & 0x03;
817	bindex = (psr->word1 >> 16) & 0x3FF;
818	word0 = psr->word0;
819
820	/* Indicate that we have used this PSR entry. */
821	add_12bit(&rx_local->local_psr_full, 1);
822	if ((rx_local->local_psr_full & 0xFFF)  > rx_local->PsrNumEntries - 1) {
823		/* Clear psr full and toggle the wrap bit */
824		rx_local->local_psr_full &=  ~0xFFF;
825		rx_local->local_psr_full ^= 0x1000;
826	}
827
828	writel(rx_local->local_psr_full,
829	       &etdev->regs->rxdma.psr_full_offset);
830
831#ifndef USE_FBR0
832	if (rindex != 1)
833		return NULL;
834#endif
835
836#ifdef USE_FBR0
837	if (rindex > 1 ||
838		(rindex == 0 &&
839		bindex > rx_local->Fbr0NumEntries - 1) ||
840		(rindex == 1 &&
841		bindex > rx_local->Fbr1NumEntries - 1))
842#else
843	if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1)
844#endif
845	{
846		/* Illegal buffer or ring index cannot be used by S/W*/
847		dev_err(&etdev->pdev->dev,
848			  "NICRxPkts PSR Entry %d indicates "
849			  "length of %d and/or bad bi(%d)\n",
850			  rx_local->local_psr_full & 0xFFF,
851			  len, bindex);
852		return NULL;
853	}
854
855	/* Get and fill the RFD. */
856	spin_lock_irqsave(&etdev->RcvLock, flags);
857
858	rfd = NULL;
859	element = rx_local->RecvList.next;
860	rfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
861
862	if (rfd == NULL) {
863		spin_unlock_irqrestore(&etdev->RcvLock, flags);
864		return NULL;
865	}
866
867	list_del(&rfd->list_node);
868	rx_local->nReadyRecv--;
869
870	spin_unlock_irqrestore(&etdev->RcvLock, flags);
871
872	rfd->bufferindex = bindex;
873	rfd->ringindex = rindex;
874
875	/* In V1 silicon, there is a bug which screws up filtering of
876	 * runt packets.  Therefore runt packet filtering is disabled
877	 * in the MAC and the packets are dropped here.  They are
878	 * also counted here.
879	 */
880	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
881		etdev->Stats.other_errors++;
882		len = 0;
883	}
884
885	if (len) {
886		if (etdev->ReplicaPhyLoopbk == 1) {
887			buf = rx_local->fbr[rindex]->virt[bindex];
888
889			if (memcmp(&buf[6], &etdev->CurrentAddress[0],
890				   ETH_ALEN) == 0) {
891				if (memcmp(&buf[42], "Replica packet",
892					   ETH_HLEN)) {
893					etdev->ReplicaPhyLoopbkPF = 1;
894				}
895			}
896		}
897
898		/* Determine if this is a multicast packet coming in */
899		if ((word0 & ALCATEL_MULTICAST_PKT) &&
900		    !(word0 & ALCATEL_BROADCAST_PKT)) {
901			/* Promiscuous mode and Multicast mode are
902			 * not mutually exclusive as was first
903			 * thought.  I guess Promiscuous is just
904			 * considered a super-set of the other
905			 * filters. Generally filter is 0x2b when in
906			 * promiscuous mode.
907			 */
908			if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
909			    && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
910			    && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
911				buf = rx_local->fbr[rindex]->
912						virt[bindex];
913
914				/* Loop through our list to see if the
915				 * destination address of this packet
916				 * matches one in our list.
917				 */
918				for (i = 0;
919				     i < etdev->MCAddressCount;
920				     i++) {
921					if (buf[0] ==
922					    etdev->MCList[i][0]
923					    && buf[1] ==
924					    etdev->MCList[i][1]
925					    && buf[2] ==
926					    etdev->MCList[i][2]
927					    && buf[3] ==
928					    etdev->MCList[i][3]
929					    && buf[4] ==
930					    etdev->MCList[i][4]
931					    && buf[5] ==
932					    etdev->MCList[i][5]) {
933						break;
934					}
935				}
936
937				/* If our index is equal to the number
938				 * of Multicast address we have, then
939				 * this means we did not find this
940				 * packet's matching address in our
941				 * list.  Set the PacketSize to zero,
942				 * so we free our RFD when we return
943				 * from this function.
944				 */
945				if (i == etdev->MCAddressCount)
946					len = 0;
947			}
948
949			if (len > 0)
950				etdev->Stats.multircv++;
951		} else if (word0 & ALCATEL_BROADCAST_PKT)
952			etdev->Stats.brdcstrcv++;
953		else
954			/* Not sure what this counter measures in
955			 * promiscuous mode. Perhaps we should check
956			 * the MAC address to see if it is directed
957			 * to us in promiscuous mode.
958			 */
959			etdev->Stats.unircv++;
960	}
961
962	if (len > 0) {
963		struct sk_buff *skb = NULL;
964
965		/* rfd->PacketSize = len - 4; */
966		rfd->PacketSize = len;
967
968		skb = dev_alloc_skb(rfd->PacketSize + 2);
969		if (!skb) {
970			dev_err(&etdev->pdev->dev,
971				  "Couldn't alloc an SKB for Rx\n");
972			return NULL;
973		}
974
975		etdev->net_stats.rx_bytes += rfd->PacketSize;
976
977		memcpy(skb_put(skb, rfd->PacketSize),
978		       rx_local->fbr[rindex]->virt[bindex],
979		       rfd->PacketSize);
980
981		skb->dev = etdev->netdev;
982		skb->protocol = eth_type_trans(skb, etdev->netdev);
983		skb->ip_summed = CHECKSUM_NONE;
984
985		netif_rx(skb);
986	} else {
987		rfd->PacketSize = 0;
988	}
989
990	nic_return_rfd(etdev, rfd);
991	return rfd;
992}
993
994/**
995 * et131x_reset_recv - Reset the receive list
996 * @etdev: pointer to our adapter
997 *
998 * Assumption, Rcv spinlock has been acquired.
999 */
1000void et131x_reset_recv(struct et131x_adapter *etdev)
1001{
1002	WARN_ON(list_empty(&etdev->rx_ring.RecvList));
1003
1004}
1005
1006/**
1007 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1008 * @etdev: pointer to our adapter
1009 *
1010 * Assumption, Rcv spinlock has been acquired.
1011 */
1012void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1013{
1014	PMP_RFD rfd = NULL;
1015	u32 count = 0;
1016	bool done = true;
1017
1018	/* Process up to available RFD's */
1019	while (count < NUM_PACKETS_HANDLED) {
1020		if (list_empty(&etdev->rx_ring.RecvList)) {
1021			WARN_ON(etdev->rx_ring.nReadyRecv != 0);
1022			done = false;
1023			break;
1024		}
1025
1026		rfd = nic_rx_pkts(etdev);
1027
1028		if (rfd == NULL)
1029			break;
1030
1031		/* Do not receive any packets until a filter has been set.
1032		 * Do not receive any packets until we have link.
1033		 * If length is zero, return the RFD in order to advance the
1034		 * Free buffer ring.
1035		 */
1036		if (!etdev->PacketFilter ||
1037		    !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
1038		    rfd->PacketSize == 0) {
1039			continue;
1040		}
1041
1042		/* Increment the number of packets we received */
1043		etdev->Stats.ipackets++;
1044
1045		/* Set the status on the packet, either resources or success */
1046		if (etdev->rx_ring.nReadyRecv < RFD_LOW_WATER_MARK) {
1047			dev_warn(&etdev->pdev->dev,
1048				    "RFD's are running out\n");
1049		}
1050		count++;
1051	}
1052
1053	if (count == NUM_PACKETS_HANDLED || !done) {
1054		etdev->rx_ring.UnfinishedReceives = true;
1055		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1056		       &etdev->regs->global.watchdog_timer);
1057	} else
1058		/* Watchdog timer will disable itself if appropriate. */
1059		etdev->rx_ring.UnfinishedReceives = false;
1060}
1061
1062static inline u32 bump_fbr(u32 *fbr, u32 limit)
1063{
1064	u32 v = *fbr;
1065	v++;
1066	/* This works for all cases where limit < 1024. The 1023 case
1067	   works because 1023++ is 1024 which means the if condition is not
1068	   taken but the carry of the bit into the wrap bit toggles the wrap
1069	   value correctly */
1070	if ((v & ET_DMA10_MASK) > limit) {
1071		v &= ~ET_DMA10_MASK;
1072		v ^= ET_DMA10_WRAP;
1073	}
1074	/* For the 1023 case */
1075	v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1076	*fbr = v;
1077	return v;
1078}
1079
1080/**
 * nic_return_rfd - Recycle an RFD and put it back onto the receive list
1082 * @etdev: pointer to our adapter
1083 * @rfd: pointer to the RFD
1084 */
void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD rfd)
{
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	u16 bi = rfd->bufferindex;	/* buffer slot within the FBR */
	u8 ri = rfd->ringindex;		/* which free buffer ring (0 or 1) */
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	/* Recycle the data buffer only when its ring/index pair is valid;
	 * ring 0 exists only when USE_FBR0 is configured.
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			struct fbr_desc *next =
			    (struct fbr_desc *) (rx_local->pFbr1RingVa) +
					 INDEX10(rx_local->local_Fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed)FBR entry
			 */
			next->addr_hi = rx_local->fbr[1]->bus_high[bi];
			next->addr_lo = rx_local->fbr[1]->bus_low[bi];
			next->word2 = bi;

			/* Advance the local full pointer (10-bit index with
			 * wrap handling) and tell the hardware about the
			 * newly available buffer.
			 */
			writel(bump_fbr(&rx_local->local_Fbr1_full,
				rx_local->Fbr1NumEntries - 1),
				&rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			struct fbr_desc *next = (struct fbr_desc *)
				rx_local->pFbr0RingVa +
					INDEX10(rx_local->local_Fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[0]->bus_high[bi];
			next->addr_lo = rx_local->fbr[0]->bus_low[bi];
			next->word2 = bi;

			/* Same advancement as above, but for ring 0 */
			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		dev_err(&etdev->pdev->dev,
			  "NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&etdev->RcvLock, flags);
	list_add_tail(&rfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->RcvLock, flags);

	/* Sanity check: we should never hold more ready RFDs than exist */
	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}
1155