/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
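
/*
 * Editorial reading of the table above (not from the manual): each pair
 * of entries is one clock period, with the data bit set up on SI while
 * CLK is low and clocked out by the following CLK high, so the six "0"
 * pairs followed by two "1" pairs shift out 0000 0011b -- the READ
 * opcode named above -- most significant bit first.
 */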

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
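
/*
 * Usage sketch (editorial, not from the manual): the wrappers below all
 * expand to this routine, e.g.
 *
 *	he_writel_tcm(he_dev, 0, CONFIG_TSRA | (cid << 3));
 *
 * stages the value in CON_DAT, issues the address plus the CON_CTL_WRITE
 * command through CON_CTL, and spins until CON_CTL_BUSY clears.
 */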

#define he_writel_rcm(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
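
/*
 * Worked example (editorial, hypothetical split): with 10 vcibits,
 * he_mkcid(dev, 1, 32) = ((1 << 10) | 32) & 0x1fff = 0x420 -- the vpi
 * occupies the bits above the vci within the 13-bit connection id.
 */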

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
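
/*
 * Editorial note: the three CON_BYTE_DISABLE_* flags mask off byte lanes
 * 2..0 of the word, so only the most significant byte reaches TSR4 --
 * the byte-enable usage the page 2-20 note above calls for while a
 * transmit connection is active.
 */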

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
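
/*
 * Worked example (editorial): for ATM_OC3_PCR (353207 cps),
 * 353207 << 9 = 0xac76e00 takes 18 right shifts to fit in 10 bits,
 * leaving exp = 18 and rate = 689 = 0x2b1; the result is
 * NONZERO | (18 << 9) | (689 & 0x1ff) = 0x64b1 -- the same value the
 * 155 tables in he_init_cs_block() below program into CS_WCRMAX.
 */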

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}
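
	/*
	 * Worked numbers (editorial, 155 card): clock = 50000000 and
	 * rate = ATM_OC3_PCR = 353207 give period = 141 for the first
	 * entry; with delta = 353207/32 = 11037, the last entry uses
	 * rate = 353207 - 15*11037 = 187652, i.e. period = 266.
	 */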

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
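
	/*
	 * Illustrative values (editorial): row 0 runs from link_rate down
	 * in steps of link_rate/32; each later row halves the row above,
	 * except row 15, which is a quarter of row 14, so the grid spans
	 * the full link rate down to a few cells per second.
	 */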

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on the rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		 * instead of '/ 512', use '>> 9' to prevent a call
		 * to divdu3 on x86 platforms
		 */
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbrq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbrq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			sizeof(struct he_rbrq), he_dev->rbrq_base,
			he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			sizeof(struct he_rbp), he_dev->rbpl_base,
			he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
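/* i.e. 1 + 16 + 192, the bracketed 192 clocks presumably being
   1536/8 (622, 64-bit bus) or 768/4 (155, 32-bit bus) */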
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
		he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM",
		dev->esi[0],
		dev->esi[1],
		dev->esi[2],
		dev->esi[3],
		dev->esi[4],
		dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits + nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
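
	/*
	 * Worked numbers (editorial): a 155 card gets r0 = r1 =
	 * 512 * 20 / 4 = 2560 lbufs (exactly the cap) and
	 * tx = 1018 * 20 / 4 = 5090; a 622 card gets r0 = r1 =
	 * 256 * 40 / 4 = 2560 and tx = 512 * 40 / 4 = 5120.
	 */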

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs),		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len)					\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
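
/*
 * Editorial note: the AAL5 CPCS trailer is the last 8 bytes of the
 * reassembled PDU (UU, CPI, 16-bit length, 32-bit CRC), so the length
 * field sits at offsets len-6 and len-5 -- AAL5_LEN simply reads it
 * out big-endian.
 */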

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len)					\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		list_for_each_entry(heb, &he_vcc->buffers, entry)
			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);

		switch (vcc->qos.aal) {
		case ATM_AAL0:
			/* 2.10.1.5 raw cell receive */
			skb->len = ATM_AAL0_SDU;
			skb_set_tail_pointer(skb, skb->len);
			break;
		case ATM_AAL5:
			/* 2.10.1.2 aal5 receive */

			skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
			skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
			if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
				skb->ip_summed = CHECKSUM_COMPLETE;
				skb->csum = TCP_CKSUM(skb->data,
						he_vcc->pdu_len);
			}
#endif
			break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
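		/*
		 * Editorial note: the ready queue is a power-of-two ring,
		 * so OR-ing the masked, incremented head back onto the
		 * base pointer advances it with wrap-around; the tbrq
		 * service loop below uses the same idiom.
		 */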
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));
1805
1806	}
1807	read_unlock(&vcc_sklist_lock);
1808
1809	if (updated) {
1810		if (updated > he_dev->rbrq_peak)
1811			he_dev->rbrq_peak = updated;
1812
1813		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1814						G0_RBRQ_H + (group * 16));
1815	}
1816
1817	return pdus_assembled;
1818}
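
/*
 * All of the rings serviced here (rbrq, tbrq, tpdrq) advance with the
 * same idiom: a ring holds a power-of-two number of entries at an
 * aligned base address, so the next entry is just the incremented
 * pointer masked back into the ring.  An illustrative sketch with
 * hypothetical names (not part of the driver):
 */
#ifdef notdef
#define RING_ENTRIES	64	/* must be a power of two */
#define RING_MASK(p)	((unsigned long)(p) & \
				(RING_ENTRIES * sizeof(struct entry) - 1))

static struct entry *ring_next(struct entry *base, struct entry *cur)
{
	/* base is aligned, so its low bits are zero; or-ing in the
	   masked offset of cur + 1 wraps around the ring for free */
	return (struct entry *)((unsigned long)base | RING_MASK(cur + 1));
}
#endif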
1819
1820static void
1821he_service_tbrq(struct he_dev *he_dev, int group)
1822{
1823	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1824				((unsigned long)he_dev->tbrq_base |
1825					he_dev->hsp->group[group].tbrq_tail);
1826	struct he_tpd *tpd;
1827	int slot, updated = 0;
1828	struct he_tpd *__tpd;
1829
1830	/* 2.1.6 transmit buffer return queue */
1831
1832	while (he_dev->tbrq_head != tbrq_tail) {
1833		++updated;
1834
1835		HPRINTK("tbrq%d 0x%x%s%s\n",
1836			group,
1837			TBRQ_TPD(he_dev->tbrq_head),
1838			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1839			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1840		tpd = NULL;
1841		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1842			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1843				tpd = __tpd;
1844				list_del(&__tpd->entry);
1845				break;
1846			}
1847		}
1848
1849		if (tpd == NULL) {
1850			hprintk("unable to locate tpd for dma buffer %x\n",
1851						TBRQ_TPD(he_dev->tbrq_head));
1852			goto next_tbrq_entry;
1853		}
1854
1855		if (TBRQ_EOS(he_dev->tbrq_head)) {
1856			if (tpd->vcc) {	/* may be NULL; check before dereferencing */
1857				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1858					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1859				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1860			}
1861			goto next_tbrq_entry;
1862		}
1863
1864		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1865			if (tpd->iovec[slot].addr)
1866				pci_unmap_single(he_dev->pci_dev,
1867					tpd->iovec[slot].addr,
1868					tpd->iovec[slot].len & TPD_LEN_MASK,
1869							PCI_DMA_TODEVICE);
1870			if (tpd->iovec[slot].len & TPD_LST)
1871				break;
1872
1873		}
1874
1875		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1876			if (tpd->vcc && tpd->vcc->pop)
1877				tpd->vcc->pop(tpd->vcc, tpd->skb);
1878			else
1879				dev_kfree_skb_any(tpd->skb);
1880		}
1881
1882next_tbrq_entry:
1883		if (tpd)
1884			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1885		he_dev->tbrq_head = (struct he_tbrq *)
1886				((unsigned long) he_dev->tbrq_base |
1887					TBRQ_MASK(++he_dev->tbrq_head));
1888	}
1889
1890	if (updated) {
1891		if (updated > he_dev->tbrq_peak)
1892			he_dev->tbrq_peak = updated;
1893
1894		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1895						G0_TBRQ_H + (group * 16));
1896	}
1897}
1898
1899static void
1900he_service_rbpl(struct he_dev *he_dev, int group)
1901{
1902	struct he_rbp *new_tail;
1903	struct he_rbp *rbpl_head;
1904	struct he_buff *heb;
1905	dma_addr_t mapping;
1906	int i;
1907	int moved = 0;
1908
1909	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1910					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1911
1912	for (;;) {
1913		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1914						RBPL_MASK(he_dev->rbpl_tail+1));
1915
1916		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1917		if (new_tail == rbpl_head)
1918			break;
1919
1920		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1921		if (i > (RBPL_TABLE_SIZE - 1)) {
1922			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1923			if (i > (RBPL_TABLE_SIZE - 1))
1924				break;
1925		}
1926		he_dev->rbpl_hint = i + 1;
1927
1928		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1929		if (!heb)
1930			break;
1931		heb->mapping = mapping;
1932		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1933		he_dev->rbpl_virt[i] = heb;
1934		set_bit(i, he_dev->rbpl_table);
1935		new_tail->idx = i << RBP_IDX_OFFSET;
1936		new_tail->phys = mapping + offsetof(struct he_buff, data);
1937
1938		he_dev->rbpl_tail = new_tail;
1939		++moved;
1940	}
1941
1942	if (moved)
1943		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1944}
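
/*
 * he_service_rbpl() hands out buffer-pool slots from a bitmap, keeping
 * a rotating hint so each refill resumes scanning where the previous
 * one stopped instead of always starting at bit 0.  A minimal sketch
 * of the same idiom (hypothetical helper, not part of the driver):
 */
#ifdef notdef
static int alloc_slot(unsigned long *table, unsigned long size,
						unsigned long *hint)
{
	unsigned long i = find_next_zero_bit(table, size, *hint);

	if (i >= size) {
		/* ran off the end -- wrap and rescan from the start */
		i = find_first_zero_bit(table, size);
		if (i >= size)
			return -1;	/* no free slots */
	}
	set_bit(i, table);
	*hint = i + 1;
	return i;
}
#endif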
1945
1946static void
1947he_tasklet(unsigned long data)
1948{
1949	unsigned long flags;
1950	struct he_dev *he_dev = (struct he_dev *) data;
1951	int group, type;
1952	int updated = 0;
1953
1954	HPRINTK("tasklet (0x%lx)\n", data);
1955	spin_lock_irqsave(&he_dev->global_lock, flags);
1956
1957	while (he_dev->irq_head != he_dev->irq_tail) {
1958		++updated;
1959
1960		type = ITYPE_TYPE(he_dev->irq_head->isw);
1961		group = ITYPE_GROUP(he_dev->irq_head->isw);
1962
1963		switch (type) {
1964			case ITYPE_RBRQ_THRESH:
1965				HPRINTK("rbrq%d threshold\n", group);
1966				/* fall through */
1967			case ITYPE_RBRQ_TIMER:
1968				if (he_service_rbrq(he_dev, group))
1969					he_service_rbpl(he_dev, group);
1970				break;
1971			case ITYPE_TBRQ_THRESH:
1972				HPRINTK("tbrq%d threshold\n", group);
1973				/* fall through */
1974			case ITYPE_TPD_COMPLETE:
1975				he_service_tbrq(he_dev, group);
1976				break;
1977			case ITYPE_RBPL_THRESH:
1978				he_service_rbpl(he_dev, group);
1979				break;
1980			case ITYPE_RBPS_THRESH:
1981				/* shouldn't happen unless small buffers enabled */
1982				break;
1983			case ITYPE_PHY:
1984				HPRINTK("phy interrupt\n");
1985#ifdef CONFIG_ATM_HE_USE_SUNI
1986				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1987				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1988					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1989				spin_lock_irqsave(&he_dev->global_lock, flags);
1990#endif
1991				break;
1992			case ITYPE_OTHER:
1993				switch (type|group) {
1994					case ITYPE_PARITY:
1995						hprintk("parity error\n");
1996						break;
1997					case ITYPE_ABORT:
1998						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1999						break;
2000				}
2001				break;
2002			case ITYPE_TYPE(ITYPE_INVALID):
2003				/* see 8.1.1 -- check all queues */
2004
2005				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2006
2007				he_service_rbrq(he_dev, 0);
2008				he_service_rbpl(he_dev, 0);
2009				he_service_tbrq(he_dev, 0);
2010				break;
2011			default:
2012				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2013		}
2014
2015		he_dev->irq_head->isw = ITYPE_INVALID;
2016
2017		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2018	}
2019
2020	if (updated) {
2021		if (updated > he_dev->irq_peak)
2022			he_dev->irq_peak = updated;
2023
2024		he_writel(he_dev,
2025			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2026			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2027			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2028		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2029	}
2030	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2031}
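
/*
 * he_tasklet() runs in softirq context; the interrupt handler below
 * only snapshots the queue tail and schedules it.  The binding between
 * the two is established during device initialization, typically via
 * tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev),
 * which is why the data argument above is cast back to a he_dev pointer.
 */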
2032
2033static irqreturn_t
2034he_irq_handler(int irq, void *dev_id)
2035{
2036	unsigned long flags;
2037	struct he_dev *he_dev = (struct he_dev *) dev_id;
2038	int handled = 0;
2039
2040	if (he_dev == NULL)
2041		return IRQ_NONE;
2042
2043	spin_lock_irqsave(&he_dev->global_lock, flags);
2044
2045	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2046						(*he_dev->irq_tailoffset << 2));
2047
2048	if (he_dev->irq_tail == he_dev->irq_head) {
2049		HPRINTK("tailoffset not updated?\n");
2050		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2051			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2052		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2053	}
2054
2055#ifdef DEBUG
2056	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2057		hprintk("spurious (or shared) interrupt?\n");
2058#endif
2059
2060	if (he_dev->irq_head != he_dev->irq_tail) {
2061		handled = 1;
2062		tasklet_schedule(&he_dev->tasklet);
2063		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2064		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2065	}
2066	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2067	return IRQ_RETVAL(handled);
2068
2069}
2070
2071static __inline__ void
2072__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2073{
2074	struct he_tpdrq *new_tail;
2075
2076	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2077					tpd, cid, he_dev->tpdrq_tail);
2078
2079	/* new_tail = he_dev->tpdrq_tail; */
2080	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2081					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2082
2083	/*
2084	 * check to see if we are about to set the tail == head
2085	 * if true, update the head pointer from the adapter
2086	 * to see if this is really the case (reading the queue
2087	 * head for every enqueue would be unnecessarily slow)
2088	 */
2089
2090	if (new_tail == he_dev->tpdrq_head) {
2091		he_dev->tpdrq_head = (struct he_tpdrq *)
2092			(((unsigned long)he_dev->tpdrq_base) |
2093				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2094
2095		if (new_tail == he_dev->tpdrq_head) {
2096			int slot;
2097
2098			hprintk("tpdrq full (cid 0x%x)\n", cid);
2099			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2100				if (tpd->iovec[slot].addr)
2101					pci_unmap_single(he_dev->pci_dev,
2102						tpd->iovec[slot].addr,
2103						tpd->iovec[slot].len & TPD_LEN_MASK,
2104								PCI_DMA_TODEVICE);
2105			}
2106			if (tpd->skb) {
2107				if (tpd->vcc->pop)
2108					tpd->vcc->pop(tpd->vcc, tpd->skb);
2109				else
2110					dev_kfree_skb_any(tpd->skb);
2111				atomic_inc(&tpd->vcc->stats->tx_err);
2112			}
2113			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2114			return;
2115		}
2116	}
2117
2118	/* 2.1.5 transmit packet descriptor ready queue */
2119	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2120	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2121	he_dev->tpdrq_tail->cid = cid;
2122	wmb();
2123
2124	he_dev->tpdrq_tail = new_tail;
2125
2126	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2127	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2128}
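
/*
 * Note the he_readl() at the end of __enqueue_tpd(): PCI writes are
 * posted, so a write may still be sitting in a bridge buffer when the
 * CPU moves on.  Reading any register back from the same device forces
 * the write out to the adapter, which is why the driver follows every
 * ordering-sensitive register write with a throwaway read.
 */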
2129
2130static int
2131he_open(struct atm_vcc *vcc)
2132{
2133	unsigned long flags;
2134	struct he_dev *he_dev = HE_DEV(vcc->dev);
2135	struct he_vcc *he_vcc;
2136	int err = 0;
2137	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2138	short vpi = vcc->vpi;
2139	int vci = vcc->vci;
2140
2141	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2142		return 0;
2143
2144	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2145
2146	set_bit(ATM_VF_ADDR, &vcc->flags);
2147
2148	cid = he_mkcid(he_dev, vpi, vci);
2149
2150	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2151	if (he_vcc == NULL) {
2152		hprintk("unable to allocate he_vcc during open\n");
2153		return -ENOMEM;
2154	}
2155
2156	INIT_LIST_HEAD(&he_vcc->buffers);
2157	he_vcc->pdu_len = 0;
2158	he_vcc->rc_index = -1;
2159
2160	init_waitqueue_head(&he_vcc->rx_waitq);
2161	init_waitqueue_head(&he_vcc->tx_waitq);
2162
2163	vcc->dev_data = he_vcc;
2164
2165	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2166		int pcr_goal;
2167
2168		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2169		if (pcr_goal == 0)
2170			pcr_goal = he_dev->atm_dev->link_rate;
2171		if (pcr_goal < 0)	/* means round down, technically */
2172			pcr_goal = -pcr_goal;
2173
2174		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2175
2176		switch (vcc->qos.aal) {
2177			case ATM_AAL5:
2178				tsr0_aal = TSR0_AAL5;
2179				tsr4 = TSR4_AAL5;
2180				break;
2181			case ATM_AAL0:
2182				tsr0_aal = TSR0_AAL0_SDU;
2183				tsr4 = TSR4_AAL0_SDU;
2184				break;
2185			default:
2186				err = -EINVAL;
2187				goto open_failed;
2188		}
2189
2190		spin_lock_irqsave(&he_dev->global_lock, flags);
2191		tsr0 = he_readl_tsr0(he_dev, cid);
2192		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2193
2194		if (TSR0_CONN_STATE(tsr0) != 0) {
2195			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2196			err = -EBUSY;
2197			goto open_failed;
2198		}
2199
2200		switch (vcc->qos.txtp.traffic_class) {
2201			case ATM_UBR:
2202				/* 2.3.3.1 open connection ubr */
2203
2204				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2205					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2206				break;
2207
2208			case ATM_CBR:
2209				/* 2.3.3.2 open connection cbr */
2210
2211				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2212				if ((he_dev->total_bw + pcr_goal)
2213					> (he_dev->atm_dev->link_rate * 9 / 10))
2214				{
2215					err = -EBUSY;
2216					goto open_failed;
2217				}
2218
2219				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2220
2221				/* find an unused cs_stper register */
2222				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2223					if (he_dev->cs_stper[reg].inuse == 0 ||
2224					    he_dev->cs_stper[reg].pcr == pcr_goal)
2225							break;
2226
2227				if (reg == HE_NUM_CS_STPER) {
2228					err = -EBUSY;
2229					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2230					goto open_failed;
2231				}
2232
2233				he_dev->total_bw += pcr_goal;
2234
2235				he_vcc->rc_index = reg;
2236				++he_dev->cs_stper[reg].inuse;
2237				he_dev->cs_stper[reg].pcr = pcr_goal;
2238
2239				clock = he_is622(he_dev) ? 66667000 : 50000000;
2240				period = clock / pcr_goal;
2241
2242				HPRINTK("rc_index = %d period = %d\n",
2243								reg, period);
2244
2245				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2246							CS_STPER0 + reg);
2247				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2248
2249				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2250							TSR0_RC_INDEX(reg);
2251
2252				break;
2253			default:
2254				err = -EINVAL;
2255				goto open_failed;
2256		}
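		/*
		 * worked example for the CBR arithmetic above (illustrative
		 * numbers): on a 155 Mbit/s card the scheduler clock is
		 * 50000000, so pcr_goal = 100000 cells/s gives period =
		 * 50000000 / 100000 = 500; rate_to_atmf(period/2) is then
		 * written to the CS_STPER mailbox register for the chosen
		 * rate-class index
		 */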
2257
2258		spin_lock_irqsave(&he_dev->global_lock, flags);
2259
2260		he_writel_tsr0(he_dev, tsr0, cid);
2261		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2262		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2263					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2264		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2265		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2266
2267		he_writel_tsr3(he_dev, 0x0, cid);
2268		he_writel_tsr5(he_dev, 0x0, cid);
2269		he_writel_tsr6(he_dev, 0x0, cid);
2270		he_writel_tsr7(he_dev, 0x0, cid);
2271		he_writel_tsr8(he_dev, 0x0, cid);
2272		he_writel_tsr10(he_dev, 0x0, cid);
2273		he_writel_tsr11(he_dev, 0x0, cid);
2274		he_writel_tsr12(he_dev, 0x0, cid);
2275		he_writel_tsr13(he_dev, 0x0, cid);
2276		he_writel_tsr14(he_dev, 0x0, cid);
2277		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2278		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2279	}
2280
2281	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2282		unsigned aal;
2283
2284		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2285					&HE_VCC(vcc)->rx_waitq);
2286
2287		switch (vcc->qos.aal) {
2288			case ATM_AAL5:
2289				aal = RSR0_AAL5;
2290				break;
2291			case ATM_AAL0:
2292				aal = RSR0_RAWCELL;
2293				break;
2294			default:
2295				err = -EINVAL;
2296				goto open_failed;
2297		}
2298
2299		spin_lock_irqsave(&he_dev->global_lock, flags);
2300
2301		rsr0 = he_readl_rsr0(he_dev, cid);
2302		if (rsr0 & RSR0_OPEN_CONN) {
2303			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2304
2305			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2306			err = -EBUSY;
2307			goto open_failed;
2308		}
2309
2310		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2311		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2312		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2313				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2314
2315#ifdef USE_CHECKSUM_HW
2316		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2317			rsr0 |= RSR0_TCP_CKSUM;
2318#endif
2319
2320		he_writel_rsr4(he_dev, rsr4, cid);
2321		he_writel_rsr1(he_dev, rsr1, cid);
2322		/* 5.1.11 last parameter initialized should be
2323			  the open/closed indication in rsr0 */
2324		he_writel_rsr0(he_dev,
2325			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2326		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2327
2328		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2329	}
2330
2331open_failed:
2332
2333	if (err) {
2334		kfree(he_vcc);
2335		clear_bit(ATM_VF_ADDR, &vcc->flags);
2336	}
2337	else
2338		set_bit(ATM_VF_READY, &vcc->flags);
2339
2340	return err;
2341}
2342
2343static void
2344he_close(struct atm_vcc *vcc)
2345{
2346	unsigned long flags;
2347	DECLARE_WAITQUEUE(wait, current);
2348	struct he_dev *he_dev = HE_DEV(vcc->dev);
2349	struct he_tpd *tpd;
2350	unsigned cid;
2351	struct he_vcc *he_vcc = HE_VCC(vcc);
2352#define MAX_RETRY 30
2353	int retry = 0, sleep = 1, tx_inuse;
2354
2355	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2356
2357	clear_bit(ATM_VF_READY, &vcc->flags);
2358	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2359
2360	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2361		int timeout;
2362
2363		HPRINTK("close rx cid 0x%x\n", cid);
2364
2365		/* 2.7.2.2 close receive operation */
2366
2367		/* wait for previous close (if any) to finish */
2368
2369		spin_lock_irqsave(&he_dev->global_lock, flags);
2370		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2371			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2372			udelay(250);
2373		}
2374
2375		set_current_state(TASK_UNINTERRUPTIBLE);
2376		add_wait_queue(&he_vcc->rx_waitq, &wait);
2377
2378		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2379		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2380		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2381		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2382
2383		timeout = schedule_timeout(30*HZ);
2384
2385		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2386		set_current_state(TASK_RUNNING);
2387
2388		if (timeout == 0)
2389			hprintk("close rx timeout cid 0x%x\n", cid);
2390
2391		HPRINTK("close rx cid 0x%x complete\n", cid);
2392
2393	}
2394
2395	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2396		volatile unsigned tsr4, tsr0;
2397		int timeout;
2398
2399		HPRINTK("close tx cid 0x%x\n", cid);
2400
2401		/* 2.1.2
2402		 *
2403		 * ... the host must first stop queueing packets to the TPDRQ
2404		 * on the connection to be closed, then wait for all outstanding
2405		 * packets to be transmitted and their buffers returned to the
2406		 * TBRQ. When the last packet on the connection arrives in the
2407		 * TBRQ, the host issues the close command to the adapter.
2408		 */
2409
2410		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2411		       (retry < MAX_RETRY)) {
2412			msleep(sleep);
2413			if (sleep < 250)
2414				sleep = sleep * 2;
2415
2416			++retry;
2417		}
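		/* the poll above backs off exponentially -- 1, 2, 4, ... ms,
		 * clamping once the delay reaches 256 ms -- for at most
		 * MAX_RETRY iterations before giving up */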
2418
2419		if (tx_inuse > 1)
2420			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2421
2422		/* 2.3.1.1 generic close operations with flush */
2423
2424		spin_lock_irqsave(&he_dev->global_lock, flags);
2425		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2426					/* also clears TSR4_SESSION_ENDED */
2427
2428		switch (vcc->qos.txtp.traffic_class) {
2429			case ATM_UBR:
2430				he_writel_tsr1(he_dev,
2431					TSR1_MCR(rate_to_atmf(200000))
2432					| TSR1_PCR(0), cid);
2433				break;
2434			case ATM_CBR:
2435				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2436				break;
2437		}
2438		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2439
2440		tpd = __alloc_tpd(he_dev);
2441		if (tpd == NULL) {
2442			hprintk("close tx __alloc_tpd failed cid 0x%x\n", cid);
2443			goto close_tx_incomplete;
2444		}
2445		tpd->status |= TPD_EOS | TPD_INT;
2446		tpd->skb = NULL;
2447		tpd->vcc = vcc;
2448		wmb();
2449
2450		set_current_state(TASK_UNINTERRUPTIBLE);
2451		add_wait_queue(&he_vcc->tx_waitq, &wait);
2452		__enqueue_tpd(he_dev, tpd, cid);
2453		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2454
2455		timeout = schedule_timeout(30*HZ);
2456
2457		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2458		set_current_state(TASK_RUNNING);
2459
2460		spin_lock_irqsave(&he_dev->global_lock, flags);
2461
2462		if (timeout == 0) {
2463			hprintk("close tx timeout cid 0x%x\n", cid);
2464			goto close_tx_incomplete;
2465		}
2466
2467		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2468			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2469			udelay(250);
2470		}
2471
2472		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2473			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2474			udelay(250);
2475		}
2476
2477close_tx_incomplete:
2478
2479		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2480			int reg = he_vcc->rc_index;
2481
2482			HPRINTK("cs_stper reg = %d\n", reg);
2483
2484			if (he_dev->cs_stper[reg].inuse == 0)
2485				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2486			else
2487				--he_dev->cs_stper[reg].inuse;
2488
2489			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2490		}
2491		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2492
2493		HPRINTK("close tx cid 0x%x complete\n", cid);
2494	}
2495
2496	kfree(he_vcc);
2497
2498	clear_bit(ATM_VF_ADDR, &vcc->flags);
2499}
2500
2501static int
2502he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2503{
2504	unsigned long flags;
2505	struct he_dev *he_dev = HE_DEV(vcc->dev);
2506	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2507	struct he_tpd *tpd;
2508#ifdef USE_SCATTERGATHER
2509	int i, slot = 0;
2510#endif
2511
2512#define HE_TPD_BUFSIZE 0xffff
2513
2514	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2515
2516	if ((skb->len > HE_TPD_BUFSIZE) ||
2517	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2518		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2519		if (vcc->pop)
2520			vcc->pop(vcc, skb);
2521		else
2522			dev_kfree_skb_any(skb);
2523		atomic_inc(&vcc->stats->tx_err);
2524		return -EINVAL;
2525	}
2526
2527#ifndef USE_SCATTERGATHER
2528	if (skb_shinfo(skb)->nr_frags) {
2529		hprintk("no scatter/gather support\n");
2530		if (vcc->pop)
2531			vcc->pop(vcc, skb);
2532		else
2533			dev_kfree_skb_any(skb);
2534		atomic_inc(&vcc->stats->tx_err);
2535		return -EINVAL;
2536	}
2537#endif
2538	spin_lock_irqsave(&he_dev->global_lock, flags);
2539
2540	tpd = __alloc_tpd(he_dev);
2541	if (tpd == NULL) {
2542		if (vcc->pop)
2543			vcc->pop(vcc, skb);
2544		else
2545			dev_kfree_skb_any(skb);
2546		atomic_inc(&vcc->stats->tx_err);
2547		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2548		return -ENOMEM;
2549	}
2550
2551	if (vcc->qos.aal == ATM_AAL5)
2552		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2553	else {
2554		char *pti_clp = (void *) (skb->data + 3);
2555		int clp, pti;
2556
2557		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2558		clp = (*pti_clp & ATM_HDR_CLP);
2559		tpd->status |= TPD_CELLTYPE(pti);
2560		if (clp)
2561			tpd->status |= TPD_CLP;
2562
2563		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2564	}
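	/*
	 * for raw cells the PTI and CLP bits sit in the fourth octet of
	 * the 4-byte header prepended to the 48-byte payload; once they
	 * have been copied into the TPD, the header is pulled off so
	 * only the payload is handed to the adapter
	 */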
2565
2566#ifdef USE_SCATTERGATHER
2567	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2568				skb_headlen(skb), PCI_DMA_TODEVICE);
2569	tpd->iovec[slot].len = skb_headlen(skb);
2570	++slot;
2571
2572	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2573		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2574
2575		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2576			tpd->vcc = vcc;
2577			tpd->skb = NULL;	/* not the last fragment
2578						   so don't ->push() yet */
2579			wmb();
2580
2581			__enqueue_tpd(he_dev, tpd, cid);
2582			tpd = __alloc_tpd(he_dev);
2583			if (tpd == NULL) {
2584				if (vcc->pop)
2585					vcc->pop(vcc, skb);
2586				else
2587					dev_kfree_skb_any(skb);
2588				atomic_inc(&vcc->stats->tx_err);
2589				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2590				return -ENOMEM;
2591			}
2592			tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2593			slot = 0;
2594		}
2595
2596		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2597			(void *) page_address(frag->page) + frag->page_offset,
2598				frag->size, PCI_DMA_TODEVICE);
2599		tpd->iovec[slot].len = frag->size;
2600		++slot;
2601
2602	}
2603
2604	tpd->iovec[slot - 1].len |= TPD_LST;
2605#else
2606	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2607	tpd->length0 = skb->len | TPD_LST;
2608#endif
2609	tpd->status |= TPD_INT;
2610
2611	tpd->vcc = vcc;
2612	tpd->skb = skb;
2613	wmb();
2614	ATM_SKB(skb)->vcc = vcc;
2615
2616	__enqueue_tpd(he_dev, tpd, cid);
2617	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2618
2619	atomic_inc(&vcc->stats->tx);
2620
2621	return 0;
2622}
2623
2624static int
2625he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2626{
2627	unsigned long flags;
2628	struct he_dev *he_dev = HE_DEV(atm_dev);
2629	struct he_ioctl_reg reg;
2630	int err = 0;
2631
2632	switch (cmd) {
2633		case HE_GET_REG:
2634			if (!capable(CAP_NET_ADMIN))
2635				return -EPERM;
2636
2637			if (copy_from_user(&reg, arg,
2638					   sizeof(struct he_ioctl_reg)))
2639				return -EFAULT;
2640
2641			spin_lock_irqsave(&he_dev->global_lock, flags);
2642			switch (reg.type) {
2643				case HE_REGTYPE_PCI:
2644					if (reg.addr >= HE_REGMAP_SIZE) {
2645						err = -EINVAL;
2646						break;
2647					}
2648
2649					reg.val = he_readl(he_dev, reg.addr);
2650					break;
2651				case HE_REGTYPE_RCM:
2652					reg.val =
2653						he_readl_rcm(he_dev, reg.addr);
2654					break;
2655				case HE_REGTYPE_TCM:
2656					reg.val =
2657						he_readl_tcm(he_dev, reg.addr);
2658					break;
2659				case HE_REGTYPE_MBOX:
2660					reg.val =
2661						he_readl_mbox(he_dev, reg.addr);
2662					break;
2663				default:
2664					err = -EINVAL;
2665					break;
2666			}
2667			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2668			if (err == 0)
2669				if (copy_to_user(arg, &reg,
2670							sizeof(struct he_ioctl_reg)))
2671					return -EFAULT;
2672			break;
2673		default:
2674#ifdef CONFIG_ATM_HE_USE_SUNI
2675			if (atm_dev->phy && atm_dev->phy->ioctl)
2676				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2677#else /* CONFIG_ATM_HE_USE_SUNI */
2678			err = -EINVAL;
2679#endif /* CONFIG_ATM_HE_USE_SUNI */
2680			break;
2681	}
2682
2683	return err;
2684}
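
/*
 * A plausible userspace use of HE_GET_REG (a sketch, not part of the
 * driver, assuming the usual atmif_sioc plumbing for ATM device
 * ioctls; needs <stdio.h>, <sys/ioctl.h>, <linux/atmdev.h> and
 * <linux/atm_he.h>):
 */
#ifdef notdef
static void dump_he_reg(int sock_fd)	/* sock_fd: an open ATM socket */
{
	struct he_ioctl_reg ioc = {
		.addr = 0x0,		/* register offset to read */
		.type = HE_REGTYPE_PCI,
	};
	struct atmif_sioc sioc = {
		.number = 0,		/* ATM device number */
		.arg = &ioc,
		.length = sizeof(ioc),
	};

	if (ioctl(sock_fd, HE_GET_REG, &sioc) == 0)
		printf("reg 0x%x = 0x%x\n", ioc.addr, ioc.val);
}
#endif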
2685
2686static void
2687he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2688{
2689	unsigned long flags;
2690	struct he_dev *he_dev = HE_DEV(atm_dev);
2691
2692	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2693
2694	spin_lock_irqsave(&he_dev->global_lock, flags);
2695	he_writel(he_dev, val, FRAMER + (addr*4));
2696	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2697	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2698}
2699
2700
2701static unsigned char
2702he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2703{
2704	unsigned long flags;
2705	struct he_dev *he_dev = HE_DEV(atm_dev);
2706	unsigned reg;
2707
2708	spin_lock_irqsave(&he_dev->global_lock, flags);
2709	reg = he_readl(he_dev, FRAMER + (addr*4));
2710	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2711
2712	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2713	return reg;
2714}
2715
2716static int
2717he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2718{
2719	unsigned long flags;
2720	struct he_dev *he_dev = HE_DEV(dev);
2721	int left, i;
2722#ifdef notdef
2723	struct he_rbrq *rbrq_tail;
2724	struct he_tpdrq *tpdrq_head;
2725	int rbpl_head, rbpl_tail, inuse;
2726#endif
2727	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2728
2729
2730	left = *pos;
2731	if (!left--)
2732		return sprintf(page, "ATM he driver\n");
2733
2734	if (!left--)
2735		return sprintf(page, "%s%s\n\n",
2736			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2737
2738	if (!left--)
2739		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2740
2741	spin_lock_irqsave(&he_dev->global_lock, flags);
2742	mcc += he_readl(he_dev, MCC);
2743	oec += he_readl(he_dev, OEC);
2744	dcc += he_readl(he_dev, DCC);
2745	cec += he_readl(he_dev, CEC);
2746	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2747
2748	if (!left--)
2749		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2750							mcc, oec, dcc, cec);
2751
2752	if (!left--)
2753		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2754				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2755
2756	if (!left--)
2757		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2758						CONFIG_TPDRQ_SIZE);
2759
2760	if (!left--)
2761		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2762				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2763
2764	if (!left--)
2765		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2766					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2767
2768
2769#ifdef notdef
2770	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2771	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2772
2773	inuse = rbpl_head - rbpl_tail;
2774	if (inuse < 0)
2775		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2776	inuse /= sizeof(struct he_rbp);
2777
2778	if (!left--)
2779		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2780						CONFIG_RBPL_SIZE, inuse);
2781#endif
2782
2783	if (!left--)
2784		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2785
2786	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2787		if (!left--)
2788			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2789						he_dev->cs_stper[i].pcr,
2790						he_dev->cs_stper[i].inuse);
2791
2792	if (!left--)
2793		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2794			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2795
2796	return 0;
2797}
2798
2799/* eeprom routines  -- see 4.7 */
2800
2801static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2802{
2803	u32 val = 0, tmp_read = 0;
2804	int i, j = 0;
2805	u8 byte_read = 0;
2806
2807	val = readl(he_dev->membase + HOST_CNTL);
2808	val &= 0xFFFFE0FF;
2809
2810	/* Turn on write enable */
2811	val |= 0x800;
2812	he_writel(he_dev, val, HOST_CNTL);
2813
2814	/* Send READ instruction */
2815	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2816		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2817		udelay(EEPROM_DELAY);
2818	}
2819
2820	/* Next, we need to send the byte address to read from */
2821	for (i = 7; i >= 0; i--) {
2822		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2823		udelay(EEPROM_DELAY);
2824		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2825		udelay(EEPROM_DELAY);
2826	}
2827
2828	j = 0;
2829
2830	val &= 0xFFFFF7FF;      /* Turn off write enable */
2831	he_writel(he_dev, val, HOST_CNTL);
2832
2833	/* Now, we can read data from the EEPROM by clocking it in */
2834	for (i = 7; i >= 0; i--) {
2835		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2836		udelay(EEPROM_DELAY);
2837		tmp_read = he_readl(he_dev, HOST_CNTL);
2838		byte_read |= (unsigned char)
2839			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2840		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2841		udelay(EEPROM_DELAY);
2842	}
2843
2844	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2845	udelay(EEPROM_DELAY);
2846
2847	return byte_read;
2848}
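
/*
 * read_prom_byte() bit-bangs the serial EEPROM: the READ opcode and
 * address are clocked out MSB first, then eight data bits are clocked
 * back in and packed MSB first.  For example (illustrative), DOUT
 * samples of 1,0,1,1,0,0,1,0 across the eight reads assemble to 0xb2.
 */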
2849
2850MODULE_LICENSE("GPL");
2851MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2852MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2853module_param(disable64, bool, 0);
2854MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2855module_param(nvpibits, short, 0);
2856	MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2857module_param(nvcibits, short, 0);
2858	MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2859module_param(rx_skb_reserve, short, 0);
2860MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2861module_param(irq_coalesce, bool, 0);
2862MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2863module_param(sdh, bool, 0);
2864MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
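
/*
 * example (hypothetical values): load the driver with interrupt
 * coalescing disabled and SDH framing enabled:
 *
 *	modprobe he irq_coalesce=0 sdh=1
 */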
2865
2866static struct pci_device_id he_pci_tbl[] = {
2867	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2868	{ 0, }
2869};
2870
2871MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2872
2873static struct pci_driver he_driver = {
2874	.name =		"he",
2875	.probe =	he_init_one,
2876	.remove =	__devexit_p(he_remove_one),
2877	.id_table =	he_pci_tbl,
2878};
2879
2880static int __init he_init(void)
2881{
2882	return pci_register_driver(&he_driver);
2883}
2884
2885static void __exit he_cleanup(void)
2886{
2887	pci_unregister_driver(&he_driver);
2888}
2889
2890module_init(he_init);
2891module_exit(he_cleanup);
2892