1/*
2 * Copyright (c) 2001-2003
3 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * 	All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * ForeHE driver.
30 *
31 * This file contains the module and driver infrastructure stuff as well
32 * as a couple of utility functions and the entire initialisation.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm.c 117687 2003-07-17 13:43:16Z harti $");
37
38#include "opt_inet.h"
39#include "opt_natm.h"
40
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/kernel.h>
46#include <sys/bus.h>
47#include <sys/errno.h>
48#include <sys/conf.h>
49#include <sys/module.h>
50#include <sys/queue.h>
51#include <sys/syslog.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/condvar.h>
55#include <sys/sysctl.h>
56#include <vm/uma.h>
57
58#include <sys/sockio.h>
59#include <sys/mbuf.h>
60#include <sys/socket.h>
61
62#include <net/if.h>
63#include <net/if_media.h>
64#include <net/if_atm.h>
65#include <net/route.h>
66#ifdef ENABLE_BPF
67#include <net/bpf.h>
68#endif
69#include <netinet/in.h>
70#include <netinet/if_atm.h>
71
72#include <machine/bus.h>
73#include <machine/resource.h>
74#include <sys/bus.h>
75#include <sys/rman.h>
76#include <pci/pcireg.h>
77#include <pci/pcivar.h>
78
79#include <dev/utopia/utopia.h>
80#include <dev/hatm/if_hatmconf.h>
81#include <dev/hatm/if_hatmreg.h>
82#include <dev/hatm/if_hatmvar.h>
83
84static const struct {
85	uint16_t	vid;
86	uint16_t	did;
87	const char	*name;
88} hatm_devs[] = {
89	{ 0x1127, 0x400,
90	  "FORE HE" },
91	{ 0, 0, NULL }
92};
93
94SYSCTL_DECL(_hw_atm);
95
96MODULE_DEPEND(hatm, utopia, 1, 1, 1);
97MODULE_DEPEND(hatm, pci, 1, 1, 1);
98MODULE_DEPEND(hatm, atm, 1, 1, 1);
99
100#define EEPROM_DELAY	400 /* microseconds */
101
102/* Read from EEPROM 0000 0011b */
103static const uint32_t readtab[] = {
104	HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK,
105	0,
106	HE_REGM_HOST_PROM_CLOCK,
107	0,				/* 0 */
108	HE_REGM_HOST_PROM_CLOCK,
109	0,				/* 0 */
110	HE_REGM_HOST_PROM_CLOCK,
111	0,				/* 0 */
112	HE_REGM_HOST_PROM_CLOCK,
113	0,				/* 0 */
114	HE_REGM_HOST_PROM_CLOCK,
115	0,				/* 0 */
116	HE_REGM_HOST_PROM_CLOCK,
117	HE_REGM_HOST_PROM_DATA_IN,	/* 0 */
118	HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
119	HE_REGM_HOST_PROM_DATA_IN,	/* 1 */
120	HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
121	HE_REGM_HOST_PROM_DATA_IN,	/* 1 */
122};
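/*
 * One low/high clock level per table entry - two entries (one full
 * clock cycle) are consumed for every address and data bit.
 */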
123static const uint32_t clocktab[] = {
124	0, HE_REGM_HOST_PROM_CLOCK,
125	0, HE_REGM_HOST_PROM_CLOCK,
126	0, HE_REGM_HOST_PROM_CLOCK,
127	0, HE_REGM_HOST_PROM_CLOCK,
128	0, HE_REGM_HOST_PROM_CLOCK,
129	0, HE_REGM_HOST_PROM_CLOCK,
130	0, HE_REGM_HOST_PROM_CLOCK,
131	0, HE_REGM_HOST_PROM_CLOCK,
132	0
133};
134
135/*
136 * Convert cell rate to ATM Forum format
137 */
138u_int
139hatm_cps2atmf(uint32_t pcr)
140{
141	u_int e;
142
143	if (pcr == 0)
144		return (0);
145	pcr <<= 9;
146	e = 0;
147	while (pcr > (1024 - 1)) {
148		e++;
149		pcr >>= 1;
150	}
151	return ((1 << 14) | (e << 9) | (pcr & 0x1ff));
152}
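
/*
 * Convert an ATM Forum format rate back into cells per second.
 *
 * Worked example (added for illustration, not from the original source):
 * a PCR of 353208 cells/sec shifted left by 9 and normalized into the
 * range 512..1023 needs 18 right shifts and leaves 689 = 512 + 177, so
 * hatm_cps2atmf() returns (1 << 14) | (18 << 9) | 177; feeding that back
 * into hatm_atmf2cps() yields 2^18 * (512 + 177) / 512 = 352768 cells/sec,
 * the small loss being due to the 9-bit mantissa truncation.
 */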
153u_int
154hatm_atmf2cps(uint32_t fcr)
155{
156	fcr &= 0x7fff;
157
158	return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512
159	  * (fcr >> 14));
160}
161
162/************************************************************
163 *
164 * Initialisation
165 */
166/*
167 * Probe for a HE controller
168 */
169static int
170hatm_probe(device_t dev)
171{
172	int i;
173
174	for (i = 0; hatm_devs[i].name; i++)
175		if (pci_get_vendor(dev) == hatm_devs[i].vid &&
176		    pci_get_device(dev) == hatm_devs[i].did) {
177			device_set_desc(dev, hatm_devs[i].name);
178			return (0);
179		}
180	return (ENXIO);
181}
182
183/*
184 * Allocate and map DMA-able memory. We support only contiguous mappings.
185 */
186static void
187dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
188{
189	if (error)
190		return;
191	KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs));
192	KASSERT(segs[0].ds_addr <= 0xffffffffUL,
193	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));
194
195	*(bus_addr_t *)arg = segs[0].ds_addr;
196}
197static int
198hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem)
199{
200	int error;
201
202	mem->base = NULL;
203
204	/*
205	 * Alignment does not work in the bus_dmamem_alloc function below
206	 * on FreeBSD. malloc seems to align objects at least to the object
207	 * size, so increase the size to the alignment if the size is smaller
208	 * than the alignment.
209	 * XXX on sparc64 this is (probably) not needed.
210	 */
211	if (mem->size < mem->align)
212		mem->size = mem->align;
213
214	error = bus_dma_tag_create(sc->parent_tag, mem->align, 0,
215	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
216	    NULL, NULL, mem->size, 1,
217	    BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
218	    NULL, NULL, &mem->tag);
219	if (error) {
220		if_printf(&sc->ifatm.ifnet, "DMA tag create (%s)\n", what);
221		return (error);
222	}
223
224	error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map);
225	if (error) {
226		if_printf(&sc->ifatm.ifnet, "DMA mem alloc (%s): %d\n",
227		    what, error);
228		bus_dma_tag_destroy(mem->tag);
229		mem->base = NULL;
230		return (error);
231	}
232
233	error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size,
234	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
235	if (error) {
236		if_printf(&sc->ifatm.ifnet, "DMA map load (%s): %d\n",
237		    what, error);
238		bus_dmamem_free(mem->tag, mem->base, mem->map);
239		bus_dma_tag_destroy(mem->tag);
240		mem->base = NULL;
241		return (error);
242	}
243
244	DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size,
245	    mem->align, mem->base, (u_long)mem->paddr));
246
247	return (0);
248}
249
250/*
251 * Destroy all the resources of an DMA-able memory region.
252 */
253static void
254hatm_destroy_dmamem(struct dmamem *mem)
255{
256	if (mem->base != NULL) {
257		bus_dmamap_unload(mem->tag, mem->map);
258		bus_dmamem_free(mem->tag, mem->base, mem->map);
259		(void)bus_dma_tag_destroy(mem->tag);
260		mem->base = NULL;
261	}
262}
263
264/*
265 * Initialize/destroy DMA maps for the large pool 0
266 */
267static void
268hatm_destroy_rmaps(struct hatm_softc *sc)
269{
270	u_int b;
271
272	DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers..."));
273	if (sc->rmaps != NULL) {
274		for (b = 0; b < sc->lbufs_size; b++)
275			bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]);
276		free(sc->rmaps, M_DEVBUF);
277	}
278	if (sc->lbufs != NULL)
279		free(sc->lbufs, M_DEVBUF);
280}
281
282static void
283hatm_init_rmaps(struct hatm_softc *sc)
284{
285	u_int b;
286	int err;
287
288	DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers..."));
289	sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size,
290	    M_DEVBUF, M_ZERO | M_WAITOK);
291
292	/* allocate and create the DMA maps for the large pool */
293	sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size,
294	    M_DEVBUF, M_WAITOK);
295	for (b = 0; b < sc->lbufs_size; b++) {
296		err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]);
297		if (err != 0)
298			panic("bus_dmamap_create: %d\n", err);
299	}
300}
301
302/*
303 * Initialize and destroy small mbuf page pointers and pages
304 */
305static void
306hatm_destroy_smbufs(struct hatm_softc *sc)
307{
308	u_int i, b;
309	struct mbuf_page *pg;
310
311	if (sc->mbuf_pages != NULL) {
312		for (i = 0; i < sc->mbuf_npages; i++) {
313			pg = sc->mbuf_pages[i];
314			for (b = 0; b < pg->hdr.nchunks; b++) {
315				if (MBUF_TST_BIT(pg->hdr.card, b))
316					if_printf(&sc->ifatm.ifnet,
317					    "%s -- mbuf page=%u card buf %u\n",
318					    __func__, i, b);
319				if (MBUF_TST_BIT(pg->hdr.used, b))
320					if_printf(&sc->ifatm.ifnet,
321					    "%s -- mbuf page=%u used buf %u\n",
322					    __func__, i, b);
323			}
324			bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
325			bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
326			free(pg, M_DEVBUF);
327		}
328		free(sc->mbuf_pages, M_DEVBUF);
329	}
330}
331
332static void
333hatm_init_smbufs(struct hatm_softc *sc)
334{
335	sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) *
336	    HE_CONFIG_MAX_MBUF_PAGES, M_DEVBUF, M_WAITOK);
337	sc->mbuf_npages = 0;
338}
339
340/*
341 * Initialize/destroy TPDs. This is called from attach/detach.
342 */
343static void
344hatm_destroy_tpds(struct hatm_softc *sc)
345{
346	struct tpd *t;
347
348	if (sc->tpds.base == NULL)
349		return;
350
351	DBG(sc, ATTACH, ("releasing TPDs ..."));
352	if (sc->tpd_nfree != sc->tpd_total)
353		if_printf(&sc->ifatm.ifnet, "%u of %u TPDs still in use\n",
354		    sc->tpd_total - sc->tpd_nfree, sc->tpd_total);
355	while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
356		SLIST_REMOVE_HEAD(&sc->tpd_free, link);
357		bus_dmamap_destroy(sc->tx_tag, t->map);
358	}
359	hatm_destroy_dmamem(&sc->tpds);
360	free(sc->tpd_used, M_DEVBUF);
361	DBG(sc, ATTACH, ("... done"));
362}
363static int
364hatm_init_tpds(struct hatm_softc *sc)
365{
366	int error;
367	u_int i;
368	struct tpd *t;
369
370	DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total));
371	error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds);
372	if (error != 0) {
373		DBG(sc, ATTACH, ("... dmamem error=%d", error));
374		return (error);
375	}
376
377	/* put all the TPDs on the free list and allocate DMA maps */
378	for (i = 0; i < sc->tpd_total; i++) {
379		t = TPD_ADDR(sc, i);
380		t->no = i;
381		t->mbuf = NULL;
382		error = bus_dmamap_create(sc->tx_tag, 0, &t->map);
383		if (error != 0) {
384			DBG(sc, ATTACH, ("... dmamap error=%d", error));
385			while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
386				SLIST_REMOVE_HEAD(&sc->tpd_free, link);
387				bus_dmamap_destroy(sc->tx_tag, t->map);
388			}
389			hatm_destroy_dmamem(&sc->tpds);
390			return (error);
391		}
392
393		SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
394	}
395
396	/* allocate and zero bitmap */
397	sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8,
398	    M_DEVBUF, M_ZERO | M_WAITOK);
399	sc->tpd_nfree = sc->tpd_total;
400
401	DBG(sc, ATTACH, ("... done"));
402
403	return (0);
404}
405
406/*
407 * Free all the TPDs that were given to the card.
408 * An mbuf chain may be attached to a TPD - free it also and
409 * unload its associated DMA map.
410 */
411static void
412hatm_stop_tpds(struct hatm_softc *sc)
413{
414	u_int i;
415	struct tpd *t;
416
417	DBG(sc, ATTACH, ("free TPDs ..."));
418	for (i = 0; i < sc->tpd_total; i++) {
419		if (TPD_TST_USED(sc, i)) {
420			t = TPD_ADDR(sc, i);
421			if (t->mbuf) {
422				m_freem(t->mbuf);
423				t->mbuf = NULL;
424				bus_dmamap_unload(sc->tx_tag, t->map);
425			}
426			TPD_CLR_USED(sc, i);
427			SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
428			sc->tpd_nfree++;
429		}
430	}
431}
432
433/*
434 * This frees ALL resources of this interface and leaves the structure
435 * in an indeterminate state. This is called just before detaching or
436 * on a failed attach. No lock should be held.
437 */
438static void
439hatm_destroy(struct hatm_softc *sc)
440{
441	bus_teardown_intr(sc->dev, sc->irqres, sc->ih);
442
443	hatm_destroy_rmaps(sc);
444	hatm_destroy_smbufs(sc);
445	hatm_destroy_tpds(sc);
446
447	if (sc->vcc_zone != NULL)
448		uma_zdestroy(sc->vcc_zone);
449
450	/*
451	 * Release all memory allocated to the various queues and
452	 * status pages. These have their own flag which shows whether
453	 * they are really allocated.
454	 */
455	hatm_destroy_dmamem(&sc->irq_0.mem);
456	hatm_destroy_dmamem(&sc->rbp_s0.mem);
457	hatm_destroy_dmamem(&sc->rbp_l0.mem);
458	hatm_destroy_dmamem(&sc->rbp_s1.mem);
459	hatm_destroy_dmamem(&sc->rbrq_0.mem);
460	hatm_destroy_dmamem(&sc->rbrq_1.mem);
461	hatm_destroy_dmamem(&sc->tbrq.mem);
462	hatm_destroy_dmamem(&sc->tpdrq.mem);
463	hatm_destroy_dmamem(&sc->hsp_mem);
464
465	if (sc->irqres != NULL)
466		bus_release_resource(sc->dev, SYS_RES_IRQ,
467		    sc->irqid, sc->irqres);
468
469	if (sc->tx_tag != NULL)
470		if (bus_dma_tag_destroy(sc->tx_tag))
471			if_printf(&sc->ifatm.ifnet, "TX DMA tag busy\n");
472
473	if (sc->mbuf_tag != NULL)
474		if (bus_dma_tag_destroy(sc->mbuf_tag))
475			if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n");
476
477	if (sc->parent_tag != NULL)
478		if (bus_dma_tag_destroy(sc->parent_tag))
479			if_printf(&sc->ifatm.ifnet, "parent DMA tag busy\n");
480
481	if (sc->memres != NULL)
482		bus_release_resource(sc->dev, SYS_RES_MEMORY,
483		    sc->memid, sc->memres);
484
485	sysctl_ctx_free(&sc->sysctl_ctx);
486
487	cv_destroy(&sc->cv_rcclose);
488	cv_destroy(&sc->vcc_cv);
489	mtx_destroy(&sc->mbuf0_mtx);
490	mtx_destroy(&sc->mbuf1_mtx);
491	mtx_destroy(&sc->mtx);
492}
493
494/*
495 * 4.4 Card reset
496 */
497static int
498hatm_reset(struct hatm_softc *sc)
499{
500	u_int v, count;
501
502	WRITE4(sc, HE_REGO_RESET_CNTL, 0x00);
503	BARRIER_W(sc);
504	WRITE4(sc, HE_REGO_RESET_CNTL, 0xff);
505	BARRIER_RW(sc);
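	/* poll the reset-complete bit for up to 100 * 1 ms */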
506	count = 0;
507	while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) {
508		BARRIER_R(sc);
509		if (++count == 100) {
510			if_printf(&sc->ifatm.ifnet, "reset failed\n");
511			return (ENXIO);
512		}
513		DELAY(1000);
514	}
515	return (0);
516}
517
518/*
519 * 4.5 Set Bus Width
520 */
521static void
522hatm_init_bus_width(struct hatm_softc *sc)
523{
524	uint32_t v, v1;
525
526	v = READ4(sc, HE_REGO_HOST_CNTL);
527	BARRIER_R(sc);
528	if (v & HE_REGM_HOST_BUS64) {
529		sc->pci64 = 1;
530		v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
531		v1 |= HE_PCIM_CTL0_64BIT;
532		pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4);
533
534		v |= HE_REGM_HOST_DESC_RD64
535		    | HE_REGM_HOST_DATA_RD64
536		    | HE_REGM_HOST_DATA_WR64;
537		WRITE4(sc, HE_REGO_HOST_CNTL, v);
538		BARRIER_W(sc);
539	} else {
540		sc->pci64 = 0;
541		v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
542		v &= ~HE_PCIM_CTL0_64BIT;
543		pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
544	}
545}
546
547/*
548 * 4.6 Set Host Endianness
549 */
550static void
551hatm_init_endianess(struct hatm_softc *sc)
552{
553	uint32_t v;
554
555	v = READ4(sc, HE_REGO_LB_SWAP);
556	BARRIER_R(sc);
557#if BYTE_ORDER == BIG_ENDIAN
558	v |= HE_REGM_LBSWAP_INTR_SWAP |
559	    HE_REGM_LBSWAP_DESC_WR_SWAP |
560	    HE_REGM_LBSWAP_BIG_ENDIAN;
561	v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
562	    HE_REGM_LBSWAP_DESC_RD_SWAP |
563	    HE_REGM_LBSWAP_DATA_RD_SWAP);
564#else
565	v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
566	    HE_REGM_LBSWAP_DESC_RD_SWAP |
567	    HE_REGM_LBSWAP_DATA_RD_SWAP |
568	    HE_REGM_LBSWAP_INTR_SWAP |
569	    HE_REGM_LBSWAP_DESC_WR_SWAP |
570	    HE_REGM_LBSWAP_BIG_ENDIAN);
571#endif
572
573	if (sc->he622)
574		v |= HE_REGM_LBSWAP_XFER_SIZE;
575
576	WRITE4(sc, HE_REGO_LB_SWAP, v);
577	BARRIER_W(sc);
578}
579
580/*
581 * 4.7 Read EEPROM
582 */
583static uint8_t
584hatm_read_prom_byte(struct hatm_softc *sc, u_int addr)
585{
586	uint32_t val, tmp_read, byte_read;
587	u_int i, j;
588	int n;
589
590	val = READ4(sc, HE_REGO_HOST_CNTL);
591	val &= HE_REGM_HOST_PROM_BITS;
592	BARRIER_R(sc);
593
594	val |= HE_REGM_HOST_PROM_WREN;
595	WRITE4(sc, HE_REGO_HOST_CNTL, val);
596	BARRIER_W(sc);
597
598	/* send READ */
599	for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) {
600		WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]);
601		BARRIER_W(sc);
602		DELAY(EEPROM_DELAY);
603	}
604
605	/* send ADDRESS */
606	for (n = 7, j = 0; n >= 0; n--) {
607		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
608		    (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
609		BARRIER_W(sc);
610		DELAY(EEPROM_DELAY);
611		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
612		    (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
613		BARRIER_W(sc);
614		DELAY(EEPROM_DELAY);
615	}
616
617	val &= ~HE_REGM_HOST_PROM_WREN;
618	WRITE4(sc, HE_REGO_HOST_CNTL, val);
619	BARRIER_W(sc);
620
621	/* read DATA */
622	byte_read = 0;
623	for (n = 7, j = 0; n >= 0; n--) {
624		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
625		BARRIER_W(sc);
626		DELAY(EEPROM_DELAY);
627		tmp_read = READ4(sc, HE_REGO_HOST_CNTL);
628		byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT)
629				>> HE_REGS_HOST_PROM_DATA_OUT) << n);
630		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
631		BARRIER_W(sc);
632		DELAY(EEPROM_DELAY);
633	}
634	WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
635	BARRIER_W(sc);
636	DELAY(EEPROM_DELAY);
637
638	return (byte_read);
639}
640
641static void
642hatm_init_read_eeprom(struct hatm_softc *sc)
643{
644	u_int n, count;
645	u_char byte;
646	uint32_t v;
647
648	for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) {
649		byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count);
650		if (n > 0 || byte != ' ')
651			sc->prod_id[n++] = byte;
652	}
653	while (n > 0 && sc->prod_id[n-1] == ' ')
654		n--;
655	sc->prod_id[n] = '\0';
656
657	for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) {
658		byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count);
659		if (n > 0 || byte != ' ')
660			sc->rev[n++] = byte;
661	}
662	while (n > 0 && sc->rev[n-1] == ' ')
663		n--;
664	sc->rev[n] = '\0';
665	sc->ifatm.mib.hw_version = sc->rev[0];
666
667	sc->ifatm.mib.serial =  hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0;
668	sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8;
669	sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16;
670	sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24;
671
672	v =  hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0;
673	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8;
674	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16;
675	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24;
676
677	switch (v) {
678	  case HE_MEDIA_UTP155:
679		sc->ifatm.mib.media = IFM_ATM_UTP_155;
680		sc->ifatm.mib.pcr = ATM_RATE_155M;
681		break;
682
683	  case HE_MEDIA_MMF155:
684		sc->ifatm.mib.media = IFM_ATM_MM_155;
685		sc->ifatm.mib.pcr = ATM_RATE_155M;
686		break;
687
688	  case HE_MEDIA_MMF622:
689		sc->ifatm.mib.media = IFM_ATM_MM_622;
690		sc->ifatm.mib.device = ATM_DEVICE_HE622;
691		sc->ifatm.mib.pcr = ATM_RATE_622M;
692		sc->he622 = 1;
693		break;
694
695	  case HE_MEDIA_SMF155:
696		sc->ifatm.mib.media = IFM_ATM_SM_155;
697		sc->ifatm.mib.pcr = ATM_RATE_155M;
698		break;
699
700	  case HE_MEDIA_SMF622:
701		sc->ifatm.mib.media = IFM_ATM_SM_622;
702		sc->ifatm.mib.device = ATM_DEVICE_HE622;
703		sc->ifatm.mib.pcr = ATM_RATE_622M;
704		sc->he622 = 1;
705		break;
706	}
707
708	sc->ifatm.mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0);
709	sc->ifatm.mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1);
710	sc->ifatm.mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2);
711	sc->ifatm.mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3);
712	sc->ifatm.mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4);
713	sc->ifatm.mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5);
714}
715
716/*
717 * Clear unused interrupt queue
718 */
719static void
720hatm_clear_irq(struct hatm_softc *sc, u_int group)
721{
722	WRITE4(sc, HE_REGO_IRQ_BASE(group), 0);
723	WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0);
724	WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0);
725	WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
726}
727
728/*
729 * 4.10 Initialize interrupt queues
730 */
731static void
732hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group)
733{
734	u_int i;
735
736	if (q->size == 0) {
737		hatm_clear_irq(sc, group);
738		return;
739	}
740
741	q->group = group;
742	q->sc = sc;
743	q->irq = q->mem.base;
744	q->head = 0;
745	q->tailp = q->irq + (q->size - 1);
746	*q->tailp = 0;
747
748	for (i = 0; i < q->size; i++)
749		q->irq[i] = HE_REGM_ITYPE_INVALID;
750
751	WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr);
752	WRITE4(sc, HE_REGO_IRQ_HEAD(group),
753	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
754	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH));
755	WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line);
756	WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
757}
758
759/*
760 * 5.1.3 Initialize connection memory
761 */
762static void
763hatm_init_cm(struct hatm_softc *sc)
764{
765	u_int rsra, mlbm, rabr, numbuffs;
766	u_int tsra, tabr, mtpd;
767	u_int n;
768
769	for (n = 0; n < HE_CONFIG_TXMEM; n++)
770		WRITE_TCM4(sc, n, 0);
771	for (n = 0; n < HE_CONFIG_RXMEM; n++)
772		WRITE_RCM4(sc, n, 0);
773
774	numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs;
775
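	/*
	 * Carve up the connection memories: on the receive side the
	 * RSR-A area (8 entries per VCC) is followed by the local buffer
	 * map (2 words per buffer), the ABR area and RSR-B; on the
	 * transmit side TSR-A..D are followed by the ABR area and the
	 * TPD area. Start offsets are rounded up to the alignment the
	 * chip requires.
	 */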
776	rsra = 0;
777	mlbm = ((rsra + sc->ifatm.mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
778	rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
779	sc->rsrb = ((rabr + 2048) + (2 * sc->ifatm.mib.max_vccs - 1)) &
780	    ~(2 * sc->ifatm.mib.max_vccs - 1);
781
782	tsra = 0;
783	sc->tsrb = tsra + sc->ifatm.mib.max_vccs * 8;
784	sc->tsrc = sc->tsrb + sc->ifatm.mib.max_vccs * 4;
785	sc->tsrd = sc->tsrc + sc->ifatm.mib.max_vccs * 2;
786	tabr = sc->tsrd + sc->ifatm.mib.max_vccs * 1;
787	mtpd = ((tabr + 1024) + (16 * sc->ifatm.mib.max_vccs - 1)) &
788	    ~(16 * sc->ifatm.mib.max_vccs - 1);
789
790	DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
791	    rsra, mlbm, rabr, sc->rsrb));
792	DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x",
793	    tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd));
794
795	WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb);
796	WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc);
797	WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd);
798	WRITE4(sc, HE_REGO_TMABR_BA, tabr);
799	WRITE4(sc, HE_REGO_TPD_BA, mtpd);
800
801	WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb);
802	WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm);
803	WRITE4(sc, HE_REGO_RCMABR_BA, rabr);
804
805	BARRIER_W(sc);
806}
807
808/*
809 * 5.1.4 Initialize Local buffer Pools
810 */
811static void
812hatm_init_rx_buffer_pool(struct hatm_softc *sc,
813	u_int num,		/* bank */
814	u_int start,		/* start row */
815	u_int numbuffs		/* number of entries */
816)
817{
818	u_int row_size;		/* bytes per row */
819	uint32_t row_addr;	/* start address of this row */
820	u_int lbuf_size;	/* bytes per lbuf */
821	u_int lbufs_per_row;	/* number of lbufs per memory row */
822	uint32_t lbufd_index;	/* index of lbuf descriptor */
823	uint32_t lbufd_addr;	/* address of lbuf descriptor */
824	u_int lbuf_row_cnt;	/* current lbuf in current row */
825	uint32_t lbuf_addr;	/* address of current buffer */
826	u_int i;
827
828	row_size = sc->bytes_per_row;
829	row_addr = start * row_size;
830	lbuf_size = sc->cells_per_lbuf * 48;
831	lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
832
833	/* descriptor index */
834	lbufd_index = num;
835
836	/* 2 words per entry */
837	lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
838
839	/* write head of queue */
840	WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index);
841
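	/*
	 * Each receive local-buffer descriptor in RCM takes two words:
	 * the buffer address in units of 32 bytes and the index of the
	 * next descriptor, chaining the buffers into a free list.
	 */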
842	lbuf_row_cnt = 0;
843	for (i = 0; i < numbuffs; i++) {
844		lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
845
846		WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
847
848		lbufd_index += 2;
849		WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
850
851		if (++lbuf_row_cnt == lbufs_per_row) {
852			lbuf_row_cnt = 0;
853			row_addr += row_size;
854		}
855
856		lbufd_addr += 2 * 2;
857	}
858
859	WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2);
860	WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs);
861
862	BARRIER_W(sc);
863}
864
865static void
866hatm_init_tx_buffer_pool(struct hatm_softc *sc,
867	u_int start,		/* start row */
868	u_int numbuffs		/* number of entries */
869)
870{
871	u_int row_size;		/* bytes per row */
872	uint32_t row_addr;	/* start address of this row */
873	u_int lbuf_size;	/* bytes per lbuf */
874	u_int lbufs_per_row;	/* number of lbufs per memory row */
875	uint32_t lbufd_index;	/* index of lbuf descriptor */
876	uint32_t lbufd_addr;	/* address of lbuf descriptor */
877	u_int lbuf_row_cnt;	/* current lbuf in current row */
878	uint32_t lbuf_addr;	/* address of current buffer */
879	u_int i;
880
881	row_size = sc->bytes_per_row;
882	row_addr = start * row_size;
883	lbuf_size = sc->cells_per_lbuf * 48;
884	lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
885
886	/* descriptor index */
887	lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs;
888
889	/* 2 words per entry */
890	lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
891
892	/* write head of queue */
893	WRITE4(sc, HE_REGO_TLBF_H, lbufd_index);
894
895	lbuf_row_cnt = 0;
896	for (i = 0; i < numbuffs; i++) {
897		lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
898
899		WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
900		lbufd_index++;
901		WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
902
903		if (++lbuf_row_cnt == lbufs_per_row) {
904			lbuf_row_cnt = 0;
905			row_addr += row_size;
906		}
907
908		lbufd_addr += 2;
909	}
910
911	WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1);
912	BARRIER_W(sc);
913}
914
915/*
916 * 5.1.5 Initialize Intermediate Receive Queues
917 */
918static void
919hatm_init_imed_queues(struct hatm_softc *sc)
920{
921	u_int n;
922
923	if (sc->he622) {
924		for (n = 0; n < 8; n++) {
925			WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f);
926			WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f);
927		}
928	} else {
929		for (n = 0; n < 8; n++) {
930			WRITE4(sc, HE_REGO_INMQ_S(n), n);
931			WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8);
932		}
933	}
934}
935
936/*
937 * 5.1.7 Init CS block
938 */
939static void
940hatm_init_cs_block(struct hatm_softc *sc)
941{
942	u_int n, i;
943	u_int clkfreg, cellrate, decr, tmp;
944	static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR;
945	static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL;
946	static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT;
947	static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR;
948	static const uint32_t rtatr[2] = HE_REGT_CS_RTATR;
949	static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC;
950	static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF;
951
952	/* Clear Rate Controller Start Times and Occupied Flags */
953	for (n = 0; n < 32; n++)
954		WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0);
955
956	clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
957	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
958	decr = cellrate / 32;
959
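	/*
	 * Load the 16 rate group timers: each entry holds the number of
	 * clock ticks per cell (minus one) for a cell rate that drops
	 * by 1/32 of the link rate from group to group.
	 */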
960	for (n = 0; n < 16; n++) {
961		tmp = clkfreg / cellrate;
962		WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1);
963		cellrate -= decr;
964	}
965
966	i = (sc->cells_per_lbuf == 2) ? 0
967	   :(sc->cells_per_lbuf == 4) ? 1
968	   :                            2;
969
970	/* table 5.2 */
971	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]);
972	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]);
973	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]);
974	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]);
975	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]);
976
977	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]);
978	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]);
979	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]);
980
981	WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]);
982	WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]);
983
984	WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]);
985	WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]);
986
987	WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]);
988	WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]);
989	WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]);
990	WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]);
991	WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]);
992	WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]);
993
994	WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]);
995	WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]);
996
997	WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8);
998
999	for (n = 0; n < 8; n++)
1000		WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0);
1001}
1002
1003/*
1004 * 5.1.8 CS Block Connection Memory Initialisation
1005 */
1006static void
1007hatm_init_cs_block_cm(struct hatm_softc *sc)
1008{
1009	u_int n, i;
1010	u_int expt, mant, etrm, wcr, ttnrm, tnrm;
1011	uint32_t rate;
1012	uint32_t clkfreq, cellrate, decr;
1013	uint32_t *rg, rtg, val = 0;
1014	uint64_t drate;
1015	u_int buf, buf_limit;
1016	uint32_t base = READ4(sc, HE_REGO_RCMABR_BA);
1017
1018	for (n = 0; n < HE_REGL_CM_GQTBL; n++)
1019		WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0);
1020	for (n = 0; n < HE_REGL_CM_RGTBL; n++)
1021		WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0);
1022
1023	tnrm = 0;
1024	for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) {
1025		expt = (n >> 5) & 0x1f;
1026		mant = ((n & 0x18) << 4) | 0x7f;
1027		wcr = (1 << expt) * (mant + 512) / 512;
1028		etrm = n & 0x7;
1029		ttnrm = wcr / 10 / (1 << etrm);
1030		if (ttnrm > 255)
1031			ttnrm = 255;
1032		else if(ttnrm < 2)
1033			ttnrm = 2;
1034		tnrm = (tnrm << 8) | (ttnrm & 0xff);
1035		if (n % 4 == 0)
1036			WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm);
1037	}
1038
1039	clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
1040	buf_limit = 4;
1041
1042	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1043	decr = cellrate / 32;
1044
1045	/* compute GRID top row in 1000 * cps */
1046	for (n = 0; n < 16; n++) {
1047		u_int interval = clkfreq / cellrate;
1048		sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval;
1049		cellrate -= decr;
1050	}
1051
1052	/* compute the other rows according to 2.4 */
1053	for (i = 1; i < 16; i++)
1054		for (n = 0; n < 16; n++)
1055			sc->rate_grid[i][n] = sc->rate_grid[i-1][n] /
1056			    ((i < 14) ? 2 : 4);
1057
1058	/* first entry is line rate */
1059	n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M);
1060	expt = (n >> 9) & 0x1f;
1061	mant = n & 0x1f0;
1062	sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512;
1063
1064	/* now build the conversion table - each 32 bit word contains
1065	 * two entries - this gives a total of 0x400 16 bit entries.
1066	 * This table maps a truncated ATMF rate value to a rate grid index. */
1067	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1068	rg = &sc->rate_grid[15][15];
1069
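	/*
	 * For each table entry: decode the rate, search the grid
	 * backwards from the slowest entry for the matching rate class
	 * and compute a transmit buffer limit proportional to the
	 * rate's share of the link rate, clamped to [1, buf_limit].
	 */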
1070	for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) {
1071		/* unpack the ATMF rate */
1072		expt = rate >> 5;
1073		mant = (rate & 0x1f) << 4;
1074
1075		/* get the cell rate - minimum is 10 per second */
1076		drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512;
1077		if (drate < 10 * 1000)
1078			drate = 10 * 1000;
1079
1080		/* now look up the grid index */
1081		while (drate >= *rg && rg-- > &sc->rate_grid[0][0])
1082			;
1083		rg++;
1084		rtg = rg - &sc->rate_grid[0][0];
1085
1086		/* now compute the buffer limit */
1087		buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000;
1088		if (buf == 0)
1089			buf = 1;
1090		else if (buf > buf_limit)
1091			buf = buf_limit;
1092
1093		/* make value */
1094		val = (val << 16) | (rtg << 8) | buf;
1095
1096		/* write */
1097		if (rate % 2 == 1)
1098			WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val);
1099	}
1100}
1101
1102/*
1103 * Clear an unused receive group buffer pool
1104 */
1105static void
1106hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large)
1107{
1108	WRITE4(sc, HE_REGO_RBP_S(large, group), 0);
1109	WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1110	WRITE4(sc, HE_REGO_RBP_QI(large, group), 1);
1111	WRITE4(sc, HE_REGO_RBP_BL(large, group), 0);
1112}
1113
1114/*
1115 * Initialize a receive group buffer pool
1116 */
1117static void
1118hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group,
1119    u_int large)
1120{
1121	if (q->size == 0) {
1122		hatm_clear_rpool(sc, group, large);
1123		return;
1124	}
1125
1126	bzero(q->mem.base, q->mem.size);
1127	q->rbp = q->mem.base;
1128	q->head = q->tail = 0;
1129
1130	DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large],
1131	    (u_long)q->mem.paddr));
1132
1133	WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr);
1134	WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1135	WRITE4(sc, HE_REGO_RBP_QI(large, group),
1136	    ((q->size - 1) << HE_REGS_RBP_SIZE) |
1137	    HE_REGM_RBP_INTR_ENB |
1138	    (q->thresh << HE_REGS_RBP_THRESH));
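	/* buffer size in 32-bit words, rounded down to an even count */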
1139	WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1);
1140}
1141
1142/*
1143 * Clear an unused receive buffer return queue
1144 */
1145static void
1146hatm_clear_rbrq(struct hatm_softc *sc, u_int group)
1147{
1148	WRITE4(sc, HE_REGO_RBRQ_ST(group), 0);
1149	WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1150	WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH));
1151	WRITE4(sc, HE_REGO_RBRQ_I(group), 0);
1152}
1153
1154/*
1155 * Initialize receive buffer return queue
1156 */
1157static void
1158hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
1159{
1160	if (rq->size == 0) {
1161		hatm_clear_rbrq(sc, group);
1162		return;
1163	}
1164
1165	rq->rbrq = rq->mem.base;
1166	rq->head = 0;
1167
1168	DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr));
1169
1170	WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr);
1171	WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1172	WRITE4(sc, HE_REGO_RBRQ_Q(group),
1173	    (rq->thresh << HE_REGS_RBRQ_THRESH) |
1174	    ((rq->size - 1) << HE_REGS_RBRQ_SIZE));
1175	WRITE4(sc, HE_REGO_RBRQ_I(group),
1176	    (rq->tout << HE_REGS_RBRQ_TIME) |
1177	    (rq->pcnt << HE_REGS_RBRQ_COUNT));
1178}
1179
1180/*
1181 * Clear an unused transmit buffer return queue N
1182 */
1183static void
1184hatm_clear_tbrq(struct hatm_softc *sc, u_int group)
1185{
1186	WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0);
1187	WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1188	WRITE4(sc, HE_REGO_TBRQ_S(group), 0);
1189	WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1);
1190}
1191
1192/*
1193 * Initialize transmit buffer return queue N
1194 */
1195static void
1196hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group)
1197{
1198	if (tq->size == 0) {
1199		hatm_clear_tbrq(sc, group);
1200		return;
1201	}
1202
1203	tq->tbrq = tq->mem.base;
1204	tq->head = 0;
1205
1206	DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr));
1207
1208	WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr);
1209	WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1210	WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1);
1211	WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh);
1212}
1213
1214/*
1215 * Initialize TPDRQ
1216 */
1217static void
1218hatm_init_tpdrq(struct hatm_softc *sc)
1219{
1220	struct hetpdrq *tq;
1221
1222	tq = &sc->tpdrq;
1223	tq->tpdrq = tq->mem.base;
1224	tq->tail = tq->head = 0;
1225
1226	DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr));
1227
1228	WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr);
1229	WRITE4(sc, HE_REGO_TPDRQ_T, 0);
1230	WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1);
1231}
1232
1233/*
1234 * This function is called by the network code to (re-)start the card.
1235 */
1236static void
1237hatm_init(void *p)
1238{
1239	struct hatm_softc *sc = p;
1240
1241	mtx_lock(&sc->mtx);
1242	hatm_stop(sc);
1243	hatm_initialize(sc);
1244	mtx_unlock(&sc->mtx);
1245}
1246
1247enum {
1248	CTL_STATS,
1249	CTL_ISTATS,
1250};
1251
1252/*
1253 * Sysctl handler
1254 */
1255static int
1256hatm_sysctl(SYSCTL_HANDLER_ARGS)
1257{
1258	struct hatm_softc *sc = arg1;
1259	uint32_t *ret;
1260	int error;
1261	size_t len;
1262
1263	switch (arg2) {
1264
1265	  case CTL_STATS:
1266		len = sizeof(uint32_t) * 4;
1267		break;
1268
1269	  case CTL_ISTATS:
1270		len = sizeof(sc->istats);
1271		break;
1272
1273	  default:
1274		panic("bad control code");
1275	}
1276
1277	ret = malloc(len, M_TEMP, M_WAITOK);
1278	mtx_lock(&sc->mtx);
1279
1280	switch (arg2) {
1281
1282	  case CTL_STATS:
1283		ret[0] = READ4(sc, HE_REGO_MCC);
1284		ret[1] = READ4(sc, HE_REGO_OEC);
1285		ret[2] = READ4(sc, HE_REGO_DCC);
1286		ret[3] = READ4(sc, HE_REGO_CEC);
1287		break;
1288
1289	  case CTL_ISTATS:
1290		bcopy(&sc->istats, ret, sizeof(sc->istats));
1291		break;
1292	}
1293	mtx_unlock(&sc->mtx);
1294
1295	error = SYSCTL_OUT(req, ret, len);
1296	free(ret, M_TEMP);
1297
1298	return (error);
1299}
1300
1301static int
1302kenv_getuint(struct hatm_softc *sc, const char *var,
1303    u_int *ptr, u_int def, int rw)
1304{
1305	char full[IFNAMSIZ + 3 + 20];
1306	char *val, *end;
1307	u_int u;
1308
1309	*ptr = def;
1310
1311	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1312	    OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL)
1313		return (ENOMEM);
1314
1315	snprintf(full, sizeof(full), "hw.%s.%s",
1316	    device_get_nameunit(sc->dev), var);
1317
1318	if ((val = getenv(full)) == NULL)
1319		return (0);
1320	u = strtoul(val, &end, 0);
1321	if (end == val || *end != '\0') {
1322		freeenv(val);
1323		return (EINVAL);
1324	}
1325	if (bootverbose)
1326		if_printf(&sc->ifatm.ifnet, "%s=%u\n", full, u);
1327	*ptr = u;
1328	return (0);
1329}
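
/*
 * Example (not in the original source): setting hw.hatm0.rbrq0.size=512
 * in /boot/loader.conf overrides HE_CONFIG_RBRQ0_SIZE for unit 0; the
 * value also shows up as the read-only sysctl hw.atm.hatm0.rbrq0.size.
 */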
1330
1331/*
1332 * Set configurable parameters. Many of these are configurable via
1333 * kenv.
1334 */
1335static int
1336hatm_configure(struct hatm_softc *sc)
1337{
1338	/* Receive buffer pool 0 small */
1339	kenv_getuint(sc, "rbps0.size", &sc->rbp_s0.size,
1340	    HE_CONFIG_RBPS0_SIZE, 0);
1341	kenv_getuint(sc, "rbps0.thresh", &sc->rbp_s0.thresh,
1342	    HE_CONFIG_RBPS0_THRESH, 0);
1343	sc->rbp_s0.bsize = MBUF0_SIZE;
1344
1345	/* Receive buffer pool 0 large */
1346	kenv_getuint(sc, "rbpl0.size", &sc->rbp_l0.size,
1347	    HE_CONFIG_RBPL0_SIZE, 0);
1348	kenv_getuint(sc, "rbpl0.thresh", &sc->rbp_l0.thresh,
1349	    HE_CONFIG_RBPL0_THRESH, 0);
1350	sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;
1351
1352	/* Receive buffer return queue 0 */
1353	kenv_getuint(sc, "rbrq0.size", &sc->rbrq_0.size,
1354	    HE_CONFIG_RBRQ0_SIZE, 0);
1355	kenv_getuint(sc, "rbrq0.thresh", &sc->rbrq_0.thresh,
1356	    HE_CONFIG_RBRQ0_THRESH, 0);
1357	kenv_getuint(sc, "rbrq0.tout", &sc->rbrq_0.tout,
1358	    HE_CONFIG_RBRQ0_TOUT, 0);
1359	kenv_getuint(sc, "rbrq0.pcnt", &sc->rbrq_0.pcnt,
1360	    HE_CONFIG_RBRQ0_PCNT, 0);
1361
1362	/* Receive buffer pool 1 small */
1363	kenv_getuint(sc, "rbps1.size", &sc->rbp_s1.size,
1364	    HE_CONFIG_RBPS1_SIZE, 0);
1365	kenv_getuint(sc, "rbps1.thresh", &sc->rbp_s1.thresh,
1366	    HE_CONFIG_RBPS1_THRESH, 0);
1367	sc->rbp_s1.bsize = MBUF1_SIZE;
1368
1369	/* Receive buffer return queue 1 */
1370	kenv_getuint(sc, "rbrq1.size", &sc->rbrq_1.size,
1371	    HE_CONFIG_RBRQ1_SIZE, 0);
1372	kenv_getuint(sc, "rbrq1.thresh", &sc->rbrq_1.thresh,
1373	    HE_CONFIG_RBRQ1_THRESH, 0);
1374	kenv_getuint(sc, "rbrq1.tout", &sc->rbrq_1.tout,
1375	    HE_CONFIG_RBRQ1_TOUT, 0);
1376	kenv_getuint(sc, "rbrq1.pcnt", &sc->rbrq_1.pcnt,
1377	    HE_CONFIG_RBRQ1_PCNT, 0);
1378
1379	/* Interrupt queue 0 */
1380	kenv_getuint(sc, "irq0.size", &sc->irq_0.size,
1381	    HE_CONFIG_IRQ0_SIZE, 0);
1382	kenv_getuint(sc, "irq0.thresh", &sc->irq_0.thresh,
1383	    HE_CONFIG_IRQ0_THRESH, 0);
1384	sc->irq_0.line = HE_CONFIG_IRQ0_LINE;
1385
1386	/* Transmit buffer return queue 0 */
1387	kenv_getuint(sc, "tbrq0.size", &sc->tbrq.size,
1388	    HE_CONFIG_TBRQ_SIZE, 0);
1389	kenv_getuint(sc, "tbrq0.thresh", &sc->tbrq.thresh,
1390	    HE_CONFIG_TBRQ_THRESH, 0);
1391
1392	/* Transmit buffer ready queue */
1393	kenv_getuint(sc, "tpdrq.size", &sc->tpdrq.size,
1394	    HE_CONFIG_TPDRQ_SIZE, 0);
1395	/* Max TPDs per VCC */
1396	kenv_getuint(sc, "tpdmax", &sc->max_tpd,
1397	    HE_CONFIG_TPD_MAXCC, 0);
1398
1399	return (0);
1400}
1401
1402#ifdef HATM_DEBUG
1403
1404/*
1405 * Get TSRs from connection memory
1406 */
1407static int
1408hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
1409{
1410	struct hatm_softc *sc = arg1;
1411	int error, i, j;
1412	uint32_t *val;
1413
1414	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);
1415
1416	mtx_lock(&sc->mtx);
1417	for (i = 0; i < HE_MAX_VCCS; i++)
1418		for (j = 0; j <= 14; j++)
1419			val[15 * i + j] = READ_TSR(sc, i, j);
1420	mtx_unlock(&sc->mtx);
1421
1422	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
1423	free(val, M_TEMP);
1424	if (error != 0 || req->newptr == NULL)
1425		return (error);
1426
1427	return (EPERM);
1428}
1429
1430/*
1431 * Get TPDs from connection memory
1432 */
1433static int
1434hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
1435{
1436	struct hatm_softc *sc = arg1;
1437	int error, i, j;
1438	uint32_t *val;
1439
1440	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);
1441
1442	mtx_lock(&sc->mtx);
1443	for (i = 0; i < HE_MAX_VCCS; i++)
1444		for (j = 0; j < 16; j++)
1445			val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
1446	mtx_unlock(&sc->mtx);
1447
1448	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
1449	free(val, M_TEMP);
1450	if (error != 0 || req->newptr == NULL)
1451		return (error);
1452
1453	return (EPERM);
1454}
1455
1456/*
1457 * Get mbox registers
1458 */
1459static int
1460hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
1461{
1462	struct hatm_softc *sc = arg1;
1463	int error, i;
1464	uint32_t *val;
1465
1466	val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);
1467
1468	mtx_lock(&sc->mtx);
1469	for (i = 0; i < HE_REGO_CS_END; i++)
1470		val[i] = READ_MBOX4(sc, i);
1471	mtx_unlock(&sc->mtx);
1472
1473	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
1474	free(val, M_TEMP);
1475	if (error != 0 || req->newptr == NULL)
1476		return (error);
1477
1478	return (EPERM);
1479}
1480
1481/*
1482 * Get connection memory
1483 */
1484static int
1485hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
1486{
1487	struct hatm_softc *sc = arg1;
1488	int error, i;
1489	uint32_t *val;
1490
1491	val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK);
1492
1493	mtx_lock(&sc->mtx);
1494	val[0] = READ4(sc, HE_REGO_RCMABR_BA);
1495	for (i = 0; i < HE_CONFIG_RXMEM; i++)
1496		val[i + 1] = READ_RCM4(sc, i);
1497	mtx_unlock(&sc->mtx);
1498
1499	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1));
1500	free(val, M_TEMP);
1501	if (error != 0 || req->newptr == NULL)
1502		return (error);
1503
1504	return (EPERM);
1505}
1506
1507/*
1508 * Get local buffer memory
1509 */
1510static int
1511hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS)
1512{
1513	struct hatm_softc *sc = arg1;
1514	int error, i;
1515	uint32_t *val;
1516	u_int bytes = (1 << 21);
1517
1518	val = malloc(bytes, M_TEMP, M_WAITOK);
1519
1520	mtx_lock(&sc->mtx);
1521	for (i = 0; i < bytes / 4; i++)
1522		val[i] = READ_LB4(sc, i);
1523	mtx_unlock(&sc->mtx);
1524
1525	error = SYSCTL_OUT(req, val, bytes);
1526	free(val, M_TEMP);
1527	if (error != 0 || req->newptr == NULL)
1528		return (error);
1529
1530	return (EPERM);
1531}
1532
1533/*
1534 * Get all card registers
1535 */
1536static int
1537hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS)
1538{
1539	struct hatm_softc *sc = arg1;
1540	int error, i;
1541	uint32_t *val;
1542
1543	val = malloc(HE_REGO_END, M_TEMP, M_WAITOK);
1544
1545	mtx_lock(&sc->mtx);
1546	for (i = 0; i < HE_REGO_END; i += 4)
1547		val[i / 4] = READ4(sc, i);
1548	mtx_unlock(&sc->mtx);
1549
1550	error = SYSCTL_OUT(req, val, HE_REGO_END);
1551	free(val, M_TEMP);
1552	if (error != 0 || req->newptr == NULL)
1553		return (error);
1554
1555	return (EPERM);
1556}
1557#endif
1558
1559/*
1560 * SUNI register access
1561 */
1562/*
1563 * read at most n SUNI registers starting at reg into val
1564 */
1565static int
1566hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
1567{
1568	u_int i;
1569	struct hatm_softc *sc = (struct hatm_softc *)ifatm;
1570
1571	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1572		return (EINVAL);
1573	if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1574		*n = (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4 - reg;
1575
1576	mtx_assert(&sc->mtx, MA_OWNED);
1577	for (i = 0; i < *n; i++)
1578		val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i));
1579
1580	return (0);
1581}
1582
1583/*
1584 * Set the bits selected by mask in register reg to those in val
1585 */
1586static int
1587hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
1588{
1589	uint32_t regval;
1590	struct hatm_softc *sc = (struct hatm_softc *)ifatm;
1591
1592	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1593		return (EINVAL);
1594
1595	mtx_assert(&sc->mtx, MA_OWNED);
1596	regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
1597	regval = (regval & ~mask) | (val & mask);
1598	WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);
1599
1600	return (0);
1601}
1602
1603static struct utopia_methods hatm_utopia_methods = {
1604	hatm_utopia_readregs,
1605	hatm_utopia_writereg,
1606};
1607
1608/*
1609 * Detach - if it is running, stop. Destroy.
1610 */
1611static int
1612hatm_detach(device_t dev)
1613{
1614	struct hatm_softc *sc = (struct hatm_softc *)device_get_softc(dev);
1615
1616	mtx_lock(&sc->mtx);
1617	hatm_stop(sc);
1618	if (sc->utopia.state & UTP_ST_ATTACHED) {
1619		utopia_stop(&sc->utopia);
1620		utopia_detach(&sc->utopia);
1621	}
1622	mtx_unlock(&sc->mtx);
1623
1624	atm_ifdetach(&sc->ifatm.ifnet);
1625
1626	hatm_destroy(sc);
1627
1628	return (0);
1629}
1630
1631/*
1632 * Attach to the device. Assume that no locking is needed here.
1633 * All resources we allocate here are freed by calling hatm_destroy.
1634 */
1635static int
1636hatm_attach(device_t dev)
1637{
1638	struct hatm_softc *sc;
1639	int unit;
1640	int error;
1641	uint32_t v;
1642	struct ifnet *ifp;
1643
1644	sc = device_get_softc(dev);
1645	unit = device_get_unit(dev);
1646
1647	sc->dev = dev;
1648	sc->ifatm.mib.device = ATM_DEVICE_HE155;
1649	sc->ifatm.mib.serial = 0;
1650	sc->ifatm.mib.hw_version = 0;
1651	sc->ifatm.mib.sw_version = 0;
1652	sc->ifatm.mib.vpi_bits = HE_CONFIG_VPI_BITS;
1653	sc->ifatm.mib.vci_bits = HE_CONFIG_VCI_BITS;
1654	sc->ifatm.mib.max_vpcs = 0;
1655	sc->ifatm.mib.max_vccs = HE_MAX_VCCS;
1656	sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
1657	sc->he622 = 0;
1658	sc->ifatm.phy = &sc->utopia;
1659
1660	SLIST_INIT(&sc->mbuf0_list);
1661	SLIST_INIT(&sc->mbuf1_list);
1662	SLIST_INIT(&sc->tpd_free);
1663
1664	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1665	mtx_init(&sc->mbuf0_mtx, device_get_nameunit(dev), "HEb0", MTX_DEF);
1666	mtx_init(&sc->mbuf1_mtx, device_get_nameunit(dev), "HEb1", MTX_DEF);
1667	cv_init(&sc->vcc_cv, "HEVCCcv");
1668	cv_init(&sc->cv_rcclose, "RCClose");
1669
1670	sysctl_ctx_init(&sc->sysctl_ctx);
1671
1672	/*
1673	 * 4.2 BIOS Configuration
1674	 */
1675	v = pci_read_config(dev, PCIR_COMMAND, 2);
1676	v |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
1677	pci_write_config(dev, PCIR_COMMAND, v, 2);
1678
1679	/*
1680	 * 4.3 PCI Bus Controller-Specific Initialisation
1681	 */
1682	v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4);
1683	v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT;
1684#if BYTE_ORDER == BIG_ENDIAN && 0
1685	v |= HE_PCIM_CTL0_BIGENDIAN;
1686#endif
1687	pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4);
1688
1689	/*
1690	 * Map memory
1691	 */
1692	v = pci_read_config(dev, PCIR_COMMAND, 2);
1693	if (!(v & PCIM_CMD_MEMEN)) {
1694		device_printf(dev, "failed to enable memory\n");
1695		error = ENXIO;
1696		goto failed;
1697	}
1698	sc->memid = PCIR_MAPS;
1699	sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid,
1700	    0, ~0, 1, RF_ACTIVE);
1701	if (sc->memres == NULL) {
1702		device_printf(dev, "could not map memory\n");
1703		error = ENXIO;
1704		goto failed;
1705	}
1706	sc->memh = rman_get_bushandle(sc->memres);
1707	sc->memt = rman_get_bustag(sc->memres);
1708
1709	/*
1710	 * Allocate a DMA tag for subsequent allocations
1711	 */
1712	if (bus_dma_tag_create(NULL, 1, 0,
1713	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1714	    NULL, NULL,
1715	    BUS_SPACE_MAXSIZE_32BIT, 1,
1716	    BUS_SPACE_MAXSIZE_32BIT, 0,
1717	    NULL, NULL, &sc->parent_tag)) {
1718		device_printf(dev, "could not allocate DMA tag\n");
1719		error = ENOMEM;
1720		goto failed;
1721	}
1722
1723	if (bus_dma_tag_create(sc->parent_tag, 1, 0,
1724	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1725	    NULL, NULL,
1726	    MBUF_ALLOC_SIZE, 1,
1727	    MBUF_ALLOC_SIZE, 0,
1728	    NULL, NULL, &sc->mbuf_tag)) {
1729		device_printf(dev, "could not allocate mbuf DMA tag\n");
1730		error = ENOMEM;
1731		goto failed;
1732	}
1733
1734	/*
1735	 * Allocate a DMA tag for packets to send. Here we have a problem with
1736	 * the specification of the maximum number of segments. Theoretically
1737	 * this would be (the size of the transmit ring - 1) multiplied by 3,
1738	 * but this would not work. So make the maximum number of TPDs
1739	 * occupied by one packet a configuration parameter.
1740	 */
1741	if (bus_dma_tag_create(NULL, 1, 0,
1742	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1743	    HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0,
1744	    NULL, NULL, &sc->tx_tag)) {
1745		device_printf(dev, "could not allocate TX tag\n");
1746		error = ENOMEM;
1747		goto failed;
1748	}
1749
1750	/*
1751	 * Setup the interrupt
1752	 */
1753	sc->irqid = 0;
1754	sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid,
1755	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
1756	if (sc->irqres == 0) {
1757		device_printf(dev, "could not allocate irq\n");
1758		error = ENXIO;
1759		goto failed;
1760	}
1761
1762	ifp = &sc->ifatm.ifnet;
1763	ifp->if_softc = sc;
1764	ifp->if_unit = unit;
1765	ifp->if_name = "hatm";
1766
1767	/*
1768	 * Make the sysctl tree
1769	 */
1770	error = ENOMEM;
1771	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1772	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
1773	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
1774		goto failed;
1775
1776	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1777	    OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS,
1778	    hatm_sysctl, "LU", "internal statistics") == NULL)
1779		goto failed;
1780
1781	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1782	    OID_AUTO, "stats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_STATS,
1783	    hatm_sysctl, "LU", "card statistics") == NULL)
1784		goto failed;
1785
1786#ifdef HATM_DEBUG
1787	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1788	    OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1789	    hatm_sysctl_tsr, "S", "transmission status registers") == NULL)
1790		goto failed;
1791
1792	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1793	    OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1794	    hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL)
1795		goto failed;
1796
1797	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1798	    OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1799	    hatm_sysctl_mbox, "S", "mbox registers") == NULL)
1800		goto failed;
1801
1802	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1803	    OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1804	    hatm_sysctl_cm, "S", "connection memory") == NULL)
1805		goto failed;
1806
1807	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1808	    OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1809	    hatm_sysctl_heregs, "S", "card registers") == NULL)
1810		goto failed;
1811
1812	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1813	    OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1814	    hatm_sysctl_lbmem, "S", "local memory") == NULL)
1815		goto failed;
1816
1817	kenv_getuint(sc, "debug", &sc->debug, 0, 1);
1818#endif
1819
1820	/*
1821	 * Configure
1822	 */
1823	if ((error = hatm_configure(sc)) != 0)
1824		goto failed;
1825
1826	/*
1827	 * Compute memory parameters
1828	 */
1829	if (sc->rbp_s0.size != 0) {
1830		sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3;
1831		sc->rbp_s0.mem.size = sc->rbp_s0.size * 8;
1832		sc->rbp_s0.mem.align = sc->rbp_s0.mem.size;
1833	}
1834	if (sc->rbp_l0.size != 0) {
1835		sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3;
1836		sc->rbp_l0.mem.size = sc->rbp_l0.size * 8;
1837		sc->rbp_l0.mem.align = sc->rbp_l0.mem.size;
1838	}
1839	if (sc->rbp_s1.size != 0) {
1840		sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3;
1841		sc->rbp_s1.mem.size = sc->rbp_s1.size * 8;
1842		sc->rbp_s1.mem.align = sc->rbp_s1.mem.size;
1843	}
1844	if (sc->rbrq_0.size != 0) {
1845		sc->rbrq_0.mem.size = sc->rbrq_0.size * 8;
1846		sc->rbrq_0.mem.align = sc->rbrq_0.mem.size;
1847	}
1848	if (sc->rbrq_1.size != 0) {
1849		sc->rbrq_1.mem.size = sc->rbrq_1.size * 8;
1850		sc->rbrq_1.mem.align = sc->rbrq_1.mem.size;
1851	}
1852
1853	sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t);
1854	sc->irq_0.mem.align = 4 * 1024;
1855
1856	sc->tbrq.mem.size = sc->tbrq.size * 4;
1857	sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */
1858
1859	sc->tpdrq.mem.size = sc->tpdrq.size * 8;
1860	sc->tpdrq.mem.align = sc->tpdrq.mem.size;
1861
1862	sc->hsp_mem.size = sizeof(struct he_hsp);
1863	sc->hsp_mem.align = 1024;
1864
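	/*
	 * Size the large-buffer and TPD arrays so that the free queues
	 * and the corresponding return queues can be full at the same
	 * time (this rationale is an assumption, not from the original
	 * comments).
	 */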
1865	sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size;
1866	sc->tpd_total = sc->tbrq.size + sc->tpdrq.size;
1867	sc->tpds.align = 64;
1868	sc->tpds.size = sc->tpd_total * HE_TPD_SIZE;
1869
1870	hatm_init_rmaps(sc);
1871	hatm_init_smbufs(sc);
1872	if ((error = hatm_init_tpds(sc)) != 0)
1873		goto failed;
1874
1875	/*
1876	 * Allocate memory
1877	 */
1878	if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 ||
1879	    (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 ||
1880	    (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 ||
1881	    (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0)
1882		goto failed;
1883
1884	if (sc->rbp_s0.mem.size != 0 &&
1885	    (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem)))
1886		goto failed;
1887	if (sc->rbp_l0.mem.size != 0 &&
1888	    (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem)))
1889		goto failed;
1890	if (sc->rbp_s1.mem.size != 0 &&
1891	    (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem)))
1892		goto failed;
1893
1894	if (sc->rbrq_0.mem.size != 0 &&
1895	    (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem)))
1896		goto failed;
1897	if (sc->rbrq_1.mem.size != 0 &&
1898	    (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem)))
1899		goto failed;
1900
1901	if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc),
1902	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) {
1903		device_printf(dev, "cannot allocate zone for vccs\n");
1904		goto failed;
1905	}
1906
1907	/*
1908	 * 4.4 Reset the card.
1909	 */
1910	if ((error = hatm_reset(sc)) != 0)
1911		goto failed;
1912
1913	/*
1914	 * Read the prom.
1915	 */
1916	hatm_init_bus_width(sc);
1917	hatm_init_read_eeprom(sc);
1918	hatm_init_endianess(sc);
1919
1920	/*
1921	 * Initialize interface
1922	 */
1923	ifp->if_flags = IFF_SIMPLEX;
1924	ifp->if_ioctl = hatm_ioctl;
1925	ifp->if_start = hatm_start;
1926	ifp->if_watchdog = NULL;
1927	ifp->if_init = hatm_init;
1928
1929	utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
1930	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1931	    &hatm_utopia_methods);
1932	utopia_init_media(&sc->utopia);
1933
1934	/* these two SUNI routines need the lock */
1935	mtx_lock(&sc->mtx);
1936	/* poll while we are not running */
1937	sc->utopia.flags |= UTP_FL_POLL_CARRIER;
1938	utopia_start(&sc->utopia);
1939	utopia_reset(&sc->utopia);
1940	mtx_unlock(&sc->mtx);
1941
1942	atm_ifattach(ifp);
1943
1944#ifdef ENABLE_BPF
1945	bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
1946#endif
1947
1948	error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, hatm_intr,
1949	    &sc->irq_0, &sc->ih);
1950	if (error != 0) {
1951		device_printf(dev, "could not setup interrupt\n");
1952		hatm_detach(dev);
1953		return (error);
1954	}
1955
1956	return (0);
1957
1958  failed:
1959	hatm_destroy(sc);
1960	return (error);
1961}
1962
1963/*
1964 * Start the interface. Assume a state as from attach().
1965 */
1966void
1967hatm_initialize(struct hatm_softc *sc)
1968{
1969	uint32_t v;
1970	static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT;
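	/*
	 * layout[he622][] holds, in this order: cells per SDRAM row,
	 * bytes per row, number of rows for receive group 0, transmit
	 * and receive group 1, the first row of group 0 and the number
	 * of cells per local buffer (see the 5.1.1 assignments below).
	 */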
1971
1972	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
1973		return;
1974
1975	hatm_init_bus_width(sc);
1976	hatm_init_endianess(sc);
1977
1978	if_printf(&sc->ifatm.ifnet, "%s, Rev. %s, S/N %u, "
1979	    "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n",
1980	    sc->prod_id, sc->rev, sc->ifatm.mib.serial,
1981	    sc->ifatm.mib.esi[0], sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2],
1982	    sc->ifatm.mib.esi[3], sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5],
1983	    sc->pci64 ? 64 : 32);
1984
1985	/*
1986	 * 4.8 SDRAM Controller Initialisation
1987	 * 4.9 Initialize RNUM value
1988	 */
1989	if (sc->he622)
1990		WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
1991	else
1992		WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
1993	BARRIER_W(sc);
1994
1995	v = READ4(sc, HE_REGO_LB_SWAP);
1996	BARRIER_R(sc);
1997	v |= 0xf << HE_REGS_LBSWAP_RNUM;
1998	WRITE4(sc, HE_REGO_LB_SWAP, v);
1999	BARRIER_W(sc);
2000
2001	hatm_init_irq(sc, &sc->irq_0, 0);
2002	hatm_clear_irq(sc, 1);
2003	hatm_clear_irq(sc, 2);
2004	hatm_clear_irq(sc, 3);
2005
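	/*
	 * Map all eight connection groups to 0; only queue 0 is set up above,
	 * the others were cleared (assumption based on the register names).
	 */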
2006	WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
2007	WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
2008	WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
2009	WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
2010	BARRIER_W(sc);
2011
2012	/*
2013	 * 4.11 Enable PCI Bus Controller State Machine
2014	 */
2015	v = READ4(sc, HE_REGO_HOST_CNTL);
2016	BARRIER_R(sc);
2017	v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
2018	    HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
2019	WRITE4(sc, HE_REGO_HOST_CNTL, v);
2020	BARRIER_W(sc);
2021
2022	/*
2023	 * 5.1.1 Generic configuration state
2024	 */
2025	sc->cells_per_row = layout[sc->he622][0];
2026	sc->bytes_per_row = layout[sc->he622][1];
2027	sc->r0_numrows = layout[sc->he622][2];
2028	sc->tx_numrows = layout[sc->he622][3];
2029	sc->r1_numrows = layout[sc->he622][4];
2030	sc->r0_startrow = layout[sc->he622][5];
2031	sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
2032	sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
2033	sc->cells_per_lbuf = layout[sc->he622][6];
2034
2035	sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row /
2036	    sc->cells_per_lbuf);
2037	sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row /
2038	    sc->cells_per_lbuf);
2039	sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row /
2040	    sc->cells_per_lbuf);
2041
2042	if (sc->r0_numbuffs > 2560)
2043		sc->r0_numbuffs = 2560;
2044	if (sc->r1_numbuffs > 2560)
2045		sc->r1_numbuffs = 2560;
2046	if (sc->tx_numbuffs > 5120)
2047		sc->tx_numbuffs = 5120;
2048
2049	DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
2050	    "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
2051	    "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
2052	    "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
2053	    sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
2054	    sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
2055	    sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));
2056
2057	/*
2058	 * 5.1.2 Configure hardware-dependent registers
2059	 */
2060	if (sc->he622) {
2061		WRITE4(sc, HE_REGO_LBARB,
2062		    (0x2 << HE_REGS_LBARB_SLICE) |
2063		    (0xf << HE_REGS_LBARB_RNUM) |
2064		    (0x3 << HE_REGS_LBARB_THPRI) |
2065		    (0x3 << HE_REGS_LBARB_RHPRI) |
2066		    (0x2 << HE_REGS_LBARB_TLPRI) |
2067		    (0x1 << HE_REGS_LBARB_RLPRI) |
2068		    (0x28 << HE_REGS_LBARB_BUS_MULT) |
2069		    (0x50 << HE_REGS_LBARB_NET_PREF));
2070		BARRIER_W(sc);
2071		WRITE4(sc, HE_REGO_SDRAMCON,
2072		    /* HW bug: don't use banking */
2073		    /* HE_REGM_SDRAMCON_BANK | */
2074		    HE_REGM_SDRAMCON_WIDE |
2075		    (0x384 << HE_REGS_SDRAMCON_REF));
2076		BARRIER_W(sc);
2077		WRITE4(sc, HE_REGO_RCMCONFIG,
2078		    (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2079		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2080		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
2081		WRITE4(sc, HE_REGO_TCMCONFIG,
2082		    (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2083		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2084		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
2085	} else {
2086		WRITE4(sc, HE_REGO_LBARB,
2087		    (0x2 << HE_REGS_LBARB_SLICE) |
2088		    (0xf << HE_REGS_LBARB_RNUM) |
2089		    (0x3 << HE_REGS_LBARB_THPRI) |
2090		    (0x3 << HE_REGS_LBARB_RHPRI) |
2091		    (0x2 << HE_REGS_LBARB_TLPRI) |
2092		    (0x1 << HE_REGS_LBARB_RLPRI) |
2093		    (0x46 << HE_REGS_LBARB_BUS_MULT) |
2094		    (0x8C << HE_REGS_LBARB_NET_PREF));
2095		BARRIER_W(sc);
2096		WRITE4(sc, HE_REGO_SDRAMCON,
2097		    /* HW bug: don't use banking */
2098		    /* HE_REGM_SDRAMCON_BANK | */
2099		    (0x150 << HE_REGS_SDRAMCON_REF));
2100		BARRIER_W(sc);
2101		WRITE4(sc, HE_REGO_RCMCONFIG,
2102		    (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2103		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2104		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
2105		WRITE4(sc, HE_REGO_TCMCONFIG,
2106		    (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2107		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2108		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
2109	}
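	/* Local buffer size, presumably in bytes (48 payload bytes per cell). */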
2110	WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48));
2111
2112	WRITE4(sc, HE_REGO_RLBC_H, 0);
2113	WRITE4(sc, HE_REGO_RLBC_T, 0);
2114	WRITE4(sc, HE_REGO_RLBC_H2, 0);
2115
2116	WRITE4(sc, HE_REGO_RXTHRSH, 512);
2117	WRITE4(sc, HE_REGO_LITHRSH, 256);
2118
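	/*
	 * Initial free-buffer counts for receive pools 0 and 1 (inferred from
	 * the register names).
	 */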
2119	WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs);
2120	WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs);
2121
2122	if (sc->he622) {
2123		WRITE4(sc, HE_REGO_RCCONFIG,
2124		    (8 << HE_REGS_RCCONFIG_UTDELAY) |
2125		    (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2126		    (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC));
2127		WRITE4(sc, HE_REGO_TXCONFIG,
2128		    (32 << HE_REGS_TXCONFIG_THRESH) |
2129		    (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2130		    (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2131	} else {
2132		WRITE4(sc, HE_REGO_RCCONFIG,
2133		    (0 << HE_REGS_RCCONFIG_UTDELAY) |
2134		    HE_REGM_RCCONFIG_UT_MODE |
2135		    (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2136		    (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC));
2137		WRITE4(sc, HE_REGO_TXCONFIG,
2138		    (32 << HE_REGS_TXCONFIG_THRESH) |
2139		    HE_REGM_TXCONFIG_UTMODE |
2140		    (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2141		    (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2142	}
2143
2144	WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0);
2145
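	/*
	 * Assumption: steer OAM cells to group 1 only when the second small
	 * buffer pool (and hence receive group 1) is configured.
	 */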
2146	if (sc->rbp_s1.size != 0) {
2147		WRITE4(sc, HE_REGO_RHCONFIG,
2148		    HE_REGM_RHCONFIG_PHYENB |
2149		    ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2150		    (1 << HE_REGS_RHCONFIG_OAM_GID));
2151	} else {
2152		WRITE4(sc, HE_REGO_RHCONFIG,
2153		    HE_REGM_RHCONFIG_PHYENB |
2154		    ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2155		    (0 << HE_REGS_RHCONFIG_OAM_GID));
2156	}
2157	BARRIER_W(sc);
2158
2159	hatm_init_cm(sc);
2160
2161	hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs);
2162	hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs);
2163	hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs);
2164
2165	hatm_init_imed_queues(sc);
2166
2167	/*
2168	 * 5.1.6 Application tunable Parameters
2169	 */
2170	WRITE4(sc, HE_REGO_MCC, 0);
2171	WRITE4(sc, HE_REGO_OEC, 0);
2172	WRITE4(sc, HE_REGO_DCC, 0);
2173	WRITE4(sc, HE_REGO_CEC, 0);
2174
2175	hatm_init_cs_block(sc);
2176	hatm_init_cs_block_cm(sc);
2177
2178	hatm_init_rpool(sc, &sc->rbp_s0, 0, 0);
2179	hatm_init_rpool(sc, &sc->rbp_l0, 0, 1);
2180	hatm_init_rpool(sc, &sc->rbp_s1, 1, 0);
2181	hatm_clear_rpool(sc, 1, 1);
2182	hatm_clear_rpool(sc, 2, 0);
2183	hatm_clear_rpool(sc, 2, 1);
2184	hatm_clear_rpool(sc, 3, 0);
2185	hatm_clear_rpool(sc, 3, 1);
2186	hatm_clear_rpool(sc, 4, 0);
2187	hatm_clear_rpool(sc, 4, 1);
2188	hatm_clear_rpool(sc, 5, 0);
2189	hatm_clear_rpool(sc, 5, 1);
2190	hatm_clear_rpool(sc, 6, 0);
2191	hatm_clear_rpool(sc, 6, 1);
2192	hatm_clear_rpool(sc, 7, 0);
2193	hatm_clear_rpool(sc, 7, 1);
2194	hatm_init_rbrq(sc, &sc->rbrq_0, 0);
2195	hatm_init_rbrq(sc, &sc->rbrq_1, 1);
2196	hatm_clear_rbrq(sc, 2);
2197	hatm_clear_rbrq(sc, 3);
2198	hatm_clear_rbrq(sc, 4);
2199	hatm_clear_rbrq(sc, 5);
2200	hatm_clear_rbrq(sc, 6);
2201	hatm_clear_rbrq(sc, 7);
2202
2203	sc->lbufs_next = 0;
2204	bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size);
2205
2206	hatm_init_tbrq(sc, &sc->tbrq, 0);
2207	hatm_clear_tbrq(sc, 1);
2208	hatm_clear_tbrq(sc, 2);
2209	hatm_clear_tbrq(sc, 3);
2210	hatm_clear_tbrq(sc, 4);
2211	hatm_clear_tbrq(sc, 5);
2212	hatm_clear_tbrq(sc, 6);
2213	hatm_clear_tbrq(sc, 7);
2214
2215	hatm_init_tpdrq(sc);
2216
2217	WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 0x104780 : 0x800));
2218
2219	/*
2220	 * Initialize HSP
2221	 */
2222	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2223	sc->hsp = sc->hsp_mem.base;
2224	WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);
2225
2226	/*
2227	 * 5.1.12 Enable transmit and receive
2228	 * Enable bus master and interrupts
2229	 */
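	/*
	 * 0x18000000 is an undocumented magic value; going by the comment
	 * above it presumably switches on the transmitter (assumption).
	 */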
2230	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2231	v |= 0x18000000;
2232	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2233
2234	v = READ4(sc, HE_REGO_RCCONFIG);
2235	v |= HE_REGM_RCCONFIG_RXENB;
2236	WRITE4(sc, HE_REGO_RCCONFIG, v);
2237
2238	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2239	v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
2240	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2241
2242	sc->ifatm.ifnet.if_flags |= IFF_RUNNING;
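	/* 53 bytes per ATM cell times 8 bits times the peak cell rate. */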
2243	sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
2244
2245	sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;
2246}
2247
2248/*
2249 * This function stops the card and frees all resources allocated after
2250 * attach. The caller must hold the global lock.
2251 */
2252void
2253hatm_stop(struct hatm_softc *sc)
2254{
2255	uint32_t v;
2256	u_int i, p, cid;
2257	struct mbuf_chunk_hdr *ch;
2258	struct mbuf_page *pg;
2259
2260	mtx_assert(&sc->mtx, MA_OWNED);
2261
2262	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
2263		return;
2264	sc->ifatm.ifnet.if_flags &= ~IFF_RUNNING;
2265
2266	sc->utopia.flags |= UTP_FL_POLL_CARRIER;
2267
2268	/*
2269	 * Stop and reset the hardware so that everything remains
2270	 * stable.
2271	 */
2272	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2273	v &= ~0x18000000;
2274	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2275
2276	v = READ4(sc, HE_REGO_RCCONFIG);
2277	v &= ~HE_REGM_RCCONFIG_RXENB;
2278	WRITE4(sc, HE_REGO_RCCONFIG, v);
2279
2280	WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
2281	BARRIER_W(sc);
2282
2283	v = READ4(sc, HE_REGO_HOST_CNTL);
2284	BARRIER_R(sc);
2285	v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
2286	WRITE4(sc, HE_REGO_HOST_CNTL, v);
2287	BARRIER_W(sc);
2288
2289	/*
2290	 * Disable bus master and interrupts
2291	 */
2292	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2293	v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
2294	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2295
2296	(void)hatm_reset(sc);
2297
2298	/*
2299	 * The card resets the SUNI chip when it is itself reset, so re-initialize it
2300	 */
2301	utopia_reset(&sc->utopia);
2302
2303	/*
2304	 * Give any threads waiting to close a VCC a chance to run. They will
2305	 * stop waiting once they see that IFF_RUNNING has been cleared.
2306	 */
2307	while (!(cv_waitq_empty(&sc->vcc_cv))) {
2308		cv_broadcast(&sc->vcc_cv);
2309		DELAY(100);
2310	}
2311	while (!(cv_waitq_empty(&sc->cv_rcclose))) {
2312		cv_broadcast(&sc->cv_rcclose);
2313	}
2314
2315	/*
2316	 * Now free all resources.
2317	 */
2318
2319	/*
2320	 * Free the large mbufs that are given to the card.
2321	 */
2322	for (i = 0 ; i < sc->lbufs_size; i++) {
2323		if (sc->lbufs[i] != NULL) {
2324			bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
2325			m_freem(sc->lbufs[i]);
2326			sc->lbufs[i] = NULL;
2327		}
2328	}
2329
2330	/*
2331	 * Free small buffers
2332	 */
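	/*
	 * Assumption: a set bit in the page's "card" bitmap means the chunk
	 * is still owned by the hardware, so its mbuf must be freed here.
	 */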
2333	for (p = 0; p < sc->mbuf_npages; p++) {
2334		pg = sc->mbuf_pages[p];
2335		for (i = 0; i < pg->hdr.nchunks; i++) {
2336			if (MBUF_TST_BIT(pg->hdr.card, i)) {
2337				MBUF_CLR_BIT(pg->hdr.card, i);
2338				MBUF_CLR_BIT(pg->hdr.used, i);
2339				ch = (struct mbuf_chunk_hdr *) ((char *)pg +
2340				    i * pg->hdr.chunksize + pg->hdr.hdroff);
2341				m_freem(ch->mbuf);
2342			}
2343		}
2344	}
2345
2346	hatm_stop_tpds(sc);
2347
2348	/*
2349	 * Free all partially reassembled PDUs on any VCC.
2350	 */
2351	for (cid = 0; cid < HE_MAX_VCCS; cid++) {
2352		if (sc->vccs[cid] != NULL) {
2353			if (sc->vccs[cid]->chain != NULL)
2354				m_freem(sc->vccs[cid]->chain);
2355			uma_zfree(sc->vcc_zone, sc->vccs[cid]);
2356		}
2357	}
2358	bzero(sc->vccs, sizeof(sc->vccs));
2359	sc->cbr_bw = 0;
2360	sc->open_vccs = 0;
2361
2362	/*
2363	 * Reset CBR rate groups
2364	 */
2365	bzero(sc->rate_ctrl, sizeof(sc->rate_ctrl));
2366
2367	if (sc->rbp_s0.size != 0)
2368		bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
2369	if (sc->rbp_l0.size != 0)
2370		bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
2371	if (sc->rbp_s1.size != 0)
2372		bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
2373	if (sc->rbrq_0.size != 0)
2374		bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
2375	if (sc->rbrq_1.size != 0)
2376		bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);
2377
2378	bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
2379	bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
2380	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2381}
2382
2383/************************************************************
2384 *
2385 * Driver infrastructure
2386 */
2387devclass_t hatm_devclass;
2388
2389static device_method_t hatm_methods[] = {
2390	DEVMETHOD(device_probe,		hatm_probe),
2391	DEVMETHOD(device_attach,	hatm_attach),
2392	DEVMETHOD(device_detach,	hatm_detach),
2393	{0,0}
2394};
2395static driver_t hatm_driver = {
2396	"hatm",
2397	hatm_methods,
2398	sizeof(struct hatm_softc),
2399};
2400DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);
2401