/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * 	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * ForeHE driver.
 *
 * This file contains the module and driver infrastructure stuff as well
 * as a couple of utility functions and the entire initialisation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/dev/hatm/if_hatm.c 298955 2016-05-03 03:41:25Z pfg $");

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/if_types.h>
#include <net/route.h>
#ifdef ENABLE_BPF
#include <net/bpf.h>
#endif
#include <netinet/in.h>
#include <netinet/if_atm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>
#include <dev/hatm/if_hatmconf.h>
#include <dev/hatm/if_hatmreg.h>
#include <dev/hatm/if_hatmvar.h>

static const struct {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
} hatm_devs[] = {
	{ 0x1127, 0x400,
	  "FORE HE" },
	{ 0, 0, NULL }
};

SYSCTL_DECL(_hw_atm);

MODULE_DEPEND(hatm, utopia, 1, 1, 1);
MODULE_DEPEND(hatm, pci, 1, 1, 1);
MODULE_DEPEND(hatm, atm, 1, 1, 1);

#define EEPROM_DELAY	400 /* microseconds */

/* Read from EEPROM 0000 0011b */
static const uint32_t readtab[] = {
	HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK,
	0,
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	HE_REGM_HOST_PROM_DATA_IN,	/* 0 */
	HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
	HE_REGM_HOST_PROM_DATA_IN,	/* 1 */
	HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
	HE_REGM_HOST_PROM_DATA_IN,	/* 1 */
};
static const uint32_t clocktab[] = {
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0
};

/*
 * Convert cell rate to ATM Forum format
 */
u_int
hatm_cps2atmf(uint32_t pcr)
{
	u_int e;

	if (pcr == 0)
		return (0);
	pcr <<= 9;
	e = 0;
	while (pcr > (1024 - 1)) {
		e++;
		pcr >>= 1;
	}
	return ((1 << 14) | (e << 9) | (pcr & 0x1ff));
}

/*
 * Convert ATM Forum format back to a cell rate
 */
u_int
hatm_atmf2cps(uint32_t fcr)
{
	fcr &= 0x7fff;

	return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512
	  * (fcr >> 14));
}
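
/*
 * Worked example of the ATM Forum rate format produced above: bit 14 is
 * the non-zero flag, bits 13..9 the exponent and bits 8..0 the mantissa,
 * so the encoded rate is 2^exp * (512 + mant) / 512 cells/s.  The numbers
 * below are illustrative only; the encoding truncates, so the round trip
 * is approximate.
 */
#if 0	/* example sketch, not compiled */
static void
hatm_atmf_example(void)
{
	uint32_t f;

	f = hatm_cps2atmf(353208);	/* ~155 Mbit/s cell rate */
	/* f == (1 << 14) | (18 << 9) | 177 */
	printf("%u cells/s\n", hatm_atmf2cps(f));	/* prints 352768 */
}
#endif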
163
164/************************************************************
165 *
166 * Initialisation
167 */
168/*
169 * Probe for a HE controller
170 */
171static int
172hatm_probe(device_t dev)
173{
174	int i;
175
176	for (i = 0; hatm_devs[i].name; i++)
177		if (pci_get_vendor(dev) == hatm_devs[i].vid &&
178		    pci_get_device(dev) == hatm_devs[i].did) {
179			device_set_desc(dev, hatm_devs[i].name);
180			return (BUS_PROBE_DEFAULT);
181		}
182	return (ENXIO);
183}
184
185/*
186 * Allocate and map DMA-able memory. We support only contiguous mappings.
187 */
188static void
189dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
190{
191	if (error)
192		return;
193	KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs));
194	KASSERT(segs[0].ds_addr <= 0xffffffffUL,
195	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));
196
197	*(bus_addr_t *)arg = segs[0].ds_addr;
198}
199static int
200hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem)
201{
202	int error;
203
204	mem->base = NULL;
205
	/*
	 * Alignment does not work in the bus_dmamem_alloc function below
	 * on FreeBSD. malloc seems to align objects at least to the object
	 * size, so increase the size to the alignment if the size is less
	 * than the alignment.
	 * XXX on sparc64 this is (probably) not needed.
	 */
213	if (mem->size < mem->align)
214		mem->size = mem->align;
215
216	error = bus_dma_tag_create(sc->parent_tag, mem->align, 0,
217	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
218	    NULL, NULL, mem->size, 1,
219	    BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
220	    NULL, NULL, &mem->tag);
221	if (error) {
222		if_printf(sc->ifp, "DMA tag create (%s)\n", what);
223		return (error);
224	}
225
226	error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map);
227	if (error) {
228		if_printf(sc->ifp, "DMA mem alloc (%s): %d\n",
229		    what, error);
230		bus_dma_tag_destroy(mem->tag);
231		mem->base = NULL;
232		return (error);
233	}
234
235	error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size,
236	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
237	if (error) {
238		if_printf(sc->ifp, "DMA map load (%s): %d\n",
239		    what, error);
240		bus_dmamem_free(mem->tag, mem->base, mem->map);
241		bus_dma_tag_destroy(mem->tag);
242		mem->base = NULL;
243		return (error);
244	}
245
246	DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size,
247	    mem->align, mem->base, (u_long)mem->paddr));
248
249	return (0);
250}
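
/*
 * Typical use of hatm_alloc_dmamem()/hatm_destroy_dmamem(): the caller
 * fills in size and align of the struct dmamem before the call and
 * afterwards uses base (kernel virtual address) and paddr (bus address),
 * exactly as hatm_attach() does for the queues further down.  The values
 * here are purely illustrative.
 */
#if 0	/* example sketch, not compiled */
static int
hatm_dmamem_example(struct hatm_softc *sc)
{
	struct dmamem ex;
	int error;

	ex.size = 1024;		/* bytes */
	ex.align = 1024;	/* power of two */
	if ((error = hatm_alloc_dmamem(sc, "example", &ex)) != 0)
		return (error);
	bzero(ex.base, ex.size);
	/* ... hand ex.paddr to the card, access ex.base from the host ... */
	hatm_destroy_dmamem(&ex);
	return (0);
}
#endif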
251
252/*
 * Destroy all the resources of a DMA-able memory region.
254 */
255static void
256hatm_destroy_dmamem(struct dmamem *mem)
257{
258	if (mem->base != NULL) {
259		bus_dmamap_unload(mem->tag, mem->map);
260		bus_dmamem_free(mem->tag, mem->base, mem->map);
261		(void)bus_dma_tag_destroy(mem->tag);
262		mem->base = NULL;
263	}
264}
265
266/*
267 * Initialize/destroy DMA maps for the large pool 0
268 */
269static void
270hatm_destroy_rmaps(struct hatm_softc *sc)
271{
272	u_int b;
273
274	DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers..."));
275	if (sc->rmaps != NULL) {
276		for (b = 0; b < sc->lbufs_size; b++)
277			bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]);
278		free(sc->rmaps, M_DEVBUF);
279	}
280	if (sc->lbufs != NULL)
281		free(sc->lbufs, M_DEVBUF);
282}
283
284static void
285hatm_init_rmaps(struct hatm_softc *sc)
286{
287	u_int b;
288	int err;
289
290	DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers..."));
291	sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size,
292	    M_DEVBUF, M_ZERO | M_WAITOK);
293
294	/* allocate and create the DMA maps for the large pool */
295	sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size,
296	    M_DEVBUF, M_WAITOK);
297	for (b = 0; b < sc->lbufs_size; b++) {
298		err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]);
299		if (err != 0)
300			panic("bus_dmamap_create: %d\n", err);
301	}
302}
303
304/*
305 * Initialize and destroy small mbuf page pointers and pages
306 */
307static void
308hatm_destroy_smbufs(struct hatm_softc *sc)
309{
310	u_int i, b;
311	struct mbuf_page *pg;
312	struct mbuf_chunk_hdr *h;
313
314	if (sc->mbuf_pages != NULL) {
315		for (i = 0; i < sc->mbuf_npages; i++) {
316			pg = sc->mbuf_pages[i];
317			for (b = 0; b < pg->hdr.nchunks; b++) {
318				h = (struct mbuf_chunk_hdr *) ((char *)pg +
319				    b * pg->hdr.chunksize + pg->hdr.hdroff);
320				if (h->flags & MBUF_CARD)
321					if_printf(sc->ifp,
322					    "%s -- mbuf page=%u card buf %u\n",
323					    __func__, i, b);
324				if (h->flags & MBUF_USED)
325					if_printf(sc->ifp,
326					    "%s -- mbuf page=%u used buf %u\n",
327					    __func__, i, b);
328			}
329			bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
330			bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
331			free(pg, M_DEVBUF);
332		}
333		free(sc->mbuf_pages, M_DEVBUF);
334	}
335}
336
337static void
338hatm_init_smbufs(struct hatm_softc *sc)
339{
340	sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) *
341	    sc->mbuf_max_pages, M_DEVBUF, M_WAITOK);
342	sc->mbuf_npages = 0;
343}
344
345/*
346 * Initialize/destroy TPDs. This is called from attach/detach.
347 */
348static void
349hatm_destroy_tpds(struct hatm_softc *sc)
350{
351	struct tpd *t;
352
353	if (sc->tpds.base == NULL)
354		return;
355
356	DBG(sc, ATTACH, ("releasing TPDs ..."));
357	if (sc->tpd_nfree != sc->tpd_total)
		if_printf(sc->ifp, "%u of %u TPDs still in use\n",
		    sc->tpd_total - sc->tpd_nfree, sc->tpd_total);
360	while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
361		SLIST_REMOVE_HEAD(&sc->tpd_free, link);
362		bus_dmamap_destroy(sc->tx_tag, t->map);
363	}
364	hatm_destroy_dmamem(&sc->tpds);
365	free(sc->tpd_used, M_DEVBUF);
366	DBG(sc, ATTACH, ("... done"));
367}
368static int
369hatm_init_tpds(struct hatm_softc *sc)
370{
371	int error;
372	u_int i;
373	struct tpd *t;
374
375	DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total));
376	error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds);
377	if (error != 0) {
378		DBG(sc, ATTACH, ("... dmamem error=%d", error));
379		return (error);
380	}
381
382	/* put all the TPDs on the free list and allocate DMA maps */
383	for (i = 0; i < sc->tpd_total; i++) {
384		t = TPD_ADDR(sc, i);
385		t->no = i;
386		t->mbuf = NULL;
387		error = bus_dmamap_create(sc->tx_tag, 0, &t->map);
388		if (error != 0) {
389			DBG(sc, ATTACH, ("... dmamap error=%d", error));
390			while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
391				SLIST_REMOVE_HEAD(&sc->tpd_free, link);
392				bus_dmamap_destroy(sc->tx_tag, t->map);
393			}
394			hatm_destroy_dmamem(&sc->tpds);
395			return (error);
396		}
397
398		SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
399	}
400
401	/* allocate and zero bitmap */
402	sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8,
403	    M_DEVBUF, M_ZERO | M_WAITOK);
404	sc->tpd_nfree = sc->tpd_total;
405
406	DBG(sc, ATTACH, ("... done"));
407
408	return (0);
409}
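
/*
 * sc->tpd_used is a bitmap with one bit per TPD, hence the
 * (tpd_total + 7) / 8 allocation above.  The TPD_TST_USED()/TPD_CLR_USED()
 * macros used below are defined in if_hatmvar.h; conceptually they behave
 * like this sketch (the real definitions may differ in detail):
 */
#if 0	/* example sketch, not compiled */
#define	EX_TPD_SET_USED(sc, i)	((sc)->tpd_used[(i) / 8] |= (1 << ((i) % 8)))
#define	EX_TPD_CLR_USED(sc, i)	((sc)->tpd_used[(i) / 8] &= ~(1 << ((i) % 8)))
#define	EX_TPD_TST_USED(sc, i)	((sc)->tpd_used[(i) / 8] & (1 << ((i) % 8)))
#endif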
410
411/*
 * Free all the TPDs that were given to the card.
413 * An mbuf chain may be attached to a TPD - free it also and
414 * unload its associated DMA map.
415 */
416static void
417hatm_stop_tpds(struct hatm_softc *sc)
418{
419	u_int i;
420	struct tpd *t;
421
422	DBG(sc, ATTACH, ("free TPDs ..."));
423	for (i = 0; i < sc->tpd_total; i++) {
424		if (TPD_TST_USED(sc, i)) {
425			t = TPD_ADDR(sc, i);
426			if (t->mbuf) {
427				m_freem(t->mbuf);
428				t->mbuf = NULL;
429				bus_dmamap_unload(sc->tx_tag, t->map);
430			}
431			TPD_CLR_USED(sc, i);
432			SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
433			sc->tpd_nfree++;
434		}
435	}
436}
437
438/*
439 * This frees ALL resources of this interface and leaves the structure
440 * in an indeterminate state. This is called just before detaching or
441 * on a failed attach. No lock should be held.
442 */
443static void
444hatm_destroy(struct hatm_softc *sc)
445{
446	u_int cid;
447
448	bus_teardown_intr(sc->dev, sc->irqres, sc->ih);
449
450	hatm_destroy_rmaps(sc);
451	hatm_destroy_smbufs(sc);
452	hatm_destroy_tpds(sc);
453
454	if (sc->vcc_zone != NULL) {
455		for (cid = 0; cid < HE_MAX_VCCS; cid++)
456			if (sc->vccs[cid] != NULL)
457				uma_zfree(sc->vcc_zone, sc->vccs[cid]);
458		uma_zdestroy(sc->vcc_zone);
459	}
460
461	/*
	 * Release all memory allocated to the various queues and status
	 * pages. These have their own flag which shows whether they are
	 * really allocated.
465	 */
466	hatm_destroy_dmamem(&sc->irq_0.mem);
467	hatm_destroy_dmamem(&sc->rbp_s0.mem);
468	hatm_destroy_dmamem(&sc->rbp_l0.mem);
469	hatm_destroy_dmamem(&sc->rbp_s1.mem);
470	hatm_destroy_dmamem(&sc->rbrq_0.mem);
471	hatm_destroy_dmamem(&sc->rbrq_1.mem);
472	hatm_destroy_dmamem(&sc->tbrq.mem);
473	hatm_destroy_dmamem(&sc->tpdrq.mem);
474	hatm_destroy_dmamem(&sc->hsp_mem);
475
476	if (sc->irqres != NULL)
477		bus_release_resource(sc->dev, SYS_RES_IRQ,
478		    sc->irqid, sc->irqres);
479
480	if (sc->tx_tag != NULL)
481		if (bus_dma_tag_destroy(sc->tx_tag))
			if_printf(sc->ifp, "tx DMA tag busy\n");
483
484	if (sc->mbuf_tag != NULL)
485		if (bus_dma_tag_destroy(sc->mbuf_tag))
486			if_printf(sc->ifp, "mbuf DMA tag busy\n");
487
488	if (sc->parent_tag != NULL)
489		if (bus_dma_tag_destroy(sc->parent_tag))
490			if_printf(sc->ifp, "parent DMA tag busy\n");
491
492	if (sc->memres != NULL)
493		bus_release_resource(sc->dev, SYS_RES_MEMORY,
494		    sc->memid, sc->memres);
495
496	sysctl_ctx_free(&sc->sysctl_ctx);
497
498	cv_destroy(&sc->cv_rcclose);
499	cv_destroy(&sc->vcc_cv);
500	mtx_destroy(&sc->mtx);
501
502	if (sc->ifp != NULL)
503		if_free(sc->ifp);
504}
505
506/*
507 * 4.4 Card reset
508 */
509static int
510hatm_reset(struct hatm_softc *sc)
511{
512	u_int v, count;
513
514	WRITE4(sc, HE_REGO_RESET_CNTL, 0x00);
515	BARRIER_W(sc);
516	WRITE4(sc, HE_REGO_RESET_CNTL, 0xff);
517	BARRIER_RW(sc);
518	count = 0;
519	while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) {
520		BARRIER_R(sc);
521		if (++count == 100) {
522			if_printf(sc->ifp, "reset failed\n");
523			return (ENXIO);
524		}
525		DELAY(1000);
526	}
527	return (0);
528}
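
/*
 * The reset sequence above is an instance of a generic "poll a register
 * until a bit comes up, with timeout" pattern.  A hypothetical helper
 * (not part of this driver) expressing the same loop could look like:
 */
#if 0	/* example sketch, not compiled */
static int
hatm_wait_reg(struct hatm_softc *sc, u_int reg, uint32_t mask,
    u_int tries, u_int delay_us)
{
	while ((READ4(sc, reg) & mask) == 0) {
		BARRIER_R(sc);
		if (tries-- == 0)
			return (ETIMEDOUT);
		DELAY(delay_us);
	}
	return (0);
}
#endif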
529
530/*
531 * 4.5 Set Bus Width
532 */
533static void
534hatm_init_bus_width(struct hatm_softc *sc)
535{
536	uint32_t v, v1;
537
538	v = READ4(sc, HE_REGO_HOST_CNTL);
539	BARRIER_R(sc);
540	if (v & HE_REGM_HOST_BUS64) {
541		sc->pci64 = 1;
542		v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
543		v1 |= HE_PCIM_CTL0_64BIT;
544		pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4);
545
546		v |= HE_REGM_HOST_DESC_RD64
547		    | HE_REGM_HOST_DATA_RD64
548		    | HE_REGM_HOST_DATA_WR64;
549		WRITE4(sc, HE_REGO_HOST_CNTL, v);
550		BARRIER_W(sc);
551	} else {
552		sc->pci64 = 0;
553		v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
554		v &= ~HE_PCIM_CTL0_64BIT;
555		pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
556	}
557}
558
559/*
560 * 4.6 Set Host Endianness
561 */
562static void
563hatm_init_endianess(struct hatm_softc *sc)
564{
565	uint32_t v;
566
567	v = READ4(sc, HE_REGO_LB_SWAP);
568	BARRIER_R(sc);
569#if BYTE_ORDER == BIG_ENDIAN
570	v |= HE_REGM_LBSWAP_INTR_SWAP |
571	    HE_REGM_LBSWAP_DESC_WR_SWAP |
572	    HE_REGM_LBSWAP_BIG_ENDIAN;
573	v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
574	    HE_REGM_LBSWAP_DESC_RD_SWAP |
575	    HE_REGM_LBSWAP_DATA_RD_SWAP);
576#else
577	v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
578	    HE_REGM_LBSWAP_DESC_RD_SWAP |
579	    HE_REGM_LBSWAP_DATA_RD_SWAP |
580	    HE_REGM_LBSWAP_INTR_SWAP |
581	    HE_REGM_LBSWAP_DESC_WR_SWAP |
582	    HE_REGM_LBSWAP_BIG_ENDIAN);
583#endif
584
585	if (sc->he622)
586		v |= HE_REGM_LBSWAP_XFER_SIZE;
587
588	WRITE4(sc, HE_REGO_LB_SWAP, v);
589	BARRIER_W(sc);
590}
591
592/*
593 * 4.7 Read EEPROM
594 */
595static uint8_t
596hatm_read_prom_byte(struct hatm_softc *sc, u_int addr)
597{
598	uint32_t val, tmp_read, byte_read;
599	u_int i, j;
600	int n;
601
602	val = READ4(sc, HE_REGO_HOST_CNTL);
603	val &= HE_REGM_HOST_PROM_BITS;
604	BARRIER_R(sc);
605
606	val |= HE_REGM_HOST_PROM_WREN;
607	WRITE4(sc, HE_REGO_HOST_CNTL, val);
608	BARRIER_W(sc);
609
610	/* send READ */
611	for (i = 0; i < nitems(readtab); i++) {
612		WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]);
613		BARRIER_W(sc);
614		DELAY(EEPROM_DELAY);
615	}
616
617	/* send ADDRESS */
618	for (n = 7, j = 0; n >= 0; n--) {
619		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
620		    (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
621		BARRIER_W(sc);
622		DELAY(EEPROM_DELAY);
623		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
624		    (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
625		BARRIER_W(sc);
626		DELAY(EEPROM_DELAY);
627	}
628
629	val &= ~HE_REGM_HOST_PROM_WREN;
630	WRITE4(sc, HE_REGO_HOST_CNTL, val);
631	BARRIER_W(sc);
632
633	/* read DATA */
634	byte_read = 0;
635	for (n = 7, j = 0; n >= 0; n--) {
636		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
637		BARRIER_W(sc);
638		DELAY(EEPROM_DELAY);
639		tmp_read = READ4(sc, HE_REGO_HOST_CNTL);
640		byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT)
641				>> HE_REGS_HOST_PROM_DATA_OUT) << n);
642		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
643		BARRIER_W(sc);
644		DELAY(EEPROM_DELAY);
645	}
646	WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
647	BARRIER_W(sc);
648	DELAY(EEPROM_DELAY);
649
650	return (byte_read);
651}
652
653static void
654hatm_init_read_eeprom(struct hatm_softc *sc)
655{
656	u_int n, count;
657	u_char byte;
658	uint32_t v;
659
660	for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) {
661		byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count);
662		if (n > 0 || byte != ' ')
663			sc->prod_id[n++] = byte;
664	}
665	while (n > 0 && sc->prod_id[n-1] == ' ')
666		n--;
667	sc->prod_id[n] = '\0';
668
669	for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) {
670		byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count);
671		if (n > 0 || byte != ' ')
672			sc->rev[n++] = byte;
673	}
674	while (n > 0 && sc->rev[n-1] == ' ')
675		n--;
676	sc->rev[n] = '\0';
677	IFP2IFATM(sc->ifp)->mib.hw_version = sc->rev[0];
678
679	IFP2IFATM(sc->ifp)->mib.serial =  hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0;
680	IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8;
681	IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16;
682	IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24;
683
684	v =  hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0;
685	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8;
686	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16;
687	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24;
688
689	switch (v) {
690	  case HE_MEDIA_UTP155:
691		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
692		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
693		break;
694
695	  case HE_MEDIA_MMF155:
696		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
697		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
698		break;
699
700	  case HE_MEDIA_MMF622:
701		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_622;
702		IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622;
703		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M;
704		sc->he622 = 1;
705		break;
706
707	  case HE_MEDIA_SMF155:
708		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
709		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
710		break;
711
712	  case HE_MEDIA_SMF622:
713		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_622;
714		IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622;
715		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M;
716		sc->he622 = 1;
717		break;
718	}
719
720	IFP2IFATM(sc->ifp)->mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0);
721	IFP2IFATM(sc->ifp)->mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1);
722	IFP2IFATM(sc->ifp)->mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2);
723	IFP2IFATM(sc->ifp)->mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3);
724	IFP2IFATM(sc->ifp)->mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4);
725	IFP2IFATM(sc->ifp)->mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5);
726}
727
728/*
729 * Clear unused interrupt queue
730 */
731static void
732hatm_clear_irq(struct hatm_softc *sc, u_int group)
733{
734	WRITE4(sc, HE_REGO_IRQ_BASE(group), 0);
735	WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0);
736	WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0);
737	WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
738}
739
740/*
741 * 4.10 Initialize interrupt queues
742 */
743static void
744hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group)
745{
746	u_int i;
747
748	if (q->size == 0) {
749		hatm_clear_irq(sc, group);
750		return;
751	}
752
753	q->group = group;
754	q->sc = sc;
755	q->irq = q->mem.base;
756	q->head = 0;
757	q->tailp = q->irq + (q->size - 1);
758	*q->tailp = 0;
759
760	for (i = 0; i < q->size; i++)
761		q->irq[i] = HE_REGM_ITYPE_INVALID;
762
763	WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr);
764	WRITE4(sc, HE_REGO_IRQ_HEAD(group),
765	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
766	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH));
767	WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line);
768	WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
769}
770
771/*
772 * 5.1.3 Initialize connection memory
773 */
774static void
775hatm_init_cm(struct hatm_softc *sc)
776{
777	u_int rsra, mlbm, rabr, numbuffs;
778	u_int tsra, tabr, mtpd;
779	u_int n;
780
781	for (n = 0; n < HE_CONFIG_TXMEM; n++)
782		WRITE_TCM4(sc, n, 0);
783	for (n = 0; n < HE_CONFIG_RXMEM; n++)
784		WRITE_RCM4(sc, n, 0);
785
786	numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs;
787
788	rsra = 0;
789	mlbm = ((rsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
790	rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
791	sc->rsrb = roundup2(rabr + 2048, 2 * IFP2IFATM(sc->ifp)->mib.max_vccs);
792
793	tsra = 0;
794	sc->tsrb = tsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8;
795	sc->tsrc = sc->tsrb + IFP2IFATM(sc->ifp)->mib.max_vccs * 4;
796	sc->tsrd = sc->tsrc + IFP2IFATM(sc->ifp)->mib.max_vccs * 2;
797	tabr = sc->tsrd + IFP2IFATM(sc->ifp)->mib.max_vccs * 1;
798	mtpd = roundup2(tabr + 1024, 16 * IFP2IFATM(sc->ifp)->mib.max_vccs);
799
800	DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
801	    rsra, mlbm, rabr, sc->rsrb));
802	DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x",
803	    tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd));
804
805	WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb);
806	WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc);
807	WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd);
808	WRITE4(sc, HE_REGO_TMABR_BA, tabr);
809	WRITE4(sc, HE_REGO_TPD_BA, mtpd);
810
811	WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb);
812	WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm);
813	WRITE4(sc, HE_REGO_RCMABR_BA, rabr);
814
815	BARRIER_W(sc);
816}
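
/*
 * A note on the layout arithmetic above: "(x + 0x7ff) & ~0x7ff" rounds x
 * up to the next multiple of 0x800, and roundup2() from <sys/param.h> is
 * the generic power-of-two form:
 *
 *	roundup2(x, y) == ((x) + ((y) - 1)) & ~((y) - 1)
 *
 * For illustration, with max_vccs = 256 the computation above gives
 * mlbm = (0 + 256 * 8 + 0x7ff) & ~0x7ff = 0x800.
 */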
817
818/*
819 * 5.1.4 Initialize Local buffer Pools
820 */
821static void
822hatm_init_rx_buffer_pool(struct hatm_softc *sc,
823	u_int num,		/* bank */
824	u_int start,		/* start row */
825	u_int numbuffs		/* number of entries */
826)
827{
828	u_int row_size;		/* bytes per row */
829	uint32_t row_addr;	/* start address of this row */
830	u_int lbuf_size;	/* bytes per lbuf */
831	u_int lbufs_per_row;	/* number of lbufs per memory row */
832	uint32_t lbufd_index;	/* index of lbuf descriptor */
833	uint32_t lbufd_addr;	/* address of lbuf descriptor */
834	u_int lbuf_row_cnt;	/* current lbuf in current row */
835	uint32_t lbuf_addr;	/* address of current buffer */
836	u_int i;
837
838	row_size = sc->bytes_per_row;
839	row_addr = start * row_size;
840	lbuf_size = sc->cells_per_lbuf * 48;
841	lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
842
843	/* descriptor index */
844	lbufd_index = num;
845
846	/* 2 words per entry */
847	lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
848
849	/* write head of queue */
850	WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index);
851
852	lbuf_row_cnt = 0;
853	for (i = 0; i < numbuffs; i++) {
854		lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
855
856		WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
857
858		lbufd_index += 2;
859		WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
860
861		if (++lbuf_row_cnt == lbufs_per_row) {
862			lbuf_row_cnt = 0;
863			row_addr += row_size;
864		}
865
866		lbufd_addr += 2 * 2;
867	}
868
869	WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2);
870	WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs);
871
872	BARRIER_W(sc);
873}
874
875static void
876hatm_init_tx_buffer_pool(struct hatm_softc *sc,
877	u_int start,		/* start row */
878	u_int numbuffs		/* number of entries */
879)
880{
881	u_int row_size;		/* bytes per row */
882	uint32_t row_addr;	/* start address of this row */
883	u_int lbuf_size;	/* bytes per lbuf */
884	u_int lbufs_per_row;	/* number of lbufs per memory row */
885	uint32_t lbufd_index;	/* index of lbuf descriptor */
886	uint32_t lbufd_addr;	/* address of lbuf descriptor */
887	u_int lbuf_row_cnt;	/* current lbuf in current row */
888	uint32_t lbuf_addr;	/* address of current buffer */
889	u_int i;
890
891	row_size = sc->bytes_per_row;
892	row_addr = start * row_size;
893	lbuf_size = sc->cells_per_lbuf * 48;
894	lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
895
896	/* descriptor index */
897	lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs;
898
899	/* 2 words per entry */
900	lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
901
902	/* write head of queue */
903	WRITE4(sc, HE_REGO_TLBF_H, lbufd_index);
904
905	lbuf_row_cnt = 0;
906	for (i = 0; i < numbuffs; i++) {
907		lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
908
909		WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
910		lbufd_index++;
911		WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
912
913		if (++lbuf_row_cnt == lbufs_per_row) {
914			lbuf_row_cnt = 0;
915			row_addr += row_size;
916		}
917
918		lbufd_addr += 2;
919	}
920
921	WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1);
922	BARRIER_W(sc);
923}
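
/*
 * Example of the local-buffer arithmetic used by the two functions above
 * (values illustrative): with cells_per_lbuf = 2 an lbuf covers
 * 2 * 48 = 96 bytes, and a buffer starting at byte offset 3072 of its
 * memory bank is entered into the descriptor as 3072 / 32 = 96, i.e. the
 * descriptors store the buffer's byte address divided by 32.
 */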
924
925/*
926 * 5.1.5 Initialize Intermediate Receive Queues
927 */
928static void
929hatm_init_imed_queues(struct hatm_softc *sc)
930{
931	u_int n;
932
933	if (sc->he622) {
934		for (n = 0; n < 8; n++) {
935			WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f);
936			WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f);
937		}
938	} else {
939		for (n = 0; n < 8; n++) {
940			WRITE4(sc, HE_REGO_INMQ_S(n), n);
941			WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8);
942		}
943	}
944}
945
946/*
947 * 5.1.7 Init CS block
948 */
949static void
950hatm_init_cs_block(struct hatm_softc *sc)
951{
952	u_int n, i;
953	u_int clkfreg, cellrate, decr, tmp;
954	static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR;
955	static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL;
956	static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT;
957	static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR;
958	static const uint32_t rtatr[2] = HE_REGT_CS_RTATR;
959	static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC;
960	static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF;
961
962	/* Clear Rate Controller Start Times and Occupied Flags */
963	for (n = 0; n < 32; n++)
964		WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0);
965
966	clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
967	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
968	decr = cellrate / 32;
969
970	for (n = 0; n < 16; n++) {
971		tmp = clkfreg / cellrate;
972		WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1);
973		cellrate -= decr;
974	}
975
976	i = (sc->cells_per_lbuf == 2) ? 0
977	   :(sc->cells_per_lbuf == 4) ? 1
978	   :                            2;
979
980	/* table 5.2 */
981	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]);
982	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]);
983	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]);
984	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]);
985	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]);
986
987	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]);
988	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]);
989	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]);
990
991	WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]);
992	WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]);
993
994	WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]);
995	WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]);
996
997	WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]);
998	WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]);
999	WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]);
1000	WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]);
1001	WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]);
1002	WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]);
1003
1004	WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]);
1005	WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]);
1006
1007	WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8);
1008
1009	for (n = 0; n < 8; n++)
1010		WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0);
1011}
1012
1013/*
1014 * 5.1.8 CS Block Connection Memory Initialisation
1015 */
1016static void
1017hatm_init_cs_block_cm(struct hatm_softc *sc)
1018{
1019	u_int n, i;
1020	u_int expt, mant, etrm, wcr, ttnrm, tnrm;
1021	uint32_t rate;
1022	uint32_t clkfreq, cellrate, decr;
1023	uint32_t *rg, rtg, val = 0;
1024	uint64_t drate;
1025	u_int buf, buf_limit;
1026	uint32_t base = READ4(sc, HE_REGO_RCMABR_BA);
1027
1028	for (n = 0; n < HE_REGL_CM_GQTBL; n++)
1029		WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0);
1030	for (n = 0; n < HE_REGL_CM_RGTBL; n++)
1031		WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0);
1032
1033	tnrm = 0;
1034	for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) {
1035		expt = (n >> 5) & 0x1f;
1036		mant = ((n & 0x18) << 4) | 0x7f;
1037		wcr = (1 << expt) * (mant + 512) / 512;
1038		etrm = n & 0x7;
1039		ttnrm = wcr / 10 / (1 << etrm);
1040		if (ttnrm > 255)
1041			ttnrm = 255;
1042		else if(ttnrm < 2)
1043			ttnrm = 2;
1044		tnrm = (tnrm << 8) | (ttnrm & 0xff);
1045		if (n % 4 == 0)
1046			WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm);
1047	}
1048
1049	clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
1050	buf_limit = 4;
1051
1052	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1053	decr = cellrate / 32;
1054
1055	/* compute GRID top row in 1000 * cps */
1056	for (n = 0; n < 16; n++) {
1057		u_int interval = clkfreq / cellrate;
1058		sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval;
1059		cellrate -= decr;
1060	}
1061
1062	/* compute the other rows according to 2.4 */
1063	for (i = 1; i < 16; i++)
1064		for (n = 0; n < 16; n++)
1065			sc->rate_grid[i][n] = sc->rate_grid[i-1][n] /
1066			    ((i < 14) ? 2 : 4);
1067
1068	/* first entry is line rate */
1069	n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M);
1070	expt = (n >> 9) & 0x1f;
1071	mant = n & 0x1f0;
1072	sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512;
1073
1074	/* now build the conversion table - each 32 bit word contains
1075	 * two entries - this gives a total of 0x400 16 bit entries.
1076	 * This table maps the truncated ATMF rate version into a grid index */
1077	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1078	rg = &sc->rate_grid[15][15];
1079
1080	for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) {
1081		/* unpack the ATMF rate */
1082		expt = rate >> 5;
1083		mant = (rate & 0x1f) << 4;
1084
1085		/* get the cell rate - minimum is 10 per second */
1086		drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512;
1087		if (drate < 10 * 1000)
1088			drate = 10 * 1000;
1089
1090		/* now look up the grid index */
1091		while (drate >= *rg && rg-- > &sc->rate_grid[0][0])
1092			;
1093		rg++;
1094		rtg = rg - &sc->rate_grid[0][0];
1095
1096		/* now compute the buffer limit */
1097		buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000;
1098		if (buf == 0)
1099			buf = 1;
1100		else if (buf > buf_limit)
1101			buf = buf_limit;
1102
1103		/* make value */
1104		val = (val << 16) | (rtg << 8) | buf;
1105
1106		/* write */
1107		if (rate % 2 == 1)
1108			WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val);
1109	}
1110}
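
/*
 * Worked example for the conversion table above (illustration only):
 * index rate = 0x253 unpacks to expt = 0x253 >> 5 = 18 and
 * mant = (0x253 & 0x1f) << 4 = 304, giving
 * drate = 2^18 * 1000 * (304 + 512) / 512 = 417792000 in the
 * "1000 * cps" units of rate_grid[], i.e. about 417792 cells/s.  That
 * value is then mapped to the nearest grid entry and a buffer limit as
 * in the loop body.
 */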
1111
1112/*
1113 * Clear an unused receive group buffer pool
1114 */
1115static void
1116hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large)
1117{
1118	WRITE4(sc, HE_REGO_RBP_S(large, group), 0);
1119	WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1120	WRITE4(sc, HE_REGO_RBP_QI(large, group), 1);
1121	WRITE4(sc, HE_REGO_RBP_BL(large, group), 0);
1122}
1123
1124/*
1125 * Initialize a receive group buffer pool
1126 */
1127static void
1128hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group,
1129    u_int large)
1130{
1131	if (q->size == 0) {
1132		hatm_clear_rpool(sc, group, large);
1133		return;
1134	}
1135
1136	bzero(q->mem.base, q->mem.size);
1137	q->rbp = q->mem.base;
1138	q->head = q->tail = 0;
1139
1140	DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large],
1141	    (u_long)q->mem.paddr));
1142
1143	WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr);
1144	WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1145	WRITE4(sc, HE_REGO_RBP_QI(large, group),
1146	    ((q->size - 1) << HE_REGS_RBP_SIZE) |
1147	    HE_REGM_RBP_INTR_ENB |
1148	    (q->thresh << HE_REGS_RBP_THRESH));
1149	WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1);
1150}
1151
1152/*
1153 * Clear an unused receive buffer return queue
1154 */
1155static void
1156hatm_clear_rbrq(struct hatm_softc *sc, u_int group)
1157{
1158	WRITE4(sc, HE_REGO_RBRQ_ST(group), 0);
1159	WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1160	WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH));
1161	WRITE4(sc, HE_REGO_RBRQ_I(group), 0);
1162}
1163
1164/*
1165 * Initialize receive buffer return queue
1166 */
1167static void
1168hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
1169{
1170	if (rq->size == 0) {
1171		hatm_clear_rbrq(sc, group);
1172		return;
1173	}
1174
1175	rq->rbrq = rq->mem.base;
1176	rq->head = 0;
1177
1178	DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr));
1179
1180	WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr);
1181	WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1182	WRITE4(sc, HE_REGO_RBRQ_Q(group),
1183	    (rq->thresh << HE_REGS_RBRQ_THRESH) |
1184	    ((rq->size - 1) << HE_REGS_RBRQ_SIZE));
1185	WRITE4(sc, HE_REGO_RBRQ_I(group),
1186	    (rq->tout << HE_REGS_RBRQ_TIME) |
1187	    (rq->pcnt << HE_REGS_RBRQ_COUNT));
1188}
1189
1190/*
1191 * Clear an unused transmit buffer return queue N
1192 */
1193static void
1194hatm_clear_tbrq(struct hatm_softc *sc, u_int group)
1195{
1196	WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0);
1197	WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1198	WRITE4(sc, HE_REGO_TBRQ_S(group), 0);
1199	WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1);
1200}
1201
1202/*
1203 * Initialize transmit buffer return queue N
1204 */
1205static void
1206hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group)
1207{
1208	if (tq->size == 0) {
1209		hatm_clear_tbrq(sc, group);
1210		return;
1211	}
1212
1213	tq->tbrq = tq->mem.base;
1214	tq->head = 0;
1215
1216	DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr));
1217
1218	WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr);
1219	WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1220	WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1);
1221	WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh);
1222}
1223
1224/*
1225 * Initialize TPDRQ
1226 */
1227static void
1228hatm_init_tpdrq(struct hatm_softc *sc)
1229{
1230	struct hetpdrq *tq;
1231
1232	tq = &sc->tpdrq;
1233	tq->tpdrq = tq->mem.base;
1234	tq->tail = tq->head = 0;
1235
1236	DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr));
1237
1238	WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr);
1239	WRITE4(sc, HE_REGO_TPDRQ_T, 0);
1240	WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1);
1241}
1242
1243/*
1244 * Function can be called by the infrastructure to start the card.
1245 */
1246static void
1247hatm_init(void *p)
1248{
1249	struct hatm_softc *sc = p;
1250
1251	mtx_lock(&sc->mtx);
1252	hatm_stop(sc);
1253	hatm_initialize(sc);
1254	mtx_unlock(&sc->mtx);
1255}
1256
1257enum {
1258	CTL_ISTATS,
1259};
1260
1261/*
1262 * Sysctl handler
1263 */
1264static int
1265hatm_sysctl(SYSCTL_HANDLER_ARGS)
1266{
1267	struct hatm_softc *sc = arg1;
1268	uint32_t *ret;
1269	int error;
1270	size_t len;
1271
1272	switch (arg2) {
1273
1274	  case CTL_ISTATS:
1275		len = sizeof(sc->istats);
1276		break;
1277
1278	  default:
1279		panic("bad control code");
1280	}
1281
1282	ret = malloc(len, M_TEMP, M_WAITOK);
1283	mtx_lock(&sc->mtx);
1284
1285	switch (arg2) {
1286
1287	  case CTL_ISTATS:
1288		sc->istats.mcc += READ4(sc, HE_REGO_MCC);
1289		sc->istats.oec += READ4(sc, HE_REGO_OEC);
1290		sc->istats.dcc += READ4(sc, HE_REGO_DCC);
1291		sc->istats.cec += READ4(sc, HE_REGO_CEC);
1292		bcopy(&sc->istats, ret, sizeof(sc->istats));
1293		break;
1294	}
1295	mtx_unlock(&sc->mtx);
1296
1297	error = SYSCTL_OUT(req, ret, len);
1298	free(ret, M_TEMP);
1299
1300	return (error);
1301}
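
/*
 * The handler above backs the opaque node hw.atm.<nameunit>.istats that
 * is created in hatm_attach().  A userland consumer could fetch the raw
 * counters roughly like this (sketch only; the istats layout is private
 * to the driver, so the data is shown as plain 32-bit words and the unit
 * number is illustrative):
 */
#if 0	/* userland example, not compiled here */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	uint32_t buf[256];
	size_t len = sizeof(buf);

	if (sysctlbyname("hw.atm.hatm0.istats", buf, &len, NULL, 0) == 0)
		printf("%zu 32-bit counters\n", len / sizeof(buf[0]));
	return (0);
}
#endif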
1302
1303static int
1304kenv_getuint(struct hatm_softc *sc, const char *var,
1305    u_int *ptr, u_int def, int rw)
1306{
1307	char full[IFNAMSIZ + 3 + 20];
1308	char *val, *end;
1309	u_int u;
1310
1311	*ptr = def;
1312
1313	if (rw != 0) {
1314		if (SYSCTL_ADD_UINT(&sc->sysctl_ctx,
1315		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, var,
1316		    CTLFLAG_RW, ptr, 0, "") == NULL)
1317			return (ENOMEM);
1318	} else {
1319		if (SYSCTL_ADD_UINT(&sc->sysctl_ctx,
1320		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, var,
1321		    CTLFLAG_RD, ptr, 0, "") == NULL)
1322			return (ENOMEM);
1323	}
1324
1325	snprintf(full, sizeof(full), "hw.%s.%s",
1326	    device_get_nameunit(sc->dev), var);
1327
1328	if ((val = kern_getenv(full)) == NULL)
1329		return (0);
1330	u = strtoul(val, &end, 0);
1331	if (end == val || *end != '\0') {
1332		freeenv(val);
1333		return (EINVAL);
1334	}
1335	freeenv(val);
1336	if (bootverbose)
1337		if_printf(sc->ifp, "%s=%u\n", full, u);
1338	*ptr = u;
1339	return (0);
1340}
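
/*
 * kenv_getuint() looks up "hw.<nameunit>.<var>" in the kernel environment,
 * so the parameters requested by hatm_configure() below can be tuned from
 * loader.conf, for example (unit number and values purely illustrative):
 *
 *	hw.hatm0.rbps0_size="2048"
 *	hw.hatm0.irq0_thresh="256"
 *
 * The current values are also exported as sysctls beneath
 * hw.atm.<nameunit>.
 */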
1341
1342/*
1343 * Set configurable parameters. Many of these are configurable via
1344 * kenv.
1345 */
1346static int
1347hatm_configure(struct hatm_softc *sc)
1348{
1349	/* Receive buffer pool 0 small */
1350	kenv_getuint(sc, "rbps0_size", &sc->rbp_s0.size,
1351	    HE_CONFIG_RBPS0_SIZE, 0);
1352	kenv_getuint(sc, "rbps0_thresh", &sc->rbp_s0.thresh,
1353	    HE_CONFIG_RBPS0_THRESH, 0);
1354	sc->rbp_s0.bsize = MBUF0_SIZE;
1355
1356	/* Receive buffer pool 0 large */
1357	kenv_getuint(sc, "rbpl0_size", &sc->rbp_l0.size,
1358	    HE_CONFIG_RBPL0_SIZE, 0);
1359	kenv_getuint(sc, "rbpl0_thresh", &sc->rbp_l0.thresh,
1360	    HE_CONFIG_RBPL0_THRESH, 0);
1361	sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;
1362
1363	/* Receive buffer return queue 0 */
1364	kenv_getuint(sc, "rbrq0_size", &sc->rbrq_0.size,
1365	    HE_CONFIG_RBRQ0_SIZE, 0);
1366	kenv_getuint(sc, "rbrq0_thresh", &sc->rbrq_0.thresh,
1367	    HE_CONFIG_RBRQ0_THRESH, 0);
1368	kenv_getuint(sc, "rbrq0_tout", &sc->rbrq_0.tout,
1369	    HE_CONFIG_RBRQ0_TOUT, 0);
1370	kenv_getuint(sc, "rbrq0_pcnt", &sc->rbrq_0.pcnt,
1371	    HE_CONFIG_RBRQ0_PCNT, 0);
1372
1373	/* Receive buffer pool 1 small */
1374	kenv_getuint(sc, "rbps1_size", &sc->rbp_s1.size,
1375	    HE_CONFIG_RBPS1_SIZE, 0);
1376	kenv_getuint(sc, "rbps1_thresh", &sc->rbp_s1.thresh,
1377	    HE_CONFIG_RBPS1_THRESH, 0);
1378	sc->rbp_s1.bsize = MBUF1_SIZE;
1379
1380	/* Receive buffer return queue 1 */
1381	kenv_getuint(sc, "rbrq1_size", &sc->rbrq_1.size,
1382	    HE_CONFIG_RBRQ1_SIZE, 0);
1383	kenv_getuint(sc, "rbrq1_thresh", &sc->rbrq_1.thresh,
1384	    HE_CONFIG_RBRQ1_THRESH, 0);
1385	kenv_getuint(sc, "rbrq1_tout", &sc->rbrq_1.tout,
1386	    HE_CONFIG_RBRQ1_TOUT, 0);
1387	kenv_getuint(sc, "rbrq1_pcnt", &sc->rbrq_1.pcnt,
1388	    HE_CONFIG_RBRQ1_PCNT, 0);
1389
1390	/* Interrupt queue 0 */
1391	kenv_getuint(sc, "irq0_size", &sc->irq_0.size,
1392	    HE_CONFIG_IRQ0_SIZE, 0);
1393	kenv_getuint(sc, "irq0_thresh", &sc->irq_0.thresh,
1394	    HE_CONFIG_IRQ0_THRESH, 0);
1395	sc->irq_0.line = HE_CONFIG_IRQ0_LINE;
1396
1397	/* Transmit buffer return queue 0 */
1398	kenv_getuint(sc, "tbrq0_size", &sc->tbrq.size,
1399	    HE_CONFIG_TBRQ_SIZE, 0);
1400	kenv_getuint(sc, "tbrq0_thresh", &sc->tbrq.thresh,
1401	    HE_CONFIG_TBRQ_THRESH, 0);
1402
1403	/* Transmit buffer ready queue */
1404	kenv_getuint(sc, "tpdrq_size", &sc->tpdrq.size,
1405	    HE_CONFIG_TPDRQ_SIZE, 0);
1406	/* Max TPDs per VCC */
1407	kenv_getuint(sc, "tpdmax", &sc->max_tpd,
1408	    HE_CONFIG_TPD_MAXCC, 0);
1409
1410	/* external mbuf pages */
1411	kenv_getuint(sc, "max_mbuf_pages", &sc->mbuf_max_pages,
1412	    HE_CONFIG_MAX_MBUF_PAGES, 0);
1413
1414	/* mpsafe */
1415	kenv_getuint(sc, "mpsafe", &sc->mpsafe, 0, 0);
1416	if (sc->mpsafe != 0)
1417		sc->mpsafe = INTR_MPSAFE;
1418
1419	return (0);
1420}
1421
1422#ifdef HATM_DEBUG
1423
1424/*
1425 * Get TSRs from connection memory
1426 */
1427static int
1428hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
1429{
1430	struct hatm_softc *sc = arg1;
1431	int error, i, j;
1432	uint32_t *val;
1433
1434	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);
1435
1436	mtx_lock(&sc->mtx);
1437	for (i = 0; i < HE_MAX_VCCS; i++)
1438		for (j = 0; j <= 14; j++)
1439			val[15 * i + j] = READ_TSR(sc, i, j);
1440	mtx_unlock(&sc->mtx);
1441
1442	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
1443	free(val, M_TEMP);
1444	if (error != 0 || req->newptr == NULL)
1445		return (error);
1446
1447	return (EPERM);
1448}
1449
1450/*
1451 * Get TPDs from connection memory
1452 */
1453static int
1454hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
1455{
1456	struct hatm_softc *sc = arg1;
1457	int error, i, j;
1458	uint32_t *val;
1459
1460	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);
1461
1462	mtx_lock(&sc->mtx);
1463	for (i = 0; i < HE_MAX_VCCS; i++)
1464		for (j = 0; j < 16; j++)
1465			val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
1466	mtx_unlock(&sc->mtx);
1467
1468	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
1469	free(val, M_TEMP);
1470	if (error != 0 || req->newptr == NULL)
1471		return (error);
1472
1473	return (EPERM);
1474}
1475
1476/*
1477 * Get mbox registers
1478 */
1479static int
1480hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
1481{
1482	struct hatm_softc *sc = arg1;
1483	int error, i;
1484	uint32_t *val;
1485
1486	val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);
1487
1488	mtx_lock(&sc->mtx);
1489	for (i = 0; i < HE_REGO_CS_END; i++)
1490		val[i] = READ_MBOX4(sc, i);
1491	mtx_unlock(&sc->mtx);
1492
1493	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
1494	free(val, M_TEMP);
1495	if (error != 0 || req->newptr == NULL)
1496		return (error);
1497
1498	return (EPERM);
1499}
1500
1501/*
1502 * Get connection memory
1503 */
1504static int
1505hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
1506{
1507	struct hatm_softc *sc = arg1;
1508	int error, i;
1509	uint32_t *val;
1510
1511	val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK);
1512
1513	mtx_lock(&sc->mtx);
1514	val[0] = READ4(sc, HE_REGO_RCMABR_BA);
1515	for (i = 0; i < HE_CONFIG_RXMEM; i++)
1516		val[i + 1] = READ_RCM4(sc, i);
1517	mtx_unlock(&sc->mtx);
1518
1519	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1));
1520	free(val, M_TEMP);
1521	if (error != 0 || req->newptr == NULL)
1522		return (error);
1523
1524	return (EPERM);
1525}
1526
1527/*
1528 * Get local buffer memory
1529 */
1530static int
1531hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS)
1532{
1533	struct hatm_softc *sc = arg1;
1534	int error, i;
1535	uint32_t *val;
1536	u_int bytes = (1 << 21);
1537
1538	val = malloc(bytes, M_TEMP, M_WAITOK);
1539
1540	mtx_lock(&sc->mtx);
1541	for (i = 0; i < bytes / 4; i++)
1542		val[i] = READ_LB4(sc, i);
1543	mtx_unlock(&sc->mtx);
1544
1545	error = SYSCTL_OUT(req, val, bytes);
1546	free(val, M_TEMP);
1547	if (error != 0 || req->newptr == NULL)
1548		return (error);
1549
1550	return (EPERM);
1551}
1552
1553/*
1554 * Get all card registers
1555 */
1556static int
1557hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS)
1558{
1559	struct hatm_softc *sc = arg1;
1560	int error, i;
1561	uint32_t *val;
1562
1563	val = malloc(HE_REGO_END, M_TEMP, M_WAITOK);
1564
1565	mtx_lock(&sc->mtx);
1566	for (i = 0; i < HE_REGO_END; i += 4)
1567		val[i / 4] = READ4(sc, i);
1568	mtx_unlock(&sc->mtx);
1569
1570	error = SYSCTL_OUT(req, val, HE_REGO_END);
1571	free(val, M_TEMP);
1572	if (error != 0 || req->newptr == NULL)
1573		return (error);
1574
1575	return (EPERM);
1576}
1577#endif
1578
1579/*
1580 * Suni register access
1581 */
1582/*
1583 * read at most n SUNI registers starting at reg into val
1584 */
1585static int
1586hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
1587{
1588	u_int i;
1589	struct hatm_softc *sc = ifatm->ifp->if_softc;
1590
1591	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1592		return (EINVAL);
1593	if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
		*n = (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4 - reg;
1595
1596	mtx_assert(&sc->mtx, MA_OWNED);
1597	for (i = 0; i < *n; i++)
1598		val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i));
1599
1600	return (0);
1601}
1602
1603/*
1604 * change the bits given by mask to them in val in register reg
1605 */
1606static int
1607hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
1608{
1609	uint32_t regval;
1610	struct hatm_softc *sc = ifatm->ifp->if_softc;
1611
1612	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1613		return (EINVAL);
1614
1615	mtx_assert(&sc->mtx, MA_OWNED);
1616	regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
1617	regval = (regval & ~mask) | (val & mask);
1618	WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);
1619
1620	return (0);
1621}
1622
1623static struct utopia_methods hatm_utopia_methods = {
1624	hatm_utopia_readregs,
1625	hatm_utopia_writereg,
1626};
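
/*
 * Both methods must be called with the softc mutex held (see the
 * mtx_assert()s above); the same mutex is handed to utopia_attach() in
 * hatm_attach() below.  A minimal sketch of reading one SUNI register
 * directly through the method above:
 */
#if 0	/* example sketch, not compiled */
static uint8_t
hatm_suni_peek(struct hatm_softc *sc, u_int reg)
{
	uint8_t regval = 0;
	u_int count = 1;

	mtx_lock(&sc->mtx);
	(void)hatm_utopia_readregs(IFP2IFATM(sc->ifp), reg, &regval, &count);
	mtx_unlock(&sc->mtx);
	return (regval);
}
#endif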
1627
1628/*
1629 * Detach - if it is running, stop. Destroy.
1630 */
1631static int
1632hatm_detach(device_t dev)
1633{
1634	struct hatm_softc *sc = device_get_softc(dev);
1635
1636	mtx_lock(&sc->mtx);
1637	hatm_stop(sc);
1638	if (sc->utopia.state & UTP_ST_ATTACHED) {
1639		utopia_stop(&sc->utopia);
1640		utopia_detach(&sc->utopia);
1641	}
1642	mtx_unlock(&sc->mtx);
1643
1644	atm_ifdetach(sc->ifp);
1645
1646	hatm_destroy(sc);
1647
1648	return (0);
1649}
1650
1651/*
1652 * Attach to the device. Assume that no locking is needed here.
 * All resources we allocate here are freed by calling hatm_destroy.
1654 */
1655static int
1656hatm_attach(device_t dev)
1657{
1658	struct hatm_softc *sc;
1659	int error;
1660	uint32_t v;
1661	struct ifnet *ifp;
1662
1663	sc = device_get_softc(dev);
1664
1665	ifp = sc->ifp = if_alloc(IFT_ATM);
1666	if (ifp == NULL) {
1667		device_printf(dev, "could not if_alloc()\n");
1668		return (ENOSPC);
1669	}
1670
1671	sc->dev = dev;
1672	IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE155;
1673	IFP2IFATM(sc->ifp)->mib.serial = 0;
1674	IFP2IFATM(sc->ifp)->mib.hw_version = 0;
1675	IFP2IFATM(sc->ifp)->mib.sw_version = 0;
1676	IFP2IFATM(sc->ifp)->mib.vpi_bits = HE_CONFIG_VPI_BITS;
1677	IFP2IFATM(sc->ifp)->mib.vci_bits = HE_CONFIG_VCI_BITS;
1678	IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
1679	IFP2IFATM(sc->ifp)->mib.max_vccs = HE_MAX_VCCS;
1680	IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
1681	sc->he622 = 0;
1682	IFP2IFATM(sc->ifp)->phy = &sc->utopia;
1683
1684	SLIST_INIT(&sc->tpd_free);
1685
1686	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1687	cv_init(&sc->vcc_cv, "HEVCCcv");
1688	cv_init(&sc->cv_rcclose, "RCClose");
1689
1690	sysctl_ctx_init(&sc->sysctl_ctx);
1691
1692	/*
1693	 * 4.2 BIOS Configuration
1694	 */
1695	v = pci_read_config(dev, PCIR_COMMAND, 2);
1696	v |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
1697	pci_write_config(dev, PCIR_COMMAND, v, 2);
1698
1699	/*
1700	 * 4.3 PCI Bus Controller-Specific Initialisation
1701	 */
1702	v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4);
1703	v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT;
1704#if BYTE_ORDER == BIG_ENDIAN && 0
1705	v |= HE_PCIM_CTL0_BIGENDIAN;
1706#endif
1707	pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4);
1708
1709	/*
1710	 * Map memory
1711	 */
1712	sc->memid = PCIR_BAR(0);
1713	sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
1714	    RF_ACTIVE);
1715	if (sc->memres == NULL) {
1716		device_printf(dev, "could not map memory\n");
1717		error = ENXIO;
1718		goto failed;
1719	}
1720	sc->memh = rman_get_bushandle(sc->memres);
1721	sc->memt = rman_get_bustag(sc->memres);
1722
1723	/*
	 * Allocate a DMA tag for subsequent allocations
1725	 */
1726	if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1727	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1728	    NULL, NULL,
1729	    BUS_SPACE_MAXSIZE_32BIT, 1,
1730	    BUS_SPACE_MAXSIZE_32BIT, 0,
1731	    NULL, NULL, &sc->parent_tag)) {
1732		device_printf(dev, "could not allocate DMA tag\n");
1733		error = ENOMEM;
1734		goto failed;
1735	}
1736
1737	if (bus_dma_tag_create(sc->parent_tag, 1, 0,
1738	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1739	    NULL, NULL,
1740	    MBUF_ALLOC_SIZE, 1,
1741	    MBUF_ALLOC_SIZE, 0,
1742	    NULL, NULL, &sc->mbuf_tag)) {
1743		device_printf(dev, "could not allocate mbuf DMA tag\n");
1744		error = ENOMEM;
1745		goto failed;
1746	}
1747
1748	/*
	 * Allocate a DMA tag for packets to send. Here we have a problem with
	 * the specification of the maximum number of segments: theoretically
	 * it would be (the size of the transmit ring - 1) multiplied by 3,
	 * but that does not work in practice. So make the maximum number of
	 * TPDs occupied by one packet a configuration parameter.
1754	 */
1755	if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1756	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1757	    HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0,
1758	    NULL, NULL, &sc->tx_tag)) {
1759		device_printf(dev, "could not allocate TX tag\n");
1760		error = ENOMEM;
1761		goto failed;
1762	}
1763
1764	/*
1765	 * Setup the interrupt
1766	 */
1767	sc->irqid = 0;
1768	sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
1769	    RF_SHAREABLE | RF_ACTIVE);
1770	if (sc->irqres == 0) {
1771		device_printf(dev, "could not allocate irq\n");
1772		error = ENXIO;
1773		goto failed;
1774	}
1775
1776	ifp->if_softc = sc;
1777	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1778
1779	/*
1780	 * Make the sysctl tree
1781	 */
1782	error = ENOMEM;
1783	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1784	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
1785	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
1786		goto failed;
1787
1788	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1789	    OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS,
1790	    hatm_sysctl, "LU", "internal statistics") == NULL)
1791		goto failed;
1792
1793#ifdef HATM_DEBUG
1794	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1795	    OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1796	    hatm_sysctl_tsr, "S", "transmission status registers") == NULL)
1797		goto failed;
1798
1799	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1800	    OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1801	    hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL)
1802		goto failed;
1803
1804	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1805	    OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1806	    hatm_sysctl_mbox, "S", "mbox registers") == NULL)
1807		goto failed;
1808
1809	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1810	    OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1811	    hatm_sysctl_cm, "S", "connection memory") == NULL)
1812		goto failed;
1813
1814	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1815	    OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1816	    hatm_sysctl_heregs, "S", "card registers") == NULL)
1817		goto failed;
1818
1819	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1820	    OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1821	    hatm_sysctl_lbmem, "S", "local memory") == NULL)
1822		goto failed;
1823
1824	kenv_getuint(sc, "debug", &sc->debug, HATM_DEBUG, 1);
1825#endif
1826
1827	/*
1828	 * Configure
1829	 */
1830	if ((error = hatm_configure(sc)) != 0)
1831		goto failed;
1832
1833	/*
1834	 * Compute memory parameters
1835	 */
1836	if (sc->rbp_s0.size != 0) {
1837		sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3;
1838		sc->rbp_s0.mem.size = sc->rbp_s0.size * 8;
1839		sc->rbp_s0.mem.align = sc->rbp_s0.mem.size;
1840	}
1841	if (sc->rbp_l0.size != 0) {
1842		sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3;
1843		sc->rbp_l0.mem.size = sc->rbp_l0.size * 8;
1844		sc->rbp_l0.mem.align = sc->rbp_l0.mem.size;
1845	}
1846	if (sc->rbp_s1.size != 0) {
1847		sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3;
1848		sc->rbp_s1.mem.size = sc->rbp_s1.size * 8;
1849		sc->rbp_s1.mem.align = sc->rbp_s1.mem.size;
1850	}
1851	if (sc->rbrq_0.size != 0) {
1852		sc->rbrq_0.mem.size = sc->rbrq_0.size * 8;
1853		sc->rbrq_0.mem.align = sc->rbrq_0.mem.size;
1854	}
1855	if (sc->rbrq_1.size != 0) {
1856		sc->rbrq_1.mem.size = sc->rbrq_1.size * 8;
1857		sc->rbrq_1.mem.align = sc->rbrq_1.mem.size;
1858	}
1859
1860	sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t);
1861	sc->irq_0.mem.align = 4 * 1024;
1862
1863	sc->tbrq.mem.size = sc->tbrq.size * 4;
1864	sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */
1865
1866	sc->tpdrq.mem.size = sc->tpdrq.size * 8;
1867	sc->tpdrq.mem.align = sc->tpdrq.mem.size;
1868
1869	sc->hsp_mem.size = sizeof(struct he_hsp);
1870	sc->hsp_mem.align = 1024;
1871
1872	sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size;
1873	sc->tpd_total = sc->tbrq.size + sc->tpdrq.size;
1874	sc->tpds.align = 64;
1875	sc->tpds.size = sc->tpd_total * HE_TPD_SIZE;
1876
1877	hatm_init_rmaps(sc);
1878	hatm_init_smbufs(sc);
1879	if ((error = hatm_init_tpds(sc)) != 0)
1880		goto failed;
1881
1882	/*
1883	 * Allocate memory
1884	 */
1885	if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 ||
1886	    (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 ||
1887	    (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 ||
1888	    (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0)
1889		goto failed;
1890
1891	if (sc->rbp_s0.mem.size != 0 &&
1892	    (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem)))
1893		goto failed;
1894	if (sc->rbp_l0.mem.size != 0 &&
1895	    (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem)))
1896		goto failed;
1897	if (sc->rbp_s1.mem.size != 0 &&
1898	    (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem)))
1899		goto failed;
1900
1901	if (sc->rbrq_0.mem.size != 0 &&
1902	    (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem)))
1903		goto failed;
1904	if (sc->rbrq_1.mem.size != 0 &&
1905	    (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem)))
1906		goto failed;
1907
	if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) {
		device_printf(dev, "cannot allocate zone for vccs\n");
		error = ENOMEM;
		goto failed;
	}
1913
1914	/*
1915	 * 4.4 Reset the card.
1916	 */
1917	if ((error = hatm_reset(sc)) != 0)
1918		goto failed;
1919
1920	/*
1921	 * Read the prom.
1922	 */
1923	hatm_init_bus_width(sc);
1924	hatm_init_read_eeprom(sc);
1925	hatm_init_endianess(sc);
1926
1927	/*
1928	 * Initialize interface
1929	 */
1930	ifp->if_flags = IFF_SIMPLEX;
1931	ifp->if_ioctl = hatm_ioctl;
1932	ifp->if_start = hatm_start;
1933	ifp->if_init = hatm_init;
1934
1935	utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
1936	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1937	    &hatm_utopia_methods);
1938	utopia_init_media(&sc->utopia);
1939
1940	/* these two SUNI routines need the lock */
1941	mtx_lock(&sc->mtx);
1942	/* poll while we are not running */
1943	sc->utopia.flags |= UTP_FL_POLL_CARRIER;
1944	utopia_start(&sc->utopia);
1945	utopia_reset(&sc->utopia);
1946	mtx_unlock(&sc->mtx);
1947
1948	atm_ifattach(ifp);
1949
1950#ifdef ENABLE_BPF
1951	bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
1952#endif
1953
	error = bus_setup_intr(dev, sc->irqres, sc->mpsafe | INTR_TYPE_NET,
	    NULL, hatm_intr, &sc->irq_0, &sc->ih);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt\n");
		hatm_detach(dev);
		return (error);
	}

	return (0);

  failed:
	hatm_destroy(sc);
	return (error);
}

/*
 * Start the interface. Assumes the state left behind by attach().
 */
void
hatm_initialize(struct hatm_softc *sc)
{
	uint32_t v;
	u_int cid;
	static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT;

	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	hatm_init_bus_width(sc);
	hatm_init_endianess(sc);

	if_printf(sc->ifp, "%s, Rev. %s, S/N %u, "
	    "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n",
	    sc->prod_id, sc->rev, IFP2IFATM(sc->ifp)->mib.serial,
	    IFP2IFATM(sc->ifp)->mib.esi[0], IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2],
	    IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5],
	    sc->pci64 ? 64 : 32);

	/*
	 * 4.8 SDRAM Controller Initialisation
	 * 4.9 Initialize RNUM value
	 */
	if (sc->he622)
		WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
	else
		WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
	BARRIER_W(sc);

	v = READ4(sc, HE_REGO_LB_SWAP);
	BARRIER_R(sc);
	v |= 0xf << HE_REGS_LBSWAP_RNUM;
	WRITE4(sc, HE_REGO_LB_SWAP, v);
	BARRIER_W(sc);

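	/*
	 * Use only interrupt queue 0: set it up, clear queues 1-3 and map
	 * all receive groups to queue 0.
	 */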
	hatm_init_irq(sc, &sc->irq_0, 0);
	hatm_clear_irq(sc, 1);
	hatm_clear_irq(sc, 2);
	hatm_clear_irq(sc, 3);

	WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
	WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
	WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
	WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
	BARRIER_W(sc);

	/*
	 * 4.11 Enable PCI Bus Controller State Machine
	 */
	v = READ4(sc, HE_REGO_HOST_CNTL);
	BARRIER_R(sc);
	v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
	    HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
	WRITE4(sc, HE_REGO_HOST_CNTL, v);
	BARRIER_W(sc);

	/*
	 * 5.1.1 Generic configuration state
	 */
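	/*
	 * The layout table has one row for the 155 Mbit/s and one for the
	 * 622 Mbit/s variant of the card, indexed by sc->he622.
	 */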
	sc->cells_per_row = layout[sc->he622][0];
	sc->bytes_per_row = layout[sc->he622][1];
	sc->r0_numrows = layout[sc->he622][2];
	sc->tx_numrows = layout[sc->he622][3];
	sc->r1_numrows = layout[sc->he622][4];
	sc->r0_startrow = layout[sc->he622][5];
	sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
	sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
	sc->cells_per_lbuf = layout[sc->he622][6];

	sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row /
	    sc->cells_per_lbuf);
	sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row /
	    sc->cells_per_lbuf);
	sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row /
	    sc->cells_per_lbuf);

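	/* Clamp the per-pool buffer counts; the limits appear to be hardware maxima. */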
	if (sc->r0_numbuffs > 2560)
		sc->r0_numbuffs = 2560;
	if (sc->r1_numbuffs > 2560)
		sc->r1_numbuffs = 2560;
	if (sc->tx_numbuffs > 5120)
		sc->tx_numbuffs = 5120;

	DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
	    "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
	    "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
	    "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
	    sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
	    sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
	    sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));

	/*
	 * 5.1.2 Configure hardware-dependent registers
	 */
	if (sc->he622) {
		WRITE4(sc, HE_REGO_LBARB,
		    (0x2 << HE_REGS_LBARB_SLICE) |
		    (0xf << HE_REGS_LBARB_RNUM) |
		    (0x3 << HE_REGS_LBARB_THPRI) |
		    (0x3 << HE_REGS_LBARB_RHPRI) |
		    (0x2 << HE_REGS_LBARB_TLPRI) |
		    (0x1 << HE_REGS_LBARB_RLPRI) |
		    (0x28 << HE_REGS_LBARB_BUS_MULT) |
		    (0x50 << HE_REGS_LBARB_NET_PREF));
		BARRIER_W(sc);
		WRITE4(sc, HE_REGO_SDRAMCON,
		    /* HW bug: don't use banking */
		    /* HE_REGM_SDRAMCON_BANK | */
		    HE_REGM_SDRAMCON_WIDE |
		    (0x384 << HE_REGS_SDRAMCON_REF));
		BARRIER_W(sc);
		WRITE4(sc, HE_REGO_RCMCONFIG,
		    (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
		WRITE4(sc, HE_REGO_TCMCONFIG,
		    (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
	} else {
		WRITE4(sc, HE_REGO_LBARB,
		    (0x2 << HE_REGS_LBARB_SLICE) |
		    (0xf << HE_REGS_LBARB_RNUM) |
		    (0x3 << HE_REGS_LBARB_THPRI) |
		    (0x3 << HE_REGS_LBARB_RHPRI) |
		    (0x2 << HE_REGS_LBARB_TLPRI) |
		    (0x1 << HE_REGS_LBARB_RLPRI) |
		    (0x46 << HE_REGS_LBARB_BUS_MULT) |
		    (0x8C << HE_REGS_LBARB_NET_PREF));
		BARRIER_W(sc);
		WRITE4(sc, HE_REGO_SDRAMCON,
		    /* HW bug: don't use banking */
		    /* HE_REGM_SDRAMCON_BANK | */
		    (0x150 << HE_REGS_SDRAMCON_REF));
		BARRIER_W(sc);
		WRITE4(sc, HE_REGO_RCMCONFIG,
		    (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
		WRITE4(sc, HE_REGO_TCMCONFIG,
		    (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
	}
	WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48));

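	/*
	 * Reset the receive local-buffer queue pointers, program the
	 * receive thresholds and publish the free-buffer counts of both
	 * local buffer pools.
	 */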
	WRITE4(sc, HE_REGO_RLBC_H, 0);
	WRITE4(sc, HE_REGO_RLBC_T, 0);
	WRITE4(sc, HE_REGO_RLBC_H2, 0);

	WRITE4(sc, HE_REGO_RXTHRSH, 512);
	WRITE4(sc, HE_REGO_LITHRSH, 256);

	WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs);
	WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs);

	if (sc->he622) {
		WRITE4(sc, HE_REGO_RCCONFIG,
		    (8 << HE_REGS_RCCONFIG_UTDELAY) |
		    (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
		    (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC));
		WRITE4(sc, HE_REGO_TXCONFIG,
		    (32 << HE_REGS_TXCONFIG_THRESH) |
		    (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
		    (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
	} else {
		WRITE4(sc, HE_REGO_RCCONFIG,
		    (0 << HE_REGS_RCCONFIG_UTDELAY) |
		    HE_REGM_RCCONFIG_UT_MODE |
		    (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
		    (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC));
		WRITE4(sc, HE_REGO_TXCONFIG,
		    (32 << HE_REGS_TXCONFIG_THRESH) |
		    HE_REGM_TXCONFIG_UTMODE |
		    (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
		    (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
	}

	WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0);

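	/*
	 * Enable the PHY interface; direct OAM cells to group 1 if the
	 * second small buffer pool (and thus RBRQ1) is configured,
	 * otherwise to group 0.
	 */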
	if (sc->rbp_s1.size != 0) {
		WRITE4(sc, HE_REGO_RHCONFIG,
		    HE_REGM_RHCONFIG_PHYENB |
		    ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
		    (1 << HE_REGS_RHCONFIG_OAM_GID));
	} else {
		WRITE4(sc, HE_REGO_RHCONFIG,
		    HE_REGM_RHCONFIG_PHYENB |
		    ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
		    (0 << HE_REGS_RHCONFIG_OAM_GID));
	}
	BARRIER_W(sc);

	hatm_init_cm(sc);

	hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs);
	hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs);
	hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs);

	hatm_init_imed_queues(sc);

	/*
	 * 5.1.6 Application tunable Parameters
	 */
	WRITE4(sc, HE_REGO_MCC, 0);
	WRITE4(sc, HE_REGO_OEC, 0);
	WRITE4(sc, HE_REGO_DCC, 0);
	WRITE4(sc, HE_REGO_CEC, 0);

	hatm_init_cs_block(sc);
	hatm_init_cs_block_cm(sc);

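	/*
	 * Configure the receive buffer pools and return queues that are
	 * actually used (group 0 and the small pool of group 1) and clear
	 * all remaining pools and queues.
	 */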
	hatm_init_rpool(sc, &sc->rbp_s0, 0, 0);
	hatm_init_rpool(sc, &sc->rbp_l0, 0, 1);
	hatm_init_rpool(sc, &sc->rbp_s1, 1, 0);
	hatm_clear_rpool(sc, 1, 1);
	hatm_clear_rpool(sc, 2, 0);
	hatm_clear_rpool(sc, 2, 1);
	hatm_clear_rpool(sc, 3, 0);
	hatm_clear_rpool(sc, 3, 1);
	hatm_clear_rpool(sc, 4, 0);
	hatm_clear_rpool(sc, 4, 1);
	hatm_clear_rpool(sc, 5, 0);
	hatm_clear_rpool(sc, 5, 1);
	hatm_clear_rpool(sc, 6, 0);
	hatm_clear_rpool(sc, 6, 1);
	hatm_clear_rpool(sc, 7, 0);
	hatm_clear_rpool(sc, 7, 1);
	hatm_init_rbrq(sc, &sc->rbrq_0, 0);
	hatm_init_rbrq(sc, &sc->rbrq_1, 1);
	hatm_clear_rbrq(sc, 2);
	hatm_clear_rbrq(sc, 3);
	hatm_clear_rbrq(sc, 4);
	hatm_clear_rbrq(sc, 5);
	hatm_clear_rbrq(sc, 6);
	hatm_clear_rbrq(sc, 7);

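	/* Reset the host-side bookkeeping for large receive buffers. */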
	sc->lbufs_next = 0;
	bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size);

	hatm_init_tbrq(sc, &sc->tbrq, 0);
	hatm_clear_tbrq(sc, 1);
	hatm_clear_tbrq(sc, 2);
	hatm_clear_tbrq(sc, 3);
	hatm_clear_tbrq(sc, 4);
	hatm_clear_tbrq(sc, 5);
	hatm_clear_tbrq(sc, 6);
	hatm_clear_tbrq(sc, 7);

	hatm_init_tpdrq(sc);

	WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 0x104780 : 0x800));

	/*
	 * Initialize HSP
	 */
	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
	sc->hsp = sc->hsp_mem.base;
	WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);

	/*
	 * 5.1.12 Enable transmit and receive
	 * Enable bus master and interrupts
	 */
	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
	v |= 0x18000000;
	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);

	v = READ4(sc, HE_REGO_RCCONFIG);
	v |= HE_REGM_RCCONFIG_RXENB;
	WRITE4(sc, HE_REGO_RCCONFIG, v);

	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
	v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;

	sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;

	/* reopen vccs */
	for (cid = 0; cid < HE_MAX_VCCS; cid++)
		if (sc->vccs[cid] != NULL)
			hatm_load_vc(sc, cid, 1);

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);
}

/*
 * This function stops the card and frees all resources allocated after
 * attach. It must be called with the global lock held.
 */
void
hatm_stop(struct hatm_softc *sc)
{
	uint32_t v;
	u_int i, p, cid;
	struct mbuf_chunk_hdr *ch;
	struct mbuf_page *pg;

	mtx_assert(&sc->mtx, MA_OWNED);

	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);

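	/* Poll the carrier again while we are not running (see attach). */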
	sc->utopia.flags |= UTP_FL_POLL_CARRIER;

	/*
	 * Stop and reset the hardware so that everything remains
	 * stable.
	 */
	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
	v &= ~0x18000000;
	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);

	v = READ4(sc, HE_REGO_RCCONFIG);
	v &= ~HE_REGM_RCCONFIG_RXENB;
	WRITE4(sc, HE_REGO_RCCONFIG, v);

	WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
	BARRIER_W(sc);

	v = READ4(sc, HE_REGO_HOST_CNTL);
	BARRIER_R(sc);
	v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
	WRITE4(sc, HE_REGO_HOST_CNTL, v);
	BARRIER_W(sc);

	/*
	 * Disable bus master and interrupts
	 */
	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
	v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);

	(void)hatm_reset(sc);

	/*
	 * Resetting the card also resets the SUNI, so re-initialize it.
	 */
	utopia_reset(&sc->utopia);

	/*
	 * Give any threads waiting on a VCC close a chance to run. They
	 * stop waiting once they see that IFF_DRV_RUNNING has been cleared.
	 */
	cv_broadcast(&sc->vcc_cv);
	cv_broadcast(&sc->cv_rcclose);

	/*
	 * Now free all resources.
	 */

	/*
	 * Free the large mbufs that are given to the card.
	 */
	for (i = 0; i < sc->lbufs_size; i++) {
		if (sc->lbufs[i] != NULL) {
			bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
			m_freem(sc->lbufs[i]);
			sc->lbufs[i] = NULL;
		}
	}

	/*
	 * Free small buffers
	 */
	for (p = 0; p < sc->mbuf_npages; p++) {
		pg = sc->mbuf_pages[p];
		for (i = 0; i < pg->hdr.nchunks; i++) {
			ch = (struct mbuf_chunk_hdr *) ((char *)pg +
			    i * pg->hdr.chunksize + pg->hdr.hdroff);
			if (ch->flags & MBUF_CARD) {
				ch->flags &= ~MBUF_CARD;
				ch->flags |= MBUF_USED;
				hatm_ext_free(&sc->mbuf_list[pg->hdr.pool],
				    (struct mbufx_free *)((u_char *)ch -
				    pg->hdr.hdroff));
			}
		}
	}

	hatm_stop_tpds(sc);

	/*
	 * Free all partially reassembled PDUs on any VCC.
	 */
	for (cid = 0; cid < HE_MAX_VCCS; cid++) {
		if (sc->vccs[cid] != NULL) {
			if (sc->vccs[cid]->chain != NULL) {
				m_freem(sc->vccs[cid]->chain);
				sc->vccs[cid]->chain = NULL;
				sc->vccs[cid]->last = NULL;
			}
			if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN |
			    HE_VCC_TX_OPEN))) {
				hatm_tx_vcc_closed(sc, cid);
				uma_zfree(sc->vcc_zone, sc->vccs[cid]);
				sc->vccs[cid] = NULL;
				sc->open_vccs--;
			} else {
				sc->vccs[cid]->vflags = 0;
				sc->vccs[cid]->ntpds = 0;
			}
		}
	}

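	/*
	 * Clear the DMA memory of the queues and pools so that a subsequent
	 * re-initialisation starts from a clean state.
	 */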
	if (sc->rbp_s0.size != 0)
		bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
	if (sc->rbp_l0.size != 0)
		bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
	if (sc->rbp_s1.size != 0)
		bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
	if (sc->rbrq_0.size != 0)
		bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
	if (sc->rbrq_1.size != 0)
		bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);

	bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
	bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
}

/************************************************************
 *
 * Driver infrastructure
 */
devclass_t hatm_devclass;

static device_method_t hatm_methods[] = {
	DEVMETHOD(device_probe,		hatm_probe),
	DEVMETHOD(device_attach,	hatm_attach),
	DEVMETHOD(device_detach,	hatm_detach),
	{0, 0}
};
static driver_t hatm_driver = {
	"hatm",
	hatm_methods,
	sizeof(struct hatm_softc),
};
DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);