/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * 	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * ForeHE driver.
 *
 * This file contains the module and driver infrastructure stuff as well
 * as a couple of utility functions and the entire initialisation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm.c 139749 2005-01-06 01:43:34Z imp $");

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#ifdef ENABLE_BPF
#include <net/bpf.h>
#endif
#include <netinet/in.h>
#include <netinet/if_atm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>
#include <dev/hatm/if_hatmconf.h>
#include <dev/hatm/if_hatmreg.h>
#include <dev/hatm/if_hatmvar.h>

static const struct {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
} hatm_devs[] = {
	{ 0x1127, 0x400,
	  "FORE HE" },
	{ 0, 0, NULL }
};

SYSCTL_DECL(_hw_atm);

MODULE_DEPEND(hatm, utopia, 1, 1, 1);
MODULE_DEPEND(hatm, pci, 1, 1, 1);
MODULE_DEPEND(hatm, atm, 1, 1, 1);

#define EEPROM_DELAY	400 /* microseconds */

/* Read from EEPROM 0000 0011b */
static const uint32_t readtab[] = {
	HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK,
	0,
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	0,				/* 0 */
	HE_REGM_HOST_PROM_CLOCK,
	HE_REGM_HOST_PROM_DATA_IN,	/* 0 */
	HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
	HE_REGM_HOST_PROM_DATA_IN,	/* 1 */
	HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
	HE_REGM_HOST_PROM_DATA_IN,	/* 1 */
};
static const uint32_t clocktab[] = {
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0, HE_REGM_HOST_PROM_CLOCK,
	0
};
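
/*
 * A note on the tables above (inferred from their use below): each EEPROM
 * bit is shifted out by a pair of register writes that toggle
 * HE_REGM_HOST_PROM_CLOCK, with the data bit driven on
 * HE_REGM_HOST_PROM_DATA_IN.  readtab thus selects the PROM and shifts out
 * the READ opcode 0000 0011b, while clocktab provides the eight clock
 * pulses used for the address and data phases in hatm_read_prom_byte().
 */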

/*
 * Convert cell rate to ATM Forum format
 */
u_int
hatm_cps2atmf(uint32_t pcr)
{
	u_int e;

	if (pcr == 0)
		return (0);
	pcr <<= 9;
	e = 0;
	while (pcr > (1024 - 1)) {
		e++;
		pcr >>= 1;
	}
	return ((1 << 14) | (e << 9) | (pcr & 0x1ff));
}
u_int
hatm_atmf2cps(uint32_t fcr)
{
	fcr &= 0x7fff;

	return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512
	  * (fcr >> 14));
}
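
/*
 * Worked example (for illustration only): the ATM Forum format used here
 * packs a non-zero flag, a 5-bit exponent e and a 9-bit mantissa m, so that
 * the rate is roughly (1 << e) * (512 + m) / 512 cells per second.
 * hatm_cps2atmf(353207) - approximately the OC-3c cell rate - gives
 * e = 18, m = 177, i.e. 0x64b1, and hatm_atmf2cps(0x64b1) returns 352768;
 * the small loss is the truncation inherent in the 9-bit mantissa.
 */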

/************************************************************
 *
 * Initialisation
 */
/*
 * Probe for a HE controller
 */
static int
hatm_probe(device_t dev)
{
	int i;

	for (i = 0; hatm_devs[i].name; i++)
		if (pci_get_vendor(dev) == hatm_devs[i].vid &&
		    pci_get_device(dev) == hatm_devs[i].did) {
			device_set_desc(dev, hatm_devs[i].name);
			return (0);
		}
	return (ENXIO);
}

/*
 * Allocate and map DMA-able memory. We support only contiguous mappings.
 */
static void
dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error)
		return;
	KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffUL,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem)
{
	int error;

	mem->base = NULL;

	/*
	 * Alignment does not work in the bus_dmamem_alloc function below
	 * on FreeBSD. malloc seems to align objects at least to the object
	 * size so increase the size to the alignment if the size is less
	 * than the alignment.
	 * XXX on sparc64 this is (probably) not needed.
	 */
	if (mem->size < mem->align)
		mem->size = mem->align;

	error = bus_dma_tag_create(sc->parent_tag, mem->align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1,
	    BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &mem->tag);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "DMA tag create (%s)\n", what);
		return (error);
	}

	error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "DMA mem alloc (%s): %d\n",
		    what, error);
		bus_dma_tag_destroy(mem->tag);
		mem->base = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "DMA map load (%s): %d\n",
		    what, error);
		bus_dmamem_free(mem->tag, mem->base, mem->map);
		bus_dma_tag_destroy(mem->tag);
		mem->base = NULL;
		return (error);
	}

	DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size,
	    mem->align, mem->base, (u_long)mem->paddr));

	return (0);
}
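
/*
 * Usage note: callers fill in mem->size and mem->align before calling
 * hatm_alloc_dmamem() (see the queue setup in hatm_attach()); on success
 * mem->base is the kernel virtual address and mem->paddr the bus address
 * of the region, and hatm_destroy_dmamem() below undoes all three setup
 * steps.
 */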

/*
 * Destroy all the resources of a DMA-able memory region.
 */
static void
hatm_destroy_dmamem(struct dmamem *mem)
{
	if (mem->base != NULL) {
		bus_dmamap_unload(mem->tag, mem->map);
		bus_dmamem_free(mem->tag, mem->base, mem->map);
		(void)bus_dma_tag_destroy(mem->tag);
		mem->base = NULL;
	}
}

/*
 * Initialize/destroy DMA maps for the large pool 0
 */
static void
hatm_destroy_rmaps(struct hatm_softc *sc)
{
	u_int b;

	DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers..."));
	if (sc->rmaps != NULL) {
		for (b = 0; b < sc->lbufs_size; b++)
			bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]);
		free(sc->rmaps, M_DEVBUF);
	}
	if (sc->lbufs != NULL)
		free(sc->lbufs, M_DEVBUF);
}

static void
hatm_init_rmaps(struct hatm_softc *sc)
{
	u_int b;
	int err;

	DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers..."));
	sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size,
	    M_DEVBUF, M_ZERO | M_WAITOK);

	/* allocate and create the DMA maps for the large pool */
	sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size,
	    M_DEVBUF, M_WAITOK);
	for (b = 0; b < sc->lbufs_size; b++) {
		err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]);
		if (err != 0)
			panic("bus_dmamap_create: %d\n", err);
	}
}

/*
 * Initialize and destroy small mbuf page pointers and pages
 */
static void
hatm_destroy_smbufs(struct hatm_softc *sc)
{
	u_int i, b;
	struct mbuf_page *pg;
	struct mbuf_chunk_hdr *h;

	if (sc->mbuf_pages != NULL) {
		for (i = 0; i < sc->mbuf_npages; i++) {
			pg = sc->mbuf_pages[i];
			for (b = 0; b < pg->hdr.nchunks; b++) {
				h = (struct mbuf_chunk_hdr *) ((char *)pg +
				    b * pg->hdr.chunksize + pg->hdr.hdroff);
				if (h->flags & MBUF_CARD)
					if_printf(&sc->ifatm.ifnet,
					    "%s -- mbuf page=%u card buf %u\n",
					    __func__, i, b);
				if (h->flags & MBUF_USED)
					if_printf(&sc->ifatm.ifnet,
					    "%s -- mbuf page=%u used buf %u\n",
					    __func__, i, b);
			}
			bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
			bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
			free(pg, M_DEVBUF);
		}
		free(sc->mbuf_pages, M_DEVBUF);
	}
}

static void
hatm_init_smbufs(struct hatm_softc *sc)
{
	sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) *
	    sc->mbuf_max_pages, M_DEVBUF, M_WAITOK);
	sc->mbuf_npages = 0;
}

/*
 * Initialize/destroy TPDs. This is called from attach/detach.
 */
static void
hatm_destroy_tpds(struct hatm_softc *sc)
{
	struct tpd *t;

	if (sc->tpds.base == NULL)
		return;

	DBG(sc, ATTACH, ("releasing TPDs ..."));
	if (sc->tpd_nfree != sc->tpd_total)
		if_printf(&sc->ifatm.ifnet, "%u tpds still in use from %u\n",
		    sc->tpd_total - sc->tpd_nfree, sc->tpd_total);
	while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->tpd_free, link);
		bus_dmamap_destroy(sc->tx_tag, t->map);
	}
	hatm_destroy_dmamem(&sc->tpds);
	free(sc->tpd_used, M_DEVBUF);
	DBG(sc, ATTACH, ("... done"));
}
static int
hatm_init_tpds(struct hatm_softc *sc)
{
	int error;
	u_int i;
	struct tpd *t;

	DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total));
	error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds);
	if (error != 0) {
		DBG(sc, ATTACH, ("... dmamem error=%d", error));
		return (error);
	}

	/* put all the TPDs on the free list and allocate DMA maps */
	for (i = 0; i < sc->tpd_total; i++) {
		t = TPD_ADDR(sc, i);
		t->no = i;
		t->mbuf = NULL;
		error = bus_dmamap_create(sc->tx_tag, 0, &t->map);
		if (error != 0) {
			DBG(sc, ATTACH, ("... dmamap error=%d", error));
			while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
				SLIST_REMOVE_HEAD(&sc->tpd_free, link);
				bus_dmamap_destroy(sc->tx_tag, t->map);
			}
			hatm_destroy_dmamem(&sc->tpds);
			return (error);
		}

		SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
	}

	/* allocate and zero bitmap */
	sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8,
	    M_DEVBUF, M_ZERO | M_WAITOK);
	sc->tpd_nfree = sc->tpd_total;

	DBG(sc, ATTACH, ("... done"));

	return (0);
}

/*
 * Free all the TPDs that were given to the card.
 * An mbuf chain may be attached to a TPD - free it also and
 * unload its associated DMA map.
 */
static void
hatm_stop_tpds(struct hatm_softc *sc)
{
	u_int i;
	struct tpd *t;

	DBG(sc, ATTACH, ("free TPDs ..."));
	for (i = 0; i < sc->tpd_total; i++) {
		if (TPD_TST_USED(sc, i)) {
			t = TPD_ADDR(sc, i);
			if (t->mbuf) {
				m_freem(t->mbuf);
				t->mbuf = NULL;
				bus_dmamap_unload(sc->tx_tag, t->map);
			}
			TPD_CLR_USED(sc, i);
			SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
			sc->tpd_nfree++;
		}
	}
}

/*
 * This frees ALL resources of this interface and leaves the structure
 * in an indeterminate state. This is called just before detaching or
 * on a failed attach. No lock should be held.
 */
static void
hatm_destroy(struct hatm_softc *sc)
{
	u_int cid;

	bus_teardown_intr(sc->dev, sc->irqres, sc->ih);

	hatm_destroy_rmaps(sc);
	hatm_destroy_smbufs(sc);
	hatm_destroy_tpds(sc);

	if (sc->vcc_zone != NULL) {
		for (cid = 0; cid < HE_MAX_VCCS; cid++)
			if (sc->vccs[cid] != NULL)
				uma_zfree(sc->vcc_zone, sc->vccs[cid]);
		uma_zdestroy(sc->vcc_zone);
	}

	/*
	 * Release all memory allocated to the various queues and
	 * status pages. These have their own flag which shows whether
	 * they are really allocated.
	 */
	hatm_destroy_dmamem(&sc->irq_0.mem);
	hatm_destroy_dmamem(&sc->rbp_s0.mem);
	hatm_destroy_dmamem(&sc->rbp_l0.mem);
	hatm_destroy_dmamem(&sc->rbp_s1.mem);
	hatm_destroy_dmamem(&sc->rbrq_0.mem);
	hatm_destroy_dmamem(&sc->rbrq_1.mem);
	hatm_destroy_dmamem(&sc->tbrq.mem);
	hatm_destroy_dmamem(&sc->tpdrq.mem);
	hatm_destroy_dmamem(&sc->hsp_mem);

	if (sc->irqres != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ,
		    sc->irqid, sc->irqres);

	if (sc->tx_tag != NULL)
		if (bus_dma_tag_destroy(sc->tx_tag))
			if_printf(&sc->ifatm.ifnet, "TX DMA tag busy\n");

	if (sc->mbuf_tag != NULL)
		if (bus_dma_tag_destroy(sc->mbuf_tag))
			if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n");

	if (sc->parent_tag != NULL)
		if (bus_dma_tag_destroy(sc->parent_tag))
			if_printf(&sc->ifatm.ifnet, "parent DMA tag busy\n");

	if (sc->memres != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->memid, sc->memres);

	sysctl_ctx_free(&sc->sysctl_ctx);

	cv_destroy(&sc->cv_rcclose);
	cv_destroy(&sc->vcc_cv);
	mtx_destroy(&sc->mtx);
}

/*
 * 4.4 Card reset
 */
static int
hatm_reset(struct hatm_softc *sc)
{
	u_int v, count;

	WRITE4(sc, HE_REGO_RESET_CNTL, 0x00);
	BARRIER_W(sc);
	WRITE4(sc, HE_REGO_RESET_CNTL, 0xff);
	BARRIER_RW(sc);
	count = 0;
	while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) {
		BARRIER_R(sc);
		if (++count == 100) {
			if_printf(&sc->ifatm.ifnet, "reset failed\n");
			return (ENXIO);
		}
		DELAY(1000);
	}
	return (0);
}
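
/*
 * The loop above polls the reset state bit up to 100 times with a 1 ms
 * delay, so a card that has not finished its reset within roughly 100 ms
 * is reported as failed.
 */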

/*
 * 4.5 Set Bus Width
 */
static void
hatm_init_bus_width(struct hatm_softc *sc)
{
	uint32_t v, v1;

	v = READ4(sc, HE_REGO_HOST_CNTL);
	BARRIER_R(sc);
	if (v & HE_REGM_HOST_BUS64) {
		sc->pci64 = 1;
		v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
		v1 |= HE_PCIM_CTL0_64BIT;
		pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4);

		v |= HE_REGM_HOST_DESC_RD64
		    | HE_REGM_HOST_DATA_RD64
		    | HE_REGM_HOST_DATA_WR64;
		WRITE4(sc, HE_REGO_HOST_CNTL, v);
		BARRIER_W(sc);
	} else {
		sc->pci64 = 0;
		v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
		v &= ~HE_PCIM_CTL0_64BIT;
		pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
	}
}

/*
 * 4.6 Set Host Endianness
 */
static void
hatm_init_endianess(struct hatm_softc *sc)
{
	uint32_t v;

	v = READ4(sc, HE_REGO_LB_SWAP);
	BARRIER_R(sc);
#if BYTE_ORDER == BIG_ENDIAN
	v |= HE_REGM_LBSWAP_INTR_SWAP |
	    HE_REGM_LBSWAP_DESC_WR_SWAP |
	    HE_REGM_LBSWAP_BIG_ENDIAN;
	v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
	    HE_REGM_LBSWAP_DESC_RD_SWAP |
	    HE_REGM_LBSWAP_DATA_RD_SWAP);
#else
	v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
	    HE_REGM_LBSWAP_DESC_RD_SWAP |
	    HE_REGM_LBSWAP_DATA_RD_SWAP |
	    HE_REGM_LBSWAP_INTR_SWAP |
	    HE_REGM_LBSWAP_DESC_WR_SWAP |
	    HE_REGM_LBSWAP_BIG_ENDIAN);
#endif

	if (sc->he622)
		v |= HE_REGM_LBSWAP_XFER_SIZE;

	WRITE4(sc, HE_REGO_LB_SWAP, v);
	BARRIER_W(sc);
}

/*
 * 4.7 Read EEPROM
 */
static uint8_t
hatm_read_prom_byte(struct hatm_softc *sc, u_int addr)
{
	uint32_t val, tmp_read, byte_read;
	u_int i, j;
	int n;

	val = READ4(sc, HE_REGO_HOST_CNTL);
	val &= HE_REGM_HOST_PROM_BITS;
	BARRIER_R(sc);

	val |= HE_REGM_HOST_PROM_WREN;
	WRITE4(sc, HE_REGO_HOST_CNTL, val);
	BARRIER_W(sc);

	/* send READ */
	for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) {
		WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]);
		BARRIER_W(sc);
		DELAY(EEPROM_DELAY);
	}

	/* send ADDRESS */
	for (n = 7, j = 0; n >= 0; n--) {
		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
		    (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
		BARRIER_W(sc);
		DELAY(EEPROM_DELAY);
		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
		    (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
		BARRIER_W(sc);
		DELAY(EEPROM_DELAY);
	}

	val &= ~HE_REGM_HOST_PROM_WREN;
	WRITE4(sc, HE_REGO_HOST_CNTL, val);
	BARRIER_W(sc);

	/* read DATA */
	byte_read = 0;
	for (n = 7, j = 0; n >= 0; n--) {
		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
		BARRIER_W(sc);
		DELAY(EEPROM_DELAY);
		tmp_read = READ4(sc, HE_REGO_HOST_CNTL);
		byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT)
				>> HE_REGS_HOST_PROM_DATA_OUT) << n);
		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
		BARRIER_W(sc);
		DELAY(EEPROM_DELAY);
	}
	WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
	BARRIER_W(sc);
	DELAY(EEPROM_DELAY);

	return (byte_read);
}

static void
hatm_init_read_eeprom(struct hatm_softc *sc)
{
	u_int n, count;
	u_char byte;
	uint32_t v;

	for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) {
		byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count);
		if (n > 0 || byte != ' ')
			sc->prod_id[n++] = byte;
	}
	while (n > 0 && sc->prod_id[n-1] == ' ')
		n--;
	sc->prod_id[n] = '\0';

	for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) {
		byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count);
		if (n > 0 || byte != ' ')
			sc->rev[n++] = byte;
	}
	while (n > 0 && sc->rev[n-1] == ' ')
		n--;
	sc->rev[n] = '\0';
	sc->ifatm.mib.hw_version = sc->rev[0];

	sc->ifatm.mib.serial =  hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0;
	sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8;
	sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16;
	sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24;

	v =  hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0;
	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8;
	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16;
	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24;

	switch (v) {
	  case HE_MEDIA_UTP155:
		sc->ifatm.mib.media = IFM_ATM_UTP_155;
		sc->ifatm.mib.pcr = ATM_RATE_155M;
		break;

	  case HE_MEDIA_MMF155:
		sc->ifatm.mib.media = IFM_ATM_MM_155;
		sc->ifatm.mib.pcr = ATM_RATE_155M;
		break;

	  case HE_MEDIA_MMF622:
		sc->ifatm.mib.media = IFM_ATM_MM_622;
		sc->ifatm.mib.device = ATM_DEVICE_HE622;
		sc->ifatm.mib.pcr = ATM_RATE_622M;
		sc->he622 = 1;
		break;

	  case HE_MEDIA_SMF155:
		sc->ifatm.mib.media = IFM_ATM_SM_155;
		sc->ifatm.mib.pcr = ATM_RATE_155M;
		break;

	  case HE_MEDIA_SMF622:
		sc->ifatm.mib.media = IFM_ATM_SM_622;
		sc->ifatm.mib.device = ATM_DEVICE_HE622;
		sc->ifatm.mib.pcr = ATM_RATE_622M;
		sc->he622 = 1;
		break;
	}

	sc->ifatm.mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0);
	sc->ifatm.mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1);
	sc->ifatm.mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2);
	sc->ifatm.mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3);
	sc->ifatm.mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4);
	sc->ifatm.mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5);
}

/*
 * Clear unused interrupt queue
 */
static void
hatm_clear_irq(struct hatm_softc *sc, u_int group)
{
	WRITE4(sc, HE_REGO_IRQ_BASE(group), 0);
	WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0);
	WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0);
	WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
}

/*
 * 4.10 Initialize interrupt queues
 */
static void
hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group)
{
	u_int i;

	if (q->size == 0) {
		hatm_clear_irq(sc, group);
		return;
	}

	q->group = group;
	q->sc = sc;
	q->irq = q->mem.base;
	q->head = 0;
	q->tailp = q->irq + (q->size - 1);
	*q->tailp = 0;

	for (i = 0; i < q->size; i++)
		q->irq[i] = HE_REGM_ITYPE_INVALID;

	WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr);
	WRITE4(sc, HE_REGO_IRQ_HEAD(group),
	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH));
	WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line);
	WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
}

/*
 * 5.1.3 Initialize connection memory
 */
static void
hatm_init_cm(struct hatm_softc *sc)
{
	u_int rsra, mlbm, rabr, numbuffs;
	u_int tsra, tabr, mtpd;
	u_int n;

	for (n = 0; n < HE_CONFIG_TXMEM; n++)
		WRITE_TCM4(sc, n, 0);
	for (n = 0; n < HE_CONFIG_RXMEM; n++)
		WRITE_RCM4(sc, n, 0);

	numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs;

	rsra = 0;
	mlbm = ((rsra + sc->ifatm.mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
	rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
	sc->rsrb = ((rabr + 2048) + (2 * sc->ifatm.mib.max_vccs - 1)) &
	    ~(2 * sc->ifatm.mib.max_vccs - 1);

	tsra = 0;
	sc->tsrb = tsra + sc->ifatm.mib.max_vccs * 8;
	sc->tsrc = sc->tsrb + sc->ifatm.mib.max_vccs * 4;
	sc->tsrd = sc->tsrc + sc->ifatm.mib.max_vccs * 2;
	tabr = sc->tsrd + sc->ifatm.mib.max_vccs * 1;
	mtpd = ((tabr + 1024) + (16 * sc->ifatm.mib.max_vccs - 1)) &
	    ~(16 * sc->ifatm.mib.max_vccs - 1);
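
	/*
	 * The "(x + 0x7ff) & ~0x7ff" expressions above just round up to
	 * the next multiple of 0x800 (e.g. 0x1234 becomes 0x1800); the
	 * rsrb and mtpd calculations round up the same way, using a
	 * power-of-two multiple of max_vccs as the alignment (this
	 * assumes max_vccs is itself a power of two).
	 */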

	DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
	    rsra, mlbm, rabr, sc->rsrb));
	DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x",
	    tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd));

	WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb);
	WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc);
	WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd);
	WRITE4(sc, HE_REGO_TMABR_BA, tabr);
	WRITE4(sc, HE_REGO_TPD_BA, mtpd);

	WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb);
	WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm);
	WRITE4(sc, HE_REGO_RCMABR_BA, rabr);

	BARRIER_W(sc);
}

/*
 * 5.1.4 Initialize Local buffer Pools
 */
static void
hatm_init_rx_buffer_pool(struct hatm_softc *sc,
	u_int num,		/* bank */
	u_int start,		/* start row */
	u_int numbuffs		/* number of entries */
)
{
	u_int row_size;		/* bytes per row */
	uint32_t row_addr;	/* start address of this row */
	u_int lbuf_size;	/* bytes per lbuf */
	u_int lbufs_per_row;	/* number of lbufs per memory row */
	uint32_t lbufd_index;	/* index of lbuf descriptor */
	uint32_t lbufd_addr;	/* address of lbuf descriptor */
	u_int lbuf_row_cnt;	/* current lbuf in current row */
	uint32_t lbuf_addr;	/* address of current buffer */
	u_int i;

	row_size = sc->bytes_per_row;
	row_addr = start * row_size;
	lbuf_size = sc->cells_per_lbuf * 48;
	lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;

	/* descriptor index */
	lbufd_index = num;

	/* 2 words per entry */
	lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;

	/* write head of queue */
	WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index);

	lbuf_row_cnt = 0;
	for (i = 0; i < numbuffs; i++) {
		lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;

		WRITE_RCM4(sc, lbufd_addr, lbuf_addr);

		lbufd_index += 2;
		WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);

		if (++lbuf_row_cnt == lbufs_per_row) {
			lbuf_row_cnt = 0;
			row_addr += row_size;
		}

		lbufd_addr += 2 * 2;
	}

	WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2);
	WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs);

	BARRIER_W(sc);
}

static void
hatm_init_tx_buffer_pool(struct hatm_softc *sc,
	u_int start,		/* start row */
	u_int numbuffs		/* number of entries */
)
{
	u_int row_size;		/* bytes per row */
	uint32_t row_addr;	/* start address of this row */
	u_int lbuf_size;	/* bytes per lbuf */
	u_int lbufs_per_row;	/* number of lbufs per memory row */
	uint32_t lbufd_index;	/* index of lbuf descriptor */
	uint32_t lbufd_addr;	/* address of lbuf descriptor */
	u_int lbuf_row_cnt;	/* current lbuf in current row */
	uint32_t lbuf_addr;	/* address of current buffer */
	u_int i;

	row_size = sc->bytes_per_row;
	row_addr = start * row_size;
	lbuf_size = sc->cells_per_lbuf * 48;
	lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;

	/* descriptor index */
	lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs;

	/* 2 words per entry */
	lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;

	/* write head of queue */
	WRITE4(sc, HE_REGO_TLBF_H, lbufd_index);

	lbuf_row_cnt = 0;
	for (i = 0; i < numbuffs; i++) {
		lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;

		WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
		lbufd_index++;
		WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);

		if (++lbuf_row_cnt == lbufs_per_row) {
			lbuf_row_cnt = 0;
			row_addr += row_size;
		}

		lbufd_addr += 2;
	}

	WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1);
	BARRIER_W(sc);
}

/*
 * 5.1.5 Initialize Intermediate Receive Queues
 */
static void
hatm_init_imed_queues(struct hatm_softc *sc)
{
	u_int n;

	if (sc->he622) {
		for (n = 0; n < 8; n++) {
			WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f);
			WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f);
		}
	} else {
		for (n = 0; n < 8; n++) {
			WRITE4(sc, HE_REGO_INMQ_S(n), n);
			WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8);
		}
	}
}

/*
 * 5.1.7 Init CS block
 */
static void
hatm_init_cs_block(struct hatm_softc *sc)
{
	u_int n, i;
	u_int clkfreg, cellrate, decr, tmp;
	static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR;
	static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL;
	static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT;
	static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR;
	static const uint32_t rtatr[2] = HE_REGT_CS_RTATR;
	static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC;
	static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF;

	/* Clear Rate Controller Start Times and Occupied Flags */
	for (n = 0; n < 32; n++)
		WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0);

	clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
	decr = cellrate / 32;

	for (n = 0; n < 16; n++) {
		tmp = clkfreg / cellrate;
		WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1);
		cellrate -= decr;
	}

	i = (sc->cells_per_lbuf == 2) ? 0
	   :(sc->cells_per_lbuf == 4) ? 1
	   :                            2;

	/* table 5.2 */
	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]);
	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]);
	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]);
	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]);
	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]);

	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]);
	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]);
	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]);

	WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]);
	WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]);

	WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]);
	WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]);

	WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]);
	WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]);
	WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]);
	WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]);
	WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]);
	WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]);

	WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]);
	WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]);

	WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8);

	for (n = 0; n < 8; n++)
		WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0);
}

/*
 * 5.1.8 CS Block Connection Memory Initialisation
 */
static void
hatm_init_cs_block_cm(struct hatm_softc *sc)
{
	u_int n, i;
	u_int expt, mant, etrm, wcr, ttnrm, tnrm;
	uint32_t rate;
	uint32_t clkfreq, cellrate, decr;
	uint32_t *rg, rtg, val = 0;
	uint64_t drate;
	u_int buf, buf_limit;
	uint32_t base = READ4(sc, HE_REGO_RCMABR_BA);

	for (n = 0; n < HE_REGL_CM_GQTBL; n++)
		WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0);
	for (n = 0; n < HE_REGL_CM_RGTBL; n++)
		WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0);

	tnrm = 0;
	for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) {
		expt = (n >> 5) & 0x1f;
		mant = ((n & 0x18) << 4) | 0x7f;
		wcr = (1 << expt) * (mant + 512) / 512;
		etrm = n & 0x7;
		ttnrm = wcr / 10 / (1 << etrm);
		if (ttnrm > 255)
			ttnrm = 255;
		else if(ttnrm < 2)
			ttnrm = 2;
		tnrm = (tnrm << 8) | (ttnrm & 0xff);
		if (n % 4 == 0)
			WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm);
	}

	clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
	buf_limit = 4;

	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
	decr = cellrate / 32;

	/* compute GRID top row in 1000 * cps */
	for (n = 0; n < 16; n++) {
		u_int interval = clkfreq / cellrate;
		sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval;
		cellrate -= decr;
	}

	/* compute the other rows according to 2.4 */
	for (i = 1; i < 16; i++)
		for (n = 0; n < 16; n++)
			sc->rate_grid[i][n] = sc->rate_grid[i-1][n] /
			    ((i < 14) ? 2 : 4);

	/* first entry is line rate */
	n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M);
	expt = (n >> 9) & 0x1f;
	mant = n & 0x1f0;
	sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512;

	/* now build the conversion table - each 32 bit word contains
	 * two entries - this gives a total of 0x400 16 bit entries.
	 * This table maps the truncated ATMF rate version into a grid index */
	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
	rg = &sc->rate_grid[15][15];

	for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) {
		/* unpack the ATMF rate */
		expt = rate >> 5;
		mant = (rate & 0x1f) << 4;

		/* get the cell rate - minimum is 10 per second */
		drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512;
		if (drate < 10 * 1000)
			drate = 10 * 1000;

		/* now look up the grid index */
		while (drate >= *rg && rg-- > &sc->rate_grid[0][0])
			;
		rg++;
		rtg = rg - &sc->rate_grid[0][0];

		/* now compute the buffer limit */
		buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000;
		if (buf == 0)
			buf = 1;
		else if (buf > buf_limit)
			buf = buf_limit;

		/* make value */
		val = (val << 16) | (rtg << 8) | buf;

		/* write */
		if (rate % 2 == 1)
			WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val);
	}
}

/*
 * Clear an unused receive group buffer pool
 */
static void
hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large)
{
	WRITE4(sc, HE_REGO_RBP_S(large, group), 0);
	WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
	WRITE4(sc, HE_REGO_RBP_QI(large, group), 1);
	WRITE4(sc, HE_REGO_RBP_BL(large, group), 0);
}

/*
 * Initialize a receive group buffer pool
 */
static void
hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group,
    u_int large)
{
	if (q->size == 0) {
		hatm_clear_rpool(sc, group, large);
		return;
	}

	bzero(q->mem.base, q->mem.size);
	q->rbp = q->mem.base;
	q->head = q->tail = 0;

	DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large],
	    (u_long)q->mem.paddr));

	WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr);
	WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
	WRITE4(sc, HE_REGO_RBP_QI(large, group),
	    ((q->size - 1) << HE_REGS_RBP_SIZE) |
	    HE_REGM_RBP_INTR_ENB |
	    (q->thresh << HE_REGS_RBP_THRESH));
	WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1);
}

/*
 * Clear an unused receive buffer return queue
 */
static void
hatm_clear_rbrq(struct hatm_softc *sc, u_int group)
{
	WRITE4(sc, HE_REGO_RBRQ_ST(group), 0);
	WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
	WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH));
	WRITE4(sc, HE_REGO_RBRQ_I(group), 0);
}

/*
 * Initialize receive buffer return queue
 */
static void
hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
{
	if (rq->size == 0) {
		hatm_clear_rbrq(sc, group);
		return;
	}

	rq->rbrq = rq->mem.base;
	rq->head = 0;

	DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr));

	WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr);
	WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
	WRITE4(sc, HE_REGO_RBRQ_Q(group),
	    (rq->thresh << HE_REGS_RBRQ_THRESH) |
	    ((rq->size - 1) << HE_REGS_RBRQ_SIZE));
	WRITE4(sc, HE_REGO_RBRQ_I(group),
	    (rq->tout << HE_REGS_RBRQ_TIME) |
	    (rq->pcnt << HE_REGS_RBRQ_COUNT));
}

/*
 * Clear an unused transmit buffer return queue N
 */
static void
hatm_clear_tbrq(struct hatm_softc *sc, u_int group)
{
	WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0);
	WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
	WRITE4(sc, HE_REGO_TBRQ_S(group), 0);
	WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1);
}

/*
 * Initialize transmit buffer return queue N
 */
static void
hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group)
{
	if (tq->size == 0) {
		hatm_clear_tbrq(sc, group);
		return;
	}

	tq->tbrq = tq->mem.base;
	tq->head = 0;

	DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr));

	WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr);
	WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
	WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1);
	WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh);
}

/*
 * Initialize TPDRQ
 */
static void
hatm_init_tpdrq(struct hatm_softc *sc)
{
	struct hetpdrq *tq;

	tq = &sc->tpdrq;
	tq->tpdrq = tq->mem.base;
	tq->tail = tq->head = 0;

	DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr));

	WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr);
	WRITE4(sc, HE_REGO_TPDRQ_T, 0);
	WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1);
}

/*
 * Function can be called by the infrastructure to start the card.
 */
static void
hatm_init(void *p)
{
	struct hatm_softc *sc = p;

	mtx_lock(&sc->mtx);
	hatm_stop(sc);
	hatm_initialize(sc);
	mtx_unlock(&sc->mtx);
}

enum {
	CTL_ISTATS,
};

/*
 * Sysctl handler
 */
static int
hatm_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	uint32_t *ret;
	int error;
	size_t len;

	switch (arg2) {

	  case CTL_ISTATS:
		len = sizeof(sc->istats);
		break;

	  default:
		panic("bad control code");
	}

	ret = malloc(len, M_TEMP, M_WAITOK);
	mtx_lock(&sc->mtx);

	switch (arg2) {

	  case CTL_ISTATS:
		sc->istats.mcc += READ4(sc, HE_REGO_MCC);
		sc->istats.oec += READ4(sc, HE_REGO_OEC);
		sc->istats.dcc += READ4(sc, HE_REGO_DCC);
		sc->istats.cec += READ4(sc, HE_REGO_CEC);
		bcopy(&sc->istats, ret, sizeof(sc->istats));
		break;
	}
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, ret, len);
	free(ret, M_TEMP);

	return (error);
}

static int
kenv_getuint(struct hatm_softc *sc, const char *var,
    u_int *ptr, u_int def, int rw)
{
	char full[IFNAMSIZ + 3 + 20];
	char *val, *end;
	u_int u;

	*ptr = def;

	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL)
		return (ENOMEM);

	snprintf(full, sizeof(full), "hw.%s.%s",
	    device_get_nameunit(sc->dev), var);

	if ((val = getenv(full)) == NULL)
		return (0);
	u = strtoul(val, &end, 0);
	if (end == val || *end != '\0') {
		freeenv(val);
		return (EINVAL);
	}
	freeenv(val);
	if (bootverbose)
		if_printf(&sc->ifatm.ifnet, "%s=%u\n", full, u);
	*ptr = u;
	return (0);
}
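
/*
 * Every parameter registered through kenv_getuint() can be preset as a
 * loader tunable named hw.<nameunit>.<var> (for example
 * hw.hatm0.rbps0_size="2048" in /boot/loader.conf, if the first card
 * attaches as hatm0) and is also exported as a sysctl below
 * hw.atm.<nameunit>, read-only unless rw is set.
 */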

/*
 * Set configurable parameters. Many of these are configurable via
 * kenv.
 */
static int
hatm_configure(struct hatm_softc *sc)
{
	/* Receive buffer pool 0 small */
	kenv_getuint(sc, "rbps0_size", &sc->rbp_s0.size,
	    HE_CONFIG_RBPS0_SIZE, 0);
	kenv_getuint(sc, "rbps0_thresh", &sc->rbp_s0.thresh,
	    HE_CONFIG_RBPS0_THRESH, 0);
	sc->rbp_s0.bsize = MBUF0_SIZE;

	/* Receive buffer pool 0 large */
	kenv_getuint(sc, "rbpl0_size", &sc->rbp_l0.size,
	    HE_CONFIG_RBPL0_SIZE, 0);
	kenv_getuint(sc, "rbpl0_thresh", &sc->rbp_l0.thresh,
	    HE_CONFIG_RBPL0_THRESH, 0);
	sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;

	/* Receive buffer return queue 0 */
	kenv_getuint(sc, "rbrq0_size", &sc->rbrq_0.size,
	    HE_CONFIG_RBRQ0_SIZE, 0);
	kenv_getuint(sc, "rbrq0_thresh", &sc->rbrq_0.thresh,
	    HE_CONFIG_RBRQ0_THRESH, 0);
	kenv_getuint(sc, "rbrq0_tout", &sc->rbrq_0.tout,
	    HE_CONFIG_RBRQ0_TOUT, 0);
	kenv_getuint(sc, "rbrq0_pcnt", &sc->rbrq_0.pcnt,
	    HE_CONFIG_RBRQ0_PCNT, 0);

	/* Receive buffer pool 1 small */
	kenv_getuint(sc, "rbps1_size", &sc->rbp_s1.size,
	    HE_CONFIG_RBPS1_SIZE, 0);
	kenv_getuint(sc, "rbps1_thresh", &sc->rbp_s1.thresh,
	    HE_CONFIG_RBPS1_THRESH, 0);
	sc->rbp_s1.bsize = MBUF1_SIZE;

	/* Receive buffer return queue 1 */
	kenv_getuint(sc, "rbrq1_size", &sc->rbrq_1.size,
	    HE_CONFIG_RBRQ1_SIZE, 0);
	kenv_getuint(sc, "rbrq1_thresh", &sc->rbrq_1.thresh,
	    HE_CONFIG_RBRQ1_THRESH, 0);
	kenv_getuint(sc, "rbrq1_tout", &sc->rbrq_1.tout,
	    HE_CONFIG_RBRQ1_TOUT, 0);
	kenv_getuint(sc, "rbrq1_pcnt", &sc->rbrq_1.pcnt,
	    HE_CONFIG_RBRQ1_PCNT, 0);

	/* Interrupt queue 0 */
	kenv_getuint(sc, "irq0_size", &sc->irq_0.size,
	    HE_CONFIG_IRQ0_SIZE, 0);
	kenv_getuint(sc, "irq0_thresh", &sc->irq_0.thresh,
	    HE_CONFIG_IRQ0_THRESH, 0);
	sc->irq_0.line = HE_CONFIG_IRQ0_LINE;

	/* Transmit buffer return queue 0 */
	kenv_getuint(sc, "tbrq0_size", &sc->tbrq.size,
	    HE_CONFIG_TBRQ_SIZE, 0);
	kenv_getuint(sc, "tbrq0_thresh", &sc->tbrq.thresh,
	    HE_CONFIG_TBRQ_THRESH, 0);

	/* Transmit buffer ready queue */
	kenv_getuint(sc, "tpdrq_size", &sc->tpdrq.size,
	    HE_CONFIG_TPDRQ_SIZE, 0);
	/* Max TPDs per VCC */
	kenv_getuint(sc, "tpdmax", &sc->max_tpd,
	    HE_CONFIG_TPD_MAXCC, 0);

	/* external mbuf pages */
	kenv_getuint(sc, "max_mbuf_pages", &sc->mbuf_max_pages,
	    HE_CONFIG_MAX_MBUF_PAGES, 0);

	/* mpsafe */
	kenv_getuint(sc, "mpsafe", &sc->mpsafe, 0, 0);
	if (sc->mpsafe != 0)
		sc->mpsafe = INTR_MPSAFE;

	return (0);
}

#ifdef HATM_DEBUG

/*
 * Get TSRs from connection memory
 */
static int
hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i, j;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_MAX_VCCS; i++)
		for (j = 0; j <= 14; j++)
			val[15 * i + j] = READ_TSR(sc, i, j);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get TPDs from connection memory
 */
static int
hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i, j;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_MAX_VCCS; i++)
		for (j = 0; j < 16; j++)
			val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get mbox registers
 */
static int
hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_REGO_CS_END; i++)
		val[i] = READ_MBOX4(sc, i);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get connection memory
 */
static int
hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	val[0] = READ4(sc, HE_REGO_RCMABR_BA);
	for (i = 0; i < HE_CONFIG_RXMEM; i++)
		val[i + 1] = READ_RCM4(sc, i);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1));
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get local buffer memory
 */
static int
hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i;
	uint32_t *val;
	u_int bytes = (1 << 21);

	val = malloc(bytes, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < bytes / 4; i++)
		val[i] = READ_LB4(sc, i);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, bytes);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get all card registers
 */
static int
hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i;
	uint32_t *val;

	val = malloc(HE_REGO_END, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_REGO_END; i += 4)
		val[i / 4] = READ4(sc, i);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, HE_REGO_END);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}
#endif

/*
 * Suni register access
 */
/*
 * read at most n SUNI registers starting at reg into val
 */
static int
hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
{
	u_int i;
	struct hatm_softc *sc = (struct hatm_softc *)ifatm;

	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
		return (EINVAL);
	if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
		*n = (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4 - reg;

	mtx_assert(&sc->mtx, MA_OWNED);
	for (i = 0; i < *n; i++)
		val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i));

	return (0);
}

/*
 * set the bits selected by mask in register reg to the corresponding bits of val
 */
static int
hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
{
	uint32_t regval;
	struct hatm_softc *sc = (struct hatm_softc *)ifatm;

	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
		return (EINVAL);

	mtx_assert(&sc->mtx, MA_OWNED);
	regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
	regval = (regval & ~mask) | (val & mask);
	WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);

	return (0);
}

static struct utopia_methods hatm_utopia_methods = {
	hatm_utopia_readregs,
	hatm_utopia_writereg,
};

/*
 * Detach - if it is running, stop. Destroy.
 */
static int
hatm_detach(device_t dev)
{
	struct hatm_softc *sc = (struct hatm_softc *)device_get_softc(dev);

	mtx_lock(&sc->mtx);
	hatm_stop(sc);
	if (sc->utopia.state & UTP_ST_ATTACHED) {
		utopia_stop(&sc->utopia);
		utopia_detach(&sc->utopia);
	}
	mtx_unlock(&sc->mtx);

	atm_ifdetach(&sc->ifatm.ifnet);

	hatm_destroy(sc);

	return (0);
}

/*
 * Attach to the device. Assume that no locking is needed here.
 * All resources we allocate here are freed by calling hatm_destroy.
 */
static int
hatm_attach(device_t dev)
{
	struct hatm_softc *sc;
	int error;
	uint32_t v;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	sc->dev = dev;
	sc->ifatm.mib.device = ATM_DEVICE_HE155;
	sc->ifatm.mib.serial = 0;
	sc->ifatm.mib.hw_version = 0;
	sc->ifatm.mib.sw_version = 0;
	sc->ifatm.mib.vpi_bits = HE_CONFIG_VPI_BITS;
	sc->ifatm.mib.vci_bits = HE_CONFIG_VCI_BITS;
	sc->ifatm.mib.max_vpcs = 0;
	sc->ifatm.mib.max_vccs = HE_MAX_VCCS;
	sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
	sc->he622 = 0;
	sc->ifatm.phy = &sc->utopia;

	SLIST_INIT(&sc->tpd_free);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	cv_init(&sc->vcc_cv, "HEVCCcv");
	cv_init(&sc->cv_rcclose, "RCClose");

	sysctl_ctx_init(&sc->sysctl_ctx);

	/*
	 * 4.2 BIOS Configuration
	 */
	v = pci_read_config(dev, PCIR_COMMAND, 2);
	v |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, v, 2);

	/*
	 * 4.3 PCI Bus Controller-Specific Initialisation
	 */
	v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4);
	v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT;
#if BYTE_ORDER == BIG_ENDIAN && 0
	v |= HE_PCIM_CTL0_BIGENDIAN;
#endif
	pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4);

	/*
	 * Map memory
	 */
	v = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!(v & PCIM_CMD_MEMEN)) {
		device_printf(dev, "failed to enable memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->memid = PCIR_BAR(0);
	sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
	    RF_ACTIVE);
	if (sc->memres == NULL) {
		device_printf(dev, "could not map memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->memh = rman_get_bushandle(sc->memres);
	sc->memt = rman_get_bustag(sc->memres);

	/*
	 * Allocate a DMA tag for subsequent allocations
	 */
	if (bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->parent_tag)) {
		device_printf(dev, "could not allocate DMA tag\n");
		error = ENOMEM;
		goto failed;
	}

	if (bus_dma_tag_create(sc->parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MBUF_ALLOC_SIZE, 1,
	    MBUF_ALLOC_SIZE, 0,
	    NULL, NULL, &sc->mbuf_tag)) {
		device_printf(dev, "could not allocate mbuf DMA tag\n");
		error = ENOMEM;
		goto failed;
	}

	/*
	 * Allocate a DMA tag for packets to send. Here we have a problem with
	 * the specification of the maximum number of segments. Theoretically
	 * this would be (the size of the transmit ring - 1) multiplied by 3,
1740	 * but this would not work. So make the maximum number of TPDs
1741	 * occupied by one packet a configuration parameter.
1742	 */
1743	if (bus_dma_tag_create(NULL, 1, 0,
1744	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1745	    HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0,
1746	    NULL, NULL, &sc->tx_tag)) {
1747		device_printf(dev, "could not allocate TX tag\n");
1748		error = ENOMEM;
1749		goto failed;
1750	}
1751
1752	/*
1753	 * Setup the interrupt
1754	 */
1755	sc->irqid = 0;
1756	sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
1757	    RF_SHAREABLE | RF_ACTIVE);
1758	if (sc->irqres == 0) {
1759		device_printf(dev, "could not allocate irq\n");
1760		error = ENXIO;
1761		goto failed;
1762	}
1763
1764	ifp = &sc->ifatm.ifnet;
1765	ifp->if_softc = sc;
1766	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1767
1768	/*
1769	 * Make the sysctl tree
1770	 */
1771	error = ENOMEM;
1772	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1773	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
1774	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
1775		goto failed;
1776
1777	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1778	    OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS,
1779	    hatm_sysctl, "LU", "internal statistics") == NULL)
1780		goto failed;
1781
1782#ifdef HATM_DEBUG
1783	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1784	    OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1785	    hatm_sysctl_tsr, "S", "transmission status registers") == NULL)
1786		goto failed;
1787
1788	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1789	    OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1790	    hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL)
1791		goto failed;
1792
1793	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1794	    OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1795	    hatm_sysctl_mbox, "S", "mbox registers") == NULL)
1796		goto failed;
1797
1798	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1799	    OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1800	    hatm_sysctl_cm, "S", "connection memory") == NULL)
1801		goto failed;
1802
1803	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1804	    OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1805	    hatm_sysctl_heregs, "S", "card registers") == NULL)
1806		goto failed;
1807
1808	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1809	    OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1810	    hatm_sysctl_lbmem, "S", "local memory") == NULL)
1811		goto failed;
1812
1813	kenv_getuint(sc, "debug", &sc->debug, HATM_DEBUG, 1);
1814#endif
1815
1816	/*
1817	 * Configure
1818	 */
1819	if ((error = hatm_configure(sc)) != 0)
1820		goto failed;
1821
1822	/*
1823	 * Compute memory parameters
1824	 */
1825	if (sc->rbp_s0.size != 0) {
1826		sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3;
1827		sc->rbp_s0.mem.size = sc->rbp_s0.size * 8;
1828		sc->rbp_s0.mem.align = sc->rbp_s0.mem.size;
1829	}
1830	if (sc->rbp_l0.size != 0) {
1831		sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3;
1832		sc->rbp_l0.mem.size = sc->rbp_l0.size * 8;
1833		sc->rbp_l0.mem.align = sc->rbp_l0.mem.size;
1834	}
1835	if (sc->rbp_s1.size != 0) {
1836		sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3;
1837		sc->rbp_s1.mem.size = sc->rbp_s1.size * 8;
1838		sc->rbp_s1.mem.align = sc->rbp_s1.mem.size;
1839	}
1840	if (sc->rbrq_0.size != 0) {
1841		sc->rbrq_0.mem.size = sc->rbrq_0.size * 8;
1842		sc->rbrq_0.mem.align = sc->rbrq_0.mem.size;
1843	}
1844	if (sc->rbrq_1.size != 0) {
1845		sc->rbrq_1.mem.size = sc->rbrq_1.size * 8;
1846		sc->rbrq_1.mem.align = sc->rbrq_1.mem.size;
1847	}
1848
1849	sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t);
1850	sc->irq_0.mem.align = 4 * 1024;
1851
1852	sc->tbrq.mem.size = sc->tbrq.size * 4;
1853	sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */
1854
1855	sc->tpdrq.mem.size = sc->tpdrq.size * 8;
1856	sc->tpdrq.mem.align = sc->tpdrq.mem.size;
1857
1858	sc->hsp_mem.size = sizeof(struct he_hsp);
1859	sc->hsp_mem.align = 1024;
1860
1861	sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size;
1862	sc->tpd_total = sc->tbrq.size + sc->tpdrq.size;
1863	sc->tpds.align = 64;
1864	sc->tpds.size = sc->tpd_total * HE_TPD_SIZE;
1865
1866	hatm_init_rmaps(sc);
1867	hatm_init_smbufs(sc);
1868	if ((error = hatm_init_tpds(sc)) != 0)
1869		goto failed;
1870
1871	/*
1872	 * Allocate memory
1873	 */
1874	if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 ||
1875	    (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 ||
1876	    (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 ||
1877	    (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0)
1878		goto failed;
1879
1880	if (sc->rbp_s0.mem.size != 0 &&
1881	    (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem)))
1882		goto failed;
1883	if (sc->rbp_l0.mem.size != 0 &&
1884	    (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem)))
1885		goto failed;
1886	if (sc->rbp_s1.mem.size != 0 &&
1887	    (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem)))
1888		goto failed;
1889
1890	if (sc->rbrq_0.mem.size != 0 &&
1891	    (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem)))
1892		goto failed;
1893	if (sc->rbrq_1.mem.size != 0 &&
1894	    (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem)))
1895		goto failed;
1896
1897	if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc),
1898	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) {
1899		device_printf(dev, "cannot allocate zone for vccs\n");
1900		goto failed;
1901	}
1902
1903	/*
1904	 * 4.4 Reset the card.
1905	 */
1906	if ((error = hatm_reset(sc)) != 0)
1907		goto failed;
1908
1909	/*
1910	 * Read the prom.
1911	 */
1912	hatm_init_bus_width(sc);
1913	hatm_init_read_eeprom(sc);
1914	hatm_init_endianess(sc);
1915
1916	/*
1917	 * Initialize interface
1918	 */
1919	ifp->if_flags = IFF_SIMPLEX;
1920	ifp->if_ioctl = hatm_ioctl;
1921	ifp->if_start = hatm_start;
1922	ifp->if_watchdog = NULL;
1923	ifp->if_init = hatm_init;
1924
1925	utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
1926	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1927	    &hatm_utopia_methods);
1928	utopia_init_media(&sc->utopia);
1929
1930	/* these two SUNI routines need the lock */
1931	mtx_lock(&sc->mtx);
1932	/* poll while we are not running */
1933	sc->utopia.flags |= UTP_FL_POLL_CARRIER;
1934	utopia_start(&sc->utopia);
1935	utopia_reset(&sc->utopia);
1936	mtx_unlock(&sc->mtx);
1937
1938	atm_ifattach(ifp);
1939
1940#ifdef ENABLE_BPF
1941	bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
1942#endif
1943
1944	error = bus_setup_intr(dev, sc->irqres, sc->mpsafe | INTR_TYPE_NET,
1945	    hatm_intr, &sc->irq_0, &sc->ih);
1946	if (error != 0) {
1947		device_printf(dev, "could not setup interrupt\n");
1948		hatm_detach(dev);
1949		return (error);
1950	}
1951
1952	return (0);
1953
1954  failed:
1955	hatm_destroy(sc);
1956	return (error);
1957}
1958
1959/*
1960 * Start the interface. Assume a state as from attach().
1961 */
1962void
1963hatm_initialize(struct hatm_softc *sc)
1964{
1965	uint32_t v;
1966	u_int cid;
1967	static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT;
1968
1969	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
1970		return;
1971
1972	hatm_init_bus_width(sc);
1973	hatm_init_endianess(sc);
1974
1975	if_printf(&sc->ifatm.ifnet, "%s, Rev. %s, S/N %u, "
1976	    "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n",
1977	    sc->prod_id, sc->rev, sc->ifatm.mib.serial,
1978	    sc->ifatm.mib.esi[0], sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2],
1979	    sc->ifatm.mib.esi[3], sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5],
1980	    sc->pci64 ? 64 : 32);
1981
1982	/*
1983	 * 4.8 SDRAM Controller Initialisation
1984	 * 4.9 Initialize RNUM value
1985	 */
1986	if (sc->he622)
1987		WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
1988	else
1989		WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
1990	BARRIER_W(sc);
1991
1992	v = READ4(sc, HE_REGO_LB_SWAP);
1993	BARRIER_R(sc);
1994	v |= 0xf << HE_REGS_LBSWAP_RNUM;
1995	WRITE4(sc, HE_REGO_LB_SWAP, v);
1996	BARRIER_W(sc);
1997
1998	hatm_init_irq(sc, &sc->irq_0, 0);
1999	hatm_clear_irq(sc, 1);
2000	hatm_clear_irq(sc, 2);
2001	hatm_clear_irq(sc, 3);
2002
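	/*
	 * Only interrupt queue 0 is used; the group map registers below
	 * assign all eight connection groups to that queue.
	 */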
2003	WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
2004	WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
2005	WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
2006	WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
2007	BARRIER_W(sc);
2008
2009	/*
2010	 * 4.11 Enable PCI Bus Controller State Machine
2011	 */
2012	v = READ4(sc, HE_REGO_HOST_CNTL);
2013	BARRIER_R(sc);
2014	v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
2015	    HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
2016	WRITE4(sc, HE_REGO_HOST_CNTL, v);
2017	BARRIER_W(sc);
2018
2019	/*
2020	 * 5.1.1 Generic configuration state
2021	 */
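	/*
	 * The layout table is indexed by the card variant (sc->he622) and
	 * describes the on-card buffer memory: cells and bytes per row, the
	 * number of rows of the rx0, tx and rx1 regions, the rx0 start row
	 * and the number of cells per local buffer.
	 */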
2022	sc->cells_per_row = layout[sc->he622][0];
2023	sc->bytes_per_row = layout[sc->he622][1];
2024	sc->r0_numrows = layout[sc->he622][2];
2025	sc->tx_numrows = layout[sc->he622][3];
2026	sc->r1_numrows = layout[sc->he622][4];
2027	sc->r0_startrow = layout[sc->he622][5];
2028	sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
2029	sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
2030	sc->cells_per_lbuf = layout[sc->he622][6];
2031
2032	sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row /
2033	    sc->cells_per_lbuf);
2034	sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row /
2035	    sc->cells_per_lbuf);
2036	sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row /
2037	    sc->cells_per_lbuf);
2038
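	/*
	 * Clamp the buffer counts; presumably these are the limits of the
	 * chip's free-buffer accounting.
	 */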
2039	if (sc->r0_numbuffs > 2560)
2040		sc->r0_numbuffs = 2560;
2041	if (sc->r1_numbuffs > 2560)
2042		sc->r1_numbuffs = 2560;
2043	if (sc->tx_numbuffs > 5120)
2044		sc->tx_numbuffs = 5120;
2045
2046	DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
2047	    "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
2048	    "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
2049	    "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
2050	    sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
2051	    sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
2052	    sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));
2053
2054	/*
2055	 * 5.1.2 Configure hardware-dependent registers
2056	 */
2057	if (sc->he622) {
2058		WRITE4(sc, HE_REGO_LBARB,
2059		    (0x2 << HE_REGS_LBARB_SLICE) |
2060		    (0xf << HE_REGS_LBARB_RNUM) |
2061		    (0x3 << HE_REGS_LBARB_THPRI) |
2062		    (0x3 << HE_REGS_LBARB_RHPRI) |
2063		    (0x2 << HE_REGS_LBARB_TLPRI) |
2064		    (0x1 << HE_REGS_LBARB_RLPRI) |
2065		    (0x28 << HE_REGS_LBARB_BUS_MULT) |
2066		    (0x50 << HE_REGS_LBARB_NET_PREF));
2067		BARRIER_W(sc);
2068		WRITE4(sc, HE_REGO_SDRAMCON,
2069		    /* HW bug: don't use banking */
2070		    /* HE_REGM_SDRAMCON_BANK | */
2071		    HE_REGM_SDRAMCON_WIDE |
2072		    (0x384 << HE_REGS_SDRAMCON_REF));
2073		BARRIER_W(sc);
2074		WRITE4(sc, HE_REGO_RCMCONFIG,
2075		    (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2076		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2077		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
2078		WRITE4(sc, HE_REGO_TCMCONFIG,
2079		    (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2080		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2081		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
2082	} else {
2083		WRITE4(sc, HE_REGO_LBARB,
2084		    (0x2 << HE_REGS_LBARB_SLICE) |
2085		    (0xf << HE_REGS_LBARB_RNUM) |
2086		    (0x3 << HE_REGS_LBARB_THPRI) |
2087		    (0x3 << HE_REGS_LBARB_RHPRI) |
2088		    (0x2 << HE_REGS_LBARB_TLPRI) |
2089		    (0x1 << HE_REGS_LBARB_RLPRI) |
2090		    (0x46 << HE_REGS_LBARB_BUS_MULT) |
2091		    (0x8C << HE_REGS_LBARB_NET_PREF));
2092		BARRIER_W(sc);
2093		WRITE4(sc, HE_REGO_SDRAMCON,
2094		    /* HW bug: don't use banking */
2095		    /* HE_REGM_SDRAMCON_BANK | */
2096		    (0x150 << HE_REGS_SDRAMCON_REF));
2097		BARRIER_W(sc);
2098		WRITE4(sc, HE_REGO_RCMCONFIG,
2099		    (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2100		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2101		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
2102		WRITE4(sc, HE_REGO_TCMCONFIG,
2103		    (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2104		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2105		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
2106	}
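	/* Local buffer size in bytes: 48 payload octets per ATM cell. */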
2107	WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48));
2108
2109	WRITE4(sc, HE_REGO_RLBC_H, 0);
2110	WRITE4(sc, HE_REGO_RLBC_T, 0);
2111	WRITE4(sc, HE_REGO_RLBC_H2, 0);
2112
2113	WRITE4(sc, HE_REGO_RXTHRSH, 512);
2114	WRITE4(sc, HE_REGO_LITHRSH, 256);
2115
2116	WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs);
2117	WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs);
2118
2119	if (sc->he622) {
2120		WRITE4(sc, HE_REGO_RCCONFIG,
2121		    (8 << HE_REGS_RCCONFIG_UTDELAY) |
2122		    (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2123		    (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC));
2124		WRITE4(sc, HE_REGO_TXCONFIG,
2125		    (32 << HE_REGS_TXCONFIG_THRESH) |
2126		    (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2127		    (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2128	} else {
2129		WRITE4(sc, HE_REGO_RCCONFIG,
2130		    (0 << HE_REGS_RCCONFIG_UTDELAY) |
2131		    HE_REGM_RCCONFIG_UT_MODE |
2132		    (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2133		    (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC));
2134		WRITE4(sc, HE_REGO_TXCONFIG,
2135		    (32 << HE_REGS_TXCONFIG_THRESH) |
2136		    HE_REGM_TXCONFIG_UTMODE |
2137		    (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2138		    (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2139	}
2140
2141	WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0);
2142
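	/*
	 * When the second small receive pool is configured, OAM cells are
	 * apparently steered to group 1 via the OAM_GID field; otherwise
	 * they stay in group 0.
	 */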
2143	if (sc->rbp_s1.size != 0) {
2144		WRITE4(sc, HE_REGO_RHCONFIG,
2145		    HE_REGM_RHCONFIG_PHYENB |
2146		    ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2147		    (1 << HE_REGS_RHCONFIG_OAM_GID));
2148	} else {
2149		WRITE4(sc, HE_REGO_RHCONFIG,
2150		    HE_REGM_RHCONFIG_PHYENB |
2151		    ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2152		    (0 << HE_REGS_RHCONFIG_OAM_GID));
2153	}
2154	BARRIER_W(sc);
2155
2156	hatm_init_cm(sc);
2157
2158	hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs);
2159	hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs);
2160	hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs);
2161
2162	hatm_init_imed_queues(sc);
2163
2164	/*
2165	 * 5.1.6 Application tunable Parameters
2166	 */
2167	WRITE4(sc, HE_REGO_MCC, 0);
2168	WRITE4(sc, HE_REGO_OEC, 0);
2169	WRITE4(sc, HE_REGO_DCC, 0);
2170	WRITE4(sc, HE_REGO_CEC, 0);
2171
2172	hatm_init_cs_block(sc);
2173	hatm_init_cs_block_cm(sc);
2174
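	/*
	 * Only receive buffer pools 0/small, 0/large and 1/small and return
	 * queues 0 and 1 are used; the registers of all remaining pools and
	 * return queues are merely cleared.
	 */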
2175	hatm_init_rpool(sc, &sc->rbp_s0, 0, 0);
2176	hatm_init_rpool(sc, &sc->rbp_l0, 0, 1);
2177	hatm_init_rpool(sc, &sc->rbp_s1, 1, 0);
2178	hatm_clear_rpool(sc, 1, 1);
2179	hatm_clear_rpool(sc, 2, 0);
2180	hatm_clear_rpool(sc, 2, 1);
2181	hatm_clear_rpool(sc, 3, 0);
2182	hatm_clear_rpool(sc, 3, 1);
2183	hatm_clear_rpool(sc, 4, 0);
2184	hatm_clear_rpool(sc, 4, 1);
2185	hatm_clear_rpool(sc, 5, 0);
2186	hatm_clear_rpool(sc, 5, 1);
2187	hatm_clear_rpool(sc, 6, 0);
2188	hatm_clear_rpool(sc, 6, 1);
2189	hatm_clear_rpool(sc, 7, 0);
2190	hatm_clear_rpool(sc, 7, 1);
2191	hatm_init_rbrq(sc, &sc->rbrq_0, 0);
2192	hatm_init_rbrq(sc, &sc->rbrq_1, 1);
2193	hatm_clear_rbrq(sc, 2);
2194	hatm_clear_rbrq(sc, 3);
2195	hatm_clear_rbrq(sc, 4);
2196	hatm_clear_rbrq(sc, 5);
2197	hatm_clear_rbrq(sc, 6);
2198	hatm_clear_rbrq(sc, 7);
2199
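	/* Reset the host-side bookkeeping for large receive buffers. */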
2200	sc->lbufs_next = 0;
2201	bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size);
2202
2203	hatm_init_tbrq(sc, &sc->tbrq, 0);
2204	hatm_clear_tbrq(sc, 1);
2205	hatm_clear_tbrq(sc, 2);
2206	hatm_clear_tbrq(sc, 3);
2207	hatm_clear_tbrq(sc, 4);
2208	hatm_clear_tbrq(sc, 5);
2209	hatm_clear_tbrq(sc, 6);
2210	hatm_clear_tbrq(sc, 7);
2211
2212	hatm_init_tpdrq(sc);
2213
2214	WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 0x104780 : 0x800));
2215
2216	/*
2217	 * Initialize the host status page (HSP)
2218	 */
2219	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2220	sc->hsp = sc->hsp_mem.base;
2221	WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);
2222
2223	/*
2224	 * 5.1.12 Enable transmit and receive
2225	 * Enable bus master and interrupts
2226	 */
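	/*
	 * The two bits set here gate transmit and receive processing; the
	 * same bits are cleared again in hatm_stop().
	 */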
2227	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2228	v |= 0x18000000;
2229	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2230
2231	v = READ4(sc, HE_REGO_RCCONFIG);
2232	v |= HE_REGM_RCCONFIG_RXENB;
2233	WRITE4(sc, HE_REGO_RCCONFIG, v);
2234
2235	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2236	v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
2237	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2238
2239	sc->ifatm.ifnet.if_flags |= IFF_RUNNING;
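	/* Line rate in bits/sec: 53 octets per cell at the peak cell rate. */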
2240	sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
2241
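	/* The card is running now; stop polling the carrier (see attach). */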
2242	sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;
2243
2244	/* reopen vccs */
2245	for (cid = 0; cid < HE_MAX_VCCS; cid++)
2246		if (sc->vccs[cid] != NULL)
2247			hatm_load_vc(sc, cid, 1);
2248
2249	ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
2250	    sc->utopia.carrier == UTP_CARR_OK);
2251}
2252
2253/*
2254 * This function stops the card and frees all resources allocated after
2255 * attach. The global lock must be held.
2256 */
2257void
2258hatm_stop(struct hatm_softc *sc)
2259{
2260	uint32_t v;
2261	u_int i, p, cid;
2262	struct mbuf_chunk_hdr *ch;
2263	struct mbuf_page *pg;
2264
2265	mtx_assert(&sc->mtx, MA_OWNED);
2266
2267	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
2268		return;
2269	sc->ifatm.ifnet.if_flags &= ~IFF_RUNNING;
2270
2271	ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
2272	    sc->utopia.carrier == UTP_CARR_OK);
2273
2274	sc->utopia.flags |= UTP_FL_POLL_CARRIER;
2275
2276	/*
2277	 * Stop and reset the hardware so that everything remains
2278	 * stable.
2279	 */
2280	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2281	v &= ~0x18000000;
2282	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2283
2284	v = READ4(sc, HE_REGO_RCCONFIG);
2285	v &= ~HE_REGM_RCCONFIG_RXENB;
2286	WRITE4(sc, HE_REGO_RCCONFIG, v);
2287
2288	WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
2289	BARRIER_W(sc);
2290
2291	v = READ4(sc, HE_REGO_HOST_CNTL);
2292	BARRIER_R(sc);
2293	v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
2294	WRITE4(sc, HE_REGO_HOST_CNTL, v);
2295	BARRIER_W(sc);
2296
2297	/*
2298	 * Disable bus mastering and interrupts
2299	 */
2300	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2301	v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
2302	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2303
2304	(void)hatm_reset(sc);
2305
2306	/*
2307	 * The card resets the SUNI when it is reset, so re-initialize it
2308	 */
2309	utopia_reset(&sc->utopia);
2310
2311	/*
2312	 * Give anybody waiting on a VCC close a chance to proceed. They
2313	 * will stop waiting once they see that IFF_RUNNING has been cleared.
2314	 */
2315	cv_broadcast(&sc->vcc_cv);
2316	cv_broadcast(&sc->cv_rcclose);
2317
2318	/*
2319	 * Now free all resources.
2320	 */
2321
2322	/*
2323	 * Free the large mbufs that have been handed to the card.
2324	 */
2325	for (i = 0 ; i < sc->lbufs_size; i++) {
2326		if (sc->lbufs[i] != NULL) {
2327			bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
2328			m_freem(sc->lbufs[i]);
2329			sc->lbufs[i] = NULL;
2330		}
2331	}
2332
2333	/*
2334	 * Return small buffers still owned by the card to the free lists
2335	 */
2336	for (p = 0; p < sc->mbuf_npages; p++) {
2337		pg = sc->mbuf_pages[p];
2338		for (i = 0; i < pg->hdr.nchunks; i++) {
2339			ch = (struct mbuf_chunk_hdr *) ((char *)pg +
2340			    i * pg->hdr.chunksize + pg->hdr.hdroff);
2341			if (ch->flags & MBUF_CARD) {
2342				ch->flags &= ~MBUF_CARD;
2343				ch->flags |= MBUF_USED;
2344				hatm_ext_free(&sc->mbuf_list[pg->hdr.pool],
2345				    (struct mbufx_free *)((u_char *)ch -
2346				    pg->hdr.hdroff));
2347			}
2348		}
2349	}
2350
2351	hatm_stop_tpds(sc);
2352
2353	/*
2354	 * Free all partially reassembled PDUs on all VCCs.
2355	 */
2356	for (cid = 0; cid < HE_MAX_VCCS; cid++) {
2357		if (sc->vccs[cid] != NULL) {
2358			if (sc->vccs[cid]->chain != NULL) {
2359				m_freem(sc->vccs[cid]->chain);
2360				sc->vccs[cid]->chain = NULL;
2361				sc->vccs[cid]->last = NULL;
2362			}
2363			if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN |
2364			    HE_VCC_TX_OPEN))) {
2365				hatm_tx_vcc_closed(sc, cid);
2366				uma_zfree(sc->vcc_zone, sc->vccs[cid]);
2367				sc->vccs[cid] = NULL;
2368				sc->open_vccs--;
2369			} else {
2370				sc->vccs[cid]->vflags = 0;
2371				sc->vccs[cid]->ntpds = 0;
2372			}
2373		}
2374	}
2375
2376	if (sc->rbp_s0.size != 0)
2377		bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
2378	if (sc->rbp_l0.size != 0)
2379		bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
2380	if (sc->rbp_s1.size != 0)
2381		bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
2382	if (sc->rbrq_0.size != 0)
2383		bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
2384	if (sc->rbrq_1.size != 0)
2385		bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);
2386
2387	bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
2388	bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
2389	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2390}
2391
2392/************************************************************
2393 *
2394 * Driver infrastructure
2395 */
2396devclass_t hatm_devclass;
2397
2398static device_method_t hatm_methods[] = {
2399	DEVMETHOD(device_probe,		hatm_probe),
2400	DEVMETHOD(device_attach,	hatm_attach),
2401	DEVMETHOD(device_detach,	hatm_detach),
2402	{0,0}
2403};
2404static driver_t hatm_driver = {
2405	"hatm",
2406	hatm_methods,
2407	sizeof(struct hatm_softc),
2408};
2409DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);
2410