if_hatm.c (r119280 → r119690)
1/*
2 * Copyright (c) 2001-2003
3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * ForeHE driver.
30 *
31 * This file contains the module and driver infrastructure stuff as well
32 * as a couple of utility functions and the entire initialisation.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm.c 119280 2003-08-22 06:00:27Z imp $");
36__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm.c 119690 2003-09-02 17:30:40Z jhb $");
37
38#include "opt_inet.h"
39#include "opt_natm.h"
40
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/kernel.h>
46#include <sys/bus.h>
47#include <sys/errno.h>
48#include <sys/conf.h>
49#include <sys/module.h>
50#include <sys/queue.h>
51#include <sys/syslog.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/condvar.h>
55#include <sys/sysctl.h>
56#include <vm/uma.h>
57
58#include <sys/sockio.h>
59#include <sys/mbuf.h>
60#include <sys/socket.h>
61
62#include <net/if.h>
63#include <net/if_media.h>
64#include <net/if_atm.h>
65#include <net/route.h>
66#ifdef ENABLE_BPF
67#include <net/bpf.h>
68#endif
69#include <netinet/in.h>
70#include <netinet/if_atm.h>
71
72#include <machine/bus.h>
73#include <machine/resource.h>
74#include <sys/bus.h>
75#include <sys/rman.h>
76#include <dev/pci/pcireg.h>
77#include <dev/pci/pcivar.h>
78
79#include <dev/utopia/utopia.h>
80#include <dev/hatm/if_hatmconf.h>
81#include <dev/hatm/if_hatmreg.h>
82#include <dev/hatm/if_hatmvar.h>
83
84static const struct {
85 uint16_t vid;
86 uint16_t did;
87 const char *name;
88} hatm_devs[] = {
89 { 0x1127, 0x400,
90 "FORE HE" },
91 { 0, 0, NULL }
92};
93
94SYSCTL_DECL(_hw_atm);
95
96MODULE_DEPEND(hatm, utopia, 1, 1, 1);
97MODULE_DEPEND(hatm, pci, 1, 1, 1);
98MODULE_DEPEND(hatm, atm, 1, 1, 1);
99
100#define EEPROM_DELAY 400 /* microseconds */
101
102/* Read from EEPROM 0000 0011b */
103static const uint32_t readtab[] = {
104 HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK,
105 0,
106 HE_REGM_HOST_PROM_CLOCK,
107 0, /* 0 */
108 HE_REGM_HOST_PROM_CLOCK,
109 0, /* 0 */
110 HE_REGM_HOST_PROM_CLOCK,
111 0, /* 0 */
112 HE_REGM_HOST_PROM_CLOCK,
113 0, /* 0 */
114 HE_REGM_HOST_PROM_CLOCK,
115 0, /* 0 */
116 HE_REGM_HOST_PROM_CLOCK,
117 HE_REGM_HOST_PROM_DATA_IN, /* 0 */
118 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
119 HE_REGM_HOST_PROM_DATA_IN, /* 1 */
120 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
121 HE_REGM_HOST_PROM_DATA_IN, /* 1 */
122};
123static const uint32_t clocktab[] = {
124 0, HE_REGM_HOST_PROM_CLOCK,
125 0, HE_REGM_HOST_PROM_CLOCK,
126 0, HE_REGM_HOST_PROM_CLOCK,
127 0, HE_REGM_HOST_PROM_CLOCK,
128 0, HE_REGM_HOST_PROM_CLOCK,
129 0, HE_REGM_HOST_PROM_CLOCK,
130 0, HE_REGM_HOST_PROM_CLOCK,
131 0, HE_REGM_HOST_PROM_CLOCK,
132 0
133};
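/*
 * These tables are used by hatm_read_prom_byte() below, which bit-bangs the
 * serial EEPROM through the HOST_CNTL register.  readtab asserts PROM_SEL and
 * shifts out the READ command 0000 0011b on PROM_DATA_IN, one half clock
 * period per entry; clocktab then provides the plain clock toggle pattern
 * used while shifting out the 8 address bits and shifting in the 8 data bits.
 */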
134
135/*
136 * Convert cell rate to ATM Forum format
137 */
138u_int
139hatm_cps2atmf(uint32_t pcr)
140{
141 u_int e;
142
143 if (pcr == 0)
144 return (0);
145 pcr <<= 9;
146 e = 0;
147 while (pcr > (1024 - 1)) {
148 e++;
149 pcr >>= 1;
150 }
151 return ((1 << 14) | (e << 9) | (pcr & 0x1ff));
152}
153u_int
154hatm_atmf2cps(uint32_t fcr)
155{
156 fcr &= 0x7fff;
157
158 return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512
159 * (fcr >> 14));
160}
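/*
 * For reference: the ATM Forum format handled by the two functions above
 * packs a cell rate into 15 bits.  Bit 14 is the non-zero flag, bits 13..9
 * hold a 5-bit exponent e and bits 8..0 a 9-bit mantissa m, giving a rate of
 * approximately (1 << e) * (512 + m) / 512 cells per second.  For example,
 * hatm_cps2atmf(100000) yields e = 16, m = 269 (0x610d), which
 * hatm_atmf2cps() maps back to 99968 cps.
 */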
161
162/************************************************************
163 *
164 * Initialisation
165 */
166/*
167 * Probe for a HE controller
168 */
169static int
170hatm_probe(device_t dev)
171{
172 int i;
173
174 for (i = 0; hatm_devs[i].name; i++)
175 if (pci_get_vendor(dev) == hatm_devs[i].vid &&
176 pci_get_device(dev) == hatm_devs[i].did) {
177 device_set_desc(dev, hatm_devs[i].name);
178 return (0);
179 }
180 return (ENXIO);
181}
182
183/*
184 * Allocate and map DMA-able memory. We support only contiguous mappings.
185 */
186static void
187dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
188{
189 if (error)
190 return;
191 KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs));
192 KASSERT(segs[0].ds_addr <= 0xffffffffUL,
193 ("phys addr too large %lx", (u_long)segs[0].ds_addr));
194
195 *(bus_addr_t *)arg = segs[0].ds_addr;
196}
197static int
198hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem)
199{
200 int error;
201
202 mem->base = NULL;
203
204 /*
 205	 * Alignment does not work in the bus_dmamem_alloc function below
 206	 * on FreeBSD. malloc seems to align objects at least to the object
 207	 * size, so increase the size to the alignment if the size is less
 208	 * than the alignment.
209 * XXX on sparc64 this is (probably) not needed.
210 */
211 if (mem->size < mem->align)
212 mem->size = mem->align;
213
214 error = bus_dma_tag_create(sc->parent_tag, mem->align, 0,
215 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
216 NULL, NULL, mem->size, 1,
217 BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
218 NULL, NULL, &mem->tag);
219 if (error) {
220 if_printf(&sc->ifatm.ifnet, "DMA tag create (%s)\n", what);
221 return (error);
222 }
223
224 error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map);
225 if (error) {
226 if_printf(&sc->ifatm.ifnet, "DMA mem alloc (%s): %d\n",
227 what, error);
228 bus_dma_tag_destroy(mem->tag);
229 mem->base = NULL;
230 return (error);
231 }
232
233 error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size,
234 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
235 if (error) {
236 if_printf(&sc->ifatm.ifnet, "DMA map load (%s): %d\n",
237 what, error);
238 bus_dmamem_free(mem->tag, mem->base, mem->map);
239 bus_dma_tag_destroy(mem->tag);
240 mem->base = NULL;
241 return (error);
242 }
243
244 DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size,
245 mem->align, mem->base, (u_long)mem->paddr));
246
247 return (0);
248}
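/*
 * Note that callers of hatm_alloc_dmamem() must fill in mem->size and
 * mem->align before the call.  On success mem->base holds the kernel virtual
 * address of the region and mem->paddr the address of its single physical
 * segment as recorded by dmaload_helper().
 */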
249
250/*
251 * Destroy all the resources of an DMA-able memory region.
252 */
253static void
254hatm_destroy_dmamem(struct dmamem *mem)
255{
256 if (mem->base != NULL) {
257 bus_dmamap_unload(mem->tag, mem->map);
258 bus_dmamem_free(mem->tag, mem->base, mem->map);
259 (void)bus_dma_tag_destroy(mem->tag);
260 mem->base = NULL;
261 }
262}
263
264/*
265 * Initialize/destroy DMA maps for the large pool 0
266 */
267static void
268hatm_destroy_rmaps(struct hatm_softc *sc)
269{
270 u_int b;
271
272 DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers..."));
273 if (sc->rmaps != NULL) {
274 for (b = 0; b < sc->lbufs_size; b++)
275 bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]);
276 free(sc->rmaps, M_DEVBUF);
277 }
278 if (sc->lbufs != NULL)
279 free(sc->lbufs, M_DEVBUF);
280}
281
282static void
283hatm_init_rmaps(struct hatm_softc *sc)
284{
285 u_int b;
286 int err;
287
288 DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers..."));
289 sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size,
290 M_DEVBUF, M_ZERO | M_WAITOK);
291
292 /* allocate and create the DMA maps for the large pool */
293 sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size,
294 M_DEVBUF, M_WAITOK);
295 for (b = 0; b < sc->lbufs_size; b++) {
296 err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]);
297 if (err != 0)
298 panic("bus_dmamap_create: %d\n", err);
299 }
300}
301
302/*
303 * Initialize and destroy small mbuf page pointers and pages
304 */
305static void
306hatm_destroy_smbufs(struct hatm_softc *sc)
307{
308 u_int i, b;
309 struct mbuf_page *pg;
310
311 if (sc->mbuf_pages != NULL) {
312 for (i = 0; i < sc->mbuf_npages; i++) {
313 pg = sc->mbuf_pages[i];
314 for (b = 0; b < pg->hdr.nchunks; b++) {
315 if (MBUF_TST_BIT(pg->hdr.card, b))
316 if_printf(&sc->ifatm.ifnet,
317 "%s -- mbuf page=%u card buf %u\n",
318 __func__, i, b);
319 if (MBUF_TST_BIT(pg->hdr.used, b))
320 if_printf(&sc->ifatm.ifnet,
321 "%s -- mbuf page=%u used buf %u\n",
322 __func__, i, b);
323 }
324 bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
325 bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
326 free(pg, M_DEVBUF);
327 }
328 free(sc->mbuf_pages, M_DEVBUF);
329 }
330}
331
332static void
333hatm_init_smbufs(struct hatm_softc *sc)
334{
335 sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) *
336 HE_CONFIG_MAX_MBUF_PAGES, M_DEVBUF, M_WAITOK);
337 sc->mbuf_npages = 0;
338}
339
340/*
341 * Initialize/destroy TPDs. This is called from attach/detach.
342 */
343static void
344hatm_destroy_tpds(struct hatm_softc *sc)
345{
346 struct tpd *t;
347
348 if (sc->tpds.base == NULL)
349 return;
350
351 DBG(sc, ATTACH, ("releasing TPDs ..."));
352 if (sc->tpd_nfree != sc->tpd_total)
353 if_printf(&sc->ifatm.ifnet, "%u tpds still in use from %u\n",
354 sc->tpd_total - sc->tpd_nfree, sc->tpd_total);
355 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
356 SLIST_REMOVE_HEAD(&sc->tpd_free, link);
357 bus_dmamap_destroy(sc->tx_tag, t->map);
358 }
359 hatm_destroy_dmamem(&sc->tpds);
360 free(sc->tpd_used, M_DEVBUF);
361 DBG(sc, ATTACH, ("... done"));
362}
363static int
364hatm_init_tpds(struct hatm_softc *sc)
365{
366 int error;
367 u_int i;
368 struct tpd *t;
369
370 DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total));
371 error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds);
372 if (error != 0) {
373 DBG(sc, ATTACH, ("... dmamem error=%d", error));
374 return (error);
375 }
376
377 /* put all the TPDs on the free list and allocate DMA maps */
378 for (i = 0; i < sc->tpd_total; i++) {
379 t = TPD_ADDR(sc, i);
380 t->no = i;
381 t->mbuf = NULL;
382 error = bus_dmamap_create(sc->tx_tag, 0, &t->map);
383 if (error != 0) {
384 DBG(sc, ATTACH, ("... dmamap error=%d", error));
385 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
386 SLIST_REMOVE_HEAD(&sc->tpd_free, link);
387 bus_dmamap_destroy(sc->tx_tag, t->map);
388 }
389 hatm_destroy_dmamem(&sc->tpds);
390 return (error);
391 }
392
393 SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
394 }
395
396 /* allocate and zero bitmap */
397 sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8,
398 M_DEVBUF, M_ZERO | M_WAITOK);
399 sc->tpd_nfree = sc->tpd_total;
400
401 DBG(sc, ATTACH, ("... done"));
402
403 return (0);
404}
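/*
 * TPD bookkeeping: free TPDs are kept on the tpd_free list, while tpd_used is
 * a bitmap with one bit per TPD ((tpd_total + 7) / 8 bytes) marking the TPDs
 * currently handed to the card.  hatm_stop_tpds() below walks this bitmap to
 * reclaim them.
 */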
405
406/*
 407 * Free all the TPDs that were given to the card.
408 * An mbuf chain may be attached to a TPD - free it also and
409 * unload its associated DMA map.
410 */
411static void
412hatm_stop_tpds(struct hatm_softc *sc)
413{
414 u_int i;
415 struct tpd *t;
416
417 DBG(sc, ATTACH, ("free TPDs ..."));
418 for (i = 0; i < sc->tpd_total; i++) {
419 if (TPD_TST_USED(sc, i)) {
420 t = TPD_ADDR(sc, i);
421 if (t->mbuf) {
422 m_freem(t->mbuf);
423 t->mbuf = NULL;
424 bus_dmamap_unload(sc->tx_tag, t->map);
425 }
426 TPD_CLR_USED(sc, i);
427 SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
428 sc->tpd_nfree++;
429 }
430 }
431}
432
433/*
434 * This frees ALL resources of this interface and leaves the structure
435 * in an indeterminate state. This is called just before detaching or
436 * on a failed attach. No lock should be held.
437 */
438static void
439hatm_destroy(struct hatm_softc *sc)
440{
441 u_int cid;
442
443 bus_teardown_intr(sc->dev, sc->irqres, sc->ih);
444
445 hatm_destroy_rmaps(sc);
446 hatm_destroy_smbufs(sc);
447 hatm_destroy_tpds(sc);
448
449 if (sc->vcc_zone != NULL) {
450 for (cid = 0; cid < HE_MAX_VCCS; cid++)
451 if (sc->vccs[cid] != NULL)
452 uma_zfree(sc->vcc_zone, sc->vccs[cid]);
453 uma_zdestroy(sc->vcc_zone);
454 }
455
456 /*
457 * Release all memory allocated to the various queues and
 458	 * status pages. These have their own flag which shows whether
459 * they are really allocated.
460 */
461 hatm_destroy_dmamem(&sc->irq_0.mem);
462 hatm_destroy_dmamem(&sc->rbp_s0.mem);
463 hatm_destroy_dmamem(&sc->rbp_l0.mem);
464 hatm_destroy_dmamem(&sc->rbp_s1.mem);
465 hatm_destroy_dmamem(&sc->rbrq_0.mem);
466 hatm_destroy_dmamem(&sc->rbrq_1.mem);
467 hatm_destroy_dmamem(&sc->tbrq.mem);
468 hatm_destroy_dmamem(&sc->tpdrq.mem);
469 hatm_destroy_dmamem(&sc->hsp_mem);
470
471 if (sc->irqres != NULL)
472 bus_release_resource(sc->dev, SYS_RES_IRQ,
473 sc->irqid, sc->irqres);
474
475 if (sc->tx_tag != NULL)
476 if (bus_dma_tag_destroy(sc->tx_tag))
477 if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n");
478
479 if (sc->mbuf_tag != NULL)
480 if (bus_dma_tag_destroy(sc->mbuf_tag))
481 if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n");
482
483 if (sc->parent_tag != NULL)
484 if (bus_dma_tag_destroy(sc->parent_tag))
485 if_printf(&sc->ifatm.ifnet, "parent DMA tag busy\n");
486
487 if (sc->memres != NULL)
488 bus_release_resource(sc->dev, SYS_RES_MEMORY,
489 sc->memid, sc->memres);
490
491 sysctl_ctx_free(&sc->sysctl_ctx);
492
493 cv_destroy(&sc->cv_rcclose);
494 cv_destroy(&sc->vcc_cv);
495 mtx_destroy(&sc->mbuf0_mtx);
496 mtx_destroy(&sc->mbuf1_mtx);
497 mtx_destroy(&sc->mtx);
498}
499
500/*
501 * 4.4 Card reset
502 */
503static int
504hatm_reset(struct hatm_softc *sc)
505{
506 u_int v, count;
507
508 WRITE4(sc, HE_REGO_RESET_CNTL, 0x00);
509 BARRIER_W(sc);
510 WRITE4(sc, HE_REGO_RESET_CNTL, 0xff);
511 BARRIER_RW(sc);
512 count = 0;
513 while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) {
514 BARRIER_R(sc);
515 if (++count == 100) {
516 if_printf(&sc->ifatm.ifnet, "reset failed\n");
517 return (ENXIO);
518 }
519 DELAY(1000);
520 }
521 return (0);
522}
523
524/*
525 * 4.5 Set Bus Width
526 */
527static void
528hatm_init_bus_width(struct hatm_softc *sc)
529{
530 uint32_t v, v1;
531
532 v = READ4(sc, HE_REGO_HOST_CNTL);
533 BARRIER_R(sc);
534 if (v & HE_REGM_HOST_BUS64) {
535 sc->pci64 = 1;
536 v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
537 v1 |= HE_PCIM_CTL0_64BIT;
538 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4);
539
540 v |= HE_REGM_HOST_DESC_RD64
541 | HE_REGM_HOST_DATA_RD64
542 | HE_REGM_HOST_DATA_WR64;
543 WRITE4(sc, HE_REGO_HOST_CNTL, v);
544 BARRIER_W(sc);
545 } else {
546 sc->pci64 = 0;
547 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
548 v &= ~HE_PCIM_CTL0_64BIT;
549 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
550 }
551}
552
553/*
 554 * 4.6 Set Host Endianness
555 */
556static void
557hatm_init_endianess(struct hatm_softc *sc)
558{
559 uint32_t v;
560
561 v = READ4(sc, HE_REGO_LB_SWAP);
562 BARRIER_R(sc);
563#if BYTE_ORDER == BIG_ENDIAN
564 v |= HE_REGM_LBSWAP_INTR_SWAP |
565 HE_REGM_LBSWAP_DESC_WR_SWAP |
566 HE_REGM_LBSWAP_BIG_ENDIAN;
567 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
568 HE_REGM_LBSWAP_DESC_RD_SWAP |
569 HE_REGM_LBSWAP_DATA_RD_SWAP);
570#else
571 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
572 HE_REGM_LBSWAP_DESC_RD_SWAP |
573 HE_REGM_LBSWAP_DATA_RD_SWAP |
574 HE_REGM_LBSWAP_INTR_SWAP |
575 HE_REGM_LBSWAP_DESC_WR_SWAP |
576 HE_REGM_LBSWAP_BIG_ENDIAN);
577#endif
578
579 if (sc->he622)
580 v |= HE_REGM_LBSWAP_XFER_SIZE;
581
582 WRITE4(sc, HE_REGO_LB_SWAP, v);
583 BARRIER_W(sc);
584}
585
586/*
587 * 4.7 Read EEPROM
588 */
589static uint8_t
590hatm_read_prom_byte(struct hatm_softc *sc, u_int addr)
591{
592 uint32_t val, tmp_read, byte_read;
593 u_int i, j;
594 int n;
595
596 val = READ4(sc, HE_REGO_HOST_CNTL);
597 val &= HE_REGM_HOST_PROM_BITS;
598 BARRIER_R(sc);
599
600 val |= HE_REGM_HOST_PROM_WREN;
601 WRITE4(sc, HE_REGO_HOST_CNTL, val);
602 BARRIER_W(sc);
603
604 /* send READ */
605 for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) {
606 WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]);
607 BARRIER_W(sc);
608 DELAY(EEPROM_DELAY);
609 }
610
611 /* send ADDRESS */
612 for (n = 7, j = 0; n >= 0; n--) {
613 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
614 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
615 BARRIER_W(sc);
616 DELAY(EEPROM_DELAY);
617 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
618 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
619 BARRIER_W(sc);
620 DELAY(EEPROM_DELAY);
621 }
622
623 val &= ~HE_REGM_HOST_PROM_WREN;
624 WRITE4(sc, HE_REGO_HOST_CNTL, val);
625 BARRIER_W(sc);
626
627 /* read DATA */
628 byte_read = 0;
629 for (n = 7, j = 0; n >= 0; n--) {
630 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
631 BARRIER_W(sc);
632 DELAY(EEPROM_DELAY);
633 tmp_read = READ4(sc, HE_REGO_HOST_CNTL);
634 byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT)
635 >> HE_REGS_HOST_PROM_DATA_OUT) << n);
636 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
637 BARRIER_W(sc);
638 DELAY(EEPROM_DELAY);
639 }
640 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
641 BARRIER_W(sc);
642 DELAY(EEPROM_DELAY);
643
644 return (byte_read);
645}
646
647static void
648hatm_init_read_eeprom(struct hatm_softc *sc)
649{
650 u_int n, count;
651 u_char byte;
652 uint32_t v;
653
654 for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) {
655 byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count);
656 if (n > 0 || byte != ' ')
657 sc->prod_id[n++] = byte;
658 }
659 while (n > 0 && sc->prod_id[n-1] == ' ')
660 n--;
661 sc->prod_id[n] = '\0';
662
663 for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) {
664 byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count);
665 if (n > 0 || byte != ' ')
666 sc->rev[n++] = byte;
667 }
668 while (n > 0 && sc->rev[n-1] == ' ')
669 n--;
670 sc->rev[n] = '\0';
671 sc->ifatm.mib.hw_version = sc->rev[0];
672
673 sc->ifatm.mib.serial = hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0;
674 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8;
675 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16;
676 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24;
677
678 v = hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0;
679 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8;
680 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16;
681 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24;
682
683 switch (v) {
684 case HE_MEDIA_UTP155:
685 sc->ifatm.mib.media = IFM_ATM_UTP_155;
686 sc->ifatm.mib.pcr = ATM_RATE_155M;
687 break;
688
689 case HE_MEDIA_MMF155:
690 sc->ifatm.mib.media = IFM_ATM_MM_155;
691 sc->ifatm.mib.pcr = ATM_RATE_155M;
692 break;
693
694 case HE_MEDIA_MMF622:
695 sc->ifatm.mib.media = IFM_ATM_MM_622;
696 sc->ifatm.mib.device = ATM_DEVICE_HE622;
697 sc->ifatm.mib.pcr = ATM_RATE_622M;
698 sc->he622 = 1;
699 break;
700
701 case HE_MEDIA_SMF155:
702 sc->ifatm.mib.media = IFM_ATM_SM_155;
703 sc->ifatm.mib.pcr = ATM_RATE_155M;
704 break;
705
706 case HE_MEDIA_SMF622:
707 sc->ifatm.mib.media = IFM_ATM_SM_622;
708 sc->ifatm.mib.device = ATM_DEVICE_HE622;
709 sc->ifatm.mib.pcr = ATM_RATE_622M;
710 sc->he622 = 1;
711 break;
712 }
713
714 sc->ifatm.mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0);
715 sc->ifatm.mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1);
716 sc->ifatm.mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2);
717 sc->ifatm.mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3);
718 sc->ifatm.mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4);
719 sc->ifatm.mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5);
720}
721
722/*
723 * Clear unused interrupt queue
724 */
725static void
726hatm_clear_irq(struct hatm_softc *sc, u_int group)
727{
728 WRITE4(sc, HE_REGO_IRQ_BASE(group), 0);
729 WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0);
730 WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0);
731 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
732}
733
734/*
735 * 4.10 Initialize interrupt queues
736 */
737static void
738hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group)
739{
740 u_int i;
741
742 if (q->size == 0) {
743 hatm_clear_irq(sc, group);
744 return;
745 }
746
747 q->group = group;
748 q->sc = sc;
749 q->irq = q->mem.base;
750 q->head = 0;
751 q->tailp = q->irq + (q->size - 1);
752 *q->tailp = 0;
753
754 for (i = 0; i < q->size; i++)
755 q->irq[i] = HE_REGM_ITYPE_INVALID;
756
757 WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr);
758 WRITE4(sc, HE_REGO_IRQ_HEAD(group),
759 ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
760 (q->thresh << HE_REGS_IRQ_HEAD_THRESH));
761 WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line);
762 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
763}
764
765/*
766 * 5.1.3 Initialize connection memory
767 */
768static void
769hatm_init_cm(struct hatm_softc *sc)
770{
771 u_int rsra, mlbm, rabr, numbuffs;
772 u_int tsra, tabr, mtpd;
773 u_int n;
774
775 for (n = 0; n < HE_CONFIG_TXMEM; n++)
776 WRITE_TCM4(sc, n, 0);
777 for (n = 0; n < HE_CONFIG_RXMEM; n++)
778 WRITE_RCM4(sc, n, 0);
779
780 numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs;
781
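	/*
	 * Carve up receive and transmit connection memory.  Expressions of
	 * the form (x + (a - 1)) & ~(a - 1) round x up to the next multiple
	 * of the power-of-two alignment a (0x800 for the 2k boundaries
	 * below).
	 */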
782 rsra = 0;
783 mlbm = ((rsra + sc->ifatm.mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
784 rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
785 sc->rsrb = ((rabr + 2048) + (2 * sc->ifatm.mib.max_vccs - 1)) &
786 ~(2 * sc->ifatm.mib.max_vccs - 1);
787
788 tsra = 0;
789 sc->tsrb = tsra + sc->ifatm.mib.max_vccs * 8;
790 sc->tsrc = sc->tsrb + sc->ifatm.mib.max_vccs * 4;
791 sc->tsrd = sc->tsrc + sc->ifatm.mib.max_vccs * 2;
792 tabr = sc->tsrd + sc->ifatm.mib.max_vccs * 1;
793 mtpd = ((tabr + 1024) + (16 * sc->ifatm.mib.max_vccs - 1)) &
794 ~(16 * sc->ifatm.mib.max_vccs - 1);
795
796 DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
797 rsra, mlbm, rabr, sc->rsrb));
798 DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x",
799 tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd));
800
801 WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb);
802 WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc);
803 WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd);
804 WRITE4(sc, HE_REGO_TMABR_BA, tabr);
805 WRITE4(sc, HE_REGO_TPD_BA, mtpd);
806
807 WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb);
808 WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm);
809 WRITE4(sc, HE_REGO_RCMABR_BA, rabr);
810
811 BARRIER_W(sc);
812}
813
814/*
815 * 5.1.4 Initialize Local buffer Pools
816 */
817static void
818hatm_init_rx_buffer_pool(struct hatm_softc *sc,
819 u_int num, /* bank */
820 u_int start, /* start row */
821 u_int numbuffs /* number of entries */
822)
823{
824 u_int row_size; /* bytes per row */
825 uint32_t row_addr; /* start address of this row */
826 u_int lbuf_size; /* bytes per lbuf */
827 u_int lbufs_per_row; /* number of lbufs per memory row */
828 uint32_t lbufd_index; /* index of lbuf descriptor */
829 uint32_t lbufd_addr; /* address of lbuf descriptor */
830 u_int lbuf_row_cnt; /* current lbuf in current row */
831 uint32_t lbuf_addr; /* address of current buffer */
832 u_int i;
833
 834	row_size = sc->bytes_per_row;
835 row_addr = start * row_size;
836 lbuf_size = sc->cells_per_lbuf * 48;
837 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
838
839 /* descriptor index */
840 lbufd_index = num;
841
842 /* 2 words per entry */
843 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
844
845 /* write head of queue */
846 WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index);
847
848 lbuf_row_cnt = 0;
849 for (i = 0; i < numbuffs; i++) {
850 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
851
852 WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
853
854 lbufd_index += 2;
855 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
856
857 if (++lbuf_row_cnt == lbufs_per_row) {
858 lbuf_row_cnt = 0;
859 row_addr += row_size;
860 }
861
862 lbufd_addr += 2 * 2;
863 }
864
865 WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2);
866 WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs);
867
868 BARRIER_W(sc);
869}
870
871static void
872hatm_init_tx_buffer_pool(struct hatm_softc *sc,
873 u_int start, /* start row */
874 u_int numbuffs /* number of entries */
875)
876{
877 u_int row_size; /* bytes per row */
878 uint32_t row_addr; /* start address of this row */
879 u_int lbuf_size; /* bytes per lbuf */
880 u_int lbufs_per_row; /* number of lbufs per memory row */
881 uint32_t lbufd_index; /* index of lbuf descriptor */
882 uint32_t lbufd_addr; /* address of lbuf descriptor */
883 u_int lbuf_row_cnt; /* current lbuf in current row */
884 uint32_t lbuf_addr; /* address of current buffer */
885 u_int i;
886
 887	row_size = sc->bytes_per_row;
888 row_addr = start * row_size;
889 lbuf_size = sc->cells_per_lbuf * 48;
890 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
891
892 /* descriptor index */
893 lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs;
894
895 /* 2 words per entry */
896 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
897
898 /* write head of queue */
899 WRITE4(sc, HE_REGO_TLBF_H, lbufd_index);
900
901 lbuf_row_cnt = 0;
902 for (i = 0; i < numbuffs; i++) {
903 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
904
905 WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
906 lbufd_index++;
907 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
908
909 if (++lbuf_row_cnt == lbufs_per_row) {
910 lbuf_row_cnt = 0;
911 row_addr += row_size;
912 }
913
914 lbufd_addr += 2;
915 }
916
917 WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1);
918 BARRIER_W(sc);
919}
920
921/*
922 * 5.1.5 Initialize Intermediate Receive Queues
923 */
924static void
925hatm_init_imed_queues(struct hatm_softc *sc)
926{
927 u_int n;
928
929 if (sc->he622) {
930 for (n = 0; n < 8; n++) {
931 WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f);
932 WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f);
933 }
934 } else {
935 for (n = 0; n < 8; n++) {
936 WRITE4(sc, HE_REGO_INMQ_S(n), n);
937 WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8);
938 }
939 }
940}
941
942/*
943 * 5.1.7 Init CS block
944 */
945static void
946hatm_init_cs_block(struct hatm_softc *sc)
947{
948 u_int n, i;
949 u_int clkfreg, cellrate, decr, tmp;
950 static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR;
951 static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL;
952 static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT;
953 static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR;
954 static const uint32_t rtatr[2] = HE_REGT_CS_RTATR;
955 static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC;
956 static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF;
957
958 /* Clear Rate Controller Start Times and Occupied Flags */
959 for (n = 0; n < 32; n++)
960 WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0);
961
962 clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
963 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
964 decr = cellrate / 32;
965
966 for (n = 0; n < 16; n++) {
967 tmp = clkfreg / cellrate;
968 WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1);
969 cellrate -= decr;
970 }
971
972 i = (sc->cells_per_lbuf == 2) ? 0
 973	    : (sc->cells_per_lbuf == 4) ? 1
974 : 2;
975
976 /* table 5.2 */
977 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]);
978 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]);
979 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]);
980 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]);
981 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]);
982
983 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]);
984 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]);
985 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]);
986
987 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]);
988 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]);
989
990 WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]);
991 WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]);
992
993 WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]);
994 WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]);
995 WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]);
996 WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]);
997 WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]);
998 WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]);
999
1000 WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]);
1001 WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]);
1002
1003 WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8);
1004
1005 for (n = 0; n < 8; n++)
1006 WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0);
1007}
1008
1009/*
1010 * 5.1.8 CS Block Connection Memory Initialisation
1011 */
1012static void
1013hatm_init_cs_block_cm(struct hatm_softc *sc)
1014{
1015 u_int n, i;
1016 u_int expt, mant, etrm, wcr, ttnrm, tnrm;
1017 uint32_t rate;
1018 uint32_t clkfreq, cellrate, decr;
1019 uint32_t *rg, rtg, val = 0;
1020 uint64_t drate;
1021 u_int buf, buf_limit;
1022 uint32_t base = READ4(sc, HE_REGO_RCMABR_BA);
1023
1024 for (n = 0; n < HE_REGL_CM_GQTBL; n++)
1025 WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0);
1026 for (n = 0; n < HE_REGL_CM_RGTBL; n++)
1027 WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0);
1028
1029 tnrm = 0;
1030 for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) {
1031 expt = (n >> 5) & 0x1f;
1032 mant = ((n & 0x18) << 4) | 0x7f;
1033 wcr = (1 << expt) * (mant + 512) / 512;
1034 etrm = n & 0x7;
1035 ttnrm = wcr / 10 / (1 << etrm);
1036 if (ttnrm > 255)
1037 ttnrm = 255;
1038 else if(ttnrm < 2)
1039 ttnrm = 2;
1040 tnrm = (tnrm << 8) | (ttnrm & 0xff);
1041 if (n % 4 == 0)
1042 WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm);
1043 }
1044
1045 clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
1046 buf_limit = 4;
1047
1048 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1049 decr = cellrate / 32;
1050
1051 /* compute GRID top row in 1000 * cps */
1052 for (n = 0; n < 16; n++) {
1053 u_int interval = clkfreq / cellrate;
1054 sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval;
1055 cellrate -= decr;
1056 }
1057
1058 /* compute the other rows according to 2.4 */
1059 for (i = 1; i < 16; i++)
1060 for (n = 0; n < 16; n++)
1061 sc->rate_grid[i][n] = sc->rate_grid[i-1][n] /
1062 ((i < 14) ? 2 : 4);
1063
1064 /* first entry is line rate */
1065 n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M);
1066 expt = (n >> 9) & 0x1f;
1067 mant = n & 0x1f0;
1068 sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512;
1069
1070 /* now build the conversion table - each 32 bit word contains
1071 * two entries - this gives a total of 0x400 16 bit entries.
1072 * This table maps the truncated ATMF rate version into a grid index */
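	/*
	 * Worked example for one table index: rate = 0x210 unpacks below to
	 * expt = 0x10 and mant = 0x100, i.e. a cell rate of
	 * (1 << 16) * 1000 * (256 + 512) / 512 = 98304000 in the same
	 * 1000 * cps units used for rate_grid[][] above.
	 */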
1073 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1074 rg = &sc->rate_grid[15][15];
1075
1076 for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) {
1077 /* unpack the ATMF rate */
1078 expt = rate >> 5;
1079 mant = (rate & 0x1f) << 4;
1080
1081 /* get the cell rate - minimum is 10 per second */
1082 drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512;
1083 if (drate < 10 * 1000)
1084 drate = 10 * 1000;
1085
1086 /* now look up the grid index */
1087 while (drate >= *rg && rg-- > &sc->rate_grid[0][0])
1088 ;
1089 rg++;
1090 rtg = rg - &sc->rate_grid[0][0];
1091
1092 /* now compute the buffer limit */
1093 buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000;
1094 if (buf == 0)
1095 buf = 1;
1096 else if (buf > buf_limit)
1097 buf = buf_limit;
1098
1099 /* make value */
1100 val = (val << 16) | (rtg << 8) | buf;
1101
1102 /* write */
1103 if (rate % 2 == 1)
1104 WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val);
1105 }
1106}
1107
1108/*
1109 * Clear an unused receive group buffer pool
1110 */
1111static void
1112hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large)
1113{
1114 WRITE4(sc, HE_REGO_RBP_S(large, group), 0);
1115 WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1116 WRITE4(sc, HE_REGO_RBP_QI(large, group), 1);
1117 WRITE4(sc, HE_REGO_RBP_BL(large, group), 0);
1118}
1119
1120/*
1121 * Initialize a receive group buffer pool
1122 */
1123static void
1124hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group,
1125 u_int large)
1126{
1127 if (q->size == 0) {
1128 hatm_clear_rpool(sc, group, large);
1129 return;
1130 }
1131
1132 bzero(q->mem.base, q->mem.size);
1133 q->rbp = q->mem.base;
1134 q->head = q->tail = 0;
1135
1136 DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large],
1137 (u_long)q->mem.paddr));
1138
1139 WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr);
1140 WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1141 WRITE4(sc, HE_REGO_RBP_QI(large, group),
1142 ((q->size - 1) << HE_REGS_RBP_SIZE) |
1143 HE_REGM_RBP_INTR_ENB |
1144 (q->thresh << HE_REGS_RBP_THRESH));
1145 WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1);
1146}
1147
1148/*
1149 * Clear an unused receive buffer return queue
1150 */
1151static void
1152hatm_clear_rbrq(struct hatm_softc *sc, u_int group)
1153{
1154 WRITE4(sc, HE_REGO_RBRQ_ST(group), 0);
1155 WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1156 WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH));
1157 WRITE4(sc, HE_REGO_RBRQ_I(group), 0);
1158}
1159
1160/*
1161 * Initialize receive buffer return queue
1162 */
1163static void
1164hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
1165{
1166 if (rq->size == 0) {
1167 hatm_clear_rbrq(sc, group);
1168 return;
1169 }
1170
1171 rq->rbrq = rq->mem.base;
1172 rq->head = 0;
1173
1174 DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr));
1175
1176 WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr);
1177 WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1178 WRITE4(sc, HE_REGO_RBRQ_Q(group),
1179 (rq->thresh << HE_REGS_RBRQ_THRESH) |
1180 ((rq->size - 1) << HE_REGS_RBRQ_SIZE));
1181 WRITE4(sc, HE_REGO_RBRQ_I(group),
1182 (rq->tout << HE_REGS_RBRQ_TIME) |
1183 (rq->pcnt << HE_REGS_RBRQ_COUNT));
1184}
1185
1186/*
1187 * Clear an unused transmit buffer return queue N
1188 */
1189static void
1190hatm_clear_tbrq(struct hatm_softc *sc, u_int group)
1191{
1192 WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0);
1193 WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1194 WRITE4(sc, HE_REGO_TBRQ_S(group), 0);
1195 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1);
1196}
1197
1198/*
1199 * Initialize transmit buffer return queue N
1200 */
1201static void
1202hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group)
1203{
1204 if (tq->size == 0) {
1205 hatm_clear_tbrq(sc, group);
1206 return;
1207 }
1208
1209 tq->tbrq = tq->mem.base;
1210 tq->head = 0;
1211
1212 DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr));
1213
1214 WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr);
1215 WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1216 WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1);
1217 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh);
1218}
1219
1220/*
1221 * Initialize TPDRQ
1222 */
1223static void
1224hatm_init_tpdrq(struct hatm_softc *sc)
1225{
1226 struct hetpdrq *tq;
1227
1228 tq = &sc->tpdrq;
1229 tq->tpdrq = tq->mem.base;
1230 tq->tail = tq->head = 0;
1231
1232 DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr));
1233
1234 WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr);
1235 WRITE4(sc, HE_REGO_TPDRQ_T, 0);
1236 WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1);
1237}
1238
1239/*
 1240 * This function can be called by the infrastructure to start the card.
1241 */
1242static void
1243hatm_init(void *p)
1244{
1245 struct hatm_softc *sc = p;
1246
1247 mtx_lock(&sc->mtx);
1248 hatm_stop(sc);
1249 hatm_initialize(sc);
1250 mtx_unlock(&sc->mtx);
1251}
1252
1253enum {
1254 CTL_ISTATS,
1255};
1256
1257/*
1258 * Sysctl handler
1259 */
1260static int
1261hatm_sysctl(SYSCTL_HANDLER_ARGS)
1262{
1263 struct hatm_softc *sc = arg1;
1264 uint32_t *ret;
1265 int error;
1266 size_t len;
1267
1268 switch (arg2) {
1269
1270 case CTL_ISTATS:
1271 len = sizeof(sc->istats);
1272 break;
1273
1274 default:
1275 panic("bad control code");
1276 }
1277
1278 ret = malloc(len, M_TEMP, M_WAITOK);
1279 mtx_lock(&sc->mtx);
1280
1281 switch (arg2) {
1282
1283 case CTL_ISTATS:
1284 sc->istats.mcc += READ4(sc, HE_REGO_MCC);
1285 sc->istats.oec += READ4(sc, HE_REGO_OEC);
1286 sc->istats.dcc += READ4(sc, HE_REGO_DCC);
1287 sc->istats.cec += READ4(sc, HE_REGO_CEC);
1288 bcopy(&sc->istats, ret, sizeof(sc->istats));
1289 break;
1290 }
1291 mtx_unlock(&sc->mtx);
1292
1293 error = SYSCTL_OUT(req, ret, len);
1294 free(ret, M_TEMP);
1295
1296 return (error);
1297}
1298
1299static int
1300kenv_getuint(struct hatm_softc *sc, const char *var,
1301 u_int *ptr, u_int def, int rw)
1302{
1303 char full[IFNAMSIZ + 3 + 20];
1304 char *val, *end;
1305 u_int u;
1306
1307 *ptr = def;
1308
1309 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1310 OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL)
1311 return (ENOMEM);
1312
1313 snprintf(full, sizeof(full), "hw.%s.%s",
1314 device_get_nameunit(sc->dev), var);
1315
1316 if ((val = getenv(full)) == NULL)
1317 return (0);
1318 u = strtoul(val, &end, 0);
1319 if (end == val || *end != '\0') {
1320 freeenv(val);
1321 return (EINVAL);
1322 }
1323 if (bootverbose)
1324 if_printf(&sc->ifatm.ifnet, "%s=%u\n", full, u);
1325 *ptr = u;
1326 return (0);
1327}
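/*
 * The tunables fetched via kenv_getuint() come from the kernel environment,
 * so they can be set in loader.conf using the hw.<nameunit>.<var> names built
 * above, e.g. (hypothetical values):
 *
 *	hw.hatm0.rbps0.size="2048"
 *	hw.hatm0.irq0.thresh="256"
 */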
1328
1329/*
1330 * Set configurable parameters. Many of these are configurable via
1331 * kenv.
1332 */
1333static int
1334hatm_configure(struct hatm_softc *sc)
1335{
1336 /* Receive buffer pool 0 small */
1337 kenv_getuint(sc, "rbps0.size", &sc->rbp_s0.size,
1338 HE_CONFIG_RBPS0_SIZE, 0);
1339 kenv_getuint(sc, "rbps0.thresh", &sc->rbp_s0.thresh,
1340 HE_CONFIG_RBPS0_THRESH, 0);
1341 sc->rbp_s0.bsize = MBUF0_SIZE;
1342
1343 /* Receive buffer pool 0 large */
1344 kenv_getuint(sc, "rbpl0.size", &sc->rbp_l0.size,
1345 HE_CONFIG_RBPL0_SIZE, 0);
1346 kenv_getuint(sc, "rbpl0.thresh", &sc->rbp_l0.thresh,
1347 HE_CONFIG_RBPL0_THRESH, 0);
1348 sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;
1349
1350 /* Receive buffer return queue 0 */
1351 kenv_getuint(sc, "rbrq0.size", &sc->rbrq_0.size,
1352 HE_CONFIG_RBRQ0_SIZE, 0);
1353 kenv_getuint(sc, "rbrq0.thresh", &sc->rbrq_0.thresh,
1354 HE_CONFIG_RBRQ0_THRESH, 0);
1355 kenv_getuint(sc, "rbrq0.tout", &sc->rbrq_0.tout,
1356 HE_CONFIG_RBRQ0_TOUT, 0);
1357 kenv_getuint(sc, "rbrq0.pcnt", &sc->rbrq_0.pcnt,
1358 HE_CONFIG_RBRQ0_PCNT, 0);
1359
1360 /* Receive buffer pool 1 small */
1361 kenv_getuint(sc, "rbps1.size", &sc->rbp_s1.size,
1362 HE_CONFIG_RBPS1_SIZE, 0);
1363 kenv_getuint(sc, "rbps1.thresh", &sc->rbp_s1.thresh,
1364 HE_CONFIG_RBPS1_THRESH, 0);
1365 sc->rbp_s1.bsize = MBUF1_SIZE;
1366
1367 /* Receive buffer return queue 1 */
1368 kenv_getuint(sc, "rbrq1.size", &sc->rbrq_1.size,
1369 HE_CONFIG_RBRQ1_SIZE, 0);
1370 kenv_getuint(sc, "rbrq1.thresh", &sc->rbrq_1.thresh,
1371 HE_CONFIG_RBRQ1_THRESH, 0);
1372 kenv_getuint(sc, "rbrq1.tout", &sc->rbrq_1.tout,
1373 HE_CONFIG_RBRQ1_TOUT, 0);
1374 kenv_getuint(sc, "rbrq1.pcnt", &sc->rbrq_1.pcnt,
1375 HE_CONFIG_RBRQ1_PCNT, 0);
1376
1377 /* Interrupt queue 0 */
1378 kenv_getuint(sc, "irq0.size", &sc->irq_0.size,
1379 HE_CONFIG_IRQ0_SIZE, 0);
1380 kenv_getuint(sc, "irq0.thresh", &sc->irq_0.thresh,
1381 HE_CONFIG_IRQ0_THRESH, 0);
1382 sc->irq_0.line = HE_CONFIG_IRQ0_LINE;
1383
1384 /* Transmit buffer return queue 0 */
1385 kenv_getuint(sc, "tbrq0.size", &sc->tbrq.size,
1386 HE_CONFIG_TBRQ_SIZE, 0);
1387 kenv_getuint(sc, "tbrq0.thresh", &sc->tbrq.thresh,
1388 HE_CONFIG_TBRQ_THRESH, 0);
1389
1390 /* Transmit buffer ready queue */
1391 kenv_getuint(sc, "tpdrq.size", &sc->tpdrq.size,
1392 HE_CONFIG_TPDRQ_SIZE, 0);
1393 /* Max TPDs per VCC */
1394 kenv_getuint(sc, "tpdmax", &sc->max_tpd,
1395 HE_CONFIG_TPD_MAXCC, 0);
1396
1397 return (0);
1398}
1399
1400#ifdef HATM_DEBUG
1401
1402/*
1403 * Get TSRs from connection memory
1404 */
1405static int
1406hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
1407{
1408 struct hatm_softc *sc = arg1;
1409 int error, i, j;
1410 uint32_t *val;
1411
1412 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);
1413
1414 mtx_lock(&sc->mtx);
1415 for (i = 0; i < HE_MAX_VCCS; i++)
1416 for (j = 0; j <= 14; j++)
1417 val[15 * i + j] = READ_TSR(sc, i, j);
1418 mtx_unlock(&sc->mtx);
1419
1420 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
1421 free(val, M_TEMP);
1422 if (error != 0 || req->newptr == NULL)
1423 return (error);
1424
1425 return (EPERM);
1426}
1427
1428/*
1429 * Get TPDs from connection memory
1430 */
1431static int
1432hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
1433{
1434 struct hatm_softc *sc = arg1;
1435 int error, i, j;
1436 uint32_t *val;
1437
1438 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);
1439
1440 mtx_lock(&sc->mtx);
1441 for (i = 0; i < HE_MAX_VCCS; i++)
1442 for (j = 0; j < 16; j++)
1443 val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
1444 mtx_unlock(&sc->mtx);
1445
1446 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
1447 free(val, M_TEMP);
1448 if (error != 0 || req->newptr == NULL)
1449 return (error);
1450
1451 return (EPERM);
1452}
1453
1454/*
1455 * Get mbox registers
1456 */
1457static int
1458hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
1459{
1460 struct hatm_softc *sc = arg1;
1461 int error, i;
1462 uint32_t *val;
1463
1464 val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);
1465
1466 mtx_lock(&sc->mtx);
1467 for (i = 0; i < HE_REGO_CS_END; i++)
1468 val[i] = READ_MBOX4(sc, i);
1469 mtx_unlock(&sc->mtx);
1470
1471 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
1472 free(val, M_TEMP);
1473 if (error != 0 || req->newptr == NULL)
1474 return (error);
1475
1476 return (EPERM);
1477}
1478
1479/*
1480 * Get connection memory
1481 */
1482static int
1483hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
1484{
1485 struct hatm_softc *sc = arg1;
1486 int error, i;
1487 uint32_t *val;
1488
1489 val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK);
1490
1491 mtx_lock(&sc->mtx);
1492 val[0] = READ4(sc, HE_REGO_RCMABR_BA);
1493 for (i = 0; i < HE_CONFIG_RXMEM; i++)
1494 val[i + 1] = READ_RCM4(sc, i);
1495 mtx_unlock(&sc->mtx);
1496
1497 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1));
1498 free(val, M_TEMP);
1499 if (error != 0 || req->newptr == NULL)
1500 return (error);
1501
1502 return (EPERM);
1503}
1504
1505/*
1506 * Get local buffer memory
1507 */
1508static int
1509hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS)
1510{
1511 struct hatm_softc *sc = arg1;
1512 int error, i;
1513 uint32_t *val;
1514 u_int bytes = (1 << 21);
1515
1516 val = malloc(bytes, M_TEMP, M_WAITOK);
1517
1518 mtx_lock(&sc->mtx);
1519 for (i = 0; i < bytes / 4; i++)
1520 val[i] = READ_LB4(sc, i);
1521 mtx_unlock(&sc->mtx);
1522
1523 error = SYSCTL_OUT(req, val, bytes);
1524 free(val, M_TEMP);
1525 if (error != 0 || req->newptr == NULL)
1526 return (error);
1527
1528 return (EPERM);
1529}
1530
1531/*
1532 * Get all card registers
1533 */
1534static int
1535hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS)
1536{
1537 struct hatm_softc *sc = arg1;
1538 int error, i;
1539 uint32_t *val;
1540
1541 val = malloc(HE_REGO_END, M_TEMP, M_WAITOK);
1542
1543 mtx_lock(&sc->mtx);
1544 for (i = 0; i < HE_REGO_END; i += 4)
1545 val[i / 4] = READ4(sc, i);
1546 mtx_unlock(&sc->mtx);
1547
1548 error = SYSCTL_OUT(req, val, HE_REGO_END);
1549 free(val, M_TEMP);
1550 if (error != 0 || req->newptr == NULL)
1551 return (error);
1552
1553 return (EPERM);
1554}
1555#endif
1556
1557/*
 1558 * SUNI register access
1559 */
1560/*
 1561 * Read at most *n SUNI registers starting at reg into val
1562 */
1563static int
1564hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
1565{
1566 u_int i;
1567 struct hatm_softc *sc = (struct hatm_softc *)ifatm;
1568
1569 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1570 return (EINVAL);
1571 if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
 1572		*n = (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4 - reg;
1573
1574 mtx_assert(&sc->mtx, MA_OWNED);
1575 for (i = 0; i < *n; i++)
1576 val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i));
1577
1578 return (0);
1579}
1580
1581/*
 1582 * Set the bits selected by mask in register reg to the corresponding bits of val
1583 */
1584static int
1585hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
1586{
1587 uint32_t regval;
1588 struct hatm_softc *sc = (struct hatm_softc *)ifatm;
1589
1590 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1591 return (EINVAL);
1592
1593 mtx_assert(&sc->mtx, MA_OWNED);
1594 regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
1595 regval = (regval & ~mask) | (val & mask);
1596 WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);
1597
1598 return (0);
1599}
1600
1601static struct utopia_methods hatm_utopia_methods = {
1602 hatm_utopia_readregs,
1603 hatm_utopia_writereg,
1604};
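/*
 * Both methods assert that the softc mutex is held, so the utopia code that
 * is handed this method table must call them with sc->mtx locked.
 */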
1605
1606/*
1607 * Detach - if it is running, stop. Destroy.
1608 */
1609static int
1610hatm_detach(device_t dev)
1611{
1612 struct hatm_softc *sc = (struct hatm_softc *)device_get_softc(dev);
1613
1614 mtx_lock(&sc->mtx);
1615 hatm_stop(sc);
1616 if (sc->utopia.state & UTP_ST_ATTACHED) {
1617 utopia_stop(&sc->utopia);
1618 utopia_detach(&sc->utopia);
1619 }
1620 mtx_unlock(&sc->mtx);
1621
1622 atm_ifdetach(&sc->ifatm.ifnet);
1623
1624 hatm_destroy(sc);
1625
1626 return (0);
1627}
1628
1629/*
1630 * Attach to the device. Assume that no locking is needed here.
 1631 * All resources we allocate here are freed by calling hatm_destroy.
1632 */
1633static int
1634hatm_attach(device_t dev)
1635{
1636 struct hatm_softc *sc;
1637 int unit;
1638 int error;
1639 uint32_t v;
1640 struct ifnet *ifp;
1641
1642 sc = device_get_softc(dev);
1643 unit = device_get_unit(dev);
1644
1645 sc->dev = dev;
1646 sc->ifatm.mib.device = ATM_DEVICE_HE155;
1647 sc->ifatm.mib.serial = 0;
1648 sc->ifatm.mib.hw_version = 0;
1649 sc->ifatm.mib.sw_version = 0;
1650 sc->ifatm.mib.vpi_bits = HE_CONFIG_VPI_BITS;
1651 sc->ifatm.mib.vci_bits = HE_CONFIG_VCI_BITS;
1652 sc->ifatm.mib.max_vpcs = 0;
1653 sc->ifatm.mib.max_vccs = HE_MAX_VCCS;
1654 sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
1655 sc->he622 = 0;
1656 sc->ifatm.phy = &sc->utopia;
1657
1658 SLIST_INIT(&sc->mbuf0_list);
1659 SLIST_INIT(&sc->mbuf1_list);
1660 SLIST_INIT(&sc->tpd_free);
1661
1662 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1663 mtx_init(&sc->mbuf0_mtx, device_get_nameunit(dev), "HEb0", MTX_DEF);
1664 mtx_init(&sc->mbuf1_mtx, device_get_nameunit(dev), "HEb1", MTX_DEF);
1665 cv_init(&sc->vcc_cv, "HEVCCcv");
1666 cv_init(&sc->cv_rcclose, "RCClose");
1667
1668 sysctl_ctx_init(&sc->sysctl_ctx);
1669
1670 /*
1671 * 4.2 BIOS Configuration
1672 */
1673 v = pci_read_config(dev, PCIR_COMMAND, 2);
1674 v |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
1675 pci_write_config(dev, PCIR_COMMAND, v, 2);
1676
1677 /*
1678 * 4.3 PCI Bus Controller-Specific Initialisation
1679 */
1680 v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4);
1681 v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT;
1682#if BYTE_ORDER == BIG_ENDIAN && 0
1683 v |= HE_PCIM_CTL0_BIGENDIAN;
1684#endif
1685 pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4);
1686
1687 /*
1688 * Map memory
1689 */
1690 v = pci_read_config(dev, PCIR_COMMAND, 2);
1691 if (!(v & PCIM_CMD_MEMEN)) {
1692 device_printf(dev, "failed to enable memory\n");
1693 error = ENXIO;
1694 goto failed;
1695 }
37
38#include "opt_inet.h"
39#include "opt_natm.h"
40
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/kernel.h>
46#include <sys/bus.h>
47#include <sys/errno.h>
48#include <sys/conf.h>
49#include <sys/module.h>
50#include <sys/queue.h>
51#include <sys/syslog.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/condvar.h>
55#include <sys/sysctl.h>
56#include <vm/uma.h>
57
58#include <sys/sockio.h>
59#include <sys/mbuf.h>
60#include <sys/socket.h>
61
62#include <net/if.h>
63#include <net/if_media.h>
64#include <net/if_atm.h>
65#include <net/route.h>
66#ifdef ENABLE_BPF
67#include <net/bpf.h>
68#endif
69#include <netinet/in.h>
70#include <netinet/if_atm.h>
71
72#include <machine/bus.h>
73#include <machine/resource.h>
74#include <sys/bus.h>
75#include <sys/rman.h>
76#include <dev/pci/pcireg.h>
77#include <dev/pci/pcivar.h>
78
79#include <dev/utopia/utopia.h>
80#include <dev/hatm/if_hatmconf.h>
81#include <dev/hatm/if_hatmreg.h>
82#include <dev/hatm/if_hatmvar.h>
83
84static const struct {
85 uint16_t vid;
86 uint16_t did;
87 const char *name;
88} hatm_devs[] = {
89 { 0x1127, 0x400,
90 "FORE HE" },
91 { 0, 0, NULL }
92};
93
94SYSCTL_DECL(_hw_atm);
95
96MODULE_DEPEND(hatm, utopia, 1, 1, 1);
97MODULE_DEPEND(hatm, pci, 1, 1, 1);
98MODULE_DEPEND(hatm, atm, 1, 1, 1);
99
100#define EEPROM_DELAY 400 /* microseconds */
101
102/* Read from EEPROM 0000 0011b */
103static const uint32_t readtab[] = {
104 HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK,
105 0,
106 HE_REGM_HOST_PROM_CLOCK,
107 0, /* 0 */
108 HE_REGM_HOST_PROM_CLOCK,
109 0, /* 0 */
110 HE_REGM_HOST_PROM_CLOCK,
111 0, /* 0 */
112 HE_REGM_HOST_PROM_CLOCK,
113 0, /* 0 */
114 HE_REGM_HOST_PROM_CLOCK,
115 0, /* 0 */
116 HE_REGM_HOST_PROM_CLOCK,
117 HE_REGM_HOST_PROM_DATA_IN, /* 0 */
118 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
119 HE_REGM_HOST_PROM_DATA_IN, /* 1 */
120 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
121 HE_REGM_HOST_PROM_DATA_IN, /* 1 */
122};
123static const uint32_t clocktab[] = {
124 0, HE_REGM_HOST_PROM_CLOCK,
125 0, HE_REGM_HOST_PROM_CLOCK,
126 0, HE_REGM_HOST_PROM_CLOCK,
127 0, HE_REGM_HOST_PROM_CLOCK,
128 0, HE_REGM_HOST_PROM_CLOCK,
129 0, HE_REGM_HOST_PROM_CLOCK,
130 0, HE_REGM_HOST_PROM_CLOCK,
131 0, HE_REGM_HOST_PROM_CLOCK,
132 0
133};
134
135/*
136 * Convert cell rate to ATM Forum format
137 */
138u_int
139hatm_cps2atmf(uint32_t pcr)
140{
141 u_int e;
142
143 if (pcr == 0)
144 return (0);
145 pcr <<= 9;
146 e = 0;
147 while (pcr > (1024 - 1)) {
148 e++;
149 pcr >>= 1;
150 }
151 return ((1 << 14) | (e << 9) | (pcr & 0x1ff));
152}
153u_int
154hatm_atmf2cps(uint32_t fcr)
155{
156 fcr &= 0x7fff;
157
158 return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512
159 * (fcr >> 14));
160}
161
162/************************************************************
163 *
164 * Initialisation
165 */
166/*
167 * Probe for a HE controller
168 */
169static int
170hatm_probe(device_t dev)
171{
172 int i;
173
174 for (i = 0; hatm_devs[i].name; i++)
175 if (pci_get_vendor(dev) == hatm_devs[i].vid &&
176 pci_get_device(dev) == hatm_devs[i].did) {
177 device_set_desc(dev, hatm_devs[i].name);
178 return (0);
179 }
180 return (ENXIO);
181}
182
183/*
184 * Allocate and map DMA-able memory. We support only contiguous mappings.
185 */
186static void
187dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
188{
189 if (error)
190 return;
191 KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs));
192 KASSERT(segs[0].ds_addr <= 0xffffffffUL,
193 ("phys addr too large %lx", (u_long)segs[0].ds_addr));
194
195 *(bus_addr_t *)arg = segs[0].ds_addr;
196}
197static int
198hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem)
199{
200 int error;
201
202 mem->base = NULL;
203
204 /*
205 * Alignement does not work in the bus_dmamem_alloc function below
206 * on FreeBSD. malloc seems to align objects at least to the object
207 * size so increase the size to the alignment if the size is lesser
208 * than the alignemnt.
209 * XXX on sparc64 this is (probably) not needed.
210 */
211 if (mem->size < mem->align)
212 mem->size = mem->align;
213
214 error = bus_dma_tag_create(sc->parent_tag, mem->align, 0,
215 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
216 NULL, NULL, mem->size, 1,
217 BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
218 NULL, NULL, &mem->tag);
219 if (error) {
220 if_printf(&sc->ifatm.ifnet, "DMA tag create (%s)\n", what);
221 return (error);
222 }
223
224 error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map);
225 if (error) {
226 if_printf(&sc->ifatm.ifnet, "DMA mem alloc (%s): %d\n",
227 what, error);
228 bus_dma_tag_destroy(mem->tag);
229 mem->base = NULL;
230 return (error);
231 }
232
233 error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size,
234 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
235 if (error) {
236 if_printf(&sc->ifatm.ifnet, "DMA map load (%s): %d\n",
237 what, error);
238 bus_dmamem_free(mem->tag, mem->base, mem->map);
239 bus_dma_tag_destroy(mem->tag);
240 mem->base = NULL;
241 return (error);
242 }
243
244 DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size,
245 mem->align, mem->base, (u_long)mem->paddr));
246
247 return (0);
248}
249
250/*
251 * Destroy all the resources of an DMA-able memory region.
252 */
253static void
254hatm_destroy_dmamem(struct dmamem *mem)
255{
256 if (mem->base != NULL) {
257 bus_dmamap_unload(mem->tag, mem->map);
258 bus_dmamem_free(mem->tag, mem->base, mem->map);
259 (void)bus_dma_tag_destroy(mem->tag);
260 mem->base = NULL;
261 }
262}
263
264/*
265 * Initialize/destroy DMA maps for the large pool 0
266 */
267static void
268hatm_destroy_rmaps(struct hatm_softc *sc)
269{
270 u_int b;
271
272 DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers..."));
273 if (sc->rmaps != NULL) {
274 for (b = 0; b < sc->lbufs_size; b++)
275 bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]);
276 free(sc->rmaps, M_DEVBUF);
277 }
278 if (sc->lbufs != NULL)
279 free(sc->lbufs, M_DEVBUF);
280}
281
282static void
283hatm_init_rmaps(struct hatm_softc *sc)
284{
285 u_int b;
286 int err;
287
288 DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers..."));
289 sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size,
290 M_DEVBUF, M_ZERO | M_WAITOK);
291
292 /* allocate and create the DMA maps for the large pool */
293 sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size,
294 M_DEVBUF, M_WAITOK);
295 for (b = 0; b < sc->lbufs_size; b++) {
296 err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]);
297 if (err != 0)
298 panic("bus_dmamap_create: %d\n", err);
299 }
300}
301
302/*
303 * Initialize and destroy small mbuf page pointers and pages
304 */
305static void
306hatm_destroy_smbufs(struct hatm_softc *sc)
307{
308 u_int i, b;
309 struct mbuf_page *pg;
310
311 if (sc->mbuf_pages != NULL) {
312 for (i = 0; i < sc->mbuf_npages; i++) {
313 pg = sc->mbuf_pages[i];
314 for (b = 0; b < pg->hdr.nchunks; b++) {
315 if (MBUF_TST_BIT(pg->hdr.card, b))
316 if_printf(&sc->ifatm.ifnet,
317 "%s -- mbuf page=%u card buf %u\n",
318 __func__, i, b);
319 if (MBUF_TST_BIT(pg->hdr.used, b))
320 if_printf(&sc->ifatm.ifnet,
321 "%s -- mbuf page=%u used buf %u\n",
322 __func__, i, b);
323 }
324 bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
325 bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
326 free(pg, M_DEVBUF);
327 }
328 free(sc->mbuf_pages, M_DEVBUF);
329 }
330}
331
332static void
333hatm_init_smbufs(struct hatm_softc *sc)
334{
335 sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) *
336 HE_CONFIG_MAX_MBUF_PAGES, M_DEVBUF, M_WAITOK);
337 sc->mbuf_npages = 0;
338}
339
340/*
341 * Initialize/destroy TPDs. This is called from attach/detach.
342 */
343static void
344hatm_destroy_tpds(struct hatm_softc *sc)
345{
346 struct tpd *t;
347
348 if (sc->tpds.base == NULL)
349 return;
350
351 DBG(sc, ATTACH, ("releasing TPDs ..."));
352 if (sc->tpd_nfree != sc->tpd_total)
353 if_printf(&sc->ifatm.ifnet, "%u tpds still in use from %u\n",
354 sc->tpd_total - sc->tpd_nfree, sc->tpd_total);
355 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
356 SLIST_REMOVE_HEAD(&sc->tpd_free, link);
357 bus_dmamap_destroy(sc->tx_tag, t->map);
358 }
359 hatm_destroy_dmamem(&sc->tpds);
360 free(sc->tpd_used, M_DEVBUF);
361 DBG(sc, ATTACH, ("... done"));
362}
363static int
364hatm_init_tpds(struct hatm_softc *sc)
365{
366 int error;
367 u_int i;
368 struct tpd *t;
369
370 DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total));
371 error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds);
372 if (error != 0) {
373 DBG(sc, ATTACH, ("... dmamem error=%d", error));
374 return (error);
375 }
376
377 /* put all the TPDs on the free list and allocate DMA maps */
378 for (i = 0; i < sc->tpd_total; i++) {
379 t = TPD_ADDR(sc, i);
380 t->no = i;
381 t->mbuf = NULL;
382 error = bus_dmamap_create(sc->tx_tag, 0, &t->map);
383 if (error != 0) {
384 DBG(sc, ATTACH, ("... dmamap error=%d", error));
385 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
386 SLIST_REMOVE_HEAD(&sc->tpd_free, link);
387 bus_dmamap_destroy(sc->tx_tag, t->map);
388 }
389 hatm_destroy_dmamem(&sc->tpds);
390 return (error);
391 }
392
393 SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
394 }
395
396 /* allocate and zero bitmap */
397 sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8,
398 M_DEVBUF, M_ZERO | M_WAITOK);
399 sc->tpd_nfree = sc->tpd_total;
400
401 DBG(sc, ATTACH, ("... done"));
402
403 return (0);
404}
405
406/*
 407 * Free all the TPDs that were given to the card.
408 * An mbuf chain may be attached to a TPD - free it also and
409 * unload its associated DMA map.
410 */
411static void
412hatm_stop_tpds(struct hatm_softc *sc)
413{
414 u_int i;
415 struct tpd *t;
416
417 DBG(sc, ATTACH, ("free TPDs ..."));
418 for (i = 0; i < sc->tpd_total; i++) {
419 if (TPD_TST_USED(sc, i)) {
420 t = TPD_ADDR(sc, i);
421 if (t->mbuf) {
422 m_freem(t->mbuf);
423 t->mbuf = NULL;
424 bus_dmamap_unload(sc->tx_tag, t->map);
425 }
426 TPD_CLR_USED(sc, i);
427 SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
428 sc->tpd_nfree++;
429 }
430 }
431}
432
433/*
434 * This frees ALL resources of this interface and leaves the structure
435 * in an indeterminate state. This is called just before detaching or
436 * on a failed attach. No lock should be held.
437 */
438static void
439hatm_destroy(struct hatm_softc *sc)
440{
441 u_int cid;
442
443 bus_teardown_intr(sc->dev, sc->irqres, sc->ih);
444
445 hatm_destroy_rmaps(sc);
446 hatm_destroy_smbufs(sc);
447 hatm_destroy_tpds(sc);
448
449 if (sc->vcc_zone != NULL) {
450 for (cid = 0; cid < HE_MAX_VCCS; cid++)
451 if (sc->vccs[cid] != NULL)
452 uma_zfree(sc->vcc_zone, sc->vccs[cid]);
453 uma_zdestroy(sc->vcc_zone);
454 }
455
456 /*
 457 * Release all memory allocated to the various queues and
 458 * status pages. Each of these has its own flag which shows whether
 459 * it is really allocated.
460 */
461 hatm_destroy_dmamem(&sc->irq_0.mem);
462 hatm_destroy_dmamem(&sc->rbp_s0.mem);
463 hatm_destroy_dmamem(&sc->rbp_l0.mem);
464 hatm_destroy_dmamem(&sc->rbp_s1.mem);
465 hatm_destroy_dmamem(&sc->rbrq_0.mem);
466 hatm_destroy_dmamem(&sc->rbrq_1.mem);
467 hatm_destroy_dmamem(&sc->tbrq.mem);
468 hatm_destroy_dmamem(&sc->tpdrq.mem);
469 hatm_destroy_dmamem(&sc->hsp_mem);
470
471 if (sc->irqres != NULL)
472 bus_release_resource(sc->dev, SYS_RES_IRQ,
473 sc->irqid, sc->irqres);
474
475 if (sc->tx_tag != NULL)
476 if (bus_dma_tag_destroy(sc->tx_tag))
 477 			if_printf(&sc->ifatm.ifnet, "TX DMA tag busy\n");
478
479 if (sc->mbuf_tag != NULL)
480 if (bus_dma_tag_destroy(sc->mbuf_tag))
481 if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n");
482
483 if (sc->parent_tag != NULL)
484 if (bus_dma_tag_destroy(sc->parent_tag))
485 if_printf(&sc->ifatm.ifnet, "parent DMA tag busy\n");
486
487 if (sc->memres != NULL)
488 bus_release_resource(sc->dev, SYS_RES_MEMORY,
489 sc->memid, sc->memres);
490
491 sysctl_ctx_free(&sc->sysctl_ctx);
492
493 cv_destroy(&sc->cv_rcclose);
494 cv_destroy(&sc->vcc_cv);
495 mtx_destroy(&sc->mbuf0_mtx);
496 mtx_destroy(&sc->mbuf1_mtx);
497 mtx_destroy(&sc->mtx);
498}
499
500/*
501 * 4.4 Card reset
502 */
503static int
504hatm_reset(struct hatm_softc *sc)
505{
506 u_int v, count;
507
508 WRITE4(sc, HE_REGO_RESET_CNTL, 0x00);
509 BARRIER_W(sc);
510 WRITE4(sc, HE_REGO_RESET_CNTL, 0xff);
511 BARRIER_RW(sc);
512 count = 0;
513 while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) {
514 BARRIER_R(sc);
515 if (++count == 100) {
516 if_printf(&sc->ifatm.ifnet, "reset failed\n");
517 return (ENXIO);
518 }
519 DELAY(1000);
520 }
521 return (0);
522}
523
524/*
525 * 4.5 Set Bus Width
526 */
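/*
 * If the card reports a 64-bit host bus in HOST_CNTL, enable 64-bit
 * transfers in PCI configuration space and turn on 64-bit descriptor
 * and data reads/writes; otherwise clear the 64-bit enable bit.
 */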
527static void
528hatm_init_bus_width(struct hatm_softc *sc)
529{
530 uint32_t v, v1;
531
532 v = READ4(sc, HE_REGO_HOST_CNTL);
533 BARRIER_R(sc);
534 if (v & HE_REGM_HOST_BUS64) {
535 sc->pci64 = 1;
536 v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
537 v1 |= HE_PCIM_CTL0_64BIT;
538 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4);
539
540 v |= HE_REGM_HOST_DESC_RD64
541 | HE_REGM_HOST_DATA_RD64
542 | HE_REGM_HOST_DATA_WR64;
543 WRITE4(sc, HE_REGO_HOST_CNTL, v);
544 BARRIER_W(sc);
545 } else {
546 sc->pci64 = 0;
547 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
548 v &= ~HE_PCIM_CTL0_64BIT;
549 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
550 }
551}
552
553/*
 554 * 4.6 Set Host Endianness
555 */
556static void
557hatm_init_endianess(struct hatm_softc *sc)
558{
559 uint32_t v;
560
561 v = READ4(sc, HE_REGO_LB_SWAP);
562 BARRIER_R(sc);
563#if BYTE_ORDER == BIG_ENDIAN
564 v |= HE_REGM_LBSWAP_INTR_SWAP |
565 HE_REGM_LBSWAP_DESC_WR_SWAP |
566 HE_REGM_LBSWAP_BIG_ENDIAN;
567 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
568 HE_REGM_LBSWAP_DESC_RD_SWAP |
569 HE_REGM_LBSWAP_DATA_RD_SWAP);
570#else
571 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
572 HE_REGM_LBSWAP_DESC_RD_SWAP |
573 HE_REGM_LBSWAP_DATA_RD_SWAP |
574 HE_REGM_LBSWAP_INTR_SWAP |
575 HE_REGM_LBSWAP_DESC_WR_SWAP |
576 HE_REGM_LBSWAP_BIG_ENDIAN);
577#endif
578
579 if (sc->he622)
580 v |= HE_REGM_LBSWAP_XFER_SIZE;
581
582 WRITE4(sc, HE_REGO_LB_SWAP, v);
583 BARRIER_W(sc);
584}
585
586/*
587 * 4.7 Read EEPROM
588 */
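/*
 * The EEPROM is accessed bit-serially through the PROM bits of the
 * HOST_CNTL register: the readtab[] sequence issues the READ command,
 * clocktab[] supplies the clock toggles for each address and data bit,
 * and the PROM data-out bit is sampled between clock toggles.
 */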
589static uint8_t
590hatm_read_prom_byte(struct hatm_softc *sc, u_int addr)
591{
592 uint32_t val, tmp_read, byte_read;
593 u_int i, j;
594 int n;
595
596 val = READ4(sc, HE_REGO_HOST_CNTL);
597 val &= HE_REGM_HOST_PROM_BITS;
598 BARRIER_R(sc);
599
600 val |= HE_REGM_HOST_PROM_WREN;
601 WRITE4(sc, HE_REGO_HOST_CNTL, val);
602 BARRIER_W(sc);
603
604 /* send READ */
605 for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) {
606 WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]);
607 BARRIER_W(sc);
608 DELAY(EEPROM_DELAY);
609 }
610
611 /* send ADDRESS */
612 for (n = 7, j = 0; n >= 0; n--) {
613 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
614 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
615 BARRIER_W(sc);
616 DELAY(EEPROM_DELAY);
617 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
618 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
619 BARRIER_W(sc);
620 DELAY(EEPROM_DELAY);
621 }
622
623 val &= ~HE_REGM_HOST_PROM_WREN;
624 WRITE4(sc, HE_REGO_HOST_CNTL, val);
625 BARRIER_W(sc);
626
627 /* read DATA */
628 byte_read = 0;
629 for (n = 7, j = 0; n >= 0; n--) {
630 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
631 BARRIER_W(sc);
632 DELAY(EEPROM_DELAY);
633 tmp_read = READ4(sc, HE_REGO_HOST_CNTL);
634 byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT)
635 >> HE_REGS_HOST_PROM_DATA_OUT) << n);
636 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
637 BARRIER_W(sc);
638 DELAY(EEPROM_DELAY);
639 }
640 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
641 BARRIER_W(sc);
642 DELAY(EEPROM_DELAY);
643
644 return (byte_read);
645}
646
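/*
 * Read the product id, revision, serial number, media type and MAC
 * address from the EEPROM. Leading and trailing blanks are stripped
 * from the string fields; the media code selects the MIB media type
 * and peak cell rate.
 */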
647static void
648hatm_init_read_eeprom(struct hatm_softc *sc)
649{
650 u_int n, count;
651 u_char byte;
652 uint32_t v;
653
654 for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) {
655 byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count);
656 if (n > 0 || byte != ' ')
657 sc->prod_id[n++] = byte;
658 }
659 while (n > 0 && sc->prod_id[n-1] == ' ')
660 n--;
661 sc->prod_id[n] = '\0';
662
663 for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) {
664 byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count);
665 if (n > 0 || byte != ' ')
666 sc->rev[n++] = byte;
667 }
668 while (n > 0 && sc->rev[n-1] == ' ')
669 n--;
670 sc->rev[n] = '\0';
671 sc->ifatm.mib.hw_version = sc->rev[0];
672
673 sc->ifatm.mib.serial = hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0;
674 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8;
675 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16;
676 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24;
677
678 v = hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0;
679 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8;
680 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16;
681 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24;
682
683 switch (v) {
684 case HE_MEDIA_UTP155:
685 sc->ifatm.mib.media = IFM_ATM_UTP_155;
686 sc->ifatm.mib.pcr = ATM_RATE_155M;
687 break;
688
689 case HE_MEDIA_MMF155:
690 sc->ifatm.mib.media = IFM_ATM_MM_155;
691 sc->ifatm.mib.pcr = ATM_RATE_155M;
692 break;
693
694 case HE_MEDIA_MMF622:
695 sc->ifatm.mib.media = IFM_ATM_MM_622;
696 sc->ifatm.mib.device = ATM_DEVICE_HE622;
697 sc->ifatm.mib.pcr = ATM_RATE_622M;
698 sc->he622 = 1;
699 break;
700
701 case HE_MEDIA_SMF155:
702 sc->ifatm.mib.media = IFM_ATM_SM_155;
703 sc->ifatm.mib.pcr = ATM_RATE_155M;
704 break;
705
706 case HE_MEDIA_SMF622:
707 sc->ifatm.mib.media = IFM_ATM_SM_622;
708 sc->ifatm.mib.device = ATM_DEVICE_HE622;
709 sc->ifatm.mib.pcr = ATM_RATE_622M;
710 sc->he622 = 1;
711 break;
712 }
713
714 sc->ifatm.mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0);
715 sc->ifatm.mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1);
716 sc->ifatm.mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2);
717 sc->ifatm.mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3);
718 sc->ifatm.mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4);
719 sc->ifatm.mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5);
720}
721
722/*
723 * Clear unused interrupt queue
724 */
725static void
726hatm_clear_irq(struct hatm_softc *sc, u_int group)
727{
728 WRITE4(sc, HE_REGO_IRQ_BASE(group), 0);
729 WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0);
730 WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0);
731 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
732}
733
734/*
735 * 4.10 Initialize interrupt queues
736 */
737static void
738hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group)
739{
740 u_int i;
741
742 if (q->size == 0) {
743 hatm_clear_irq(sc, group);
744 return;
745 }
746
747 q->group = group;
748 q->sc = sc;
749 q->irq = q->mem.base;
750 q->head = 0;
751 q->tailp = q->irq + (q->size - 1);
752 *q->tailp = 0;
753
754 for (i = 0; i < q->size; i++)
755 q->irq[i] = HE_REGM_ITYPE_INVALID;
756
757 WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr);
758 WRITE4(sc, HE_REGO_IRQ_HEAD(group),
759 ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
760 (q->thresh << HE_REGS_IRQ_HEAD_THRESH));
761 WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line);
762 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
763}
764
765/*
766 * 5.1.3 Initialize connection memory
767 */
768static void
769hatm_init_cm(struct hatm_softc *sc)
770{
771 u_int rsra, mlbm, rabr, numbuffs;
772 u_int tsra, tabr, mtpd;
773 u_int n;
774
775 for (n = 0; n < HE_CONFIG_TXMEM; n++)
776 WRITE_TCM4(sc, n, 0);
777 for (n = 0; n < HE_CONFIG_RXMEM; n++)
778 WRITE_RCM4(sc, n, 0);
779
780 numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs;
781
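	/*
	 * Carve up receive and transmit connection memory. The expressions
	 * of the form (x + 0x7ff) & ~0x7ff round an offset up to the next
	 * 2 KB boundary; RSRB and the TPD base are aligned to multiples of
	 * their per-VCC table sizes.
	 */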
782 rsra = 0;
783 mlbm = ((rsra + sc->ifatm.mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
784 rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
785 sc->rsrb = ((rabr + 2048) + (2 * sc->ifatm.mib.max_vccs - 1)) &
786 ~(2 * sc->ifatm.mib.max_vccs - 1);
787
788 tsra = 0;
789 sc->tsrb = tsra + sc->ifatm.mib.max_vccs * 8;
790 sc->tsrc = sc->tsrb + sc->ifatm.mib.max_vccs * 4;
791 sc->tsrd = sc->tsrc + sc->ifatm.mib.max_vccs * 2;
792 tabr = sc->tsrd + sc->ifatm.mib.max_vccs * 1;
793 mtpd = ((tabr + 1024) + (16 * sc->ifatm.mib.max_vccs - 1)) &
794 ~(16 * sc->ifatm.mib.max_vccs - 1);
795
796 DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
797 rsra, mlbm, rabr, sc->rsrb));
798 DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x",
799 tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd));
800
801 WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb);
802 WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc);
803 WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd);
804 WRITE4(sc, HE_REGO_TMABR_BA, tabr);
805 WRITE4(sc, HE_REGO_TPD_BA, mtpd);
806
807 WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb);
808 WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm);
809 WRITE4(sc, HE_REGO_RCMABR_BA, rabr);
810
811 BARRIER_W(sc);
812}
813
814/*
815 * 5.1.4 Initialize Local buffer Pools
816 */
817static void
818hatm_init_rx_buffer_pool(struct hatm_softc *sc,
819 u_int num, /* bank */
820 u_int start, /* start row */
821 u_int numbuffs /* number of entries */
822)
823{
824 u_int row_size; /* bytes per row */
825 uint32_t row_addr; /* start address of this row */
826 u_int lbuf_size; /* bytes per lbuf */
827 u_int lbufs_per_row; /* number of lbufs per memory row */
828 uint32_t lbufd_index; /* index of lbuf descriptor */
829 uint32_t lbufd_addr; /* address of lbuf descriptor */
830 u_int lbuf_row_cnt; /* current lbuf in current row */
831 uint32_t lbuf_addr; /* address of current buffer */
832 u_int i;
833
 834 	row_size = sc->bytes_per_row;
835 row_addr = start * row_size;
836 lbuf_size = sc->cells_per_lbuf * 48;
837 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
838
839 /* descriptor index */
840 lbufd_index = num;
841
842 /* 2 words per entry */
843 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
844
845 /* write head of queue */
846 WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index);
847
848 lbuf_row_cnt = 0;
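	/*
	 * Chain the free buffer descriptors in receive connection memory.
	 * Each descriptor is two words: the buffer's local memory address
	 * (in 32-byte units) and the index of the next descriptor. The
	 * descriptor indices of pools 0 and 1 are interleaved, hence the
	 * increment of two per buffer.
	 */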
849 for (i = 0; i < numbuffs; i++) {
850 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
851
852 WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
853
854 lbufd_index += 2;
855 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
856
857 if (++lbuf_row_cnt == lbufs_per_row) {
858 lbuf_row_cnt = 0;
859 row_addr += row_size;
860 }
861
862 lbufd_addr += 2 * 2;
863 }
864
865 WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2);
866 WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs);
867
868 BARRIER_W(sc);
869}
870
871static void
872hatm_init_tx_buffer_pool(struct hatm_softc *sc,
873 u_int start, /* start row */
874 u_int numbuffs /* number of entries */
875)
876{
877 u_int row_size; /* bytes per row */
878 uint32_t row_addr; /* start address of this row */
879 u_int lbuf_size; /* bytes per lbuf */
880 u_int lbufs_per_row; /* number of lbufs per memory row */
881 uint32_t lbufd_index; /* index of lbuf descriptor */
882 uint32_t lbufd_addr; /* address of lbuf descriptor */
883 u_int lbuf_row_cnt; /* current lbuf in current row */
884 uint32_t lbuf_addr; /* address of current buffer */
885 u_int i;
886
 887 	row_size = sc->bytes_per_row;
888 row_addr = start * row_size;
889 lbuf_size = sc->cells_per_lbuf * 48;
890 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
891
892 /* descriptor index */
893 lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs;
894
895 /* 2 words per entry */
896 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
897
898 /* write head of queue */
899 WRITE4(sc, HE_REGO_TLBF_H, lbufd_index);
900
901 lbuf_row_cnt = 0;
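	/*
	 * The transmit buffer descriptors follow the two receive pools in
	 * connection memory and are chained with an index stride of one.
	 */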
902 for (i = 0; i < numbuffs; i++) {
903 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
904
905 WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
906 lbufd_index++;
907 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
908
909 if (++lbuf_row_cnt == lbufs_per_row) {
910 lbuf_row_cnt = 0;
911 row_addr += row_size;
912 }
913
914 lbufd_addr += 2;
915 }
916
917 WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1);
918 BARRIER_W(sc);
919}
920
921/*
922 * 5.1.5 Initialize Intermediate Receive Queues
923 */
924static void
925hatm_init_imed_queues(struct hatm_softc *sc)
926{
927 u_int n;
928
929 if (sc->he622) {
930 for (n = 0; n < 8; n++) {
931 WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f);
932 WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f);
933 }
934 } else {
935 for (n = 0; n < 8; n++) {
936 WRITE4(sc, HE_REGO_INMQ_S(n), n);
937 WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8);
938 }
939 }
940}
941
942/*
943 * 5.1.7 Init CS block
944 */
945static void
946hatm_init_cs_block(struct hatm_softc *sc)
947{
948 u_int n, i;
949 u_int clkfreg, cellrate, decr, tmp;
950 static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR;
951 static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL;
952 static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT;
953 static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR;
954 static const uint32_t rtatr[2] = HE_REGT_CS_RTATR;
955 static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC;
956 static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF;
957
958 /* Clear Rate Controller Start Times and Occupied Flags */
959 for (n = 0; n < 32; n++)
960 WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0);
961
962 clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
963 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
964 decr = cellrate / 32;
965
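	/*
	 * Program the 16 rate group timer reload values: the clock
	 * frequency divided by the group's cell rate, with the cell rate
	 * stepped down by 1/32 of the line rate per group.
	 */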
966 for (n = 0; n < 16; n++) {
967 tmp = clkfreg / cellrate;
968 WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1);
969 cellrate -= decr;
970 }
971
972 i = (sc->cells_per_lbuf == 2) ? 0
973 :(sc->cells_per_lbuf == 4) ? 1
974 : 2;
975
976 /* table 5.2 */
977 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]);
978 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]);
979 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]);
980 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]);
981 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]);
982
983 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]);
984 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]);
985 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]);
986
987 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]);
988 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]);
989
990 WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]);
991 WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]);
992
993 WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]);
994 WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]);
995 WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]);
996 WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]);
997 WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]);
998 WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]);
999
1000 WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]);
1001 WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]);
1002
1003 WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8);
1004
1005 for (n = 0; n < 8; n++)
1006 WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0);
1007}
1008
1009/*
1010 * 5.1.8 CS Block Connection Memory Initialisation
1011 */
1012static void
1013hatm_init_cs_block_cm(struct hatm_softc *sc)
1014{
1015 u_int n, i;
1016 u_int expt, mant, etrm, wcr, ttnrm, tnrm;
1017 uint32_t rate;
1018 uint32_t clkfreq, cellrate, decr;
1019 uint32_t *rg, rtg, val = 0;
1020 uint64_t drate;
1021 u_int buf, buf_limit;
1022 uint32_t base = READ4(sc, HE_REGO_RCMABR_BA);
1023
1024 for (n = 0; n < HE_REGL_CM_GQTBL; n++)
1025 WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0);
1026 for (n = 0; n < HE_REGL_CM_RGTBL; n++)
1027 WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0);
1028
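	/*
	 * Fill the TNRM table. The cell rate encoded in each table index
	 * (exponent/mantissa form, as unpacked below) is scaled down,
	 * clamped to the range 2..255 and packed four bytes per 32-bit
	 * word.
	 */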
1029 tnrm = 0;
1030 for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) {
1031 expt = (n >> 5) & 0x1f;
1032 mant = ((n & 0x18) << 4) | 0x7f;
1033 wcr = (1 << expt) * (mant + 512) / 512;
1034 etrm = n & 0x7;
1035 ttnrm = wcr / 10 / (1 << etrm);
1036 if (ttnrm > 255)
1037 ttnrm = 255;
1038 else if(ttnrm < 2)
1039 ttnrm = 2;
1040 tnrm = (tnrm << 8) | (ttnrm & 0xff);
1041 if (n % 4 == 0)
1042 WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm);
1043 }
1044
1045 clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
1046 buf_limit = 4;
1047
1048 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1049 decr = cellrate / 32;
1050
1051 /* compute GRID top row in 1000 * cps */
1052 for (n = 0; n < 16; n++) {
1053 u_int interval = clkfreq / cellrate;
1054 sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval;
1055 cellrate -= decr;
1056 }
1057
1058 /* compute the other rows according to 2.4 */
1059 for (i = 1; i < 16; i++)
1060 for (n = 0; n < 16; n++)
1061 sc->rate_grid[i][n] = sc->rate_grid[i-1][n] /
1062 ((i < 14) ? 2 : 4);
1063
1064 /* first entry is line rate */
1065 n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M);
1066 expt = (n >> 9) & 0x1f;
1067 mant = n & 0x1f0;
1068 sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512;
1069
1070 /* now build the conversion table - each 32 bit word contains
1071 * two entries - this gives a total of 0x400 16 bit entries.
1072 * This table maps the truncated ATMF rate version into a grid index */
1073 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1074 rg = &sc->rate_grid[15][15];
1075
1076 for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) {
1077 /* unpack the ATMF rate */
1078 expt = rate >> 5;
1079 mant = (rate & 0x1f) << 4;
1080
1081 /* get the cell rate - minimum is 10 per second */
1082 drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512;
1083 if (drate < 10 * 1000)
1084 drate = 10 * 1000;
1085
1086 /* now look up the grid index */
1087 while (drate >= *rg && rg-- > &sc->rate_grid[0][0])
1088 ;
1089 rg++;
1090 rtg = rg - &sc->rate_grid[0][0];
1091
1092 /* now compute the buffer limit */
1093 buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000;
1094 if (buf == 0)
1095 buf = 1;
1096 else if (buf > buf_limit)
1097 buf = buf_limit;
1098
1099 /* make value */
1100 val = (val << 16) | (rtg << 8) | buf;
1101
1102 /* write */
1103 if (rate % 2 == 1)
1104 WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val);
1105 }
1106}
1107
1108/*
1109 * Clear an unused receive group buffer pool
1110 */
1111static void
1112hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large)
1113{
1114 WRITE4(sc, HE_REGO_RBP_S(large, group), 0);
1115 WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1116 WRITE4(sc, HE_REGO_RBP_QI(large, group), 1);
1117 WRITE4(sc, HE_REGO_RBP_BL(large, group), 0);
1118}
1119
1120/*
1121 * Initialize a receive group buffer pool
1122 */
1123static void
1124hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group,
1125 u_int large)
1126{
1127 if (q->size == 0) {
1128 hatm_clear_rpool(sc, group, large);
1129 return;
1130 }
1131
1132 bzero(q->mem.base, q->mem.size);
1133 q->rbp = q->mem.base;
1134 q->head = q->tail = 0;
1135
1136 DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large],
1137 (u_long)q->mem.paddr));
1138
1139 WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr);
1140 WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1141 WRITE4(sc, HE_REGO_RBP_QI(large, group),
1142 ((q->size - 1) << HE_REGS_RBP_SIZE) |
1143 HE_REGM_RBP_INTR_ENB |
1144 (q->thresh << HE_REGS_RBP_THRESH));
1145 WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1);
1146}
1147
1148/*
1149 * Clear an unused receive buffer return queue
1150 */
1151static void
1152hatm_clear_rbrq(struct hatm_softc *sc, u_int group)
1153{
1154 WRITE4(sc, HE_REGO_RBRQ_ST(group), 0);
1155 WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1156 WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH));
1157 WRITE4(sc, HE_REGO_RBRQ_I(group), 0);
1158}
1159
1160/*
1161 * Initialize receive buffer return queue
1162 */
1163static void
1164hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
1165{
1166 if (rq->size == 0) {
1167 hatm_clear_rbrq(sc, group);
1168 return;
1169 }
1170
1171 rq->rbrq = rq->mem.base;
1172 rq->head = 0;
1173
1174 DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr));
1175
1176 WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr);
1177 WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1178 WRITE4(sc, HE_REGO_RBRQ_Q(group),
1179 (rq->thresh << HE_REGS_RBRQ_THRESH) |
1180 ((rq->size - 1) << HE_REGS_RBRQ_SIZE));
1181 WRITE4(sc, HE_REGO_RBRQ_I(group),
1182 (rq->tout << HE_REGS_RBRQ_TIME) |
1183 (rq->pcnt << HE_REGS_RBRQ_COUNT));
1184}
1185
1186/*
1187 * Clear an unused transmit buffer return queue N
1188 */
1189static void
1190hatm_clear_tbrq(struct hatm_softc *sc, u_int group)
1191{
1192 WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0);
1193 WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1194 WRITE4(sc, HE_REGO_TBRQ_S(group), 0);
1195 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1);
1196}
1197
1198/*
1199 * Initialize transmit buffer return queue N
1200 */
1201static void
1202hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group)
1203{
1204 if (tq->size == 0) {
1205 hatm_clear_tbrq(sc, group);
1206 return;
1207 }
1208
1209 tq->tbrq = tq->mem.base;
1210 tq->head = 0;
1211
1212 DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr));
1213
1214 WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr);
1215 WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1216 WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1);
1217 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh);
1218}
1219
1220/*
1221 * Initialize TPDRQ
1222 */
1223static void
1224hatm_init_tpdrq(struct hatm_softc *sc)
1225{
1226 struct hetpdrq *tq;
1227
1228 tq = &sc->tpdrq;
1229 tq->tpdrq = tq->mem.base;
1230 tq->tail = tq->head = 0;
1231
1232 DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr));
1233
1234 WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr);
1235 WRITE4(sc, HE_REGO_TPDRQ_T, 0);
1236 WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1);
1237}
1238
1239/*
 1240 * This function can be called by the infrastructure to start the card.
1241 */
1242static void
1243hatm_init(void *p)
1244{
1245 struct hatm_softc *sc = p;
1246
1247 mtx_lock(&sc->mtx);
1248 hatm_stop(sc);
1249 hatm_initialize(sc);
1250 mtx_unlock(&sc->mtx);
1251}
1252
1253enum {
1254 CTL_ISTATS,
1255};
1256
1257/*
1258 * Sysctl handler
1259 */
1260static int
1261hatm_sysctl(SYSCTL_HANDLER_ARGS)
1262{
1263 struct hatm_softc *sc = arg1;
1264 uint32_t *ret;
1265 int error;
1266 size_t len;
1267
1268 switch (arg2) {
1269
1270 case CTL_ISTATS:
1271 len = sizeof(sc->istats);
1272 break;
1273
1274 default:
1275 panic("bad control code");
1276 }
1277
1278 ret = malloc(len, M_TEMP, M_WAITOK);
1279 mtx_lock(&sc->mtx);
1280
1281 switch (arg2) {
1282
1283 case CTL_ISTATS:
1284 sc->istats.mcc += READ4(sc, HE_REGO_MCC);
1285 sc->istats.oec += READ4(sc, HE_REGO_OEC);
1286 sc->istats.dcc += READ4(sc, HE_REGO_DCC);
1287 sc->istats.cec += READ4(sc, HE_REGO_CEC);
1288 bcopy(&sc->istats, ret, sizeof(sc->istats));
1289 break;
1290 }
1291 mtx_unlock(&sc->mtx);
1292
1293 error = SYSCTL_OUT(req, ret, len);
1294 free(ret, M_TEMP);
1295
1296 return (error);
1297}
1298
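/*
 * Fetch a tunable: export it under the device's sysctl tree and
 * override the default with the kernel environment variable
 * hw.<device>.<name> if that variable is set.
 */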
1299static int
1300kenv_getuint(struct hatm_softc *sc, const char *var,
1301 u_int *ptr, u_int def, int rw)
1302{
1303 char full[IFNAMSIZ + 3 + 20];
1304 char *val, *end;
1305 u_int u;
1306
1307 *ptr = def;
1308
1309 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1310 OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL)
1311 return (ENOMEM);
1312
1313 snprintf(full, sizeof(full), "hw.%s.%s",
1314 device_get_nameunit(sc->dev), var);
1315
1316 if ((val = getenv(full)) == NULL)
1317 return (0);
1318 u = strtoul(val, &end, 0);
1319 if (end == val || *end != '\0') {
1320 freeenv(val);
1321 return (EINVAL);
1322 }
1323 if (bootverbose)
1324 if_printf(&sc->ifatm.ifnet, "%s=%u\n", full, u);
1325 *ptr = u;
1326 return (0);
1327}
1328
1329/*
1330 * Set configurable parameters. Many of these are configurable via
1331 * kenv.
1332 */
1333static int
1334hatm_configure(struct hatm_softc *sc)
1335{
1336 /* Receive buffer pool 0 small */
1337 kenv_getuint(sc, "rbps0.size", &sc->rbp_s0.size,
1338 HE_CONFIG_RBPS0_SIZE, 0);
1339 kenv_getuint(sc, "rbps0.thresh", &sc->rbp_s0.thresh,
1340 HE_CONFIG_RBPS0_THRESH, 0);
1341 sc->rbp_s0.bsize = MBUF0_SIZE;
1342
1343 /* Receive buffer pool 0 large */
1344 kenv_getuint(sc, "rbpl0.size", &sc->rbp_l0.size,
1345 HE_CONFIG_RBPL0_SIZE, 0);
1346 kenv_getuint(sc, "rbpl0.thresh", &sc->rbp_l0.thresh,
1347 HE_CONFIG_RBPL0_THRESH, 0);
1348 sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;
1349
1350 /* Receive buffer return queue 0 */
1351 kenv_getuint(sc, "rbrq0.size", &sc->rbrq_0.size,
1352 HE_CONFIG_RBRQ0_SIZE, 0);
1353 kenv_getuint(sc, "rbrq0.thresh", &sc->rbrq_0.thresh,
1354 HE_CONFIG_RBRQ0_THRESH, 0);
1355 kenv_getuint(sc, "rbrq0.tout", &sc->rbrq_0.tout,
1356 HE_CONFIG_RBRQ0_TOUT, 0);
1357 kenv_getuint(sc, "rbrq0.pcnt", &sc->rbrq_0.pcnt,
1358 HE_CONFIG_RBRQ0_PCNT, 0);
1359
1360 /* Receive buffer pool 1 small */
1361 kenv_getuint(sc, "rbps1.size", &sc->rbp_s1.size,
1362 HE_CONFIG_RBPS1_SIZE, 0);
1363 kenv_getuint(sc, "rbps1.thresh", &sc->rbp_s1.thresh,
1364 HE_CONFIG_RBPS1_THRESH, 0);
1365 sc->rbp_s1.bsize = MBUF1_SIZE;
1366
1367 /* Receive buffer return queue 1 */
1368 kenv_getuint(sc, "rbrq1.size", &sc->rbrq_1.size,
1369 HE_CONFIG_RBRQ1_SIZE, 0);
1370 kenv_getuint(sc, "rbrq1.thresh", &sc->rbrq_1.thresh,
1371 HE_CONFIG_RBRQ1_THRESH, 0);
1372 kenv_getuint(sc, "rbrq1.tout", &sc->rbrq_1.tout,
1373 HE_CONFIG_RBRQ1_TOUT, 0);
1374 kenv_getuint(sc, "rbrq1.pcnt", &sc->rbrq_1.pcnt,
1375 HE_CONFIG_RBRQ1_PCNT, 0);
1376
1377 /* Interrupt queue 0 */
1378 kenv_getuint(sc, "irq0.size", &sc->irq_0.size,
1379 HE_CONFIG_IRQ0_SIZE, 0);
1380 kenv_getuint(sc, "irq0.thresh", &sc->irq_0.thresh,
1381 HE_CONFIG_IRQ0_THRESH, 0);
1382 sc->irq_0.line = HE_CONFIG_IRQ0_LINE;
1383
1384 /* Transmit buffer return queue 0 */
1385 kenv_getuint(sc, "tbrq0.size", &sc->tbrq.size,
1386 HE_CONFIG_TBRQ_SIZE, 0);
1387 kenv_getuint(sc, "tbrq0.thresh", &sc->tbrq.thresh,
1388 HE_CONFIG_TBRQ_THRESH, 0);
1389
1390 /* Transmit buffer ready queue */
1391 kenv_getuint(sc, "tpdrq.size", &sc->tpdrq.size,
1392 HE_CONFIG_TPDRQ_SIZE, 0);
1393 /* Max TPDs per VCC */
1394 kenv_getuint(sc, "tpdmax", &sc->max_tpd,
1395 HE_CONFIG_TPD_MAXCC, 0);
1396
1397 return (0);
1398}
1399
1400#ifdef HATM_DEBUG
1401
1402/*
1403 * Get TSRs from connection memory
1404 */
1405static int
1406hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
1407{
1408 struct hatm_softc *sc = arg1;
1409 int error, i, j;
1410 uint32_t *val;
1411
1412 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);
1413
1414 mtx_lock(&sc->mtx);
1415 for (i = 0; i < HE_MAX_VCCS; i++)
1416 for (j = 0; j <= 14; j++)
1417 val[15 * i + j] = READ_TSR(sc, i, j);
1418 mtx_unlock(&sc->mtx);
1419
1420 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
1421 free(val, M_TEMP);
1422 if (error != 0 || req->newptr == NULL)
1423 return (error);
1424
1425 return (EPERM);
1426}
1427
1428/*
1429 * Get TPDs from connection memory
1430 */
1431static int
1432hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
1433{
1434 struct hatm_softc *sc = arg1;
1435 int error, i, j;
1436 uint32_t *val;
1437
1438 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);
1439
1440 mtx_lock(&sc->mtx);
1441 for (i = 0; i < HE_MAX_VCCS; i++)
1442 for (j = 0; j < 16; j++)
1443 val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
1444 mtx_unlock(&sc->mtx);
1445
1446 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
1447 free(val, M_TEMP);
1448 if (error != 0 || req->newptr == NULL)
1449 return (error);
1450
1451 return (EPERM);
1452}
1453
1454/*
1455 * Get mbox registers
1456 */
1457static int
1458hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
1459{
1460 struct hatm_softc *sc = arg1;
1461 int error, i;
1462 uint32_t *val;
1463
1464 val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);
1465
1466 mtx_lock(&sc->mtx);
1467 for (i = 0; i < HE_REGO_CS_END; i++)
1468 val[i] = READ_MBOX4(sc, i);
1469 mtx_unlock(&sc->mtx);
1470
1471 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
1472 free(val, M_TEMP);
1473 if (error != 0 || req->newptr == NULL)
1474 return (error);
1475
1476 return (EPERM);
1477}
1478
1479/*
1480 * Get connection memory
1481 */
1482static int
1483hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
1484{
1485 struct hatm_softc *sc = arg1;
1486 int error, i;
1487 uint32_t *val;
1488
1489 val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK);
1490
1491 mtx_lock(&sc->mtx);
1492 val[0] = READ4(sc, HE_REGO_RCMABR_BA);
1493 for (i = 0; i < HE_CONFIG_RXMEM; i++)
1494 val[i + 1] = READ_RCM4(sc, i);
1495 mtx_unlock(&sc->mtx);
1496
1497 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1));
1498 free(val, M_TEMP);
1499 if (error != 0 || req->newptr == NULL)
1500 return (error);
1501
1502 return (EPERM);
1503}
1504
1505/*
1506 * Get local buffer memory
1507 */
1508static int
1509hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS)
1510{
1511 struct hatm_softc *sc = arg1;
1512 int error, i;
1513 uint32_t *val;
1514 u_int bytes = (1 << 21);
1515
1516 val = malloc(bytes, M_TEMP, M_WAITOK);
1517
1518 mtx_lock(&sc->mtx);
1519 for (i = 0; i < bytes / 4; i++)
1520 val[i] = READ_LB4(sc, i);
1521 mtx_unlock(&sc->mtx);
1522
1523 error = SYSCTL_OUT(req, val, bytes);
1524 free(val, M_TEMP);
1525 if (error != 0 || req->newptr == NULL)
1526 return (error);
1527
1528 return (EPERM);
1529}
1530
1531/*
1532 * Get all card registers
1533 */
1534static int
1535hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS)
1536{
1537 struct hatm_softc *sc = arg1;
1538 int error, i;
1539 uint32_t *val;
1540
1541 val = malloc(HE_REGO_END, M_TEMP, M_WAITOK);
1542
1543 mtx_lock(&sc->mtx);
1544 for (i = 0; i < HE_REGO_END; i += 4)
1545 val[i / 4] = READ4(sc, i);
1546 mtx_unlock(&sc->mtx);
1547
1548 error = SYSCTL_OUT(req, val, HE_REGO_END);
1549 free(val, M_TEMP);
1550 if (error != 0 || req->newptr == NULL)
1551 return (error);
1552
1553 return (EPERM);
1554}
1555#endif
1556
1557/*
1558 * Suni register access
1559 */
1560/*
1561 * read at most n SUNI registers starting at reg into val
1562 */
1563static int
1564hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
1565{
1566 u_int i;
1567 struct hatm_softc *sc = (struct hatm_softc *)ifatm;
1568
1569 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1570 return (EINVAL);
1571 if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
 1572 		*n = (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4 - reg;
1573
1574 mtx_assert(&sc->mtx, MA_OWNED);
1575 for (i = 0; i < *n; i++)
1576 val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i));
1577
1578 return (0);
1579}
1580
1581/*
1582 * change the bits given by mask to them in val in register reg
1583 */
1584static int
1585hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
1586{
1587 uint32_t regval;
1588 struct hatm_softc *sc = (struct hatm_softc *)ifatm;
1589
1590 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1591 return (EINVAL);
1592
1593 mtx_assert(&sc->mtx, MA_OWNED);
1594 regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
1595 regval = (regval & ~mask) | (val & mask);
1596 WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);
1597
1598 return (0);
1599}
1600
1601static struct utopia_methods hatm_utopia_methods = {
1602 hatm_utopia_readregs,
1603 hatm_utopia_writereg,
1604};
1605
1606/*
1607 * Detach - if it is running, stop. Destroy.
1608 */
1609static int
1610hatm_detach(device_t dev)
1611{
1612 struct hatm_softc *sc = (struct hatm_softc *)device_get_softc(dev);
1613
1614 mtx_lock(&sc->mtx);
1615 hatm_stop(sc);
1616 if (sc->utopia.state & UTP_ST_ATTACHED) {
1617 utopia_stop(&sc->utopia);
1618 utopia_detach(&sc->utopia);
1619 }
1620 mtx_unlock(&sc->mtx);
1621
1622 atm_ifdetach(&sc->ifatm.ifnet);
1623
1624 hatm_destroy(sc);
1625
1626 return (0);
1627}
1628
1629/*
1630 * Attach to the device. Assume that no locking is needed here.
 1631 * All resources we allocate here are freed by calling hatm_destroy.
1632 */
1633static int
1634hatm_attach(device_t dev)
1635{
1636 struct hatm_softc *sc;
1637 int unit;
1638 int error;
1639 uint32_t v;
1640 struct ifnet *ifp;
1641
1642 sc = device_get_softc(dev);
1643 unit = device_get_unit(dev);
1644
1645 sc->dev = dev;
1646 sc->ifatm.mib.device = ATM_DEVICE_HE155;
1647 sc->ifatm.mib.serial = 0;
1648 sc->ifatm.mib.hw_version = 0;
1649 sc->ifatm.mib.sw_version = 0;
1650 sc->ifatm.mib.vpi_bits = HE_CONFIG_VPI_BITS;
1651 sc->ifatm.mib.vci_bits = HE_CONFIG_VCI_BITS;
1652 sc->ifatm.mib.max_vpcs = 0;
1653 sc->ifatm.mib.max_vccs = HE_MAX_VCCS;
1654 sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
1655 sc->he622 = 0;
1656 sc->ifatm.phy = &sc->utopia;
1657
1658 SLIST_INIT(&sc->mbuf0_list);
1659 SLIST_INIT(&sc->mbuf1_list);
1660 SLIST_INIT(&sc->tpd_free);
1661
1662 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1663 mtx_init(&sc->mbuf0_mtx, device_get_nameunit(dev), "HEb0", MTX_DEF);
1664 mtx_init(&sc->mbuf1_mtx, device_get_nameunit(dev), "HEb1", MTX_DEF);
1665 cv_init(&sc->vcc_cv, "HEVCCcv");
1666 cv_init(&sc->cv_rcclose, "RCClose");
1667
1668 sysctl_ctx_init(&sc->sysctl_ctx);
1669
1670 /*
1671 * 4.2 BIOS Configuration
1672 */
1673 v = pci_read_config(dev, PCIR_COMMAND, 2);
1674 v |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
1675 pci_write_config(dev, PCIR_COMMAND, v, 2);
1676
1677 /*
1678 * 4.3 PCI Bus Controller-Specific Initialisation
1679 */
1680 v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4);
1681 v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT;
1682#if BYTE_ORDER == BIG_ENDIAN && 0
1683 v |= HE_PCIM_CTL0_BIGENDIAN;
1684#endif
1685 pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4);
1686
1687 /*
1688 * Map memory
1689 */
1690 v = pci_read_config(dev, PCIR_COMMAND, 2);
1691 if (!(v & PCIM_CMD_MEMEN)) {
1692 device_printf(dev, "failed to enable memory\n");
1693 error = ENXIO;
1694 goto failed;
1695 }
1696 sc->memid = PCIR_MAPS;
1696 sc->memid = PCIR_BAR(0);
1697 sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid,
1698 0, ~0, 1, RF_ACTIVE);
1699 if (sc->memres == NULL) {
1700 device_printf(dev, "could not map memory\n");
1701 error = ENXIO;
1702 goto failed;
1703 }
1704 sc->memh = rman_get_bushandle(sc->memres);
1705 sc->memt = rman_get_bustag(sc->memres);
1706
1707 /*
 1708 	 * Allocate a DMA tag for subsequent allocations
1709 */
1710 if (bus_dma_tag_create(NULL, 1, 0,
1711 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1712 NULL, NULL,
1713 BUS_SPACE_MAXSIZE_32BIT, 1,
1714 BUS_SPACE_MAXSIZE_32BIT, 0,
1715 NULL, NULL, &sc->parent_tag)) {
1716 device_printf(dev, "could not allocate DMA tag\n");
1717 error = ENOMEM;
1718 goto failed;
1719 }
1720
1721 if (bus_dma_tag_create(sc->parent_tag, 1, 0,
1722 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1723 NULL, NULL,
1724 MBUF_ALLOC_SIZE, 1,
1725 MBUF_ALLOC_SIZE, 0,
1726 NULL, NULL, &sc->mbuf_tag)) {
1727 device_printf(dev, "could not allocate mbuf DMA tag\n");
1728 error = ENOMEM;
1729 goto failed;
1730 }
1731
1732 /*
1733 * Allocate a DMA tag for packets to send. Here we have a problem with
1734 * the specification of the maximum number of segments. Theoretically
 1735 	 * this would be (the size of the transmit ring - 1) multiplied by 3,
 1736 	 * but this does not work. So make the maximum number of TPDs
 1737 	 * occupied by one packet a configuration parameter.
1738 */
1739 if (bus_dma_tag_create(NULL, 1, 0,
1740 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1741 HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0,
1742 NULL, NULL, &sc->tx_tag)) {
1743 device_printf(dev, "could not allocate TX tag\n");
1744 error = ENOMEM;
1745 goto failed;
1746 }
1747
1748 /*
1749 * Setup the interrupt
1750 */
1751 sc->irqid = 0;
1752 sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid,
1753 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
1754 if (sc->irqres == 0) {
1755 device_printf(dev, "could not allocate irq\n");
1756 error = ENXIO;
1757 goto failed;
1758 }
1759
1760 ifp = &sc->ifatm.ifnet;
1761 ifp->if_softc = sc;
1762 ifp->if_unit = unit;
1763 ifp->if_name = "hatm";
1764
1765 /*
1766 * Make the sysctl tree
1767 */
1768 error = ENOMEM;
1769 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1770 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
1771 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
1772 goto failed;
1773
1774 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1775 OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS,
1776 hatm_sysctl, "LU", "internal statistics") == NULL)
1777 goto failed;
1778
1779#ifdef HATM_DEBUG
1780 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1781 OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1782 hatm_sysctl_tsr, "S", "transmission status registers") == NULL)
1783 goto failed;
1784
1785 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1786 OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1787 hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL)
1788 goto failed;
1789
1790 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1791 OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1792 hatm_sysctl_mbox, "S", "mbox registers") == NULL)
1793 goto failed;
1794
1795 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1796 OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1797 hatm_sysctl_cm, "S", "connection memory") == NULL)
1798 goto failed;
1799
1800 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1801 OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1802 hatm_sysctl_heregs, "S", "card registers") == NULL)
1803 goto failed;
1804
1805 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1806 OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1807 hatm_sysctl_lbmem, "S", "local memory") == NULL)
1808 goto failed;
1809
1810 kenv_getuint(sc, "debug", &sc->debug, 0, 1);
1811#endif
1812
1813 /*
1814 * Configure
1815 */
1816 if ((error = hatm_configure(sc)) != 0)
1817 goto failed;
1818
1819 /*
1820 * Compute memory parameters
1821 */
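	/*
	 * Queue sizes are in entries: the receive pools and return queues
	 * use 8 bytes per entry, the interrupt queue 4 bytes. Each ring is
	 * allocated naturally aligned to its own size; the receive pool
	 * masks are derived from (size - 1).
	 */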
1822 if (sc->rbp_s0.size != 0) {
1823 sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3;
1824 sc->rbp_s0.mem.size = sc->rbp_s0.size * 8;
1825 sc->rbp_s0.mem.align = sc->rbp_s0.mem.size;
1826 }
1827 if (sc->rbp_l0.size != 0) {
1828 sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3;
1829 sc->rbp_l0.mem.size = sc->rbp_l0.size * 8;
1830 sc->rbp_l0.mem.align = sc->rbp_l0.mem.size;
1831 }
1832 if (sc->rbp_s1.size != 0) {
1833 sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3;
1834 sc->rbp_s1.mem.size = sc->rbp_s1.size * 8;
1835 sc->rbp_s1.mem.align = sc->rbp_s1.mem.size;
1836 }
1837 if (sc->rbrq_0.size != 0) {
1838 sc->rbrq_0.mem.size = sc->rbrq_0.size * 8;
1839 sc->rbrq_0.mem.align = sc->rbrq_0.mem.size;
1840 }
1841 if (sc->rbrq_1.size != 0) {
1842 sc->rbrq_1.mem.size = sc->rbrq_1.size * 8;
1843 sc->rbrq_1.mem.align = sc->rbrq_1.mem.size;
1844 }
1845
1846 sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t);
1847 sc->irq_0.mem.align = 4 * 1024;
1848
1849 sc->tbrq.mem.size = sc->tbrq.size * 4;
1850 sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */
1851
1852 sc->tpdrq.mem.size = sc->tpdrq.size * 8;
1853 sc->tpdrq.mem.align = sc->tpdrq.mem.size;
1854
1855 sc->hsp_mem.size = sizeof(struct he_hsp);
1856 sc->hsp_mem.align = 1024;
1857
1858 sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size;
1859 sc->tpd_total = sc->tbrq.size + sc->tpdrq.size;
1860 sc->tpds.align = 64;
1861 sc->tpds.size = sc->tpd_total * HE_TPD_SIZE;
1862
1863 hatm_init_rmaps(sc);
1864 hatm_init_smbufs(sc);
1865 if ((error = hatm_init_tpds(sc)) != 0)
1866 goto failed;
1867
1868 /*
1869 * Allocate memory
1870 */
1871 if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 ||
1872 (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 ||
1873 (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 ||
1874 (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0)
1875 goto failed;
1876
1877 if (sc->rbp_s0.mem.size != 0 &&
1878 (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem)))
1879 goto failed;
1880 if (sc->rbp_l0.mem.size != 0 &&
1881 (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem)))
1882 goto failed;
1883 if (sc->rbp_s1.mem.size != 0 &&
1884 (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem)))
1885 goto failed;
1886
1887 if (sc->rbrq_0.mem.size != 0 &&
1888 (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem)))
1889 goto failed;
1890 if (sc->rbrq_1.mem.size != 0 &&
1891 (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem)))
1892 goto failed;
1893
1894 if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc),
1895 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) {
1896 device_printf(dev, "cannot allocate zone for vccs\n");
1897 goto failed;
1898 }
1899
1900 /*
1901 * 4.4 Reset the card.
1902 */
1903 if ((error = hatm_reset(sc)) != 0)
1904 goto failed;
1905
1906 /*
1907 * Read the prom.
1908 */
1909 hatm_init_bus_width(sc);
1910 hatm_init_read_eeprom(sc);
1911 hatm_init_endianess(sc);
1912
1913 /*
1914 * Initialize interface
1915 */
1916 ifp->if_flags = IFF_SIMPLEX;
1917 ifp->if_ioctl = hatm_ioctl;
1918 ifp->if_start = hatm_start;
1919 ifp->if_watchdog = NULL;
1920 ifp->if_init = hatm_init;
1921
1922 utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
1923 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1924 &hatm_utopia_methods);
1925 utopia_init_media(&sc->utopia);
1926
1927 /* these two SUNI routines need the lock */
1928 mtx_lock(&sc->mtx);
1929 /* poll while we are not running */
1930 sc->utopia.flags |= UTP_FL_POLL_CARRIER;
1931 utopia_start(&sc->utopia);
1932 utopia_reset(&sc->utopia);
1933 mtx_unlock(&sc->mtx);
1934
1935 atm_ifattach(ifp);
1936
1937#ifdef ENABLE_BPF
1938 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
1939#endif
1940
1941 error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, hatm_intr,
1942 &sc->irq_0, &sc->ih);
1943 if (error != 0) {
1944 device_printf(dev, "could not setup interrupt\n");
1945 hatm_detach(dev);
1946 return (error);
1947 }
1948
1949 return (0);
1950
1951 failed:
1952 hatm_destroy(sc);
1953 return (error);
1954}
1955
1956/*
1957 * Start the interface. Assume a state as from attach().
1958 */
1959void
1960hatm_initialize(struct hatm_softc *sc)
1961{
1962 uint32_t v;
1963 u_int cid;
1964 static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT;
1965
1966 if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
1967 return;
1968
1969 hatm_init_bus_width(sc);
1970 hatm_init_endianess(sc);
1971
1972 if_printf(&sc->ifatm.ifnet, "%s, Rev. %s, S/N %u, "
1973 "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n",
1974 sc->prod_id, sc->rev, sc->ifatm.mib.serial,
1975 sc->ifatm.mib.esi[0], sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2],
1976 sc->ifatm.mib.esi[3], sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5],
1977 sc->pci64 ? 64 : 32);
1978
1979 /*
1980 * 4.8 SDRAM Controller Initialisation
1981 * 4.9 Initialize RNUM value
1982 */
1983 if (sc->he622)
1984 WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
1985 else
1986 WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
1987 BARRIER_W(sc);
1988
1989 v = READ4(sc, HE_REGO_LB_SWAP);
1990 BARRIER_R(sc);
1991 v |= 0xf << HE_REGS_LBSWAP_RNUM;
1992 WRITE4(sc, HE_REGO_LB_SWAP, v);
1993 BARRIER_W(sc);
1994
1995 hatm_init_irq(sc, &sc->irq_0, 0);
1996 hatm_clear_irq(sc, 1);
1997 hatm_clear_irq(sc, 2);
1998 hatm_clear_irq(sc, 3);
1999
2000 WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
2001 WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
2002 WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
2003 WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
2004 BARRIER_W(sc);
2005
2006 /*
2007 * 4.11 Enable PCI Bus Controller State Machine
2008 */
2009 v = READ4(sc, HE_REGO_HOST_CNTL);
2010 BARRIER_R(sc);
2011 v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
2012 HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
2013 WRITE4(sc, HE_REGO_HOST_CNTL, v);
2014 BARRIER_W(sc);
2015
2016 /*
2017 * 5.1.1 Generic configuration state
2018 */
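	/*
	 * Select the local buffer memory layout for the 155 or 622 card:
	 * cells and bytes per SDRAM row, the rows assigned to receive
	 * group 0, transmit and receive group 1, and the cells per local
	 * buffer. The buffer counts derived from this are capped below.
	 */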
2019 sc->cells_per_row = layout[sc->he622][0];
2020 sc->bytes_per_row = layout[sc->he622][1];
2021 sc->r0_numrows = layout[sc->he622][2];
2022 sc->tx_numrows = layout[sc->he622][3];
2023 sc->r1_numrows = layout[sc->he622][4];
2024 sc->r0_startrow = layout[sc->he622][5];
2025 sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
2026 sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
2027 sc->cells_per_lbuf = layout[sc->he622][6];
2028
2029 sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row /
2030 sc->cells_per_lbuf);
2031 sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row /
2032 sc->cells_per_lbuf);
2033 sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row /
2034 sc->cells_per_lbuf);
2035
2036 if (sc->r0_numbuffs > 2560)
2037 sc->r0_numbuffs = 2560;
2038 if (sc->r1_numbuffs > 2560)
2039 sc->r1_numbuffs = 2560;
2040 if (sc->tx_numbuffs > 5120)
2041 sc->tx_numbuffs = 5120;
2042
2043 DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
2044 "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
2045 "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
2046 "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
2047 sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
2048 sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
2049 sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));
2050
2051 /*
 2052 	 * 5.1.2 Configure Hardware-dependent registers
2053 */
2054 if (sc->he622) {
2055 WRITE4(sc, HE_REGO_LBARB,
2056 (0x2 << HE_REGS_LBARB_SLICE) |
2057 (0xf << HE_REGS_LBARB_RNUM) |
2058 (0x3 << HE_REGS_LBARB_THPRI) |
2059 (0x3 << HE_REGS_LBARB_RHPRI) |
2060 (0x2 << HE_REGS_LBARB_TLPRI) |
2061 (0x1 << HE_REGS_LBARB_RLPRI) |
2062 (0x28 << HE_REGS_LBARB_BUS_MULT) |
2063 (0x50 << HE_REGS_LBARB_NET_PREF));
2064 BARRIER_W(sc);
2065 WRITE4(sc, HE_REGO_SDRAMCON,
2066 /* HW bug: don't use banking */
2067 /* HE_REGM_SDRAMCON_BANK | */
2068 HE_REGM_SDRAMCON_WIDE |
2069 (0x384 << HE_REGS_SDRAMCON_REF));
2070 BARRIER_W(sc);
2071 WRITE4(sc, HE_REGO_RCMCONFIG,
2072 (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2073 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2074 (0x0 << HE_REGS_RCMCONFIG_TYPE));
2075 WRITE4(sc, HE_REGO_TCMCONFIG,
2076 (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2077 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2078 (0x0 << HE_REGS_TCMCONFIG_TYPE));
2079 } else {
2080 WRITE4(sc, HE_REGO_LBARB,
2081 (0x2 << HE_REGS_LBARB_SLICE) |
2082 (0xf << HE_REGS_LBARB_RNUM) |
2083 (0x3 << HE_REGS_LBARB_THPRI) |
2084 (0x3 << HE_REGS_LBARB_RHPRI) |
2085 (0x2 << HE_REGS_LBARB_TLPRI) |
2086 (0x1 << HE_REGS_LBARB_RLPRI) |
2087 (0x46 << HE_REGS_LBARB_BUS_MULT) |
2088 (0x8C << HE_REGS_LBARB_NET_PREF));
2089 BARRIER_W(sc);
2090 WRITE4(sc, HE_REGO_SDRAMCON,
2091 /* HW bug: don't use banking */
2092 /* HE_REGM_SDRAMCON_BANK | */
2093 (0x150 << HE_REGS_SDRAMCON_REF));
2094 BARRIER_W(sc);
2095 WRITE4(sc, HE_REGO_RCMCONFIG,
2096 (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2097 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2098 (0x0 << HE_REGS_RCMCONFIG_TYPE));
2099 WRITE4(sc, HE_REGO_TCMCONFIG,
2100 (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2101 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2102 (0x0 << HE_REGS_TCMCONFIG_TYPE));
2103 }
2104 WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48));
2105
2106 WRITE4(sc, HE_REGO_RLBC_H, 0);
2107 WRITE4(sc, HE_REGO_RLBC_T, 0);
2108 WRITE4(sc, HE_REGO_RLBC_H2, 0);
2109
2110 WRITE4(sc, HE_REGO_RXTHRSH, 512);
2111 WRITE4(sc, HE_REGO_LITHRSH, 256);
2112
2113 WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs);
2114 WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs);
2115
2116 if (sc->he622) {
2117 WRITE4(sc, HE_REGO_RCCONFIG,
2118 (8 << HE_REGS_RCCONFIG_UTDELAY) |
2119 (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2120 (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC));
2121 WRITE4(sc, HE_REGO_TXCONFIG,
2122 (32 << HE_REGS_TXCONFIG_THRESH) |
2123 (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2124 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2125 } else {
2126 WRITE4(sc, HE_REGO_RCCONFIG,
2127 (0 << HE_REGS_RCCONFIG_UTDELAY) |
2128 HE_REGM_RCCONFIG_UT_MODE |
2129 (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2130 (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC));
2131 WRITE4(sc, HE_REGO_TXCONFIG,
2132 (32 << HE_REGS_TXCONFIG_THRESH) |
2133 HE_REGM_TXCONFIG_UTMODE |
2134 (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2135 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2136 }
2137
2138 WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0);
2139
2140 if (sc->rbp_s1.size != 0) {
2141 WRITE4(sc, HE_REGO_RHCONFIG,
2142 HE_REGM_RHCONFIG_PHYENB |
2143 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2144 (1 << HE_REGS_RHCONFIG_OAM_GID));
2145 } else {
2146 WRITE4(sc, HE_REGO_RHCONFIG,
2147 HE_REGM_RHCONFIG_PHYENB |
2148 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2149 (0 << HE_REGS_RHCONFIG_OAM_GID));
2150 }
2151 BARRIER_W(sc);
2152
2153 hatm_init_cm(sc);
2154
2155 hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs);
2156 hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs);
2157 hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs);
2158
2159 hatm_init_imed_queues(sc);
2160
2161 /*
2162 * 5.1.6 Application tunable Parameters
2163 */
2164 WRITE4(sc, HE_REGO_MCC, 0);
2165 WRITE4(sc, HE_REGO_OEC, 0);
2166 WRITE4(sc, HE_REGO_DCC, 0);
2167 WRITE4(sc, HE_REGO_CEC, 0);
2168
2169 hatm_init_cs_block(sc);
2170 hatm_init_cs_block_cm(sc);
2171
2172 hatm_init_rpool(sc, &sc->rbp_s0, 0, 0);
2173 hatm_init_rpool(sc, &sc->rbp_l0, 0, 1);
2174 hatm_init_rpool(sc, &sc->rbp_s1, 1, 0);
2175 hatm_clear_rpool(sc, 1, 1);
2176 hatm_clear_rpool(sc, 2, 0);
2177 hatm_clear_rpool(sc, 2, 1);
2178 hatm_clear_rpool(sc, 3, 0);
2179 hatm_clear_rpool(sc, 3, 1);
2180 hatm_clear_rpool(sc, 4, 0);
2181 hatm_clear_rpool(sc, 4, 1);
2182 hatm_clear_rpool(sc, 5, 0);
2183 hatm_clear_rpool(sc, 5, 1);
2184 hatm_clear_rpool(sc, 6, 0);
2185 hatm_clear_rpool(sc, 6, 1);
2186 hatm_clear_rpool(sc, 7, 0);
2187 hatm_clear_rpool(sc, 7, 1);
2188 hatm_init_rbrq(sc, &sc->rbrq_0, 0);
2189 hatm_init_rbrq(sc, &sc->rbrq_1, 1);
2190 hatm_clear_rbrq(sc, 2);
2191 hatm_clear_rbrq(sc, 3);
2192 hatm_clear_rbrq(sc, 4);
2193 hatm_clear_rbrq(sc, 5);
2194 hatm_clear_rbrq(sc, 6);
2195 hatm_clear_rbrq(sc, 7);
2196
2197 sc->lbufs_next = 0;
2198 bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size);
2199
2200 hatm_init_tbrq(sc, &sc->tbrq, 0);
2201 hatm_clear_tbrq(sc, 1);
2202 hatm_clear_tbrq(sc, 2);
2203 hatm_clear_tbrq(sc, 3);
2204 hatm_clear_tbrq(sc, 4);
2205 hatm_clear_tbrq(sc, 5);
2206 hatm_clear_tbrq(sc, 6);
2207 hatm_clear_tbrq(sc, 7);
2208
2209 hatm_init_tpdrq(sc);
2210
2211 WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 0x104780 : 0x800));
2212
2213 /*
2214 * Initialize HSP
2215 */
2216 bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2217 sc->hsp = sc->hsp_mem.base;
2218 WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);
2219
2220 /*
2221 * 5.1.12 Enable transmit and receive
2222 * Enable bus master and interrupts
2223 */
2224 v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2225 v |= 0x18000000;
2226 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2227
2228 v = READ4(sc, HE_REGO_RCCONFIG);
2229 v |= HE_REGM_RCCONFIG_RXENB;
2230 WRITE4(sc, HE_REGO_RCCONFIG, v);
2231
2232 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2233 v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
2234 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2235
2236 sc->ifatm.ifnet.if_flags |= IFF_RUNNING;
2237 sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
2238
2239 sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;
2240
2241 /* reopen vccs */
2242 for (cid = 0; cid < HE_MAX_VCCS; cid++)
2243 if (sc->vccs[cid] != NULL)
2244 hatm_load_vc(sc, cid, 1);
2245
2246 ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
2247 sc->utopia.carrier == UTP_CARR_OK);
2248}
2249
2250/*
 2251 * This function stops the card and frees all resources allocated after
2252 * the attach. Must have the global lock.
2253 */
2254void
2255hatm_stop(struct hatm_softc *sc)
2256{
2257 uint32_t v;
2258 u_int i, p, cid;
2259 struct mbuf_chunk_hdr *ch;
2260 struct mbuf_page *pg;
2261
2262 mtx_assert(&sc->mtx, MA_OWNED);
2263
2264 if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
2265 return;
2266 sc->ifatm.ifnet.if_flags &= ~IFF_RUNNING;
2267
2268 ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
2269 sc->utopia.carrier == UTP_CARR_OK);
2270
2271 sc->utopia.flags |= UTP_FL_POLL_CARRIER;
2272
2273 /*
2274 * Stop and reset the hardware so that everything remains
2275 * stable.
2276 */
2277 v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2278 v &= ~0x18000000;
2279 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2280
2281 v = READ4(sc, HE_REGO_RCCONFIG);
2282 v &= ~HE_REGM_RCCONFIG_RXENB;
2283 WRITE4(sc, HE_REGO_RCCONFIG, v);
2284
2285 WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
2286 BARRIER_W(sc);
2287
2288 v = READ4(sc, HE_REGO_HOST_CNTL);
2289 BARRIER_R(sc);
2290 v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
2291 WRITE4(sc, HE_REGO_HOST_CNTL, v);
2292 BARRIER_W(sc);
2293
2294 /*
 2295 	 * Disable bus master and interrupts
2296 */
2297 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2298 v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
2299 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2300
2301 (void)hatm_reset(sc);
2302
2303 /*
2304 * Card resets the SUNI when resetted, so re-initialize it
2305 */
2306 utopia_reset(&sc->utopia);
2307
2308 /*
2309 * Give any waiters on closing a VCC a chance. They will stop
2310 * to wait if they see that IFF_RUNNING disappeared.
2311 */
2312 while (!(cv_waitq_empty(&sc->vcc_cv))) {
2313 cv_broadcast(&sc->vcc_cv);
2314 DELAY(100);
2315 }
2316 while (!(cv_waitq_empty(&sc->cv_rcclose))) {
2317 cv_broadcast(&sc->cv_rcclose);
2318 }
2319
2320 /*
2321 * Now free all resources.
2322 */
2323
2324 /*
2325 * Free the large mbufs that are given to the card.
2326 */
2327 for (i = 0 ; i < sc->lbufs_size; i++) {
2328 if (sc->lbufs[i] != NULL) {
2329 bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
2330 m_freem(sc->lbufs[i]);
2331 sc->lbufs[i] = NULL;
2332 }
2333 }
2334
2335 /*
2336 * Free small buffers
2337 */
2338 for (p = 0; p < sc->mbuf_npages; p++) {
2339 pg = sc->mbuf_pages[p];
2340 for (i = 0; i < pg->hdr.nchunks; i++) {
2341 if (MBUF_TST_BIT(pg->hdr.card, i)) {
2342 MBUF_CLR_BIT(pg->hdr.card, i);
2343 MBUF_CLR_BIT(pg->hdr.used, i);
2344 ch = (struct mbuf_chunk_hdr *) ((char *)pg +
2345 i * pg->hdr.chunksize + pg->hdr.hdroff);
2346 m_freem(ch->mbuf);
2347 }
2348 }
2349 }
2350
2351 hatm_stop_tpds(sc);
2352
2353 /*
2354 * Free all partial reassembled PDUs on any VCC.
2355 */
2356 for (cid = 0; cid < HE_MAX_VCCS; cid++) {
2357 if (sc->vccs[cid] != NULL) {
2358 if (sc->vccs[cid]->chain != NULL) {
2359 m_freem(sc->vccs[cid]->chain);
2360 sc->vccs[cid]->chain = NULL;
2361 sc->vccs[cid]->last = NULL;
2362 }
2363 if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN |
2364 HE_VCC_TX_OPEN))) {
2365 hatm_tx_vcc_closed(sc, cid);
2366 uma_zfree(sc->vcc_zone, sc->vccs[cid]);
2367 sc->vccs[cid] = NULL;
2368 sc->open_vccs--;
2369 } else {
2370 sc->vccs[cid]->vflags = 0;
2371 sc->vccs[cid]->ntpds = 0;
2372 }
2373 }
2374 }
2375
2376 if (sc->rbp_s0.size != 0)
2377 bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
2378 if (sc->rbp_l0.size != 0)
2379 bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
2380 if (sc->rbp_s1.size != 0)
2381 bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
2382 if (sc->rbrq_0.size != 0)
2383 bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
2384 if (sc->rbrq_1.size != 0)
2385 bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);
2386
2387 bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
2388 bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
2389 bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2390}
2391
2392/************************************************************
2393 *
2394 * Driver infrastructure
2395 */
2396devclass_t hatm_devclass;
2397
2398static device_method_t hatm_methods[] = {
2399 DEVMETHOD(device_probe, hatm_probe),
2400 DEVMETHOD(device_attach, hatm_attach),
2401 DEVMETHOD(device_detach, hatm_detach),
2402 {0,0}
2403};
2404static driver_t hatm_driver = {
2405 "hatm",
2406 hatm_methods,
2407 sizeof(struct hatm_softc),
2408};
2409DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);
1697 sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid,
1698 0, ~0, 1, RF_ACTIVE);
1699 if (sc->memres == NULL) {
1700 device_printf(dev, "could not map memory\n");
1701 error = ENXIO;
1702 goto failed;
1703 }
1704 sc->memh = rman_get_bushandle(sc->memres);
1705 sc->memt = rman_get_bustag(sc->memres);
1706
1707 /*
1708 * Allocate a DMA tag for subsequent allocations
1709 */
1710 if (bus_dma_tag_create(NULL, 1, 0,
1711 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1712 NULL, NULL,
1713 BUS_SPACE_MAXSIZE_32BIT, 1,
1714 BUS_SPACE_MAXSIZE_32BIT, 0,
1715 NULL, NULL, &sc->parent_tag)) {
1716 device_printf(dev, "could not allocate DMA tag\n");
1717 error = ENOMEM;
1718 goto failed;
1719 }
1720
1721 if (bus_dma_tag_create(sc->parent_tag, 1, 0,
1722 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1723 NULL, NULL,
1724 MBUF_ALLOC_SIZE, 1,
1725 MBUF_ALLOC_SIZE, 0,
1726 NULL, NULL, &sc->mbuf_tag)) {
1727 device_printf(dev, "could not allocate mbuf DMA tag\n");
1728 error = ENOMEM;
1729 goto failed;
1730 }
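	/*
	 * Sketch for illustration only -- not the driver's own code.  It
	 * shows how a DMA-able descriptor area is typically carved out of a
	 * parent tag like the one created above, which is roughly what the
	 * hatm_alloc_dmamem() calls further down presumably do.  The names
	 * area_tag, area_map, addr_cb, size and align are stand-ins for the
	 * example, not the driver's own definitions:
	 *
	 *	static void
	 *	addr_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	 *	{
	 *		if (error == 0)
	 *			*(bus_addr_t *)arg = segs[0].ds_addr;
	 *	}
	 *
	 *	error = bus_dma_tag_create(sc->parent_tag, align, 0,
	 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	 *	    size, 1, size, 0, NULL, NULL, &area_tag);
	 *	if (error == 0)
	 *		error = bus_dmamem_alloc(area_tag, &vaddr,
	 *		    BUS_DMA_NOWAIT, &area_map);
	 *	if (error == 0)
	 *		error = bus_dmamap_load(area_tag, area_map, vaddr,
	 *		    size, addr_cb, &paddr, BUS_DMA_NOWAIT);
	 *
	 * Error unwinding (unloading the map, freeing the memory and
	 * destroying the tag) is omitted here for brevity.
	 */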
1731
1732 /*
1733 * Allocate a DMA tag for packets to send. Specifying the maximum
1734 * number of segments is a problem here: in theory it would be
1735 * (transmit ring size - 1) * 3, but such a tag does not work in
1736 * practice. Instead, the maximum number of TPDs that a single packet
1737 * may occupy is made a configuration parameter.
1738 */
1739 if (bus_dma_tag_create(NULL, 1, 0,
1740 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1741 HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0,
1742 NULL, NULL, &sc->tx_tag)) {
1743 device_printf(dev, "could not allocate TX tag\n");
1744 error = ENOMEM;
1745 goto failed;
1746 }
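	/*
	 * Sketch for illustration only -- not the driver's transmit path.
	 * It shows how an mbuf chain can be mapped against a tag with a
	 * bounded segment count such as tx_tag (three buffer descriptors
	 * per TPD) and linearized with m_defrag(9) when the chain has too
	 * many segments.  load_cb, map, m, m2 and nseg are assumptions made
	 * for the fragment below:
	 *
	 *	static void
	 *	load_cb(void *arg, bus_dma_segment_t *segs, int nseg,
	 *	    bus_size_t mapsize, int error)
	 *	{
	 *		if (error == 0)
	 *			*(int *)arg = nseg;	// fill nseg TPD slots
	 *	}
	 *
	 *	error = bus_dmamap_load_mbuf(sc->tx_tag, map, m, load_cb,
	 *	    &nseg, BUS_DMA_NOWAIT);
	 *	if (error == EFBIG) {
	 *		// too many segments: linearize the chain, retry once
	 *		if ((m2 = m_defrag(m, M_DONTWAIT)) == NULL) {
	 *			m_freem(m);
	 *			return (ENOBUFS);
	 *		}
	 *		m = m2;
	 *		error = bus_dmamap_load_mbuf(sc->tx_tag, map, m,
	 *		    load_cb, &nseg, BUS_DMA_NOWAIT);
	 *	}
	 *	if (error != 0) {
	 *		m_freem(m);
	 *		return (error);
	 *	}
	 */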
1747
1748 /*
1749 * Setup the interrupt
1750 */
1751 sc->irqid = 0;
1752 sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid,
1753 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
1754 if (sc->irqres == 0) {
1755 device_printf(dev, "could not allocate irq\n");
1756 error = ENXIO;
1757 goto failed;
1758 }
1759
1760 ifp = &sc->ifatm.ifnet;
1761 ifp->if_softc = sc;
1762 ifp->if_unit = unit;
1763 ifp->if_name = "hatm";
1764
1765 /*
1766 * Make the sysctl tree
1767 */
1768 error = ENOMEM;
1769 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1770 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
1771 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
1772 goto failed;
1773
1774 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1775 OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS,
1776 hatm_sysctl, "LU", "internal statistics") == NULL)
1777 goto failed;
1778
1779#ifdef HATM_DEBUG
1780 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1781 OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1782 hatm_sysctl_tsr, "S", "transmission status registers") == NULL)
1783 goto failed;
1784
1785 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1786 OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1787 hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL)
1788 goto failed;
1789
1790 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1791 OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1792 hatm_sysctl_mbox, "S", "mbox registers") == NULL)
1793 goto failed;
1794
1795 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1796 OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1797 hatm_sysctl_cm, "S", "connection memory") == NULL)
1798 goto failed;
1799
1800 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1801 OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1802 hatm_sysctl_heregs, "S", "card registers") == NULL)
1803 goto failed;
1804
1805 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1806 OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1807 hatm_sysctl_lbmem, "S", "local memory") == NULL)
1808 goto failed;
1809
1810 kenv_getuint(sc, "debug", &sc->debug, 0, 1);
1811#endif
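	/*
	 * Sketch for illustration only -- not the driver's actual
	 * hatm_sysctl() handler.  A read-only handler registered like the
	 * nodes above typically snapshots its data under the driver lock
	 * and copies it out with SYSCTL_OUT(); the handler name and the
	 * exported value below are assumptions for the example:
	 *
	 *	static int
	 *	example_sysctl_open_vccs(SYSCTL_HANDLER_ARGS)
	 *	{
	 *		struct hatm_softc *sc = arg1;
	 *		u_int vccs;
	 *
	 *		mtx_lock(&sc->mtx);
	 *		vccs = sc->open_vccs;
	 *		mtx_unlock(&sc->mtx);
	 *		return (SYSCTL_OUT(req, &vccs, sizeof(vccs)));
	 *	}
	 */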
1812
1813 /*
1814 * Configure
1815 */
1816 if ((error = hatm_configure(sc)) != 0)
1817 goto failed;
1818
1819 /*
1820 * Compute memory parameters
1821 */
1822 if (sc->rbp_s0.size != 0) {
1823 sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3;
1824 sc->rbp_s0.mem.size = sc->rbp_s0.size * 8;
1825 sc->rbp_s0.mem.align = sc->rbp_s0.mem.size;
1826 }
1827 if (sc->rbp_l0.size != 0) {
1828 sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3;
1829 sc->rbp_l0.mem.size = sc->rbp_l0.size * 8;
1830 sc->rbp_l0.mem.align = sc->rbp_l0.mem.size;
1831 }
1832 if (sc->rbp_s1.size != 0) {
1833 sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3;
1834 sc->rbp_s1.mem.size = sc->rbp_s1.size * 8;
1835 sc->rbp_s1.mem.align = sc->rbp_s1.mem.size;
1836 }
1837 if (sc->rbrq_0.size != 0) {
1838 sc->rbrq_0.mem.size = sc->rbrq_0.size * 8;
1839 sc->rbrq_0.mem.align = sc->rbrq_0.mem.size;
1840 }
1841 if (sc->rbrq_1.size != 0) {
1842 sc->rbrq_1.mem.size = sc->rbrq_1.size * 8;
1843 sc->rbrq_1.mem.align = sc->rbrq_1.mem.size;
1844 }
1845
1846 sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t);
1847 sc->irq_0.mem.align = 4 * 1024;
1848
1849 sc->tbrq.mem.size = sc->tbrq.size * 4;
1850 sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */
1851
1852 sc->tpdrq.mem.size = sc->tpdrq.size * 8;
1853 sc->tpdrq.mem.align = sc->tpdrq.mem.size;
1854
1855 sc->hsp_mem.size = sizeof(struct he_hsp);
1856 sc->hsp_mem.align = 1024;
1857
1858 sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size;
1859 sc->tpd_total = sc->tbrq.size + sc->tpdrq.size;
1860 sc->tpds.align = 64;
1861 sc->tpds.size = sc->tpd_total * HE_TPD_SIZE;
1862
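	/*
	 * Worked example with an assumed queue size (the real sizes come
	 * from the configuration above): every receive buffer pool and
	 * RBRQ entry is 8 bytes wide, each queue is aligned to its own
	 * size, and the mask converts a byte offset into one that wraps
	 * within the ring.  For rbp_s0.size = 1024 entries:
	 *
	 *	rbp_s0.mem.size  = 1024 * 8 = 8192 bytes
	 *	rbp_s0.mem.align = 8192
	 *	rbp_s0.mask      = (1024 - 1) << 3 = 0x1ff8
	 *
	 * Masking a byte offset with 0x1ff8 keeps it inside the 8 KByte
	 * ring and aligned to an 8 byte entry, so stepping a head or tail
	 * pointer by 8 and applying the mask wraps it to 0 after the last
	 * entry.
	 */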
1863 hatm_init_rmaps(sc);
1864 hatm_init_smbufs(sc);
1865 if ((error = hatm_init_tpds(sc)) != 0)
1866 goto failed;
1867
1868 /*
1869 * Allocate memory
1870 */
1871 if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 ||
1872 (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 ||
1873 (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 ||
1874 (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0)
1875 goto failed;
1876
1877 if (sc->rbp_s0.mem.size != 0 &&
1878 (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem)))
1879 goto failed;
1880 if (sc->rbp_l0.mem.size != 0 &&
1881 (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem)))
1882 goto failed;
1883 if (sc->rbp_s1.mem.size != 0 &&
1884 (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem)))
1885 goto failed;
1886
1887 if (sc->rbrq_0.mem.size != 0 &&
1888 (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem)))
1889 goto failed;
1890 if (sc->rbrq_1.mem.size != 0 &&
1891 (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem)))
1892 goto failed;
1893
1894 if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc),
1895 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) {
1896 device_printf(dev, "cannot allocate zone for vccs\n");
error = ENOMEM;
1897 goto failed;
1898 }
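	/*
	 * Sketch for illustration only -- the real VCC open path lives
	 * elsewhere in the driver.  The zone created above would typically
	 * be used along these lines when a VCC is opened (the matching
	 * uma_zfree() can be seen in hatm_stop() below):
	 *
	 *	vcc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
	 *	if (vcc == NULL)
	 *		return (ENOMEM);
	 *	sc->vccs[cid] = vcc;
	 *	sc->open_vccs++;
	 */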
1899
1900 /*
1901 * 4.4 Reset the card.
1902 */
1903 if ((error = hatm_reset(sc)) != 0)
1904 goto failed;
1905
1906 /*
1907 * Read the PROM.
1908 */
1909 hatm_init_bus_width(sc);
1910 hatm_init_read_eeprom(sc);
1911 hatm_init_endianess(sc);
1912
1913 /*
1914 * Initialize interface
1915 */
1916 ifp->if_flags = IFF_SIMPLEX;
1917 ifp->if_ioctl = hatm_ioctl;
1918 ifp->if_start = hatm_start;
1919 ifp->if_watchdog = NULL;
1920 ifp->if_init = hatm_init;
1921
1922 utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
1923 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1924 &hatm_utopia_methods);
1925 utopia_init_media(&sc->utopia);
1926
1927 /* these two SUNI routines need the lock */
1928 mtx_lock(&sc->mtx);
1929 /* poll while we are not running */
1930 sc->utopia.flags |= UTP_FL_POLL_CARRIER;
1931 utopia_start(&sc->utopia);
1932 utopia_reset(&sc->utopia);
1933 mtx_unlock(&sc->mtx);
1934
1935 atm_ifattach(ifp);
1936
1937#ifdef ENABLE_BPF
1938 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
1939#endif
1940
1941 error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, hatm_intr,
1942 &sc->irq_0, &sc->ih);
1943 if (error != 0) {
1944 device_printf(dev, "could not setup interrupt\n");
1945 hatm_detach(dev);
1946 return (error);
1947 }
1948
1949 return (0);
1950
1951 failed:
1952 hatm_destroy(sc);
1953 return (error);
1954}
1955
1956/*
1957 * Start the interface. Assumes the state left behind by attach().
1958 */
1959void
1960hatm_initialize(struct hatm_softc *sc)
1961{
1962 uint32_t v;
1963 u_int cid;
1964 static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT;
1965
1966 if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
1967 return;
1968
1969 hatm_init_bus_width(sc);
1970 hatm_init_endianess(sc);
1971
1972 if_printf(&sc->ifatm.ifnet, "%s, Rev. %s, S/N %u, "
1973 "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n",
1974 sc->prod_id, sc->rev, sc->ifatm.mib.serial,
1975 sc->ifatm.mib.esi[0], sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2],
1976 sc->ifatm.mib.esi[3], sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5],
1977 sc->pci64 ? 64 : 32);
1978
1979 /*
1980 * 4.8 SDRAM Controller Initialisation
1981 * 4.9 Initialize RNUM value
1982 */
1983 if (sc->he622)
1984 WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
1985 else
1986 WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
1987 BARRIER_W(sc);
1988
1989 v = READ4(sc, HE_REGO_LB_SWAP);
1990 BARRIER_R(sc);
1991 v |= 0xf << HE_REGS_LBSWAP_RNUM;
1992 WRITE4(sc, HE_REGO_LB_SWAP, v);
1993 BARRIER_W(sc);
1994
1995 hatm_init_irq(sc, &sc->irq_0, 0);
1996 hatm_clear_irq(sc, 1);
1997 hatm_clear_irq(sc, 2);
1998 hatm_clear_irq(sc, 3);
1999
2000 WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
2001 WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
2002 WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
2003 WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
2004 BARRIER_W(sc);
2005
2006 /*
2007 * 4.11 Enable PCI Bus Controller State Machine
2008 */
2009 v = READ4(sc, HE_REGO_HOST_CNTL);
2010 BARRIER_R(sc);
2011 v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
2012 HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
2013 WRITE4(sc, HE_REGO_HOST_CNTL, v);
2014 BARRIER_W(sc);
2015
2016 /*
2017 * 5.1.1 Generic configuration state
2018 */
2019 sc->cells_per_row = layout[sc->he622][0];
2020 sc->bytes_per_row = layout[sc->he622][1];
2021 sc->r0_numrows = layout[sc->he622][2];
2022 sc->tx_numrows = layout[sc->he622][3];
2023 sc->r1_numrows = layout[sc->he622][4];
2024 sc->r0_startrow = layout[sc->he622][5];
2025 sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
2026 sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
2027 sc->cells_per_lbuf = layout[sc->he622][6];
2028
2029 sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row /
2030 sc->cells_per_lbuf);
2031 sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row /
2032 sc->cells_per_lbuf);
2033 sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row /
2034 sc->cells_per_lbuf);
2035
2036 if (sc->r0_numbuffs > 2560)
2037 sc->r0_numbuffs = 2560;
2038 if (sc->r1_numbuffs > 2560)
2039 sc->r1_numbuffs = 2560;
2040 if (sc->tx_numbuffs > 5120)
2041 sc->tx_numbuffs = 5120;
2042
2043 DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
2044 "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
2045 "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
2046 "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
2047 sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
2048 sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
2049 sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));
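	/*
	 * Worked example with invented layout values (the real numbers come
	 * from HE_CONFIG_MEM_LAYOUT and differ between the 155 and 622 MBit
	 * cards).  With cells_per_row = 128, cells_per_lbuf = 8 and
	 * r0_numrows = 100:
	 *
	 *	r0_numbuffs = 100 * (128 / 8) = 1600
	 *
	 * which stays below the 2560 cap applied above; a layout yielding
	 * more group-0 buffers would be clamped to 2560.
	 */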
2050
2051 /*
2052 * 5.1.2 Configure hardware-dependent registers
2053 */
2054 if (sc->he622) {
2055 WRITE4(sc, HE_REGO_LBARB,
2056 (0x2 << HE_REGS_LBARB_SLICE) |
2057 (0xf << HE_REGS_LBARB_RNUM) |
2058 (0x3 << HE_REGS_LBARB_THPRI) |
2059 (0x3 << HE_REGS_LBARB_RHPRI) |
2060 (0x2 << HE_REGS_LBARB_TLPRI) |
2061 (0x1 << HE_REGS_LBARB_RLPRI) |
2062 (0x28 << HE_REGS_LBARB_BUS_MULT) |
2063 (0x50 << HE_REGS_LBARB_NET_PREF));
2064 BARRIER_W(sc);
2065 WRITE4(sc, HE_REGO_SDRAMCON,
2066 /* HW bug: don't use banking */
2067 /* HE_REGM_SDRAMCON_BANK | */
2068 HE_REGM_SDRAMCON_WIDE |
2069 (0x384 << HE_REGS_SDRAMCON_REF));
2070 BARRIER_W(sc);
2071 WRITE4(sc, HE_REGO_RCMCONFIG,
2072 (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2073 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2074 (0x0 << HE_REGS_RCMCONFIG_TYPE));
2075 WRITE4(sc, HE_REGO_TCMCONFIG,
2076 (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2077 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2078 (0x0 << HE_REGS_TCMCONFIG_TYPE));
2079 } else {
2080 WRITE4(sc, HE_REGO_LBARB,
2081 (0x2 << HE_REGS_LBARB_SLICE) |
2082 (0xf << HE_REGS_LBARB_RNUM) |
2083 (0x3 << HE_REGS_LBARB_THPRI) |
2084 (0x3 << HE_REGS_LBARB_RHPRI) |
2085 (0x2 << HE_REGS_LBARB_TLPRI) |
2086 (0x1 << HE_REGS_LBARB_RLPRI) |
2087 (0x46 << HE_REGS_LBARB_BUS_MULT) |
2088 (0x8C << HE_REGS_LBARB_NET_PREF));
2089 BARRIER_W(sc);
2090 WRITE4(sc, HE_REGO_SDRAMCON,
2091 /* HW bug: don't use banking */
2092 /* HE_REGM_SDRAMCON_BANK | */
2093 (0x150 << HE_REGS_SDRAMCON_REF));
2094 BARRIER_W(sc);
2095 WRITE4(sc, HE_REGO_RCMCONFIG,
2096 (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2097 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2098 (0x0 << HE_REGS_RCMCONFIG_TYPE));
2099 WRITE4(sc, HE_REGO_TCMCONFIG,
2100 (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2101 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2102 (0x0 << HE_REGS_TCMCONFIG_TYPE));
2103 }
2104 WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48)); /* 48 payload bytes per cell */
2105
2106 WRITE4(sc, HE_REGO_RLBC_H, 0);
2107 WRITE4(sc, HE_REGO_RLBC_T, 0);
2108 WRITE4(sc, HE_REGO_RLBC_H2, 0);
2109
2110 WRITE4(sc, HE_REGO_RXTHRSH, 512);
2111 WRITE4(sc, HE_REGO_LITHRSH, 256);
2112
2113 WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs);
2114 WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs);
2115
2116 if (sc->he622) {
2117 WRITE4(sc, HE_REGO_RCCONFIG,
2118 (8 << HE_REGS_RCCONFIG_UTDELAY) |
2119 (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2120 (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC));
2121 WRITE4(sc, HE_REGO_TXCONFIG,
2122 (32 << HE_REGS_TXCONFIG_THRESH) |
2123 (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2124 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2125 } else {
2126 WRITE4(sc, HE_REGO_RCCONFIG,
2127 (0 << HE_REGS_RCCONFIG_UTDELAY) |
2128 HE_REGM_RCCONFIG_UT_MODE |
2129 (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2130 (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC));
2131 WRITE4(sc, HE_REGO_TXCONFIG,
2132 (32 << HE_REGS_TXCONFIG_THRESH) |
2133 HE_REGM_TXCONFIG_UTMODE |
2134 (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2135 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2136 }
2137
2138 WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0);
2139
2140 if (sc->rbp_s1.size != 0) {
2141 WRITE4(sc, HE_REGO_RHCONFIG,
2142 HE_REGM_RHCONFIG_PHYENB |
2143 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2144 (1 << HE_REGS_RHCONFIG_OAM_GID));
2145 } else {
2146 WRITE4(sc, HE_REGO_RHCONFIG,
2147 HE_REGM_RHCONFIG_PHYENB |
2148 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2149 (0 << HE_REGS_RHCONFIG_OAM_GID));
2150 }
2151 BARRIER_W(sc);
2152
2153 hatm_init_cm(sc);
2154
2155 hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs);
2156 hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs);
2157 hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs);
2158
2159 hatm_init_imed_queues(sc);
2160
2161 /*
2162 * 5.1.6 Application tunable Parameters
2163 */
2164 WRITE4(sc, HE_REGO_MCC, 0);
2165 WRITE4(sc, HE_REGO_OEC, 0);
2166 WRITE4(sc, HE_REGO_DCC, 0);
2167 WRITE4(sc, HE_REGO_CEC, 0);
2168
2169 hatm_init_cs_block(sc);
2170 hatm_init_cs_block_cm(sc);
2171
2172 hatm_init_rpool(sc, &sc->rbp_s0, 0, 0);
2173 hatm_init_rpool(sc, &sc->rbp_l0, 0, 1);
2174 hatm_init_rpool(sc, &sc->rbp_s1, 1, 0);
2175 hatm_clear_rpool(sc, 1, 1);
2176 hatm_clear_rpool(sc, 2, 0);
2177 hatm_clear_rpool(sc, 2, 1);
2178 hatm_clear_rpool(sc, 3, 0);
2179 hatm_clear_rpool(sc, 3, 1);
2180 hatm_clear_rpool(sc, 4, 0);
2181 hatm_clear_rpool(sc, 4, 1);
2182 hatm_clear_rpool(sc, 5, 0);
2183 hatm_clear_rpool(sc, 5, 1);
2184 hatm_clear_rpool(sc, 6, 0);
2185 hatm_clear_rpool(sc, 6, 1);
2186 hatm_clear_rpool(sc, 7, 0);
2187 hatm_clear_rpool(sc, 7, 1);
2188 hatm_init_rbrq(sc, &sc->rbrq_0, 0);
2189 hatm_init_rbrq(sc, &sc->rbrq_1, 1);
2190 hatm_clear_rbrq(sc, 2);
2191 hatm_clear_rbrq(sc, 3);
2192 hatm_clear_rbrq(sc, 4);
2193 hatm_clear_rbrq(sc, 5);
2194 hatm_clear_rbrq(sc, 6);
2195 hatm_clear_rbrq(sc, 7);
2196
2197 sc->lbufs_next = 0;
2198 bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size);
2199
2200 hatm_init_tbrq(sc, &sc->tbrq, 0);
2201 hatm_clear_tbrq(sc, 1);
2202 hatm_clear_tbrq(sc, 2);
2203 hatm_clear_tbrq(sc, 3);
2204 hatm_clear_tbrq(sc, 4);
2205 hatm_clear_tbrq(sc, 5);
2206 hatm_clear_tbrq(sc, 6);
2207 hatm_clear_tbrq(sc, 7);
2208
2209 hatm_init_tpdrq(sc);
2210
2211 WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 0x104780 : 0x800));
2212
2213 /*
2214 * Initialize HSP
2215 */
2216 bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2217 sc->hsp = sc->hsp_mem.base;
2218 WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);
2219
2220 /*
2221 * 5.1.12 Enable transmit and receive
2222 * Enable bus master and interrupts
2223 */
2224 v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2225 v |= 0x18000000;
2226 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2227
2228 v = READ4(sc, HE_REGO_RCCONFIG);
2229 v |= HE_REGM_RCCONFIG_RXENB;
2230 WRITE4(sc, HE_REGO_RCCONFIG, v);
2231
2232 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2233 v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
2234 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2235
2236 sc->ifatm.ifnet.if_flags |= IFF_RUNNING;
2237 sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
2238
2239 sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;
2240
2241 /* reopen vccs */
2242 for (cid = 0; cid < HE_MAX_VCCS; cid++)
2243 if (sc->vccs[cid] != NULL)
2244 hatm_load_vc(sc, cid, 1);
2245
2246 ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
2247 sc->utopia.carrier == UTP_CARR_OK);
2248}
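
/*
 * Sketch for illustration only -- not the driver's actual hatm_init().
 * hatm_initialize() is also reached through the if_init hook installed in
 * attach; that wrapper presumably just re-runs the initialisation under
 * the driver lock, roughly like this:
 */
static void
example_if_init(void *p)
{
	struct hatm_softc *sc = p;

	mtx_lock(&sc->mtx);
	hatm_stop(sc);
	hatm_initialize(sc);
	mtx_unlock(&sc->mtx);
}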
2249
2250/*
2251 * This function stops the card and frees all resources allocated after
2252 * the attach. The caller must hold the global lock.
2253 */
2254void
2255hatm_stop(struct hatm_softc *sc)
2256{
2257 uint32_t v;
2258 u_int i, p, cid;
2259 struct mbuf_chunk_hdr *ch;
2260 struct mbuf_page *pg;
2261
2262 mtx_assert(&sc->mtx, MA_OWNED);
2263
2264 if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
2265 return;
2266 sc->ifatm.ifnet.if_flags &= ~IFF_RUNNING;
2267
2268 ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
2269 sc->utopia.carrier == UTP_CARR_OK);
2270
2271 sc->utopia.flags |= UTP_FL_POLL_CARRIER;
2272
2273 /*
2274 * Stop and reset the hardware so that everything remains
2275 * stable.
2276 */
2277 v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2278 v &= ~0x18000000;
2279 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2280
2281 v = READ4(sc, HE_REGO_RCCONFIG);
2282 v &= ~HE_REGM_RCCONFIG_RXENB;
2283 WRITE4(sc, HE_REGO_RCCONFIG, v);
2284
2285 WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
2286 BARRIER_W(sc);
2287
2288 v = READ4(sc, HE_REGO_HOST_CNTL);
2289 BARRIER_R(sc);
2290 v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
2291 WRITE4(sc, HE_REGO_HOST_CNTL, v);
2292 BARRIER_W(sc);
2293
2294 /*
2295 * Disable bus master and interrupts
2296 */
2297 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2298 v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
2299 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2300
2301 (void)hatm_reset(sc);
2302
2303 /*
2304 * The card resets the SUNI when it is reset, so re-initialize it
2305 */
2306 utopia_reset(&sc->utopia);
2307
2308 /*
2309 * Give anybody waiting on a VCC close a chance to run. They will
2310 * stop waiting once they see that IFF_RUNNING has been cleared.
2311 */
2312 while (!(cv_waitq_empty(&sc->vcc_cv))) {
2313 cv_broadcast(&sc->vcc_cv);
2314 DELAY(100);
2315 }
2316 while (!(cv_waitq_empty(&sc->cv_rcclose))) {
2317 cv_broadcast(&sc->cv_rcclose);
2318 }
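	/*
	 * For illustration: the waiters woken above presumably sit in a
	 * loop along these lines in the VCC close path (a sketch with an
	 * assumed wait condition, not the driver's actual code):
	 *
	 *	while ((sc->ifatm.ifnet.if_flags & IFF_RUNNING) &&
	 *	    sc->vccs[cid]->ntpds != 0)
	 *		cv_wait(&sc->vcc_cv, &sc->mtx);
	 *
	 * cv_wait() drops sc->mtx while sleeping, which is why the
	 * broadcast here is issued with the lock held and the waiter
	 * re-checks its condition after waking up.
	 */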
2319
2320 /*
2321 * Now free all resources.
2322 */
2323
2324 /*
2325 * Free the large mbufs that are given to the card.
2326 */
2327 for (i = 0 ; i < sc->lbufs_size; i++) {
2328 if (sc->lbufs[i] != NULL) {
2329 bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
2330 m_freem(sc->lbufs[i]);
2331 sc->lbufs[i] = NULL;
2332 }
2333 }
2334
2335 /*
2336 * Free small buffers
2337 */
2338 for (p = 0; p < sc->mbuf_npages; p++) {
2339 pg = sc->mbuf_pages[p];
2340 for (i = 0; i < pg->hdr.nchunks; i++) {
2341 if (MBUF_TST_BIT(pg->hdr.card, i)) {
2342 MBUF_CLR_BIT(pg->hdr.card, i);
2343 MBUF_CLR_BIT(pg->hdr.used, i);
2344 ch = (struct mbuf_chunk_hdr *) ((char *)pg +
2345 i * pg->hdr.chunksize + pg->hdr.hdroff);
2346 m_freem(ch->mbuf);
2347 }
2348 }
2349 }
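	/*
	 * Worked example of the address arithmetic above with assumed page
	 * parameters: for hdr.chunksize = 256 and hdr.hdroff = 0, chunk
	 * i = 3 has its header at (char *)pg + 3 * 256 + 0, i.e. 768 bytes
	 * into the page, and the mbuf recorded in that header is the one
	 * that was handed to the card and is freed here.
	 */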
2350
2351 hatm_stop_tpds(sc);
2352
2353 /*
2354 * Free all partially reassembled PDUs on all VCCs.
2355 */
2356 for (cid = 0; cid < HE_MAX_VCCS; cid++) {
2357 if (sc->vccs[cid] != NULL) {
2358 if (sc->vccs[cid]->chain != NULL) {
2359 m_freem(sc->vccs[cid]->chain);
2360 sc->vccs[cid]->chain = NULL;
2361 sc->vccs[cid]->last = NULL;
2362 }
2363 if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN |
2364 HE_VCC_TX_OPEN))) {
2365 hatm_tx_vcc_closed(sc, cid);
2366 uma_zfree(sc->vcc_zone, sc->vccs[cid]);
2367 sc->vccs[cid] = NULL;
2368 sc->open_vccs--;
2369 } else {
2370 sc->vccs[cid]->vflags = 0;
2371 sc->vccs[cid]->ntpds = 0;
2372 }
2373 }
2374 }
2375
2376 if (sc->rbp_s0.size != 0)
2377 bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
2378 if (sc->rbp_l0.size != 0)
2379 bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
2380 if (sc->rbp_s1.size != 0)
2381 bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
2382 if (sc->rbrq_0.size != 0)
2383 bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
2384 if (sc->rbrq_1.size != 0)
2385 bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);
2386
2387 bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
2388 bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
2389 bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2390}
2391
2392/************************************************************
2393 *
2394 * Driver infrastructure
2395 */
2396devclass_t hatm_devclass;
2397
2398static device_method_t hatm_methods[] = {
2399 DEVMETHOD(device_probe, hatm_probe),
2400 DEVMETHOD(device_attach, hatm_attach),
2401 DEVMETHOD(device_detach, hatm_detach),
2402 {0,0}
2403};
2404static driver_t hatm_driver = {
2405 "hatm",
2406 hatm_methods,
2407 sizeof(struct hatm_softc),
2408};
2409DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);
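
/*
 * Sketch for illustration only -- not the driver's hatm_probe().  A PCI
 * probe routine wired up through the method table above typically has
 * this shape; the vendor and device constants below are placeholders,
 * not the ForeHE's real IDs.
 */
#define	EXAMPLE_VENDOR_ID	0x1234		/* placeholder */
#define	EXAMPLE_DEVICE_ID	0x5678		/* placeholder */

static int
example_probe(device_t dev)
{
	if (pci_get_vendor(dev) == EXAMPLE_VENDOR_ID &&
	    pci_get_device(dev) == EXAMPLE_DEVICE_ID) {
		device_set_desc(dev, "example ATM adapter");
		return (0);
	}
	return (ENXIO);
}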