if_hatm.c (232874) -> if_hatm.c (254263)
1/*-
2 * Copyright (c) 2001-2003
3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * ForeHE driver.
30 *
31 * This file contains the module and driver infrastructure stuff as well
32 * as a couple of utility functions and the entire initialisation.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm.c 232874 2012-03-12 18:15:08Z scottl $");
36__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm.c 254263 2013-08-12 23:30:01Z scottl $");
37
38#include "opt_inet.h"
39#include "opt_natm.h"
40
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/kernel.h>
46#include <sys/bus.h>
47#include <sys/errno.h>
48#include <sys/conf.h>
49#include <sys/module.h>
50#include <sys/queue.h>
51#include <sys/syslog.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/condvar.h>
55#include <sys/sysctl.h>
56#include <vm/uma.h>
57
58#include <sys/sockio.h>
59#include <sys/mbuf.h>
60#include <sys/socket.h>
61
62#include <net/if.h>
63#include <net/if_media.h>
64#include <net/if_atm.h>
65#include <net/if_types.h>
66#include <net/route.h>
67#ifdef ENABLE_BPF
68#include <net/bpf.h>
69#endif
70#include <netinet/in.h>
71#include <netinet/if_atm.h>
72
73#include <machine/bus.h>
74#include <machine/resource.h>
75#include <sys/bus.h>
76#include <sys/rman.h>
77#include <dev/pci/pcireg.h>
78#include <dev/pci/pcivar.h>
79
80#include <dev/utopia/utopia.h>
81#include <dev/hatm/if_hatmconf.h>
82#include <dev/hatm/if_hatmreg.h>
83#include <dev/hatm/if_hatmvar.h>
84
85static const struct {
86 uint16_t vid;
87 uint16_t did;
88 const char *name;
89} hatm_devs[] = {
90 { 0x1127, 0x400,
91 "FORE HE" },
92 { 0, 0, NULL }
93};
94
95SYSCTL_DECL(_hw_atm);
96
97MODULE_DEPEND(hatm, utopia, 1, 1, 1);
98MODULE_DEPEND(hatm, pci, 1, 1, 1);
99MODULE_DEPEND(hatm, atm, 1, 1, 1);
100
101#define EEPROM_DELAY 400 /* microseconds */
102
103/* Read from EEPROM 0000 0011b */
104static const uint32_t readtab[] = {
105 HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK,
106 0,
107 HE_REGM_HOST_PROM_CLOCK,
108 0, /* 0 */
109 HE_REGM_HOST_PROM_CLOCK,
110 0, /* 0 */
111 HE_REGM_HOST_PROM_CLOCK,
112 0, /* 0 */
113 HE_REGM_HOST_PROM_CLOCK,
114 0, /* 0 */
115 HE_REGM_HOST_PROM_CLOCK,
116 0, /* 0 */
117 HE_REGM_HOST_PROM_CLOCK,
118 HE_REGM_HOST_PROM_DATA_IN, /* 0 */
119 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
120 HE_REGM_HOST_PROM_DATA_IN, /* 1 */
121 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
122 HE_REGM_HOST_PROM_DATA_IN, /* 1 */
123};
124static const uint32_t clocktab[] = {
125 0, HE_REGM_HOST_PROM_CLOCK,
126 0, HE_REGM_HOST_PROM_CLOCK,
127 0, HE_REGM_HOST_PROM_CLOCK,
128 0, HE_REGM_HOST_PROM_CLOCK,
129 0, HE_REGM_HOST_PROM_CLOCK,
130 0, HE_REGM_HOST_PROM_CLOCK,
131 0, HE_REGM_HOST_PROM_CLOCK,
132 0, HE_REGM_HOST_PROM_CLOCK,
133 0
134};
135
136/*
137 * Convert cell rate to ATM Forum format
138 */
139u_int
140hatm_cps2atmf(uint32_t pcr)
141{
142 u_int e;
143
144 if (pcr == 0)
145 return (0);
146 pcr <<= 9;
147 e = 0;
148 while (pcr > (1024 - 1)) {
149 e++;
150 pcr >>= 1;
151 }
152 return ((1 << 14) | (e << 9) | (pcr & 0x1ff));
153}
154u_int
155hatm_atmf2cps(uint32_t fcr)
156{
157 fcr &= 0x7fff;
158
159 return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512
160 * (fcr >> 14));
161}
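/*
 * Worked example for the two conversions above (illustrative note; the
 * numbers are not taken from this file): hatm_cps2atmf(1000) shifts
 * 1000 << 9 = 512000 and halves it nine times until the value, 1000,
 * lies in [512, 1023].  The encoded ATM Forum rate is therefore
 * (1 << 14) | (9 << 9) | (1000 & 0x1ff) = 0x4000 | 0x1200 | 0x1e8 = 0x53e8,
 * i.e. nz=1, e=9, m=488.  Converting back, hatm_atmf2cps(0x53e8) yields
 * (1 << 9) * (512 + 488) / 512 * 1 = 1000, so this particular rate
 * round-trips exactly; rates whose mantissa gets truncated only
 * round-trip approximately.
 */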
162
163/************************************************************
164 *
165 * Initialisation
166 */
167/*
168 * Probe for a HE controller
169 */
170static int
171hatm_probe(device_t dev)
172{
173 int i;
174
175 for (i = 0; hatm_devs[i].name; i++)
176 if (pci_get_vendor(dev) == hatm_devs[i].vid &&
177 pci_get_device(dev) == hatm_devs[i].did) {
178 device_set_desc(dev, hatm_devs[i].name);
179 return (BUS_PROBE_DEFAULT);
180 }
181 return (ENXIO);
182}
183
184/*
185 * Allocate and map DMA-able memory. We support only contiguous mappings.
186 */
187static void
188dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
189{
190 if (error)
191 return;
192 KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs));
193 KASSERT(segs[0].ds_addr <= 0xffffffffUL,
194 ("phys addr too large %lx", (u_long)segs[0].ds_addr));
195
196 *(bus_addr_t *)arg = segs[0].ds_addr;
197}
198static int
199hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem)
200{
201 int error;
202
203 mem->base = NULL;
204
205 /*
 206	 * Alignment does not work in the bus_dmamem_alloc function below
 207	 * on FreeBSD. malloc seems to align objects at least to the object
 208	 * size, so increase the size to the alignment if the size is less
 209	 * than the alignment.
210 * XXX on sparc64 this is (probably) not needed.
211 */
212 if (mem->size < mem->align)
213 mem->size = mem->align;
214
215 error = bus_dma_tag_create(sc->parent_tag, mem->align, 0,
216 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
217 NULL, NULL, mem->size, 1,
218 BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
219 NULL, NULL, &mem->tag);
220 if (error) {
221 if_printf(sc->ifp, "DMA tag create (%s)\n", what);
222 return (error);
223 }
224
225 error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map);
226 if (error) {
227 if_printf(sc->ifp, "DMA mem alloc (%s): %d\n",
228 what, error);
229 bus_dma_tag_destroy(mem->tag);
230 mem->base = NULL;
231 return (error);
232 }
233
234 error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size,
235 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
236 if (error) {
237 if_printf(sc->ifp, "DMA map load (%s): %d\n",
238 what, error);
239 bus_dmamem_free(mem->tag, mem->base, mem->map);
240 bus_dma_tag_destroy(mem->tag);
241 mem->base = NULL;
242 return (error);
243 }
244
245 DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size,
246 mem->align, mem->base, (u_long)mem->paddr));
247
248 return (0);
249}
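/*
 * Illustrative usage sketch (hypothetical sizes, not from this file):
 * the caller fills in only size and align; on success the helper fills
 * in tag, map, base and paddr, and on failure it leaves base == NULL so
 * that hatm_destroy_dmamem() below can be called unconditionally.
 *
 *	mem->size  = 1024 * sizeof(uint32_t);	/- hypothetical queue size -/
 *	mem->align = 4096;
 *	if ((error = hatm_alloc_dmamem(sc, "example queue", mem)) != 0)
 *		return (error);
 */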
250
251/*
 252 * Destroy all the resources of a DMA-able memory region.
253 */
254static void
255hatm_destroy_dmamem(struct dmamem *mem)
256{
257 if (mem->base != NULL) {
258 bus_dmamap_unload(mem->tag, mem->map);
259 bus_dmamem_free(mem->tag, mem->base, mem->map);
260 (void)bus_dma_tag_destroy(mem->tag);
261 mem->base = NULL;
262 }
263}
264
265/*
266 * Initialize/destroy DMA maps for the large pool 0
267 */
268static void
269hatm_destroy_rmaps(struct hatm_softc *sc)
270{
271 u_int b;
272
273 DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers..."));
274 if (sc->rmaps != NULL) {
275 for (b = 0; b < sc->lbufs_size; b++)
276 bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]);
277 free(sc->rmaps, M_DEVBUF);
278 }
279 if (sc->lbufs != NULL)
280 free(sc->lbufs, M_DEVBUF);
281}
282
283static void
284hatm_init_rmaps(struct hatm_softc *sc)
285{
286 u_int b;
287 int err;
288
289 DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers..."));
290 sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size,
291 M_DEVBUF, M_ZERO | M_WAITOK);
292
293 /* allocate and create the DMA maps for the large pool */
294 sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size,
295 M_DEVBUF, M_WAITOK);
296 for (b = 0; b < sc->lbufs_size; b++) {
297 err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]);
298 if (err != 0)
299 panic("bus_dmamap_create: %d\n", err);
300 }
301}
302
303/*
304 * Initialize and destroy small mbuf page pointers and pages
305 */
306static void
307hatm_destroy_smbufs(struct hatm_softc *sc)
308{
309 u_int i, b;
310 struct mbuf_page *pg;
311 struct mbuf_chunk_hdr *h;
312
313 if (sc->mbuf_pages != NULL) {
314 for (i = 0; i < sc->mbuf_npages; i++) {
315 pg = sc->mbuf_pages[i];
316 for (b = 0; b < pg->hdr.nchunks; b++) {
317 h = (struct mbuf_chunk_hdr *) ((char *)pg +
318 b * pg->hdr.chunksize + pg->hdr.hdroff);
319 if (h->flags & MBUF_CARD)
320 if_printf(sc->ifp,
321 "%s -- mbuf page=%u card buf %u\n",
322 __func__, i, b);
323 if (h->flags & MBUF_USED)
324 if_printf(sc->ifp,
325 "%s -- mbuf page=%u used buf %u\n",
326 __func__, i, b);
327 }
328 bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
329 bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
330 free(pg, M_DEVBUF);
331 }
332 free(sc->mbuf_pages, M_DEVBUF);
333 }
334}
335
336static void
337hatm_init_smbufs(struct hatm_softc *sc)
338{
339 sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) *
340 sc->mbuf_max_pages, M_DEVBUF, M_WAITOK);
341 sc->mbuf_npages = 0;
342}
343
344/*
345 * Initialize/destroy TPDs. This is called from attach/detach.
346 */
347static void
348hatm_destroy_tpds(struct hatm_softc *sc)
349{
350 struct tpd *t;
351
352 if (sc->tpds.base == NULL)
353 return;
354
355 DBG(sc, ATTACH, ("releasing TPDs ..."));
356 if (sc->tpd_nfree != sc->tpd_total)
357 if_printf(sc->ifp, "%u tpds still in use from %u\n",
358 sc->tpd_total - sc->tpd_nfree, sc->tpd_total);
359 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
360 SLIST_REMOVE_HEAD(&sc->tpd_free, link);
361 bus_dmamap_destroy(sc->tx_tag, t->map);
362 }
363 hatm_destroy_dmamem(&sc->tpds);
364 free(sc->tpd_used, M_DEVBUF);
365 DBG(sc, ATTACH, ("... done"));
366}
367static int
368hatm_init_tpds(struct hatm_softc *sc)
369{
370 int error;
371 u_int i;
372 struct tpd *t;
373
374 DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total));
375 error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds);
376 if (error != 0) {
377 DBG(sc, ATTACH, ("... dmamem error=%d", error));
378 return (error);
379 }
380
381 /* put all the TPDs on the free list and allocate DMA maps */
382 for (i = 0; i < sc->tpd_total; i++) {
383 t = TPD_ADDR(sc, i);
384 t->no = i;
385 t->mbuf = NULL;
386 error = bus_dmamap_create(sc->tx_tag, 0, &t->map);
387 if (error != 0) {
388 DBG(sc, ATTACH, ("... dmamap error=%d", error));
389 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
390 SLIST_REMOVE_HEAD(&sc->tpd_free, link);
391 bus_dmamap_destroy(sc->tx_tag, t->map);
392 }
393 hatm_destroy_dmamem(&sc->tpds);
394 return (error);
395 }
396
397 SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
398 }
399
400 /* allocate and zero bitmap */
401 sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8,
402 M_DEVBUF, M_ZERO | M_WAITOK);
403 sc->tpd_nfree = sc->tpd_total;
404
405 DBG(sc, ATTACH, ("... done"));
406
407 return (0);
408}
409
410/*
 411 * Free all the TPDs that were given to the card.
412 * An mbuf chain may be attached to a TPD - free it also and
413 * unload its associated DMA map.
414 */
415static void
416hatm_stop_tpds(struct hatm_softc *sc)
417{
418 u_int i;
419 struct tpd *t;
420
421 DBG(sc, ATTACH, ("free TPDs ..."));
422 for (i = 0; i < sc->tpd_total; i++) {
423 if (TPD_TST_USED(sc, i)) {
424 t = TPD_ADDR(sc, i);
425 if (t->mbuf) {
426 m_freem(t->mbuf);
427 t->mbuf = NULL;
428 bus_dmamap_unload(sc->tx_tag, t->map);
429 }
430 TPD_CLR_USED(sc, i);
431 SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
432 sc->tpd_nfree++;
433 }
434 }
435}
436
437/*
438 * This frees ALL resources of this interface and leaves the structure
439 * in an indeterminate state. This is called just before detaching or
440 * on a failed attach. No lock should be held.
441 */
442static void
443hatm_destroy(struct hatm_softc *sc)
444{
445 u_int cid;
446
447 bus_teardown_intr(sc->dev, sc->irqres, sc->ih);
448
449 hatm_destroy_rmaps(sc);
450 hatm_destroy_smbufs(sc);
451 hatm_destroy_tpds(sc);
452
453 if (sc->vcc_zone != NULL) {
454 for (cid = 0; cid < HE_MAX_VCCS; cid++)
455 if (sc->vccs[cid] != NULL)
456 uma_zfree(sc->vcc_zone, sc->vccs[cid]);
457 uma_zdestroy(sc->vcc_zone);
458 }
459
460 /*
461 * Release all memory allocated to the various queues and
 462 * status pages. These have their own flag which shows whether
463 * they are really allocated.
464 */
465 hatm_destroy_dmamem(&sc->irq_0.mem);
466 hatm_destroy_dmamem(&sc->rbp_s0.mem);
467 hatm_destroy_dmamem(&sc->rbp_l0.mem);
468 hatm_destroy_dmamem(&sc->rbp_s1.mem);
469 hatm_destroy_dmamem(&sc->rbrq_0.mem);
470 hatm_destroy_dmamem(&sc->rbrq_1.mem);
471 hatm_destroy_dmamem(&sc->tbrq.mem);
472 hatm_destroy_dmamem(&sc->tpdrq.mem);
473 hatm_destroy_dmamem(&sc->hsp_mem);
474
475 if (sc->irqres != NULL)
476 bus_release_resource(sc->dev, SYS_RES_IRQ,
477 sc->irqid, sc->irqres);
478
479 if (sc->tx_tag != NULL)
480 if (bus_dma_tag_destroy(sc->tx_tag))
481 if_printf(sc->ifp, "mbuf DMA tag busy\n");
482
483 if (sc->mbuf_tag != NULL)
484 if (bus_dma_tag_destroy(sc->mbuf_tag))
485 if_printf(sc->ifp, "mbuf DMA tag busy\n");
486
487 if (sc->parent_tag != NULL)
488 if (bus_dma_tag_destroy(sc->parent_tag))
489 if_printf(sc->ifp, "parent DMA tag busy\n");
490
491 if (sc->memres != NULL)
492 bus_release_resource(sc->dev, SYS_RES_MEMORY,
493 sc->memid, sc->memres);
494
495 sysctl_ctx_free(&sc->sysctl_ctx);
496
497 cv_destroy(&sc->cv_rcclose);
498 cv_destroy(&sc->vcc_cv);
499 mtx_destroy(&sc->mtx);
500
501 if (sc->ifp != NULL)
502 if_free(sc->ifp);
503}
504
505/*
506 * 4.4 Card reset
507 */
508static int
509hatm_reset(struct hatm_softc *sc)
510{
511 u_int v, count;
512
513 WRITE4(sc, HE_REGO_RESET_CNTL, 0x00);
514 BARRIER_W(sc);
515 WRITE4(sc, HE_REGO_RESET_CNTL, 0xff);
516 BARRIER_RW(sc);
517 count = 0;
518 while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) {
519 BARRIER_R(sc);
520 if (++count == 100) {
521 if_printf(sc->ifp, "reset failed\n");
522 return (ENXIO);
523 }
524 DELAY(1000);
525 }
526 return (0);
527}
528
529/*
530 * 4.5 Set Bus Width
531 */
532static void
533hatm_init_bus_width(struct hatm_softc *sc)
534{
535 uint32_t v, v1;
536
537 v = READ4(sc, HE_REGO_HOST_CNTL);
538 BARRIER_R(sc);
539 if (v & HE_REGM_HOST_BUS64) {
540 sc->pci64 = 1;
541 v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
542 v1 |= HE_PCIM_CTL0_64BIT;
543 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4);
544
545 v |= HE_REGM_HOST_DESC_RD64
546 | HE_REGM_HOST_DATA_RD64
547 | HE_REGM_HOST_DATA_WR64;
548 WRITE4(sc, HE_REGO_HOST_CNTL, v);
549 BARRIER_W(sc);
550 } else {
551 sc->pci64 = 0;
552 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
553 v &= ~HE_PCIM_CTL0_64BIT;
554 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
555 }
556}
557
558/*
 559 * 4.6 Set Host Endianness
560 */
561static void
562hatm_init_endianess(struct hatm_softc *sc)
563{
564 uint32_t v;
565
566 v = READ4(sc, HE_REGO_LB_SWAP);
567 BARRIER_R(sc);
568#if BYTE_ORDER == BIG_ENDIAN
569 v |= HE_REGM_LBSWAP_INTR_SWAP |
570 HE_REGM_LBSWAP_DESC_WR_SWAP |
571 HE_REGM_LBSWAP_BIG_ENDIAN;
572 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
573 HE_REGM_LBSWAP_DESC_RD_SWAP |
574 HE_REGM_LBSWAP_DATA_RD_SWAP);
575#else
576 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
577 HE_REGM_LBSWAP_DESC_RD_SWAP |
578 HE_REGM_LBSWAP_DATA_RD_SWAP |
579 HE_REGM_LBSWAP_INTR_SWAP |
580 HE_REGM_LBSWAP_DESC_WR_SWAP |
581 HE_REGM_LBSWAP_BIG_ENDIAN);
582#endif
583
584 if (sc->he622)
585 v |= HE_REGM_LBSWAP_XFER_SIZE;
586
587 WRITE4(sc, HE_REGO_LB_SWAP, v);
588 BARRIER_W(sc);
589}
590
591/*
592 * 4.7 Read EEPROM
593 */
594static uint8_t
595hatm_read_prom_byte(struct hatm_softc *sc, u_int addr)
596{
597 uint32_t val, tmp_read, byte_read;
598 u_int i, j;
599 int n;
600
601 val = READ4(sc, HE_REGO_HOST_CNTL);
602 val &= HE_REGM_HOST_PROM_BITS;
603 BARRIER_R(sc);
604
605 val |= HE_REGM_HOST_PROM_WREN;
606 WRITE4(sc, HE_REGO_HOST_CNTL, val);
607 BARRIER_W(sc);
608
609 /* send READ */
610 for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) {
611 WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]);
612 BARRIER_W(sc);
613 DELAY(EEPROM_DELAY);
614 }
615
616 /* send ADDRESS */
617 for (n = 7, j = 0; n >= 0; n--) {
618 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
619 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
620 BARRIER_W(sc);
621 DELAY(EEPROM_DELAY);
622 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
623 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
624 BARRIER_W(sc);
625 DELAY(EEPROM_DELAY);
626 }
627
628 val &= ~HE_REGM_HOST_PROM_WREN;
629 WRITE4(sc, HE_REGO_HOST_CNTL, val);
630 BARRIER_W(sc);
631
632 /* read DATA */
633 byte_read = 0;
634 for (n = 7, j = 0; n >= 0; n--) {
635 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
636 BARRIER_W(sc);
637 DELAY(EEPROM_DELAY);
638 tmp_read = READ4(sc, HE_REGO_HOST_CNTL);
639 byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT)
640 >> HE_REGS_HOST_PROM_DATA_OUT) << n);
641 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
642 BARRIER_W(sc);
643 DELAY(EEPROM_DELAY);
644 }
645 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
646 BARRIER_W(sc);
647 DELAY(EEPROM_DELAY);
648
649 return (byte_read);
650}
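/*
 * Note on the bit-banging above (illustrative summary): the serial
 * EEPROM is driven through individual bits of HE_REGO_HOST_CNTL.
 * readtab[] clocks out the fixed READ opcode 0000 0011b, MSB first,
 * with one clock high/low pair per bit; the first loop then shifts out
 * the 8 address bits a7..a0 on the DATA_IN bit, using clocktab[] for
 * the clock edges, and the second loop samples DATA_OUT once per clock
 * cycle to assemble the result d7..d0.  With EEPROM_DELAY = 400 us per
 * step this amounts to 51 delays, i.e. roughly 20 ms per byte read.
 */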
651
652static void
653hatm_init_read_eeprom(struct hatm_softc *sc)
654{
655 u_int n, count;
656 u_char byte;
657 uint32_t v;
658
659 for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) {
660 byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count);
661 if (n > 0 || byte != ' ')
662 sc->prod_id[n++] = byte;
663 }
664 while (n > 0 && sc->prod_id[n-1] == ' ')
665 n--;
666 sc->prod_id[n] = '\0';
667
668 for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) {
669 byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count);
670 if (n > 0 || byte != ' ')
671 sc->rev[n++] = byte;
672 }
673 while (n > 0 && sc->rev[n-1] == ' ')
674 n--;
675 sc->rev[n] = '\0';
676 IFP2IFATM(sc->ifp)->mib.hw_version = sc->rev[0];
677
678 IFP2IFATM(sc->ifp)->mib.serial = hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0;
679 IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8;
680 IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16;
681 IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24;
682
683 v = hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0;
684 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8;
685 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16;
686 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24;
687
688 switch (v) {
689 case HE_MEDIA_UTP155:
690 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
691 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
692 break;
693
694 case HE_MEDIA_MMF155:
695 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
696 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
697 break;
698
699 case HE_MEDIA_MMF622:
700 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_622;
701 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622;
702 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M;
703 sc->he622 = 1;
704 break;
705
706 case HE_MEDIA_SMF155:
707 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
708 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
709 break;
710
711 case HE_MEDIA_SMF622:
712 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_622;
713 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622;
714 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M;
715 sc->he622 = 1;
716 break;
717 }
718
719 IFP2IFATM(sc->ifp)->mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0);
720 IFP2IFATM(sc->ifp)->mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1);
721 IFP2IFATM(sc->ifp)->mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2);
722 IFP2IFATM(sc->ifp)->mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3);
723 IFP2IFATM(sc->ifp)->mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4);
724 IFP2IFATM(sc->ifp)->mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5);
725}
726
727/*
728 * Clear unused interrupt queue
729 */
730static void
731hatm_clear_irq(struct hatm_softc *sc, u_int group)
732{
733 WRITE4(sc, HE_REGO_IRQ_BASE(group), 0);
734 WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0);
735 WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0);
736 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
737}
738
739/*
740 * 4.10 Initialize interrupt queues
741 */
742static void
743hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group)
744{
745 u_int i;
746
747 if (q->size == 0) {
748 hatm_clear_irq(sc, group);
749 return;
750 }
751
752 q->group = group;
753 q->sc = sc;
754 q->irq = q->mem.base;
755 q->head = 0;
756 q->tailp = q->irq + (q->size - 1);
757 *q->tailp = 0;
758
759 for (i = 0; i < q->size; i++)
760 q->irq[i] = HE_REGM_ITYPE_INVALID;
761
762 WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr);
763 WRITE4(sc, HE_REGO_IRQ_HEAD(group),
764 ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
765 (q->thresh << HE_REGS_IRQ_HEAD_THRESH));
766 WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line);
767 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
768}
769
770/*
771 * 5.1.3 Initialize connection memory
772 */
773static void
774hatm_init_cm(struct hatm_softc *sc)
775{
776 u_int rsra, mlbm, rabr, numbuffs;
777 u_int tsra, tabr, mtpd;
778 u_int n;
779
780 for (n = 0; n < HE_CONFIG_TXMEM; n++)
781 WRITE_TCM4(sc, n, 0);
782 for (n = 0; n < HE_CONFIG_RXMEM; n++)
783 WRITE_RCM4(sc, n, 0);
784
785 numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs;
786
787 rsra = 0;
788 mlbm = ((rsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
789 rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
790 sc->rsrb = ((rabr + 2048) + (2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) &
791 ~(2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1);
792
793 tsra = 0;
794 sc->tsrb = tsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8;
795 sc->tsrc = sc->tsrb + IFP2IFATM(sc->ifp)->mib.max_vccs * 4;
796 sc->tsrd = sc->tsrc + IFP2IFATM(sc->ifp)->mib.max_vccs * 2;
797 tabr = sc->tsrd + IFP2IFATM(sc->ifp)->mib.max_vccs * 1;
798 mtpd = ((tabr + 1024) + (16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) &
799 ~(16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1);
800
801 DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
802 rsra, mlbm, rabr, sc->rsrb));
803 DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x",
804 tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd));
805
806 WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb);
807 WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc);
808 WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd);
809 WRITE4(sc, HE_REGO_TMABR_BA, tabr);
810 WRITE4(sc, HE_REGO_TPD_BA, mtpd);
811
812 WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb);
813 WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm);
814 WRITE4(sc, HE_REGO_RCMABR_BA, rabr);
815
816 BARRIER_W(sc);
817}
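/*
 * Worked example of the receive-side layout computed above, assuming
 * (hypothetically) max_vccs = 256 and numbuffs = 4096:
 *
 *	rsra = 0
 *	mlbm = ((0 + 256 * 8) + 0x7ff) & ~0x7ff                   = 0x0800
 *	rabr = ((0x800 + 4096 * 2) + 0x7ff) & ~0x7ff              = 0x2800
 *	rsrb = ((0x2800 + 2048) + (2 * 256 - 1)) & ~(2 * 256 - 1) = 0x3000
 *
 * i.e. each base address is rounded up to the next 0x800 boundary,
 * except RSRB, which is aligned to a 2 * max_vccs boundary.
 */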
818
819/*
820 * 5.1.4 Initialize Local buffer Pools
821 */
822static void
823hatm_init_rx_buffer_pool(struct hatm_softc *sc,
824 u_int num, /* bank */
825 u_int start, /* start row */
826 u_int numbuffs /* number of entries */
827)
828{
829 u_int row_size; /* bytes per row */
830 uint32_t row_addr; /* start address of this row */
831 u_int lbuf_size; /* bytes per lbuf */
832 u_int lbufs_per_row; /* number of lbufs per memory row */
833 uint32_t lbufd_index; /* index of lbuf descriptor */
834 uint32_t lbufd_addr; /* address of lbuf descriptor */
835 u_int lbuf_row_cnt; /* current lbuf in current row */
836 uint32_t lbuf_addr; /* address of current buffer */
837 u_int i;
838
839 row_size = sc->bytes_per_row;
840 row_addr = start * row_size;
841 lbuf_size = sc->cells_per_lbuf * 48;
842 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
843
844 /* descriptor index */
845 lbufd_index = num;
846
847 /* 2 words per entry */
848 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
849
850 /* write head of queue */
851 WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index);
852
853 lbuf_row_cnt = 0;
854 for (i = 0; i < numbuffs; i++) {
855 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
856
857 WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
858
859 lbufd_index += 2;
860 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
861
862 if (++lbuf_row_cnt == lbufs_per_row) {
863 lbuf_row_cnt = 0;
864 row_addr += row_size;
865 }
866
867 lbufd_addr += 2 * 2;
868 }
869
870 WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2);
871 WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs);
872
873 BARRIER_W(sc);
874}
875
876static void
877hatm_init_tx_buffer_pool(struct hatm_softc *sc,
878 u_int start, /* start row */
879 u_int numbuffs /* number of entries */
880)
881{
882 u_int row_size; /* bytes per row */
883 uint32_t row_addr; /* start address of this row */
884 u_int lbuf_size; /* bytes per lbuf */
885 u_int lbufs_per_row; /* number of lbufs per memory row */
886 uint32_t lbufd_index; /* index of lbuf descriptor */
887 uint32_t lbufd_addr; /* address of lbuf descriptor */
888 u_int lbuf_row_cnt; /* current lbuf in current row */
889 uint32_t lbuf_addr; /* address of current buffer */
890 u_int i;
891
892 row_size = sc->bytes_per_row;
893 row_addr = start * row_size;
894 lbuf_size = sc->cells_per_lbuf * 48;
895 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
896
897 /* descriptor index */
898 lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs;
899
900 /* 2 words per entry */
901 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
902
903 /* write head of queue */
904 WRITE4(sc, HE_REGO_TLBF_H, lbufd_index);
905
906 lbuf_row_cnt = 0;
907 for (i = 0; i < numbuffs; i++) {
908 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
909
910 WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
911 lbufd_index++;
912 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
913
914 if (++lbuf_row_cnt == lbufs_per_row) {
915 lbuf_row_cnt = 0;
916 row_addr += row_size;
917 }
918
919 lbufd_addr += 2;
920 }
921
922 WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1);
923 BARRIER_W(sc);
924}
925
926/*
927 * 5.1.5 Initialize Intermediate Receive Queues
928 */
929static void
930hatm_init_imed_queues(struct hatm_softc *sc)
931{
932 u_int n;
933
934 if (sc->he622) {
935 for (n = 0; n < 8; n++) {
936 WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f);
937 WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f);
938 }
939 } else {
940 for (n = 0; n < 8; n++) {
941 WRITE4(sc, HE_REGO_INMQ_S(n), n);
942 WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8);
943 }
944 }
945}
946
947/*
948 * 5.1.7 Init CS block
949 */
950static void
951hatm_init_cs_block(struct hatm_softc *sc)
952{
953 u_int n, i;
954 u_int clkfreg, cellrate, decr, tmp;
955 static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR;
956 static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL;
957 static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT;
958 static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR;
959 static const uint32_t rtatr[2] = HE_REGT_CS_RTATR;
960 static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC;
961 static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF;
962
963 /* Clear Rate Controller Start Times and Occupied Flags */
964 for (n = 0; n < 32; n++)
965 WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0);
966
967 clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
968 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
969 decr = cellrate / 32;
970
971 for (n = 0; n < 16; n++) {
972 tmp = clkfreg / cellrate;
973 WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1);
974 cellrate -= decr;
975 }
976
977 i = (sc->cells_per_lbuf == 2) ? 0
978 :(sc->cells_per_lbuf == 4) ? 1
979 : 2;
980
981 /* table 5.2 */
982 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]);
983 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]);
984 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]);
985 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]);
986 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]);
987
988 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]);
989 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]);
990 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]);
991
992 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]);
993 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]);
994
995 WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]);
996 WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]);
997
998 WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]);
999 WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]);
1000 WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]);
1001 WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]);
1002 WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]);
1003 WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]);
1004
1005 WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]);
1006 WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]);
1007
1008 WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8);
1009
1010 for (n = 0; n < 8; n++)
1011 WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0);
1012}
1013
1014/*
1015 * 5.1.8 CS Block Connection Memory Initialisation
1016 */
1017static void
1018hatm_init_cs_block_cm(struct hatm_softc *sc)
1019{
1020 u_int n, i;
1021 u_int expt, mant, etrm, wcr, ttnrm, tnrm;
1022 uint32_t rate;
1023 uint32_t clkfreq, cellrate, decr;
1024 uint32_t *rg, rtg, val = 0;
1025 uint64_t drate;
1026 u_int buf, buf_limit;
1027 uint32_t base = READ4(sc, HE_REGO_RCMABR_BA);
1028
1029 for (n = 0; n < HE_REGL_CM_GQTBL; n++)
1030 WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0);
1031 for (n = 0; n < HE_REGL_CM_RGTBL; n++)
1032 WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0);
1033
1034 tnrm = 0;
1035 for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) {
1036 expt = (n >> 5) & 0x1f;
1037 mant = ((n & 0x18) << 4) | 0x7f;
1038 wcr = (1 << expt) * (mant + 512) / 512;
1039 etrm = n & 0x7;
1040 ttnrm = wcr / 10 / (1 << etrm);
1041 if (ttnrm > 255)
1042 ttnrm = 255;
1043 else if(ttnrm < 2)
1044 ttnrm = 2;
1045 tnrm = (tnrm << 8) | (ttnrm & 0xff);
1046 if (n % 4 == 0)
1047 WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm);
1048 }
1049
1050 clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
1051 buf_limit = 4;
1052
1053 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1054 decr = cellrate / 32;
1055
1056 /* compute GRID top row in 1000 * cps */
1057 for (n = 0; n < 16; n++) {
1058 u_int interval = clkfreq / cellrate;
1059 sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval;
1060 cellrate -= decr;
1061 }
1062
1063 /* compute the other rows according to 2.4 */
1064 for (i = 1; i < 16; i++)
1065 for (n = 0; n < 16; n++)
1066 sc->rate_grid[i][n] = sc->rate_grid[i-1][n] /
1067 ((i < 14) ? 2 : 4);
1068
1069 /* first entry is line rate */
1070 n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M);
1071 expt = (n >> 9) & 0x1f;
1072 mant = n & 0x1f0;
1073 sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512;
1074
1075 /* now build the conversion table - each 32 bit word contains
1076 * two entries - this gives a total of 0x400 16 bit entries.
 1078	 * This table maps the truncated ATMF rate value into a grid index */
1078 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1079 rg = &sc->rate_grid[15][15];
1080
1081 for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) {
1082 /* unpack the ATMF rate */
1083 expt = rate >> 5;
1084 mant = (rate & 0x1f) << 4;
1085
1086 /* get the cell rate - minimum is 10 per second */
1087 drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512;
1088 if (drate < 10 * 1000)
1089 drate = 10 * 1000;
1090
1091 /* now look up the grid index */
1092 while (drate >= *rg && rg-- > &sc->rate_grid[0][0])
1093 ;
1094 rg++;
1095 rtg = rg - &sc->rate_grid[0][0];
1096
1097 /* now compute the buffer limit */
1098 buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000;
1099 if (buf == 0)
1100 buf = 1;
1101 else if (buf > buf_limit)
1102 buf = buf_limit;
1103
1104 /* make value */
1105 val = (val << 16) | (rtg << 8) | buf;
1106
1107 /* write */
1108 if (rate % 2 == 1)
1109 WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val);
1110 }
1111}
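/*
 * Summary of the conversion table built above (illustrative note): for
 * each of the 0x400 truncated ATM Forum rates the loop computes the
 * requested rate in units of 1/1000 cell/s (clamped to at least
 * 10 cells/s), picks the fastest entry of the 16x16 rate grid that does
 * not exceed it, and derives a local-buffer limit between 1 and
 * buf_limit that grows with the rate.  Grid index and buffer limit are
 * packed into a 16-bit entry, two of which are stored per 32-bit RCM
 * word.
 */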
1112
1113/*
1114 * Clear an unused receive group buffer pool
1115 */
1116static void
1117hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large)
1118{
1119 WRITE4(sc, HE_REGO_RBP_S(large, group), 0);
1120 WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1121 WRITE4(sc, HE_REGO_RBP_QI(large, group), 1);
1122 WRITE4(sc, HE_REGO_RBP_BL(large, group), 0);
1123}
1124
1125/*
1126 * Initialize a receive group buffer pool
1127 */
1128static void
1129hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group,
1130 u_int large)
1131{
1132 if (q->size == 0) {
1133 hatm_clear_rpool(sc, group, large);
1134 return;
1135 }
1136
1137 bzero(q->mem.base, q->mem.size);
1138 q->rbp = q->mem.base;
1139 q->head = q->tail = 0;
1140
1141 DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large],
1142 (u_long)q->mem.paddr));
1143
1144 WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr);
1145 WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1146 WRITE4(sc, HE_REGO_RBP_QI(large, group),
1147 ((q->size - 1) << HE_REGS_RBP_SIZE) |
1148 HE_REGM_RBP_INTR_ENB |
1149 (q->thresh << HE_REGS_RBP_THRESH));
1150 WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1);
1151}
1152
1153/*
1154 * Clear an unused receive buffer return queue
1155 */
1156static void
1157hatm_clear_rbrq(struct hatm_softc *sc, u_int group)
1158{
1159 WRITE4(sc, HE_REGO_RBRQ_ST(group), 0);
1160 WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1161 WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH));
1162 WRITE4(sc, HE_REGO_RBRQ_I(group), 0);
1163}
1164
1165/*
1166 * Initialize receive buffer return queue
1167 */
1168static void
1169hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
1170{
1171 if (rq->size == 0) {
1172 hatm_clear_rbrq(sc, group);
1173 return;
1174 }
1175
1176 rq->rbrq = rq->mem.base;
1177 rq->head = 0;
1178
1179 DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr));
1180
1181 WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr);
1182 WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1183 WRITE4(sc, HE_REGO_RBRQ_Q(group),
1184 (rq->thresh << HE_REGS_RBRQ_THRESH) |
1185 ((rq->size - 1) << HE_REGS_RBRQ_SIZE));
1186 WRITE4(sc, HE_REGO_RBRQ_I(group),
1187 (rq->tout << HE_REGS_RBRQ_TIME) |
1188 (rq->pcnt << HE_REGS_RBRQ_COUNT));
1189}
1190
1191/*
1192 * Clear an unused transmit buffer return queue N
1193 */
1194static void
1195hatm_clear_tbrq(struct hatm_softc *sc, u_int group)
1196{
1197 WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0);
1198 WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1199 WRITE4(sc, HE_REGO_TBRQ_S(group), 0);
1200 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1);
1201}
1202
1203/*
1204 * Initialize transmit buffer return queue N
1205 */
1206static void
1207hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group)
1208{
1209 if (tq->size == 0) {
1210 hatm_clear_tbrq(sc, group);
1211 return;
1212 }
1213
1214 tq->tbrq = tq->mem.base;
1215 tq->head = 0;
1216
1217 DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr));
1218
1219 WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr);
1220 WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1221 WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1);
1222 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh);
1223}
1224
1225/*
1226 * Initialize TPDRQ
1227 */
1228static void
1229hatm_init_tpdrq(struct hatm_softc *sc)
1230{
1231 struct hetpdrq *tq;
1232
1233 tq = &sc->tpdrq;
1234 tq->tpdrq = tq->mem.base;
1235 tq->tail = tq->head = 0;
1236
1237 DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr));
1238
1239 WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr);
1240 WRITE4(sc, HE_REGO_TPDRQ_T, 0);
1241 WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1);
1242}
1243
1244/*
1245 * Function can be called by the infrastructure to start the card.
1246 */
1247static void
1248hatm_init(void *p)
1249{
1250 struct hatm_softc *sc = p;
1251
1252 mtx_lock(&sc->mtx);
1253 hatm_stop(sc);
1254 hatm_initialize(sc);
1255 mtx_unlock(&sc->mtx);
1256}
1257
1258enum {
1259 CTL_ISTATS,
1260};
1261
1262/*
1263 * Sysctl handler
1264 */
1265static int
1266hatm_sysctl(SYSCTL_HANDLER_ARGS)
1267{
1268 struct hatm_softc *sc = arg1;
1269 uint32_t *ret;
1270 int error;
1271 size_t len;
1272
1273 switch (arg2) {
1274
1275 case CTL_ISTATS:
1276 len = sizeof(sc->istats);
1277 break;
1278
1279 default:
1280 panic("bad control code");
1281 }
1282
1283 ret = malloc(len, M_TEMP, M_WAITOK);
1284 mtx_lock(&sc->mtx);
1285
1286 switch (arg2) {
1287
1288 case CTL_ISTATS:
1289 sc->istats.mcc += READ4(sc, HE_REGO_MCC);
1290 sc->istats.oec += READ4(sc, HE_REGO_OEC);
1291 sc->istats.dcc += READ4(sc, HE_REGO_DCC);
1292 sc->istats.cec += READ4(sc, HE_REGO_CEC);
1293 bcopy(&sc->istats, ret, sizeof(sc->istats));
1294 break;
1295 }
1296 mtx_unlock(&sc->mtx);
1297
1298 error = SYSCTL_OUT(req, ret, len);
1299 free(ret, M_TEMP);
1300
1301 return (error);
1302}
1303
1304static int
1305kenv_getuint(struct hatm_softc *sc, const char *var,
1306 u_int *ptr, u_int def, int rw)
1307{
1308 char full[IFNAMSIZ + 3 + 20];
1309 char *val, *end;
1310 u_int u;
1311
1312 *ptr = def;
1313
1314 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1315 OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL)
1316 return (ENOMEM);
1317
1318 snprintf(full, sizeof(full), "hw.%s.%s",
1319 device_get_nameunit(sc->dev), var);
1320
1321 if ((val = getenv(full)) == NULL)
1322 return (0);
1323 u = strtoul(val, &end, 0);
1324 if (end == val || *end != '\0') {
1325 freeenv(val);
1326 return (EINVAL);
1327 }
1328 freeenv(val);
1329 if (bootverbose)
1330 if_printf(sc->ifp, "%s=%u\n", full, u);
1331 *ptr = u;
1332 return (0);
1333}
1334
1335/*
1336 * Set configurable parameters. Many of these are configurable via
1337 * kenv.
1338 */
1339static int
1340hatm_configure(struct hatm_softc *sc)
1341{
1342 /* Receive buffer pool 0 small */
1343 kenv_getuint(sc, "rbps0_size", &sc->rbp_s0.size,
1344 HE_CONFIG_RBPS0_SIZE, 0);
1345 kenv_getuint(sc, "rbps0_thresh", &sc->rbp_s0.thresh,
1346 HE_CONFIG_RBPS0_THRESH, 0);
1347 sc->rbp_s0.bsize = MBUF0_SIZE;
1348
1349 /* Receive buffer pool 0 large */
1350 kenv_getuint(sc, "rbpl0_size", &sc->rbp_l0.size,
1351 HE_CONFIG_RBPL0_SIZE, 0);
1352 kenv_getuint(sc, "rbpl0_thresh", &sc->rbp_l0.thresh,
1353 HE_CONFIG_RBPL0_THRESH, 0);
1354 sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;
1355
1356 /* Receive buffer return queue 0 */
1357 kenv_getuint(sc, "rbrq0_size", &sc->rbrq_0.size,
1358 HE_CONFIG_RBRQ0_SIZE, 0);
1359 kenv_getuint(sc, "rbrq0_thresh", &sc->rbrq_0.thresh,
1360 HE_CONFIG_RBRQ0_THRESH, 0);
1361 kenv_getuint(sc, "rbrq0_tout", &sc->rbrq_0.tout,
1362 HE_CONFIG_RBRQ0_TOUT, 0);
1363 kenv_getuint(sc, "rbrq0_pcnt", &sc->rbrq_0.pcnt,
1364 HE_CONFIG_RBRQ0_PCNT, 0);
1365
1366 /* Receive buffer pool 1 small */
1367 kenv_getuint(sc, "rbps1_size", &sc->rbp_s1.size,
1368 HE_CONFIG_RBPS1_SIZE, 0);
1369 kenv_getuint(sc, "rbps1_thresh", &sc->rbp_s1.thresh,
1370 HE_CONFIG_RBPS1_THRESH, 0);
1371 sc->rbp_s1.bsize = MBUF1_SIZE;
1372
1373 /* Receive buffer return queue 1 */
1374 kenv_getuint(sc, "rbrq1_size", &sc->rbrq_1.size,
1375 HE_CONFIG_RBRQ1_SIZE, 0);
1376 kenv_getuint(sc, "rbrq1_thresh", &sc->rbrq_1.thresh,
1377 HE_CONFIG_RBRQ1_THRESH, 0);
1378 kenv_getuint(sc, "rbrq1_tout", &sc->rbrq_1.tout,
1379 HE_CONFIG_RBRQ1_TOUT, 0);
1380 kenv_getuint(sc, "rbrq1_pcnt", &sc->rbrq_1.pcnt,
1381 HE_CONFIG_RBRQ1_PCNT, 0);
1382
1383 /* Interrupt queue 0 */
1384 kenv_getuint(sc, "irq0_size", &sc->irq_0.size,
1385 HE_CONFIG_IRQ0_SIZE, 0);
1386 kenv_getuint(sc, "irq0_thresh", &sc->irq_0.thresh,
1387 HE_CONFIG_IRQ0_THRESH, 0);
1388 sc->irq_0.line = HE_CONFIG_IRQ0_LINE;
1389
1390 /* Transmit buffer return queue 0 */
1391 kenv_getuint(sc, "tbrq0_size", &sc->tbrq.size,
1392 HE_CONFIG_TBRQ_SIZE, 0);
1393 kenv_getuint(sc, "tbrq0_thresh", &sc->tbrq.thresh,
1394 HE_CONFIG_TBRQ_THRESH, 0);
1395
1396 /* Transmit buffer ready queue */
1397 kenv_getuint(sc, "tpdrq_size", &sc->tpdrq.size,
1398 HE_CONFIG_TPDRQ_SIZE, 0);
1399 /* Max TPDs per VCC */
1400 kenv_getuint(sc, "tpdmax", &sc->max_tpd,
1401 HE_CONFIG_TPD_MAXCC, 0);
1402
1403 /* external mbuf pages */
1404 kenv_getuint(sc, "max_mbuf_pages", &sc->mbuf_max_pages,
1405 HE_CONFIG_MAX_MBUF_PAGES, 0);
1406
1407 /* mpsafe */
1408 kenv_getuint(sc, "mpsafe", &sc->mpsafe, 0, 0);
1409 if (sc->mpsafe != 0)
1410 sc->mpsafe = INTR_MPSAFE;
1411
1412 return (0);
1413}
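/*
 * All of the parameters above can be overridden before the driver
 * attaches, because kenv_getuint() looks up "hw.<nameunit>.<var>" in
 * the kernel environment.  For example (hypothetical values) in
 * /boot/loader.conf:
 *
 *	hw.hatm0.rbps0_size="2048"
 *	hw.hatm0.irq0_thresh="8"
 *
 * The same variables also appear as sysctl leaves under the driver's
 * sysctl tree, read-only unless the rw argument was set.
 */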
1414
1415#ifdef HATM_DEBUG
1416
1417/*
1418 * Get TSRs from connection memory
1419 */
1420static int
1421hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
1422{
1423 struct hatm_softc *sc = arg1;
1424 int error, i, j;
1425 uint32_t *val;
1426
1427 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);
1428
1429 mtx_lock(&sc->mtx);
1430 for (i = 0; i < HE_MAX_VCCS; i++)
1431 for (j = 0; j <= 14; j++)
1432 val[15 * i + j] = READ_TSR(sc, i, j);
1433 mtx_unlock(&sc->mtx);
1434
1435 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
1436 free(val, M_TEMP);
1437 if (error != 0 || req->newptr == NULL)
1438 return (error);
1439
1440 return (EPERM);
1441}
1442
1443/*
1444 * Get TPDs from connection memory
1445 */
1446static int
1447hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
1448{
1449 struct hatm_softc *sc = arg1;
1450 int error, i, j;
1451 uint32_t *val;
1452
1453 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);
1454
1455 mtx_lock(&sc->mtx);
1456 for (i = 0; i < HE_MAX_VCCS; i++)
1457 for (j = 0; j < 16; j++)
1458 val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
1459 mtx_unlock(&sc->mtx);
1460
1461 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
1462 free(val, M_TEMP);
1463 if (error != 0 || req->newptr == NULL)
1464 return (error);
1465
1466 return (EPERM);
1467}
1468
1469/*
1470 * Get mbox registers
1471 */
1472static int
1473hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
1474{
1475 struct hatm_softc *sc = arg1;
1476 int error, i;
1477 uint32_t *val;
1478
1479 val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);
1480
1481 mtx_lock(&sc->mtx);
1482 for (i = 0; i < HE_REGO_CS_END; i++)
1483 val[i] = READ_MBOX4(sc, i);
1484 mtx_unlock(&sc->mtx);
1485
1486 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
1487 free(val, M_TEMP);
1488 if (error != 0 || req->newptr == NULL)
1489 return (error);
1490
1491 return (EPERM);
1492}
1493
1494/*
1495 * Get connection memory
1496 */
1497static int
1498hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
1499{
1500 struct hatm_softc *sc = arg1;
1501 int error, i;
1502 uint32_t *val;
1503
1504 val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK);
1505
1506 mtx_lock(&sc->mtx);
1507 val[0] = READ4(sc, HE_REGO_RCMABR_BA);
1508 for (i = 0; i < HE_CONFIG_RXMEM; i++)
1509 val[i + 1] = READ_RCM4(sc, i);
1510 mtx_unlock(&sc->mtx);
1511
1512 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1));
1513 free(val, M_TEMP);
1514 if (error != 0 || req->newptr == NULL)
1515 return (error);
1516
1517 return (EPERM);
1518}
1519
1520/*
1521 * Get local buffer memory
1522 */
1523static int
1524hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS)
1525{
1526 struct hatm_softc *sc = arg1;
1527 int error, i;
1528 uint32_t *val;
1529 u_int bytes = (1 << 21);
1530
1531 val = malloc(bytes, M_TEMP, M_WAITOK);
1532
1533 mtx_lock(&sc->mtx);
1534 for (i = 0; i < bytes / 4; i++)
1535 val[i] = READ_LB4(sc, i);
1536 mtx_unlock(&sc->mtx);
1537
1538 error = SYSCTL_OUT(req, val, bytes);
1539 free(val, M_TEMP);
1540 if (error != 0 || req->newptr == NULL)
1541 return (error);
1542
1543 return (EPERM);
1544}
1545
1546/*
1547 * Get all card registers
1548 */
1549static int
1550hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS)
1551{
1552 struct hatm_softc *sc = arg1;
1553 int error, i;
1554 uint32_t *val;
1555
1556 val = malloc(HE_REGO_END, M_TEMP, M_WAITOK);
1557
1558 mtx_lock(&sc->mtx);
1559 for (i = 0; i < HE_REGO_END; i += 4)
1560 val[i / 4] = READ4(sc, i);
1561 mtx_unlock(&sc->mtx);
1562
1563 error = SYSCTL_OUT(req, val, HE_REGO_END);
1564 free(val, M_TEMP);
1565 if (error != 0 || req->newptr == NULL)
1566 return (error);
1567
1568 return (EPERM);
1569}
1570#endif
1571
1572/*
1573 * Suni register access
1574 */
1575/*
1576 * read at most n SUNI registers starting at reg into val
1577 */
1578static int
1579hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
1580{
1581 u_int i;
1582 struct hatm_softc *sc = ifatm->ifp->if_softc;
1583
1584 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1585 return (EINVAL);
1586 if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1587 *n = reg - (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4;
1588
1589 mtx_assert(&sc->mtx, MA_OWNED);
1590 for (i = 0; i < *n; i++)
1591 val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i));
1592
1593 return (0);
1594}
1595
1596/*
 1597 * set the bits selected by mask in register reg to the corresponding bits in val
1598 */
1599static int
1600hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
1601{
1602 uint32_t regval;
1603 struct hatm_softc *sc = ifatm->ifp->if_softc;
1604
1605 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1606 return (EINVAL);
1607
1608 mtx_assert(&sc->mtx, MA_OWNED);
1609 regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
1610 regval = (regval & ~mask) | (val & mask);
1611 WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);
1612
1613 return (0);
1614}
1615
1616static struct utopia_methods hatm_utopia_methods = {
1617 hatm_utopia_readregs,
1618 hatm_utopia_writereg,
1619};
1620
1621/*
1622 * Detach - if it is running, stop. Destroy.
1623 */
1624static int
1625hatm_detach(device_t dev)
1626{
1627 struct hatm_softc *sc = device_get_softc(dev);
1628
1629 mtx_lock(&sc->mtx);
1630 hatm_stop(sc);
1631 if (sc->utopia.state & UTP_ST_ATTACHED) {
1632 utopia_stop(&sc->utopia);
1633 utopia_detach(&sc->utopia);
1634 }
1635 mtx_unlock(&sc->mtx);
1636
1637 atm_ifdetach(sc->ifp);
1638
1639 hatm_destroy(sc);
1640
1641 return (0);
1642}
1643
1644/*
1645 * Attach to the device. Assume that no locking is needed here.
 1646 * All resources we allocate here are freed by calling hatm_destroy.
1647 */
1648static int
1649hatm_attach(device_t dev)
1650{
1651 struct hatm_softc *sc;
1652 int error;
1653 uint32_t v;
1654 struct ifnet *ifp;
1655
1656 sc = device_get_softc(dev);
1657
1658 ifp = sc->ifp = if_alloc(IFT_ATM);
1659 if (ifp == NULL) {
1660 device_printf(dev, "could not if_alloc()\n");
1661 return (ENOSPC);
1662 }
1663
1664 sc->dev = dev;
1665 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE155;
1666 IFP2IFATM(sc->ifp)->mib.serial = 0;
1667 IFP2IFATM(sc->ifp)->mib.hw_version = 0;
1668 IFP2IFATM(sc->ifp)->mib.sw_version = 0;
1669 IFP2IFATM(sc->ifp)->mib.vpi_bits = HE_CONFIG_VPI_BITS;
1670 IFP2IFATM(sc->ifp)->mib.vci_bits = HE_CONFIG_VCI_BITS;
1671 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
1672 IFP2IFATM(sc->ifp)->mib.max_vccs = HE_MAX_VCCS;
1673 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
1674 sc->he622 = 0;
1675 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
1676
1677 SLIST_INIT(&sc->tpd_free);
1678
1679 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1680 cv_init(&sc->vcc_cv, "HEVCCcv");
1681 cv_init(&sc->cv_rcclose, "RCClose");
1682
1683 sysctl_ctx_init(&sc->sysctl_ctx);
1684
1685 /*
1686 * 4.2 BIOS Configuration
1687 */
1688 v = pci_read_config(dev, PCIR_COMMAND, 2);
37
38#include "opt_inet.h"
39#include "opt_natm.h"
40
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/kernel.h>
46#include <sys/bus.h>
47#include <sys/errno.h>
48#include <sys/conf.h>
49#include <sys/module.h>
50#include <sys/queue.h>
51#include <sys/syslog.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/condvar.h>
55#include <sys/sysctl.h>
56#include <vm/uma.h>
57
58#include <sys/sockio.h>
59#include <sys/mbuf.h>
60#include <sys/socket.h>
61
62#include <net/if.h>
63#include <net/if_media.h>
64#include <net/if_atm.h>
65#include <net/if_types.h>
66#include <net/route.h>
67#ifdef ENABLE_BPF
68#include <net/bpf.h>
69#endif
70#include <netinet/in.h>
71#include <netinet/if_atm.h>
72
73#include <machine/bus.h>
74#include <machine/resource.h>
75#include <sys/bus.h>
76#include <sys/rman.h>
77#include <dev/pci/pcireg.h>
78#include <dev/pci/pcivar.h>
79
80#include <dev/utopia/utopia.h>
81#include <dev/hatm/if_hatmconf.h>
82#include <dev/hatm/if_hatmreg.h>
83#include <dev/hatm/if_hatmvar.h>
84
85static const struct {
86 uint16_t vid;
87 uint16_t did;
88 const char *name;
89} hatm_devs[] = {
90 { 0x1127, 0x400,
91 "FORE HE" },
92 { 0, 0, NULL }
93};
94
95SYSCTL_DECL(_hw_atm);
96
97MODULE_DEPEND(hatm, utopia, 1, 1, 1);
98MODULE_DEPEND(hatm, pci, 1, 1, 1);
99MODULE_DEPEND(hatm, atm, 1, 1, 1);
100
101#define EEPROM_DELAY 400 /* microseconds */
102
103/* Read from EEPROM 0000 0011b */
104static const uint32_t readtab[] = {
105 HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK,
106 0,
107 HE_REGM_HOST_PROM_CLOCK,
108 0, /* 0 */
109 HE_REGM_HOST_PROM_CLOCK,
110 0, /* 0 */
111 HE_REGM_HOST_PROM_CLOCK,
112 0, /* 0 */
113 HE_REGM_HOST_PROM_CLOCK,
114 0, /* 0 */
115 HE_REGM_HOST_PROM_CLOCK,
116 0, /* 0 */
117 HE_REGM_HOST_PROM_CLOCK,
118 HE_REGM_HOST_PROM_DATA_IN, /* 0 */
119 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
120 HE_REGM_HOST_PROM_DATA_IN, /* 1 */
121 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
122 HE_REGM_HOST_PROM_DATA_IN, /* 1 */
123};
124static const uint32_t clocktab[] = {
125 0, HE_REGM_HOST_PROM_CLOCK,
126 0, HE_REGM_HOST_PROM_CLOCK,
127 0, HE_REGM_HOST_PROM_CLOCK,
128 0, HE_REGM_HOST_PROM_CLOCK,
129 0, HE_REGM_HOST_PROM_CLOCK,
130 0, HE_REGM_HOST_PROM_CLOCK,
131 0, HE_REGM_HOST_PROM_CLOCK,
132 0, HE_REGM_HOST_PROM_CLOCK,
133 0
134};
135
136/*
137 * Convert cell rate to ATM Forum format
138 */
139u_int
140hatm_cps2atmf(uint32_t pcr)
141{
142 u_int e;
143
144 if (pcr == 0)
145 return (0);
146 pcr <<= 9;
147 e = 0;
148 while (pcr > (1024 - 1)) {
149 e++;
150 pcr >>= 1;
151 }
152 return ((1 << 14) | (e << 9) | (pcr & 0x1ff));
153}
154u_int
155hatm_atmf2cps(uint32_t fcr)
156{
157 fcr &= 0x7fff;
158
159 return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512
160 * (fcr >> 14));
161}
162
163/************************************************************
164 *
165 * Initialisation
166 */
167/*
168 * Probe for a HE controller
169 */
170static int
171hatm_probe(device_t dev)
172{
173 int i;
174
175 for (i = 0; hatm_devs[i].name; i++)
176 if (pci_get_vendor(dev) == hatm_devs[i].vid &&
177 pci_get_device(dev) == hatm_devs[i].did) {
178 device_set_desc(dev, hatm_devs[i].name);
179 return (BUS_PROBE_DEFAULT);
180 }
181 return (ENXIO);
182}
183
184/*
185 * Allocate and map DMA-able memory. We support only contiguous mappings.
186 */
187static void
188dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
189{
190 if (error)
191 return;
192 KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs));
193 KASSERT(segs[0].ds_addr <= 0xffffffffUL,
194 ("phys addr too large %lx", (u_long)segs[0].ds_addr));
195
196 *(bus_addr_t *)arg = segs[0].ds_addr;
197}
198static int
199hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem)
200{
201 int error;
202
203 mem->base = NULL;
204
205 /*
206 * Alignement does not work in the bus_dmamem_alloc function below
207 * on FreeBSD. malloc seems to align objects at least to the object
208 * size so increase the size to the alignment if the size is lesser
209 * than the alignemnt.
210 * XXX on sparc64 this is (probably) not needed.
211 */
212 if (mem->size < mem->align)
213 mem->size = mem->align;
214
215 error = bus_dma_tag_create(sc->parent_tag, mem->align, 0,
216 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
217 NULL, NULL, mem->size, 1,
218 BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
219 NULL, NULL, &mem->tag);
220 if (error) {
221 if_printf(sc->ifp, "DMA tag create (%s)\n", what);
222 return (error);
223 }
224
225 error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map);
226 if (error) {
227 if_printf(sc->ifp, "DMA mem alloc (%s): %d\n",
228 what, error);
229 bus_dma_tag_destroy(mem->tag);
230 mem->base = NULL;
231 return (error);
232 }
233
234 error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size,
235 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
236 if (error) {
237 if_printf(sc->ifp, "DMA map load (%s): %d\n",
238 what, error);
239 bus_dmamem_free(mem->tag, mem->base, mem->map);
240 bus_dma_tag_destroy(mem->tag);
241 mem->base = NULL;
242 return (error);
243 }
244
245 DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size,
246 mem->align, mem->base, (u_long)mem->paddr));
247
248 return (0);
249}
250
251/*
252 * Destroy all the resources of an DMA-able memory region.
253 */
254static void
255hatm_destroy_dmamem(struct dmamem *mem)
256{
257 if (mem->base != NULL) {
258 bus_dmamap_unload(mem->tag, mem->map);
259 bus_dmamem_free(mem->tag, mem->base, mem->map);
260 (void)bus_dma_tag_destroy(mem->tag);
261 mem->base = NULL;
262 }
263}
264
265/*
266 * Initialize/destroy DMA maps for the large pool 0
267 */
268static void
269hatm_destroy_rmaps(struct hatm_softc *sc)
270{
271 u_int b;
272
273 DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers..."));
274 if (sc->rmaps != NULL) {
275 for (b = 0; b < sc->lbufs_size; b++)
276 bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]);
277 free(sc->rmaps, M_DEVBUF);
278 }
279 if (sc->lbufs != NULL)
280 free(sc->lbufs, M_DEVBUF);
281}
282
283static void
284hatm_init_rmaps(struct hatm_softc *sc)
285{
286 u_int b;
287 int err;
288
289 DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers..."));
290 sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size,
291 M_DEVBUF, M_ZERO | M_WAITOK);
292
293 /* allocate and create the DMA maps for the large pool */
294 sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size,
295 M_DEVBUF, M_WAITOK);
296 for (b = 0; b < sc->lbufs_size; b++) {
297 err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]);
298 if (err != 0)
299 panic("bus_dmamap_create: %d\n", err);
300 }
301}
302
303/*
304 * Initialize and destroy small mbuf page pointers and pages
305 */
306static void
307hatm_destroy_smbufs(struct hatm_softc *sc)
308{
309 u_int i, b;
310 struct mbuf_page *pg;
311 struct mbuf_chunk_hdr *h;
312
313 if (sc->mbuf_pages != NULL) {
314 for (i = 0; i < sc->mbuf_npages; i++) {
315 pg = sc->mbuf_pages[i];
316 for (b = 0; b < pg->hdr.nchunks; b++) {
317 h = (struct mbuf_chunk_hdr *) ((char *)pg +
318 b * pg->hdr.chunksize + pg->hdr.hdroff);
319 if (h->flags & MBUF_CARD)
320 if_printf(sc->ifp,
321 "%s -- mbuf page=%u card buf %u\n",
322 __func__, i, b);
323 if (h->flags & MBUF_USED)
324 if_printf(sc->ifp,
325 "%s -- mbuf page=%u used buf %u\n",
326 __func__, i, b);
327 }
328 bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
329 bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
330 free(pg, M_DEVBUF);
331 }
332 free(sc->mbuf_pages, M_DEVBUF);
333 }
334}
335
336static void
337hatm_init_smbufs(struct hatm_softc *sc)
338{
339 sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) *
340 sc->mbuf_max_pages, M_DEVBUF, M_WAITOK);
341 sc->mbuf_npages = 0;
342}
343
344/*
345 * Initialize/destroy TPDs. This is called from attach/detach.
346 */
347static void
348hatm_destroy_tpds(struct hatm_softc *sc)
349{
350 struct tpd *t;
351
352 if (sc->tpds.base == NULL)
353 return;
354
355 DBG(sc, ATTACH, ("releasing TPDs ..."));
356 if (sc->tpd_nfree != sc->tpd_total)
357 if_printf(sc->ifp, "%u tpds still in use from %u\n",
358 sc->tpd_total - sc->tpd_nfree, sc->tpd_total);
359 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
360 SLIST_REMOVE_HEAD(&sc->tpd_free, link);
361 bus_dmamap_destroy(sc->tx_tag, t->map);
362 }
363 hatm_destroy_dmamem(&sc->tpds);
364 free(sc->tpd_used, M_DEVBUF);
365 DBG(sc, ATTACH, ("... done"));
366}
367static int
368hatm_init_tpds(struct hatm_softc *sc)
369{
370 int error;
371 u_int i;
372 struct tpd *t;
373
374 DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total));
375 error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds);
376 if (error != 0) {
377 DBG(sc, ATTACH, ("... dmamem error=%d", error));
378 return (error);
379 }
380
381 /* put all the TPDs on the free list and allocate DMA maps */
382 for (i = 0; i < sc->tpd_total; i++) {
383 t = TPD_ADDR(sc, i);
384 t->no = i;
385 t->mbuf = NULL;
386 error = bus_dmamap_create(sc->tx_tag, 0, &t->map);
387 if (error != 0) {
388 DBG(sc, ATTACH, ("... dmamap error=%d", error));
389 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
390 SLIST_REMOVE_HEAD(&sc->tpd_free, link);
391 bus_dmamap_destroy(sc->tx_tag, t->map);
392 }
393 hatm_destroy_dmamem(&sc->tpds);
394 return (error);
395 }
396
397 SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
398 }
399
400 /* allocate and zero bitmap */
401 sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8,
402 M_DEVBUF, M_ZERO | M_WAITOK);
403 sc->tpd_nfree = sc->tpd_total;
404
405 DBG(sc, ATTACH, ("... done"));
406
407 return (0);
408}
409
410/*
411 * Free all the TPDs that were given to the card.
412 * An mbuf chain may be attached to a TPD - free it also and
413 * unload its associated DMA map.
414 */
415static void
416hatm_stop_tpds(struct hatm_softc *sc)
417{
418 u_int i;
419 struct tpd *t;
420
421 DBG(sc, ATTACH, ("free TPDs ..."));
422 for (i = 0; i < sc->tpd_total; i++) {
423 if (TPD_TST_USED(sc, i)) {
424 t = TPD_ADDR(sc, i);
425 if (t->mbuf) {
426 m_freem(t->mbuf);
427 t->mbuf = NULL;
428 bus_dmamap_unload(sc->tx_tag, t->map);
429 }
430 TPD_CLR_USED(sc, i);
431 SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
432 sc->tpd_nfree++;
433 }
434 }
435}
436
437/*
438 * This frees ALL resources of this interface and leaves the structure
439 * in an indeterminate state. This is called just before detaching or
440 * on a failed attach. No lock should be held.
441 */
442static void
443hatm_destroy(struct hatm_softc *sc)
444{
445 u_int cid;
446
447 bus_teardown_intr(sc->dev, sc->irqres, sc->ih);
448
449 hatm_destroy_rmaps(sc);
450 hatm_destroy_smbufs(sc);
451 hatm_destroy_tpds(sc);
452
453 if (sc->vcc_zone != NULL) {
454 for (cid = 0; cid < HE_MAX_VCCS; cid++)
455 if (sc->vccs[cid] != NULL)
456 uma_zfree(sc->vcc_zone, sc->vccs[cid]);
457 uma_zdestroy(sc->vcc_zone);
458 }
459
460 /*
461 * Release all memory allocated to the various queues and
462 * status pages. These have their own flag which shows whether
463 * they are really allocated.
464 */
465 hatm_destroy_dmamem(&sc->irq_0.mem);
466 hatm_destroy_dmamem(&sc->rbp_s0.mem);
467 hatm_destroy_dmamem(&sc->rbp_l0.mem);
468 hatm_destroy_dmamem(&sc->rbp_s1.mem);
469 hatm_destroy_dmamem(&sc->rbrq_0.mem);
470 hatm_destroy_dmamem(&sc->rbrq_1.mem);
471 hatm_destroy_dmamem(&sc->tbrq.mem);
472 hatm_destroy_dmamem(&sc->tpdrq.mem);
473 hatm_destroy_dmamem(&sc->hsp_mem);
474
475 if (sc->irqres != NULL)
476 bus_release_resource(sc->dev, SYS_RES_IRQ,
477 sc->irqid, sc->irqres);
478
479 if (sc->tx_tag != NULL)
480 if (bus_dma_tag_destroy(sc->tx_tag))
481 if_printf(sc->ifp, "mbuf DMA tag busy\n");
482
483 if (sc->mbuf_tag != NULL)
484 if (bus_dma_tag_destroy(sc->mbuf_tag))
485 if_printf(sc->ifp, "mbuf DMA tag busy\n");
486
487 if (sc->parent_tag != NULL)
488 if (bus_dma_tag_destroy(sc->parent_tag))
489 if_printf(sc->ifp, "parent DMA tag busy\n");
490
491 if (sc->memres != NULL)
492 bus_release_resource(sc->dev, SYS_RES_MEMORY,
493 sc->memid, sc->memres);
494
495 sysctl_ctx_free(&sc->sysctl_ctx);
496
497 cv_destroy(&sc->cv_rcclose);
498 cv_destroy(&sc->vcc_cv);
499 mtx_destroy(&sc->mtx);
500
501 if (sc->ifp != NULL)
502 if_free(sc->ifp);
503}
504
505/*
506 * 4.4 Card reset
507 */
508static int
509hatm_reset(struct hatm_softc *sc)
510{
511 u_int v, count;
512
513 WRITE4(sc, HE_REGO_RESET_CNTL, 0x00);
514 BARRIER_W(sc);
515 WRITE4(sc, HE_REGO_RESET_CNTL, 0xff);
516 BARRIER_RW(sc);
517 count = 0;
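	/*
	 * Poll for the reset-complete bit. With at most 100 iterations of
	 * DELAY(1000) (microseconds) this amounts to roughly a 100 ms timeout.
	 */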
518 while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) {
519 BARRIER_R(sc);
520 if (++count == 100) {
521 if_printf(sc->ifp, "reset failed\n");
522 return (ENXIO);
523 }
524 DELAY(1000);
525 }
526 return (0);
527}
528
529/*
530 * 4.5 Set Bus Width
531 */
532static void
533hatm_init_bus_width(struct hatm_softc *sc)
534{
535 uint32_t v, v1;
536
537 v = READ4(sc, HE_REGO_HOST_CNTL);
538 BARRIER_R(sc);
539 if (v & HE_REGM_HOST_BUS64) {
540 sc->pci64 = 1;
541 v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
542 v1 |= HE_PCIM_CTL0_64BIT;
543 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4);
544
545 v |= HE_REGM_HOST_DESC_RD64
546 | HE_REGM_HOST_DATA_RD64
547 | HE_REGM_HOST_DATA_WR64;
548 WRITE4(sc, HE_REGO_HOST_CNTL, v);
549 BARRIER_W(sc);
550 } else {
551 sc->pci64 = 0;
552 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
553 v &= ~HE_PCIM_CTL0_64BIT;
554 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
555 }
556}
557
558/*
559 * 4.6 Set Host Endianness
560 */
561static void
562hatm_init_endianess(struct hatm_softc *sc)
563{
564 uint32_t v;
565
566 v = READ4(sc, HE_REGO_LB_SWAP);
567 BARRIER_R(sc);
568#if BYTE_ORDER == BIG_ENDIAN
569 v |= HE_REGM_LBSWAP_INTR_SWAP |
570 HE_REGM_LBSWAP_DESC_WR_SWAP |
571 HE_REGM_LBSWAP_BIG_ENDIAN;
572 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
573 HE_REGM_LBSWAP_DESC_RD_SWAP |
574 HE_REGM_LBSWAP_DATA_RD_SWAP);
575#else
576 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
577 HE_REGM_LBSWAP_DESC_RD_SWAP |
578 HE_REGM_LBSWAP_DATA_RD_SWAP |
579 HE_REGM_LBSWAP_INTR_SWAP |
580 HE_REGM_LBSWAP_DESC_WR_SWAP |
581 HE_REGM_LBSWAP_BIG_ENDIAN);
582#endif
583
584 if (sc->he622)
585 v |= HE_REGM_LBSWAP_XFER_SIZE;
586
587 WRITE4(sc, HE_REGO_LB_SWAP, v);
588 BARRIER_W(sc);
589}
590
591/*
592 * 4.7 Read EEPROM
593 */
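/*
 * Descriptive note on the routine below: the PROM is accessed bit-serially
 * through the HOST_CNTL register. The read opcode from readtab[] is shifted
 * out first, then the eight address bits (MSB first, one bit per pair of
 * clocktab[] writes), and finally eight data bits are sampled from the
 * PROM_DATA_OUT field, one per clock cycle.
 */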
594static uint8_t
595hatm_read_prom_byte(struct hatm_softc *sc, u_int addr)
596{
597 uint32_t val, tmp_read, byte_read;
598 u_int i, j;
599 int n;
600
601 val = READ4(sc, HE_REGO_HOST_CNTL);
602 val &= HE_REGM_HOST_PROM_BITS;
603 BARRIER_R(sc);
604
605 val |= HE_REGM_HOST_PROM_WREN;
606 WRITE4(sc, HE_REGO_HOST_CNTL, val);
607 BARRIER_W(sc);
608
609 /* send READ */
610 for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) {
611 WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]);
612 BARRIER_W(sc);
613 DELAY(EEPROM_DELAY);
614 }
615
616 /* send ADDRESS */
617 for (n = 7, j = 0; n >= 0; n--) {
618 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
619 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
620 BARRIER_W(sc);
621 DELAY(EEPROM_DELAY);
622 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
623 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
624 BARRIER_W(sc);
625 DELAY(EEPROM_DELAY);
626 }
627
628 val &= ~HE_REGM_HOST_PROM_WREN;
629 WRITE4(sc, HE_REGO_HOST_CNTL, val);
630 BARRIER_W(sc);
631
632 /* read DATA */
633 byte_read = 0;
634 for (n = 7, j = 0; n >= 0; n--) {
635 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
636 BARRIER_W(sc);
637 DELAY(EEPROM_DELAY);
638 tmp_read = READ4(sc, HE_REGO_HOST_CNTL);
639 byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT)
640 >> HE_REGS_HOST_PROM_DATA_OUT) << n);
641 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
642 BARRIER_W(sc);
643 DELAY(EEPROM_DELAY);
644 }
645 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
646 BARRIER_W(sc);
647 DELAY(EEPROM_DELAY);
648
649 return (byte_read);
650}
651
652static void
653hatm_init_read_eeprom(struct hatm_softc *sc)
654{
655 u_int n, count;
656 u_char byte;
657 uint32_t v;
658
659 for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) {
660 byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count);
661 if (n > 0 || byte != ' ')
662 sc->prod_id[n++] = byte;
663 }
664 while (n > 0 && sc->prod_id[n-1] == ' ')
665 n--;
666 sc->prod_id[n] = '\0';
667
668 for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) {
669 byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count);
670 if (n > 0 || byte != ' ')
671 sc->rev[n++] = byte;
672 }
673 while (n > 0 && sc->rev[n-1] == ' ')
674 n--;
675 sc->rev[n] = '\0';
676 IFP2IFATM(sc->ifp)->mib.hw_version = sc->rev[0];
677
678 IFP2IFATM(sc->ifp)->mib.serial = hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0;
679 IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8;
680 IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16;
681 IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24;
682
683 v = hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0;
684 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8;
685 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16;
686 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24;
687
688 switch (v) {
689 case HE_MEDIA_UTP155:
690 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
691 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
692 break;
693
694 case HE_MEDIA_MMF155:
695 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
696 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
697 break;
698
699 case HE_MEDIA_MMF622:
700 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_622;
701 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622;
702 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M;
703 sc->he622 = 1;
704 break;
705
706 case HE_MEDIA_SMF155:
707 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
708 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
709 break;
710
711 case HE_MEDIA_SMF622:
712 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_622;
713 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622;
714 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M;
715 sc->he622 = 1;
716 break;
717 }
718
719 IFP2IFATM(sc->ifp)->mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0);
720 IFP2IFATM(sc->ifp)->mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1);
721 IFP2IFATM(sc->ifp)->mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2);
722 IFP2IFATM(sc->ifp)->mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3);
723 IFP2IFATM(sc->ifp)->mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4);
724 IFP2IFATM(sc->ifp)->mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5);
725}
726
727/*
728 * Clear unused interrupt queue
729 */
730static void
731hatm_clear_irq(struct hatm_softc *sc, u_int group)
732{
733 WRITE4(sc, HE_REGO_IRQ_BASE(group), 0);
734 WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0);
735 WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0);
736 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
737}
738
739/*
740 * 4.10 Initialize interrupt queues
741 */
742static void
743hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group)
744{
745 u_int i;
746
747 if (q->size == 0) {
748 hatm_clear_irq(sc, group);
749 return;
750 }
751
752 q->group = group;
753 q->sc = sc;
754 q->irq = q->mem.base;
755 q->head = 0;
756 q->tailp = q->irq + (q->size - 1);
757 *q->tailp = 0;
758
759 for (i = 0; i < q->size; i++)
760 q->irq[i] = HE_REGM_ITYPE_INVALID;
761
762 WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr);
763 WRITE4(sc, HE_REGO_IRQ_HEAD(group),
764 ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
765 (q->thresh << HE_REGS_IRQ_HEAD_THRESH));
766 WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line);
767 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
768}
769
770/*
771 * 5.1.3 Initialize connection memory
772 */
773static void
774hatm_init_cm(struct hatm_softc *sc)
775{
776 u_int rsra, mlbm, rabr, numbuffs;
777 u_int tsra, tabr, mtpd;
778 u_int n;
779
780 for (n = 0; n < HE_CONFIG_TXMEM; n++)
781 WRITE_TCM4(sc, n, 0);
782 for (n = 0; n < HE_CONFIG_RXMEM; n++)
783 WRITE_RCM4(sc, n, 0);
784
785 numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs;
786
787 rsra = 0;
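	/*
	 * The receive-side regions (RSRA, local buffer memory map, ABR
	 * region, RSRB) are laid out back to back; the (x + 0x7ff) & ~0x7ff
	 * expressions round a base up to the next multiple of 0x800, while
	 * the RSRB and TPD bases are rounded up to multiples derived from
	 * the VCC count (descriptive summary of the computations below).
	 */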
788 mlbm = ((rsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
789 rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
790 sc->rsrb = ((rabr + 2048) + (2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) &
791 ~(2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1);
792
793 tsra = 0;
794 sc->tsrb = tsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8;
795 sc->tsrc = sc->tsrb + IFP2IFATM(sc->ifp)->mib.max_vccs * 4;
796 sc->tsrd = sc->tsrc + IFP2IFATM(sc->ifp)->mib.max_vccs * 2;
797 tabr = sc->tsrd + IFP2IFATM(sc->ifp)->mib.max_vccs * 1;
798 mtpd = ((tabr + 1024) + (16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) &
799 ~(16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1);
800
801 DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
802 rsra, mlbm, rabr, sc->rsrb));
803 DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x",
804 tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd));
805
806 WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb);
807 WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc);
808 WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd);
809 WRITE4(sc, HE_REGO_TMABR_BA, tabr);
810 WRITE4(sc, HE_REGO_TPD_BA, mtpd);
811
812 WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb);
813 WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm);
814 WRITE4(sc, HE_REGO_RCMABR_BA, rabr);
815
816 BARRIER_W(sc);
817}
818
819/*
820 * 5.1.4 Initialize Local buffer Pools
821 */
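/*
 * Note on the two routines below: each local-buffer descriptor occupies two
 * words in receive connection memory, the first holding the buffer address
 * in units of 32 bytes and the second the index of the next descriptor.
 * The descriptors form a free list whose head, tail and count are kept in
 * the RLBF_H, RLBF_T and RLBF_C registers for the receive pools and in
 * TLBF_H/TLBF_T for the transmit pool.
 */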
822static void
823hatm_init_rx_buffer_pool(struct hatm_softc *sc,
824 u_int num, /* bank */
825 u_int start, /* start row */
826 u_int numbuffs /* number of entries */
827)
828{
829 u_int row_size; /* bytes per row */
830 uint32_t row_addr; /* start address of this row */
831 u_int lbuf_size; /* bytes per lbuf */
832 u_int lbufs_per_row; /* number of lbufs per memory row */
833 uint32_t lbufd_index; /* index of lbuf descriptor */
834 uint32_t lbufd_addr; /* address of lbuf descriptor */
835 u_int lbuf_row_cnt; /* current lbuf in current row */
836 uint32_t lbuf_addr; /* address of current buffer */
837 u_int i;
838
839 row_size = sc->bytes_per_row;
840 row_addr = start * row_size;
841 lbuf_size = sc->cells_per_lbuf * 48;
842 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
843
844 /* descriptor index */
845 lbufd_index = num;
846
847 /* 2 words per entry */
848 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
849
850 /* write head of queue */
851 WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index);
852
853 lbuf_row_cnt = 0;
854 for (i = 0; i < numbuffs; i++) {
855 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
856
857 WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
858
859 lbufd_index += 2;
860 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
861
862 if (++lbuf_row_cnt == lbufs_per_row) {
863 lbuf_row_cnt = 0;
864 row_addr += row_size;
865 }
866
867 lbufd_addr += 2 * 2;
868 }
869
870 WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2);
871 WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs);
872
873 BARRIER_W(sc);
874}
875
876static void
877hatm_init_tx_buffer_pool(struct hatm_softc *sc,
878 u_int start, /* start row */
879 u_int numbuffs /* number of entries */
880)
881{
882 u_int row_size; /* bytes per row */
883 uint32_t row_addr; /* start address of this row */
884 u_int lbuf_size; /* bytes per lbuf */
885 u_int lbufs_per_row; /* number of lbufs per memory row */
886 uint32_t lbufd_index; /* index of lbuf descriptor */
887 uint32_t lbufd_addr; /* address of lbuf descriptor */
888 u_int lbuf_row_cnt; /* current lbuf in current row */
889 uint32_t lbuf_addr; /* address of current buffer */
890 u_int i;
891
892 row_size = sc->bytes_per_row;
893 row_addr = start * row_size;
894 lbuf_size = sc->cells_per_lbuf * 48;
895 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
896
897 /* descriptor index */
898 lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs;
899
900 /* 2 words per entry */
901 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
902
903 /* write head of queue */
904 WRITE4(sc, HE_REGO_TLBF_H, lbufd_index);
905
906 lbuf_row_cnt = 0;
907 for (i = 0; i < numbuffs; i++) {
908 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
909
910 WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
911 lbufd_index++;
912 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
913
914 if (++lbuf_row_cnt == lbufs_per_row) {
915 lbuf_row_cnt = 0;
916 row_addr += row_size;
917 }
918
919 lbufd_addr += 2;
920 }
921
922 WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1);
923 BARRIER_W(sc);
924}
925
926/*
927 * 5.1.5 Initialize Intermediate Receive Queues
928 */
929static void
930hatm_init_imed_queues(struct hatm_softc *sc)
931{
932 u_int n;
933
934 if (sc->he622) {
935 for (n = 0; n < 8; n++) {
936 WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f);
937 WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f);
938 }
939 } else {
940 for (n = 0; n < 8; n++) {
941 WRITE4(sc, HE_REGO_INMQ_S(n), n);
942 WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8);
943 }
944 }
945}
946
947/*
948 * 5.1.7 Init CS block
949 */
950static void
951hatm_init_cs_block(struct hatm_softc *sc)
952{
953 u_int n, i;
954 u_int clkfreg, cellrate, decr, tmp;
955 static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR;
956 static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL;
957 static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT;
958 static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR;
959 static const uint32_t rtatr[2] = HE_REGT_CS_RTATR;
960 static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC;
961 static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF;
962
963 /* Clear Rate Controller Start Times and Occupied Flags */
964 for (n = 0; n < 32; n++)
965 WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0);
966
967 clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
968 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
969 decr = cellrate / 32;
970
971 for (n = 0; n < 16; n++) {
972 tmp = clkfreg / cellrate;
973 WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1);
974 cellrate -= decr;
975 }
976
977 i = (sc->cells_per_lbuf == 2) ? 0
978 :(sc->cells_per_lbuf == 4) ? 1
979 : 2;
980
981 /* table 5.2 */
982 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]);
983 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]);
984 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]);
985 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]);
986 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]);
987
988 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]);
989 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]);
990 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]);
991
992 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]);
993 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]);
994
995 WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]);
996 WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]);
997
998 WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]);
999 WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]);
1000 WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]);
1001 WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]);
1002 WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]);
1003 WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]);
1004
1005 WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]);
1006 WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]);
1007
1008 WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8);
1009
1010 for (n = 0; n < 8; n++)
1011 WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0);
1012}
1013
1014/*
1015 * 5.1.8 CS Block Connection Memory Initialisation
1016 */
1017static void
1018hatm_init_cs_block_cm(struct hatm_softc *sc)
1019{
1020 u_int n, i;
1021 u_int expt, mant, etrm, wcr, ttnrm, tnrm;
1022 uint32_t rate;
1023 uint32_t clkfreq, cellrate, decr;
1024 uint32_t *rg, rtg, val = 0;
1025 uint64_t drate;
1026 u_int buf, buf_limit;
1027 uint32_t base = READ4(sc, HE_REGO_RCMABR_BA);
1028
1029 for (n = 0; n < HE_REGL_CM_GQTBL; n++)
1030 WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0);
1031 for (n = 0; n < HE_REGL_CM_RGTBL; n++)
1032 WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0);
1033
1034 tnrm = 0;
1035 for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) {
1036 expt = (n >> 5) & 0x1f;
1037 mant = ((n & 0x18) << 4) | 0x7f;
1038 wcr = (1 << expt) * (mant + 512) / 512;
1039 etrm = n & 0x7;
1040 ttnrm = wcr / 10 / (1 << etrm);
1041 if (ttnrm > 255)
1042 ttnrm = 255;
1043 else if(ttnrm < 2)
1044 ttnrm = 2;
1045 tnrm = (tnrm << 8) | (ttnrm & 0xff);
1046 if (n % 4 == 0)
1047 WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm);
1048 }
1049
1050 clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
1051 buf_limit = 4;
1052
1053 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1054 decr = cellrate / 32;
1055
1056 /* compute GRID top row in 1000 * cps */
1057 for (n = 0; n < 16; n++) {
1058 u_int interval = clkfreq / cellrate;
1059 sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval;
1060 cellrate -= decr;
1061 }
1062
1063 /* compute the other rows according to 2.4 */
1064 for (i = 1; i < 16; i++)
1065 for (n = 0; n < 16; n++)
1066 sc->rate_grid[i][n] = sc->rate_grid[i-1][n] /
1067 ((i < 14) ? 2 : 4);
1068
1069 /* first entry is line rate */
1070 n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M);
1071 expt = (n >> 9) & 0x1f;
1072 mant = n & 0x1f0;
1073 sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512;
1074
1075 /* now build the conversion table - each 32 bit word contains
1076 * two entries - this gives a total of 0x400 16 bit entries.
1077 * This table maps the truncated ATMF rate version into a grid index */
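	/*
	 * Illustrative example of the unpacking below: rate code 0x1e1 gives
	 * expt = 15 and mant = 0x10, i.e. 2^15 * (512 + 16) / 512 = 33792
	 * cells/s before the 10 cells/s floor and the grid lookup are applied.
	 */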
1078 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1079 rg = &sc->rate_grid[15][15];
1080
1081 for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) {
1082 /* unpack the ATMF rate */
1083 expt = rate >> 5;
1084 mant = (rate & 0x1f) << 4;
1085
1086 /* get the cell rate - minimum is 10 per second */
1087 drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512;
1088 if (drate < 10 * 1000)
1089 drate = 10 * 1000;
1090
1091 /* now look up the grid index */
1092 while (drate >= *rg && rg-- > &sc->rate_grid[0][0])
1093 ;
1094 rg++;
1095 rtg = rg - &sc->rate_grid[0][0];
1096
1097 /* now compute the buffer limit */
1098 buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000;
1099 if (buf == 0)
1100 buf = 1;
1101 else if (buf > buf_limit)
1102 buf = buf_limit;
1103
1104 /* make value */
1105 val = (val << 16) | (rtg << 8) | buf;
1106
1107 /* write */
1108 if (rate % 2 == 1)
1109 WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val);
1110 }
1111}
1112
1113/*
1114 * Clear an unused receive group buffer pool
1115 */
1116static void
1117hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large)
1118{
1119 WRITE4(sc, HE_REGO_RBP_S(large, group), 0);
1120 WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1121 WRITE4(sc, HE_REGO_RBP_QI(large, group), 1);
1122 WRITE4(sc, HE_REGO_RBP_BL(large, group), 0);
1123}
1124
1125/*
1126 * Initialize a receive group buffer pool
1127 */
1128static void
1129hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group,
1130 u_int large)
1131{
1132 if (q->size == 0) {
1133 hatm_clear_rpool(sc, group, large);
1134 return;
1135 }
1136
1137 bzero(q->mem.base, q->mem.size);
1138 q->rbp = q->mem.base;
1139 q->head = q->tail = 0;
1140
1141 DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large],
1142 (u_long)q->mem.paddr));
1143
1144 WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr);
1145 WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1146 WRITE4(sc, HE_REGO_RBP_QI(large, group),
1147 ((q->size - 1) << HE_REGS_RBP_SIZE) |
1148 HE_REGM_RBP_INTR_ENB |
1149 (q->thresh << HE_REGS_RBP_THRESH));
1150 WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1);
1151}
1152
1153/*
1154 * Clear an unused receive buffer return queue
1155 */
1156static void
1157hatm_clear_rbrq(struct hatm_softc *sc, u_int group)
1158{
1159 WRITE4(sc, HE_REGO_RBRQ_ST(group), 0);
1160 WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1161 WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH));
1162 WRITE4(sc, HE_REGO_RBRQ_I(group), 0);
1163}
1164
1165/*
1166 * Initialize receive buffer return queue
1167 */
1168static void
1169hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
1170{
1171 if (rq->size == 0) {
1172 hatm_clear_rbrq(sc, group);
1173 return;
1174 }
1175
1176 rq->rbrq = rq->mem.base;
1177 rq->head = 0;
1178
1179 DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr));
1180
1181 WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr);
1182 WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1183 WRITE4(sc, HE_REGO_RBRQ_Q(group),
1184 (rq->thresh << HE_REGS_RBRQ_THRESH) |
1185 ((rq->size - 1) << HE_REGS_RBRQ_SIZE));
1186 WRITE4(sc, HE_REGO_RBRQ_I(group),
1187 (rq->tout << HE_REGS_RBRQ_TIME) |
1188 (rq->pcnt << HE_REGS_RBRQ_COUNT));
1189}
1190
1191/*
1192 * Clear an unused transmit buffer return queue N
1193 */
1194static void
1195hatm_clear_tbrq(struct hatm_softc *sc, u_int group)
1196{
1197 WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0);
1198 WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1199 WRITE4(sc, HE_REGO_TBRQ_S(group), 0);
1200 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1);
1201}
1202
1203/*
1204 * Initialize transmit buffer return queue N
1205 */
1206static void
1207hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group)
1208{
1209 if (tq->size == 0) {
1210 hatm_clear_tbrq(sc, group);
1211 return;
1212 }
1213
1214 tq->tbrq = tq->mem.base;
1215 tq->head = 0;
1216
1217 DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr));
1218
1219 WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr);
1220 WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1221 WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1);
1222 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh);
1223}
1224
1225/*
1226 * Initialize TPDRQ
1227 */
1228static void
1229hatm_init_tpdrq(struct hatm_softc *sc)
1230{
1231 struct hetpdrq *tq;
1232
1233 tq = &sc->tpdrq;
1234 tq->tpdrq = tq->mem.base;
1235 tq->tail = tq->head = 0;
1236
1237 DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr));
1238
1239 WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr);
1240 WRITE4(sc, HE_REGO_TPDRQ_T, 0);
1241 WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1);
1242}
1243
1244/*
1245 * This function can be called by the infrastructure to start the card.
1246 */
1247static void
1248hatm_init(void *p)
1249{
1250 struct hatm_softc *sc = p;
1251
1252 mtx_lock(&sc->mtx);
1253 hatm_stop(sc);
1254 hatm_initialize(sc);
1255 mtx_unlock(&sc->mtx);
1256}
1257
1258enum {
1259 CTL_ISTATS,
1260};
1261
1262/*
1263 * Sysctl handler
1264 */
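/*
 * The istats data is exported as an opaque blob under hw.atm.<nameunit>;
 * it can be retrieved with, e.g., "sysctl -b hw.atm.hatm0.istats"
 * (assuming unit 0) and interpreted against the istats structure.
 */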
1265static int
1266hatm_sysctl(SYSCTL_HANDLER_ARGS)
1267{
1268 struct hatm_softc *sc = arg1;
1269 uint32_t *ret;
1270 int error;
1271 size_t len;
1272
1273 switch (arg2) {
1274
1275 case CTL_ISTATS:
1276 len = sizeof(sc->istats);
1277 break;
1278
1279 default:
1280 panic("bad control code");
1281 }
1282
1283 ret = malloc(len, M_TEMP, M_WAITOK);
1284 mtx_lock(&sc->mtx);
1285
1286 switch (arg2) {
1287
1288 case CTL_ISTATS:
1289 sc->istats.mcc += READ4(sc, HE_REGO_MCC);
1290 sc->istats.oec += READ4(sc, HE_REGO_OEC);
1291 sc->istats.dcc += READ4(sc, HE_REGO_DCC);
1292 sc->istats.cec += READ4(sc, HE_REGO_CEC);
1293 bcopy(&sc->istats, ret, sizeof(sc->istats));
1294 break;
1295 }
1296 mtx_unlock(&sc->mtx);
1297
1298 error = SYSCTL_OUT(req, ret, len);
1299 free(ret, M_TEMP);
1300
1301 return (error);
1302}
1303
1304static int
1305kenv_getuint(struct hatm_softc *sc, const char *var,
1306 u_int *ptr, u_int def, int rw)
1307{
1308 char full[IFNAMSIZ + 3 + 20];
1309 char *val, *end;
1310 u_int u;
1311
1312 *ptr = def;
1313
1314 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1315 OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL)
1316 return (ENOMEM);
1317
1318 snprintf(full, sizeof(full), "hw.%s.%s",
1319 device_get_nameunit(sc->dev), var);
1320
1321 if ((val = getenv(full)) == NULL)
1322 return (0);
1323 u = strtoul(val, &end, 0);
1324 if (end == val || *end != '\0') {
1325 freeenv(val);
1326 return (EINVAL);
1327 }
1328 freeenv(val);
1329 if (bootverbose)
1330 if_printf(sc->ifp, "%s=%u\n", full, u);
1331 *ptr = u;
1332 return (0);
1333}
1334
1335/*
1336 * Set configurable parameters. Many of these are configurable via
1337 * kenv.
1338 */
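/*
 * The tunables below are looked up in the kernel environment under
 * hw.<nameunit>.<name>, so they can be set from the loader before the
 * driver attaches, for example (hypothetical values):
 *
 *	hw.hatm0.rbps0_size="2048"
 *	hw.hatm0.rbrq0_thresh="64"
 */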
1339static int
1340hatm_configure(struct hatm_softc *sc)
1341{
1342 /* Receive buffer pool 0 small */
1343 kenv_getuint(sc, "rbps0_size", &sc->rbp_s0.size,
1344 HE_CONFIG_RBPS0_SIZE, 0);
1345 kenv_getuint(sc, "rbps0_thresh", &sc->rbp_s0.thresh,
1346 HE_CONFIG_RBPS0_THRESH, 0);
1347 sc->rbp_s0.bsize = MBUF0_SIZE;
1348
1349 /* Receive buffer pool 0 large */
1350 kenv_getuint(sc, "rbpl0_size", &sc->rbp_l0.size,
1351 HE_CONFIG_RBPL0_SIZE, 0);
1352 kenv_getuint(sc, "rbpl0_thresh", &sc->rbp_l0.thresh,
1353 HE_CONFIG_RBPL0_THRESH, 0);
1354 sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;
1355
1356 /* Receive buffer return queue 0 */
1357 kenv_getuint(sc, "rbrq0_size", &sc->rbrq_0.size,
1358 HE_CONFIG_RBRQ0_SIZE, 0);
1359 kenv_getuint(sc, "rbrq0_thresh", &sc->rbrq_0.thresh,
1360 HE_CONFIG_RBRQ0_THRESH, 0);
1361 kenv_getuint(sc, "rbrq0_tout", &sc->rbrq_0.tout,
1362 HE_CONFIG_RBRQ0_TOUT, 0);
1363 kenv_getuint(sc, "rbrq0_pcnt", &sc->rbrq_0.pcnt,
1364 HE_CONFIG_RBRQ0_PCNT, 0);
1365
1366 /* Receive buffer pool 1 small */
1367 kenv_getuint(sc, "rbps1_size", &sc->rbp_s1.size,
1368 HE_CONFIG_RBPS1_SIZE, 0);
1369 kenv_getuint(sc, "rbps1_thresh", &sc->rbp_s1.thresh,
1370 HE_CONFIG_RBPS1_THRESH, 0);
1371 sc->rbp_s1.bsize = MBUF1_SIZE;
1372
1373 /* Receive buffer return queue 1 */
1374 kenv_getuint(sc, "rbrq1_size", &sc->rbrq_1.size,
1375 HE_CONFIG_RBRQ1_SIZE, 0);
1376 kenv_getuint(sc, "rbrq1_thresh", &sc->rbrq_1.thresh,
1377 HE_CONFIG_RBRQ1_THRESH, 0);
1378 kenv_getuint(sc, "rbrq1_tout", &sc->rbrq_1.tout,
1379 HE_CONFIG_RBRQ1_TOUT, 0);
1380 kenv_getuint(sc, "rbrq1_pcnt", &sc->rbrq_1.pcnt,
1381 HE_CONFIG_RBRQ1_PCNT, 0);
1382
1383 /* Interrupt queue 0 */
1384 kenv_getuint(sc, "irq0_size", &sc->irq_0.size,
1385 HE_CONFIG_IRQ0_SIZE, 0);
1386 kenv_getuint(sc, "irq0_thresh", &sc->irq_0.thresh,
1387 HE_CONFIG_IRQ0_THRESH, 0);
1388 sc->irq_0.line = HE_CONFIG_IRQ0_LINE;
1389
1390 /* Transmit buffer return queue 0 */
1391 kenv_getuint(sc, "tbrq0_size", &sc->tbrq.size,
1392 HE_CONFIG_TBRQ_SIZE, 0);
1393 kenv_getuint(sc, "tbrq0_thresh", &sc->tbrq.thresh,
1394 HE_CONFIG_TBRQ_THRESH, 0);
1395
1396 /* Transmit buffer ready queue */
1397 kenv_getuint(sc, "tpdrq_size", &sc->tpdrq.size,
1398 HE_CONFIG_TPDRQ_SIZE, 0);
1399 /* Max TPDs per VCC */
1400 kenv_getuint(sc, "tpdmax", &sc->max_tpd,
1401 HE_CONFIG_TPD_MAXCC, 0);
1402
1403 /* external mbuf pages */
1404 kenv_getuint(sc, "max_mbuf_pages", &sc->mbuf_max_pages,
1405 HE_CONFIG_MAX_MBUF_PAGES, 0);
1406
1407 /* mpsafe */
1408 kenv_getuint(sc, "mpsafe", &sc->mpsafe, 0, 0);
1409 if (sc->mpsafe != 0)
1410 sc->mpsafe = INTR_MPSAFE;
1411
1412 return (0);
1413}
1414
1415#ifdef HATM_DEBUG
1416
1417/*
1418 * Get TSRs from connection memory
1419 */
1420static int
1421hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
1422{
1423 struct hatm_softc *sc = arg1;
1424 int error, i, j;
1425 uint32_t *val;
1426
1427 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);
1428
1429 mtx_lock(&sc->mtx);
1430 for (i = 0; i < HE_MAX_VCCS; i++)
1431 for (j = 0; j <= 14; j++)
1432 val[15 * i + j] = READ_TSR(sc, i, j);
1433 mtx_unlock(&sc->mtx);
1434
1435 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
1436 free(val, M_TEMP);
1437 if (error != 0 || req->newptr == NULL)
1438 return (error);
1439
1440 return (EPERM);
1441}
1442
1443/*
1444 * Get TPDs from connection memory
1445 */
1446static int
1447hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
1448{
1449 struct hatm_softc *sc = arg1;
1450 int error, i, j;
1451 uint32_t *val;
1452
1453 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);
1454
1455 mtx_lock(&sc->mtx);
1456 for (i = 0; i < HE_MAX_VCCS; i++)
1457 for (j = 0; j < 16; j++)
1458 val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
1459 mtx_unlock(&sc->mtx);
1460
1461 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
1462 free(val, M_TEMP);
1463 if (error != 0 || req->newptr == NULL)
1464 return (error);
1465
1466 return (EPERM);
1467}
1468
1469/*
1470 * Get mbox registers
1471 */
1472static int
1473hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
1474{
1475 struct hatm_softc *sc = arg1;
1476 int error, i;
1477 uint32_t *val;
1478
1479 val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);
1480
1481 mtx_lock(&sc->mtx);
1482 for (i = 0; i < HE_REGO_CS_END; i++)
1483 val[i] = READ_MBOX4(sc, i);
1484 mtx_unlock(&sc->mtx);
1485
1486 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
1487 free(val, M_TEMP);
1488 if (error != 0 || req->newptr == NULL)
1489 return (error);
1490
1491 return (EPERM);
1492}
1493
1494/*
1495 * Get connection memory
1496 */
1497static int
1498hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
1499{
1500 struct hatm_softc *sc = arg1;
1501 int error, i;
1502 uint32_t *val;
1503
1504 val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK);
1505
1506 mtx_lock(&sc->mtx);
1507 val[0] = READ4(sc, HE_REGO_RCMABR_BA);
1508 for (i = 0; i < HE_CONFIG_RXMEM; i++)
1509 val[i + 1] = READ_RCM4(sc, i);
1510 mtx_unlock(&sc->mtx);
1511
1512 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1));
1513 free(val, M_TEMP);
1514 if (error != 0 || req->newptr == NULL)
1515 return (error);
1516
1517 return (EPERM);
1518}
1519
1520/*
1521 * Get local buffer memory
1522 */
1523static int
1524hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS)
1525{
1526 struct hatm_softc *sc = arg1;
1527 int error, i;
1528 uint32_t *val;
1529 u_int bytes = (1 << 21);
1530
1531 val = malloc(bytes, M_TEMP, M_WAITOK);
1532
1533 mtx_lock(&sc->mtx);
1534 for (i = 0; i < bytes / 4; i++)
1535 val[i] = READ_LB4(sc, i);
1536 mtx_unlock(&sc->mtx);
1537
1538 error = SYSCTL_OUT(req, val, bytes);
1539 free(val, M_TEMP);
1540 if (error != 0 || req->newptr == NULL)
1541 return (error);
1542
1543 return (EPERM);
1544}
1545
1546/*
1547 * Get all card registers
1548 */
1549static int
1550hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS)
1551{
1552 struct hatm_softc *sc = arg1;
1553 int error, i;
1554 uint32_t *val;
1555
1556 val = malloc(HE_REGO_END, M_TEMP, M_WAITOK);
1557
1558 mtx_lock(&sc->mtx);
1559 for (i = 0; i < HE_REGO_END; i += 4)
1560 val[i / 4] = READ4(sc, i);
1561 mtx_unlock(&sc->mtx);
1562
1563 error = SYSCTL_OUT(req, val, HE_REGO_END);
1564 free(val, M_TEMP);
1565 if (error != 0 || req->newptr == NULL)
1566 return (error);
1567
1568 return (EPERM);
1569}
1570#endif
1571
1572/*
1573 * SUNI register access
1574 */
1575/*
1576 * read at most n SUNI registers starting at reg into val
1577 */
1578static int
1579hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
1580{
1581 u_int i;
1582 struct hatm_softc *sc = ifatm->ifp->if_softc;
1583
1584 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1585 return (EINVAL);
1586 if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1587 *n = (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4 - reg;
1588
1589 mtx_assert(&sc->mtx, MA_OWNED);
1590 for (i = 0; i < *n; i++)
1591 val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i));
1592
1593 return (0);
1594}
1595
1596/*
1597 * set the bits selected by mask in register reg to the corresponding bits of val
1598 */
1599static int
1600hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
1601{
1602 uint32_t regval;
1603 struct hatm_softc *sc = ifatm->ifp->if_softc;
1604
1605 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1606 return (EINVAL);
1607
1608 mtx_assert(&sc->mtx, MA_OWNED);
1609 regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
1610 regval = (regval & ~mask) | (val & mask);
1611 WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);
1612
1613 return (0);
1614}
1615
1616static struct utopia_methods hatm_utopia_methods = {
1617 hatm_utopia_readregs,
1618 hatm_utopia_writereg,
1619};
1620
1621/*
1622 * Detach - if it is running, stop. Destroy.
1623 */
1624static int
1625hatm_detach(device_t dev)
1626{
1627 struct hatm_softc *sc = device_get_softc(dev);
1628
1629 mtx_lock(&sc->mtx);
1630 hatm_stop(sc);
1631 if (sc->utopia.state & UTP_ST_ATTACHED) {
1632 utopia_stop(&sc->utopia);
1633 utopia_detach(&sc->utopia);
1634 }
1635 mtx_unlock(&sc->mtx);
1636
1637 atm_ifdetach(sc->ifp);
1638
1639 hatm_destroy(sc);
1640
1641 return (0);
1642}
1643
1644/*
1645 * Attach to the device. Assume that no locking is needed here.
1646 * All resources we allocate here are freed by calling hatm_destroy.
1647 */
1648static int
1649hatm_attach(device_t dev)
1650{
1651 struct hatm_softc *sc;
1652 int error;
1653 uint32_t v;
1654 struct ifnet *ifp;
1655
1656 sc = device_get_softc(dev);
1657
1658 ifp = sc->ifp = if_alloc(IFT_ATM);
1659 if (ifp == NULL) {
1660 device_printf(dev, "could not if_alloc()\n");
1661 return (ENOSPC);
1662 }
1663
1664 sc->dev = dev;
1665 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE155;
1666 IFP2IFATM(sc->ifp)->mib.serial = 0;
1667 IFP2IFATM(sc->ifp)->mib.hw_version = 0;
1668 IFP2IFATM(sc->ifp)->mib.sw_version = 0;
1669 IFP2IFATM(sc->ifp)->mib.vpi_bits = HE_CONFIG_VPI_BITS;
1670 IFP2IFATM(sc->ifp)->mib.vci_bits = HE_CONFIG_VCI_BITS;
1671 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
1672 IFP2IFATM(sc->ifp)->mib.max_vccs = HE_MAX_VCCS;
1673 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
1674 sc->he622 = 0;
1675 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
1676
1677 SLIST_INIT(&sc->tpd_free);
1678
1679 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1680 cv_init(&sc->vcc_cv, "HEVCCcv");
1681 cv_init(&sc->cv_rcclose, "RCClose");
1682
1683 sysctl_ctx_init(&sc->sysctl_ctx);
1684
1685 /*
1686 * 4.2 BIOS Configuration
1687 */
1688 v = pci_read_config(dev, PCIR_COMMAND, 2);
1689 v |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
1690 pci_write_config(dev, PCIR_COMMAND, v, 2);
1691
1692 /*
1693 * 4.3 PCI Bus Controller-Specific Initialisation
1694 */
1695 v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4);
1696 v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT;
1697#if BYTE_ORDER == BIG_ENDIAN && 0
1698 v |= HE_PCIM_CTL0_BIGENDIAN;
1699#endif
1700 pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4);
1701
1702 /*
1703 * Map memory
1704 */
1705 v = pci_read_config(dev, PCIR_COMMAND, 2);
1706 if (!(v & PCIM_CMD_MEMEN)) {
1707 device_printf(dev, "failed to enable memory\n");
1708 error = ENXIO;
1709 goto failed;
1710 }
1711 sc->memid = PCIR_BAR(0);
1712 sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
1713 RF_ACTIVE);
1714 if (sc->memres == NULL) {
1715 device_printf(dev, "could not map memory\n");
1716 error = ENXIO;
1717 goto failed;
1718 }
1719 sc->memh = rman_get_bushandle(sc->memres);
1720 sc->memt = rman_get_bustag(sc->memres);
1721
1722 /*
1723 * Allocate a DMA tag for subsequent allocations
1724 */
1725 if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1726 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1727 NULL, NULL,
1728 BUS_SPACE_MAXSIZE_32BIT, 1,
1729 BUS_SPACE_MAXSIZE_32BIT, 0,
1730 NULL, NULL, &sc->parent_tag)) {
1731 device_printf(dev, "could not allocate DMA tag\n");
1732 error = ENOMEM;
1733 goto failed;
1734 }
1735
1736 if (bus_dma_tag_create(sc->parent_tag, 1, 0,
1737 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1738 NULL, NULL,
1739 MBUF_ALLOC_SIZE, 1,
1740 MBUF_ALLOC_SIZE, 0,
1741 NULL, NULL, &sc->mbuf_tag)) {
1742 device_printf(dev, "could not allocate mbuf DMA tag\n");
1743 error = ENOMEM;
1744 goto failed;
1745 }
1746
1747 /*
1748 * Allocate a DMA tag for packets to send. Here we have a problem with
1749 * specifying the maximum number of segments. Theoretically this would
1750 * be (the size of the transmit ring - 1) multiplied by 3, but that
1751 * would not work in practice. So make the maximum number of TPDs
1752 * occupied by one packet a configuration parameter.
1753 */
1754 if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1755 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1756 HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0,
1757 NULL, NULL, &sc->tx_tag)) {
1758 device_printf(dev, "could not allocate TX tag\n");
1759 error = ENOMEM;
1760 goto failed;
1761 }
1762
1763 /*
1764 * Setup the interrupt
1765 */
1766 sc->irqid = 0;
1767 sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
1768 RF_SHAREABLE | RF_ACTIVE);
1769 if (sc->irqres == 0) {
1770 device_printf(dev, "could not allocate irq\n");
1771 error = ENXIO;
1772 goto failed;
1773 }
1774
1775 ifp->if_softc = sc;
1776 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1777
1778 /*
1779 * Make the sysctl tree
1780 */
1781 error = ENOMEM;
1782 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1783 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
1784 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
1785 goto failed;
1786
1787 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1788 OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS,
1789 hatm_sysctl, "LU", "internal statistics") == NULL)
1790 goto failed;
1791
1792#ifdef HATM_DEBUG
1793 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1794 OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1795 hatm_sysctl_tsr, "S", "transmission status registers") == NULL)
1796 goto failed;
1797
1798 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1799 OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1800 hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL)
1801 goto failed;
1802
1803 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1804 OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1805 hatm_sysctl_mbox, "S", "mbox registers") == NULL)
1806 goto failed;
1807
1808 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1809 OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1810 hatm_sysctl_cm, "S", "connection memory") == NULL)
1811 goto failed;
1812
1813 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1814 OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1815 hatm_sysctl_heregs, "S", "card registers") == NULL)
1816 goto failed;
1817
1818 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1819 OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1820 hatm_sysctl_lbmem, "S", "local memory") == NULL)
1821 goto failed;
1822
1823 kenv_getuint(sc, "debug", &sc->debug, HATM_DEBUG, 1);
1824#endif
1825
1826 /*
1827 * Configure
1828 */
1829 if ((error = hatm_configure(sc)) != 0)
1830 goto failed;
1831
1832 /*
1833 * Compute memory parameters
1834 */
1835 if (sc->rbp_s0.size != 0) {
1836 sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3;
1837 sc->rbp_s0.mem.size = sc->rbp_s0.size * 8;
1838 sc->rbp_s0.mem.align = sc->rbp_s0.mem.size;
1839 }
1840 if (sc->rbp_l0.size != 0) {
1841 sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3;
1842 sc->rbp_l0.mem.size = sc->rbp_l0.size * 8;
1843 sc->rbp_l0.mem.align = sc->rbp_l0.mem.size;
1844 }
1845 if (sc->rbp_s1.size != 0) {
1846 sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3;
1847 sc->rbp_s1.mem.size = sc->rbp_s1.size * 8;
1848 sc->rbp_s1.mem.align = sc->rbp_s1.mem.size;
1849 }
1850 if (sc->rbrq_0.size != 0) {
1851 sc->rbrq_0.mem.size = sc->rbrq_0.size * 8;
1852 sc->rbrq_0.mem.align = sc->rbrq_0.mem.size;
1853 }
1854 if (sc->rbrq_1.size != 0) {
1855 sc->rbrq_1.mem.size = sc->rbrq_1.size * 8;
1856 sc->rbrq_1.mem.align = sc->rbrq_1.mem.size;
1857 }
1858
1859 sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t);
1860 sc->irq_0.mem.align = 4 * 1024;
1861
1862 sc->tbrq.mem.size = sc->tbrq.size * 4;
1863 sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */
1864
1865 sc->tpdrq.mem.size = sc->tpdrq.size * 8;
1866 sc->tpdrq.mem.align = sc->tpdrq.mem.size;
1867
1868 sc->hsp_mem.size = sizeof(struct he_hsp);
1869 sc->hsp_mem.align = 1024;
1870
1871 sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size;
1872 sc->tpd_total = sc->tbrq.size + sc->tpdrq.size;
1873 sc->tpds.align = 64;
1874 sc->tpds.size = sc->tpd_total * HE_TPD_SIZE;
1875
1876 hatm_init_rmaps(sc);
1877 hatm_init_smbufs(sc);
1878 if ((error = hatm_init_tpds(sc)) != 0)
1879 goto failed;
1880
1881 /*
1882 * Allocate memory
1883 */
1884 if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 ||
1885 (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 ||
1886 (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 ||
1887 (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0)
1888 goto failed;
1889
1890 if (sc->rbp_s0.mem.size != 0 &&
1891 (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem)))
1892 goto failed;
1893 if (sc->rbp_l0.mem.size != 0 &&
1894 (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem)))
1895 goto failed;
1896 if (sc->rbp_s1.mem.size != 0 &&
1897 (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem)))
1898 goto failed;
1899
1900 if (sc->rbrq_0.mem.size != 0 &&
1901 (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem)))
1902 goto failed;
1903 if (sc->rbrq_1.mem.size != 0 &&
1904 (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem)))
1905 goto failed;
1906
1907 if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc),
1908 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) {
1909 device_printf(dev, "cannot allocate zone for vccs\n");
 error = ENOMEM;
1910 goto failed;
1911 }
1912
1913 /*
1914 * 4.4 Reset the card.
1915 */
1916 if ((error = hatm_reset(sc)) != 0)
1917 goto failed;
1918
1919 /*
1920 * Read the prom.
1921 */
1922 hatm_init_bus_width(sc);
1923 hatm_init_read_eeprom(sc);
1924 hatm_init_endianess(sc);
1925
1926 /*
1927 * Initialize interface
1928 */
1929 ifp->if_flags = IFF_SIMPLEX;
1930 ifp->if_ioctl = hatm_ioctl;
1931 ifp->if_start = hatm_start;
1932 ifp->if_init = hatm_init;
1933
1934 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
1935 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1936 &hatm_utopia_methods);
1937 utopia_init_media(&sc->utopia);
1938
1939 /* these two SUNI routines need the lock */
1940 mtx_lock(&sc->mtx);
1941 /* poll while we are not running */
1942 sc->utopia.flags |= UTP_FL_POLL_CARRIER;
1943 utopia_start(&sc->utopia);
1944 utopia_reset(&sc->utopia);
1945 mtx_unlock(&sc->mtx);
1946
1947 atm_ifattach(ifp);
1948
1949#ifdef ENABLE_BPF
1950 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
1951#endif
1952
1953 error = bus_setup_intr(dev, sc->irqres, sc->mpsafe | INTR_TYPE_NET,
1954 NULL, hatm_intr, &sc->irq_0, &sc->ih);
1955 if (error != 0) {
1956 device_printf(dev, "could not setup interrupt\n");
1957 hatm_detach(dev);
1958 return (error);
1959 }
1960
1961 return (0);
1962
1963 failed:
1964 hatm_destroy(sc);
1965 return (error);
1966}
1967
1968/*
1969 * Start the interface. Assume a state as from attach().
1970 */
1971void
1972hatm_initialize(struct hatm_softc *sc)
1973{
1974 uint32_t v;
1975 u_int cid;
1976 static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT;
1977
1978 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
1979 return;
1980
1981 hatm_init_bus_width(sc);
1982 hatm_init_endianess(sc);
1983
1984 if_printf(sc->ifp, "%s, Rev. %s, S/N %u, "
1985 "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n",
1986 sc->prod_id, sc->rev, IFP2IFATM(sc->ifp)->mib.serial,
1987 IFP2IFATM(sc->ifp)->mib.esi[0], IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2],
1988 IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5],
1989 sc->pci64 ? 64 : 32);
1990
1991 /*
1992 * 4.8 SDRAM Controller Initialisation
1993 * 4.9 Initialize RNUM value
1994 */
1995 if (sc->he622)
1996 WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
1997 else
1998 WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
1999 BARRIER_W(sc);
2000
2001 v = READ4(sc, HE_REGO_LB_SWAP);
2002 BARRIER_R(sc);
2003 v |= 0xf << HE_REGS_LBSWAP_RNUM;
2004 WRITE4(sc, HE_REGO_LB_SWAP, v);
2005 BARRIER_W(sc);
2006
2007 hatm_init_irq(sc, &sc->irq_0, 0);
2008 hatm_clear_irq(sc, 1);
2009 hatm_clear_irq(sc, 2);
2010 hatm_clear_irq(sc, 3);
2011
2012 WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
2013 WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
2014 WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
2015 WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
2016 BARRIER_W(sc);
2017
2018 /*
2019 * 4.11 Enable PCI Bus Controller State Machine
2020 */
2021 v = READ4(sc, HE_REGO_HOST_CNTL);
2022 BARRIER_R(sc);
2023 v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
2024 HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
2025 WRITE4(sc, HE_REGO_HOST_CNTL, v);
2026 BARRIER_W(sc);
2027
2028 /*
2029 * 5.1.1 Generic configuration state
2030 */
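	/*
	 * layout[he622][0..6] holds: cells per row, bytes per row, the number
	 * of rx0/tx/rx1 rows, the rx0 start row and the cells per local
	 * buffer, as consumed by the assignments below.
	 */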
2031 sc->cells_per_row = layout[sc->he622][0];
2032 sc->bytes_per_row = layout[sc->he622][1];
2033 sc->r0_numrows = layout[sc->he622][2];
2034 sc->tx_numrows = layout[sc->he622][3];
2035 sc->r1_numrows = layout[sc->he622][4];
2036 sc->r0_startrow = layout[sc->he622][5];
2037 sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
2038 sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
2039 sc->cells_per_lbuf = layout[sc->he622][6];
2040
2041 sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row /
2042 sc->cells_per_lbuf);
2043 sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row /
2044 sc->cells_per_lbuf);
2045 sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row /
2046 sc->cells_per_lbuf);
2047
2048 if (sc->r0_numbuffs > 2560)
2049 sc->r0_numbuffs = 2560;
2050 if (sc->r1_numbuffs > 2560)
2051 sc->r1_numbuffs = 2560;
2052 if (sc->tx_numbuffs > 5120)
2053 sc->tx_numbuffs = 5120;
2054
2055 DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
2056 "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
2057 "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
2058 "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
2059 sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
2060 sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
2061 sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));
2062
2063 /*
2064 * 5.1.2 Configure hardware-dependent registers
2065 */
2066 if (sc->he622) {
2067 WRITE4(sc, HE_REGO_LBARB,
2068 (0x2 << HE_REGS_LBARB_SLICE) |
2069 (0xf << HE_REGS_LBARB_RNUM) |
2070 (0x3 << HE_REGS_LBARB_THPRI) |
2071 (0x3 << HE_REGS_LBARB_RHPRI) |
2072 (0x2 << HE_REGS_LBARB_TLPRI) |
2073 (0x1 << HE_REGS_LBARB_RLPRI) |
2074 (0x28 << HE_REGS_LBARB_BUS_MULT) |
2075 (0x50 << HE_REGS_LBARB_NET_PREF));
2076 BARRIER_W(sc);
2077 WRITE4(sc, HE_REGO_SDRAMCON,
2078 /* HW bug: don't use banking */
2079 /* HE_REGM_SDRAMCON_BANK | */
2080 HE_REGM_SDRAMCON_WIDE |
2081 (0x384 << HE_REGS_SDRAMCON_REF));
2082 BARRIER_W(sc);
2083 WRITE4(sc, HE_REGO_RCMCONFIG,
2084 (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2085 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2086 (0x0 << HE_REGS_RCMCONFIG_TYPE));
2087 WRITE4(sc, HE_REGO_TCMCONFIG,
2088 (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2089 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2090 (0x0 << HE_REGS_TCMCONFIG_TYPE));
2091 } else {
2092 WRITE4(sc, HE_REGO_LBARB,
2093 (0x2 << HE_REGS_LBARB_SLICE) |
2094 (0xf << HE_REGS_LBARB_RNUM) |
2095 (0x3 << HE_REGS_LBARB_THPRI) |
2096 (0x3 << HE_REGS_LBARB_RHPRI) |
2097 (0x2 << HE_REGS_LBARB_TLPRI) |
2098 (0x1 << HE_REGS_LBARB_RLPRI) |
2099 (0x46 << HE_REGS_LBARB_BUS_MULT) |
2100 (0x8C << HE_REGS_LBARB_NET_PREF));
2101 BARRIER_W(sc);
2102 WRITE4(sc, HE_REGO_SDRAMCON,
2103 /* HW bug: don't use banking */
2104 /* HE_REGM_SDRAMCON_BANK | */
2105 (0x150 << HE_REGS_SDRAMCON_REF));
2106 BARRIER_W(sc);
2107 WRITE4(sc, HE_REGO_RCMCONFIG,
2108 (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2109 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2110 (0x0 << HE_REGS_RCMCONFIG_TYPE));
2111 WRITE4(sc, HE_REGO_TCMCONFIG,
2112 (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2113 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2114 (0x0 << HE_REGS_TCMCONFIG_TYPE));
2115 }
2116 WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48));
2117
2118 WRITE4(sc, HE_REGO_RLBC_H, 0);
2119 WRITE4(sc, HE_REGO_RLBC_T, 0);
2120 WRITE4(sc, HE_REGO_RLBC_H2, 0);
2121
2122 WRITE4(sc, HE_REGO_RXTHRSH, 512);
2123 WRITE4(sc, HE_REGO_LITHRSH, 256);
2124
2125 WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs);
2126 WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs);
2127
2128 if (sc->he622) {
2129 WRITE4(sc, HE_REGO_RCCONFIG,
2130 (8 << HE_REGS_RCCONFIG_UTDELAY) |
2131 (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2132 (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC));
2133 WRITE4(sc, HE_REGO_TXCONFIG,
2134 (32 << HE_REGS_TXCONFIG_THRESH) |
2135 (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2136 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2137 } else {
2138 WRITE4(sc, HE_REGO_RCCONFIG,
2139 (0 << HE_REGS_RCCONFIG_UTDELAY) |
2140 HE_REGM_RCCONFIG_UT_MODE |
2141 (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2142 (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC));
2143 WRITE4(sc, HE_REGO_TXCONFIG,
2144 (32 << HE_REGS_TXCONFIG_THRESH) |
2145 HE_REGM_TXCONFIG_UTMODE |
2146 (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2147 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2148 }
2149
2150 WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0);
2151
2152 if (sc->rbp_s1.size != 0) {
2153 WRITE4(sc, HE_REGO_RHCONFIG,
2154 HE_REGM_RHCONFIG_PHYENB |
2155 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2156 (1 << HE_REGS_RHCONFIG_OAM_GID));
2157 } else {
2158 WRITE4(sc, HE_REGO_RHCONFIG,
2159 HE_REGM_RHCONFIG_PHYENB |
2160 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2161 (0 << HE_REGS_RHCONFIG_OAM_GID));
2162 }
2163 BARRIER_W(sc);
2164
2165 hatm_init_cm(sc);
2166
2167 hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs);
2168 hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs);
2169 hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs);
2170
2171 hatm_init_imed_queues(sc);
2172
2173 /*
2174 * 5.1.6 Application tunable Parameters
2175 */
2176 WRITE4(sc, HE_REGO_MCC, 0);
2177 WRITE4(sc, HE_REGO_OEC, 0);
2178 WRITE4(sc, HE_REGO_DCC, 0);
2179 WRITE4(sc, HE_REGO_CEC, 0);
2180
2181 hatm_init_cs_block(sc);
2182 hatm_init_cs_block_cm(sc);
2183
2184 hatm_init_rpool(sc, &sc->rbp_s0, 0, 0);
2185 hatm_init_rpool(sc, &sc->rbp_l0, 0, 1);
2186 hatm_init_rpool(sc, &sc->rbp_s1, 1, 0);
2187 hatm_clear_rpool(sc, 1, 1);
2188 hatm_clear_rpool(sc, 2, 0);
2189 hatm_clear_rpool(sc, 2, 1);
2190 hatm_clear_rpool(sc, 3, 0);
2191 hatm_clear_rpool(sc, 3, 1);
2192 hatm_clear_rpool(sc, 4, 0);
2193 hatm_clear_rpool(sc, 4, 1);
2194 hatm_clear_rpool(sc, 5, 0);
2195 hatm_clear_rpool(sc, 5, 1);
2196 hatm_clear_rpool(sc, 6, 0);
2197 hatm_clear_rpool(sc, 6, 1);
2198 hatm_clear_rpool(sc, 7, 0);
2199 hatm_clear_rpool(sc, 7, 1);
2200 hatm_init_rbrq(sc, &sc->rbrq_0, 0);
2201 hatm_init_rbrq(sc, &sc->rbrq_1, 1);
2202 hatm_clear_rbrq(sc, 2);
2203 hatm_clear_rbrq(sc, 3);
2204 hatm_clear_rbrq(sc, 4);
2205 hatm_clear_rbrq(sc, 5);
2206 hatm_clear_rbrq(sc, 6);
2207 hatm_clear_rbrq(sc, 7);
2208
2209 sc->lbufs_next = 0;
2210 bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size);
2211
2212 hatm_init_tbrq(sc, &sc->tbrq, 0);
2213 hatm_clear_tbrq(sc, 1);
2214 hatm_clear_tbrq(sc, 2);
2215 hatm_clear_tbrq(sc, 3);
2216 hatm_clear_tbrq(sc, 4);
2217 hatm_clear_tbrq(sc, 5);
2218 hatm_clear_tbrq(sc, 6);
2219 hatm_clear_tbrq(sc, 7);
2220
2221 hatm_init_tpdrq(sc);
2222
2223 WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 0x104780 : 0x800));
2224
2225 /*
2226 * Initialize HSP
2227 */
2228 bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2229 sc->hsp = sc->hsp_mem.base;
2230 WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);
2231
2232 /*
2233 * 5.1.12 Enable transmit and receive
2234 * Enable bus master and interrupts
2235 */
2236 v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2237 v |= 0x18000000;
2238 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2239
2240 v = READ4(sc, HE_REGO_RCCONFIG);
2241 v |= HE_REGM_RCCONFIG_RXENB;
2242 WRITE4(sc, HE_REGO_RCCONFIG, v);
2243
2244 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2245 v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
2246 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2247
2248 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2249 sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
2250
2251 sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;
2252
2253 /* reopen vccs */
2254 for (cid = 0; cid < HE_MAX_VCCS; cid++)
2255 if (sc->vccs[cid] != NULL)
2256 hatm_load_vc(sc, cid, 1);
2257
2258 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
2259 sc->utopia.carrier == UTP_CARR_OK);
2260}
2261
2262/*
2263 * This function stops the card and frees all resources allocated after
2264 * the attach. Must have the global lock.
2265 */
2266void
2267hatm_stop(struct hatm_softc *sc)
2268{
2269 uint32_t v;
2270 u_int i, p, cid;
2271 struct mbuf_chunk_hdr *ch;
2272 struct mbuf_page *pg;
2273
2274 mtx_assert(&sc->mtx, MA_OWNED);
2275
2276 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
2277 return;
2278 sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2279
2280 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
2281 sc->utopia.carrier == UTP_CARR_OK);
2282
2283 sc->utopia.flags |= UTP_FL_POLL_CARRIER;
2284
2285 /*
2286 * Stop and reset the hardware so that everything remains
2287 * stable.
2288 */
2289 v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2290 v &= ~0x18000000;
2291 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2292
2293 v = READ4(sc, HE_REGO_RCCONFIG);
2294 v &= ~HE_REGM_RCCONFIG_RXENB;
2295 WRITE4(sc, HE_REGO_RCCONFIG, v);
2296
2297 WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
2298 BARRIER_W(sc);
2299
2300 v = READ4(sc, HE_REGO_HOST_CNTL);
2301 BARRIER_R(sc);
2302 v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
2303 WRITE4(sc, HE_REGO_HOST_CNTL, v);
2304 BARRIER_W(sc);
2305
2306 /*
2307 	 * Disable bus master and interrupts
2308 */
2309 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2310 v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
2311 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2312
2313 (void)hatm_reset(sc);
2314
2315 /*
2316 	 * The card resets the SUNI when it is reset, so re-initialize it
2317 */
2318 utopia_reset(&sc->utopia);
2319
2320 /*
2321 	 * Give any waiters on closing a VCC a chance. They will stop
2322 	 * waiting when they see that IFF_DRV_RUNNING has disappeared.
2323 */
2324 cv_broadcast(&sc->vcc_cv);
2325 cv_broadcast(&sc->cv_rcclose);
2326
2327 /*
2328 * Now free all resources.
2329 */
2330
2331 /*
2332 * Free the large mbufs that are given to the card.
2333 */
2334 	for (i = 0; i < sc->lbufs_size; i++) {
2335 if (sc->lbufs[i] != NULL) {
2336 bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
2337 m_freem(sc->lbufs[i]);
2338 sc->lbufs[i] = NULL;
2339 }
2340 }
2341
2342 /*
2343 * Free small buffers
2344 */
2345 for (p = 0; p < sc->mbuf_npages; p++) {
2346 pg = sc->mbuf_pages[p];
2347 for (i = 0; i < pg->hdr.nchunks; i++) {
2348 ch = (struct mbuf_chunk_hdr *) ((char *)pg +
2349 i * pg->hdr.chunksize + pg->hdr.hdroff);
2350 if (ch->flags & MBUF_CARD) {
2351 ch->flags &= ~MBUF_CARD;
2352 ch->flags |= MBUF_USED;
2353 hatm_ext_free(&sc->mbuf_list[pg->hdr.pool],
2354 (struct mbufx_free *)((u_char *)ch -
2355 pg->hdr.hdroff));
2356 }
2357 }
2358 }
2359
2360 hatm_stop_tpds(sc);
2361
2362 /*
2363 	 * Free all partially reassembled PDUs on any VCC.
2364 */
2365 for (cid = 0; cid < HE_MAX_VCCS; cid++) {
2366 if (sc->vccs[cid] != NULL) {
2367 if (sc->vccs[cid]->chain != NULL) {
2368 m_freem(sc->vccs[cid]->chain);
2369 sc->vccs[cid]->chain = NULL;
2370 sc->vccs[cid]->last = NULL;
2371 }
2372 if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN |
2373 HE_VCC_TX_OPEN))) {
2374 hatm_tx_vcc_closed(sc, cid);
2375 uma_zfree(sc->vcc_zone, sc->vccs[cid]);
2376 sc->vccs[cid] = NULL;
2377 sc->open_vccs--;
2378 } else {
2379 sc->vccs[cid]->vflags = 0;
2380 sc->vccs[cid]->ntpds = 0;
2381 }
2382 }
2383 }
2384
2385 if (sc->rbp_s0.size != 0)
2386 bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
2387 if (sc->rbp_l0.size != 0)
2388 bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
2389 if (sc->rbp_s1.size != 0)
2390 bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
2391 if (sc->rbrq_0.size != 0)
2392 bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
2393 if (sc->rbrq_1.size != 0)
2394 bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);
2395
2396 bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
2397 bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
2398 bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2399}
2400
2401/************************************************************
2402 *
2403 * Driver infrastructure
2404 */
2405devclass_t hatm_devclass;
2406
2407static device_method_t hatm_methods[] = {
2408 DEVMETHOD(device_probe, hatm_probe),
2409 DEVMETHOD(device_attach, hatm_attach),
2410 DEVMETHOD(device_detach, hatm_detach),
2411 {0,0}
2412};
2413static driver_t hatm_driver = {
2414 "hatm",
2415 hatm_methods,
2416 sizeof(struct hatm_softc),
2417};
2418DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);
1705 sc->memid = PCIR_BAR(0);
1706 sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
1707 RF_ACTIVE);
1708 if (sc->memres == NULL) {
1709 device_printf(dev, "could not map memory\n");
1710 error = ENXIO;
1711 goto failed;
1712 }
1713 sc->memh = rman_get_bushandle(sc->memres);
1714 sc->memt = rman_get_bustag(sc->memres);
1715
1716 /*
1717 	 * Allocate a DMA tag for subsequent allocations
1718 */
1719 if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1720 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1721 NULL, NULL,
1722 BUS_SPACE_MAXSIZE_32BIT, 1,
1723 BUS_SPACE_MAXSIZE_32BIT, 0,
1724 NULL, NULL, &sc->parent_tag)) {
1725 device_printf(dev, "could not allocate DMA tag\n");
1726 error = ENOMEM;
1727 goto failed;
1728 }
1729
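	/*
	 * A separate tag limited to a single segment of at most
	 * MBUF_ALLOC_SIZE is used for mapping the receive buffers, so
	 * every buffer ends up in exactly one DMA segment.
	 */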
1730 if (bus_dma_tag_create(sc->parent_tag, 1, 0,
1731 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1732 NULL, NULL,
1733 MBUF_ALLOC_SIZE, 1,
1734 MBUF_ALLOC_SIZE, 0,
1735 NULL, NULL, &sc->mbuf_tag)) {
1736 device_printf(dev, "could not allocate mbuf DMA tag\n");
1737 error = ENOMEM;
1738 goto failed;
1739 }
1740
1741 /*
1742 	 * Allocate a DMA tag for packets to send. Specifying the maximum
1743 	 * number of segments is a problem here: in theory it would be
1744 	 * (the size of the transmit ring - 1) multiplied by 3, but that
1745 	 * does not work in practice. So the maximum number of TPDs
1746 	 * occupied by one packet is made a configuration parameter.
1747 */
1748 if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1749 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1750 HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0,
1751 NULL, NULL, &sc->tx_tag)) {
1752 device_printf(dev, "could not allocate TX tag\n");
1753 error = ENOMEM;
1754 goto failed;
1755 }
1756
1757 /*
1758 	 * Set up the interrupt
1759 */
1760 sc->irqid = 0;
1761 sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
1762 RF_SHAREABLE | RF_ACTIVE);
1763 	if (sc->irqres == NULL) {
1764 device_printf(dev, "could not allocate irq\n");
1765 error = ENXIO;
1766 goto failed;
1767 }
1768
1769 ifp->if_softc = sc;
1770 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1771
1772 /*
1773 * Make the sysctl tree
1774 */
1775 error = ENOMEM;
1776 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1777 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
1778 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
1779 goto failed;
1780
1781 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1782 OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS,
1783 hatm_sysctl, "LU", "internal statistics") == NULL)
1784 goto failed;
1785
1786#ifdef HATM_DEBUG
1787 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1788 OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1789 hatm_sysctl_tsr, "S", "transmission status registers") == NULL)
1790 goto failed;
1791
1792 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1793 OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1794 hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL)
1795 goto failed;
1796
1797 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1798 OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1799 hatm_sysctl_mbox, "S", "mbox registers") == NULL)
1800 goto failed;
1801
1802 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1803 OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1804 hatm_sysctl_cm, "S", "connection memory") == NULL)
1805 goto failed;
1806
1807 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1808 OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1809 hatm_sysctl_heregs, "S", "card registers") == NULL)
1810 goto failed;
1811
1812 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1813 OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1814 hatm_sysctl_lbmem, "S", "local memory") == NULL)
1815 goto failed;
1816
1817 kenv_getuint(sc, "debug", &sc->debug, HATM_DEBUG, 1);
1818#endif
1819
1820 /*
1821 * Configure
1822 */
1823 if ((error = hatm_configure(sc)) != 0)
1824 goto failed;
1825
1826 /*
1827 * Compute memory parameters
1828 */
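	/*
	 * The pool and queue sizes are expected to be powers of two (the
	 * mask construction below relies on it). Each RBP/RBRQ entry is
	 * 8 bytes, so the byte mask is (size - 1) << 3 and each ring is
	 * aligned to its own size. For example, a pool of 1024 entries
	 * yields an 8 KByte ring, 8 KByte aligned, with mask 0x1ff8.
	 */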
1829 if (sc->rbp_s0.size != 0) {
1830 sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3;
1831 sc->rbp_s0.mem.size = sc->rbp_s0.size * 8;
1832 sc->rbp_s0.mem.align = sc->rbp_s0.mem.size;
1833 }
1834 if (sc->rbp_l0.size != 0) {
1835 sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3;
1836 sc->rbp_l0.mem.size = sc->rbp_l0.size * 8;
1837 sc->rbp_l0.mem.align = sc->rbp_l0.mem.size;
1838 }
1839 if (sc->rbp_s1.size != 0) {
1840 sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3;
1841 sc->rbp_s1.mem.size = sc->rbp_s1.size * 8;
1842 sc->rbp_s1.mem.align = sc->rbp_s1.mem.size;
1843 }
1844 if (sc->rbrq_0.size != 0) {
1845 sc->rbrq_0.mem.size = sc->rbrq_0.size * 8;
1846 sc->rbrq_0.mem.align = sc->rbrq_0.mem.size;
1847 }
1848 if (sc->rbrq_1.size != 0) {
1849 sc->rbrq_1.mem.size = sc->rbrq_1.size * 8;
1850 sc->rbrq_1.mem.align = sc->rbrq_1.mem.size;
1851 }
1852
1853 sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t);
1854 sc->irq_0.mem.align = 4 * 1024;
1855
1856 sc->tbrq.mem.size = sc->tbrq.size * 4;
1857 sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */
1858
1859 sc->tpdrq.mem.size = sc->tpdrq.size * 8;
1860 sc->tpdrq.mem.align = sc->tpdrq.mem.size;
1861
1862 sc->hsp_mem.size = sizeof(struct he_hsp);
1863 sc->hsp_mem.align = 1024;
1864
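	/*
	 * Size the host bookkeeping arrays: enough large-buffer slots for
	 * every RBPL0 and RBRQ0 entry and enough TPDs for every TBRQ and
	 * TPDRQ entry.
	 */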
1865 sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size;
1866 sc->tpd_total = sc->tbrq.size + sc->tpdrq.size;
1867 sc->tpds.align = 64;
1868 sc->tpds.size = sc->tpd_total * HE_TPD_SIZE;
1869
1870 hatm_init_rmaps(sc);
1871 hatm_init_smbufs(sc);
1872 if ((error = hatm_init_tpds(sc)) != 0)
1873 goto failed;
1874
1875 /*
1876 * Allocate memory
1877 */
1878 if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 ||
1879 (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 ||
1880 (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 ||
1881 (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0)
1882 goto failed;
1883
1884 if (sc->rbp_s0.mem.size != 0 &&
1885 (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem)))
1886 goto failed;
1887 if (sc->rbp_l0.mem.size != 0 &&
1888 (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem)))
1889 goto failed;
1890 if (sc->rbp_s1.mem.size != 0 &&
1891 (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem)))
1892 goto failed;
1893
1894 if (sc->rbrq_0.mem.size != 0 &&
1895 (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem)))
1896 goto failed;
1897 if (sc->rbrq_1.mem.size != 0 &&
1898 (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem)))
1899 goto failed;
1900
1901 if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc),
1902 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) {
1903 device_printf(dev, "cannot allocate zone for vccs\n");
1904 		error = ENOMEM;
		goto failed;
1905 }
1906
1907 /*
1908 * 4.4 Reset the card.
1909 */
1910 if ((error = hatm_reset(sc)) != 0)
1911 goto failed;
1912
1913 /*
1914 	 * Determine the bus width, read the PROM and set the endianness.
1915 */
1916 hatm_init_bus_width(sc);
1917 hatm_init_read_eeprom(sc);
1918 hatm_init_endianess(sc);
1919
1920 /*
1921 * Initialize interface
1922 */
1923 ifp->if_flags = IFF_SIMPLEX;
1924 ifp->if_ioctl = hatm_ioctl;
1925 ifp->if_start = hatm_start;
1926 ifp->if_init = hatm_init;
1927
1928 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
1929 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1930 &hatm_utopia_methods);
1931 utopia_init_media(&sc->utopia);
1932
1933 /* these two SUNI routines need the lock */
1934 mtx_lock(&sc->mtx);
1935 /* poll while we are not running */
1936 sc->utopia.flags |= UTP_FL_POLL_CARRIER;
1937 utopia_start(&sc->utopia);
1938 utopia_reset(&sc->utopia);
1939 mtx_unlock(&sc->mtx);
1940
1941 atm_ifattach(ifp);
1942
1943#ifdef ENABLE_BPF
1944 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
1945#endif
1946
1947 error = bus_setup_intr(dev, sc->irqres, sc->mpsafe | INTR_TYPE_NET,
1948 NULL, hatm_intr, &sc->irq_0, &sc->ih);
1949 if (error != 0) {
1950 device_printf(dev, "could not setup interrupt\n");
1951 hatm_detach(dev);
1952 return (error);
1953 }
1954
1955 return (0);
1956
1957 failed:
1958 hatm_destroy(sc);
1959 return (error);
1960}
1961
1962/*
1963  * Start the interface. Assumes the state established by attach().
1964 */
1965void
1966hatm_initialize(struct hatm_softc *sc)
1967{
1968 uint32_t v;
1969 u_int cid;
1970 static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT;
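	/*
	 * The layout table is indexed by card type (0 = 155 MBit,
	 * 1 = 622 MBit, i.e. sc->he622) and lists, in this order: cells
	 * per row, bytes per row, receive group 0 rows, transmit rows,
	 * receive group 1 rows, receive group 0 start row and cells per
	 * local buffer (see 5.1.1 below).
	 */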
1971
1972 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
1973 return;
1974
1975 hatm_init_bus_width(sc);
1976 hatm_init_endianess(sc);
1977
1978 if_printf(sc->ifp, "%s, Rev. %s, S/N %u, "
1979 "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n",
1980 sc->prod_id, sc->rev, IFP2IFATM(sc->ifp)->mib.serial,
1981 IFP2IFATM(sc->ifp)->mib.esi[0], IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2],
1982 IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5],
1983 sc->pci64 ? 64 : 32);
1984
1985 /*
1986 * 4.8 SDRAM Controller Initialisation
1987 * 4.9 Initialize RNUM value
1988 */
1989 if (sc->he622)
1990 WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
1991 else
1992 WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
1993 BARRIER_W(sc);
1994
1995 v = READ4(sc, HE_REGO_LB_SWAP);
1996 BARRIER_R(sc);
1997 v |= 0xf << HE_REGS_LBSWAP_RNUM;
1998 WRITE4(sc, HE_REGO_LB_SWAP, v);
1999 BARRIER_W(sc);
2000
2001 hatm_init_irq(sc, &sc->irq_0, 0);
2002 hatm_clear_irq(sc, 1);
2003 hatm_clear_irq(sc, 2);
2004 hatm_clear_irq(sc, 3);
2005
2006 WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
2007 WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
2008 WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
2009 WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
2010 BARRIER_W(sc);
2011
2012 /*
2013 * 4.11 Enable PCI Bus Controller State Machine
2014 */
2015 v = READ4(sc, HE_REGO_HOST_CNTL);
2016 BARRIER_R(sc);
2017 v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
2018 HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
2019 WRITE4(sc, HE_REGO_HOST_CNTL, v);
2020 BARRIER_W(sc);
2021
2022 /*
2023 * 5.1.1 Generic configuration state
2024 */
2025 sc->cells_per_row = layout[sc->he622][0];
2026 sc->bytes_per_row = layout[sc->he622][1];
2027 sc->r0_numrows = layout[sc->he622][2];
2028 sc->tx_numrows = layout[sc->he622][3];
2029 sc->r1_numrows = layout[sc->he622][4];
2030 sc->r0_startrow = layout[sc->he622][5];
2031 sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
2032 sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
2033 sc->cells_per_lbuf = layout[sc->he622][6];
2034
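	/*
	 * Local buffers per group: number of rows times the number of
	 * buffers that fit into one row.
	 */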
2035 sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row /
2036 sc->cells_per_lbuf);
2037 sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row /
2038 sc->cells_per_lbuf);
2039 sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row /
2040 sc->cells_per_lbuf);
2041
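	/*
	 * Cap the buffer counts; the limits appear to be fixed maxima of
	 * the chip's free buffer counters.
	 */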
2042 if (sc->r0_numbuffs > 2560)
2043 sc->r0_numbuffs = 2560;
2044 if (sc->r1_numbuffs > 2560)
2045 sc->r1_numbuffs = 2560;
2046 if (sc->tx_numbuffs > 5120)
2047 sc->tx_numbuffs = 5120;
2048
2049 DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
2050 "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
2051 "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
2052 "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
2053 sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
2054 sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
2055 sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));
2056
2057 /*
2058 	 * 5.1.2 Configure hardware-dependent registers
2059 */
2060 if (sc->he622) {
2061 WRITE4(sc, HE_REGO_LBARB,
2062 (0x2 << HE_REGS_LBARB_SLICE) |
2063 (0xf << HE_REGS_LBARB_RNUM) |
2064 (0x3 << HE_REGS_LBARB_THPRI) |
2065 (0x3 << HE_REGS_LBARB_RHPRI) |
2066 (0x2 << HE_REGS_LBARB_TLPRI) |
2067 (0x1 << HE_REGS_LBARB_RLPRI) |
2068 (0x28 << HE_REGS_LBARB_BUS_MULT) |
2069 (0x50 << HE_REGS_LBARB_NET_PREF));
2070 BARRIER_W(sc);
2071 WRITE4(sc, HE_REGO_SDRAMCON,
2072 /* HW bug: don't use banking */
2073 /* HE_REGM_SDRAMCON_BANK | */
2074 HE_REGM_SDRAMCON_WIDE |
2075 (0x384 << HE_REGS_SDRAMCON_REF));
2076 BARRIER_W(sc);
2077 WRITE4(sc, HE_REGO_RCMCONFIG,
2078 (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2079 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2080 (0x0 << HE_REGS_RCMCONFIG_TYPE));
2081 WRITE4(sc, HE_REGO_TCMCONFIG,
2082 (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2083 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2084 (0x0 << HE_REGS_TCMCONFIG_TYPE));
2085 } else {
2086 WRITE4(sc, HE_REGO_LBARB,
2087 (0x2 << HE_REGS_LBARB_SLICE) |
2088 (0xf << HE_REGS_LBARB_RNUM) |
2089 (0x3 << HE_REGS_LBARB_THPRI) |
2090 (0x3 << HE_REGS_LBARB_RHPRI) |
2091 (0x2 << HE_REGS_LBARB_TLPRI) |
2092 (0x1 << HE_REGS_LBARB_RLPRI) |
2093 (0x46 << HE_REGS_LBARB_BUS_MULT) |
2094 (0x8C << HE_REGS_LBARB_NET_PREF));
2095 BARRIER_W(sc);
2096 WRITE4(sc, HE_REGO_SDRAMCON,
2097 /* HW bug: don't use banking */
2098 /* HE_REGM_SDRAMCON_BANK | */
2099 (0x150 << HE_REGS_SDRAMCON_REF));
2100 BARRIER_W(sc);
2101 WRITE4(sc, HE_REGO_RCMCONFIG,
2102 (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2103 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2104 (0x0 << HE_REGS_RCMCONFIG_TYPE));
2105 WRITE4(sc, HE_REGO_TCMCONFIG,
2106 (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2107 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2108 (0x0 << HE_REGS_TCMCONFIG_TYPE));
2109 }
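	/*
	 * A local buffer holds cells_per_lbuf cells with 48 payload bytes
	 * each; e.g. two cells per buffer program LBCONFIG with 96.
	 */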
2110 WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48));
2111
2112 WRITE4(sc, HE_REGO_RLBC_H, 0);
2113 WRITE4(sc, HE_REGO_RLBC_T, 0);
2114 WRITE4(sc, HE_REGO_RLBC_H2, 0);
2115
2116 WRITE4(sc, HE_REGO_RXTHRSH, 512);
2117 WRITE4(sc, HE_REGO_LITHRSH, 256);
2118
2119 WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs);
2120 WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs);
2121
2122 if (sc->he622) {
2123 WRITE4(sc, HE_REGO_RCCONFIG,
2124 (8 << HE_REGS_RCCONFIG_UTDELAY) |
2125 (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2126 (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC));
2127 WRITE4(sc, HE_REGO_TXCONFIG,
2128 (32 << HE_REGS_TXCONFIG_THRESH) |
2129 (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2130 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2131 } else {
2132 WRITE4(sc, HE_REGO_RCCONFIG,
2133 (0 << HE_REGS_RCCONFIG_UTDELAY) |
2134 HE_REGM_RCCONFIG_UT_MODE |
2135 (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2136 (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC));
2137 WRITE4(sc, HE_REGO_TXCONFIG,
2138 (32 << HE_REGS_TXCONFIG_THRESH) |
2139 HE_REGM_TXCONFIG_UTMODE |
2140 (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2141 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2142 }
2143
2144 WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0);
2145
2146 if (sc->rbp_s1.size != 0) {
2147 WRITE4(sc, HE_REGO_RHCONFIG,
2148 HE_REGM_RHCONFIG_PHYENB |
2149 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2150 (1 << HE_REGS_RHCONFIG_OAM_GID));
2151 } else {
2152 WRITE4(sc, HE_REGO_RHCONFIG,
2153 HE_REGM_RHCONFIG_PHYENB |
2154 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2155 (0 << HE_REGS_RHCONFIG_OAM_GID));
2156 }
2157 BARRIER_W(sc);
2158
2159 hatm_init_cm(sc);
2160
2161 hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs);
2162 hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs);
2163 hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs);
2164
2165 hatm_init_imed_queues(sc);
2166
2167 /*
2168 * 5.1.6 Application tunable Parameters
2169 */
2170 WRITE4(sc, HE_REGO_MCC, 0);
2171 WRITE4(sc, HE_REGO_OEC, 0);
2172 WRITE4(sc, HE_REGO_DCC, 0);
2173 WRITE4(sc, HE_REGO_CEC, 0);
2174
2175 hatm_init_cs_block(sc);
2176 hatm_init_cs_block_cm(sc);
2177
2178 hatm_init_rpool(sc, &sc->rbp_s0, 0, 0);
2179 hatm_init_rpool(sc, &sc->rbp_l0, 0, 1);
2180 hatm_init_rpool(sc, &sc->rbp_s1, 1, 0);
2181 hatm_clear_rpool(sc, 1, 1);
2182 hatm_clear_rpool(sc, 2, 0);
2183 hatm_clear_rpool(sc, 2, 1);
2184 hatm_clear_rpool(sc, 3, 0);
2185 hatm_clear_rpool(sc, 3, 1);
2186 hatm_clear_rpool(sc, 4, 0);
2187 hatm_clear_rpool(sc, 4, 1);
2188 hatm_clear_rpool(sc, 5, 0);
2189 hatm_clear_rpool(sc, 5, 1);
2190 hatm_clear_rpool(sc, 6, 0);
2191 hatm_clear_rpool(sc, 6, 1);
2192 hatm_clear_rpool(sc, 7, 0);
2193 hatm_clear_rpool(sc, 7, 1);
2194 hatm_init_rbrq(sc, &sc->rbrq_0, 0);
2195 hatm_init_rbrq(sc, &sc->rbrq_1, 1);
2196 hatm_clear_rbrq(sc, 2);
2197 hatm_clear_rbrq(sc, 3);
2198 hatm_clear_rbrq(sc, 4);
2199 hatm_clear_rbrq(sc, 5);
2200 hatm_clear_rbrq(sc, 6);
2201 hatm_clear_rbrq(sc, 7);
2202
2203 sc->lbufs_next = 0;
2204 bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size);
2205
2206 hatm_init_tbrq(sc, &sc->tbrq, 0);
2207 hatm_clear_tbrq(sc, 1);
2208 hatm_clear_tbrq(sc, 2);
2209 hatm_clear_tbrq(sc, 3);
2210 hatm_clear_tbrq(sc, 4);
2211 hatm_clear_tbrq(sc, 5);
2212 hatm_clear_tbrq(sc, 6);
2213 hatm_clear_tbrq(sc, 7);
2214
2215 hatm_init_tpdrq(sc);
2216
2217 WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 0x104780 : 0x800));
2218
2219 /*
2220 * Initialize HSP
2221 */
2222 bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2223 sc->hsp = sc->hsp_mem.base;
2224 WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);
2225
2226 /*
2227 * 5.1.12 Enable transmit and receive
2228 * Enable bus master and interrupts
2229 */
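	/*
	 * The 0x18000000 bits in CS_ERCTL0 presumably form the transmit
	 * enable referred to above; hatm_stop() clears the same bits again.
	 */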
2230 v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2231 v |= 0x18000000;
2232 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2233
2234 v = READ4(sc, HE_REGO_RCCONFIG);
2235 v |= HE_REGM_RCCONFIG_RXENB;
2236 WRITE4(sc, HE_REGO_RCCONFIG, v);
2237
2238 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2239 v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
2240 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2241
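	/*
	 * The interface is now running. The baud rate follows from the
	 * peak cell rate: 53 bytes per cell times 8 bits, roughly
	 * 149.76 MBit/s for an OC-3c PCR of 353207 cells/s.
	 */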
2242 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2243 sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
2244
2245 sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;
2246
2247 /* reopen vccs */
2248 for (cid = 0; cid < HE_MAX_VCCS; cid++)
2249 if (sc->vccs[cid] != NULL)
2250 hatm_load_vc(sc, cid, 1);
2251
2252 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
2253 sc->utopia.carrier == UTP_CARR_OK);
2254}
2255
2256/*
2257  * This function stops the card and frees all resources allocated after
2258  * attach. The global lock must be held.
2259 */
2260void
2261hatm_stop(struct hatm_softc *sc)
2262{
2263 uint32_t v;
2264 u_int i, p, cid;
2265 struct mbuf_chunk_hdr *ch;
2266 struct mbuf_page *pg;
2267
2268 mtx_assert(&sc->mtx, MA_OWNED);
2269
2270 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
2271 return;
2272 sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2273
2274 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
2275 sc->utopia.carrier == UTP_CARR_OK);
2276
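	/*
	 * While the interface is down the utopia code polls the carrier
	 * state; hatm_initialize() switches polling off again once the
	 * interface is running.
	 */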
2277 sc->utopia.flags |= UTP_FL_POLL_CARRIER;
2278
2279 /*
2280 * Stop and reset the hardware so that everything remains
2281 * stable.
2282 */
2283 v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2284 v &= ~0x18000000;
2285 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2286
2287 v = READ4(sc, HE_REGO_RCCONFIG);
2288 v &= ~HE_REGM_RCCONFIG_RXENB;
2289 WRITE4(sc, HE_REGO_RCCONFIG, v);
2290
2291 WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
2292 BARRIER_W(sc);
2293
2294 v = READ4(sc, HE_REGO_HOST_CNTL);
2295 BARRIER_R(sc);
2296 v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
2297 WRITE4(sc, HE_REGO_HOST_CNTL, v);
2298 BARRIER_W(sc);
2299
2300 /*
2301 	 * Disable bus master and interrupts
2302 */
2303 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2304 v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
2305 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2306
2307 (void)hatm_reset(sc);
2308
2309 /*
2310 	 * The card resets the SUNI when it is reset, so re-initialize it
2311 */
2312 utopia_reset(&sc->utopia);
2313
2314 /*
2315 	 * Give any waiters on closing a VCC a chance. They will stop
2316 	 * waiting when they see that IFF_DRV_RUNNING has disappeared.
2317 */
2318 cv_broadcast(&sc->vcc_cv);
2319 cv_broadcast(&sc->cv_rcclose);
2320
2321 /*
2322 * Now free all resources.
2323 */
2324
2325 /*
2326 * Free the large mbufs that are given to the card.
2327 */
2328 	for (i = 0; i < sc->lbufs_size; i++) {
2329 if (sc->lbufs[i] != NULL) {
2330 bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
2331 m_freem(sc->lbufs[i]);
2332 sc->lbufs[i] = NULL;
2333 }
2334 }
2335
2336 /*
2337 * Free small buffers
2338 */
2339 for (p = 0; p < sc->mbuf_npages; p++) {
2340 pg = sc->mbuf_pages[p];
2341 for (i = 0; i < pg->hdr.nchunks; i++) {
2342 ch = (struct mbuf_chunk_hdr *) ((char *)pg +
2343 i * pg->hdr.chunksize + pg->hdr.hdroff);
2344 if (ch->flags & MBUF_CARD) {
2345 ch->flags &= ~MBUF_CARD;
2346 ch->flags |= MBUF_USED;
2347 hatm_ext_free(&sc->mbuf_list[pg->hdr.pool],
2348 (struct mbufx_free *)((u_char *)ch -
2349 pg->hdr.hdroff));
2350 }
2351 }
2352 }
2353
2354 hatm_stop_tpds(sc);
2355
2356 /*
2357 	 * Free all partially reassembled PDUs on any VCC.
2358 */
2359 for (cid = 0; cid < HE_MAX_VCCS; cid++) {
2360 if (sc->vccs[cid] != NULL) {
2361 if (sc->vccs[cid]->chain != NULL) {
2362 m_freem(sc->vccs[cid]->chain);
2363 sc->vccs[cid]->chain = NULL;
2364 sc->vccs[cid]->last = NULL;
2365 }
2366 if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN |
2367 HE_VCC_TX_OPEN))) {
2368 hatm_tx_vcc_closed(sc, cid);
2369 uma_zfree(sc->vcc_zone, sc->vccs[cid]);
2370 sc->vccs[cid] = NULL;
2371 sc->open_vccs--;
2372 } else {
2373 sc->vccs[cid]->vflags = 0;
2374 sc->vccs[cid]->ntpds = 0;
2375 }
2376 }
2377 }
2378
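	/*
	 * Wipe the shared DMA areas so that no stale queue entries survive
	 * into the next initialisation.
	 */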
2379 if (sc->rbp_s0.size != 0)
2380 bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
2381 if (sc->rbp_l0.size != 0)
2382 bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
2383 if (sc->rbp_s1.size != 0)
2384 bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
2385 if (sc->rbrq_0.size != 0)
2386 bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
2387 if (sc->rbrq_1.size != 0)
2388 bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);
2389
2390 bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
2391 bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
2392 bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2393}
2394
2395/************************************************************
2396 *
2397 * Driver infrastructure
2398 */
2399devclass_t hatm_devclass;
2400
2401static device_method_t hatm_methods[] = {
2402 DEVMETHOD(device_probe, hatm_probe),
2403 DEVMETHOD(device_attach, hatm_attach),
2404 DEVMETHOD(device_detach, hatm_detach),
2405 {0,0}
2406};
2407static driver_t hatm_driver = {
2408 "hatm",
2409 hatm_methods,
2410 sizeof(struct hatm_softc),
2411};
2412DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);