/* if_hatm.c -- revision 121729 */
/*
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * ForeHE driver.
 *
 * This file contains the module and driver infrastructure stuff as well
 * as a couple of utility functions and the entire initialisation.
33 */ 34 35#include <sys/cdefs.h> 36__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm.c 121729 2003-10-30 10:43:52Z harti $"); 37 38#include "opt_inet.h" 39#include "opt_natm.h" 40 41#include <sys/types.h> 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/malloc.h> 45#include <sys/kernel.h> 46#include <sys/bus.h> 47#include <sys/errno.h> 48#include <sys/conf.h> 49#include <sys/module.h> 50#include <sys/queue.h> 51#include <sys/syslog.h> 52#include <sys/lock.h> 53#include <sys/mutex.h> 54#include <sys/condvar.h> 55#include <sys/sysctl.h> 56#include <vm/uma.h> 57 58#include <sys/sockio.h> 59#include <sys/mbuf.h> 60#include <sys/socket.h> 61 62#include <net/if.h> 63#include <net/if_media.h> 64#include <net/if_atm.h> 65#include <net/route.h> 66#ifdef ENABLE_BPF 67#include <net/bpf.h> 68#endif 69#include <netinet/in.h> 70#include <netinet/if_atm.h> 71 72#include <machine/bus.h> 73#include <machine/resource.h> 74#include <sys/bus.h> 75#include <sys/rman.h> 76#include <dev/pci/pcireg.h> 77#include <dev/pci/pcivar.h> 78 79#include <dev/utopia/utopia.h> 80#include <dev/hatm/if_hatmconf.h> 81#include <dev/hatm/if_hatmreg.h> 82#include <dev/hatm/if_hatmvar.h> 83 84static const struct { 85 uint16_t vid; 86 uint16_t did; 87 const char *name; 88} hatm_devs[] = { 89 { 0x1127, 0x400, 90 "FORE HE" }, 91 { 0, 0, NULL } 92}; 93 94SYSCTL_DECL(_hw_atm); 95 96MODULE_DEPEND(hatm, utopia, 1, 1, 1); 97MODULE_DEPEND(hatm, pci, 1, 1, 1); 98MODULE_DEPEND(hatm, atm, 1, 1, 1); 99 100#define EEPROM_DELAY 400 /* microseconds */ 101 102/* Read from EEPROM 0000 0011b */ 103static const uint32_t readtab[] = { 104 HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK, 105 0, 106 HE_REGM_HOST_PROM_CLOCK, 107 0, /* 0 */ 108 HE_REGM_HOST_PROM_CLOCK, 109 0, /* 0 */ 110 HE_REGM_HOST_PROM_CLOCK, 111 0, /* 0 */ 112 HE_REGM_HOST_PROM_CLOCK, 113 0, /* 0 */ 114 HE_REGM_HOST_PROM_CLOCK, 115 0, /* 0 */ 116 HE_REGM_HOST_PROM_CLOCK, 117 HE_REGM_HOST_PROM_DATA_IN, /* 0 */ 118 HE_REGM_HOST_PROM_CLOCK | 
HE_REGM_HOST_PROM_DATA_IN, 119 HE_REGM_HOST_PROM_DATA_IN, /* 1 */ 120 HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN, 121 HE_REGM_HOST_PROM_DATA_IN, /* 1 */ 122}; 123static const uint32_t clocktab[] = { 124 0, HE_REGM_HOST_PROM_CLOCK, 125 0, HE_REGM_HOST_PROM_CLOCK, 126 0, HE_REGM_HOST_PROM_CLOCK, 127 0, HE_REGM_HOST_PROM_CLOCK, 128 0, HE_REGM_HOST_PROM_CLOCK, 129 0, HE_REGM_HOST_PROM_CLOCK, 130 0, HE_REGM_HOST_PROM_CLOCK, 131 0, HE_REGM_HOST_PROM_CLOCK, 132 0 133}; 134 135/* 136 * Convert cell rate to ATM Forum format 137 */ 138u_int 139hatm_cps2atmf(uint32_t pcr) 140{ 141 u_int e; 142 143 if (pcr == 0) 144 return (0); 145 pcr <<= 9; 146 e = 0; 147 while (pcr > (1024 - 1)) { 148 e++; 149 pcr >>= 1; 150 } 151 return ((1 << 14) | (e << 9) | (pcr & 0x1ff)); 152} 153u_int 154hatm_atmf2cps(uint32_t fcr) 155{ 156 fcr &= 0x7fff; 157 158 return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512 159 * (fcr >> 14)); 160} 161 162/************************************************************ 163 * 164 * Initialisation 165 */ 166/* 167 * Probe for a HE controller 168 */ 169static int 170hatm_probe(device_t dev) 171{ 172 int i; 173 174 for (i = 0; hatm_devs[i].name; i++) 175 if (pci_get_vendor(dev) == hatm_devs[i].vid && 176 pci_get_device(dev) == hatm_devs[i].did) { 177 device_set_desc(dev, hatm_devs[i].name); 178 return (0); 179 } 180 return (ENXIO); 181} 182 183/* 184 * Allocate and map DMA-able memory. We support only contiguous mappings. 
185 */ 186static void 187dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 188{ 189 if (error) 190 return; 191 KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs)); 192 KASSERT(segs[0].ds_addr <= 0xffffffffUL, 193 ("phys addr too large %lx", (u_long)segs[0].ds_addr)); 194 195 *(bus_addr_t *)arg = segs[0].ds_addr; 196} 197static int 198hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem) 199{ 200 int error; 201 202 mem->base = NULL; 203 204 /* 205 * Alignement does not work in the bus_dmamem_alloc function below 206 * on FreeBSD. malloc seems to align objects at least to the object 207 * size so increase the size to the alignment if the size is lesser 208 * than the alignemnt. 209 * XXX on sparc64 this is (probably) not needed. 210 */ 211 if (mem->size < mem->align) 212 mem->size = mem->align; 213 214 error = bus_dma_tag_create(sc->parent_tag, mem->align, 0, 215 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 216 NULL, NULL, mem->size, 1, 217 BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, 218 NULL, NULL, &mem->tag); 219 if (error) { 220 if_printf(&sc->ifatm.ifnet, "DMA tag create (%s)\n", what); 221 return (error); 222 } 223 224 error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map); 225 if (error) { 226 if_printf(&sc->ifatm.ifnet, "DMA mem alloc (%s): %d\n", 227 what, error); 228 bus_dma_tag_destroy(mem->tag); 229 mem->base = NULL; 230 return (error); 231 } 232 233 error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size, 234 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); 235 if (error) { 236 if_printf(&sc->ifatm.ifnet, "DMA map load (%s): %d\n", 237 what, error); 238 bus_dmamem_free(mem->tag, mem->base, mem->map); 239 bus_dma_tag_destroy(mem->tag); 240 mem->base = NULL; 241 return (error); 242 } 243 244 DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size, 245 mem->align, mem->base, (u_long)mem->paddr)); 246 247 return (0); 248} 249 250/* 251 * Destroy all the resources of an DMA-able memory 
region. 252 */ 253static void 254hatm_destroy_dmamem(struct dmamem *mem) 255{ 256 if (mem->base != NULL) { 257 bus_dmamap_unload(mem->tag, mem->map); 258 bus_dmamem_free(mem->tag, mem->base, mem->map); 259 (void)bus_dma_tag_destroy(mem->tag); 260 mem->base = NULL; 261 } 262} 263 264/* 265 * Initialize/destroy DMA maps for the large pool 0 266 */ 267static void 268hatm_destroy_rmaps(struct hatm_softc *sc) 269{ 270 u_int b; 271 272 DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers...")); 273 if (sc->rmaps != NULL) { 274 for (b = 0; b < sc->lbufs_size; b++) 275 bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]); 276 free(sc->rmaps, M_DEVBUF); 277 } 278 if (sc->lbufs != NULL) 279 free(sc->lbufs, M_DEVBUF); 280} 281 282static void 283hatm_init_rmaps(struct hatm_softc *sc) 284{ 285 u_int b; 286 int err; 287 288 DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers...")); 289 sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size, 290 M_DEVBUF, M_ZERO | M_WAITOK); 291 292 /* allocate and create the DMA maps for the large pool */ 293 sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size, 294 M_DEVBUF, M_WAITOK); 295 for (b = 0; b < sc->lbufs_size; b++) { 296 err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]); 297 if (err != 0) 298 panic("bus_dmamap_create: %d\n", err); 299 } 300} 301 302/* 303 * Initialize and destroy small mbuf page pointers and pages 304 */ 305static void 306hatm_destroy_smbufs(struct hatm_softc *sc) 307{ 308 u_int i, b; 309 struct mbuf_page *pg; 310 struct mbuf_chunk_hdr *h; 311 312 if (sc->mbuf_pages != NULL) { 313 for (i = 0; i < sc->mbuf_npages; i++) { 314 pg = sc->mbuf_pages[i]; 315 for (b = 0; b < pg->hdr.nchunks; b++) { 316 h = (struct mbuf_chunk_hdr *) ((char *)pg + 317 b * pg->hdr.chunksize + pg->hdr.hdroff); 318 if (h->flags & MBUF_CARD) 319 if_printf(&sc->ifatm.ifnet, 320 "%s -- mbuf page=%u card buf %u\n", 321 __func__, i, b); 322 if (h->flags & MBUF_USED) 323 if_printf(&sc->ifatm.ifnet, 324 "%s -- mbuf page=%u used buf %u\n", 325 
__func__, i, b); 326 } 327 bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map); 328 bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map); 329 free(pg, M_DEVBUF); 330 } 331 free(sc->mbuf_pages, M_DEVBUF); 332 } 333} 334 335static void 336hatm_init_smbufs(struct hatm_softc *sc) 337{ 338 sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) * 339 sc->mbuf_max_pages, M_DEVBUF, M_WAITOK); 340 sc->mbuf_npages = 0; 341} 342 343/* 344 * Initialize/destroy TPDs. This is called from attach/detach. 345 */ 346static void 347hatm_destroy_tpds(struct hatm_softc *sc) 348{ 349 struct tpd *t; 350 351 if (sc->tpds.base == NULL) 352 return; 353 354 DBG(sc, ATTACH, ("releasing TPDs ...")); 355 if (sc->tpd_nfree != sc->tpd_total) 356 if_printf(&sc->ifatm.ifnet, "%u tpds still in use from %u\n", 357 sc->tpd_total - sc->tpd_nfree, sc->tpd_total); 358 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) { 359 SLIST_REMOVE_HEAD(&sc->tpd_free, link); 360 bus_dmamap_destroy(sc->tx_tag, t->map); 361 } 362 hatm_destroy_dmamem(&sc->tpds); 363 free(sc->tpd_used, M_DEVBUF); 364 DBG(sc, ATTACH, ("... done")); 365} 366static int 367hatm_init_tpds(struct hatm_softc *sc) 368{ 369 int error; 370 u_int i; 371 struct tpd *t; 372 373 DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total)); 374 error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds); 375 if (error != 0) { 376 DBG(sc, ATTACH, ("... dmamem error=%d", error)); 377 return (error); 378 } 379 380 /* put all the TPDs on the free list and allocate DMA maps */ 381 for (i = 0; i < sc->tpd_total; i++) { 382 t = TPD_ADDR(sc, i); 383 t->no = i; 384 t->mbuf = NULL; 385 error = bus_dmamap_create(sc->tx_tag, 0, &t->map); 386 if (error != 0) { 387 DBG(sc, ATTACH, ("... 
dmamap error=%d", error)); 388 while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) { 389 SLIST_REMOVE_HEAD(&sc->tpd_free, link); 390 bus_dmamap_destroy(sc->tx_tag, t->map); 391 } 392 hatm_destroy_dmamem(&sc->tpds); 393 return (error); 394 } 395 396 SLIST_INSERT_HEAD(&sc->tpd_free, t, link); 397 } 398 399 /* allocate and zero bitmap */ 400 sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8, 401 M_DEVBUF, M_ZERO | M_WAITOK); 402 sc->tpd_nfree = sc->tpd_total; 403 404 DBG(sc, ATTACH, ("... done")); 405 406 return (0); 407} 408 409/* 410 * Free all the TPDs that where given to the card. 411 * An mbuf chain may be attached to a TPD - free it also and 412 * unload its associated DMA map. 413 */ 414static void 415hatm_stop_tpds(struct hatm_softc *sc) 416{ 417 u_int i; 418 struct tpd *t; 419 420 DBG(sc, ATTACH, ("free TPDs ...")); 421 for (i = 0; i < sc->tpd_total; i++) { 422 if (TPD_TST_USED(sc, i)) { 423 t = TPD_ADDR(sc, i); 424 if (t->mbuf) { 425 m_freem(t->mbuf); 426 t->mbuf = NULL; 427 bus_dmamap_unload(sc->tx_tag, t->map); 428 } 429 TPD_CLR_USED(sc, i); 430 SLIST_INSERT_HEAD(&sc->tpd_free, t, link); 431 sc->tpd_nfree++; 432 } 433 } 434} 435 436/* 437 * This frees ALL resources of this interface and leaves the structure 438 * in an indeterminate state. This is called just before detaching or 439 * on a failed attach. No lock should be held. 440 */ 441static void 442hatm_destroy(struct hatm_softc *sc) 443{ 444 u_int cid; 445 446 bus_teardown_intr(sc->dev, sc->irqres, sc->ih); 447 448 hatm_destroy_rmaps(sc); 449 hatm_destroy_smbufs(sc); 450 hatm_destroy_tpds(sc); 451 452 if (sc->vcc_zone != NULL) { 453 for (cid = 0; cid < HE_MAX_VCCS; cid++) 454 if (sc->vccs[cid] != NULL) 455 uma_zfree(sc->vcc_zone, sc->vccs[cid]); 456 uma_zdestroy(sc->vcc_zone); 457 } 458 459 /* 460 * Release all memory allocated to the various queues and 461 * Status pages. These have there own flag which shows whether 462 * they are really allocated. 
463 */ 464 hatm_destroy_dmamem(&sc->irq_0.mem); 465 hatm_destroy_dmamem(&sc->rbp_s0.mem); 466 hatm_destroy_dmamem(&sc->rbp_l0.mem); 467 hatm_destroy_dmamem(&sc->rbp_s1.mem); 468 hatm_destroy_dmamem(&sc->rbrq_0.mem); 469 hatm_destroy_dmamem(&sc->rbrq_1.mem); 470 hatm_destroy_dmamem(&sc->tbrq.mem); 471 hatm_destroy_dmamem(&sc->tpdrq.mem); 472 hatm_destroy_dmamem(&sc->hsp_mem); 473 474 if (sc->irqres != NULL) 475 bus_release_resource(sc->dev, SYS_RES_IRQ, 476 sc->irqid, sc->irqres); 477 478 if (sc->tx_tag != NULL) 479 if (bus_dma_tag_destroy(sc->tx_tag)) 480 if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n"); 481 482 if (sc->mbuf_tag != NULL) 483 if (bus_dma_tag_destroy(sc->mbuf_tag)) 484 if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n"); 485 486 if (sc->parent_tag != NULL) 487 if (bus_dma_tag_destroy(sc->parent_tag)) 488 if_printf(&sc->ifatm.ifnet, "parent DMA tag busy\n"); 489 490 if (sc->memres != NULL) 491 bus_release_resource(sc->dev, SYS_RES_MEMORY, 492 sc->memid, sc->memres); 493 494 sysctl_ctx_free(&sc->sysctl_ctx); 495 496 cv_destroy(&sc->cv_rcclose); 497 cv_destroy(&sc->vcc_cv); 498 mtx_destroy(&sc->mtx); 499} 500 501/* 502 * 4.4 Card reset 503 */ 504static int 505hatm_reset(struct hatm_softc *sc) 506{ 507 u_int v, count; 508 509 WRITE4(sc, HE_REGO_RESET_CNTL, 0x00); 510 BARRIER_W(sc); 511 WRITE4(sc, HE_REGO_RESET_CNTL, 0xff); 512 BARRIER_RW(sc); 513 count = 0; 514 while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) { 515 BARRIER_R(sc); 516 if (++count == 100) { 517 if_printf(&sc->ifatm.ifnet, "reset failed\n"); 518 return (ENXIO); 519 } 520 DELAY(1000); 521 } 522 return (0); 523} 524 525/* 526 * 4.5 Set Bus Width 527 */ 528static void 529hatm_init_bus_width(struct hatm_softc *sc) 530{ 531 uint32_t v, v1; 532 533 v = READ4(sc, HE_REGO_HOST_CNTL); 534 BARRIER_R(sc); 535 if (v & HE_REGM_HOST_BUS64) { 536 sc->pci64 = 1; 537 v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); 538 v1 |= HE_PCIM_CTL0_64BIT; 539 pci_write_config(sc->dev, 
HE_PCIR_GEN_CNTL_0, v1, 4); 540 541 v |= HE_REGM_HOST_DESC_RD64 542 | HE_REGM_HOST_DATA_RD64 543 | HE_REGM_HOST_DATA_WR64; 544 WRITE4(sc, HE_REGO_HOST_CNTL, v); 545 BARRIER_W(sc); 546 } else { 547 sc->pci64 = 0; 548 v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); 549 v &= ~HE_PCIM_CTL0_64BIT; 550 pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4); 551 } 552} 553 554/* 555 * 4.6 Set Host Endianess 556 */ 557static void 558hatm_init_endianess(struct hatm_softc *sc) 559{ 560 uint32_t v; 561 562 v = READ4(sc, HE_REGO_LB_SWAP); 563 BARRIER_R(sc); 564#if BYTE_ORDER == BIG_ENDIAN 565 v |= HE_REGM_LBSWAP_INTR_SWAP | 566 HE_REGM_LBSWAP_DESC_WR_SWAP | 567 HE_REGM_LBSWAP_BIG_ENDIAN; 568 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP | 569 HE_REGM_LBSWAP_DESC_RD_SWAP | 570 HE_REGM_LBSWAP_DATA_RD_SWAP); 571#else 572 v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP | 573 HE_REGM_LBSWAP_DESC_RD_SWAP | 574 HE_REGM_LBSWAP_DATA_RD_SWAP | 575 HE_REGM_LBSWAP_INTR_SWAP | 576 HE_REGM_LBSWAP_DESC_WR_SWAP | 577 HE_REGM_LBSWAP_BIG_ENDIAN); 578#endif 579 580 if (sc->he622) 581 v |= HE_REGM_LBSWAP_XFER_SIZE; 582 583 WRITE4(sc, HE_REGO_LB_SWAP, v); 584 BARRIER_W(sc); 585} 586 587/* 588 * 4.7 Read EEPROM 589 */ 590static uint8_t 591hatm_read_prom_byte(struct hatm_softc *sc, u_int addr) 592{ 593 uint32_t val, tmp_read, byte_read; 594 u_int i, j; 595 int n; 596 597 val = READ4(sc, HE_REGO_HOST_CNTL); 598 val &= HE_REGM_HOST_PROM_BITS; 599 BARRIER_R(sc); 600 601 val |= HE_REGM_HOST_PROM_WREN; 602 WRITE4(sc, HE_REGO_HOST_CNTL, val); 603 BARRIER_W(sc); 604 605 /* send READ */ 606 for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) { 607 WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]); 608 BARRIER_W(sc); 609 DELAY(EEPROM_DELAY); 610 } 611 612 /* send ADDRESS */ 613 for (n = 7, j = 0; n >= 0; n--) { 614 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] | 615 (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN)); 616 BARRIER_W(sc); 617 DELAY(EEPROM_DELAY); 618 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] | 619 
(((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN)); 620 BARRIER_W(sc); 621 DELAY(EEPROM_DELAY); 622 } 623 624 val &= ~HE_REGM_HOST_PROM_WREN; 625 WRITE4(sc, HE_REGO_HOST_CNTL, val); 626 BARRIER_W(sc); 627 628 /* read DATA */ 629 byte_read = 0; 630 for (n = 7, j = 0; n >= 0; n--) { 631 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); 632 BARRIER_W(sc); 633 DELAY(EEPROM_DELAY); 634 tmp_read = READ4(sc, HE_REGO_HOST_CNTL); 635 byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT) 636 >> HE_REGS_HOST_PROM_DATA_OUT) << n); 637 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); 638 BARRIER_W(sc); 639 DELAY(EEPROM_DELAY); 640 } 641 WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); 642 BARRIER_W(sc); 643 DELAY(EEPROM_DELAY); 644 645 return (byte_read); 646} 647 648static void 649hatm_init_read_eeprom(struct hatm_softc *sc) 650{ 651 u_int n, count; 652 u_char byte; 653 uint32_t v; 654 655 for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) { 656 byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count); 657 if (n > 0 || byte != ' ') 658 sc->prod_id[n++] = byte; 659 } 660 while (n > 0 && sc->prod_id[n-1] == ' ') 661 n--; 662 sc->prod_id[n] = '\0'; 663 664 for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) { 665 byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count); 666 if (n > 0 || byte != ' ') 667 sc->rev[n++] = byte; 668 } 669 while (n > 0 && sc->rev[n-1] == ' ') 670 n--; 671 sc->rev[n] = '\0'; 672 sc->ifatm.mib.hw_version = sc->rev[0]; 673 674 sc->ifatm.mib.serial = hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0; 675 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8; 676 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16; 677 sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24; 678 679 v = hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0; 680 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8; 681 v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16; 682 v |= 
hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24; 683 684 switch (v) { 685 case HE_MEDIA_UTP155: 686 sc->ifatm.mib.media = IFM_ATM_UTP_155; 687 sc->ifatm.mib.pcr = ATM_RATE_155M; 688 break; 689 690 case HE_MEDIA_MMF155: 691 sc->ifatm.mib.media = IFM_ATM_MM_155; 692 sc->ifatm.mib.pcr = ATM_RATE_155M; 693 break; 694 695 case HE_MEDIA_MMF622: 696 sc->ifatm.mib.media = IFM_ATM_MM_622; 697 sc->ifatm.mib.device = ATM_DEVICE_HE622; 698 sc->ifatm.mib.pcr = ATM_RATE_622M; 699 sc->he622 = 1; 700 break; 701 702 case HE_MEDIA_SMF155: 703 sc->ifatm.mib.media = IFM_ATM_SM_155; 704 sc->ifatm.mib.pcr = ATM_RATE_155M; 705 break; 706 707 case HE_MEDIA_SMF622: 708 sc->ifatm.mib.media = IFM_ATM_SM_622; 709 sc->ifatm.mib.device = ATM_DEVICE_HE622; 710 sc->ifatm.mib.pcr = ATM_RATE_622M; 711 sc->he622 = 1; 712 break; 713 } 714 715 sc->ifatm.mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0); 716 sc->ifatm.mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1); 717 sc->ifatm.mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2); 718 sc->ifatm.mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3); 719 sc->ifatm.mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4); 720 sc->ifatm.mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5); 721} 722 723/* 724 * Clear unused interrupt queue 725 */ 726static void 727hatm_clear_irq(struct hatm_softc *sc, u_int group) 728{ 729 WRITE4(sc, HE_REGO_IRQ_BASE(group), 0); 730 WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0); 731 WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0); 732 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0); 733} 734 735/* 736 * 4.10 Initialize interrupt queues 737 */ 738static void 739hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group) 740{ 741 u_int i; 742 743 if (q->size == 0) { 744 hatm_clear_irq(sc, group); 745 return; 746 } 747 748 q->group = group; 749 q->sc = sc; 750 q->irq = q->mem.base; 751 q->head = 0; 752 q->tailp = q->irq + (q->size - 1); 753 *q->tailp = 0; 754 755 for (i = 0; i < q->size; i++) 756 q->irq[i] = 
HE_REGM_ITYPE_INVALID; 757 758 WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr); 759 WRITE4(sc, HE_REGO_IRQ_HEAD(group), 760 ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) | 761 (q->thresh << HE_REGS_IRQ_HEAD_THRESH)); 762 WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line); 763 WRITE4(sc, HE_REGO_IRQ_DATA(group), 0); 764} 765 766/* 767 * 5.1.3 Initialize connection memory 768 */ 769static void 770hatm_init_cm(struct hatm_softc *sc) 771{ 772 u_int rsra, mlbm, rabr, numbuffs; 773 u_int tsra, tabr, mtpd; 774 u_int n; 775 776 for (n = 0; n < HE_CONFIG_TXMEM; n++) 777 WRITE_TCM4(sc, n, 0); 778 for (n = 0; n < HE_CONFIG_RXMEM; n++) 779 WRITE_RCM4(sc, n, 0); 780 781 numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs; 782 783 rsra = 0; 784 mlbm = ((rsra + sc->ifatm.mib.max_vccs * 8) + 0x7ff) & ~0x7ff; 785 rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff; 786 sc->rsrb = ((rabr + 2048) + (2 * sc->ifatm.mib.max_vccs - 1)) & 787 ~(2 * sc->ifatm.mib.max_vccs - 1); 788 789 tsra = 0; 790 sc->tsrb = tsra + sc->ifatm.mib.max_vccs * 8; 791 sc->tsrc = sc->tsrb + sc->ifatm.mib.max_vccs * 4; 792 sc->tsrd = sc->tsrc + sc->ifatm.mib.max_vccs * 2; 793 tabr = sc->tsrd + sc->ifatm.mib.max_vccs * 1; 794 mtpd = ((tabr + 1024) + (16 * sc->ifatm.mib.max_vccs - 1)) & 795 ~(16 * sc->ifatm.mib.max_vccs - 1); 796 797 DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x", 798 rsra, mlbm, rabr, sc->rsrb)); 799 DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x", 800 tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd)); 801 802 WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb); 803 WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc); 804 WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd); 805 WRITE4(sc, HE_REGO_TMABR_BA, tabr); 806 WRITE4(sc, HE_REGO_TPD_BA, mtpd); 807 808 WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb); 809 WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm); 810 WRITE4(sc, HE_REGO_RCMABR_BA, rabr); 811 812 BARRIER_W(sc); 813} 814 815/* 816 * 5.1.4 Initialize Local buffer Pools 817 */ 818static void 819hatm_init_rx_buffer_pool(struct 
hatm_softc *sc, 820 u_int num, /* bank */ 821 u_int start, /* start row */ 822 u_int numbuffs /* number of entries */ 823) 824{ 825 u_int row_size; /* bytes per row */ 826 uint32_t row_addr; /* start address of this row */ 827 u_int lbuf_size; /* bytes per lbuf */ 828 u_int lbufs_per_row; /* number of lbufs per memory row */ 829 uint32_t lbufd_index; /* index of lbuf descriptor */ 830 uint32_t lbufd_addr; /* address of lbuf descriptor */ 831 u_int lbuf_row_cnt; /* current lbuf in current row */ 832 uint32_t lbuf_addr; /* address of current buffer */ 833 u_int i; 834 835 row_size = sc->bytes_per_row;; 836 row_addr = start * row_size; 837 lbuf_size = sc->cells_per_lbuf * 48; 838 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf; 839 840 /* descriptor index */ 841 lbufd_index = num; 842 843 /* 2 words per entry */ 844 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2; 845 846 /* write head of queue */ 847 WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index); 848 849 lbuf_row_cnt = 0; 850 for (i = 0; i < numbuffs; i++) { 851 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32; 852 853 WRITE_RCM4(sc, lbufd_addr, lbuf_addr); 854 855 lbufd_index += 2; 856 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index); 857 858 if (++lbuf_row_cnt == lbufs_per_row) { 859 lbuf_row_cnt = 0; 860 row_addr += row_size; 861 } 862 863 lbufd_addr += 2 * 2; 864 } 865 866 WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2); 867 WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs); 868 869 BARRIER_W(sc); 870} 871 872static void 873hatm_init_tx_buffer_pool(struct hatm_softc *sc, 874 u_int start, /* start row */ 875 u_int numbuffs /* number of entries */ 876) 877{ 878 u_int row_size; /* bytes per row */ 879 uint32_t row_addr; /* start address of this row */ 880 u_int lbuf_size; /* bytes per lbuf */ 881 u_int lbufs_per_row; /* number of lbufs per memory row */ 882 uint32_t lbufd_index; /* index of lbuf descriptor */ 883 uint32_t lbufd_addr; /* address of lbuf descriptor */ 884 u_int lbuf_row_cnt; /* current 
lbuf in current row */ 885 uint32_t lbuf_addr; /* address of current buffer */ 886 u_int i; 887 888 row_size = sc->bytes_per_row;; 889 row_addr = start * row_size; 890 lbuf_size = sc->cells_per_lbuf * 48; 891 lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf; 892 893 /* descriptor index */ 894 lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs; 895 896 /* 2 words per entry */ 897 lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2; 898 899 /* write head of queue */ 900 WRITE4(sc, HE_REGO_TLBF_H, lbufd_index); 901 902 lbuf_row_cnt = 0; 903 for (i = 0; i < numbuffs; i++) { 904 lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32; 905 906 WRITE_RCM4(sc, lbufd_addr, lbuf_addr); 907 lbufd_index++; 908 WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index); 909 910 if (++lbuf_row_cnt == lbufs_per_row) { 911 lbuf_row_cnt = 0; 912 row_addr += row_size; 913 } 914 915 lbufd_addr += 2; 916 } 917 918 WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1); 919 BARRIER_W(sc); 920} 921 922/* 923 * 5.1.5 Initialize Intermediate Receive Queues 924 */ 925static void 926hatm_init_imed_queues(struct hatm_softc *sc) 927{ 928 u_int n; 929 930 if (sc->he622) { 931 for (n = 0; n < 8; n++) { 932 WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f); 933 WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f); 934 } 935 } else { 936 for (n = 0; n < 8; n++) { 937 WRITE4(sc, HE_REGO_INMQ_S(n), n); 938 WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8); 939 } 940 } 941} 942 943/* 944 * 5.1.7 Init CS block 945 */ 946static void 947hatm_init_cs_block(struct hatm_softc *sc) 948{ 949 u_int n, i; 950 u_int clkfreg, cellrate, decr, tmp; 951 static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR; 952 static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL; 953 static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT; 954 static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR; 955 static const uint32_t rtatr[2] = HE_REGT_CS_RTATR; 956 static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC; 957 static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF; 958 
959 /* Clear Rate Controller Start Times and Occupied Flags */ 960 for (n = 0; n < 32; n++) 961 WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0); 962 963 clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; 964 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; 965 decr = cellrate / 32; 966 967 for (n = 0; n < 16; n++) { 968 tmp = clkfreg / cellrate; 969 WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1); 970 cellrate -= decr; 971 } 972 973 i = (sc->cells_per_lbuf == 2) ? 0 974 :(sc->cells_per_lbuf == 4) ? 1 975 : 2; 976 977 /* table 5.2 */ 978 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]); 979 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]); 980 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]); 981 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]); 982 WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]); 983 984 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]); 985 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]); 986 WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]); 987 988 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]); 989 WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]); 990 991 WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]); 992 WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]); 993 994 WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]); 995 WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]); 996 WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]); 997 WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]); 998 WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]); 999 WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]); 1000 1001 WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]); 1002 WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]); 1003 1004 WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8); 1005 1006 for (n = 0; n < 8; n++) 1007 WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0); 1008} 1009 1010/* 1011 * 5.1.8 CS Block Connection Memory 
Initialisation 1012 */ 1013static void 1014hatm_init_cs_block_cm(struct hatm_softc *sc) 1015{ 1016 u_int n, i; 1017 u_int expt, mant, etrm, wcr, ttnrm, tnrm; 1018 uint32_t rate; 1019 uint32_t clkfreq, cellrate, decr; 1020 uint32_t *rg, rtg, val = 0; 1021 uint64_t drate; 1022 u_int buf, buf_limit; 1023 uint32_t base = READ4(sc, HE_REGO_RCMABR_BA); 1024 1025 for (n = 0; n < HE_REGL_CM_GQTBL; n++) 1026 WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0); 1027 for (n = 0; n < HE_REGL_CM_RGTBL; n++) 1028 WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0); 1029 1030 tnrm = 0; 1031 for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) { 1032 expt = (n >> 5) & 0x1f; 1033 mant = ((n & 0x18) << 4) | 0x7f; 1034 wcr = (1 << expt) * (mant + 512) / 512; 1035 etrm = n & 0x7; 1036 ttnrm = wcr / 10 / (1 << etrm); 1037 if (ttnrm > 255) 1038 ttnrm = 255; 1039 else if(ttnrm < 2) 1040 ttnrm = 2; 1041 tnrm = (tnrm << 8) | (ttnrm & 0xff); 1042 if (n % 4 == 0) 1043 WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm); 1044 } 1045 1046 clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; 1047 buf_limit = 4; 1048 1049 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; 1050 decr = cellrate / 32; 1051 1052 /* compute GRID top row in 1000 * cps */ 1053 for (n = 0; n < 16; n++) { 1054 u_int interval = clkfreq / cellrate; 1055 sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval; 1056 cellrate -= decr; 1057 } 1058 1059 /* compute the other rows according to 2.4 */ 1060 for (i = 1; i < 16; i++) 1061 for (n = 0; n < 16; n++) 1062 sc->rate_grid[i][n] = sc->rate_grid[i-1][n] / 1063 ((i < 14) ? 2 : 4); 1064 1065 /* first entry is line rate */ 1066 n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M); 1067 expt = (n >> 9) & 0x1f; 1068 mant = n & 0x1f0; 1069 sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512; 1070 1071 /* now build the conversion table - each 32 bit word contains 1072 * two entries - this gives a total of 0x400 16 bit entries. 
1073 * This table maps the truncated ATMF rate version into a grid index */ 1074 cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; 1075 rg = &sc->rate_grid[15][15]; 1076 1077 for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) { 1078 /* unpack the ATMF rate */ 1079 expt = rate >> 5; 1080 mant = (rate & 0x1f) << 4; 1081 1082 /* get the cell rate - minimum is 10 per second */ 1083 drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512; 1084 if (drate < 10 * 1000) 1085 drate = 10 * 1000; 1086 1087 /* now look up the grid index */ 1088 while (drate >= *rg && rg-- > &sc->rate_grid[0][0]) 1089 ; 1090 rg++; 1091 rtg = rg - &sc->rate_grid[0][0]; 1092 1093 /* now compute the buffer limit */ 1094 buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000; 1095 if (buf == 0) 1096 buf = 1; 1097 else if (buf > buf_limit) 1098 buf = buf_limit; 1099 1100 /* make value */ 1101 val = (val << 16) | (rtg << 8) | buf; 1102 1103 /* write */ 1104 if (rate % 2 == 1) 1105 WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val); 1106 } 1107} 1108 1109/* 1110 * Clear an unused receive group buffer pool 1111 */ 1112static void 1113hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large) 1114{ 1115 WRITE4(sc, HE_REGO_RBP_S(large, group), 0); 1116 WRITE4(sc, HE_REGO_RBP_T(large, group), 0); 1117 WRITE4(sc, HE_REGO_RBP_QI(large, group), 1); 1118 WRITE4(sc, HE_REGO_RBP_BL(large, group), 0); 1119} 1120 1121/* 1122 * Initialize a receive group buffer pool 1123 */ 1124static void 1125hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group, 1126 u_int large) 1127{ 1128 if (q->size == 0) { 1129 hatm_clear_rpool(sc, group, large); 1130 return; 1131 } 1132 1133 bzero(q->mem.base, q->mem.size); 1134 q->rbp = q->mem.base; 1135 q->head = q->tail = 0; 1136 1137 DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large], 1138 (u_long)q->mem.paddr)); 1139 1140 WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr); 1141 WRITE4(sc, HE_REGO_RBP_T(large, group), 0); 1142 WRITE4(sc, 
HE_REGO_RBP_QI(large, group), 1143 ((q->size - 1) << HE_REGS_RBP_SIZE) | 1144 HE_REGM_RBP_INTR_ENB | 1145 (q->thresh << HE_REGS_RBP_THRESH)); 1146 WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1); 1147} 1148 1149/* 1150 * Clear an unused receive buffer return queue 1151 */ 1152static void 1153hatm_clear_rbrq(struct hatm_softc *sc, u_int group) 1154{ 1155 WRITE4(sc, HE_REGO_RBRQ_ST(group), 0); 1156 WRITE4(sc, HE_REGO_RBRQ_H(group), 0); 1157 WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH)); 1158 WRITE4(sc, HE_REGO_RBRQ_I(group), 0); 1159} 1160 1161/* 1162 * Initialize receive buffer return queue 1163 */ 1164static void 1165hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group) 1166{ 1167 if (rq->size == 0) { 1168 hatm_clear_rbrq(sc, group); 1169 return; 1170 } 1171 1172 rq->rbrq = rq->mem.base; 1173 rq->head = 0; 1174 1175 DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr)); 1176 1177 WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr); 1178 WRITE4(sc, HE_REGO_RBRQ_H(group), 0); 1179 WRITE4(sc, HE_REGO_RBRQ_Q(group), 1180 (rq->thresh << HE_REGS_RBRQ_THRESH) | 1181 ((rq->size - 1) << HE_REGS_RBRQ_SIZE)); 1182 WRITE4(sc, HE_REGO_RBRQ_I(group), 1183 (rq->tout << HE_REGS_RBRQ_TIME) | 1184 (rq->pcnt << HE_REGS_RBRQ_COUNT)); 1185} 1186 1187/* 1188 * Clear an unused transmit buffer return queue N 1189 */ 1190static void 1191hatm_clear_tbrq(struct hatm_softc *sc, u_int group) 1192{ 1193 WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0); 1194 WRITE4(sc, HE_REGO_TBRQ_H(group), 0); 1195 WRITE4(sc, HE_REGO_TBRQ_S(group), 0); 1196 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1); 1197} 1198 1199/* 1200 * Initialize transmit buffer return queue N 1201 */ 1202static void 1203hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group) 1204{ 1205 if (tq->size == 0) { 1206 hatm_clear_tbrq(sc, group); 1207 return; 1208 } 1209 1210 tq->tbrq = tq->mem.base; 1211 tq->head = 0; 1212 1213 DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, 
(u_long)tq->mem.paddr)); 1214 1215 WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr); 1216 WRITE4(sc, HE_REGO_TBRQ_H(group), 0); 1217 WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1); 1218 WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh); 1219} 1220 1221/* 1222 * Initialize TPDRQ 1223 */ 1224static void 1225hatm_init_tpdrq(struct hatm_softc *sc) 1226{ 1227 struct hetpdrq *tq; 1228 1229 tq = &sc->tpdrq; 1230 tq->tpdrq = tq->mem.base; 1231 tq->tail = tq->head = 0; 1232 1233 DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr)); 1234 1235 WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr); 1236 WRITE4(sc, HE_REGO_TPDRQ_T, 0); 1237 WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1); 1238} 1239 1240/* 1241 * Function can be called by the infrastructure to start the card. 1242 */ 1243static void 1244hatm_init(void *p) 1245{ 1246 struct hatm_softc *sc = p; 1247 1248 mtx_lock(&sc->mtx); 1249 hatm_stop(sc); 1250 hatm_initialize(sc); 1251 mtx_unlock(&sc->mtx); 1252} 1253 1254enum { 1255 CTL_ISTATS, 1256}; 1257 1258/* 1259 * Sysctl handler 1260 */ 1261static int 1262hatm_sysctl(SYSCTL_HANDLER_ARGS) 1263{ 1264 struct hatm_softc *sc = arg1; 1265 uint32_t *ret; 1266 int error; 1267 size_t len; 1268 1269 switch (arg2) { 1270 1271 case CTL_ISTATS: 1272 len = sizeof(sc->istats); 1273 break; 1274 1275 default: 1276 panic("bad control code"); 1277 } 1278 1279 ret = malloc(len, M_TEMP, M_WAITOK); 1280 mtx_lock(&sc->mtx); 1281 1282 switch (arg2) { 1283 1284 case CTL_ISTATS: 1285 sc->istats.mcc += READ4(sc, HE_REGO_MCC); 1286 sc->istats.oec += READ4(sc, HE_REGO_OEC); 1287 sc->istats.dcc += READ4(sc, HE_REGO_DCC); 1288 sc->istats.cec += READ4(sc, HE_REGO_CEC); 1289 bcopy(&sc->istats, ret, sizeof(sc->istats)); 1290 break; 1291 } 1292 mtx_unlock(&sc->mtx); 1293 1294 error = SYSCTL_OUT(req, ret, len); 1295 free(ret, M_TEMP); 1296 1297 return (error); 1298} 1299 1300static int 1301kenv_getuint(struct hatm_softc *sc, const char *var, 1302 u_int *ptr, u_int def, int rw) 1303{ 1304 char full[IFNAMSIZ + 3 + 
/*
 * Set configurable parameters. Many of these are configurable via
 * kenv. Each kenv_getuint() call also registers a read-only sysctl
 * under the device's tree; failures are ignored here since the
 * defaults from if_hatmconf.h remain in place.
 */
static int
hatm_configure(struct hatm_softc *sc)
{
	/* Receive buffer pool 0 small */
	kenv_getuint(sc, "rbps0_size", &sc->rbp_s0.size,
	    HE_CONFIG_RBPS0_SIZE, 0);
	kenv_getuint(sc, "rbps0_thresh", &sc->rbp_s0.thresh,
	    HE_CONFIG_RBPS0_THRESH, 0);
	sc->rbp_s0.bsize = MBUF0_SIZE;

	/* Receive buffer pool 0 large */
	kenv_getuint(sc, "rbpl0_size", &sc->rbp_l0.size,
	    HE_CONFIG_RBPL0_SIZE, 0);
	kenv_getuint(sc, "rbpl0_thresh", &sc->rbp_l0.thresh,
	    HE_CONFIG_RBPL0_THRESH, 0);
	sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;

	/* Receive buffer return queue 0 */
	kenv_getuint(sc, "rbrq0_size", &sc->rbrq_0.size,
	    HE_CONFIG_RBRQ0_SIZE, 0);
	kenv_getuint(sc, "rbrq0_thresh", &sc->rbrq_0.thresh,
	    HE_CONFIG_RBRQ0_THRESH, 0);
	kenv_getuint(sc, "rbrq0_tout", &sc->rbrq_0.tout,
	    HE_CONFIG_RBRQ0_TOUT, 0);
	kenv_getuint(sc, "rbrq0_pcnt", &sc->rbrq_0.pcnt,
	    HE_CONFIG_RBRQ0_PCNT, 0);

	/* Receive buffer pool 1 small */
	kenv_getuint(sc, "rbps1_size", &sc->rbp_s1.size,
	    HE_CONFIG_RBPS1_SIZE, 0);
	kenv_getuint(sc, "rbps1_thresh", &sc->rbp_s1.thresh,
	    HE_CONFIG_RBPS1_THRESH, 0);
	sc->rbp_s1.bsize = MBUF1_SIZE;

	/* Receive buffer return queue 1 */
	kenv_getuint(sc, "rbrq1_size", &sc->rbrq_1.size,
	    HE_CONFIG_RBRQ1_SIZE, 0);
	kenv_getuint(sc, "rbrq1_thresh", &sc->rbrq_1.thresh,
	    HE_CONFIG_RBRQ1_THRESH, 0);
	kenv_getuint(sc, "rbrq1_tout", &sc->rbrq_1.tout,
	    HE_CONFIG_RBRQ1_TOUT, 0);
	kenv_getuint(sc, "rbrq1_pcnt", &sc->rbrq_1.pcnt,
	    HE_CONFIG_RBRQ1_PCNT, 0);

	/* Interrupt queue 0 */
	kenv_getuint(sc, "irq0_size", &sc->irq_0.size,
	    HE_CONFIG_IRQ0_SIZE, 0);
	kenv_getuint(sc, "irq0_thresh", &sc->irq_0.thresh,
	    HE_CONFIG_IRQ0_THRESH, 0);
	sc->irq_0.line = HE_CONFIG_IRQ0_LINE;

	/* Transmit buffer return queue 0 */
	kenv_getuint(sc, "tbrq0_size", &sc->tbrq.size,
	    HE_CONFIG_TBRQ_SIZE, 0);
	kenv_getuint(sc, "tbrq0_thresh", &sc->tbrq.thresh,
	    HE_CONFIG_TBRQ_THRESH, 0);

	/* Transmit buffer ready queue */
	kenv_getuint(sc, "tpdrq_size", &sc->tpdrq.size,
	    HE_CONFIG_TPDRQ_SIZE, 0);
	/* Max TPDs per VCC */
	kenv_getuint(sc, "tpdmax", &sc->max_tpd,
	    HE_CONFIG_TPD_MAXCC, 0);

	/* external mbuf pages */
	kenv_getuint(sc, "max_mbuf_pages", &sc->mbuf_max_pages,
	    HE_CONFIG_MAX_MBUF_PAGES, 0);

	return (0);
}

#ifdef HATM_DEBUG

/*
 * Get TSRs from connection memory.
 * Debug-only read-only sysctl: dumps 15 transmission status registers
 * for each possible VCC. Writing to the node returns EPERM.
 */
static int
hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i, j;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_MAX_VCCS; i++)
		for (j = 0; j <= 14; j++)
			val[15 * i + j] = READ_TSR(sc, i, j);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get TPDs from connection memory.
 * Debug-only read-only sysctl: dumps 16 words of transmit connection
 * memory per VCC. Writing to the node returns EPERM.
 */
static int
hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i, j;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_MAX_VCCS; i++)
		for (j = 0; j < 16; j++)
			val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}
1439 struct hatm_softc *sc = arg1; 1440 int error, i, j; 1441 uint32_t *val; 1442 1443 val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK); 1444 1445 mtx_lock(&sc->mtx); 1446 for (i = 0; i < HE_MAX_VCCS; i++) 1447 for (j = 0; j < 16; j++) 1448 val[16 * i + j] = READ_TCM4(sc, 16 * i + j); 1449 mtx_unlock(&sc->mtx); 1450 1451 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16); 1452 free(val, M_TEMP); 1453 if (error != 0 || req->newptr == NULL) 1454 return (error); 1455 1456 return (EPERM); 1457} 1458 1459/* 1460 * Get mbox registers 1461 */ 1462static int 1463hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS) 1464{ 1465 struct hatm_softc *sc = arg1; 1466 int error, i; 1467 uint32_t *val; 1468 1469 val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK); 1470 1471 mtx_lock(&sc->mtx); 1472 for (i = 0; i < HE_REGO_CS_END; i++) 1473 val[i] = READ_MBOX4(sc, i); 1474 mtx_unlock(&sc->mtx); 1475 1476 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END); 1477 free(val, M_TEMP); 1478 if (error != 0 || req->newptr == NULL) 1479 return (error); 1480 1481 return (EPERM); 1482} 1483 1484/* 1485 * Get connection memory 1486 */ 1487static int 1488hatm_sysctl_cm(SYSCTL_HANDLER_ARGS) 1489{ 1490 struct hatm_softc *sc = arg1; 1491 int error, i; 1492 uint32_t *val; 1493 1494 val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK); 1495 1496 mtx_lock(&sc->mtx); 1497 val[0] = READ4(sc, HE_REGO_RCMABR_BA); 1498 for (i = 0; i < HE_CONFIG_RXMEM; i++) 1499 val[i + 1] = READ_RCM4(sc, i); 1500 mtx_unlock(&sc->mtx); 1501 1502 error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1)); 1503 free(val, M_TEMP); 1504 if (error != 0 || req->newptr == NULL) 1505 return (error); 1506 1507 return (EPERM); 1508} 1509 1510/* 1511 * Get local buffer memory 1512 */ 1513static int 1514hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS) 1515{ 1516 struct hatm_softc *sc = arg1; 1517 int error, i; 1518 uint32_t *val; 1519 u_int bytes = (1 << 21); 1520 1521 
val = malloc(bytes, M_TEMP, M_WAITOK); 1522 1523 mtx_lock(&sc->mtx); 1524 for (i = 0; i < bytes / 4; i++) 1525 val[i] = READ_LB4(sc, i); 1526 mtx_unlock(&sc->mtx); 1527 1528 error = SYSCTL_OUT(req, val, bytes); 1529 free(val, M_TEMP); 1530 if (error != 0 || req->newptr == NULL) 1531 return (error); 1532 1533 return (EPERM); 1534} 1535 1536/* 1537 * Get all card registers 1538 */ 1539static int 1540hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS) 1541{ 1542 struct hatm_softc *sc = arg1; 1543 int error, i; 1544 uint32_t *val; 1545 1546 val = malloc(HE_REGO_END, M_TEMP, M_WAITOK); 1547 1548 mtx_lock(&sc->mtx); 1549 for (i = 0; i < HE_REGO_END; i += 4) 1550 val[i / 4] = READ4(sc, i); 1551 mtx_unlock(&sc->mtx); 1552 1553 error = SYSCTL_OUT(req, val, HE_REGO_END); 1554 free(val, M_TEMP); 1555 if (error != 0 || req->newptr == NULL) 1556 return (error); 1557 1558 return (EPERM); 1559} 1560#endif 1561 1562/* 1563 * Suni register access 1564 */ 1565/* 1566 * read at most n SUNI registers starting at reg into val 1567 */ 1568static int 1569hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n) 1570{ 1571 u_int i; 1572 struct hatm_softc *sc = (struct hatm_softc *)ifatm; 1573 1574 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) 1575 return (EINVAL); 1576 if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) 1577 *n = reg - (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4; 1578 1579 mtx_assert(&sc->mtx, MA_OWNED); 1580 for (i = 0; i < *n; i++) 1581 val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i)); 1582 1583 return (0); 1584} 1585 1586/* 1587 * change the bits given by mask to them in val in register reg 1588 */ 1589static int 1590hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val) 1591{ 1592 uint32_t regval; 1593 struct hatm_softc *sc = (struct hatm_softc *)ifatm; 1594 1595 if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) 1596 return (EINVAL); 1597 1598 mtx_assert(&sc->mtx, MA_OWNED); 1599 regval = READ4(sc, HE_REGO_SUNI + 4 * reg); 1600 regval = 
(regval & ~mask) | (val & mask); 1601 WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval); 1602 1603 return (0); 1604} 1605 1606static struct utopia_methods hatm_utopia_methods = { 1607 hatm_utopia_readregs, 1608 hatm_utopia_writereg, 1609}; 1610 1611/* 1612 * Detach - if it is running, stop. Destroy. 1613 */ 1614static int 1615hatm_detach(device_t dev) 1616{ 1617 struct hatm_softc *sc = (struct hatm_softc *)device_get_softc(dev); 1618 1619 mtx_lock(&sc->mtx); 1620 hatm_stop(sc); 1621 if (sc->utopia.state & UTP_ST_ATTACHED) { 1622 utopia_stop(&sc->utopia); 1623 utopia_detach(&sc->utopia); 1624 } 1625 mtx_unlock(&sc->mtx); 1626 1627 atm_ifdetach(&sc->ifatm.ifnet); 1628 1629 hatm_destroy(sc); 1630 1631 return (0); 1632} 1633 1634/* 1635 * Attach to the device. Assume that no locking is needed here. 1636 * All resource we allocate here are freed by calling hatm_destroy. 1637 */ 1638static int 1639hatm_attach(device_t dev) 1640{ 1641 struct hatm_softc *sc; 1642 int unit; 1643 int error; 1644 uint32_t v; 1645 struct ifnet *ifp; 1646 1647 sc = device_get_softc(dev); 1648 unit = device_get_unit(dev); 1649 1650 sc->dev = dev; 1651 sc->ifatm.mib.device = ATM_DEVICE_HE155; 1652 sc->ifatm.mib.serial = 0; 1653 sc->ifatm.mib.hw_version = 0; 1654 sc->ifatm.mib.sw_version = 0; 1655 sc->ifatm.mib.vpi_bits = HE_CONFIG_VPI_BITS; 1656 sc->ifatm.mib.vci_bits = HE_CONFIG_VCI_BITS; 1657 sc->ifatm.mib.max_vpcs = 0; 1658 sc->ifatm.mib.max_vccs = HE_MAX_VCCS; 1659 sc->ifatm.mib.media = IFM_ATM_UNKNOWN; 1660 sc->he622 = 0; 1661 sc->ifatm.phy = &sc->utopia; 1662 1663 SLIST_INIT(&sc->tpd_free); 1664 1665 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); 1666 cv_init(&sc->vcc_cv, "HEVCCcv"); 1667 cv_init(&sc->cv_rcclose, "RCClose"); 1668 1669 sysctl_ctx_init(&sc->sysctl_ctx); 1670 1671 /* 1672 * 4.2 BIOS Configuration 1673 */ 1674 v = pci_read_config(dev, PCIR_COMMAND, 2); 1675 v |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN; 1676 pci_write_config(dev, PCIR_COMMAND, 
v, 2); 1677 1678 /* 1679 * 4.3 PCI Bus Controller-Specific Initialisation 1680 */ 1681 v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4); 1682 v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT; 1683#if BYTE_ORDER == BIG_ENDIAN && 0 1684 v |= HE_PCIM_CTL0_BIGENDIAN; 1685#endif 1686 pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4); 1687 1688 /* 1689 * Map memory 1690 */ 1691 v = pci_read_config(dev, PCIR_COMMAND, 2); 1692 if (!(v & PCIM_CMD_MEMEN)) { 1693 device_printf(dev, "failed to enable memory\n"); 1694 error = ENXIO; 1695 goto failed; 1696 } 1697 sc->memid = PCIR_BAR(0); 1698 sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid, 1699 0, ~0, 1, RF_ACTIVE); 1700 if (sc->memres == NULL) { 1701 device_printf(dev, "could not map memory\n"); 1702 error = ENXIO; 1703 goto failed; 1704 } 1705 sc->memh = rman_get_bushandle(sc->memres); 1706 sc->memt = rman_get_bustag(sc->memres); 1707 1708 /* 1709 * ALlocate a DMA tag for subsequent allocations 1710 */ 1711 if (bus_dma_tag_create(NULL, 1, 0, 1712 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1713 NULL, NULL, 1714 BUS_SPACE_MAXSIZE_32BIT, 1, 1715 BUS_SPACE_MAXSIZE_32BIT, 0, 1716 NULL, NULL, &sc->parent_tag)) { 1717 device_printf(dev, "could not allocate DMA tag\n"); 1718 error = ENOMEM; 1719 goto failed; 1720 } 1721 1722 if (bus_dma_tag_create(sc->parent_tag, 1, 0, 1723 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1724 NULL, NULL, 1725 MBUF_ALLOC_SIZE, 1, 1726 MBUF_ALLOC_SIZE, 0, 1727 NULL, NULL, &sc->mbuf_tag)) { 1728 device_printf(dev, "could not allocate mbuf DMA tag\n"); 1729 error = ENOMEM; 1730 goto failed; 1731 } 1732 1733 /* 1734 * Allocate a DMA tag for packets to send. Here we have a problem with 1735 * the specification of the maximum number of segments. Theoretically 1736 * this would be the size of the transmit ring - 1 multiplied by 3, 1737 * but this would not work. So make the maximum number of TPDs 1738 * occupied by one packet a configuration parameter. 
1739 */ 1740 if (bus_dma_tag_create(NULL, 1, 0, 1741 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1742 HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0, 1743 NULL, NULL, &sc->tx_tag)) { 1744 device_printf(dev, "could not allocate TX tag\n"); 1745 error = ENOMEM; 1746 goto failed; 1747 } 1748 1749 /* 1750 * Setup the interrupt 1751 */ 1752 sc->irqid = 0; 1753 sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid, 1754 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); 1755 if (sc->irqres == 0) { 1756 device_printf(dev, "could not allocate irq\n"); 1757 error = ENXIO; 1758 goto failed; 1759 } 1760 1761 ifp = &sc->ifatm.ifnet; 1762 ifp->if_softc = sc; 1763 ifp->if_unit = unit; 1764 ifp->if_name = "hatm"; 1765 1766 /* 1767 * Make the sysctl tree 1768 */ 1769 error = ENOMEM; 1770 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 1771 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO, 1772 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL) 1773 goto failed; 1774 1775 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1776 OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS, 1777 hatm_sysctl, "LU", "internal statistics") == NULL) 1778 goto failed; 1779 1780#ifdef HATM_DEBUG 1781 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1782 OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, 1783 hatm_sysctl_tsr, "S", "transmission status registers") == NULL) 1784 goto failed; 1785 1786 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1787 OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, 1788 hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL) 1789 goto failed; 1790 1791 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1792 OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, 1793 hatm_sysctl_mbox, "S", "mbox registers") == NULL) 1794 goto failed; 1795 1796 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1797 OID_AUTO, "cm", 
CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, 1798 hatm_sysctl_cm, "S", "connection memory") == NULL) 1799 goto failed; 1800 1801 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1802 OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, 1803 hatm_sysctl_heregs, "S", "card registers") == NULL) 1804 goto failed; 1805 1806 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1807 OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, 1808 hatm_sysctl_lbmem, "S", "local memory") == NULL) 1809 goto failed; 1810 1811 kenv_getuint(sc, "debug", &sc->debug, HATM_DEBUG, 1); 1812#endif 1813 1814 /* 1815 * Configure 1816 */ 1817 if ((error = hatm_configure(sc)) != 0) 1818 goto failed; 1819 1820 /* 1821 * Compute memory parameters 1822 */ 1823 if (sc->rbp_s0.size != 0) { 1824 sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3; 1825 sc->rbp_s0.mem.size = sc->rbp_s0.size * 8; 1826 sc->rbp_s0.mem.align = sc->rbp_s0.mem.size; 1827 } 1828 if (sc->rbp_l0.size != 0) { 1829 sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3; 1830 sc->rbp_l0.mem.size = sc->rbp_l0.size * 8; 1831 sc->rbp_l0.mem.align = sc->rbp_l0.mem.size; 1832 } 1833 if (sc->rbp_s1.size != 0) { 1834 sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3; 1835 sc->rbp_s1.mem.size = sc->rbp_s1.size * 8; 1836 sc->rbp_s1.mem.align = sc->rbp_s1.mem.size; 1837 } 1838 if (sc->rbrq_0.size != 0) { 1839 sc->rbrq_0.mem.size = sc->rbrq_0.size * 8; 1840 sc->rbrq_0.mem.align = sc->rbrq_0.mem.size; 1841 } 1842 if (sc->rbrq_1.size != 0) { 1843 sc->rbrq_1.mem.size = sc->rbrq_1.size * 8; 1844 sc->rbrq_1.mem.align = sc->rbrq_1.mem.size; 1845 } 1846 1847 sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t); 1848 sc->irq_0.mem.align = 4 * 1024; 1849 1850 sc->tbrq.mem.size = sc->tbrq.size * 4; 1851 sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */ 1852 1853 sc->tpdrq.mem.size = sc->tpdrq.size * 8; 1854 sc->tpdrq.mem.align = sc->tpdrq.mem.size; 1855 1856 sc->hsp_mem.size = sizeof(struct he_hsp); 1857 sc->hsp_mem.align = 1024; 
1858 1859 sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size; 1860 sc->tpd_total = sc->tbrq.size + sc->tpdrq.size; 1861 sc->tpds.align = 64; 1862 sc->tpds.size = sc->tpd_total * HE_TPD_SIZE; 1863 1864 hatm_init_rmaps(sc); 1865 hatm_init_smbufs(sc); 1866 if ((error = hatm_init_tpds(sc)) != 0) 1867 goto failed; 1868 1869 /* 1870 * Allocate memory 1871 */ 1872 if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 || 1873 (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 || 1874 (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 || 1875 (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0) 1876 goto failed; 1877 1878 if (sc->rbp_s0.mem.size != 0 && 1879 (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem))) 1880 goto failed; 1881 if (sc->rbp_l0.mem.size != 0 && 1882 (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem))) 1883 goto failed; 1884 if (sc->rbp_s1.mem.size != 0 && 1885 (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem))) 1886 goto failed; 1887 1888 if (sc->rbrq_0.mem.size != 0 && 1889 (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem))) 1890 goto failed; 1891 if (sc->rbrq_1.mem.size != 0 && 1892 (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem))) 1893 goto failed; 1894 1895 if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc), 1896 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) { 1897 device_printf(dev, "cannot allocate zone for vccs\n"); 1898 goto failed; 1899 } 1900 1901 /* 1902 * 4.4 Reset the card. 1903 */ 1904 if ((error = hatm_reset(sc)) != 0) 1905 goto failed; 1906 1907 /* 1908 * Read the prom. 
1909 */ 1910 hatm_init_bus_width(sc); 1911 hatm_init_read_eeprom(sc); 1912 hatm_init_endianess(sc); 1913 1914 /* 1915 * Initialize interface 1916 */ 1917 ifp->if_flags = IFF_SIMPLEX; 1918 ifp->if_ioctl = hatm_ioctl; 1919 ifp->if_start = hatm_start; 1920 ifp->if_watchdog = NULL; 1921 ifp->if_init = hatm_init; 1922 1923 utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx, 1924 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1925 &hatm_utopia_methods); 1926 utopia_init_media(&sc->utopia); 1927 1928 /* these two SUNI routines need the lock */ 1929 mtx_lock(&sc->mtx); 1930 /* poll while we are not running */ 1931 sc->utopia.flags |= UTP_FL_POLL_CARRIER; 1932 utopia_start(&sc->utopia); 1933 utopia_reset(&sc->utopia); 1934 mtx_unlock(&sc->mtx); 1935 1936 atm_ifattach(ifp); 1937 1938#ifdef ENABLE_BPF 1939 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc)); 1940#endif 1941 1942 error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, hatm_intr, 1943 &sc->irq_0, &sc->ih); 1944 if (error != 0) { 1945 device_printf(dev, "could not setup interrupt\n"); 1946 hatm_detach(dev); 1947 return (error); 1948 } 1949 1950 return (0); 1951 1952 failed: 1953 hatm_destroy(sc); 1954 return (error); 1955} 1956 1957/* 1958 * Start the interface. Assume a state as from attach(). 1959 */ 1960void 1961hatm_initialize(struct hatm_softc *sc) 1962{ 1963 uint32_t v; 1964 u_int cid; 1965 static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT; 1966 1967 if (sc->ifatm.ifnet.if_flags & IFF_RUNNING) 1968 return; 1969 1970 hatm_init_bus_width(sc); 1971 hatm_init_endianess(sc); 1972 1973 if_printf(&sc->ifatm.ifnet, "%s, Rev. %s, S/N %u, " 1974 "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n", 1975 sc->prod_id, sc->rev, sc->ifatm.mib.serial, 1976 sc->ifatm.mib.esi[0], sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], 1977 sc->ifatm.mib.esi[3], sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], 1978 sc->pci64 ? 
64 : 32); 1979 1980 /* 1981 * 4.8 SDRAM Controller Initialisation 1982 * 4.9 Initialize RNUM value 1983 */ 1984 if (sc->he622) 1985 WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT); 1986 else 1987 WRITE4(sc, HE_REGO_SDRAM_CNTL, 0); 1988 BARRIER_W(sc); 1989 1990 v = READ4(sc, HE_REGO_LB_SWAP); 1991 BARRIER_R(sc); 1992 v |= 0xf << HE_REGS_LBSWAP_RNUM; 1993 WRITE4(sc, HE_REGO_LB_SWAP, v); 1994 BARRIER_W(sc); 1995 1996 hatm_init_irq(sc, &sc->irq_0, 0); 1997 hatm_clear_irq(sc, 1); 1998 hatm_clear_irq(sc, 2); 1999 hatm_clear_irq(sc, 3); 2000 2001 WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0); 2002 WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0); 2003 WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0); 2004 WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0); 2005 BARRIER_W(sc); 2006 2007 /* 2008 * 4.11 Enable PCI Bus Controller State Machine 2009 */ 2010 v = READ4(sc, HE_REGO_HOST_CNTL); 2011 BARRIER_R(sc); 2012 v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB | 2013 HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR; 2014 WRITE4(sc, HE_REGO_HOST_CNTL, v); 2015 BARRIER_W(sc); 2016 2017 /* 2018 * 5.1.1 Generic configuration state 2019 */ 2020 sc->cells_per_row = layout[sc->he622][0]; 2021 sc->bytes_per_row = layout[sc->he622][1]; 2022 sc->r0_numrows = layout[sc->he622][2]; 2023 sc->tx_numrows = layout[sc->he622][3]; 2024 sc->r1_numrows = layout[sc->he622][4]; 2025 sc->r0_startrow = layout[sc->he622][5]; 2026 sc->tx_startrow = sc->r0_startrow + sc->r0_numrows; 2027 sc->r1_startrow = sc->tx_startrow + sc->tx_numrows; 2028 sc->cells_per_lbuf = layout[sc->he622][6]; 2029 2030 sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row / 2031 sc->cells_per_lbuf); 2032 sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row / 2033 sc->cells_per_lbuf); 2034 sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row / 2035 sc->cells_per_lbuf); 2036 2037 if (sc->r0_numbuffs > 2560) 2038 sc->r0_numbuffs = 2560; 2039 if (sc->r1_numbuffs > 2560) 2040 sc->r1_numbuffs = 2560; 2041 if (sc->tx_numbuffs > 5120) 2042 sc->tx_numbuffs = 5120; 2043 2044 
DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u " 2045 "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u " 2046 "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u " 2047 "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row, 2048 sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow, 2049 sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf, 2050 sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs)); 2051 2052 /* 2053 * 5.1.2 Configure Hardware dependend registers 2054 */ 2055 if (sc->he622) { 2056 WRITE4(sc, HE_REGO_LBARB, 2057 (0x2 << HE_REGS_LBARB_SLICE) | 2058 (0xf << HE_REGS_LBARB_RNUM) | 2059 (0x3 << HE_REGS_LBARB_THPRI) | 2060 (0x3 << HE_REGS_LBARB_RHPRI) | 2061 (0x2 << HE_REGS_LBARB_TLPRI) | 2062 (0x1 << HE_REGS_LBARB_RLPRI) | 2063 (0x28 << HE_REGS_LBARB_BUS_MULT) | 2064 (0x50 << HE_REGS_LBARB_NET_PREF)); 2065 BARRIER_W(sc); 2066 WRITE4(sc, HE_REGO_SDRAMCON, 2067 /* HW bug: don't use banking */ 2068 /* HE_REGM_SDRAMCON_BANK | */ 2069 HE_REGM_SDRAMCON_WIDE | 2070 (0x384 << HE_REGS_SDRAMCON_REF)); 2071 BARRIER_W(sc); 2072 WRITE4(sc, HE_REGO_RCMCONFIG, 2073 (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) | 2074 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) | 2075 (0x0 << HE_REGS_RCMCONFIG_TYPE)); 2076 WRITE4(sc, HE_REGO_TCMCONFIG, 2077 (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) | 2078 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) | 2079 (0x0 << HE_REGS_TCMCONFIG_TYPE)); 2080 } else { 2081 WRITE4(sc, HE_REGO_LBARB, 2082 (0x2 << HE_REGS_LBARB_SLICE) | 2083 (0xf << HE_REGS_LBARB_RNUM) | 2084 (0x3 << HE_REGS_LBARB_THPRI) | 2085 (0x3 << HE_REGS_LBARB_RHPRI) | 2086 (0x2 << HE_REGS_LBARB_TLPRI) | 2087 (0x1 << HE_REGS_LBARB_RLPRI) | 2088 (0x46 << HE_REGS_LBARB_BUS_MULT) | 2089 (0x8C << HE_REGS_LBARB_NET_PREF)); 2090 BARRIER_W(sc); 2091 WRITE4(sc, HE_REGO_SDRAMCON, 2092 /* HW bug: don't use banking */ 2093 /* HE_REGM_SDRAMCON_BANK | */ 2094 (0x150 << HE_REGS_SDRAMCON_REF)); 2095 BARRIER_W(sc); 2096 WRITE4(sc, HE_REGO_RCMCONFIG, 2097 (0x0 << 
HE_REGS_RCMCONFIG_BANK_WAIT) | 2098 (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) | 2099 (0x0 << HE_REGS_RCMCONFIG_TYPE)); 2100 WRITE4(sc, HE_REGO_TCMCONFIG, 2101 (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) | 2102 (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) | 2103 (0x0 << HE_REGS_TCMCONFIG_TYPE)); 2104 } 2105 WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48)); 2106 2107 WRITE4(sc, HE_REGO_RLBC_H, 0); 2108 WRITE4(sc, HE_REGO_RLBC_T, 0); 2109 WRITE4(sc, HE_REGO_RLBC_H2, 0); 2110 2111 WRITE4(sc, HE_REGO_RXTHRSH, 512); 2112 WRITE4(sc, HE_REGO_LITHRSH, 256); 2113 2114 WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs); 2115 WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs); 2116 2117 if (sc->he622) { 2118 WRITE4(sc, HE_REGO_RCCONFIG, 2119 (8 << HE_REGS_RCCONFIG_UTDELAY) | 2120 (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) | 2121 (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC)); 2122 WRITE4(sc, HE_REGO_TXCONFIG, 2123 (32 << HE_REGS_TXCONFIG_THRESH) | 2124 (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) | 2125 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE)); 2126 } else { 2127 WRITE4(sc, HE_REGO_RCCONFIG, 2128 (0 << HE_REGS_RCCONFIG_UTDELAY) | 2129 HE_REGM_RCCONFIG_UT_MODE | 2130 (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) | 2131 (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC)); 2132 WRITE4(sc, HE_REGO_TXCONFIG, 2133 (32 << HE_REGS_TXCONFIG_THRESH) | 2134 HE_REGM_TXCONFIG_UTMODE | 2135 (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) | 2136 (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE)); 2137 } 2138 2139 WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0); 2140 2141 if (sc->rbp_s1.size != 0) { 2142 WRITE4(sc, HE_REGO_RHCONFIG, 2143 HE_REGM_RHCONFIG_PHYENB | 2144 ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) | 2145 (1 << HE_REGS_RHCONFIG_OAM_GID)); 2146 } else { 2147 WRITE4(sc, HE_REGO_RHCONFIG, 2148 HE_REGM_RHCONFIG_PHYENB | 2149 ((sc->he622 ? 
0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) | 2150 (0 << HE_REGS_RHCONFIG_OAM_GID)); 2151 } 2152 BARRIER_W(sc); 2153 2154 hatm_init_cm(sc); 2155 2156 hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs); 2157 hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs); 2158 hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs); 2159 2160 hatm_init_imed_queues(sc); 2161 2162 /* 2163 * 5.1.6 Application tunable Parameters 2164 */ 2165 WRITE4(sc, HE_REGO_MCC, 0); 2166 WRITE4(sc, HE_REGO_OEC, 0); 2167 WRITE4(sc, HE_REGO_DCC, 0); 2168 WRITE4(sc, HE_REGO_CEC, 0); 2169 2170 hatm_init_cs_block(sc); 2171 hatm_init_cs_block_cm(sc); 2172 2173 hatm_init_rpool(sc, &sc->rbp_s0, 0, 0); 2174 hatm_init_rpool(sc, &sc->rbp_l0, 0, 1); 2175 hatm_init_rpool(sc, &sc->rbp_s1, 1, 0); 2176 hatm_clear_rpool(sc, 1, 1); 2177 hatm_clear_rpool(sc, 2, 0); 2178 hatm_clear_rpool(sc, 2, 1); 2179 hatm_clear_rpool(sc, 3, 0); 2180 hatm_clear_rpool(sc, 3, 1); 2181 hatm_clear_rpool(sc, 4, 0); 2182 hatm_clear_rpool(sc, 4, 1); 2183 hatm_clear_rpool(sc, 5, 0); 2184 hatm_clear_rpool(sc, 5, 1); 2185 hatm_clear_rpool(sc, 6, 0); 2186 hatm_clear_rpool(sc, 6, 1); 2187 hatm_clear_rpool(sc, 7, 0); 2188 hatm_clear_rpool(sc, 7, 1); 2189 hatm_init_rbrq(sc, &sc->rbrq_0, 0); 2190 hatm_init_rbrq(sc, &sc->rbrq_1, 1); 2191 hatm_clear_rbrq(sc, 2); 2192 hatm_clear_rbrq(sc, 3); 2193 hatm_clear_rbrq(sc, 4); 2194 hatm_clear_rbrq(sc, 5); 2195 hatm_clear_rbrq(sc, 6); 2196 hatm_clear_rbrq(sc, 7); 2197 2198 sc->lbufs_next = 0; 2199 bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size); 2200 2201 hatm_init_tbrq(sc, &sc->tbrq, 0); 2202 hatm_clear_tbrq(sc, 1); 2203 hatm_clear_tbrq(sc, 2); 2204 hatm_clear_tbrq(sc, 3); 2205 hatm_clear_tbrq(sc, 4); 2206 hatm_clear_tbrq(sc, 5); 2207 hatm_clear_tbrq(sc, 6); 2208 hatm_clear_tbrq(sc, 7); 2209 2210 hatm_init_tpdrq(sc); 2211 2212 WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 
	    0x104780 : 0x800));

	/*
	 * Initialize HSP (host status page): clear it and give the card
	 * its bus address.
	 */
	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
	sc->hsp = sc->hsp_mem.base;
	WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);

	/*
	 * 5.1.12 Enable transmit and receive
	 * Enable bus master and interrupts
	 */
	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
	v |= 0x18000000;	/* NOTE(review): presumably the TX enable bits --
				 * mirrored by the clear in hatm_stop(); verify
				 * against the HE documentation. */
	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);

	/* Enable the receive side. */
	v = READ4(sc, HE_REGO_RCCONFIG);
	v |= HE_REGM_RCCONFIG_RXENB;
	WRITE4(sc, HE_REGO_RCCONFIG, v);

	/* Enable bus mastering and interrupt processing in PCI config space. */
	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
	v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);

	/* Mark the interface as running and set the line rate (53-byte cells). */
	sc->ifatm.ifnet.if_flags |= IFF_RUNNING;
	sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;

	sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;

	/* reopen vccs that were open before the (re-)initialisation */
	for (cid = 0; cid < HE_MAX_VCCS; cid++)
		if (sc->vccs[cid] != NULL)
			hatm_load_vc(sc, cid, 1);

	ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
	    sc->utopia.carrier == UTP_CARR_OK);
}

/*
 * This function stops the card and frees all resources allocated after
 * the attach. Must have the global lock.
 */
void
hatm_stop(struct hatm_softc *sc)
{
	uint32_t v;
	u_int i, p, cid;
	struct mbuf_chunk_hdr *ch;
	struct mbuf_page *pg;

	mtx_assert(&sc->mtx, MA_OWNED);

	/* Nothing to do if the interface is not running. */
	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
		return;
	sc->ifatm.ifnet.if_flags &= ~IFF_RUNNING;

	ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
	    sc->utopia.carrier == UTP_CARR_OK);

	sc->utopia.flags |= UTP_FL_POLL_CARRIER;

	/*
	 * Stop and reset the hardware so that everything remains
	 * stable.
	 */
	/* Clear the enable bits that were set during initialisation. */
	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
	v &= ~0x18000000;
	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);

	/* Disable the receive side. */
	v = READ4(sc, HE_REGO_RCCONFIG);
	v &= ~HE_REGM_RCCONFIG_RXENB;
	WRITE4(sc, HE_REGO_RCCONFIG, v);

	WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
	BARRIER_W(sc);

	/* Disable the host command and output FIFOs. */
	v = READ4(sc, HE_REGO_HOST_CNTL);
	BARRIER_R(sc);
	v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
	WRITE4(sc, HE_REGO_HOST_CNTL, v);
	BARRIER_W(sc);

	/*
	 * Disable bus master and interrupts
	 */
	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
	v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);

	(void)hatm_reset(sc);

	/*
	 * The card resets the SUNI when reset, so re-initialize it.
	 */
	utopia_reset(&sc->utopia);

	/*
	 * Give any waiters on closing a VCC a chance. They will stop
	 * waiting if they see that IFF_RUNNING disappeared.
	 */
	while (!(cv_waitq_empty(&sc->vcc_cv))) {
		cv_broadcast(&sc->vcc_cv);
		DELAY(100);
	}
	while (!(cv_waitq_empty(&sc->cv_rcclose))) {
		cv_broadcast(&sc->cv_rcclose);
	}

	/*
	 * Now free all resources.
	 */

	/*
	 * Free the large mbufs that are given to the card.
	 */
	for (i = 0 ; i < sc->lbufs_size; i++) {
		if (sc->lbufs[i] != NULL) {
			bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
			m_freem(sc->lbufs[i]);
			sc->lbufs[i] = NULL;
		}
	}

	/*
	 * Free small buffers
	 */
	for (p = 0; p < sc->mbuf_npages; p++) {
		pg = sc->mbuf_pages[p];
		for (i = 0; i < pg->hdr.nchunks; i++) {
			ch = (struct mbuf_chunk_hdr *) ((char *)pg +
			    i * pg->hdr.chunksize + pg->hdr.hdroff);
			if (ch->flags & MBUF_CARD) {
				/* reclaim chunks still owned by the card */
				ch->flags &= ~MBUF_CARD;
				ch->flags |= MBUF_USED;
				hatm_ext_free(&sc->mbuf_list[pg->hdr.pool],
				    (struct mbufx_free *)((u_char *)ch -
				    pg->hdr.hdroff));
			}
		}
	}

	hatm_stop_tpds(sc);

	/*
	 * Free all partially reassembled PDUs on any VCC.
	 */
	for (cid = 0; cid < HE_MAX_VCCS; cid++) {
		if (sc->vccs[cid] != NULL) {
			if (sc->vccs[cid]->chain != NULL) {
				m_freem(sc->vccs[cid]->chain);
				sc->vccs[cid]->chain = NULL;
				sc->vccs[cid]->last = NULL;
			}
			if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN |
			    HE_VCC_TX_OPEN))) {
				/* Not open from the user's point of view --
				 * drop the VCC entirely. */
				hatm_tx_vcc_closed(sc, cid);
				uma_zfree(sc->vcc_zone, sc->vccs[cid]);
				sc->vccs[cid] = NULL;
				sc->open_vccs--;
			} else {
				/* Keep the VCC but forget its state. */
				sc->vccs[cid]->vflags = 0;
				sc->vccs[cid]->ntpds = 0;
			}
		}
	}

	/* Clear the DMA-able buffer-pool and queue memory. */
	if (sc->rbp_s0.size != 0)
		bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
	if (sc->rbp_l0.size != 0)
		bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
	if (sc->rbp_s1.size != 0)
		bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
	if (sc->rbrq_0.size != 0)
		bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
	if (sc->rbrq_1.size != 0)
		bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);

	bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
	bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
}

/************************************************************
 *
 * Driver infrastructure
 */

/* Device class shared by all hatm(4) instances. */
devclass_t hatm_devclass;

/*
 * Newbus device interface method table. The probe/attach/detach
 * implementations are hatm_probe(), hatm_attach() and hatm_detach().
 */
static device_method_t hatm_methods[] = {
	DEVMETHOD(device_probe, hatm_probe),
	DEVMETHOD(device_attach, hatm_attach),
	DEVMETHOD(device_detach, hatm_detach),
	{0,0}	/* terminator */
};

/* Driver description handed to the newbus framework. */
static driver_t hatm_driver = {
	"hatm",				/* driver name */
	hatm_methods,			/* method table */
	sizeof(struct hatm_softc),	/* size of the per-device softc */
};

/* Register the driver on the PCI bus. */
DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);