 *
 */

/*
 * DEC PDQ FDDI Controller O/S independent code
 *
 * This module should work any PDQ based board. Note that changes for
 * MIPS and Alpha architectures (or any other architecture which requires
 * a flushing of memory or write buffers and/or has incoherent caches)
 * have yet to be made.
 *
 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
 * flushing of the write buffers.
 */

#define PDQ_HWSUPPORT /* for pdq.h */

#if defined(__FreeBSD__)
#include <dev/pdq/pdqvar.h>
#include <dev/pdq/pdqreg.h>
#else
#include "pdqvar.h"
#include "pdqreg.h"
#endif

/* Round n up to a multiple of x (x must be a power of two). */
#define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
#define PDQ_CMD_RX_ALIGNMENT 16

#if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
#define PDQ_PRINTF(x) printf x
#else
#define PDQ_PRINTF(x) do { } while (0)
#endif

/* Text for the halt reason reported in the port status register. */
static const char * const pdq_halt_codes[] = {
    "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
    "Software Fault", "Hardware Fault", "PC Trace Path Test",
    "DMA Error", "Image CRC Error", "Adapter Processer Error"
};

/* Text for the adapter state field of the port status register. */
static const char * const pdq_adapter_states[] = {
    "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
    "Link Available", "Link Unavailable", "Halted", "Ring Member"
};

/*
 * The following are used in conjunction with
 * unsolicited events
 */
static const char * const pdq_entities[] = {
    "Station", "Link", "Phy Port"
};

static const char * const pdq_station_events[] = {
    "Trace Received"
};

static const char * const pdq_station_arguments[] = {
    "Reason"
};

/* Link event names, indexed by event code (NULL = reserved/unused code). */
static const char * const pdq_link_events[] = {
    "Transmit Underrun",
    "Transmit Failed",
    "Block Check Error (CRC)",
    "Frame Status Error",
    "PDU Length Error",
    NULL,
    NULL,
    "Receive Data Overrun",
    NULL,
    "No User Buffer",
    "Ring Initialization Initiated",
    "Ring Initialization Received",
    "Ring Beacon Initiated",
    "Duplicate Address Failure",
    "Duplicate Token Detected",
    "Ring Purger Error",
    "FCI Strip Error",
    "Trace Initiated",
    "Directed Beacon Received",
};

static const char * const pdq_link_arguments[] = {
    "Reason",
    "Data Link Header",
    "Source",
    "Upstream Neighbor"
};

static const char * const pdq_phy_events[] = {
    "LEM Error Monitor Reject",
    "Elasticy Buffer Error",
    "Link Confidence Test Reject"
};

static const char * const pdq_phy_arguments[] = {
    "Direction"
};

/* Per-entity argument-name tables, indexed by pdq_entities order. */
static const char * const * const pdq_event_arguments[] = {
    pdq_station_arguments,
    pdq_link_arguments,
    pdq_phy_arguments
};

/* Per-entity event-name tables, indexed by pdq_entities order. */
static const char * const * const pdq_event_codes[] = {
    pdq_station_events,
    pdq_link_events,
    pdq_phy_events
};

static const char * const pdq_station_types[] = {
    "SAS", "DAC", "SAC", "NAC", "DAS"
};

static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };

/* PHY type letter, indexed by the adapter's phy_type code. */
static const char pdq_phy_types[] = "ABSM";

/* PMD names for type codes 0..3 and 100..103 (see pdq_pmd_types below). */
static const char * const pdq_pmd_types0[] = {
    "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
    "ANSI Sonet"
};

static const char * const pdq_pmd_types100[] = {
    "Low Power", "Thin Wire", "Shielded Twisted Pair",
    "Unshielded Twisted Pair"
};

/* Indexed as pdq_pmd_types[code / 100][code % 100]. */
static const char * const * const pdq_pmd_types[] = {
    pdq_pmd_types0, pdq_pmd_types100
};

/* Board description, indexed by pdq->pdq_type (PDQ_DEFPA, ...). */
static const char * const pdq_descriptions[] = {
    "DEFPA PCI",
    "DEFEA EISA",
    "DEFTA TC",
    "DEFAA Futurebus",
    "DEFQA Q-bus",
};

/*
 * Pretty-print the adapter identity from a STATUS_CHARS_GET response:
 * board/station type, FDDI MAC address, firmware/hardware revisions,
 * SMT version, and the PHY/PMD type of each port (both ports for a DAS).
 */
static void
pdq_print_fddi_chars(
    pdq_t *pdq,
    const pdq_response_status_chars_get_t *rsp)
{
    const char hexchars[] = "0123456789abcdef";

    printf(
#if !defined(__bsdi__) && !defined(__NetBSD__)
           PDQ_OS_PREFIX
#else
           ": "
#endif
           "DEC %s FDDI %s Controller\n",
#if !defined(__bsdi__) && !defined(__NetBSD__)
           PDQ_OS_PREFIX_ARGS,
#endif
           pdq_descriptions[pdq->pdq_type],
           pdq_station_types[rsp->status_chars_get.station_type]);

    /* MAC address is emitted nibble-by-nibble via the hexchars lookup. */
    printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
           PDQ_OS_PREFIX_ARGS,
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
           hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
           pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
           pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
           rsp->status_chars_get.module_rev.fwrev_bytes[0]);

    /*
     * NOTE(review): if smt_version_id is out of table range, no trailing
     * newline is printed for this line -- matches upstream behavior.
     */
    if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
        printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
    }

    printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
           PDQ_OS_PREFIX_ARGS,
           rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
           pdq_phy_types[rsp->status_chars_get.phy_type[0]],
           pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);

    /* A dual-attach station has a second port to report. */
    if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
        printf(", FDDI Port[B] = %c (PMD = %s)",
               pdq_phy_types[rsp->status_chars_get.phy_type[1]],
               pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);

    printf("\n");
}

/*
 * Fill in the CSR address map: each register lives at a fixed index
 * from csr_base, scaled by csrsize (the per-bus register spacing).
 * Note indexes 9, 14, 15 are not mapped here.
 */
static void
pdq_init_csrs(
    pdq_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
    csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
    csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
    csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
    csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
    csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
    csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
    csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
    csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
    csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
    csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
    csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
    csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
}

/*
 * Fill in the DEFPA-only PFI (PCI FDDI Interface) CSR map; these sit
 * above the common CSRs at indexes 16-19.
 */
static void
pdq_init_pci_csrs(
    pdq_pci_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
    csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
    csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
    csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
}

/* Dequeue and free every data buffer on the queue. */
static void
pdq_flush_databuf_queue(
    pdq_databuf_queue_t *q)
{
    PDQ_OS_DATABUF_T *pdu;
    for (;;) {
        PDQ_OS_DATABUF_DEQUEUE(q, pdu);
        if (pdu == NULL)
            return;
        PDQ_OS_DATABUF_FREE(pdu);
    }
}

/*
 * Issue a port-control command and busy-wait for completion.
 * The CMD_DONE interrupt bit is cleared first, then polled (bounded by
 * 33,000,000 spins so a dead adapter cannot hang the host forever).
 * Returns PDQ_TRUE on success, PDQ_FALSE if the adapter reported
 * PDQ_PCTL_CMD_ERROR or never completed (the latter asserts).
 */
static pdq_boolean_t
pdq_do_port_control(
    const pdq_csrs_t * const csrs,
    pdq_uint32_t cmd)
{
    int cnt = 0;
    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
    PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
    while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
        cnt++;
    PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
    if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
        /* Acknowledge completion, then check the error flag. */
        PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
        return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
    }
    /* adapter failure */
    PDQ_ASSERT(0);
    return PDQ_FALSE;
}

/*
 * Read the adapter's hardware FDDI address ("MLA" = My Long Address).
 * The address comes back in two 32-bit host_data reads, selected by
 * writing 0 then 1 to port_data_a; bytes are little-endian in the word.
 */
static void
pdq_read_mla(
    const pdq_csrs_t * const csrs,
    pdq_lanaddr_t *hwaddr)
{
    pdq_uint32_t data;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
    pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
    hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
    hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
    hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
    pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
    hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
}

/*
 * Read the 4-character firmware revision; note the byte order is the
 * reverse of pdq_read_mla (MSB first).
 */
static void
pdq_read_fwrev(
    const pdq_csrs_t * const csrs,
    pdq_fwrev_t *fwrev)
{
    pdq_uint32_t data;

    pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
    fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
    fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
    fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
}

/*
 * Read one error-log entry, one 32-bit word per port-control command,
 * until the adapter stops returning data or the entry is full.
 * NOTE(review): the loop reuses the PDQ_PCTL_FW_REV_READ command as the
 * "read next word" operation after ERROR_LOG_START -- matches upstream;
 * verify against the PDQ port specification.
 * Returns PDQ_TRUE if at least one word was read.
 */
static pdq_boolean_t
pdq_read_error_log(
    pdq_t *pdq,
    pdq_response_error_log_get_t *log_entry)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;

    pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);

    while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
        *ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
        if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
            break;
    }
    return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
}

/* Query the PDQ chip revision via the REV_GET sub-command. */
static pdq_chip_rev_t
pdq_read_chiprev(
    const pdq_csrs_t * const csrs)
{
    pdq_uint32_t data;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    return (pdq_chip_rev_t) data;
}

/*
 * Command/response sizing table, indexed by pdq_cmd_code_t opcode.
 * Entries with len 0 are opcodes this driver never issues (the #if 0
 * regions preserve their documentation); the table must stay in opcode
 * order since pdq_queue_commands indexes it by op.
 */
static const struct {
    size_t cmd_len;
    size_t rsp_len;
    const char *cmd_name;
} pdq_cmd_info[] = {
    { sizeof(pdq_cmd_generic_t),            /* 0 - PDQC_START */
      sizeof(pdq_response_generic_t),
      "Start"
    },
    { sizeof(pdq_cmd_filter_set_t),         /* 1 - PDQC_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),            /* 2 - PDQC_FILTER_GET */
      sizeof(pdq_response_filter_get_t),
      "Filter Get"
    },
    { sizeof(pdq_cmd_chars_set_t),          /* 3 - PDQC_CHARS_SET */
      sizeof(pdq_response_generic_t),
      "Chars Set"
    },
    { sizeof(pdq_cmd_generic_t),            /* 4 - PDQC_STATUS_CHARS_GET */
      sizeof(pdq_response_status_chars_get_t),
      "Status Chars Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),            /* 5 - PDQC_COUNTERS_GET */
      sizeof(pdq_response_counters_get_t),
      "Counters Get"
    },
    { sizeof(pdq_cmd_counters_set_t),       /* 6 - PDQC_COUNTERS_SET */
      sizeof(pdq_response_generic_t),
      "Counters Set"
    },
#else
    { 0, 0, "Counters Get" },
    { 0, 0, "Counters Set" },
#endif
    { sizeof(pdq_cmd_addr_filter_set_t),    /* 7 - PDQC_ADDR_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Addr Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),            /* 8 - PDQC_ADDR_FILTER_GET */
      sizeof(pdq_response_addr_filter_get_t),
      "Addr Filter Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),            /* 9 - PDQC_ERROR_LOG_CLEAR */
      sizeof(pdq_response_generic_t),
      "Error Log Clear"
    },
    { sizeof(pdq_cmd_generic_t),            /* 10 - PDQC_ERROR_LOG_SET */
      sizeof(pdq_response_generic_t),
      "Error Log Set"
    },
    { sizeof(pdq_cmd_generic_t),            /* 11 - PDQC_FDDI_MIB_GET */
      sizeof(pdq_response_generic_t),
      "FDDI MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 12 - PDQC_DEC_EXT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "DEC Ext MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 13 - PDQC_DEC_SPECIFIC_GET */
      sizeof(pdq_response_generic_t),
      "DEC Specific Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 14 - PDQC_SNMP_SET */
      sizeof(pdq_response_generic_t),
      "SNMP Set"
    },
    { 0, 0, "N/A" },
    { sizeof(pdq_cmd_generic_t),            /* 16 - PDQC_SMT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 17 - PDQC_SMT_MIB_SET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Set",
    },
#endif
};

/*
 * Queue at most one pending command to the adapter's command rings.
 * The highest-numbered pending opcode is selected (scan from
 * PDQC_SMT_MIB_SET downward), its request/response descriptors are
 * sized, the command area is built, and both producer CSRs are written
 * to hand it to the PDQ. Only one command is ever in flight
 * (ci_command_active gates re-entry until the response is consumed).
 */
static void
pdq_queue_commands(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_cmd_code_t op;
    pdq_uint32_t cmdlen, rsplen, mask;

    /*
     * If there are commands or responses active or there aren't
     * any pending commands, then don't queue any more.
     */
    if (ci->ci_command_active || ci->ci_pending_commands == 0)
        return;

    /*
     * Determine which command needs to be queued.
     */
    op = PDQC_SMT_MIB_SET;
    for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
        op = (pdq_cmd_code_t) ((int) op - 1);
    /*
     * Obtain the sizes needed for the command and response.
     * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
     * always properly aligned.
     */
    cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
    rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
    /* One shared buffer serves both directions: size it for the larger. */
    if (cmdlen < rsplen)
        cmdlen = rsplen;
    /*
     * Since only one command at a time will be queued, there will always
     * be enough space.
     */

    /*
     * Obtain and fill in the descriptor for the command (descriptor is
     * pre-initialized)
     */
    dbp->pdqdb_command_requests[ci->ci_request_producer].txd_seg_len = cmdlen;
    PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));

    /*
     * Obtain and fill in the descriptor for the response (descriptor is
     * pre-initialized). rxd_seg_len_hi is in units of 16 bytes.
     */
    dbp->pdqdb_command_responses[ci->ci_response_producer].rxd_seg_len_hi = cmdlen / 16;
    PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));

    /*
     * Clear the command area, set the opcode, and the command from the pending
     * mask.
     */
    PDQ_OS_MEMZERO(ci->ci_bufstart, cmdlen);
    *(pdq_cmd_code_t *) ci->ci_bufstart = op;
    ci->ci_pending_commands &= ~mask;

    /*
     * Fill in the command area, if needed.
     */
    switch (op) {
        case PDQC_FILTER_SET: {
            /* Translate the driver flag bits into the adapter's filters. */
            pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_bufstart;
            unsigned idx = 0;
            filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
            filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
            idx++;
            filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
            filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
            idx++;
            filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
            filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
            idx++;
            filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
            filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
            idx++;
            filter_set->filter_set_items[idx].item_code = PDQI_EOL;
            break;
        }
        case PDQC_ADDR_FILTER_SET: {
            /* First slot is the broadcast address; the OS fills the rest. */
            pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_bufstart;
            pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
            addr->lanaddr_bytes[0] = 0xFF;
            addr->lanaddr_bytes[1] = 0xFF;
            addr->lanaddr_bytes[2] = 0xFF;
            addr->lanaddr_bytes[3] = 0xFF;
            addr->lanaddr_bytes[4] = 0xFF;
            addr->lanaddr_bytes[5] = 0xFF;
            addr++;
            /* 61 = remaining filter slots after the broadcast entry above. */
            pdq_os_addr_fill(pdq, addr, 61);
            break;
        }
        default: {    /* to make gcc happy */
            break;
        }
    }
    /*
     * At this point the command is done. All that needs to be done is to
     * produce it to the PDQ.
     */
    PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
                pdq_cmd_info[op].cmd_name));

    ci->ci_command_active++;
    /* Producer CSR format: producer index in bits 0-7, completion in 8-15. */
    PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
    PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
}

/*
 * Consume a completed command response (if any), print STATUS_CHARS_GET
 * results when requested, advance completion indexes, and either queue
 * the next pending command or update the producer CSRs.
 */
static void
pdq_process_command_responses(
    pdq_t * const pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    const pdq_response_generic_t *rspgen;

    /*
     * We have to process the command and response in tandem so
     * just wait for the response to be consumed. If it has been
     * consumed then the command must have been as well.
     */
    if (cbp->pdqcb_command_response == ci->ci_response_completion)
        return;

    PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);

    rspgen = (const pdq_response_generic_t *) ci->ci_bufstart;
    PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
    PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d)\n",
                pdq_cmd_info[rspgen->generic_op].cmd_name,
                rspgen->generic_status));

    /* One-shot: print the adapter characteristics then clear the flag. */
    if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
        pdq->pdq_flags &= ~PDQ_PRINTCHARS;
        pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
    }

    PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
    PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    ci->ci_command_active = 0;

    if (ci->ci_pending_commands != 0) {
        /* pdq_queue_commands writes the producer CSRs itself. */
        pdq_queue_commands(pdq);
    } else {
        PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
                      ci->ci_response_producer | (ci->ci_response_completion << 8));
        PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
                      ci->ci_request_producer | (ci->ci_request_completion << 8));
    }
}

/*
 * This following routine processes unsolicited events.
 * In addition, it also fills the unsolicited queue with
 * event buffers so it can be used to initialize the queue
 * as well.
 */
static void
pdq_process_unsolicited_events(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    const pdq_unsolicited_event_t *event;
    pdq_rxdesc_t *rxd;

    /*
     * Process each unsolicited event (if any).
     */
    while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
        rxd = &dbp->pdqdb_unsolicited_events[ui->ui_completion];
        event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];

        switch (event->event_type) {
            case PDQ_UNSOLICITED_EVENT: {
                /* Decode entity/event via the name tables above. */
                printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
                       PDQ_OS_PREFIX_ARGS,
                       pdq_entities[event->event_entity],
                       pdq_event_codes[event->event_entity][event->event_code.value]);
                if (event->event_entity == PDQ_ENTITY_PHY_PORT)
                    printf("[%d]", event->event_index);
                printf("\n");
                break;
            }
            case PDQ_UNSOLICITED_COUNTERS: {
                break;
            }
        }
        PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
        ui->ui_free++;
    }

    /*
     * Now give back the event buffers back to the PDQ.
     */
    PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
    ui->ui_free = 0;

    PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
                  ui->ui_producer | (ui->ui_completion << 8));
}

/*
 * Walk the receive ring from rx_completion up to completion_goal,
 * validating each frame (frame-control class and length), replacing the
 * consumed data buffers with fresh ones, and handing good PDUs to
 * pdq_os_receive_pdu(). Bad or unreplaceable frames have their buffers
 * recycled straight back onto the ring (discard_frame). Finally, the
 * ring is topped back up toward rx_target with new buffer groups of
 * PDQ_RX_SEGCNT descriptors each. Used for both the receive and the
 * host SMT rings (hence receives/completion_goal/ring_mask parameters).
 */
static void
pdq_process_received_data(
    pdq_t *pdq,
    pdq_rx_info_t *rx,
    pdq_rxdesc_t *receives,
    pdq_uint32_t completion_goal,
    pdq_uint32_t ring_mask)
{
    pdq_uint32_t completion = rx->rx_completion;
    pdq_uint32_t producer = rx->rx_producer;
    PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
    pdq_rxdesc_t *rxd;
    pdq_uint32_t idx;

    while (completion != completion_goal) {
        PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
        pdq_uint8_t *dataptr;
        pdq_uint32_t fc, datalen, pdulen, segcnt;
        pdq_rxstatus_t status;

        fpdu = lpdu = buffers[completion];
        PDQ_ASSERT(fpdu != NULL);

        /*
         * The adapter prepends the receive status word to the data.
         * NOTE(review): direct cast assumes the buffer is suitably
         * aligned for pdq_rxstatus_t -- guaranteed by the buffer
         * allocator, presumably; verify on strict-alignment targets.
         */
        dataptr = PDQ_OS_DATABUF_PTR(fpdu);
        status = *(pdq_rxstatus_t *) dataptr;
        /* Bit 0x200000 is the error summary; 0x1FFF the received length. */
        if ((status.rxs_status & 0x200000) == 0) {
            datalen = status.rxs_status & 0x1FFF;
            fc = dataptr[PDQ_RX_FC_OFFSET];
            switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
                case PDQ_FDDI_LLC_ASYNC:
                case PDQ_FDDI_LLC_SYNC:
                case PDQ_FDDI_IMP_ASYNC:
                case PDQ_FDDI_IMP_SYNC: {
                    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
                        PDQ_PRINTF(("discard: bad length %d\n", datalen));
                        goto discard_frame;
                    }
                    break;
                }
                case PDQ_FDDI_SMT: {
                    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
                        goto discard_frame;
                    break;
                }
                default: {
                    PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
                    goto discard_frame;
                }
            }
            /*
             * Update the lengths of the data buffers now that we know
             * the real length.
             */
            pdulen = datalen - 4 /* CRC */;
            segcnt = (pdulen + PDQ_RX_FC_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
            /*
             * Replace each consumed ring buffer with a fresh one before
             * chaining the old buffers into the PDU handed upstream.
             */
            PDQ_OS_DATABUF_ALLOC(npdu);
            if (npdu == NULL) {
                PDQ_PRINTF(("discard: no databuf #0\n"));
                goto discard_frame;
            }
            buffers[completion] = npdu;
            for (idx = 1; idx < segcnt; idx++) {
                PDQ_OS_DATABUF_ALLOC(npdu);
                if (npdu == NULL) {
                    /* Free what was chained so far and recycle the ring. */
                    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
                    PDQ_OS_DATABUF_FREE(fpdu);
                    goto discard_frame;
                }
                PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
                lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
                buffers[(completion + idx) & ring_mask] = npdu;
            }
            PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
            /* Slide the replacement buffers into the producer slots. */
            for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
                buffers[(producer + idx) & ring_mask] =
                    buffers[(completion + idx) & ring_mask];
                buffers[(completion + idx) & ring_mask] = NULL;
            }
            /* Strip the status word/FC prefix before passing upstream. */
            PDQ_OS_DATABUF_ADJ(fpdu, PDQ_RX_FC_OFFSET);
            if (segcnt == 1) {
                PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
            } else {
                PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_RX_FC_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
            }
            pdq_os_receive_pdu(pdq, fpdu, pdulen);
            rx->rx_free += PDQ_RX_SEGCNT;
            PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
            PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
            continue;
        } else {
            PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
                        status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
                        status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
            if (status.rxs_rcc_reason == 7)
                goto discard_frame;
            /*
             * NOTE(review): the next three source lines (755-757) were
             * illegible in this copy of the file; reconstructed from the
             * upstream BSD pdq.c as an empty rcc_reason check -- verify
             * against the original source before committing.
             */
            if (status.rxs_rcc_reason != 0) {
                /* hardware fault */
            }
            if (status.rxs_rcc_badcrc) {
                printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
                       PDQ_OS_PREFIX_ARGS,
                       dataptr[PDQ_RX_FC_OFFSET+1],
                       dataptr[PDQ_RX_FC_OFFSET+2],
                       dataptr[PDQ_RX_FC_OFFSET+3],
                       dataptr[PDQ_RX_FC_OFFSET+4],
                       dataptr[PDQ_RX_FC_OFFSET+5],
                       dataptr[PDQ_RX_FC_OFFSET+6]);
                /* rx->rx_badcrc++; */
            } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
                /* rx->rx_frame_status_errors++; */
            } else {
                /* hardware fault */
            }
        }
      discard_frame:
        /*
         * Discarded frames go right back on the queue; therefore
         * ring entries were freed.
         */
        for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
            buffers[producer] = buffers[completion];
            buffers[completion] = NULL;
            rxd = &receives[rx->rx_producer];
            if (idx == 0) {
                rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
            } else {
                rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
            }
            rxd->rxd_pa_hi = 0;
            rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
            rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(buffers[rx->rx_producer]));
            PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
            PDQ_ADVANCE(producer, 1, ring_mask);
            PDQ_ADVANCE(completion, 1, ring_mask);
        }
    }
    rx->rx_completion = completion;

    /* Refill the ring toward rx_target, PDQ_RX_SEGCNT buffers at a time. */
    while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
        PDQ_OS_DATABUF_T *pdu;
        /*
         * Allocate the needed number of data buffers.
         * Try to obtain them from our free queue before
         * asking the system for more.
         */
        for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
            if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
                PDQ_OS_DATABUF_ALLOC(pdu);
                if (pdu == NULL)
                    break;
                buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
            }
            rxd = &receives[(rx->rx_producer + idx) & ring_mask];
            if (idx == 0) {
                rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
            } else {
                rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
            }
            rxd->rxd_pa_hi = 0;
            rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
            rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(pdu));
        }
        if (idx < PDQ_RX_SEGCNT) {
            /*
             * We didn't get all databufs required to complete a new
             * receive buffer. Keep the ones we got and retry a bit
             * later for the rest.
             */
            break;
        }
        PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
        rx->rx_free -= PDQ_RX_SEGCNT;
    }
}

/*
 * Map a PDU (chain of data buffers) onto the transmit ring: one header
 * descriptor (tx_hdrdesc) followed by one descriptor per physical-page
 * fragment of each buffer. Returns PDQ_FALSE without side effects on
 * the ring state if there is not enough free ring space (caller should
 * requeue); on success records the descriptor count, marks the last
 * descriptor EOP, enqueues the PDU for completion handling, and kicks
 * the type-2 producer CSR.
 */
pdq_boolean_t
pdq_queue_transmit_data(
    pdq_t *pdq,
    PDQ_OS_DATABUF_T *pdu)
{
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    pdq_uint32_t producer = tx->tx_producer;
    pdq_txdesc_t *eop = NULL;
    PDQ_OS_DATABUF_T *pdu0;
    pdq_uint32_t freecnt;

    if (tx->tx_free < 1)
        return PDQ_FALSE;

    /* First descriptor is the pre-built frame header. */
    dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));

    for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
        pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
        const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);

        /*
         * The first segment is limited to the space remaining in
         * page. All segments after that can be up to a full page
         * in size.
         *
         * NOTE(review): (dataptr - (pdq_uint8_t *) NULL) converts the
         * pointer to an integer page offset via subtraction from NULL;
         * this is an old idiom (technically not portable C) -- verify
         * it matches the project's other pointer/PA conversions.
         */
        fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
        while (datalen > 0 && freecnt > 0) {
            pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);

            /*
             * Initialize the transmit descriptor
             */
            eop = &dbp->pdqdb_transmits[producer];
            eop->txd_seg_len = seglen;
            eop->txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, dataptr);
            eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;

            datalen -= seglen;
            dataptr += seglen;
            fraglen = PDQ_OS_PAGESIZE;
            freecnt--;
            PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
        }
        pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
    }
    if (pdu0 != NULL) {
        PDQ_ASSERT(freecnt == 0);
        /*
         * If we still have data to process then the ring was too full
         * to store the PDU. Return FALSE so the caller will requeue
         * the PDU for later.
         */
        return PDQ_FALSE;
    }
    /*
     * Everything went fine. Finish it up.
     * tx_free - freecnt = header descriptor + all segment descriptors,
     * recorded so the completion path can reclaim the right amount.
     */
    tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
    eop->txd_eop = 1;
    PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
    tx->tx_producer = producer;
    tx->tx_free = freecnt;
    PDQ_DO_TYPE2_PRODUCER(pdq);
    return PDQ_TRUE;
}

/*
 * Reclaim transmit descriptors the adapter has consumed, hand each
 * completed PDU to pdq_os_transmit_done(), and restart the OS
 * transmitter if anything was freed.
 */
static void
pdq_process_transmitted_data(
    pdq_t *pdq)
{
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    pdq_uint32_t completion = tx->tx_completion;

    while (completion != cbp->pdqcb_transmits) {
        PDQ_OS_DATABUF_T *pdu;
        pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
        PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
        PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
        PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
        pdq_os_transmit_done(pdq, pdu);
        tx->tx_free += descriptor_count;

        PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
    }
    if (tx->tx_completion != completion) {
        tx->tx_completion = completion;
        pdq_os_restart_transmitter(pdq);
    }
    PDQ_DO_TYPE2_PRODUCER(pdq);
}

/*
 * Drop every queued-but-unsent PDU and reset the transmit ring to an
 * empty state (completion caught up to producer).
 */
void
pdq_flush_transmitter(
    pdq_t *pdq)
{
    volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;

    for (;;) {
        PDQ_OS_DATABUF_T *pdu;
        PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
        if (pdu == NULL)
            break;
        /*
         * Don't call transmit done since the packet never made it
         * out on the wire.
         */
        PDQ_OS_DATABUF_FREE(pdu);
    }

    tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
    tx->tx_completion = cbp->pdqcb_transmits = tx->tx_producer;

    PDQ_DO_TYPE2_PRODUCER(pdq);
}

/*
 * Hard-reset the adapter and wait (up to ~45 seconds, polled in 1 ms
 * steps) for it to reach the DMA_UNAVAILABLE state. Selftest is skipped
 * unless the adapter was halted. No-op if already DMA_UNAVAILABLE.
 */
void
pdq_hwreset(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_state_t state;
    int cnt;

    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    if (state == PDQS_DMA_UNAVAILABLE)
        return;
    PDQ_CSR_WRITE(csrs, csr_port_data_a,
                  (state == PDQS_HALTED) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
    /* Pulse the reset line: assert, hold 100 us, deassert. */
    PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
    PDQ_OS_USEC_DELAY(100);
    PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
    for (cnt = 45000;;cnt--) {
        PDQ_OS_USEC_DELAY(1000);
        state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
        if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
            break;
    }
    PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 45000 - cnt));
    PDQ_OS_USEC_DELAY(10000);
    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
    PDQ_ASSERT(cnt > 0);
}

/*
 * The following routine brings the PDQ from whatever state it is
 * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
987 */ 988pdq_state_t 989pdq_stop( 990 pdq_t *pdq) 991{ 992 pdq_state_t state; 993 const pdq_csrs_t * const csrs = &pdq->pdq_csrs; 994 int cnt, pass = 0, idx; 995 PDQ_OS_DATABUF_T **buffers; 996 997 restart: 998 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status)); 999 if (state != PDQS_DMA_UNAVAILABLE) { 1000 pdq_hwreset(pdq); 1001 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status)); 1002 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE); 1003 } 1004#if 0 1005 switch (state) { 1006 case PDQS_RING_MEMBER: 1007 case PDQS_LINK_UNAVAILABLE: 1008 case PDQS_LINK_AVAILABLE: { 1009 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT); 1010 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0); 1011 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD); 1012 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status)); 1013 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE); 1014 /* FALL THROUGH */ 1015 } 1016 case PDQS_DMA_AVAILABLE: { 1017 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0); 1018 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0); 1019 pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT); 1020 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status)); 1021 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE); 1022 /* FALL THROUGH */ 1023 } 1024 case PDQS_DMA_UNAVAILABLE: { 1025 break; 1026 } 1027 } 1028#endif 1029 /* 1030 * Now we should be in DMA_UNAVAILABLE. So bring the PDQ into 1031 * DMA_AVAILABLE. 1032 */ 1033 1034 /* 1035 * Obtain the hardware address and firmware revisions 1036 * (MLA = my long address which is FDDI speak for hardware address) 1037 */ 1038 pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr); 1039 pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev); 1040 pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs); 1041 1042 if (pdq->pdq_type == PDQ_DEFPA) { 1043 /* 1044 * Disable interrupts and DMA. 
1045 */ 1046 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0); 1047 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10); 1048 } 1049 1050 /* 1051 * Flush all the databuf queues. 1052 */ 1053 pdq_flush_databuf_queue(&pdq->pdq_tx_info.tx_txq); 1054 pdq->pdq_flags &= ~PDQ_TXOK; 1055 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers; 1056 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) { 1057 if (buffers[idx] != NULL) { 1058 PDQ_OS_DATABUF_FREE(buffers[idx]); 1059 buffers[idx] = NULL; 1060 } 1061 } 1062 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives); 1063 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers; 1064 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) { 1065 if (buffers[idx] != NULL) { 1066 PDQ_OS_DATABUF_FREE(buffers[idx]); 1067 buffers[idx] = NULL; 1068 } 1069 } 1070 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt); 1071 1072 /* 1073 * Reset the consumer indexes to 0. 1074 */ 1075 pdq->pdq_cbp->pdqcb_receives = 0; 1076 pdq->pdq_cbp->pdqcb_transmits = 0; 1077 pdq->pdq_cbp->pdqcb_host_smt = 0; 1078 pdq->pdq_cbp->pdqcb_unsolicited_event = 0; 1079 pdq->pdq_cbp->pdqcb_command_response = 0; 1080 pdq->pdq_cbp->pdqcb_command_request = 0; 1081 1082 /* 1083 * Reset the producer and completion indexes to 0. 
1084 */ 1085 pdq->pdq_command_info.ci_request_producer = 0; 1086 pdq->pdq_command_info.ci_response_producer = 0; 1087 pdq->pdq_command_info.ci_request_completion = 0; 1088 pdq->pdq_command_info.ci_response_completion = 0; 1089 pdq->pdq_unsolicited_info.ui_producer = 0; 1090 pdq->pdq_unsolicited_info.ui_completion = 0; 1091 pdq->pdq_rx_info.rx_producer = 0; 1092 pdq->pdq_rx_info.rx_completion = 0; 1093 pdq->pdq_tx_info.tx_producer = 0; 1094 pdq->pdq_tx_info.tx_completion = 0; 1095 pdq->pdq_host_smt_info.rx_producer = 0; 1096 pdq->pdq_host_smt_info.rx_completion = 0; 1097 1098 pdq->pdq_command_info.ci_command_active = 0; 1099 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS; 1100 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits); 1101 1102 /* 1103 * Allow the DEFPA to do DMA. Then program the physical 1104 * addresses of the consumer and descriptor blocks. 1105 */ 1106 if (pdq->pdq_type == PDQ_DEFPA) { 1107#ifdef PDQTEST 1108 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 1109 PDQ_PFI_MODE_DMA_ENABLE); 1110#else 1111 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 1112 PDQ_PFI_MODE_DMA_ENABLE 1113 /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR); 1114#endif 1115 } 1116 1117 /* 1118 * Make sure the unsolicited queue has events ... 
1119 */ 1120 pdq_process_unsolicited_events(pdq); 1121 1122 if (pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E) 1123 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW); 1124 else 1125 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW); 1126 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET); 1127 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD); 1128 1129 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0); 1130 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_OS_VA_TO_PA(pdq, pdq->pdq_cbp)); 1131 pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK); 1132 1133 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0); 1134 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1135 PDQ_OS_VA_TO_PA(pdq, pdq->pdq_dbp) | PDQ_DMA_INIT_LW_BSWAP_DATA); 1136 pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT); 1137 1138 for (cnt = 0; cnt < 1000; cnt++) { 1139 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status)); 1140 if (state == PDQS_HALTED) { 1141 if (pass > 0) 1142 return PDQS_HALTED; 1143 pass = 1; 1144 goto restart; 1145 } 1146 if (state == PDQS_DMA_AVAILABLE) { 1147 PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt)); 1148 break; 1149 } 1150 PDQ_OS_USEC_DELAY(1000); 1151 } 1152 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE); 1153 1154 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF); 1155 PDQ_CSR_WRITE(csrs, csr_host_int_enable, 0) /* PDQ_HOST_INT_STATE_CHANGE 1156 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE 1157 |PDQ_HOST_INT_UNSOL_ENABLE */; 1158 1159 /* 1160 * Any other command but START should be valid. 1161 */ 1162 pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START)); 1163 if (pdq->pdq_flags & PDQ_PRINTCHARS) 1164 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET); 1165 pdq_queue_commands(pdq); 1166 1167 if (pdq->pdq_flags & PDQ_PRINTCHARS) { 1168 /* 1169 * Now wait (up to 100ms) for the command(s) to finish. 
1170 */ 1171 for (cnt = 0; cnt < 1000; cnt++) { 1172 pdq_process_command_responses(pdq); 1173 if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion) 1174 break; 1175 PDQ_OS_USEC_DELAY(1000); 1176 } 1177 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status)); 1178 } 1179 1180 return state; 1181} 1182 1183void 1184pdq_run( 1185 pdq_t *pdq) 1186{ 1187 const pdq_csrs_t * const csrs = &pdq->pdq_csrs; 1188 pdq_state_t state; 1189 1190 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status)); 1191 PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE); 1192 PDQ_ASSERT(state != PDQS_RESET); 1193 PDQ_ASSERT(state != PDQS_HALTED); 1194 PDQ_ASSERT(state != PDQS_UPGRADE); 1195 PDQ_ASSERT(state != PDQS_RING_MEMBER); 1196 switch (state) { 1197 case PDQS_DMA_AVAILABLE: { 1198 /* 1199 * The PDQ after being reset screws up some of its state. 1200 * So we need to clear all the errors/interrupts so the real 1201 * ones will get through. 1202 */ 1203 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF); 1204 PDQ_CSR_WRITE(csrs, csr_host_int_enable, PDQ_HOST_INT_STATE_CHANGE|PDQ_HOST_INT_XMT_DATA_FLUSH 1205 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE 1206 |PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_TX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE); 1207 /* 1208 * Set the MAC and address filters and start up the PDQ. 
1209 */ 1210 pdq_process_unsolicited_events(pdq); 1211 pdq_process_received_data(pdq, &pdq->pdq_rx_info, 1212 pdq->pdq_dbp->pdqdb_receives, 1213 pdq->pdq_cbp->pdqcb_receives, 1214 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives)); 1215 PDQ_DO_TYPE2_PRODUCER(pdq); 1216 if (pdq->pdq_flags & PDQ_PASS_SMT) { 1217 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info, 1218 pdq->pdq_dbp->pdqdb_host_smt, 1219 pdq->pdq_cbp->pdqcb_host_smt, 1220 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt)); 1221 PDQ_CSR_WRITE(csrs, csr_host_smt_producer, 1222 pdq->pdq_host_smt_info.rx_producer 1223 | (pdq->pdq_host_smt_info.rx_completion << 8)); 1224 } 1225 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET) 1226 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET) | PDQ_BITMASK(PDQC_START); 1227 if (pdq->pdq_flags & PDQ_PRINTCHARS) 1228 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET); 1229 pdq_queue_commands(pdq); 1230 break; 1231 } 1232 case PDQS_LINK_UNAVAILABLE: 1233 case PDQS_LINK_AVAILABLE: { 1234 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET) 1235 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET); 1236 if (pdq->pdq_flags & PDQ_PRINTCHARS) 1237 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET); 1238 if (pdq->pdq_flags & PDQ_PASS_SMT) { 1239 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info, 1240 pdq->pdq_dbp->pdqdb_host_smt, 1241 pdq->pdq_cbp->pdqcb_host_smt, 1242 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt)); 1243 PDQ_CSR_WRITE(csrs, csr_host_smt_producer, 1244 pdq->pdq_host_smt_info.rx_producer 1245 | (pdq->pdq_host_smt_info.rx_completion << 8)); 1246 } 1247 pdq_process_unsolicited_events(pdq); 1248 pdq_queue_commands(pdq); 1249 break; 1250 } 1251 case PDQS_RING_MEMBER: { 1252 } 1253 default: { /* to make gcc happy */ 1254 break; 1255 } 1256 } 1257} 1258 1259int 1260pdq_interrupt( 1261 pdq_t *pdq) 1262{ 1263 const pdq_csrs_t * const csrs = &pdq->pdq_csrs; 1264 pdq_uint32_t data; 1265 int progress = 0; 1266 
1267 if (pdq->pdq_type == PDQ_DEFPA) 1268 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18); 1269 1270 while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) { 1271 progress = 1; 1272 PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data)); 1273 if (data & PDQ_PSTS_RCV_DATA_PENDING) { 1274 pdq_process_received_data(pdq, &pdq->pdq_rx_info, 1275 pdq->pdq_dbp->pdqdb_receives, 1276 pdq->pdq_cbp->pdqcb_receives, 1277 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives)); 1278 PDQ_DO_TYPE2_PRODUCER(pdq); 1279 } 1280 if (data & PDQ_PSTS_HOST_SMT_PENDING) { 1281 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info, 1282 pdq->pdq_dbp->pdqdb_host_smt, 1283 pdq->pdq_cbp->pdqcb_host_smt, 1284 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt)); 1285 PDQ_DO_HOST_SMT_PRODUCER(pdq); 1286 } 1287 if (data & PDQ_PSTS_XMT_DATA_PENDING) 1288 pdq_process_transmitted_data(pdq); 1289 if (data & PDQ_PSTS_UNSOL_PENDING) 1290 pdq_process_unsolicited_events(pdq); 1291 if (data & PDQ_PSTS_CMD_RSP_PENDING) 1292 pdq_process_command_responses(pdq); 1293 if (data & PDQ_PSTS_TYPE_0_PENDING) { 1294 data = PDQ_CSR_READ(csrs, csr_host_int_type_0); 1295 if (data & PDQ_HOST_INT_STATE_CHANGE) { 1296 pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status)); 1297 printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]); 1298 if (state == PDQS_LINK_UNAVAILABLE) { 1299 pdq->pdq_flags &= ~PDQ_TXOK; 1300 } else if (state == PDQS_LINK_AVAILABLE) { 1301 pdq->pdq_flags |= PDQ_TXOK; 1302 pdq_os_restart_transmitter(pdq); 1303 } else if (state == PDQS_HALTED) { 1304 pdq_response_error_log_get_t log_entry; 1305 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status)); 1306 printf(": halt code = %d (%s)\n", 1307 halt_code, pdq_halt_codes[halt_code]); 1308 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) { 1309 PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n", 1310 PDQ_CSR_READ(&pdq->pdq_pci_csrs, 
csr_pfi_status), 1311 data & PDQ_HOST_INT_FATAL_ERROR)); 1312 } 1313 pdq_read_error_log(pdq, &log_entry); 1314 pdq_stop(pdq); 1315 if (pdq->pdq_flags & PDQ_RUNNING) 1316 pdq_run(pdq); 1317 return 1; 1318 } 1319 printf("\n"); 1320 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE); 1321 } 1322 if (data & PDQ_HOST_INT_FATAL_ERROR) { 1323 pdq_stop(pdq); 1324 if (pdq->pdq_flags & PDQ_RUNNING) 1325 pdq_run(pdq); 1326 return 1; 1327 } 1328 if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) { 1329 printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS); 1330 pdq->pdq_flags &= ~PDQ_TXOK; 1331 pdq_flush_transmitter(pdq); 1332 pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE); 1333 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH); 1334 } 1335 } 1336 if (pdq->pdq_type == PDQ_DEFPA) 1337 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18); 1338 } 1339 return progress; 1340} 1341 1342pdq_t * 1343pdq_initialize( 1344 pdq_bus_t bus, 1345 pdq_bus_memaddr_t csr_base, 1346 const char *name, 1347 int unit, 1348 void *ctx, 1349 pdq_type_t type) 1350{ 1351 pdq_t *pdq; 1352 pdq_state_t state; 1353 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE; 1354 pdq_uint8_t *p; 1355 int idx; 1356 1357 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192); 1358 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64); 1359 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET); 1360 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET); 1361 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET); 1362 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET); 1363 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET); 1364 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET); 1365 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512); 1366 1367 pdq = 
(pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t)); 1368 if (pdq == NULL) { 1369 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq))); 1370 return NULL; 1371 } 1372 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t)); 1373 pdq->pdq_type = type; 1374 pdq->pdq_unit = unit; 1375 pdq->pdq_os_ctx = (void *) ctx; 1376 pdq->pdq_os_name = name; 1377 pdq->pdq_flags = PDQ_PRINTCHARS; 1378 /* 1379 * Allocate the additional data structures required by 1380 * the PDQ driver. Allocate a contiguous region of memory 1381 * for the descriptor block. We need to allocated enough 1382 * to guarantee that we will a get 8KB block of memory aligned 1383 * on a 8KB boundary. This turns to require that we allocate 1384 * (N*2 - 1 page) pages of memory. On machine with less than 1385 * a 8KB page size, it mean we will allocate more memory than 1386 * we need. The extra will be used for the unsolicited event 1387 * buffers (though on machines with 8KB pages we will to allocate 1388 * them separately since there will be nothing left overs.) 1389 */ 1390 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes); 1391 if (p != NULL) { 1392 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_PA(pdq, p); 1393 /* 1394 * Assert that we really got contiguous memory. This isn't really 1395 * needed on systems that actually have physical contiguous allocation 1396 * routines, but on those systems that don't ... 
1397 */ 1398 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) { 1399 if (PDQ_OS_VA_TO_PA(pdq, p + idx) - physaddr != idx) 1400 goto cleanup_and_return; 1401 } 1402 physaddr &= 0x1FFF; 1403 if (physaddr) { 1404 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p; 1405 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - physaddr]; 1406 } else { 1407 pdq->pdq_dbp = (pdq_descriptor_block_t *) p; 1408 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000]; 1409 } 1410 } 1411 if (contig_bytes == sizeof(pdq_descriptor_block_t)) { 1412 pdq->pdq_unsolicited_info.ui_events = 1413 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC( 1414 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t)); 1415 } 1416 1417 /* 1418 * Make sure everything got allocated. If not, free what did 1419 * get allocated and return. 1420 */ 1421 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) { 1422 cleanup_and_return: 1423 if (p /* pdq->pdq_dbp */ != NULL) 1424 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes); 1425 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL) 1426 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events, 1427 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t)); 1428 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t)); 1429 return NULL; 1430 } 1431 1432 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer; 1433 pdq->pdq_command_info.ci_bufstart = (pdq_uint8_t *) pdq->pdq_dbp->pdqdb_command_pool; 1434 pdq->pdq_rx_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_receive_buffers; 1435 1436 pdq->pdq_host_smt_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_host_smt_buffers; 1437 1438 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp)); 1439 PDQ_PRINTF((" Recieve Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_receives)); 1440 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_transmits)); 
1441 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_host_smt)); 1442 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_responses)); 1443 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_requests)); 1444 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp)); 1445 1446 /* 1447 * Zero out the descriptor block. Not really required but 1448 * it pays to be neat. This will also zero out the consumer 1449 * block, command pool, and buffer pointers for the receive 1450 * host_smt rings. 1451 */ 1452 PDQ_OS_MEMZERO(pdq->pdq_dbp, sizeof(*pdq->pdq_dbp)); 1453 1454 /* 1455 * Initialize the CSR references. 1456 * the DEFAA (FutureBus+) skips a longword between registers 1457 */ 1458 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1); 1459 if (pdq->pdq_type == PDQ_DEFPA) 1460 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1); 1461 1462 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_PTR_FMT "\n", pdq->pdq_csrs.csr_base)); 1463 PDQ_PRINTF((" Port Reset = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1464 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset))); 1465 PDQ_PRINTF((" Host Data = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1466 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data))); 1467 PDQ_PRINTF((" Port Control = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1468 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control))); 1469 PDQ_PRINTF((" Port Data A = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1470 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a))); 1471 PDQ_PRINTF((" Port Data B = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1472 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b))); 1473 PDQ_PRINTF((" Port Status = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1474 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status))); 1475 PDQ_PRINTF((" Host Int 
Type 0 = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1476 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0))); 1477 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1478 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable))); 1479 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1480 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer))); 1481 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1482 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer))); 1483 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1484 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer))); 1485 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1486 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer))); 1487 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n", 1488 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer))); 1489 1490 /* 1491 * Initialize the command information block 1492 */ 1493 pdq->pdq_command_info.ci_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_command_info.ci_bufstart); 1494 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_requests)/sizeof(pdq->pdq_dbp->pdqdb_command_requests[0]); idx++) { 1495 pdq_txdesc_t *txd = &pdq->pdq_dbp->pdqdb_command_requests[idx]; 1496 1497 txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart; 1498 txd->txd_eop = txd->txd_sop = 1; 1499 txd->txd_pa_hi = 0; 1500 } 1501 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_responses)/sizeof(pdq->pdq_dbp->pdqdb_command_responses[0]); idx++) { 1502 pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_command_responses[idx]; 1503 1504 rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart; 1505 rxd->rxd_sop = 1; 1506 rxd->rxd_seg_cnt = 0; 1507 
rxd->rxd_seg_len_lo = 0; 1508 } 1509 1510 /* 1511 * Initialize the unsolicited event information block 1512 */ 1513 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS; 1514 pdq->pdq_unsolicited_info.ui_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_unsolicited_info.ui_events); 1515 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events)/sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events[0]); idx++) { 1516 pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_unsolicited_events[idx]; 1517 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)]; 1518 1519 rxd->rxd_sop = 1; 1520 rxd->rxd_seg_cnt = 0; 1521 rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16; 1522 rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event 1523 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events; 1524 rxd->rxd_pa_hi = 0; 1525 } 1526 /* 1527 * Initialize the receive information blocks (normal and SMT). 1528 */ 1529 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives); 1530 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8; 1531 1532 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt); 1533 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3; 1534 1535 /* 1536 * Initialize the transmit information block. 
1537 */ 1538 pdq->pdq_tx_hdr[0] = PDQ_FDDI_PH0; 1539 pdq->pdq_tx_hdr[1] = PDQ_FDDI_PH1; 1540 pdq->pdq_tx_hdr[2] = PDQ_FDDI_PH2; 1541 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits); 1542 pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = sizeof(pdq->pdq_tx_hdr); 1543 pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1; 1544 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_tx_hdr); 1545 1546 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)); 1547 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state])); 1548 1549 /* 1550 * Stop the PDQ if it is running and put it into a known state. 1551 */ 1552 state = pdq_stop(pdq); 1553 1554 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state])); 1555 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE); 1556 /* 1557 * If the adapter is not the state we expect, then the initialization 1558 * failed. Cleanup and exit. 1559 */ 1560#if defined(PDQVERBOSE) 1561 if (state == PDQS_HALTED) { 1562 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)); 1563 printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]); 1564 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) 1565 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n", 1566 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status), 1567 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR)); 1568 } 1569#endif 1570 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE) 1571 goto cleanup_and_return; 1572 1573 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n", 1574 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1], 1575 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3], 1576 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5])); 1577 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n", 1578 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1], 
1579 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3])); 1580 PDQ_PRINTF(("PDQ Chip Revision = ")); 1581 switch (pdq->pdq_chip_rev) { 1582 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break; 1583 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break; 1584 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break; 1585 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev)); 1586 } 1587 PDQ_PRINTF(("\n")); 1588 1589 return pdq; 1590}
|