/*-
 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id: pdq.c,v 1.2 1997/01/17 23:54:31 joerg Exp $
 *
 */

/*
 * DEC PDQ FDDI Controller O/S independent code
 *
 * This module should work with any PDQ-based board.  Note that changes for
 * MIPS and Alpha architectures (or any other architecture which requires
 * a flushing of memory or write buffers and/or has incoherent caches)
 * have yet to be made.
 *
 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
 * flushing of the write buffers.
 */

#define	PDQ_HWSUPPORT	/* for pdq.h */

#if defined(__FreeBSD__)
#include <dev/pdq/pdqvar.h>
#include <dev/pdq/pdqreg.h>
#else
#include "pdqvar.h"
#include "pdqreg.h"
#endif

#define	PDQ_ROUNDUP(n, x)	(((n) + ((x) - 1)) & ~((x) - 1))
#define	PDQ_CMD_RX_ALIGNMENT	16
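/*
 * PDQ_ROUNDUP rounds n up to the next multiple of x (x must be a power of
 * two); e.g. PDQ_ROUNDUP(20, 16) == (20 + 15) & ~15 == 32.  It is used
 * below to pad command/response lengths to PDQ_CMD_RX_ALIGNMENT.
 */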

#if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
#define	PDQ_PRINTF(x)	printf x
#else
#define	PDQ_PRINTF(x)	do { } while (0)
#endif

static const char * const pdq_halt_codes[] = {
    "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
    "Software Fault", "Hardware Fault", "PC Trace Path Test",
62    "DMA Error", "Image CRC Error", "Adapter Processer Error"
};

static const char * const pdq_adapter_states[] = {
    "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
    "Link Available", "Link Unavailable", "Halted", "Ring Member"
};

/*
 * The following are used in conjunction with
 * unsolicited events
 */
static const char * const pdq_entities[] = {
    "Station", "Link", "Phy Port"
};

static const char * const pdq_station_events[] = {
    "Trace Received"
};

static const char * const pdq_station_arguments[] = {
    "Reason"
};

static const char * const pdq_link_events[] = {
    "Transmit Underrun",
    "Transmit Failed",
    "Block Check Error (CRC)",
    "Frame Status Error",
    "PDU Length Error",
    NULL,
    NULL,
    "Receive Data Overrun",
    NULL,
    "No User Buffer",
    "Ring Initialization Initiated",
    "Ring Initialization Received",
    "Ring Beacon Initiated",
    "Duplicate Address Failure",
    "Duplicate Token Detected",
    "Ring Purger Error",
    "FCI Strip Error",
    "Trace Initiated",
    "Directed Beacon Received",
};

static const char * const pdq_link_arguments[] = {
    "Reason",
    "Data Link Header",
    "Source",
    "Upstream Neighbor"
};

static const char * const pdq_phy_events[] = {
    "LEM Error Monitor Reject",
117    "Elasticy Buffer Error",
118    "Link Confidence Test Reject"
119};
120
121static const char * const pdq_phy_arguments[] = {
122    "Direction"
123};
124
125static const char * const * const pdq_event_arguments[] = {
126    pdq_station_arguments,
127    pdq_link_arguments,
128    pdq_phy_arguments
129};
130
131static const char * const * const pdq_event_codes[] = {
132    pdq_station_events,
133    pdq_link_events,
134    pdq_phy_events
135};
136
137static const char * const pdq_station_types[] = {
138    "SAS", "DAC", "SAC", "NAC", "DAS"
139};
140
141static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };
142
143static const char pdq_phy_types[] = "ABSM";
144
145static const char * const pdq_pmd_types0[] = {
146    "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
147    "ANSI Sonet"
148};
149
150static const char * const pdq_pmd_types100[] = {
151    "Low Power", "Thin Wire", "Shielded Twisted Pair",
152    "Unshielded Twisted Pair"
153};
154
155static const char * const * const pdq_pmd_types[] = {
156    pdq_pmd_types0, pdq_pmd_types100
157};
158
159static const char * const pdq_descriptions[] = {
160    "DEFPA PCI",
161    "DEFEA EISA",
162    "DEFTA TC",
163    "DEFAA Futurebus",
164    "DEFQA Q-bus",
165};
166
167static void
168pdq_print_fddi_chars(
169    pdq_t *pdq,
170    const pdq_response_status_chars_get_t *rsp)
171{
172    const char hexchars[] = "0123456789abcdef";
173
174    printf(
175#if !defined(__bsdi__) && !defined(__NetBSD__)
176	   PDQ_OS_PREFIX
177#else
178	   ": "
179#endif
180	   "DEC %s FDDI %s Controller\n",
181#if !defined(__bsdi__) && !defined(__NetBSD__)
182	   PDQ_OS_PREFIX_ARGS,
183#endif
184	   pdq_descriptions[pdq->pdq_type],
185	   pdq_station_types[rsp->status_chars_get.station_type]);
186
187    printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
188	   PDQ_OS_PREFIX_ARGS,
189	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
190	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
191	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
192	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
193	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
194	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
195	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
196	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
197	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
198	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
199	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
200	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
201	   pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
202	   pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
203	   rsp->status_chars_get.module_rev.fwrev_bytes[0]);
204
205    if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
206	printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
207    }
208
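    /*
     * PMD type codes below 100 index pdq_pmd_types0 (the ANSI types) and
     * codes of 100 or more index pdq_pmd_types100, hence the /100 and
     * %100 below.
     */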
    printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
	   PDQ_OS_PREFIX_ARGS,
	   rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
	   pdq_phy_types[rsp->status_chars_get.phy_type[0]],
	   pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);

    if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
	printf(", FDDI Port[B] = %c (PMD = %s)",
	       pdq_phy_types[rsp->status_chars_get.phy_type[1]],
	       pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);

    printf("\n");
}

static void
pdq_init_csrs(
    pdq_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_port_reset		= PDQ_CSR_OFFSET(csr_base,  0 * csrsize);
    csrs->csr_host_data			= PDQ_CSR_OFFSET(csr_base,  1 * csrsize);
    csrs->csr_port_control		= PDQ_CSR_OFFSET(csr_base,  2 * csrsize);
    csrs->csr_port_data_a		= PDQ_CSR_OFFSET(csr_base,  3 * csrsize);
    csrs->csr_port_data_b		= PDQ_CSR_OFFSET(csr_base,  4 * csrsize);
    csrs->csr_port_status		= PDQ_CSR_OFFSET(csr_base,  5 * csrsize);
    csrs->csr_host_int_type_0		= PDQ_CSR_OFFSET(csr_base,  6 * csrsize);
    csrs->csr_host_int_enable		= PDQ_CSR_OFFSET(csr_base,  7 * csrsize);
    csrs->csr_type_2_producer		= PDQ_CSR_OFFSET(csr_base,  8 * csrsize);
    csrs->csr_cmd_response_producer	= PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
    csrs->csr_cmd_request_producer	= PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
    csrs->csr_host_smt_producer		= PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
    csrs->csr_unsolicited_producer	= PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
}

static void
pdq_init_pci_csrs(
    pdq_pci_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_pfi_mode_control	= PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
    csrs->csr_pfi_status	= PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
    csrs->csr_fifo_write	= PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
    csrs->csr_fifo_read		= PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
}

static void
pdq_flush_databuf_queue(
    pdq_databuf_queue_t *q)
{
    PDQ_OS_DATABUF_T *pdu;
    for (;;) {
	PDQ_OS_DATABUF_DEQUEUE(q, pdu);
	if (pdu == NULL)
	    return;
	PDQ_OS_DATABUF_FREE(pdu);
    }
}

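/*
 * Issue a single port control command: clear the CSR_CMD_DONE interrupt
 * bit, write the command with PDQ_PCTL_CMD_ERROR set (the adapter is
 * expected to clear that bit on success), then spin until CSR_CMD_DONE
 * is raised again.
 */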
static pdq_boolean_t
pdq_do_port_control(
    const pdq_csrs_t * const csrs,
    pdq_uint32_t cmd)
{
    int cnt = 0;
    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
    PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
    while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
	cnt++;
    PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
    if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
	PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
	return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
    }
    /* adapter failure */
    PDQ_ASSERT(0);
    return PDQ_FALSE;
}

static void
pdq_read_mla(
    const pdq_csrs_t * const csrs,
    pdq_lanaddr_t *hwaddr)
{
    pdq_uint32_t data;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
    pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
    hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
    hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
    hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
    pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
    hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
}

static void
pdq_read_fwrev(
    const pdq_csrs_t * const csrs,
    pdq_fwrev_t *fwrev)
{
    pdq_uint32_t data;

    pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
    fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
    fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
    fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
}

static pdq_boolean_t
pdq_read_error_log(
    pdq_t *pdq,
    pdq_response_error_log_get_t *log_entry)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;

    pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);

    while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
	*ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
	if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
	    break;
    }
    return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
}

static pdq_chip_rev_t
pdq_read_chiprev(
    const pdq_csrs_t * const csrs)
{
    pdq_uint32_t data;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    return (pdq_chip_rev_t) data;
}

static const struct {
    size_t cmd_len;
    size_t rsp_len;
    const char *cmd_name;
} pdq_cmd_info[] = {
    { sizeof(pdq_cmd_generic_t),		/* 0 - PDQC_START */
      sizeof(pdq_response_generic_t),
      "Start"
    },
    { sizeof(pdq_cmd_filter_set_t),		/* 1 - PDQC_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 2 - PDQC_FILTER_GET */
      sizeof(pdq_response_filter_get_t),
      "Filter Get"
    },
    { sizeof(pdq_cmd_chars_set_t),		/* 3 - PDQC_CHARS_SET */
      sizeof(pdq_response_generic_t),
      "Chars Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 4 - PDQC_STATUS_CHARS_GET */
      sizeof(pdq_response_status_chars_get_t),
      "Status Chars Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),		/* 5 - PDQC_COUNTERS_GET */
      sizeof(pdq_response_counters_get_t),
      "Counters Get"
    },
    { sizeof(pdq_cmd_counters_set_t),		/* 6 - PDQC_COUNTERS_SET */
      sizeof(pdq_response_generic_t),
      "Counters Set"
    },
#else
    { 0, 0, "Counters Get" },
    { 0, 0, "Counters Set" },
#endif
    { sizeof(pdq_cmd_addr_filter_set_t),	/* 7 - PDQC_ADDR_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Addr Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 8 - PDQC_ADDR_FILTER_GET */
      sizeof(pdq_response_addr_filter_get_t),
      "Addr Filter Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),		/* 9 - PDQC_ERROR_LOG_CLEAR */
      sizeof(pdq_response_generic_t),
      "Error Log Clear"
    },
    { sizeof(pdq_cmd_generic_t),		/* 10 - PDQC_ERROR_LOG_SET */
      sizeof(pdq_response_generic_t),
      "Error Log Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 11 - PDQC_FDDI_MIB_GET */
      sizeof(pdq_response_generic_t),
      "FDDI MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 12 - PDQC_DEC_EXT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "DEC Ext MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 13 - PDQC_DEC_SPECIFIC_GET */
      sizeof(pdq_response_generic_t),
      "DEC Specific Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 14 - PDQC_SNMP_SET */
      sizeof(pdq_response_generic_t),
      "SNMP Set"
    },
    { 0, 0, "N/A" },
    { sizeof(pdq_cmd_generic_t),		/* 16 - PDQC_SMT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 17 - PDQC_SMT_MIB_SET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Set",
    },
#endif
};

static void
pdq_queue_commands(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_cmd_code_t op;
    pdq_uint32_t cmdlen, rsplen, mask;

    /*
     * If there are commands or responses active or there aren't
     * any pending commands, then don't queue any more.
     */
    if (ci->ci_command_active || ci->ci_pending_commands == 0)
	return;

    /*
     * Determine which command needs to be queued.
     */
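    /*
     * ci_pending_commands is a bitmask indexed by opcode; scan down from
     * the highest opcode (PDQC_SMT_MIB_SET) until a pending bit is found.
     */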
    op = PDQC_SMT_MIB_SET;
    for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
	op = (pdq_cmd_code_t) ((int) op - 1);
    /*
     * Obtain the sizes needed for the command and response.
     * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
     * always properly aligned.
     */
    cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
    rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
    if (cmdlen < rsplen)
	cmdlen = rsplen;
    /*
     * Since only one command at a time will be queued, there will always
     * be enough space.
     */

    /*
     * Obtain and fill in the descriptor for the command (descriptor is
     * pre-initialized)
     */
    dbp->pdqdb_command_requests[ci->ci_request_producer].txd_seg_len = cmdlen;
    PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));

    /*
     * Obtain and fill in the descriptor for the response (descriptor is
     * pre-initialized)
     */
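    /*
     * The rxd_seg_len_hi field counts 16-byte units, hence the division
     * by 16 here (and the PDQ_CMD_RX_ALIGNMENT padding above).
     */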
    dbp->pdqdb_command_responses[ci->ci_response_producer].rxd_seg_len_hi = cmdlen / 16;
    PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));

    /*
     * Clear the command area, set the opcode, and remove the command from
     * the pending mask.
     */

    PDQ_OS_MEMZERO(ci->ci_bufstart, cmdlen);
    *(pdq_cmd_code_t *) ci->ci_bufstart = op;
    ci->ci_pending_commands &= ~mask;

    /*
     * Fill in the command area, if needed.
     */
    switch (op) {
	case PDQC_FILTER_SET: {
	    pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_bufstart;
	    unsigned idx = 0;
	    filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
	    filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_EOL;
	    break;
	}
	case PDQC_ADDR_FILTER_SET: {
	    pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_bufstart;
	    pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
	    addr->lanaddr_bytes[0] = 0xFF;
	    addr->lanaddr_bytes[1] = 0xFF;
	    addr->lanaddr_bytes[2] = 0xFF;
	    addr->lanaddr_bytes[3] = 0xFF;
	    addr->lanaddr_bytes[4] = 0xFF;
	    addr->lanaddr_bytes[5] = 0xFF;
	    addr++;
	    pdq_os_addr_fill(pdq, addr, 61);
	    break;
	}
	default: {	/* to make gcc happy */
	    break;
	}
    }
    /*
     * At this point the command is done.  All that needs to be done is to
     * produce it to the PDQ.
     */
    PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
		pdq_cmd_info[op].cmd_name));

    ci->ci_command_active++;
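    /*
     * The producer registers pack two 8-bit indexes: the producer index
     * in the low byte and the completion index in bits 8-15.
     */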
    PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
    PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
}

static void
pdq_process_command_responses(
    pdq_t * const pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    const pdq_response_generic_t *rspgen;

    /*
     * We have to process the command and response in tandem so
     * just wait for the response to be consumed.  If it has been
     * consumed then the command must have been as well.
     */

    if (cbp->pdqcb_command_response == ci->ci_response_completion)
	return;

    PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);

    rspgen = (const pdq_response_generic_t *) ci->ci_bufstart;
    PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
    PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d)\n",
		pdq_cmd_info[rspgen->generic_op].cmd_name,
		rspgen->generic_status));

    if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
	pdq->pdq_flags &= ~PDQ_PRINTCHARS;
	pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
    }

    PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
    PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    ci->ci_command_active = 0;

    if (ci->ci_pending_commands != 0) {
	pdq_queue_commands(pdq);
    } else {
	PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
		      ci->ci_response_producer | (ci->ci_response_completion << 8));
	PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
		      ci->ci_request_producer | (ci->ci_request_completion << 8));
    }
}

/*
 * The following routine processes unsolicited events.
 * In addition, it fills the unsolicited queue with
 * event buffers, so it can also be used to initialize
 * the queue.
 */
static void
pdq_process_unsolicited_events(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    const pdq_unsolicited_event_t *event;
    pdq_rxdesc_t *rxd;

    /*
     * Process each unsolicited event (if any).
     */

    while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
	rxd = &dbp->pdqdb_unsolicited_events[ui->ui_completion];
	event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];

	switch (event->event_type) {
	    case PDQ_UNSOLICITED_EVENT: {
		printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
		       PDQ_OS_PREFIX_ARGS,
		       pdq_entities[event->event_entity],
		       pdq_event_codes[event->event_entity][event->event_code.value]);
		if (event->event_entity == PDQ_ENTITY_PHY_PORT)
		    printf("[%d]", event->event_index);
		printf("\n");
		break;
	    }
	    case PDQ_UNSOLICITED_COUNTERS: {
		break;
	    }
	}
	PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
	ui->ui_free++;
    }

    /*
     * Now give the event buffers back to the PDQ.
     */
    PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
    ui->ui_free = 0;

    PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
		  ui->ui_producer | (ui->ui_completion << 8));
}

static void
pdq_process_received_data(
    pdq_t *pdq,
    pdq_rx_info_t *rx,
    pdq_rxdesc_t *receives,
    pdq_uint32_t completion_goal,
    pdq_uint32_t ring_mask)
{
    pdq_uint32_t completion = rx->rx_completion;
    pdq_uint32_t producer = rx->rx_producer;
    PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
    pdq_rxdesc_t *rxd;
    pdq_uint32_t idx;

    while (completion != completion_goal) {
	PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
	pdq_uint8_t *dataptr;
	pdq_uint32_t fc, datalen, pdulen, segcnt;
	pdq_rxstatus_t status;

	fpdu = lpdu = buffers[completion];
	PDQ_ASSERT(fpdu != NULL);

	dataptr = PDQ_OS_DATABUF_PTR(fpdu);
	status = *(pdq_rxstatus_t *) dataptr;
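	/*
	 * As used below, bit 21 of the receive status marks a bad PDU and
	 * the low 13 bits carry the received frame length in bytes.
	 */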
	if ((status.rxs_status & 0x200000) == 0) {
	    datalen = status.rxs_status & 0x1FFF;
	    fc = dataptr[PDQ_RX_FC_OFFSET];
	    switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
		case PDQ_FDDI_LLC_ASYNC:
		case PDQ_FDDI_LLC_SYNC:
		case PDQ_FDDI_IMP_ASYNC:
		case PDQ_FDDI_IMP_SYNC: {
		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
			PDQ_PRINTF(("discard: bad length %d\n", datalen));
			goto discard_frame;
		    }
		    break;
		}
		case PDQ_FDDI_SMT: {
		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
			goto discard_frame;
		    break;
		}
		default: {
		    PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
		    goto discard_frame;
		}
	    }
	    /*
	     * Update the lengths of the data buffers now that we know
	     * the real length.
	     */
	    pdulen = datalen - 4 /* CRC */;
	    segcnt = (pdulen + PDQ_RX_FC_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
	    PDQ_OS_DATABUF_ALLOC(npdu);
	    if (npdu == NULL) {
		PDQ_PRINTF(("discard: no databuf #0\n"));
		goto discard_frame;
	    }
	    buffers[completion] = npdu;
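	    /*
	     * Chain the remaining databufs of this frame together and
	     * replace each ring slot we consume with a freshly allocated
	     * buffer so the ring can be reposted below.
	     */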
	    for (idx = 1; idx < segcnt; idx++) {
		PDQ_OS_DATABUF_ALLOC(npdu);
		if (npdu == NULL) {
		    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
		    PDQ_OS_DATABUF_FREE(fpdu);
		    goto discard_frame;
		}
		PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
		lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
		buffers[(completion + idx) & ring_mask] = npdu;
	    }
	    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
	    for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
		buffers[(producer + idx) & ring_mask] =
		    buffers[(completion + idx) & ring_mask];
		buffers[(completion + idx) & ring_mask] = NULL;
	    }
	    PDQ_OS_DATABUF_ADJ(fpdu, PDQ_RX_FC_OFFSET);
	    if (segcnt == 1) {
		PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
	    } else {
		PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_RX_FC_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
	    }
	    pdq_os_receive_pdu(pdq, fpdu, pdulen);
	    rx->rx_free += PDQ_RX_SEGCNT;
	    PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
	    PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
	    continue;
	} else {
	    PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
			status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
			status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
	    if (status.rxs_rcc_reason == 7)
		goto discard_frame;
	    if (status.rxs_rcc_reason != 0) {
		/* hardware fault */
	    }
	    if (status.rxs_rcc_badcrc) {
		printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
		       PDQ_OS_PREFIX_ARGS,
		       dataptr[PDQ_RX_FC_OFFSET+1],
		       dataptr[PDQ_RX_FC_OFFSET+2],
		       dataptr[PDQ_RX_FC_OFFSET+3],
		       dataptr[PDQ_RX_FC_OFFSET+4],
		       dataptr[PDQ_RX_FC_OFFSET+5],
		       dataptr[PDQ_RX_FC_OFFSET+6]);
		/* rx->rx_badcrc++; */
	    } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
		/* rx->rx_frame_status_errors++; */
	    } else {
		/* hardware fault */
	    }
	}
      discard_frame:
	/*
	 * Discarded frames go right back on the queue; therefore
	 * ring entries were freed.
	 */
	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
	    buffers[producer] = buffers[completion];
	    buffers[completion] = NULL;
	    rxd = &receives[rx->rx_producer];
	    if (idx == 0) {
		rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
	    } else {
		rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
	    }
	    rxd->rxd_pa_hi = 0;
	    rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
	    rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(buffers[rx->rx_producer]));
	    PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
	    PDQ_ADVANCE(producer, 1, ring_mask);
	    PDQ_ADVANCE(completion, 1, ring_mask);
	}
    }
    rx->rx_completion = completion;

    while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
	PDQ_OS_DATABUF_T *pdu;
	/*
	 * Allocate the needed number of data buffers.
	 * Try to obtain them from our free queue before
	 * asking the system for more.
	 */
	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
	    if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
		PDQ_OS_DATABUF_ALLOC(pdu);
		if (pdu == NULL)
		    break;
		buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
	    }
	    rxd = &receives[(rx->rx_producer + idx) & ring_mask];
	    if (idx == 0) {
		rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
	    } else {
		rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
	    }
	    rxd->rxd_pa_hi = 0;
	    rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
	    rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(pdu));
	}
	if (idx < PDQ_RX_SEGCNT) {
	    /*
	     * We didn't get all databufs required to complete a new
	     * receive buffer.  Keep the ones we got and retry a bit
	     * later for the rest.
	     */
	    break;
	}
	PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
	rx->rx_free -= PDQ_RX_SEGCNT;
    }
}

pdq_boolean_t
pdq_queue_transmit_data(
    pdq_t *pdq,
    PDQ_OS_DATABUF_T *pdu)
{
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    pdq_uint32_t producer = tx->tx_producer;
    pdq_txdesc_t *eop = NULL;
    PDQ_OS_DATABUF_T *pdu0;
    pdq_uint32_t freecnt;

    if (tx->tx_free < 1)
	return PDQ_FALSE;

    dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));

    for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
	pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
	const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);

	/*
	 * The first segment is limited to the space remaining in the
	 * page.  All segments after that can be up to a full page
	 * in size.
	 */
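	/*
	 * (dataptr - (pdq_uint8_t *) NULL) is simply the pointer's numeric
	 * value, used here to find its offset within the page.
	 */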
	fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
	while (datalen > 0 && freecnt > 0) {
	    pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);

	    /*
	     * Initialize the transmit descriptor
	     */
	    eop = &dbp->pdqdb_transmits[producer];
	    eop->txd_seg_len = seglen;
	    eop->txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, dataptr);
	    eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;

	    datalen -= seglen;
	    dataptr += seglen;
	    fraglen = PDQ_OS_PAGESIZE;
	    freecnt--;
	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
	}
	pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
    }
    if (pdu0 != NULL) {
	PDQ_ASSERT(freecnt == 0);
	/*
	 * If we still have data to process then the ring was too full
	 * to store the PDU.  Return FALSE so the caller will requeue
	 * the PDU for later.
	 */
	return PDQ_FALSE;
    }
    /*
     * Everything went fine.  Finish it up.
     */
    tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
    eop->txd_eop = 1;
    PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
    tx->tx_producer = producer;
    tx->tx_free = freecnt;
    PDQ_DO_TYPE2_PRODUCER(pdq);
    return PDQ_TRUE;
}

static void
pdq_process_transmitted_data(
    pdq_t *pdq)
{
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    pdq_uint32_t completion = tx->tx_completion;

    while (completion != cbp->pdqcb_transmits) {
	PDQ_OS_DATABUF_T *pdu;
	pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
	PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
	PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
	pdq_os_transmit_done(pdq, pdu);
	tx->tx_free += descriptor_count;

	PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
    }
    if (tx->tx_completion != completion) {
	tx->tx_completion = completion;
	pdq_os_restart_transmitter(pdq);
    }
    PDQ_DO_TYPE2_PRODUCER(pdq);
}

void
pdq_flush_transmitter(
    pdq_t *pdq)
{
    volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;

    for (;;) {
	PDQ_OS_DATABUF_T *pdu;
	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
	if (pdu == NULL)
	    break;
	/*
	 * Don't call transmit done since the packet never made it
	 * out on the wire.
	 */
	PDQ_OS_DATABUF_FREE(pdu);
    }

    tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
    tx->tx_completion = cbp->pdqcb_transmits = tx->tx_producer;

    PDQ_DO_TYPE2_PRODUCER(pdq);
}

void
pdq_hwreset(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_state_t state;
    int cnt;

    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    if (state == PDQS_DMA_UNAVAILABLE)
	return;
    PDQ_CSR_WRITE(csrs, csr_port_data_a,
		  (state == PDQS_HALTED) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
    PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
    PDQ_OS_USEC_DELAY(100);
    PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
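    /*
     * Poll for up to 45 seconds (45000 x 1ms) for the adapter to come
     * back in the DMA Unavailable state.
     */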
    for (cnt = 45000;;cnt--) {
	PDQ_OS_USEC_DELAY(1000);
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
	    break;
    }
    PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 45000 - cnt));
    PDQ_OS_USEC_DELAY(10000);
    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
    PDQ_ASSERT(cnt > 0);
}

/*
 * The following routine brings the PDQ from whatever state it is
 * in to DMA_UNAVAILABLE (i.e. like a RESET but without doing a RESET).
 */
pdq_state_t
pdq_stop(
    pdq_t *pdq)
{
    pdq_state_t state;
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    int cnt, pass = 0, idx;
    PDQ_OS_DATABUF_T **buffers;

  restart:
    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    if (state != PDQS_DMA_UNAVAILABLE) {
	pdq_hwreset(pdq);
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
    }
#if 0
    switch (state) {
	case PDQS_RING_MEMBER:
	case PDQS_LINK_UNAVAILABLE:
	case PDQS_LINK_AVAILABLE: {
	    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
	    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
	    /* FALL THROUGH */
	}
	case PDQS_DMA_AVAILABLE: {
	    PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
	    pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	    PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
	    /* FALL THROUGH */
	}
	case PDQS_DMA_UNAVAILABLE: {
	    break;
	}
    }
#endif
    /*
     * Now we should be in DMA_UNAVAILABLE.  So bring the PDQ into
     * DMA_AVAILABLE.
     */

    /*
     * Obtain the hardware address and firmware revisions
     * (MLA = my long address which is FDDI speak for hardware address)
     */
    pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
    pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
    pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);

    if (pdq->pdq_type == PDQ_DEFPA) {
	/*
	 * Disable interrupts and DMA.
	 */
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
    }

    /*
     * Flush all the databuf queues.
     */
    pdq_flush_databuf_queue(&pdq->pdq_tx_info.tx_txq);
    pdq->pdq_flags &= ~PDQ_TXOK;
    buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
    for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
	if (buffers[idx] != NULL) {
	    PDQ_OS_DATABUF_FREE(buffers[idx]);
	    buffers[idx] = NULL;
	}
    }
    pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
    buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
    for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
	if (buffers[idx] != NULL) {
	    PDQ_OS_DATABUF_FREE(buffers[idx]);
	    buffers[idx] = NULL;
	}
    }
    pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);

    /*
     * Reset the consumer indexes to 0.
     */
    pdq->pdq_cbp->pdqcb_receives = 0;
    pdq->pdq_cbp->pdqcb_transmits = 0;
    pdq->pdq_cbp->pdqcb_host_smt = 0;
    pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
    pdq->pdq_cbp->pdqcb_command_response = 0;
    pdq->pdq_cbp->pdqcb_command_request = 0;

    /*
     * Reset the producer and completion indexes to 0.
     */
    pdq->pdq_command_info.ci_request_producer = 0;
    pdq->pdq_command_info.ci_response_producer = 0;
    pdq->pdq_command_info.ci_request_completion = 0;
    pdq->pdq_command_info.ci_response_completion = 0;
    pdq->pdq_unsolicited_info.ui_producer = 0;
    pdq->pdq_unsolicited_info.ui_completion = 0;
    pdq->pdq_rx_info.rx_producer = 0;
    pdq->pdq_rx_info.rx_completion = 0;
    pdq->pdq_tx_info.tx_producer = 0;
    pdq->pdq_tx_info.tx_completion = 0;
    pdq->pdq_host_smt_info.rx_producer = 0;
    pdq->pdq_host_smt_info.rx_completion = 0;

    pdq->pdq_command_info.ci_command_active = 0;
    pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
    pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);

    /*
     * Allow the DEFPA to do DMA.  Then program the physical
     * addresses of the consumer and descriptor blocks.
     */
    if (pdq->pdq_type == PDQ_DEFPA) {
#ifdef PDQTEST
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
		      PDQ_PFI_MODE_DMA_ENABLE);
#else
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
		      PDQ_PFI_MODE_DMA_ENABLE
	    /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
#endif
    }

    /*
     * Make sure the unsolicited queue has events ...
     */
    pdq_process_unsolicited_events(pdq);

    if (pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
    else
	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);

    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_OS_VA_TO_PA(pdq, pdq->pdq_cbp));
    pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);

    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
    PDQ_CSR_WRITE(csrs, csr_port_data_a,
		  PDQ_OS_VA_TO_PA(pdq, pdq->pdq_dbp) | PDQ_DMA_INIT_LW_BSWAP_DATA);
    pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);

    for (cnt = 0; cnt < 1000; cnt++) {
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	if (state == PDQS_HALTED) {
	    if (pass > 0)
		return PDQS_HALTED;
	    pass = 1;
	    goto restart;
	}
	if (state == PDQS_DMA_AVAILABLE) {
	    PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
	    break;
	}
	PDQ_OS_USEC_DELAY(1000);
    }
    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);

    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
    PDQ_CSR_WRITE(csrs, csr_host_int_enable, 0) /* PDQ_HOST_INT_STATE_CHANGE
	|PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
	|PDQ_HOST_INT_UNSOL_ENABLE */;

    /*
     * Any other command but START should be valid.
     */
    pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
    if (pdq->pdq_flags & PDQ_PRINTCHARS)
	pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
    pdq_queue_commands(pdq);

    if (pdq->pdq_flags & PDQ_PRINTCHARS) {
	/*
	 * Now wait (up to one second) for the command(s) to finish.
	 */
	for (cnt = 0; cnt < 1000; cnt++) {
	    pdq_process_command_responses(pdq);
	    if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
		break;
	    PDQ_OS_USEC_DELAY(1000);
	}
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    }

    return state;
}

void
pdq_run(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_state_t state;

    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
    PDQ_ASSERT(state != PDQS_RESET);
    PDQ_ASSERT(state != PDQS_HALTED);
    PDQ_ASSERT(state != PDQS_UPGRADE);
    PDQ_ASSERT(state != PDQS_RING_MEMBER);
    switch (state) {
	case PDQS_DMA_AVAILABLE: {
	    /*
	     * The PDQ after being reset screws up some of its state.
	     * So we need to clear all the errors/interrupts so the real
	     * ones will get through.
	     */
	    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
	    PDQ_CSR_WRITE(csrs, csr_host_int_enable, PDQ_HOST_INT_STATE_CHANGE|PDQ_HOST_INT_XMT_DATA_FLUSH
		|PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
		|PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_TX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE);
	    /*
	     * Set the MAC and address filters and start up the PDQ.
	     */
	    pdq_process_unsolicited_events(pdq);
	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
				      pdq->pdq_dbp->pdqdb_receives,
				      pdq->pdq_cbp->pdqcb_receives,
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
	    PDQ_DO_TYPE2_PRODUCER(pdq);
	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
					  pdq->pdq_dbp->pdqdb_host_smt,
					  pdq->pdq_cbp->pdqcb_host_smt,
					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
			      pdq->pdq_host_smt_info.rx_producer
			          | (pdq->pdq_host_smt_info.rx_completion << 8));
	    }
	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET) | PDQ_BITMASK(PDQC_START);
	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
	    pdq_queue_commands(pdq);
	    break;
	}
	case PDQS_LINK_UNAVAILABLE:
	case PDQS_LINK_AVAILABLE: {
	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET);
	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
					  pdq->pdq_dbp->pdqdb_host_smt,
					  pdq->pdq_cbp->pdqcb_host_smt,
					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
			      pdq->pdq_host_smt_info.rx_producer
			          | (pdq->pdq_host_smt_info.rx_completion << 8));
	    }
	    pdq_process_unsolicited_events(pdq);
	    pdq_queue_commands(pdq);
	    break;
	}
	case PDQS_RING_MEMBER: {
	}
	default: {	/* to make gcc happy */
	    break;
	}
    }
}

int
pdq_interrupt(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_uint32_t data;
    int progress = 0;

    if (pdq->pdq_type == PDQ_DEFPA)
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);

    while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
	progress = 1;
	PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
	if (data & PDQ_PSTS_RCV_DATA_PENDING) {
	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
				      pdq->pdq_dbp->pdqdb_receives,
				      pdq->pdq_cbp->pdqcb_receives,
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
	    PDQ_DO_TYPE2_PRODUCER(pdq);
	}
	if (data & PDQ_PSTS_HOST_SMT_PENDING) {
	    pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
				      pdq->pdq_dbp->pdqdb_host_smt,
				      pdq->pdq_cbp->pdqcb_host_smt,
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
	    PDQ_DO_HOST_SMT_PRODUCER(pdq);
	}
	if (data & PDQ_PSTS_XMT_DATA_PENDING)
	    pdq_process_transmitted_data(pdq);
	if (data & PDQ_PSTS_UNSOL_PENDING)
	    pdq_process_unsolicited_events(pdq);
	if (data & PDQ_PSTS_CMD_RSP_PENDING)
	    pdq_process_command_responses(pdq);
	if (data & PDQ_PSTS_TYPE_0_PENDING) {
	    data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
	    if (data & PDQ_HOST_INT_STATE_CHANGE) {
		pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
		printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
		if (state == PDQS_LINK_UNAVAILABLE) {
		    pdq->pdq_flags &= ~PDQ_TXOK;
		} else if (state == PDQS_LINK_AVAILABLE) {
		    pdq->pdq_flags |= PDQ_TXOK;
		    pdq_os_restart_transmitter(pdq);
		} else if (state == PDQS_HALTED) {
		    pdq_response_error_log_get_t log_entry;
		    pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
		    printf(": halt code = %d (%s)\n",
			   halt_code, pdq_halt_codes[halt_code]);
		    if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
			PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
			       PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
			       data & PDQ_HOST_INT_FATAL_ERROR));
		    }
		    pdq_read_error_log(pdq, &log_entry);
		    pdq_stop(pdq);
		    if (pdq->pdq_flags & PDQ_RUNNING)
			pdq_run(pdq);
		    return 1;
		}
		printf("\n");
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
	    }
	    if (data & PDQ_HOST_INT_FATAL_ERROR) {
		pdq_stop(pdq);
		if (pdq->pdq_flags & PDQ_RUNNING)
		    pdq_run(pdq);
		return 1;
	    }
	    if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
		printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
		pdq->pdq_flags &= ~PDQ_TXOK;
		pdq_flush_transmitter(pdq);
		pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
	    }
	}
	if (pdq->pdq_type == PDQ_DEFPA)
	    PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
    }
    return progress;
}

pdq_t *
pdq_initialize(
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    const char *name,
    int unit,
    void *ctx,
    pdq_type_t type)
{
    pdq_t *pdq;
    pdq_state_t state;
    const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
    pdq_uint8_t *p;
    int idx;

    PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
    PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
    PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
    PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
    PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
    PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
    PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
    PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
    PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);

    pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
    if (pdq == NULL) {
	PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
	return NULL;
    }
    PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
    pdq->pdq_type = type;
    pdq->pdq_unit = unit;
    pdq->pdq_os_ctx = (void *) ctx;
    pdq->pdq_os_name = name;
    pdq->pdq_flags = PDQ_PRINTCHARS;
    /*
     * Allocate the additional data structures required by
     * the PDQ driver.  Allocate a contiguous region of memory
     * for the descriptor block.  We need to allocate enough
     * to guarantee that we will get an 8KB block of memory aligned
     * on an 8KB boundary.  This turns out to require that we allocate
     * (N*2 - 1 page) pages of memory.  On machines with less than
     * an 8KB page size, this means we will allocate more memory than
     * we need.  The extra will be used for the unsolicited event
     * buffers (though on machines with 8KB pages we will have to allocate
     * them separately since there will be nothing left over.)
     */
    p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
    if (p != NULL) {
	pdq_physaddr_t physaddr = PDQ_OS_VA_TO_PA(pdq, p);
	/*
	 * Assert that we really got contiguous memory.  This isn't really
	 * needed on systems that actually have physical contiguous allocation
	 * routines, but on those systems that don't ...
	 */
	for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
	    if (PDQ_OS_VA_TO_PA(pdq, p + idx) - physaddr != idx)
		goto cleanup_and_return;
	}
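	/*
	 * If the region did not start on an 8KB boundary, place the
	 * descriptor block at the next 8KB boundary and use the leading
	 * fragment for the unsolicited event buffers; otherwise the
	 * descriptor block starts at p and the events follow it.
	 */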
	physaddr &= 0x1FFF;
	if (physaddr) {
	    pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
	    pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - physaddr];
	} else {
	    pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
	    pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
	}
    }
    if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
	pdq->pdq_unsolicited_info.ui_events =
	    (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
		PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
    }

    /*
     * Make sure everything got allocated.  If not, free what did
     * get allocated and return.
     */
    if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
      cleanup_and_return:
	if (p /* pdq->pdq_dbp */ != NULL)
	    PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
	if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
	    PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
			   PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
	PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
	return NULL;
    }

    pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
    pdq->pdq_command_info.ci_bufstart = (pdq_uint8_t *) pdq->pdq_dbp->pdqdb_command_pool;
    pdq->pdq_rx_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_receive_buffers;

    pdq->pdq_host_smt_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_host_smt_buffers;

    PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp));
    PDQ_PRINTF(("    Receive Queue          = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_receives));
    PDQ_PRINTF(("    Transmit Queue         = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_transmits));
    PDQ_PRINTF(("    Host SMT Queue         = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_host_smt));
    PDQ_PRINTF(("    Command Response Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_responses));
    PDQ_PRINTF(("    Command Request Queue  = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_requests));
    PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));

    /*
     * Zero out the descriptor block.  Not really required but
     * it pays to be neat.  This will also zero out the consumer
     * block, command pool, and buffer pointers for the receive
     * and host_smt rings.
     */
    PDQ_OS_MEMZERO(pdq->pdq_dbp, sizeof(*pdq->pdq_dbp));

    /*
     * Initialize the CSR references.
     * the DEFAA (FutureBus+) skips a longword between registers
     */
    pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
    if (pdq->pdq_type == PDQ_DEFPA)
	pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);

    PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_PTR_FMT "\n", pdq->pdq_csrs.csr_base));
    PDQ_PRINTF(("    Port Reset                = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
    PDQ_PRINTF(("    Host Data                 = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
    PDQ_PRINTF(("    Port Control              = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
    PDQ_PRINTF(("    Port Data A               = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
    PDQ_PRINTF(("    Port Data B               = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
    PDQ_PRINTF(("    Port Status               = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
    PDQ_PRINTF(("    Host Int Type 0           = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
    PDQ_PRINTF(("    Host Int Enable           = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
    PDQ_PRINTF(("    Type 2 Producer           = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
    PDQ_PRINTF(("    Command Response Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
    PDQ_PRINTF(("    Command Request Producer  = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
    PDQ_PRINTF(("    Host SMT Producer         = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
    PDQ_PRINTF(("    Unsolicited Producer      = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));

    /*
     * Initialize the command information block
     */
    pdq->pdq_command_info.ci_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_command_info.ci_bufstart);
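    /*
     * Every command request and response descriptor points at the same
     * command pool buffer; only one command is ever outstanding at a time
     * (see pdq_queue_commands).
     */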
    for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_requests)/sizeof(pdq->pdq_dbp->pdqdb_command_requests[0]); idx++) {
	pdq_txdesc_t *txd = &pdq->pdq_dbp->pdqdb_command_requests[idx];

	txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart;
	txd->txd_eop = txd->txd_sop = 1;
	txd->txd_pa_hi = 0;
    }
    for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_responses)/sizeof(pdq->pdq_dbp->pdqdb_command_responses[0]); idx++) {
	pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_command_responses[idx];

	rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart;
	rxd->rxd_sop = 1;
	rxd->rxd_seg_cnt = 0;
	rxd->rxd_seg_len_lo = 0;
    }

    /*
     * Initialize the unsolicited event information block
     */
    pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
    pdq->pdq_unsolicited_info.ui_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_unsolicited_info.ui_events);
    for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events)/sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events[0]); idx++) {
	pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_unsolicited_events[idx];
	pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];

	rxd->rxd_sop = 1;
	rxd->rxd_seg_cnt = 0;
	rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
	rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
	    - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
	rxd->rxd_pa_hi = 0;
    }
    /*
     * Initialize the receive information blocks (normal and SMT).
     */
    pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
    pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;

    pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
    pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;

    /*
     * Initialize the transmit information block.
     */
    pdq->pdq_tx_hdr[0] = PDQ_FDDI_PH0;
    pdq->pdq_tx_hdr[1] = PDQ_FDDI_PH1;
    pdq->pdq_tx_hdr[2] = PDQ_FDDI_PH2;
    pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
    pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = sizeof(pdq->pdq_tx_hdr);
    pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
    pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_tx_hdr);

    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
    PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));

    /*
     * Stop the PDQ if it is running and put it into a known state.
     */
    state = pdq_stop(pdq);

    PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
    /*
     * If the adapter is not in the state we expect, then the initialization
     * failed.  Cleanup and exit.
     */
#if defined(PDQVERBOSE)
    if (state == PDQS_HALTED) {
	pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
	printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
	if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
	    PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
		       PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
		       PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
    }
#endif
    if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
	goto cleanup_and_return;

    PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
	   pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
	   pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
	   pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
    PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
	   pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
	   pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
    PDQ_PRINTF(("PDQ Chip Revision = "));
    switch (pdq->pdq_chip_rev) {
	case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
	case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
	case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
	default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));
    }
    PDQ_PRINTF(("\n"));

    return pdq;
}