/*	$NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $	*/

/*-
 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Id: pdq.c,v 1.32 1997/06/05 01:56:35 thomas Exp
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * DEC PDQ FDDI Controller O/S independent code
 *
 * This module should work on any PDQ-based board.  Note that changes for
 * the MIPS and Alpha architectures (or any other architecture which requires
 * flushing of memory or write buffers and/or has incoherent caches)
 * have yet to be made.
 *
 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
 * flush of the write buffers.
 */


#define	PDQ_HWSUPPORT	/* for pdq.h */

/*
 * What a botch having to use specific includes for FreeBSD!
 */
#include <dev/pdq/pdq_freebsd.h>
#include <dev/pdq/pdqreg.h>

#define	PDQ_ROUNDUP(n, x)	(((n) + ((x) - 1)) & ~((x) - 1))
#define	PDQ_CMD_RX_ALIGNMENT	16
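/*
 * Note: PDQ_ROUNDUP rounds n up to the next multiple of x by masking off
 * the low-order bits, so it assumes x is a power of two (as it is for
 * PDQ_CMD_RX_ALIGNMENT).
 */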

#if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
#define	PDQ_PRINTF(x)	printf x
#else
#define	PDQ_PRINTF(x)	do { } while (0)
#endif

static const char * const pdq_halt_codes[] = {
    "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
    "Software Fault", "Hardware Fault", "PC Trace Path Test",
    "DMA Error", "Image CRC Error", "Adapter Processor Error"
};

static const char * const pdq_adapter_states[] = {
    "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
    "Link Available", "Link Unavailable", "Halted", "Ring Member"
};

/*
 * The following are used in conjunction with
 * unsolicited events
 */
static const char * const pdq_entities[] = {
    "Station", "Link", "Phy Port"
};

static const char * const pdq_station_events[] = {
    "Unknown Event #0",
    "Trace Received"
};

static const char * const pdq_station_arguments[] = {
    "Reason"
};

static const char * const pdq_link_events[] = {
    "Transmit Underrun",
    "Transmit Failed",
    "Block Check Error (CRC)",
    "Frame Status Error",
    "PDU Length Error",
    NULL,
    NULL,
    "Receive Data Overrun",
    NULL,
    "No User Buffer",
    "Ring Initialization Initiated",
    "Ring Initialization Received",
    "Ring Beacon Initiated",
    "Duplicate Address Failure",
    "Duplicate Token Detected",
    "Ring Purger Error",
    "FCI Strip Error",
    "Trace Initiated",
    "Directed Beacon Received",
};

static const char * const pdq_link_arguments[] = {
    "Reason",
    "Data Link Header",
    "Source",
    "Upstream Neighbor"
};

static const char * const pdq_phy_events[] = {
    "LEM Error Monitor Reject",
    "Elasticity Buffer Error",
    "Link Confidence Test Reject"
};

static const char * const pdq_phy_arguments[] = {
    "Direction"
};

static const char * const * const pdq_event_arguments[] = {
    pdq_station_arguments,
    pdq_link_arguments,
    pdq_phy_arguments
};

static const char * const * const pdq_event_codes[] = {
    pdq_station_events,
    pdq_link_events,
    pdq_phy_events
};

static const char * const pdq_station_types[] = {
    "SAS", "DAC", "SAC", "NAC", "DAS"
};

static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };

static const char pdq_phy_types[] = "ABSM";

static const char * const pdq_pmd_types0[] = {
    "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
    "ANSI Sonet"
};

static const char * const pdq_pmd_types100[] = {
    "Low Power", "Thin Wire", "Shielded Twisted Pair",
    "Unshielded Twisted Pair"
};

static const char * const * const pdq_pmd_types[] = {
    pdq_pmd_types0, pdq_pmd_types100
};

static const char * const pdq_descriptions[] = {
    "DEFPA PCI",
    "DEFEA EISA",
    "DEFTA TC",
    "DEFAA Futurebus",
    "DEFQA Q-bus",
};

static void
pdq_print_fddi_chars(
    pdq_t *pdq,
    const pdq_response_status_chars_get_t *rsp)
{
    const char hexchars[] = "0123456789abcdef";

    printf(
	   PDQ_OS_PREFIX
	   "DEC %s FDDI %s Controller\n",
	   PDQ_OS_PREFIX_ARGS,
	   pdq_descriptions[pdq->pdq_type],
	   pdq_station_types[rsp->status_chars_get.station_type]);

    printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
	   PDQ_OS_PREFIX_ARGS,
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
	   hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
	   pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
	   pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
	   rsp->status_chars_get.module_rev.fwrev_bytes[0]);

    if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
	printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
    }

    printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
	   PDQ_OS_PREFIX_ARGS,
	   rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
	   pdq_phy_types[rsp->status_chars_get.phy_type[0]],
	   pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);

    if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
	printf(", FDDI Port[B] = %c (PMD = %s)",
	       pdq_phy_types[rsp->status_chars_get.phy_type[1]],
	       pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);

    printf("\n");

    pdq_os_update_status(pdq, rsp);
}

static void
pdq_init_csrs(
    pdq_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_port_reset		= PDQ_CSR_OFFSET(csr_base,  0 * csrsize);
    csrs->csr_host_data			= PDQ_CSR_OFFSET(csr_base,  1 * csrsize);
    csrs->csr_port_control		= PDQ_CSR_OFFSET(csr_base,  2 * csrsize);
    csrs->csr_port_data_a		= PDQ_CSR_OFFSET(csr_base,  3 * csrsize);
    csrs->csr_port_data_b		= PDQ_CSR_OFFSET(csr_base,  4 * csrsize);
    csrs->csr_port_status		= PDQ_CSR_OFFSET(csr_base,  5 * csrsize);
    csrs->csr_host_int_type_0		= PDQ_CSR_OFFSET(csr_base,  6 * csrsize);
    csrs->csr_host_int_enable		= PDQ_CSR_OFFSET(csr_base,  7 * csrsize);
    csrs->csr_type_2_producer		= PDQ_CSR_OFFSET(csr_base,  8 * csrsize);
    csrs->csr_cmd_response_producer	= PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
    csrs->csr_cmd_request_producer	= PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
    csrs->csr_host_smt_producer		= PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
    csrs->csr_unsolicited_producer	= PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
}

static void
pdq_init_pci_csrs(
    pdq_pci_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_pfi_mode_control	= PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
    csrs->csr_pfi_status	= PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
    csrs->csr_fifo_write	= PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
    csrs->csr_fifo_read		= PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
}

static void
pdq_flush_databuf_queue(
    pdq_t *pdq,
    pdq_databuf_queue_t *q)
{
    PDQ_OS_DATABUF_T *pdu;
    for (;;) {
	PDQ_OS_DATABUF_DEQUEUE(q, pdu);
	if (pdu == NULL)
	    return;
	PDQ_OS_DATABUF_FREE(pdq, pdu);
    }
}

static pdq_boolean_t
pdq_do_port_control(
    const pdq_csrs_t * const csrs,
    pdq_uint32_t cmd)
{
    int cnt = 0;
    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
    PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
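    /*
     * Spin (bounded) waiting for the adapter to acknowledge the command;
     * the iteration cap keeps a wedged adapter from hanging the host
     * forever.
     */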
    while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
	cnt++;
    PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
    if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
	PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
	return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
    }
    /* adapter failure */
    PDQ_ASSERT(0);
    return PDQ_FALSE;
}

static void
pdq_read_mla(
    const pdq_csrs_t * const csrs,
    pdq_lanaddr_t *hwaddr)
{
    pdq_uint32_t data;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
    pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
    hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
    hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
    hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
    pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
    hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
}

static void
pdq_read_fwrev(
    const pdq_csrs_t * const csrs,
    pdq_fwrev_t *fwrev)
{
    pdq_uint32_t data;

    pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
    fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
    fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
    fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
}

static pdq_boolean_t
pdq_read_error_log(
    pdq_t *pdq,
    pdq_response_error_log_get_t *log_entry)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;

    pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);

    while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
	*ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
	if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
	    break;
    }
    return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
}

static pdq_chip_rev_t
pdq_read_chiprev(
    const pdq_csrs_t * const csrs)
{
    pdq_uint32_t data;

    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
    data = PDQ_CSR_READ(csrs, csr_host_data);

    return (pdq_chip_rev_t) data;
}

static const struct {
    size_t cmd_len;
    size_t rsp_len;
    const char *cmd_name;
} pdq_cmd_info[] = {
    { sizeof(pdq_cmd_generic_t),		/* 0 - PDQC_START */
      sizeof(pdq_response_generic_t),
      "Start"
    },
    { sizeof(pdq_cmd_filter_set_t),		/* 1 - PDQC_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 2 - PDQC_FILTER_GET */
      sizeof(pdq_response_filter_get_t),
      "Filter Get"
    },
    { sizeof(pdq_cmd_chars_set_t),		/* 3 - PDQC_CHARS_SET */
      sizeof(pdq_response_generic_t),
      "Chars Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 4 - PDQC_STATUS_CHARS_GET */
      sizeof(pdq_response_status_chars_get_t),
      "Status Chars Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),		/* 5 - PDQC_COUNTERS_GET */
      sizeof(pdq_response_counters_get_t),
      "Counters Get"
    },
    { sizeof(pdq_cmd_counters_set_t),		/* 6 - PDQC_COUNTERS_SET */
      sizeof(pdq_response_generic_t),
      "Counters Set"
    },
#else
    { 0, 0, "Counters Get" },
    { 0, 0, "Counters Set" },
#endif
    { sizeof(pdq_cmd_addr_filter_set_t),	/* 7 - PDQC_ADDR_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Addr Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 8 - PDQC_ADDR_FILTER_GET */
      sizeof(pdq_response_addr_filter_get_t),
      "Addr Filter Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 9 - PDQC_ERROR_LOG_CLEAR */
      sizeof(pdq_response_generic_t),
      "Error Log Clear"
    },
    { sizeof(pdq_cmd_generic_t),		/* 10 - PDQC_ERROR_LOG_SET */
      sizeof(pdq_response_generic_t),
      "Error Log Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 11 - PDQC_FDDI_MIB_GET */
      sizeof(pdq_response_generic_t),
      "FDDI MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 12 - PDQC_DEC_EXT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "DEC Ext MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 13 - PDQC_DEC_SPECIFIC_GET */
      sizeof(pdq_response_generic_t),
      "DEC Specific Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 14 - PDQC_SNMP_SET */
      sizeof(pdq_response_generic_t),
      "SNMP Set"
    },
    { 0, 0, "N/A" },
    { sizeof(pdq_cmd_generic_t),		/* 16 - PDQC_SMT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 17 - PDQC_SMT_MIB_SET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Set",
    },
    { 0, 0, "Bogus CMD" },
};

static void
pdq_queue_commands(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_txdesc_t * const txd = &dbp->pdqdb_command_requests[ci->ci_request_producer];
    pdq_cmd_code_t op;
    pdq_uint32_t cmdlen, rsplen, mask;

    /*
     * If a command/response exchange is still outstanding, or there
     * are no pending commands, then don't queue any more.
     */
    if (ci->ci_command_active || ci->ci_pending_commands == 0)
	return;

    /*
     * Determine which command needs to be queued.
     */
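    /*
     * The pending-command bitmask is scanned from the highest opcode
     * (SMT MIB Set) downward, so exactly one command is selected and
     * queued per call.
     */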
    op = PDQC_SMT_MIB_SET;
    for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
	op = (pdq_cmd_code_t) ((int) op - 1);
    /*
     * Obtain the sizes needed for the command and response.
     * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
     * always properly aligned.
     */
    cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
    rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
    if (cmdlen < rsplen)
	cmdlen = rsplen;
    /*
     * Since only one command at a time will be queued, there will always
     * be enough space.
     */

    /*
     * Obtain and fill in the descriptor for the command (descriptor is
     * pre-initialized)
     */
    txd->txd_seg_len = cmdlen;

    /*
     * Clear the command area, set the opcode, and clear the command
     * from the pending mask.
     */

    ci->ci_queued_commands[ci->ci_request_producer] = op;
#if defined(PDQVERBOSE)
    ((pdq_response_generic_t *) ci->ci_response_bufstart)->generic_op = PDQC_BOGUS_CMD;
#endif
    PDQ_OS_MEMZERO(ci->ci_request_bufstart, cmdlen);
    *(pdq_cmd_code_t *) ci->ci_request_bufstart = op;
    ci->ci_pending_commands &= ~mask;

    /*
     * Fill in the command area, if needed.
     */
    switch (op) {
	case PDQC_FILTER_SET: {
	    pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_request_bufstart;
	    unsigned idx = 0;
	    filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
	    filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_EOL;
	    break;
	}
	case PDQC_ADDR_FILTER_SET: {
	    pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_request_bufstart;
	    pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
	    addr->lanaddr_bytes[0] = 0xFF;
	    addr->lanaddr_bytes[1] = 0xFF;
	    addr->lanaddr_bytes[2] = 0xFF;
	    addr->lanaddr_bytes[3] = 0xFF;
	    addr->lanaddr_bytes[4] = 0xFF;
	    addr->lanaddr_bytes[5] = 0xFF;
	    addr++;
	    pdq_os_addr_fill(pdq, addr, 61);
	    break;
	}
	case PDQC_SNMP_SET: {
	    pdq_cmd_snmp_set_t *snmp_set = (pdq_cmd_snmp_set_t *) ci->ci_request_bufstart;
	    unsigned idx = 0;
	    snmp_set->snmp_set_items[idx].item_code = PDQSNMP_FULL_DUPLEX_ENABLE;
	    snmp_set->snmp_set_items[idx].item_value = (pdq->pdq_flags & PDQ_WANT_FDX ? 1 : 2);
	    snmp_set->snmp_set_items[idx].item_port = 0;
	    idx++;
	    snmp_set->snmp_set_items[idx].item_code = PDQSNMP_EOL;
	    break;
	}
	default: {	/* to make gcc happy */
	    break;
	}
    }


    /*
     * Sync the command request buffer and descriptor, then advance
     * the request producer index.
     */
    PDQ_OS_CMDRQST_PRESYNC(pdq, txd->txd_seg_len);
    PDQ_OS_DESC_PRESYNC(pdq, txd, sizeof(pdq_txdesc_t));
    PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));

    /*
     * Sync the command response buffer and advance the response
     * producer index (descriptor is already pre-initialized)
     */
    PDQ_OS_CMDRSP_PRESYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
    PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    /*
     * At this point the command is fully set up.  All that remains is to
     * produce it to the PDQ.
     */
    PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
		pdq_cmd_info[op].cmd_name));

    ci->ci_command_active++;
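    /*
     * The request/response producer CSRs pack the producer index in the
     * low byte and the completion index in bits 8-15, hence the << 8.
     */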
    PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
    PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
}

static void
pdq_process_command_responses(
    pdq_t * const pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    const pdq_response_generic_t *rspgen;

    /*
     * We have to process the command and response in tandem so
     * just wait for the response to be consumed.  If it has been
     * consumed then the command must have been as well.
     */

    if (cbp->pdqcb_command_response == ci->ci_response_completion)
	return;

    PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);

    PDQ_OS_CMDRSP_POSTSYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
    rspgen = (const pdq_response_generic_t *) ci->ci_response_bufstart;
    PDQ_ASSERT(rspgen->generic_op == ci->ci_queued_commands[ci->ci_request_completion]);
    PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
    PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d [0x%x])\n",
		pdq_cmd_info[rspgen->generic_op].cmd_name,
		rspgen->generic_status, rspgen->generic_status));

    if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
	pdq->pdq_flags &= ~PDQ_PRINTCHARS;
	pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
    } else if (rspgen->generic_op == PDQC_DEC_EXT_MIB_GET) {
	pdq->pdq_flags &= ~PDQ_IS_FDX;
	if (((const pdq_response_dec_ext_mib_get_t *)rspgen)->dec_ext_mib_get.fdx_operational)
	    pdq->pdq_flags |= PDQ_IS_FDX;
    }

    PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
    PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    ci->ci_command_active = 0;

    if (ci->ci_pending_commands != 0) {
	pdq_queue_commands(pdq);
    } else {
	PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
		      ci->ci_response_producer | (ci->ci_response_completion << 8));
	PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
		      ci->ci_request_producer | (ci->ci_request_completion << 8));
    }
}

/*
 * The following routine processes unsolicited events.
 * In addition, it fills the unsolicited queue with
 * event buffers, so it can also be used to initialize
 * the queue.
 */
static void
pdq_process_unsolicited_events(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;

    /*
     * Process each unsolicited event (if any).
     */

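    /*
     * ui_completion chases the adapter's consumer index; each consumed
     * event slot is counted in ui_free and handed back to the producer
     * below.
     */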
    while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
	const pdq_unsolicited_event_t *event;
	event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
	PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, event);

	switch (event->event_type) {
	    case PDQ_UNSOLICITED_EVENT: {
		int bad_event = 0;
		switch (event->event_entity) {
		    case PDQ_ENTITY_STATION: {
			bad_event = event->event_code.value >= PDQ_STATION_EVENT_MAX;
			break;
		    }
		    case PDQ_ENTITY_LINK: {
			bad_event = event->event_code.value >= PDQ_LINK_EVENT_MAX;
			break;
		    }
		    case PDQ_ENTITY_PHY_PORT: {
			bad_event = event->event_code.value >= PDQ_PHY_EVENT_MAX;
			break;
		    }
		    default: {
			bad_event = 1;
			break;
		    }
		}
		if (bad_event) {
		    break;
		}
		printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
		       PDQ_OS_PREFIX_ARGS,
		       pdq_entities[event->event_entity],
		       pdq_event_codes[event->event_entity][event->event_code.value]);
		if (event->event_entity == PDQ_ENTITY_PHY_PORT)
		    printf("[%d]", event->event_index);
		printf("\n");
		break;
	    }
	    case PDQ_UNSOLICITED_COUNTERS: {
		break;
	    }
	}
	PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
	PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
	ui->ui_free++;
    }

    /*
     * Now give the event buffers back to the PDQ.
     */
    PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
    ui->ui_free = 0;

    PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
		  ui->ui_producer | (ui->ui_completion << 8));
}

static void
pdq_process_received_data(
    pdq_t *pdq,
    pdq_rx_info_t *rx,
    pdq_rxdesc_t *receives,
    pdq_uint32_t completion_goal,
    pdq_uint32_t ring_mask)
{
    pdq_uint32_t completion = rx->rx_completion;
    pdq_uint32_t producer = rx->rx_producer;
    PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
    pdq_rxdesc_t *rxd;
    pdq_uint32_t idx;

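    /*
     * Walk the receive ring from our completion index up to the
     * adapter's consumer index (completion_goal), handing good PDUs to
     * the OS and recycling the buffers of bad or discarded ones.
     */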
    while (completion != completion_goal) {
	PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
	pdq_uint8_t *dataptr;
	pdq_uint32_t fc, datalen, pdulen, segcnt;
	pdq_rxstatus_t status;

	fpdu = lpdu = buffers[completion];
	PDQ_ASSERT(fpdu != NULL);
	PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, 0, sizeof(u_int32_t));
	dataptr = PDQ_OS_DATABUF_PTR(fpdu);
	status = *(pdq_rxstatus_t *) dataptr;
	if (status.rxs_rcc_badpdu == 0) {
	    datalen = status.rxs_len;
	    PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, sizeof(u_int32_t),
				  PDQ_RX_FC_OFFSET + 1 - sizeof(u_int32_t));
	    fc = dataptr[PDQ_RX_FC_OFFSET];
	    switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
		case PDQ_FDDI_LLC_ASYNC:
		case PDQ_FDDI_LLC_SYNC:
		case PDQ_FDDI_IMP_ASYNC:
		case PDQ_FDDI_IMP_SYNC: {
		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
			PDQ_PRINTF(("discard: bad length %d\n", datalen));
			goto discard_frame;
		    }
		    break;
		}
		case PDQ_FDDI_SMT: {
		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
			goto discard_frame;
		    break;
		}
		default: {
		    PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
		    goto discard_frame;
		}
	    }
	    /*
	     * Update the lengths of the data buffers now that we know
	     * the real length.
	     */
	    pdulen = datalen + (PDQ_RX_FC_OFFSET - PDQ_OS_HDR_OFFSET) - 4 /* CRC */;
	    segcnt = (pdulen + PDQ_OS_HDR_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
	    PDQ_OS_DATABUF_ALLOC(pdq, npdu);
	    if (npdu == NULL) {
		PDQ_PRINTF(("discard: no databuf #0\n"));
		goto discard_frame;
	    }
	    buffers[completion] = npdu;
	    for (idx = 1; idx < segcnt; idx++) {
		PDQ_OS_DATABUF_ALLOC(pdq, npdu);
		if (npdu == NULL) {
		    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
		    PDQ_OS_DATABUF_FREE(pdq, fpdu);
		    goto discard_frame;
		}
		PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
		lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
		buffers[(completion + idx) & ring_mask] = npdu;
	    }
	    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
	    for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
		buffers[(producer + idx) & ring_mask] =
		    buffers[(completion + idx) & ring_mask];
		buffers[(completion + idx) & ring_mask] = NULL;
	    }
	    PDQ_OS_DATABUF_ADJ(fpdu, PDQ_OS_HDR_OFFSET);
	    if (segcnt == 1) {
		PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
	    } else {
		PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_OS_HDR_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
	    }
	    /*
	     * Do not pass to protocol if packet was received promiscuously
	     */
	    pdq_os_receive_pdu(pdq, fpdu, pdulen,
			       status.rxs_rcc_dd < PDQ_RXS_RCC_DD_CAM_MATCH);
	    rx->rx_free += PDQ_RX_SEGCNT;
	    PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
	    PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
	    continue;
	} else {
	    PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
			status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
			status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
	    if (status.rxs_rcc_reason == 7)
		goto discard_frame;
	    if (status.rxs_rcc_reason != 0) {
		/* hardware fault */
		if (status.rxs_rcc_badcrc) {
		    printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
			   PDQ_OS_PREFIX_ARGS,
			   dataptr[PDQ_RX_FC_OFFSET+1],
			   dataptr[PDQ_RX_FC_OFFSET+2],
			   dataptr[PDQ_RX_FC_OFFSET+3],
			   dataptr[PDQ_RX_FC_OFFSET+4],
			   dataptr[PDQ_RX_FC_OFFSET+5],
			   dataptr[PDQ_RX_FC_OFFSET+6]);
		    /* rx->rx_badcrc++; */
		} else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
		    /* rx->rx_frame_status_errors++; */
		} else {
		    /* hardware fault */
		}
	    }
	}
      discard_frame:
	/*
	 * Discarded frames have their buffers recycled directly back onto
	 * the receive ring, so no ring entries are freed to the host.
	 */
	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
	    buffers[producer] = buffers[completion];
	    buffers[completion] = NULL;
	    rxd = &receives[rx->rx_producer];
	    if (idx == 0) {
		rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
	    } else {
		rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
	    }
	    rxd->rxd_pa_hi = 0;
	    rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
	    rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, buffers[rx->rx_producer]);
	    PDQ_OS_RXPDU_PRESYNC(pdq, buffers[rx->rx_producer], 0, PDQ_OS_DATABUF_SIZE);
	    PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
	    PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
	    PDQ_ADVANCE(producer, 1, ring_mask);
	    PDQ_ADVANCE(completion, 1, ring_mask);
	}
    }
    rx->rx_completion = completion;

    while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
	PDQ_OS_DATABUF_T *pdu;
	/*
	 * Allocate the needed number of data buffers.
	 * Reuse any buffers already sitting in the ring slots before
	 * asking the system for more.
	 */
	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
	    if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
		PDQ_OS_DATABUF_ALLOC(pdq, pdu);
		if (pdu == NULL)
		    break;
		buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
	    }
	    rxd = &receives[(rx->rx_producer + idx) & ring_mask];
	    if (idx == 0) {
		rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
	    } else {
		rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
	    }
	    rxd->rxd_pa_hi = 0;
	    rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
	    rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, pdu);
	    PDQ_OS_RXPDU_PRESYNC(pdq, pdu, 0, PDQ_OS_DATABUF_SIZE);
	    PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
	}
	if (idx < PDQ_RX_SEGCNT) {
	    /*
	     * We didn't get all databufs required to complete a new
	     * receive buffer.  Keep the ones we got and retry a bit
	     * later for the rest.
	     */
	    break;
	}
	PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
	rx->rx_free -= PDQ_RX_SEGCNT;
    }
}

static void pdq_process_transmitted_data(pdq_t *pdq);

pdq_boolean_t
pdq_queue_transmit_data(
    pdq_t *pdq,
    PDQ_OS_DATABUF_T *pdu)
{
    pdq_tx_info_t * const tx = &pdq->pdq_tx_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_uint32_t producer = tx->tx_producer;
    pdq_txdesc_t *eop = NULL;
    PDQ_OS_DATABUF_T *pdu0;
    pdq_uint32_t freecnt;
#if defined(PDQ_BUS_DMA)
    bus_dmamap_t map;
#endif

  again:
    if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
	freecnt = tx->tx_free - 1;
    } else {
	freecnt = tx->tx_free;
    }
    /*
     * Need 2 or more descriptors to be able to send.
     */
    if (freecnt == 0) {
	pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
	return PDQ_FALSE;
    }

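    /*
     * When PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET the pre-initialized
     * header descriptor (tx_hdrdesc) is emitted first, which is why
     * freecnt was reduced by one above.
     */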
    if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
	dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
	PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[producer], sizeof(pdq_txdesc_t));
	PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
    }

#if defined(PDQ_BUS_DMA)
    map = M_GETCTX(pdu, bus_dmamap_t);
    if (freecnt >= map->dm_nsegs) {
	int idx;
	for (idx = 0; idx < map->dm_nsegs; idx++) {
	    /*
	     * Initialize the transmit descriptor
	     */
	    eop = &dbp->pdqdb_transmits[producer];
	    eop->txd_seg_len = map->dm_segs[idx].ds_len;
	    eop->txd_pa_lo = map->dm_segs[idx].ds_addr;
	    eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
	    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
	    freecnt--;
	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
	}
	pdu0 = NULL;
    } else {
	pdu0 = pdu;
    }
#else
    for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
	pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
	const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);

	/*
	 * The first segment is limited to the space remaining in the
	 * page.  All segments after that can be up to a full page
	 * in size.
	 */
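	/*
	 * (dataptr - (pdq_uint8_t *) NULL) is just the pointer value as
	 * an integer, so the mask below yields the offset of dataptr
	 * within its page.
	 */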
	fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
	while (datalen > 0 && freecnt > 0) {
	    pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);

	    /*
	     * Initialize the transmit descriptor
	     */
	    eop = &dbp->pdqdb_transmits[producer];
	    eop->txd_seg_len = seglen;
	    eop->txd_pa_lo = PDQ_OS_VA_TO_BUSPA(pdq, dataptr);
	    eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
	    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
	    datalen -= seglen;
	    dataptr += seglen;
	    fraglen = PDQ_OS_PAGESIZE;
	    freecnt--;
	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
	}
	pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
    }
#endif /* defined(PDQ_BUS_DMA) */
    if (pdu0 != NULL) {
	unsigned completion = tx->tx_completion;
	PDQ_ASSERT(freecnt == 0);
	PDQ_OS_CONSUMER_POSTSYNC(pdq);
	pdq_process_transmitted_data(pdq);
	if (completion != tx->tx_completion) {
	    producer = tx->tx_producer;
	    eop = NULL;
	    goto again;
	}
	/*
	 * If we still have data to process then the ring was too full
	 * to store the PDU.  Return FALSE so the caller will requeue
	 * the PDU for later.
	 */
	pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
	return PDQ_FALSE;
    }
    /*
     * Everything went fine.  Finish it up.
     */
    tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
    if (PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET) {
	dbp->pdqdb_transmits[tx->tx_producer].txd_sop = 1;
	PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[tx->tx_producer],
	    sizeof(pdq_txdesc_t));
    }
    eop->txd_eop = 1;
    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
    PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
    tx->tx_producer = producer;
    tx->tx_free = freecnt;
    PDQ_DO_TYPE2_PRODUCER(pdq);
    return PDQ_TRUE;
}

static void
pdq_process_transmitted_data(
    pdq_t *pdq)
{
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    pdq_uint32_t completion = tx->tx_completion;
    int reclaimed = 0;

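    /*
     * Walk the transmit ring from our completion index up to the
     * adapter's consumer index, returning each completed PDU to the OS
     * and reclaiming its descriptors.
     */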
    while (completion != cbp->pdqcb_transmits) {
	PDQ_OS_DATABUF_T *pdu;
	pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
	PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
	PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
	pdq_os_transmit_done(pdq, pdu);
	tx->tx_free += descriptor_count;
	reclaimed = 1;
	PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
    }
    if (tx->tx_completion != completion) {
	tx->tx_completion = completion;
	pdq->pdq_intrmask &= ~PDQ_HOST_INT_TX_ENABLE;
	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
	pdq_os_restart_transmitter(pdq);
    }
    if (reclaimed)
	PDQ_DO_TYPE2_PRODUCER(pdq);
}

void
pdq_flush_transmitter(
    pdq_t *pdq)
{
    volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;

    for (;;) {
	PDQ_OS_DATABUF_T *pdu;
	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
	if (pdu == NULL)
	    break;
	/*
	 * Don't call transmit done since the packet never made it
	 * out on the wire.
	 */
	PDQ_OS_DATABUF_FREE(pdq, pdu);
    }

    tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
    cbp->pdqcb_transmits = tx->tx_completion = tx->tx_producer;
    PDQ_OS_CONSUMER_PRESYNC(pdq);

    PDQ_DO_TYPE2_PRODUCER(pdq);
}

void
pdq_hwreset(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_state_t state;
    int cnt;

    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    if (state == PDQS_DMA_UNAVAILABLE)
	return;
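    /*
     * Run the full selftest only when recovering a halted (non-DEFTA)
     * adapter; otherwise request PDQ_PRESET_SKIP_SELFTEST to speed up
     * the reset.
     */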
    PDQ_CSR_WRITE(csrs, csr_port_data_a,
		  (state == PDQS_HALTED && pdq->pdq_type != PDQ_DEFTA) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
    PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
    PDQ_OS_USEC_DELAY(100);
    PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
    for (cnt = 100000;;cnt--) {
	PDQ_OS_USEC_DELAY(1000);
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
	    break;
    }
    PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 100000 - cnt));
    PDQ_OS_USEC_DELAY(10000);
    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
    PDQ_ASSERT(cnt > 0);
}

/*
 * The following routine brings the PDQ from whatever state it is
 * in to DMA_UNAVAILABLE (i.e. like a RESET but without doing a RESET).
 */
pdq_state_t
pdq_stop(
    pdq_t *pdq)
{
    pdq_state_t state;
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    int cnt, pass = 0, idx;
    PDQ_OS_DATABUF_T **buffers;

  restart:
    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    if (state != PDQS_DMA_UNAVAILABLE) {
	pdq_hwreset(pdq);
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
    }
#if 0
    switch (state) {
	case PDQS_RING_MEMBER:
	case PDQS_LINK_UNAVAILABLE:
	case PDQS_LINK_AVAILABLE: {
	    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
	    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
	    /* FALLTHROUGH */
	}
	case PDQS_DMA_AVAILABLE: {
	    PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
	    pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	    PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
	    /* FALLTHROUGH */
	}
	case PDQS_DMA_UNAVAILABLE: {
	    break;
	}
    }
#endif
    /*
     * Now we should be in DMA_UNAVAILABLE.  So bring the PDQ into
     * DMA_AVAILABLE.
     */

    /*
     * Obtain the hardware address and firmware revisions
     * (MLA = my long address which is FDDI speak for hardware address)
     */
    pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
    pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
    pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);

    if (pdq->pdq_type == PDQ_DEFPA) {
	/*
	 * Disable interrupts and DMA.
	 */
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
    }

    /*
     * Flush all the databuf queues.
     */
    pdq_flush_databuf_queue(pdq, &pdq->pdq_tx_info.tx_txq);
    pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
    buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
    for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
	if (buffers[idx] != NULL) {
	    PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
	    buffers[idx] = NULL;
	}
    }
    pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
    buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
    for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
	if (buffers[idx] != NULL) {
	    PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
	    buffers[idx] = NULL;
	}
    }
    pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);

    /*
     * Reset the consumer indexes to 0.
     */
    pdq->pdq_cbp->pdqcb_receives = 0;
    pdq->pdq_cbp->pdqcb_transmits = 0;
    pdq->pdq_cbp->pdqcb_host_smt = 0;
    pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
    pdq->pdq_cbp->pdqcb_command_response = 0;
    pdq->pdq_cbp->pdqcb_command_request = 0;
    PDQ_OS_CONSUMER_PRESYNC(pdq);

    /*
     * Reset the producer and completion indexes to 0.
     */
    pdq->pdq_command_info.ci_request_producer = 0;
    pdq->pdq_command_info.ci_response_producer = 0;
    pdq->pdq_command_info.ci_request_completion = 0;
    pdq->pdq_command_info.ci_response_completion = 0;
    pdq->pdq_unsolicited_info.ui_producer = 0;
    pdq->pdq_unsolicited_info.ui_completion = 0;
    pdq->pdq_rx_info.rx_producer = 0;
    pdq->pdq_rx_info.rx_completion = 0;
    pdq->pdq_tx_info.tx_producer = 0;
    pdq->pdq_tx_info.tx_completion = 0;
    pdq->pdq_host_smt_info.rx_producer = 0;
    pdq->pdq_host_smt_info.rx_completion = 0;

    pdq->pdq_command_info.ci_command_active = 0;
    pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
    pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);

    /*
     * Allow the DEFPA to do DMA.  Then program the physical
     * addresses of the consumer and descriptor blocks.
     */
    if (pdq->pdq_type == PDQ_DEFPA) {
#ifdef PDQTEST
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
		      PDQ_PFI_MODE_DMA_ENABLE);
#else
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
		      PDQ_PFI_MODE_DMA_ENABLE
	    /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
#endif
    }

    /*
     * Make sure the unsolicited queue has events ...
     */
    pdq_process_unsolicited_events(pdq);

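    /*
     * Rev E DEFEA adapters and the DEFTA can handle 16-longword DMA
     * bursts; older parts are limited to 8.
     */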
    if ((pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
	    || pdq->pdq_type == PDQ_DEFTA)
	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
    else
	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);

    /*
     * Make sure there isn't stale information in the caches before
     * telling the adapter about the blocks it's going to use.
     */
    PDQ_OS_CONSUMER_PRESYNC(pdq);

    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
    PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_consumer_block);
    pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);

    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
#if !defined(BYTE_ORDER) || BYTE_ORDER == LITTLE_ENDIAN
    PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA);
#else
    PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA | PDQ_DMA_INIT_LW_BSWAP_LITERAL);
#endif
    pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);

    for (cnt = 0; cnt < 1000; cnt++) {
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	if (state == PDQS_HALTED) {
	    if (pass > 0)
		return PDQS_HALTED;
	    pass = 1;
	    goto restart;
	}
	if (state == PDQS_DMA_AVAILABLE) {
	    PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
	    break;
	}
	PDQ_OS_USEC_DELAY(1000);
    }
    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);

    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
    pdq->pdq_intrmask = 0;
      /* PDQ_HOST_INT_STATE_CHANGE
	|PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
	|PDQ_HOST_INT_UNSOL_ENABLE */
    PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);

    /*
     * Any other command but START should be valid.
     */
    pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
    if (pdq->pdq_flags & PDQ_PRINTCHARS)
	pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
    pdq_queue_commands(pdq);

    if (pdq->pdq_flags & PDQ_PRINTCHARS) {
	/*
	 * Now wait (up to 1 second) for the command(s) to finish.
	 */
	for (cnt = 0; cnt < 1000; cnt++) {
	    PDQ_OS_CONSUMER_POSTSYNC(pdq);
	    pdq_process_command_responses(pdq);
	    if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
		break;
	    PDQ_OS_USEC_DELAY(1000);
	}
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    }

    return state;
}

void
pdq_run(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_state_t state;

    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
    PDQ_ASSERT(state != PDQS_RESET);
    PDQ_ASSERT(state != PDQS_HALTED);
    PDQ_ASSERT(state != PDQS_UPGRADE);
    PDQ_ASSERT(state != PDQS_RING_MEMBER);
    switch (state) {
	case PDQS_DMA_AVAILABLE: {
	    /*
	     * After being reset the PDQ is left with stale error and
	     * interrupt state.  Clear it all here so that the real
	     * ones will get through.
	     */
	    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
	    pdq->pdq_intrmask = PDQ_HOST_INT_STATE_CHANGE
		|PDQ_HOST_INT_XMT_DATA_FLUSH|PDQ_HOST_INT_FATAL_ERROR
		|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
		|PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE;
	    PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
	    /*
	     * Set the MAC and address filters and start up the PDQ.
	     */
	    pdq_process_unsolicited_events(pdq);
	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
				      pdq->pdq_dbp->pdqdb_receives,
				      pdq->pdq_cbp->pdqcb_receives,
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
	    PDQ_DO_TYPE2_PRODUCER(pdq);
	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
					  pdq->pdq_dbp->pdqdb_host_smt,
					  pdq->pdq_cbp->pdqcb_host_smt,
					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
			      pdq->pdq_host_smt_info.rx_producer
			          | (pdq->pdq_host_smt_info.rx_completion << 8));
	    }
	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
		| PDQ_BITMASK(PDQC_SNMP_SET)
		| PDQ_BITMASK(PDQC_START);
	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
	    pdq_queue_commands(pdq);
	    break;
	}
	case PDQS_LINK_UNAVAILABLE:
	case PDQS_LINK_AVAILABLE: {
	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
		| PDQ_BITMASK(PDQC_SNMP_SET);
	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
					  pdq->pdq_dbp->pdqdb_host_smt,
					  pdq->pdq_cbp->pdqcb_host_smt,
					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
			      pdq->pdq_host_smt_info.rx_producer
			          | (pdq->pdq_host_smt_info.rx_completion << 8));
	    }
	    pdq_process_unsolicited_events(pdq);
	    pdq_queue_commands(pdq);
	    break;
	}
	case PDQS_RING_MEMBER: {
	}
	default: {	/* to make gcc happy */
	    break;
	}
    }
}

int
pdq_interrupt(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_uint32_t data;
    int progress = 0;

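    /*
     * On a DEFPA, the 0x18 write to the PFI status register appears to
     * acknowledge the PFI's interrupt status bits before (and, at the
     * bottom of the loop below, after) servicing the PDQ itself; this is
     * an assumption based on how the register is used here.
     */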
    if (pdq->pdq_type == PDQ_DEFPA)
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);

    while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
	progress = 1;
	PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
	PDQ_OS_CONSUMER_POSTSYNC(pdq);
	if (data & PDQ_PSTS_RCV_DATA_PENDING) {
	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
				      pdq->pdq_dbp->pdqdb_receives,
				      pdq->pdq_cbp->pdqcb_receives,
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
	    PDQ_DO_TYPE2_PRODUCER(pdq);
	}
	if (data & PDQ_PSTS_HOST_SMT_PENDING) {
	    pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
				      pdq->pdq_dbp->pdqdb_host_smt,
				      pdq->pdq_cbp->pdqcb_host_smt,
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
	    PDQ_DO_HOST_SMT_PRODUCER(pdq);
	}
	/* if (data & PDQ_PSTS_XMT_DATA_PENDING) */
	    pdq_process_transmitted_data(pdq);
	if (data & PDQ_PSTS_UNSOL_PENDING)
	    pdq_process_unsolicited_events(pdq);
	if (data & PDQ_PSTS_CMD_RSP_PENDING)
	    pdq_process_command_responses(pdq);
	if (data & PDQ_PSTS_TYPE_0_PENDING) {
	    data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
	    if (data & PDQ_HOST_INT_STATE_CHANGE) {
		pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
		printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
		if (state == PDQS_LINK_UNAVAILABLE) {
		    pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
		} else if (state == PDQS_LINK_AVAILABLE) {
		    if (pdq->pdq_flags & PDQ_WANT_FDX) {
			pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_DEC_EXT_MIB_GET);
			pdq_queue_commands(pdq);
		    }
		    pdq->pdq_flags |= PDQ_TXOK|PDQ_IS_ONRING;
		    pdq_os_restart_transmitter(pdq);
		} else if (state == PDQS_HALTED) {
		    pdq_response_error_log_get_t log_entry;
		    pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
		    printf(": halt code = %d (%s)\n",
			   halt_code, pdq_halt_codes[halt_code]);
		    if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
			PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
			       PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
			       data & PDQ_HOST_INT_FATAL_ERROR));
		    }
		    PDQ_OS_MEMZERO(&log_entry, sizeof(log_entry));
		    if (pdq_read_error_log(pdq, &log_entry)) {
			PDQ_PRINTF(("  Error log Entry:\n"));
			PDQ_PRINTF(("    CMD Status           = %d (0x%x)\n",
				    log_entry.error_log_get_status,
				    log_entry.error_log_get_status));
			PDQ_PRINTF(("    Event Status         = %d (0x%x)\n",
				    log_entry.error_log_get_event_status,
				    log_entry.error_log_get_event_status));
			PDQ_PRINTF(("    Caller Id            = %d (0x%x)\n",
				    log_entry.error_log_get_caller_id,
				    log_entry.error_log_get_caller_id));
			PDQ_PRINTF(("    Write Count          = %d (0x%x)\n",
				    log_entry.error_log_get_write_count,
				    log_entry.error_log_get_write_count));
			PDQ_PRINTF(("    FRU Implication Mask = %d (0x%x)\n",
				    log_entry.error_log_get_fru_implication_mask,
				    log_entry.error_log_get_fru_implication_mask));
			PDQ_PRINTF(("    Test ID              = %d (0x%x)\n",
				    log_entry.error_log_get_test_id,
				    log_entry.error_log_get_test_id));
		    }
		    pdq_stop(pdq);
		    if (pdq->pdq_flags & PDQ_RUNNING)
			pdq_run(pdq);
		    return 1;
		}
		printf("\n");
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
	    }
	    if (data & PDQ_HOST_INT_FATAL_ERROR) {
		pdq_stop(pdq);
		if (pdq->pdq_flags & PDQ_RUNNING)
		    pdq_run(pdq);
		return 1;
	    }
	    if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
		printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
		pdq->pdq_flags &= ~PDQ_TXOK;
		pdq_flush_transmitter(pdq);
		pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
	    }
	}
	if (pdq->pdq_type == PDQ_DEFPA)
	    PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
    }
    return progress;
}

pdq_t *
pdq_initialize(
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    const char *name,
    int unit,
    void *ctx,
    pdq_type_t type)
{
    pdq_t *pdq;
    pdq_state_t state;
    pdq_descriptor_block_t *dbp;
#if !defined(PDQ_BUS_DMA)
    const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
    pdq_uint8_t *p;
#endif
    int idx;

    PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
    PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
    PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
    PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
    PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
    PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
    PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
    PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
    PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);

    pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
    if (pdq == NULL) {
	PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
	return NULL;
    }
    PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
    pdq->pdq_type = type;
    pdq->pdq_unit = unit;
    pdq->pdq_os_ctx = (void *) ctx;
    pdq->pdq_os_name = name;
    pdq->pdq_flags = PDQ_PRINTCHARS;
1551    /*
1552     * Allocate the additional data structures required by
1553     * the PDQ driver.  Allocate a contiguous region of memory
1554     * for the descriptor block.  We need to allocate enough
1555     * to guarantee that we get an 8KB block of memory aligned
1556     * on an 8KB boundary.  This turns out to require that we allocate
1557     * (N*2 - 1 page) pages of memory.  On machines with a page size
1558     * smaller than 8KB, this means we allocate more memory than we
1559     * strictly need; the extra is used for the unsolicited event
1560     * buffers (though on machines with 8KB pages we have to allocate
1561     * them separately since there is nothing left over).
1562     */
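    /*
     * Illustrative arithmetic (a sketch only, assuming a 4KB
     * PDQ_OS_PAGESIZE; the real value comes from the OS support header):
     *
     *   contig_bytes = 2 * sizeof(pdq_descriptor_block_t) - PDQ_OS_PAGESIZE
     *                = 2 * 8192 - 4096 = 12288 bytes (three 4KB pages)
     *
     * Since the allocation is page aligned, the first 8KB boundary falls
     * within the first page, leaving a full 8KB-aligned 8KB block inside
     * the region; the remaining 4KB (in front of or behind that block)
     * is reused for the unsolicited event buffers.
     */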
1563#if defined(PDQ_OS_MEMALLOC_CONTIG)
1564    p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1565
1566    if (p == NULL) {
1567	printf("%s() - PDQ_OS_MEMALLOC_CONTIG() failed!\n", __func__);
1568    } else {
1570	pdq_physaddr_t physaddr = PDQ_OS_VA_TO_BUSPA(pdq, p);
1571	/*
1572	 * Assert that we really got contiguous memory.  This check is
1573	 * redundant on systems with a true physically contiguous
1574	 * allocator, but it catches the systems that merely fake one.
1575	 */
1576	for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1577	    if (PDQ_OS_VA_TO_BUSPA(pdq, p + idx) - physaddr != idx)
1578		goto cleanup_and_return;
1579	}
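	/*
	 * Carve up the region: the descriptor block goes on the 8KB
	 * boundary inside it, and whatever is left (in front if the
	 * region was unaligned, behind if it was already aligned) is
	 * used for the unsolicited event buffers.
	 */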
1580	if (physaddr & 0x1FFF) {
1581	    pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1582	    pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr;
1583	    pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - (physaddr & 0x1FFF)];
1584	    pdq->pdq_pa_descriptor_block = (physaddr + 0x1FFF) & ~0x1FFFUL;
1585	} else {
1586	    pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1587	    pdq->pdq_pa_descriptor_block = physaddr;
1588	    pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1589	    pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr + 0x2000;
1590	}
1591    }
1592    pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1593    pdq->pdq_pa_consumer_block = PDQ_DB_BUSPA(pdq, pdq->pdq_cbp);
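    /*
     * On machines with an 8KB page size the contiguous region is exactly
     * the descriptor block, so the unsolicited event buffers have to be
     * allocated separately.
     */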
1594    if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1595	pdq->pdq_unsolicited_info.ui_events =
1596	    (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1597		PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1598    }
1599#else
1600    if (pdq_os_memalloc_contig(pdq))
1601	goto cleanup_and_return;
1602#endif
1603
1604    /*
1605     * Make sure everything got allocated.  If not, free what did
1606     * get allocated and return.
1607     */
1608    if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1609      cleanup_and_return:
1610#ifdef PDQ_OS_MEMFREE_CONTIG
1611	if (p /* pdq->pdq_dbp */ != NULL)
1612	    PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1613	if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1614	    PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1615			   PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1616#endif
1617	PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1618	return NULL;
1619    }
1620    dbp = pdq->pdq_dbp;
1621
1622    PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT " (PA = 0x%x)\n", dbp, pdq->pdq_pa_descriptor_block));
1623    PDQ_PRINTF(("    Receive Queue          = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_receives));
1624    PDQ_PRINTF(("    Transmit Queue         = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_transmits));
1625    PDQ_PRINTF(("    Host SMT Queue         = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_host_smt));
1626    PDQ_PRINTF(("    Command Response Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_responses));
1627    PDQ_PRINTF(("    Command Request Queue  = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_requests));
1628    PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1629
1630    /*
1631     * Zero out the descriptor block.  Not really required but
1632     * it pays to be neat.  This will also zero out the consumer
1633     * block, command pool, and buffer pointers for the receive
1634     * and host SMT rings.
1635     */
1636    PDQ_OS_MEMZERO(dbp, sizeof(*dbp));
1637
1638    /*
1639     * Initialize the CSR references.
1640     * The DEFAA (FutureBus+) skips a longword between registers.
1641     */
1642    pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
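    /*
     * The DEFPA (PCI) additionally exposes a set of PFI interface CSRs,
     * which get their own block of references.
     */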
1643    if (pdq->pdq_type == PDQ_DEFPA)
1644	pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1645
1646    PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_CSR_FMT "\n", pdq->pdq_csrs.csr_base));
1647    PDQ_PRINTF(("    Port Reset                = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1648	   pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1649    PDQ_PRINTF(("    Host Data                 = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1650	   pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1651    PDQ_PRINTF(("    Port Control              = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1652	   pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1653    PDQ_PRINTF(("    Port Data A               = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1654	   pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1655    PDQ_PRINTF(("    Port Data B               = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1656	   pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1657    PDQ_PRINTF(("    Port Status               = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1658	   pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1659    PDQ_PRINTF(("    Host Int Type 0           = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1660	   pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1661    PDQ_PRINTF(("    Host Int Enable           = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1662	   pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1663    PDQ_PRINTF(("    Type 2 Producer           = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1664	   pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1665    PDQ_PRINTF(("    Command Response Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1666	   pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1667    PDQ_PRINTF(("    Command Request Producer  = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1668	   pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1669    PDQ_PRINTF(("    Host SMT Producer         = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1670	   pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1671    PDQ_PRINTF(("    Unsolicited Producer      = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1672	   pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1673
1674    /*
1675     * Initialize the command information block
1676     */
1677    pdq->pdq_command_info.ci_request_bufstart = dbp->pdqdb_cmd_request_buf;
1678    pdq->pdq_command_info.ci_pa_request_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_request_bufstart);
1679    pdq->pdq_command_info.ci_pa_request_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_requests);
1680    PDQ_PRINTF(("PDQ Command Request Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1681		pdq->pdq_command_info.ci_request_bufstart,
1682		pdq->pdq_command_info.ci_pa_request_bufstart));
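    /*
     * Every request descriptor points at the single shared command
     * request buffer; commands are built and issued one at a time, so
     * the buffer is simply reused.
     */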
1683    for (idx = 0; idx < sizeof(dbp->pdqdb_command_requests)/sizeof(dbp->pdqdb_command_requests[0]); idx++) {
1684	pdq_txdesc_t *txd = &dbp->pdqdb_command_requests[idx];
1685
1686	txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_request_bufstart;
1687	txd->txd_eop = txd->txd_sop = 1;
1688	txd->txd_pa_hi = 0;
1689    }
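    /*
     * Sync the freshly initialized request descriptors so the adapter
     * will see them (this may be a no-op where host memory is coherent).
     */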
1690    PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_requests,
1691			sizeof(dbp->pdqdb_command_requests));
1692
1693    pdq->pdq_command_info.ci_response_bufstart = dbp->pdqdb_cmd_response_buf;
1694    pdq->pdq_command_info.ci_pa_response_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_response_bufstart);
1695    pdq->pdq_command_info.ci_pa_response_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_responses);
1696    PDQ_PRINTF(("PDQ Command Response Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1697		pdq->pdq_command_info.ci_response_bufstart,
1698		pdq->pdq_command_info.ci_pa_response_bufstart));
1699    for (idx = 0; idx < sizeof(dbp->pdqdb_command_responses)/sizeof(dbp->pdqdb_command_responses[0]); idx++) {
1700	pdq_rxdesc_t *rxd = &dbp->pdqdb_command_responses[idx];
1701
1702	rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_response_bufstart;
1703	rxd->rxd_sop = 1;
1704	rxd->rxd_seg_cnt = 0;
1705	rxd->rxd_seg_len_lo = 0;
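	/* Segment lengths are handed to the PDQ in units of 16 bytes. */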
1706	rxd->rxd_seg_len_hi = PDQ_SIZE_COMMAND_RESPONSE / 16;
1707    }
1708    PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_responses,
1709			sizeof(dbp->pdqdb_command_responses));
1710
1711    /*
1712     * Initialize the unsolicited event information block
1713     */
1714    pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1715    pdq->pdq_unsolicited_info.ui_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_unsolicited_events);
1716    PDQ_PRINTF(("PDQ Unsolicited Event Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1717		pdq->pdq_unsolicited_info.ui_events,
1718		pdq->pdq_unsolicited_info.ui_pa_bufstart));
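    /*
     * The unsolicited event descriptor ring can be larger than the pool
     * of event buffers, so buffers are assigned to descriptors modulo
     * PDQ_NUM_UNSOLICITED_EVENTS.
     */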
1719    for (idx = 0; idx < sizeof(dbp->pdqdb_unsolicited_events)/sizeof(dbp->pdqdb_unsolicited_events[0]); idx++) {
1720	pdq_rxdesc_t *rxd = &dbp->pdqdb_unsolicited_events[idx];
1721	pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1722
1723	rxd->rxd_sop = 1;
1724	rxd->rxd_seg_cnt = 0;
1725	rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
1726	rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1727	    - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
1728	rxd->rxd_pa_hi = 0;
1729	PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
1730    }
1731    PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_unsolicited_events,
1732			sizeof(dbp->pdqdb_unsolicited_events));
1733
1734    /*
1735     * Initialize the receive information blocks (normal and SMT).
1736     */
1737    pdq->pdq_rx_info.rx_buffers = pdq->pdq_receive_buffers;
1738    pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_receives);
1739    pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1740    pdq->pdq_rx_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_receives);
1741
1742    pdq->pdq_host_smt_info.rx_buffers = pdq->pdq_host_smt_buffers;
1743    pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_host_smt);
1744    pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1745    pdq->pdq_host_smt_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_host_smt);
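    /*
     * Both rings start out fully free; the rx_target values sit a margin
     * of several maximum-sized (PDQ_RX_SEGCNT-segment) frames below the
     * ring size.
     */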
1746
1747    /*
1748     * Initialize the transmit information block.
1749     */
1750    dbp->pdqdb_tx_hdr[0] = PDQ_FDDI_PH0;
1751    dbp->pdqdb_tx_hdr[1] = PDQ_FDDI_PH1;
1752    dbp->pdqdb_tx_hdr[2] = PDQ_FDDI_PH2;
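    /*
     * pdqdb_tx_hdr holds the 3-byte packet request header the adapter
     * expects in front of each transmitted frame; tx_hdrdesc below
     * describes it so it can be prepended as the first segment.
     */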
1753    pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(dbp->pdqdb_transmits);
1754    pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = 3;
1755    pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
1756    pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_DB_BUSPA(pdq, dbp->pdqdb_tx_hdr);
1757    pdq->pdq_tx_info.tx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_transmits);
1758
1759    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1760    PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1761
1762    /*
1763     * Stop the PDQ if it is running and put it into a known state.
1764     */
1765    state = pdq_stop(pdq);
1766
1767    PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1768    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1769    /*
1770     * If the adapter is not in the state we expect, then the
1771     * initialization failed.  Clean up and exit.
1772     */
1773#if defined(PDQVERBOSE)
1774    if (state == PDQS_HALTED) {
1775	pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1776	printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1777	if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
1778	    PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1779		       PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1780		       PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1781    }
1782#endif
1783    if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1784	goto cleanup_and_return;
1785
1786    PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1787	   pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1788	   pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1789	   pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1790    PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1791	   pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1792	   pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1793    PDQ_PRINTF(("PDQ Chip Revision = "));
1794    switch (pdq->pdq_chip_rev) {
1795	case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1796	case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1797	case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1798	default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));
1799    }
1800    PDQ_PRINTF(("\n"));
1801
1802    return pdq;
1803}
1804