Deleted Added
full compact
aic7xxx.c (74434) aic7xxx.c (74972)
1/*
2 * Core routines and tables shareable across OS platforms.
3 *
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
1/*
2 * Core routines and tables shareable across OS platforms.
3 *
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id: //depot/src/aic7xxx/aic7xxx.c#34 $
31 * $Id: //depot/src/aic7xxx/aic7xxx.c#35 $
32 *
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx.c 74434 2001-03-19 04:40:35Z gibbs $
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx.c 74972 2001-03-29 00:36:35Z gibbs $
34 */
35
36#ifdef __linux__
37#include "aic7xxx_linux.h"
38#include "aic7xxx_inline.h"
39#include "aicasm/aicasm_insformat.h"
40#endif
41
42#ifdef __FreeBSD__
43#include <dev/aic7xxx/aic7xxx_freebsd.h>
44#include <dev/aic7xxx/aic7xxx_inline.h>
45#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
46#endif
47
48/****************************** Softc Data ************************************/
49struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
50
51/***************************** Lookup Tables **********************************/
52char *ahc_chip_names[] =
53{
54 "NONE",
55 "aic7770",
56 "aic7850",
57 "aic7855",
58 "aic7859",
59 "aic7860",
60 "aic7870",
61 "aic7880",
62 "aic7895",
63 "aic7895C",
64 "aic7890/91",
65 "aic7896/97",
66 "aic7892",
67 "aic7899"
68};
69static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
70
71/*
72 * Hardware error codes.
73 */
74struct ahc_hard_error_entry {
75 uint8_t errno;
76 char *errmesg;
77};
78
79static struct ahc_hard_error_entry ahc_hard_errors[] = {
80 { ILLHADDR, "Illegal Host Access" },
81 { ILLSADDR, "Illegal Sequencer Address referrenced" },
82 { ILLOPCODE, "Illegal Opcode in sequencer program" },
83 { SQPARERR, "Sequencer Parity Error" },
84 { DPARERR, "Data-path Parity Error" },
85 { MPARERR, "Scratch or SCB Memory Parity Error" },
86 { PCIERRSTAT, "PCI Error detected" },
87 { CIOPARERR, "CIOBUS Parity Error" },
88};
89static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);
90
91static struct ahc_phase_table_entry ahc_phase_table[] =
92{
93 { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
94 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
95 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
96 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
97 { P_COMMAND, MSG_NOOP, "in Command phase" },
98 { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
99 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
100 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
101 { P_BUSFREE, MSG_NOOP, "while idle" },
102 { 0, MSG_NOOP, "in unknown phase" }
103};
104
105/*
106 * In most cases we only wish to itterate over real phases, so
107 * exclude the last element from the count.
108 */
109static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;
110
111/*
112 * Valid SCSIRATE values. (p. 3-17)
113 * Provides a mapping of tranfer periods in ns to the proper value to
114 * stick in the scsixfer reg.
115 */
116static struct ahc_syncrate ahc_syncrates[] =
117{
118 /* ultra2 fast/ultra period rate */
119 { 0x42, 0x000, 9, "80.0" },
120 { 0x03, 0x000, 10, "40.0" },
121 { 0x04, 0x000, 11, "33.0" },
122 { 0x05, 0x100, 12, "20.0" },
123 { 0x06, 0x110, 15, "16.0" },
124 { 0x07, 0x120, 18, "13.4" },
125 { 0x08, 0x000, 25, "10.0" },
126 { 0x19, 0x010, 31, "8.0" },
127 { 0x1a, 0x020, 37, "6.67" },
128 { 0x1b, 0x030, 43, "5.7" },
129 { 0x1c, 0x040, 50, "5.0" },
130 { 0x00, 0x050, 56, "4.4" },
131 { 0x00, 0x060, 62, "4.0" },
132 { 0x00, 0x070, 68, "3.6" },
133 { 0x00, 0x000, 0, NULL }
134};
135
136/* Our Sequencer Program */
137#include "aic7xxx_seq.h"
138
139/**************************** Function Declarations ***************************/
34 */
35
36#ifdef __linux__
37#include "aic7xxx_linux.h"
38#include "aic7xxx_inline.h"
39#include "aicasm/aicasm_insformat.h"
40#endif
41
42#ifdef __FreeBSD__
43#include <dev/aic7xxx/aic7xxx_freebsd.h>
44#include <dev/aic7xxx/aic7xxx_inline.h>
45#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
46#endif
47
48/****************************** Softc Data ************************************/
49struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
50
51/***************************** Lookup Tables **********************************/
/***************************** Lookup Tables **********************************/
/*
 * Human readable controller names, indexed by the softc's chip type.
 * num_chip_names gives the table length for bounds checking.
 */
char *ahc_chip_names[] =
{
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7895",
	"aic7895C",
	"aic7890/91",
	"aic7896/97",
	"aic7892",
	"aic7899"
};
static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
70
/*
 * Hardware error codes.  The table is indexed by ERROR register bit
 * position: entry N describes bit (1 << N) (see the shift loop in
 * ahc_handle_brkadrint()).
 */
struct ahc_hard_error_entry {
	uint8_t errno;		/* Bit value from the ERROR register */
	char *errmesg;		/* Human readable description */
};

static struct ahc_hard_error_entry ahc_hard_errors[] = {
	{ ILLHADDR,	"Illegal Host Access" },
	{ ILLSADDR,	"Illegal Sequencer Address referrenced" },
	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
	{ SQPARERR,	"Sequencer Parity Error" },
	{ DPARERR,	"Data-path Parity Error" },
	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT,	"PCI Error detected" },
	{ CIOPARERR,	"CIOBUS Parity Error" },
};
static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);
90
/*
 * Mapping from SCSI bus phase to the message we send in response to a
 * parity error seen in that phase, plus a description for diagnostics.
 * The final entry is a catch-all for unknown phases.
 */
static struct ahc_phase_table_entry ahc_phase_table[] =
{
	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
	{ 0,		MSG_NOOP,		"in unknown phase"	}
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last (catch-all) element from the count.
 */
static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;
110
/*
 * Valid SCSIRATE values.  (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the scsixfer reg.  The list is ordered fastest first; a
 * zeroed entry with a NULL rate string terminates it.
 */
static struct ahc_syncrate ahc_syncrates[] =
{
      /* ultra2    fast/ultra  period     rate */
	{ 0x42,      0x000,      9,      "80.0" },
	{ 0x03,      0x000,     10,      "40.0" },
	{ 0x04,      0x000,     11,      "33.0" },
	{ 0x05,      0x100,     12,      "20.0" },
	{ 0x06,      0x110,     15,      "16.0" },
	{ 0x07,      0x120,     18,      "13.4" },
	{ 0x08,      0x000,     25,      "10.0" },
	{ 0x19,      0x010,     31,      "8.0"  },
	{ 0x1a,      0x020,     37,      "6.67" },
	{ 0x1b,      0x030,     43,      "5.7"  },
	{ 0x1c,      0x040,     50,      "5.0"  },
	{ 0x00,      0x050,     56,      "4.4"  },
	{ 0x00,      0x060,     62,      "4.0"  },
	{ 0x00,      0x070,     68,      "3.6"  },
	{ 0x00,      0x000,      0,      NULL   }	/* terminator */
};
135
136/* Our Sequencer Program */
137#include "aic7xxx_seq.h"
138
139/**************************** Function Declarations ***************************/
140static struct tmode_tstate*
140static struct ahc_tmode_tstate*
141 ahc_alloc_tstate(struct ahc_softc *ahc,
142 u_int scsi_id, char channel);
143#ifdef AHC_TARGET_MODE
144static void ahc_free_tstate(struct ahc_softc *ahc,
145 u_int scsi_id, char channel, int force);
146#endif
147static struct ahc_syncrate*
148 ahc_devlimited_syncrate(struct ahc_softc *ahc,
149 struct ahc_initiator_tinfo *,
150 u_int *period,
151 u_int *ppr_options,
152 role_t role);
141 ahc_alloc_tstate(struct ahc_softc *ahc,
142 u_int scsi_id, char channel);
143#ifdef AHC_TARGET_MODE
144static void ahc_free_tstate(struct ahc_softc *ahc,
145 u_int scsi_id, char channel, int force);
146#endif
147static struct ahc_syncrate*
148 ahc_devlimited_syncrate(struct ahc_softc *ahc,
149 struct ahc_initiator_tinfo *,
150 u_int *period,
151 u_int *ppr_options,
152 role_t role);
153static void ahc_update_pending_syncrates(struct ahc_softc *ahc);
153static void ahc_update_pending_scbs(struct ahc_softc *ahc);
154static void ahc_fetch_devinfo(struct ahc_softc *ahc,
155 struct ahc_devinfo *devinfo);
156static void ahc_scb_devinfo(struct ahc_softc *ahc,
157 struct ahc_devinfo *devinfo,
158 struct scb *scb);
159static void ahc_setup_initiator_msgout(struct ahc_softc *ahc,
160 struct ahc_devinfo *devinfo,
161 struct scb *scb);
162static void ahc_build_transfer_msg(struct ahc_softc *ahc,
163 struct ahc_devinfo *devinfo);
164static void ahc_construct_sdtr(struct ahc_softc *ahc,
165 struct ahc_devinfo *devinfo,
166 u_int period, u_int offset);
167static void ahc_construct_wdtr(struct ahc_softc *ahc,
168 struct ahc_devinfo *devinfo,
169 u_int bus_width);
170static void ahc_construct_ppr(struct ahc_softc *ahc,
171 struct ahc_devinfo *devinfo,
172 u_int period, u_int offset,
173 u_int bus_width, u_int ppr_options);
174static void ahc_clear_msg_state(struct ahc_softc *ahc);
175static void ahc_handle_message_phase(struct ahc_softc *ahc);
176typedef enum {
177 AHCMSG_1B,
178 AHCMSG_2B,
179 AHCMSG_EXT
180} ahc_msgtype;
181static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
182 u_int msgval, int full);
183static int ahc_parse_msg(struct ahc_softc *ahc,
184 struct ahc_devinfo *devinfo);
185static int ahc_handle_msg_reject(struct ahc_softc *ahc,
186 struct ahc_devinfo *devinfo);
187static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
188 struct ahc_devinfo *devinfo);
189static void ahc_handle_devreset(struct ahc_softc *ahc,
190 struct ahc_devinfo *devinfo,
191 cam_status status, char *message,
192 int verbose_level);
154static void ahc_fetch_devinfo(struct ahc_softc *ahc,
155 struct ahc_devinfo *devinfo);
156static void ahc_scb_devinfo(struct ahc_softc *ahc,
157 struct ahc_devinfo *devinfo,
158 struct scb *scb);
159static void ahc_setup_initiator_msgout(struct ahc_softc *ahc,
160 struct ahc_devinfo *devinfo,
161 struct scb *scb);
162static void ahc_build_transfer_msg(struct ahc_softc *ahc,
163 struct ahc_devinfo *devinfo);
164static void ahc_construct_sdtr(struct ahc_softc *ahc,
165 struct ahc_devinfo *devinfo,
166 u_int period, u_int offset);
167static void ahc_construct_wdtr(struct ahc_softc *ahc,
168 struct ahc_devinfo *devinfo,
169 u_int bus_width);
170static void ahc_construct_ppr(struct ahc_softc *ahc,
171 struct ahc_devinfo *devinfo,
172 u_int period, u_int offset,
173 u_int bus_width, u_int ppr_options);
174static void ahc_clear_msg_state(struct ahc_softc *ahc);
175static void ahc_handle_message_phase(struct ahc_softc *ahc);
176typedef enum {
177 AHCMSG_1B,
178 AHCMSG_2B,
179 AHCMSG_EXT
180} ahc_msgtype;
181static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
182 u_int msgval, int full);
183static int ahc_parse_msg(struct ahc_softc *ahc,
184 struct ahc_devinfo *devinfo);
185static int ahc_handle_msg_reject(struct ahc_softc *ahc,
186 struct ahc_devinfo *devinfo);
187static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
188 struct ahc_devinfo *devinfo);
189static void ahc_handle_devreset(struct ahc_softc *ahc,
190 struct ahc_devinfo *devinfo,
191 cam_status status, char *message,
192 int verbose_level);
193static void ahc_setup_target_msgin(struct ahc_softc *ahc,
194 struct ahc_devinfo *devinfo,
195 struct scb *scb);
193
194static bus_dmamap_callback_t ahc_dmamap_cb;
195static void ahc_build_free_scb_list(struct ahc_softc *ahc);
196static int ahc_init_scbdata(struct ahc_softc *ahc);
197static void ahc_fini_scbdata(struct ahc_softc *ahc);
198static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
199 struct scb *prev_scb,
200 struct scb *scb);
201static int ahc_qinfifo_count(struct ahc_softc *ahc);
202static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
203 u_int prev, u_int scbptr);
204static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
205static u_int ahc_rem_wscb(struct ahc_softc *ahc,
206 u_int scbpos, u_int prev);
207static int ahc_abort_scbs(struct ahc_softc *ahc, int target,
208 char channel, int lun, u_int tag,
209 role_t role, uint32_t status);
210static void ahc_reset_current_bus(struct ahc_softc *ahc);
211static void ahc_calc_residual(struct scb *scb);
212#ifdef AHC_DUMP_SEQ
213static void ahc_dumpseq(struct ahc_softc *ahc);
214#endif
215static void ahc_loadseq(struct ahc_softc *ahc);
216static int ahc_check_patch(struct ahc_softc *ahc,
217 struct patch **start_patch,
218 u_int start_instr, u_int *skip_addr);
219static void ahc_download_instr(struct ahc_softc *ahc,
220 u_int instrptr, uint8_t *dconsts);
221#ifdef AHC_TARGET_MODE
222static void ahc_queue_lstate_event(struct ahc_softc *ahc,
196
197static bus_dmamap_callback_t ahc_dmamap_cb;
198static void ahc_build_free_scb_list(struct ahc_softc *ahc);
199static int ahc_init_scbdata(struct ahc_softc *ahc);
200static void ahc_fini_scbdata(struct ahc_softc *ahc);
201static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
202 struct scb *prev_scb,
203 struct scb *scb);
204static int ahc_qinfifo_count(struct ahc_softc *ahc);
205static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
206 u_int prev, u_int scbptr);
207static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
208static u_int ahc_rem_wscb(struct ahc_softc *ahc,
209 u_int scbpos, u_int prev);
210static int ahc_abort_scbs(struct ahc_softc *ahc, int target,
211 char channel, int lun, u_int tag,
212 role_t role, uint32_t status);
213static void ahc_reset_current_bus(struct ahc_softc *ahc);
214static void ahc_calc_residual(struct scb *scb);
215#ifdef AHC_DUMP_SEQ
216static void ahc_dumpseq(struct ahc_softc *ahc);
217#endif
218static void ahc_loadseq(struct ahc_softc *ahc);
219static int ahc_check_patch(struct ahc_softc *ahc,
220 struct patch **start_patch,
221 u_int start_instr, u_int *skip_addr);
222static void ahc_download_instr(struct ahc_softc *ahc,
223 u_int instrptr, uint8_t *dconsts);
224#ifdef AHC_TARGET_MODE
225static void ahc_queue_lstate_event(struct ahc_softc *ahc,
223 struct tmode_lstate *lstate,
226 struct ahc_tmode_lstate *lstate,
224 u_int initiator_id,
225 u_int event_type,
226 u_int event_arg);
227static void ahc_update_scsiid(struct ahc_softc *ahc,
228 u_int targid_mask);
229static int ahc_handle_target_cmd(struct ahc_softc *ahc,
230 struct target_cmd *cmd);
231#endif
232/************************* Sequencer Execution Control ************************/
233/*
234 * Restart the sequencer program from address zero
235 */
236void
237ahc_restart(struct ahc_softc *ahc)
238{
239
240 ahc_pause(ahc);
241
242 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */
243 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */
244 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
245
246 /*
247 * Ensure that the sequencer's idea of TQINPOS
248 * matches our own. The sequencer increments TQINPOS
249 * only after it sees a DMA complete and a reset could
250 * occur before the increment leaving the kernel to believe
251 * the command arrived but the sequencer to not.
252 */
253 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
254
255 /* Always allow reselection */
256 ahc_outb(ahc, SCSISEQ,
257 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
258 if ((ahc->features & AHC_CMD_CHAN) != 0) {
259 /* Ensure that no DMA operations are in progress */
260 ahc_outb(ahc, CCSCBCNT, 0);
261 ahc_outb(ahc, CCSGCTL, 0);
262 ahc_outb(ahc, CCSCBCTL, 0);
263 }
264 /*
265 * If we were in the process of DMA'ing SCB data into
266 * an SCB, replace that SCB on the free list. This prevents
267 * an SCB leak.
268 */
269 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
270 ahc_add_curscb_to_free_list(ahc);
271 ahc_outb(ahc, SEQ_FLAGS2,
272 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
273 }
274 ahc_outb(ahc, MWI_RESIDUAL, 0);
275 ahc_outb(ahc, SEQCTL, FASTMODE);
276 ahc_outb(ahc, SEQADDR0, 0);
277 ahc_outb(ahc, SEQADDR1, 0);
278 ahc_unpause(ahc);
279}
280
281/************************* Input/Output Queues ********************************/
282void
283ahc_run_qoutfifo(struct ahc_softc *ahc)
284{
285 struct scb *scb;
286 u_int scb_index;
287
288 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
289
290 scb_index = ahc->qoutfifo[ahc->qoutfifonext];
291 if ((ahc->qoutfifonext & 0x03) == 0x03) {
292 u_int modnext;
293
294 /*
295 * Clear 32bits of QOUTFIFO at a time
296 * so that we don't clobber an incomming
297 * byte DMA to the array on architectures
298 * that only support 32bit load and store
299 * operations.
300 */
301 modnext = ahc->qoutfifonext & ~0x3;
302 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
303 }
304 ahc->qoutfifonext++;
305
306 scb = ahc_lookup_scb(ahc, scb_index);
307 if (scb == NULL) {
308 printf("%s: WARNING no command for scb %d "
309 "(cmdcmplt)\nQOUTPOS = %d\n",
310 ahc_name(ahc), scb_index,
311 ahc->qoutfifonext - 1);
312 continue;
313 }
314
315 /*
316 * Save off the residual
317 * if there is one.
318 */
319 if (ahc_check_residual(scb) != 0)
320 ahc_calc_residual(scb);
321 else
322 ahc_set_residual(scb, 0);
323 ahc_done(ahc, scb);
324 }
325}
326
327void
328ahc_run_untagged_queues(struct ahc_softc *ahc)
329{
330 int i;
331
332 for (i = 0; i < 16; i++)
333 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
334}
335
336void
337ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
338{
339 struct scb *scb;
340
341 if (ahc->untagged_queue_lock != 0)
342 return;
343
344 if ((scb = TAILQ_FIRST(queue)) != NULL
345 && (scb->flags & SCB_ACTIVE) == 0) {
346 scb->flags |= SCB_ACTIVE;
347 ahc_queue_scb(ahc, scb);
348 }
349}
350
351/************************* Interrupt Handling *********************************/
352void
353ahc_handle_brkadrint(struct ahc_softc *ahc)
354{
355 /*
356 * We upset the sequencer :-(
357 * Lookup the error message
358 */
359 int i;
360 int error;
361
362 error = ahc_inb(ahc, ERROR);
363 for (i = 0; error != 1 && i < num_errors; i++)
364 error >>= 1;
365 printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
366 ahc_name(ahc), ahc_hard_errors[i].errmesg,
367 ahc_inb(ahc, SEQADDR0) |
368 (ahc_inb(ahc, SEQADDR1) << 8));
369
370 ahc_dump_card_state(ahc);
371
372 /* Tell everyone that this HBA is no longer availible */
373 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
374 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
375 CAM_NO_HBA);
376
377 /* Disable all interrupt sources by resetting the controller */
378 ahc_shutdown(ahc);
379}
380
381void
382ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
383{
384 struct scb *scb;
385 struct ahc_devinfo devinfo;
386
387 ahc_fetch_devinfo(ahc, &devinfo);
388
389 /*
390 * Clear the upper byte that holds SEQINT status
391 * codes and clear the SEQINT bit. We will unpause
392 * the sequencer, if appropriate, after servicing
393 * the request.
394 */
395 ahc_outb(ahc, CLRINT, CLRSEQINT);
396 switch (intstat & SEQINT_MASK) {
397 case BAD_STATUS:
398 {
399 u_int scb_index;
400 struct hardware_scb *hscb;
401
402 /*
403 * Set the default return value to 0 (don't
404 * send sense). The sense code will change
405 * this if needed.
406 */
407 ahc_outb(ahc, RETURN_1, 0);
408
409 /*
410 * The sequencer will notify us when a command
411 * has an error that would be of interest to
412 * the kernel. This allows us to leave the sequencer
413 * running in the common case of command completes
414 * without error. The sequencer will already have
415 * dma'd the SCB back up to us, so we can reference
416 * the in kernel copy directly.
417 */
418 scb_index = ahc_inb(ahc, SCB_TAG);
419 scb = ahc_lookup_scb(ahc, scb_index);
420 if (scb == NULL) {
421 printf("%s:%c:%d: ahc_intr - referenced scb "
422 "not valid during seqint 0x%x scb(%d)\n",
423 ahc_name(ahc), devinfo.channel,
424 devinfo.target, intstat, scb_index);
425 ahc_dump_card_state(ahc);
426 panic("for safety");
427 goto unpause;
428 }
429
430 hscb = scb->hscb;
431
432 /* Don't want to clobber the original sense code */
433 if ((scb->flags & SCB_SENSE) != 0) {
434 /*
435 * Clear the SCB_SENSE Flag and have
436 * the sequencer do a normal command
437 * complete.
438 */
439 scb->flags &= ~SCB_SENSE;
440 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
441 break;
442 }
443 ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
444 /* Freeze the queue until the client sees the error. */
445 ahc_freeze_devq(ahc, scb);
446 ahc_freeze_scb(scb);
447 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
448 switch (hscb->shared_data.status.scsi_status) {
449 case SCSI_STATUS_OK:
450 printf("%s: Interrupted for staus of 0???\n",
451 ahc_name(ahc));
452 break;
453 case SCSI_STATUS_CMD_TERMINATED:
454 case SCSI_STATUS_CHECK_COND:
227 u_int initiator_id,
228 u_int event_type,
229 u_int event_arg);
230static void ahc_update_scsiid(struct ahc_softc *ahc,
231 u_int targid_mask);
232static int ahc_handle_target_cmd(struct ahc_softc *ahc,
233 struct target_cmd *cmd);
234#endif
235/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero.
 *
 * The controller is paused, bus signals and pending message state are
 * cleared, any in-flight command-channel DMA is cancelled, and the
 * sequencer program counter is reset before unpausing.  The order of
 * the register writes below matters; do not reorder them.
 */
void
ahc_restart(struct ahc_softc *ahc)
{

	ahc_pause(ahc);

	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);

	/* Always allow reselection */
	ahc_outb(ahc, SCSISEQ,
		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	if ((ahc->features & AHC_CMD_CHAN) != 0) {
		/* Ensure that no DMA operations are in progress */
		ahc_outb(ahc, CCSCBCNT, 0);
		ahc_outb(ahc, CCSGCTL, 0);
		ahc_outb(ahc, CCSCBCTL, 0);
	}
	/*
	 * If we were in the process of DMA'ing SCB data into
	 * an SCB, replace that SCB on the free list.  This prevents
	 * an SCB leak.
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
		ahc_add_curscb_to_free_list(ahc);
		ahc_outb(ahc, SEQ_FLAGS2,
			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
	}
	ahc_outb(ahc, MWI_RESIDUAL, 0);
	ahc_outb(ahc, SEQCTL, FASTMODE);
	ahc_outb(ahc, SEQADDR0, 0);	/* Program counter low byte */
	ahc_outb(ahc, SEQADDR1, 0);	/* Program counter high byte */
	ahc_unpause(ahc);
}
283
284/************************* Input/Output Queues ********************************/
/*
 * Drain the queue-out FIFO of completed SCB tags, computing residuals
 * and completing each transaction via ahc_done().
 *
 * The controller DMAs completed SCB tags into ahc->qoutfifo one byte at
 * a time; SCB_LIST_NULL marks the first unused slot.  Consumed slots
 * are reset to SCB_LIST_NULL (0xFF) in aligned 32-bit stores — see the
 * comment below for why.  NOTE(review): assumes qoutfifonext wraps
 * naturally at the fifo size (appears to be a uint8_t index) — confirm
 * against the softc declaration.
 */
void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int  scb_index;

	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {

		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
		if ((ahc->qoutfifonext & 0x03) == 0x03) {
			u_int modnext;

			/*
			 * Clear 32bits of QOUTFIFO at a time
			 * so that we don't clobber an incoming
			 * byte DMA to the array on architectures
			 * that only support 32bit load and store
			 * operations.
			 */
			modnext = ahc->qoutfifonext & ~0x3;
			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
		}
		ahc->qoutfifonext++;

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			/* Stale or corrupted tag; skip it but warn. */
			printf("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahc_name(ahc), scb_index,
			       ahc->qoutfifonext - 1);
			continue;
		}

		/*
		 * Save off the residual
		 * if there is one.
		 */
		if (ahc_check_residual(scb) != 0)
			ahc_calc_residual(scb);
		else
			ahc_set_residual(scb, 0);
		ahc_done(ahc, scb);
	}
}
329
330void
331ahc_run_untagged_queues(struct ahc_softc *ahc)
332{
333 int i;
334
335 for (i = 0; i < 16; i++)
336 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
337}
338
339void
340ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
341{
342 struct scb *scb;
343
344 if (ahc->untagged_queue_lock != 0)
345 return;
346
347 if ((scb = TAILQ_FIRST(queue)) != NULL
348 && (scb->flags & SCB_ACTIVE) == 0) {
349 scb->flags |= SCB_ACTIVE;
350 ahc_queue_scb(ahc, scb);
351 }
352}
353
354/************************* Interrupt Handling *********************************/
355void
356ahc_handle_brkadrint(struct ahc_softc *ahc)
357{
358 /*
359 * We upset the sequencer :-(
360 * Lookup the error message
361 */
362 int i;
363 int error;
364
365 error = ahc_inb(ahc, ERROR);
366 for (i = 0; error != 1 && i < num_errors; i++)
367 error >>= 1;
368 printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
369 ahc_name(ahc), ahc_hard_errors[i].errmesg,
370 ahc_inb(ahc, SEQADDR0) |
371 (ahc_inb(ahc, SEQADDR1) << 8));
372
373 ahc_dump_card_state(ahc);
374
375 /* Tell everyone that this HBA is no longer availible */
376 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
377 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
378 CAM_NO_HBA);
379
380 /* Disable all interrupt sources by resetting the controller */
381 ahc_shutdown(ahc);
382}
383
384void
385ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
386{
387 struct scb *scb;
388 struct ahc_devinfo devinfo;
389
390 ahc_fetch_devinfo(ahc, &devinfo);
391
392 /*
393 * Clear the upper byte that holds SEQINT status
394 * codes and clear the SEQINT bit. We will unpause
395 * the sequencer, if appropriate, after servicing
396 * the request.
397 */
398 ahc_outb(ahc, CLRINT, CLRSEQINT);
399 switch (intstat & SEQINT_MASK) {
400 case BAD_STATUS:
401 {
402 u_int scb_index;
403 struct hardware_scb *hscb;
404
405 /*
406 * Set the default return value to 0 (don't
407 * send sense). The sense code will change
408 * this if needed.
409 */
410 ahc_outb(ahc, RETURN_1, 0);
411
412 /*
413 * The sequencer will notify us when a command
414 * has an error that would be of interest to
415 * the kernel. This allows us to leave the sequencer
416 * running in the common case of command completes
417 * without error. The sequencer will already have
418 * dma'd the SCB back up to us, so we can reference
419 * the in kernel copy directly.
420 */
421 scb_index = ahc_inb(ahc, SCB_TAG);
422 scb = ahc_lookup_scb(ahc, scb_index);
423 if (scb == NULL) {
424 printf("%s:%c:%d: ahc_intr - referenced scb "
425 "not valid during seqint 0x%x scb(%d)\n",
426 ahc_name(ahc), devinfo.channel,
427 devinfo.target, intstat, scb_index);
428 ahc_dump_card_state(ahc);
429 panic("for safety");
430 goto unpause;
431 }
432
433 hscb = scb->hscb;
434
435 /* Don't want to clobber the original sense code */
436 if ((scb->flags & SCB_SENSE) != 0) {
437 /*
438 * Clear the SCB_SENSE Flag and have
439 * the sequencer do a normal command
440 * complete.
441 */
442 scb->flags &= ~SCB_SENSE;
443 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
444 break;
445 }
446 ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
447 /* Freeze the queue until the client sees the error. */
448 ahc_freeze_devq(ahc, scb);
449 ahc_freeze_scb(scb);
450 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
451 switch (hscb->shared_data.status.scsi_status) {
452 case SCSI_STATUS_OK:
453 printf("%s: Interrupted for staus of 0???\n",
454 ahc_name(ahc));
455 break;
456 case SCSI_STATUS_CMD_TERMINATED:
457 case SCSI_STATUS_CHECK_COND:
458 {
459 struct ahc_dma_seg *sg;
460 struct scsi_sense *sc;
461 struct ahc_initiator_tinfo *targ_info;
462 struct ahc_tmode_tstate *tstate;
463 struct ahc_transinfo *tinfo;
455#ifdef AHC_DEBUG
456 if (ahc_debug & AHC_SHOWSENSE) {
457 ahc_print_path(ahc, scb);
458 printf("SCB %d: requests Check Status\n",
459 scb->hscb->tag);
460 }
461#endif
462
464#ifdef AHC_DEBUG
465 if (ahc_debug & AHC_SHOWSENSE) {
466 ahc_print_path(ahc, scb);
467 printf("SCB %d: requests Check Status\n",
468 scb->hscb->tag);
469 }
470#endif
471
463 if (ahc_perform_autosense(scb)) {
464 struct ahc_dma_seg *sg;
465 struct scsi_sense *sc;
466 struct ahc_initiator_tinfo *targ_info;
467 struct tmode_tstate *tstate;
468 struct ahc_transinfo *tinfo;
472 if (ahc_perform_autosense(scb) == 0)
473 break;
469
474
470 targ_info =
471 ahc_fetch_transinfo(ahc,
475 targ_info = ahc_fetch_transinfo(ahc,
472 devinfo.channel,
473 devinfo.our_scsiid,
474 devinfo.target,
475 &tstate);
476 devinfo.channel,
477 devinfo.our_scsiid,
478 devinfo.target,
479 &tstate);
476 tinfo = &targ_info->current;
477 sg = scb->sg_list;
478 sc = (struct scsi_sense *)
479 (&hscb->shared_data.cdb);
480 /*
481 * Save off the residual if there is one.
482 */
483 if (ahc_check_residual(scb))
484 ahc_calc_residual(scb);
485 else
486 ahc_set_residual(scb, 0);
480 tinfo = &targ_info->current;
481 sg = scb->sg_list;
482 sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
483 /*
484 * Save off the residual if there is one.
485 */
486 if (ahc_check_residual(scb))
487 ahc_calc_residual(scb);
488 else
489 ahc_set_residual(scb, 0);
487#ifdef AHC_DEBUG
490#ifdef AHC_DEBUG
488 if (ahc_debug & AHC_SHOWSENSE) {
489 ahc_print_path(ahc, scb);
490 printf("Sending Sense\n");
491 }
491 if (ahc_debug & AHC_SHOWSENSE) {
492 ahc_print_path(ahc, scb);
493 printf("Sending Sense\n");
494 }
492#endif
495#endif
493 sg->addr = ahc_get_sense_bufaddr(ahc, scb);
494 sg->len = ahc_get_sense_bufsize(ahc, scb);
495 sg->len |= AHC_DMA_LAST_SEG;
496 sg->addr = ahc_get_sense_bufaddr(ahc, scb);
497 sg->len = ahc_get_sense_bufsize(ahc, scb);
498 sg->len |= AHC_DMA_LAST_SEG;
496
499
497 /* Fixup byte order */
498 sg->addr = ahc_htole32(sg->addr);
499 sg->len = ahc_htole32(sg->len);
500 /* Fixup byte order */
501 sg->addr = ahc_htole32(sg->addr);
502 sg->len = ahc_htole32(sg->len);
500
503
501 sc->opcode = REQUEST_SENSE;
502 sc->byte2 = 0;
503 if (tinfo->protocol_version <= SCSI_REV_2
504 && SCB_GET_LUN(scb) < 8)
505 sc->byte2 = SCB_GET_LUN(scb) << 5;
506 sc->unused[0] = 0;
507 sc->unused[1] = 0;
508 sc->length = sg->len;
509 sc->control = 0;
504 sc->opcode = REQUEST_SENSE;
505 sc->byte2 = 0;
506 if (tinfo->protocol_version <= SCSI_REV_2
507 && SCB_GET_LUN(scb) < 8)
508 sc->byte2 = SCB_GET_LUN(scb) << 5;
509 sc->unused[0] = 0;
510 sc->unused[1] = 0;
511 sc->length = sg->len;
512 sc->control = 0;
510
513
511 /*
512 * XXX Still true???
513 * Would be nice to preserve DISCENB here,
514 * but due to the way we manage busy targets,
515 * we can't.
516 */
517 hscb->control = 0;
514 /*
515 * We can't allow the target to disconnect.
516 * This will be an untagged transaction and
517 * having the target disconnect will make this
518 * transaction indestinguishable from outstanding
519 * tagged transactions.
520 */
521 hscb->control = 0;
518
522
519 /*
520 * This request sense could be because the
521 * the device lost power or in some other
522 * way has lost our transfer negotiations.
523 * Renegotiate if appropriate. Unit attention
524 * errors will be reported before any data
525 * phases occur.
526 */
527 if (ahc_get_residual(scb)
528 == ahc_get_transfer_length(scb)) {
529 ahc_update_target_msg_request(ahc,
530 &devinfo,
531 targ_info,
532 /*force*/TRUE,
533 /*paused*/TRUE);
534 }
535 hscb->cdb_len = sizeof(*sc);
536 hscb->dataptr = sg->addr;
537 hscb->datacnt = sg->len;
538 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
539 hscb->sgptr = ahc_htole32(hscb->sgptr);
540 scb->sg_count = 1;
541 scb->flags |= SCB_SENSE;
542 ahc_qinfifo_requeue_tail(ahc, scb);
543 ahc_outb(ahc, RETURN_1, SEND_SENSE);
523 /*
524 * This request sense could be because the
525 * the device lost power or in some other
526 * way has lost our transfer negotiations.
527 * Renegotiate if appropriate. Unit attention
528 * errors will be reported before any data
529 * phases occur.
530 */
531 if (ahc_get_residual(scb)
532 == ahc_get_transfer_length(scb)) {
533 ahc_update_neg_request(ahc, &devinfo,
534 tstate, targ_info,
535 /*force*/TRUE);
536 }
537 if (tstate->auto_negotiate & devinfo.target_mask) {
538 hscb->control |= MK_MESSAGE;
539 scb->flags &= ~SCB_NEGOTIATE;
540 scb->flags |= SCB_AUTO_NEGOTIATE;
541 }
542 hscb->cdb_len = sizeof(*sc);
543 hscb->dataptr = sg->addr;
544 hscb->datacnt = sg->len;
545 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
546 hscb->sgptr = ahc_htole32(hscb->sgptr);
547 scb->sg_count = 1;
548 scb->flags |= SCB_SENSE;
549 ahc_qinfifo_requeue_tail(ahc, scb);
550 ahc_outb(ahc, RETURN_1, SEND_SENSE);
544#ifdef __FreeBSD__
551#ifdef __FreeBSD__
545 /*
546 * Ensure we have enough time to actually
547 * retrieve the sense.
548 */
549 untimeout(ahc_timeout, (caddr_t)scb,
550 scb->io_ctx->ccb_h.timeout_ch);
551 scb->io_ctx->ccb_h.timeout_ch =
552 timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
552 /*
553 * Ensure we have enough time to actually
554 * retrieve the sense.
555 */
556 untimeout(ahc_timeout, (caddr_t)scb,
557 scb->io_ctx->ccb_h.timeout_ch);
558 scb->io_ctx->ccb_h.timeout_ch =
559 timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
553#endif
560#endif
554 }
555 break;
561 break;
562 }
556 default:
557 break;
558 }
559 break;
560 }
561 case NO_MATCH:
562 {
563 /* Ensure we don't leave the selection hardware on */
564 ahc_outb(ahc, SCSISEQ,
565 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
566
567 printf("%s:%c:%d: no active SCB for reconnecting "
568 "target - issuing BUS DEVICE RESET\n",
569 ahc_name(ahc), devinfo.channel, devinfo.target);
570 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
571 "ARG_1 == 0x%x ACCUM = 0x%x\n",
572 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
573 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
574 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
575 "SINDEX == 0x%x\n",
576 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
577 ahc_index_busy_tcl(ahc,
578 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
579 ahc_inb(ahc, SAVED_LUN))),
580 ahc_inb(ahc, SINDEX));
581 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
582 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
583 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
584 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
585 ahc_inb(ahc, SCB_CONTROL));
586 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
587 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
588 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
589 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
590 ahc_dump_card_state(ahc);
591 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
592 ahc->msgout_len = 1;
593 ahc->msgout_index = 0;
594 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
595 ahc_outb(ahc, MSG_OUT, HOST_MSG);
596 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO);
597 break;
598 }
599 case SEND_REJECT:
600 {
601 u_int rejbyte = ahc_inb(ahc, ACCUM);
602 printf("%s:%c:%d: Warning - unknown message received from "
603 "target (0x%x). Rejecting\n",
604 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
605 break;
606 }
607 case NO_IDENT:
608 {
609 /*
610 * The reconnecting target either did not send an identify
611 * message, or did, but we didn't find an SCB to match and
612 * before it could respond to our ATN/abort, it hit a dataphase.
613 * The only safe thing to do is to blow it away with a bus
614 * reset.
615 */
616 int found;
617
618 printf("%s:%c:%d: Target did not send an IDENTIFY message. "
619 "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
620 ahc_name(ahc), devinfo.channel, devinfo.target,
621 ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
622 found = ahc_reset_channel(ahc, devinfo.channel,
623 /*initiate reset*/TRUE);
624 printf("%s: Issued Channel %c Bus Reset. "
625 "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
626 found);
627 return;
628 }
629 case IGN_WIDE_RES:
630 ahc_handle_ign_wide_residue(ahc, &devinfo);
631 break;
632 case BAD_PHASE:
633 {
634 u_int lastphase;
635
636 lastphase = ahc_inb(ahc, LASTPHASE);
637 printf("%s:%c:%d: unknown scsi bus phase %x, "
638 "lastphase = 0x%x. Attempting to continue\n",
639 ahc_name(ahc), devinfo.channel, devinfo.target,
640 lastphase, ahc_inb(ahc, SCSISIGI));
641 break;
642 }
643 case MISSED_BUSFREE:
644 {
645 u_int lastphase;
646
647 lastphase = ahc_inb(ahc, LASTPHASE);
648 printf("%s:%c:%d: Missed busfree. "
649 "Lastphase = 0x%x, Curphase = 0x%x\n",
650 ahc_name(ahc), devinfo.channel, devinfo.target,
651 lastphase, ahc_inb(ahc, SCSISIGI));
652 ahc_restart(ahc);
653 return;
654 }
655 case HOST_MSG_LOOP:
656 {
657 /*
658 * The sequencer has encountered a message phase
659 * that requires host assistance for completion.
660 * While handling the message phase(s), we will be
661 * notified by the sequencer after each byte is
662 * transfered so we can track bus phase changes.
663 *
664 * If this is the first time we've seen a HOST_MSG_LOOP
665 * interrupt, initialize the state of the host message
666 * loop.
667 */
668 if (ahc->msg_type == MSG_TYPE_NONE) {
563 default:
564 break;
565 }
566 break;
567 }
568 case NO_MATCH:
569 {
570 /* Ensure we don't leave the selection hardware on */
571 ahc_outb(ahc, SCSISEQ,
572 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
573
574 printf("%s:%c:%d: no active SCB for reconnecting "
575 "target - issuing BUS DEVICE RESET\n",
576 ahc_name(ahc), devinfo.channel, devinfo.target);
577 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
578 "ARG_1 == 0x%x ACCUM = 0x%x\n",
579 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
580 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
581 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
582 "SINDEX == 0x%x\n",
583 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
584 ahc_index_busy_tcl(ahc,
585 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
586 ahc_inb(ahc, SAVED_LUN))),
587 ahc_inb(ahc, SINDEX));
588 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
589 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
590 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
591 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
592 ahc_inb(ahc, SCB_CONTROL));
593 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
594 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
595 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
596 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
597 ahc_dump_card_state(ahc);
598 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
599 ahc->msgout_len = 1;
600 ahc->msgout_index = 0;
601 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
602 ahc_outb(ahc, MSG_OUT, HOST_MSG);
603 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO);
604 break;
605 }
606 case SEND_REJECT:
607 {
608 u_int rejbyte = ahc_inb(ahc, ACCUM);
609 printf("%s:%c:%d: Warning - unknown message received from "
610 "target (0x%x). Rejecting\n",
611 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
612 break;
613 }
614 case NO_IDENT:
615 {
616 /*
617 * The reconnecting target either did not send an identify
618 * message, or did, but we didn't find an SCB to match and
619 * before it could respond to our ATN/abort, it hit a dataphase.
620 * The only safe thing to do is to blow it away with a bus
621 * reset.
622 */
623 int found;
624
625 printf("%s:%c:%d: Target did not send an IDENTIFY message. "
626 "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
627 ahc_name(ahc), devinfo.channel, devinfo.target,
628 ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
629 found = ahc_reset_channel(ahc, devinfo.channel,
630 /*initiate reset*/TRUE);
631 printf("%s: Issued Channel %c Bus Reset. "
632 "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
633 found);
634 return;
635 }
636 case IGN_WIDE_RES:
637 ahc_handle_ign_wide_residue(ahc, &devinfo);
638 break;
639 case BAD_PHASE:
640 {
641 u_int lastphase;
642
643 lastphase = ahc_inb(ahc, LASTPHASE);
644 printf("%s:%c:%d: unknown scsi bus phase %x, "
645 "lastphase = 0x%x. Attempting to continue\n",
646 ahc_name(ahc), devinfo.channel, devinfo.target,
647 lastphase, ahc_inb(ahc, SCSISIGI));
648 break;
649 }
650 case MISSED_BUSFREE:
651 {
652 u_int lastphase;
653
654 lastphase = ahc_inb(ahc, LASTPHASE);
655 printf("%s:%c:%d: Missed busfree. "
656 "Lastphase = 0x%x, Curphase = 0x%x\n",
657 ahc_name(ahc), devinfo.channel, devinfo.target,
658 lastphase, ahc_inb(ahc, SCSISIGI));
659 ahc_restart(ahc);
660 return;
661 }
662 case HOST_MSG_LOOP:
663 {
664 /*
665 * The sequencer has encountered a message phase
666 * that requires host assistance for completion.
667 * While handling the message phase(s), we will be
668 * notified by the sequencer after each byte is
669 * transfered so we can track bus phase changes.
670 *
671 * If this is the first time we've seen a HOST_MSG_LOOP
672 * interrupt, initialize the state of the host message
673 * loop.
674 */
675 if (ahc->msg_type == MSG_TYPE_NONE) {
676 struct scb *scb;
677 u_int scb_index;
669 u_int bus_phase;
670
671 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
672 if (bus_phase != P_MESGIN
673 && bus_phase != P_MESGOUT) {
674 printf("ahc_intr: HOST_MSG_LOOP bad "
675 "phase 0x%x\n",
676 bus_phase);
677 /*
678 * Probably transitioned to bus free before
679 * we got here. Just punt the message.
680 */
681 ahc_clear_intstat(ahc);
682 ahc_restart(ahc);
683 return;
684 }
685
678 u_int bus_phase;
679
680 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
681 if (bus_phase != P_MESGIN
682 && bus_phase != P_MESGOUT) {
683 printf("ahc_intr: HOST_MSG_LOOP bad "
684 "phase 0x%x\n",
685 bus_phase);
686 /*
687 * Probably transitioned to bus free before
688 * we got here. Just punt the message.
689 */
690 ahc_clear_intstat(ahc);
691 ahc_restart(ahc);
692 return;
693 }
694
695 scb_index = ahc_inb(ahc, SCB_TAG);
696 scb = ahc_lookup_scb(ahc, scb_index);
686 if (devinfo.role == ROLE_INITIATOR) {
697 if (devinfo.role == ROLE_INITIATOR) {
687 struct scb *scb;
688 u_int scb_index;
689
690 scb_index = ahc_inb(ahc, SCB_TAG);
691 scb = ahc_lookup_scb(ahc, scb_index);
692
693 if (scb == NULL)
694 panic("HOST_MSG_LOOP with "
695 "invalid SCB %x\n", scb_index);
696
697 if (bus_phase == P_MESGOUT)
698 ahc_setup_initiator_msgout(ahc,
699 &devinfo,
700 scb);
701 else {
702 ahc->msg_type =
703 MSG_TYPE_INITIATOR_MSGIN;
704 ahc->msgin_index = 0;
705 }
706 } else {
707 if (bus_phase == P_MESGOUT) {
708 ahc->msg_type =
709 MSG_TYPE_TARGET_MSGOUT;
710 ahc->msgin_index = 0;
711 }
712#if AHC_TARGET_MODE
713 else
698 if (scb == NULL)
699 panic("HOST_MSG_LOOP with "
700 "invalid SCB %x\n", scb_index);
701
702 if (bus_phase == P_MESGOUT)
703 ahc_setup_initiator_msgout(ahc,
704 &devinfo,
705 scb);
706 else {
707 ahc->msg_type =
708 MSG_TYPE_INITIATOR_MSGIN;
709 ahc->msgin_index = 0;
710 }
711 } else {
712 if (bus_phase == P_MESGOUT) {
713 ahc->msg_type =
714 MSG_TYPE_TARGET_MSGOUT;
715 ahc->msgin_index = 0;
716 }
717#if AHC_TARGET_MODE
718 else
714 ahc_setup_target_msgin(ahc, &devinfo);
719 ahc_setup_target_msgin(ahc,
720 &devinfo,
721 scb);
715#endif
716 }
717 }
718
719 ahc_handle_message_phase(ahc);
720 break;
721 }
722 case PERR_DETECTED:
723 {
724 /*
725 * If we've cleared the parity error interrupt
726 * but the sequencer still believes that SCSIPERR
727 * is true, it must be that the parity error is
728 * for the currently presented byte on the bus,
729 * and we are not in a phase (data-in) where we will
730 * eventually ack this byte. Ack the byte and
731 * throw it away in the hope that the target will
732 * take us to message out to deliver the appropriate
733 * error message.
734 */
735 if ((intstat & SCSIINT) == 0
736 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
737 u_int curphase;
738
739 /*
740 * The hardware will only let you ack bytes
741 * if the expected phase in SCSISIGO matches
742 * the current phase. Make sure this is
743 * currently the case.
744 */
745 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
746 ahc_outb(ahc, LASTPHASE, curphase);
747 ahc_outb(ahc, SCSISIGO, curphase);
748 ahc_inb(ahc, SCSIDATL);
749 }
750 break;
751 }
752 case DATA_OVERRUN:
753 {
754 /*
755 * When the sequencer detects an overrun, it
756 * places the controller in "BITBUCKET" mode
757 * and allows the target to complete its transfer.
758 * Unfortunately, none of the counters get updated
759 * when the controller is in this mode, so we have
760 * no way of knowing how large the overrun was.
761 */
762 u_int scbindex = ahc_inb(ahc, SCB_TAG);
763 u_int lastphase = ahc_inb(ahc, LASTPHASE);
764 u_int i;
765
766 scb = ahc_lookup_scb(ahc, scbindex);
767 for (i = 0; i < num_phases; i++) {
768 if (lastphase == ahc_phase_table[i].phase)
769 break;
770 }
771 ahc_print_path(ahc, scb);
772 printf("data overrun detected %s."
773 " Tag == 0x%x.\n",
774 ahc_phase_table[i].phasemsg,
775 scb->hscb->tag);
776 ahc_print_path(ahc, scb);
777 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
778 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
779 ahc_get_transfer_length(scb), scb->sg_count);
780 if (scb->sg_count > 0) {
781 for (i = 0; i < scb->sg_count; i++) {
782 printf("sg[%d] - Addr 0x%x : Length %d\n",
783 i,
784 ahc_le32toh(scb->sg_list[i].addr),
785 ahc_le32toh(scb->sg_list[i].len)
786 & AHC_SG_LEN_MASK);
787 }
788 }
789 /*
790 * Set this and it will take effect when the
791 * target does a command complete.
792 */
793 ahc_freeze_devq(ahc, scb);
794 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
795 ahc_freeze_scb(scb);
796 break;
797 }
798 case MKMSG_FAILED:
799 {
800 u_int scbindex;
801
802 printf("%s:%c:%d:%d: Attempt to issue message failed\n",
803 ahc_name(ahc), devinfo.channel, devinfo.target,
804 devinfo.lun);
805 scbindex = ahc_inb(ahc, SCB_TAG);
806 scb = ahc_lookup_scb(ahc, scbindex);
807 if (scb != NULL
808 && (scb->flags & SCB_RECOVERY_SCB) != 0)
809 /*
810 * Ensure that we didn't put a second instance of this
811 * SCB into the QINFIFO.
812 */
813 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
814 SCB_GET_CHANNEL(ahc, scb),
815 SCB_GET_LUN(scb), scb->hscb->tag,
816 ROLE_INITIATOR, /*status*/0,
817 SEARCH_REMOVE);
818 break;
819 }
820 case NO_FREE_SCB:
821 {
822 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
823 ahc_dump_card_state(ahc);
824 panic("for safety");
825 break;
826 }
827 case SCB_MISMATCH:
828 {
829 u_int scbptr;
830
831 scbptr = ahc_inb(ahc, SCBPTR);
832 printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n",
833 scbptr, ahc_inb(ahc, ARG_1),
834 ahc->scb_data->hscbs[scbptr].tag);
835 ahc_dump_card_state(ahc);
836 panic("for saftey");
837 break;
838 }
839 case OUT_OF_RANGE:
840 {
841 printf("%s: BTT calculation out of range\n", ahc_name(ahc));
842 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
843 "ARG_1 == 0x%x ACCUM = 0x%x\n",
844 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
845 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
846 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
847 "SINDEX == 0x%x\n, A == 0x%x\n",
848 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
849 ahc_index_busy_tcl(ahc,
850 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
851 ahc_inb(ahc, SAVED_LUN))),
852 ahc_inb(ahc, SINDEX),
853 ahc_inb(ahc, ACCUM));
854 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
855 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
856 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
857 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
858 ahc_inb(ahc, SCB_CONTROL));
859 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
860 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
861 ahc_dump_card_state(ahc);
862 panic("for safety");
863 break;
864 }
865 default:
866 printf("ahc_intr: seqint, "
867 "intstat == 0x%x, scsisigi = 0x%x\n",
868 intstat, ahc_inb(ahc, SCSISIGI));
869 break;
870 }
871unpause:
872 /*
873 * The sequencer is paused immediately on
874 * a SEQINT, so we should restart it when
875 * we're done.
876 */
877 ahc_unpause(ahc);
878}
879
880void
881ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
882{
883 u_int scb_index;
884 u_int status0;
885 u_int status;
886 struct scb *scb;
887 char cur_channel;
888 char intr_channel;
889
890 /* Make sure the sequencer is in a safe location. */
891 ahc_clear_critical_section(ahc);
892
893 if ((ahc->features & AHC_TWIN) != 0
894 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
895 cur_channel = 'B';
896 else
897 cur_channel = 'A';
898 intr_channel = cur_channel;
899
900 if ((ahc->features & AHC_ULTRA2) != 0)
901 status0 = ahc_inb(ahc, SSTAT0) & IOERR;
902 else
903 status0 = 0;
904 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
905 if (status == 0 && status0 == 0) {
906 if ((ahc->features & AHC_TWIN) != 0) {
907 /* Try the other channel */
908 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
909 status = ahc_inb(ahc, SSTAT1)
910 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
911 intr_channel = (cur_channel == 'A') ? 'B' : 'A';
912 }
913 if (status == 0) {
914 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
915 ahc_outb(ahc, CLRINT, CLRSCSIINT);
916 ahc_unpause(ahc);
917 return;
918 }
919 }
920
921 scb_index = ahc_inb(ahc, SCB_TAG);
922 scb = ahc_lookup_scb(ahc, scb_index);
923 if (scb != NULL
924 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
925 scb = NULL;
926
927 if ((ahc->features & AHC_ULTRA2) != 0
928 && (status0 & IOERR) != 0) {
929 int now_lvd;
930
931 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
932 printf("%s: Transceiver State Has Changed to %s mode\n",
933 ahc_name(ahc), now_lvd ? "LVD" : "SE");
934 ahc_outb(ahc, CLRSINT0, CLRIOERR);
935 /*
936 * When transitioning to SE mode, the reset line
937 * glitches, triggering an arbitration bug in some
938 * Ultra2 controllers. This bug is cleared when we
939 * assert the reset line. Since a reset glitch has
940 * already occurred with this transition and a
941 * transceiver state change is handled just like
942 * a bus reset anyway, asserting the reset line
943 * ourselves is safe.
944 */
945 ahc_reset_channel(ahc, intr_channel,
946 /*Initiate Reset*/now_lvd == 0);
947 } else if ((status & SCSIRSTI) != 0) {
948 printf("%s: Someone reset channel %c\n",
949 ahc_name(ahc), intr_channel);
950 if (intr_channel != cur_channel)
951 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
952 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
953 } else if ((status & SCSIPERR) != 0) {
954 /*
955 * Determine the bus phase and queue an appropriate message.
956 * SCSIPERR is latched true as soon as a parity error
957 * occurs. If the sequencer acked the transfer that
958 * caused the parity error and the currently presented
959 * transfer on the bus has correct parity, SCSIPERR will
960 * be cleared by CLRSCSIPERR. Use this to determine if
961 * we should look at the last phase the sequencer recorded,
962 * or the current phase presented on the bus.
963 */
964 u_int mesg_out;
965 u_int curphase;
966 u_int errorphase;
967 u_int lastphase;
968 u_int scsirate;
969 u_int i;
970 u_int sstat2;
971
972 lastphase = ahc_inb(ahc, LASTPHASE);
973 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
974 sstat2 = ahc_inb(ahc, SSTAT2);
975 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
976 /*
977 * For all phases save DATA, the sequencer won't
978 * automatically ack a byte that has a parity error
979 * in it. So the only way that the current phase
980 * could be 'data-in' is if the parity error is for
981 * an already acked byte in the data phase. During
982 * synchronous data-in transfers, we may actually
983 * ack bytes before latching the current phase in
984 * LASTPHASE, leading to the discrepancy between
985 * curphase and lastphase.
986 */
987 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
988 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
989 errorphase = curphase;
990 else
991 errorphase = lastphase;
992
993 for (i = 0; i < num_phases; i++) {
994 if (errorphase == ahc_phase_table[i].phase)
995 break;
996 }
997 mesg_out = ahc_phase_table[i].mesg_out;
998 if (scb != NULL)
999 ahc_print_path(ahc, scb);
1000 else
1001 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
1002 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
1003 scsirate = ahc_inb(ahc, SCSIRATE);
1004 printf("parity error detected %s. "
1005 "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
1006 ahc_phase_table[i].phasemsg,
1007 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
1008 scsirate);
1009
1010 if ((ahc->features & AHC_DT) != 0) {
1011
1012 if ((sstat2 & CRCVALERR) != 0)
1013 printf("\tCRC Value Mismatch\n");
1014 if ((sstat2 & CRCENDERR) != 0)
1015 printf("\tNo terminal CRC packet recevied\n");
1016 if ((sstat2 & CRCREQERR) != 0)
1017 printf("\tIllegal CRC packet request\n");
1018 if ((sstat2 & DUAL_EDGE_ERR) != 0)
1019 printf("\tUnexpected %sDT Data Phase\n",
1020 (scsirate & SINGLE_EDGE) ? "" : "non-");
1021 }
1022
1023 /*
1024 * We've set the hardware to assert ATN if we
1025 * get a parity error on "in" phases, so all we
1026 * need to do is stuff the message buffer with
1027 * the appropriate message. "In" phases have set
1028 * mesg_out to something other than MSG_NOP.
1029 */
1030 if (mesg_out != MSG_NOOP) {
1031 if (ahc->msg_type != MSG_TYPE_NONE)
1032 ahc->send_msg_perror = TRUE;
1033 else
1034 ahc_outb(ahc, MSG_OUT, mesg_out);
1035 }
1036 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1037 ahc_unpause(ahc);
1038 } else if ((status & BUSFREE) != 0
1039 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1040 u_int lastphase;
1041 u_int saved_scsiid;
1042 u_int saved_lun;
1043 u_int target;
1044 u_int initiator_role_id;
1045 char channel;
1046 int printerror;
1047
1048 /*
1049 * Clear our selection hardware as soon as possible.
1050 * We may have an entry in the waiting Q for this target,
1051 * that is affected by this busfree and we don't want to
1052 * go about selecting the target while we handle the event.
1053 */
1054 ahc_outb(ahc, SCSISEQ,
1055 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1056
1057 /*
1058 * Disable busfree interrupts and clear the busfree
1059 * interrupt status. We do this here so that several
1060 * bus transactions occur prior to clearing the SCSIINT
1061 * latch. It can take a bit for the clearing to take effect.
1062 */
1063 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1064 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1065
1066 /*
1067 * Look at what phase we were last in.
1068 * If its message out, chances are pretty good
1069 * that the busfree was in response to one of
1070 * our abort requests.
1071 */
1072 lastphase = ahc_inb(ahc, LASTPHASE);
1073 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1074 saved_lun = ahc_inb(ahc, SAVED_LUN);
1075 target = SCSIID_TARGET(ahc, saved_scsiid);
1076 initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1077 channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1078 printerror = 1;
1079
1080 if (lastphase == P_MESGOUT) {
1081 struct ahc_devinfo devinfo;
1082 u_int tag;
1083
1084 ahc_fetch_devinfo(ahc, &devinfo);
1085 tag = SCB_LIST_NULL;
1086 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
1087 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
1088 if (ahc->msgout_buf[ahc->msgout_index - 1]
1089 == MSG_ABORT_TAG)
1090 tag = scb->hscb->tag;
1091 ahc_print_path(ahc, scb);
1092 printf("SCB %d - Abort%s Completed.\n",
1093 scb->hscb->tag, tag == SCB_LIST_NULL ?
1094 "" : " Tag");
1095 ahc_abort_scbs(ahc, target, channel,
1096 saved_lun, tag,
1097 ROLE_INITIATOR,
1098 CAM_REQ_ABORTED);
1099 printerror = 0;
1100 } else if (ahc_sent_msg(ahc, AHCMSG_1B,
1101 MSG_BUS_DEV_RESET, TRUE)) {
1102 struct ahc_devinfo devinfo;
1103#ifdef __FreeBSD__
1104 /*
1105 * Don't mark the user's request for this BDR
1106 * as completing with CAM_BDR_SENT. CAM3
1107 * specifies CAM_REQ_CMP.
1108 */
1109 if (scb != NULL
1110 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
1111 && ahc_match_scb(ahc, scb, target, channel,
1112 CAM_LUN_WILDCARD,
1113 SCB_LIST_NULL,
1114 ROLE_INITIATOR)) {
1115 ahc_set_transaction_status(scb, CAM_REQ_CMP);
1116 }
1117#endif
1118 ahc_compile_devinfo(&devinfo,
1119 initiator_role_id,
1120 target,
1121 CAM_LUN_WILDCARD,
1122 channel,
1123 ROLE_INITIATOR);
1124 ahc_handle_devreset(ahc, &devinfo,
1125 CAM_BDR_SENT,
1126 "Bus Device Reset",
1127 /*verbose_level*/0);
1128 printerror = 0;
1129 } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1130 MSG_EXT_PPR, FALSE)) {
1131 struct ahc_initiator_tinfo *tinfo;
722#endif
723 }
724 }
725
726 ahc_handle_message_phase(ahc);
727 break;
728 }
729 case PERR_DETECTED:
730 {
731 /*
732 * If we've cleared the parity error interrupt
733 * but the sequencer still believes that SCSIPERR
734 * is true, it must be that the parity error is
735 * for the currently presented byte on the bus,
736 * and we are not in a phase (data-in) where we will
737 * eventually ack this byte. Ack the byte and
738 * throw it away in the hope that the target will
739 * take us to message out to deliver the appropriate
740 * error message.
741 */
742 if ((intstat & SCSIINT) == 0
743 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
744 u_int curphase;
745
746 /*
747 * The hardware will only let you ack bytes
748 * if the expected phase in SCSISIGO matches
749 * the current phase. Make sure this is
750 * currently the case.
751 */
752 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
753 ahc_outb(ahc, LASTPHASE, curphase);
754 ahc_outb(ahc, SCSISIGO, curphase);
755 ahc_inb(ahc, SCSIDATL);
756 }
757 break;
758 }
759 case DATA_OVERRUN:
760 {
761 /*
762 * When the sequencer detects an overrun, it
763 * places the controller in "BITBUCKET" mode
764 * and allows the target to complete its transfer.
765 * Unfortunately, none of the counters get updated
766 * when the controller is in this mode, so we have
767 * no way of knowing how large the overrun was.
768 */
769 u_int scbindex = ahc_inb(ahc, SCB_TAG);
770 u_int lastphase = ahc_inb(ahc, LASTPHASE);
771 u_int i;
772
773 scb = ahc_lookup_scb(ahc, scbindex);
774 for (i = 0; i < num_phases; i++) {
775 if (lastphase == ahc_phase_table[i].phase)
776 break;
777 }
778 ahc_print_path(ahc, scb);
779 printf("data overrun detected %s."
780 " Tag == 0x%x.\n",
781 ahc_phase_table[i].phasemsg,
782 scb->hscb->tag);
783 ahc_print_path(ahc, scb);
784 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
785 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
786 ahc_get_transfer_length(scb), scb->sg_count);
787 if (scb->sg_count > 0) {
788 for (i = 0; i < scb->sg_count; i++) {
789 printf("sg[%d] - Addr 0x%x : Length %d\n",
790 i,
791 ahc_le32toh(scb->sg_list[i].addr),
792 ahc_le32toh(scb->sg_list[i].len)
793 & AHC_SG_LEN_MASK);
794 }
795 }
796 /*
797 * Set this and it will take effect when the
798 * target does a command complete.
799 */
800 ahc_freeze_devq(ahc, scb);
801 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
802 ahc_freeze_scb(scb);
803 break;
804 }
805 case MKMSG_FAILED:
806 {
807 u_int scbindex;
808
809 printf("%s:%c:%d:%d: Attempt to issue message failed\n",
810 ahc_name(ahc), devinfo.channel, devinfo.target,
811 devinfo.lun);
812 scbindex = ahc_inb(ahc, SCB_TAG);
813 scb = ahc_lookup_scb(ahc, scbindex);
814 if (scb != NULL
815 && (scb->flags & SCB_RECOVERY_SCB) != 0)
816 /*
817 * Ensure that we didn't put a second instance of this
818 * SCB into the QINFIFO.
819 */
820 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
821 SCB_GET_CHANNEL(ahc, scb),
822 SCB_GET_LUN(scb), scb->hscb->tag,
823 ROLE_INITIATOR, /*status*/0,
824 SEARCH_REMOVE);
825 break;
826 }
827 case NO_FREE_SCB:
828 {
829 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
830 ahc_dump_card_state(ahc);
831 panic("for safety");
832 break;
833 }
834 case SCB_MISMATCH:
835 {
836 u_int scbptr;
837
838 scbptr = ahc_inb(ahc, SCBPTR);
839 printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n",
840 scbptr, ahc_inb(ahc, ARG_1),
841 ahc->scb_data->hscbs[scbptr].tag);
842 ahc_dump_card_state(ahc);
843 panic("for saftey");
844 break;
845 }
846 case OUT_OF_RANGE:
847 {
848 printf("%s: BTT calculation out of range\n", ahc_name(ahc));
849 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
850 "ARG_1 == 0x%x ACCUM = 0x%x\n",
851 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
852 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
853 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
854 "SINDEX == 0x%x\n, A == 0x%x\n",
855 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
856 ahc_index_busy_tcl(ahc,
857 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
858 ahc_inb(ahc, SAVED_LUN))),
859 ahc_inb(ahc, SINDEX),
860 ahc_inb(ahc, ACCUM));
861 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
862 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
863 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
864 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
865 ahc_inb(ahc, SCB_CONTROL));
866 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
867 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
868 ahc_dump_card_state(ahc);
869 panic("for safety");
870 break;
871 }
872 default:
873 printf("ahc_intr: seqint, "
874 "intstat == 0x%x, scsisigi = 0x%x\n",
875 intstat, ahc_inb(ahc, SCSISIGI));
876 break;
877 }
878unpause:
879 /*
880 * The sequencer is paused immediately on
881 * a SEQINT, so we should restart it when
882 * we're done.
883 */
884 ahc_unpause(ahc);
885}
886
887void
888ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
889{
890 u_int scb_index;
891 u_int status0;
892 u_int status;
893 struct scb *scb;
894 char cur_channel;
895 char intr_channel;
896
897 /* Make sure the sequencer is in a safe location. */
898 ahc_clear_critical_section(ahc);
899
900 if ((ahc->features & AHC_TWIN) != 0
901 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
902 cur_channel = 'B';
903 else
904 cur_channel = 'A';
905 intr_channel = cur_channel;
906
907 if ((ahc->features & AHC_ULTRA2) != 0)
908 status0 = ahc_inb(ahc, SSTAT0) & IOERR;
909 else
910 status0 = 0;
911 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
912 if (status == 0 && status0 == 0) {
913 if ((ahc->features & AHC_TWIN) != 0) {
914 /* Try the other channel */
915 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
916 status = ahc_inb(ahc, SSTAT1)
917 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
918 intr_channel = (cur_channel == 'A') ? 'B' : 'A';
919 }
920 if (status == 0) {
921 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
922 ahc_outb(ahc, CLRINT, CLRSCSIINT);
923 ahc_unpause(ahc);
924 return;
925 }
926 }
927
928 scb_index = ahc_inb(ahc, SCB_TAG);
929 scb = ahc_lookup_scb(ahc, scb_index);
930 if (scb != NULL
931 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
932 scb = NULL;
933
934 if ((ahc->features & AHC_ULTRA2) != 0
935 && (status0 & IOERR) != 0) {
936 int now_lvd;
937
938 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
939 printf("%s: Transceiver State Has Changed to %s mode\n",
940 ahc_name(ahc), now_lvd ? "LVD" : "SE");
941 ahc_outb(ahc, CLRSINT0, CLRIOERR);
942 /*
943 * When transitioning to SE mode, the reset line
944 * glitches, triggering an arbitration bug in some
945 * Ultra2 controllers. This bug is cleared when we
946 * assert the reset line. Since a reset glitch has
947 * already occurred with this transition and a
948 * transceiver state change is handled just like
949 * a bus reset anyway, asserting the reset line
950 * ourselves is safe.
951 */
952 ahc_reset_channel(ahc, intr_channel,
953 /*Initiate Reset*/now_lvd == 0);
954 } else if ((status & SCSIRSTI) != 0) {
955 printf("%s: Someone reset channel %c\n",
956 ahc_name(ahc), intr_channel);
957 if (intr_channel != cur_channel)
958 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
959 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
960 } else if ((status & SCSIPERR) != 0) {
961 /*
962 * Determine the bus phase and queue an appropriate message.
963 * SCSIPERR is latched true as soon as a parity error
964 * occurs. If the sequencer acked the transfer that
965 * caused the parity error and the currently presented
966 * transfer on the bus has correct parity, SCSIPERR will
967 * be cleared by CLRSCSIPERR. Use this to determine if
968 * we should look at the last phase the sequencer recorded,
969 * or the current phase presented on the bus.
970 */
971 u_int mesg_out;
972 u_int curphase;
973 u_int errorphase;
974 u_int lastphase;
975 u_int scsirate;
976 u_int i;
977 u_int sstat2;
978
979 lastphase = ahc_inb(ahc, LASTPHASE);
980 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
981 sstat2 = ahc_inb(ahc, SSTAT2);
982 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
983 /*
984 * For all phases save DATA, the sequencer won't
985 * automatically ack a byte that has a parity error
986 * in it. So the only way that the current phase
987 * could be 'data-in' is if the parity error is for
988 * an already acked byte in the data phase. During
989 * synchronous data-in transfers, we may actually
990 * ack bytes before latching the current phase in
991 * LASTPHASE, leading to the discrepancy between
992 * curphase and lastphase.
993 */
994 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
995 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
996 errorphase = curphase;
997 else
998 errorphase = lastphase;
999
1000 for (i = 0; i < num_phases; i++) {
1001 if (errorphase == ahc_phase_table[i].phase)
1002 break;
1003 }
1004 mesg_out = ahc_phase_table[i].mesg_out;
1005 if (scb != NULL)
1006 ahc_print_path(ahc, scb);
1007 else
1008 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
1009 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
1010 scsirate = ahc_inb(ahc, SCSIRATE);
1011 printf("parity error detected %s. "
1012 "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
1013 ahc_phase_table[i].phasemsg,
1014 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
1015 scsirate);
1016
1017 if ((ahc->features & AHC_DT) != 0) {
1018
1019 if ((sstat2 & CRCVALERR) != 0)
1020 printf("\tCRC Value Mismatch\n");
1021 if ((sstat2 & CRCENDERR) != 0)
1022 printf("\tNo terminal CRC packet recevied\n");
1023 if ((sstat2 & CRCREQERR) != 0)
1024 printf("\tIllegal CRC packet request\n");
1025 if ((sstat2 & DUAL_EDGE_ERR) != 0)
1026 printf("\tUnexpected %sDT Data Phase\n",
1027 (scsirate & SINGLE_EDGE) ? "" : "non-");
1028 }
1029
1030 /*
1031 * We've set the hardware to assert ATN if we
1032 * get a parity error on "in" phases, so all we
1033 * need to do is stuff the message buffer with
1034 * the appropriate message. "In" phases have set
1035 * mesg_out to something other than MSG_NOP.
1036 */
1037 if (mesg_out != MSG_NOOP) {
1038 if (ahc->msg_type != MSG_TYPE_NONE)
1039 ahc->send_msg_perror = TRUE;
1040 else
1041 ahc_outb(ahc, MSG_OUT, mesg_out);
1042 }
1043 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1044 ahc_unpause(ahc);
1045 } else if ((status & BUSFREE) != 0
1046 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1047 u_int lastphase;
1048 u_int saved_scsiid;
1049 u_int saved_lun;
1050 u_int target;
1051 u_int initiator_role_id;
1052 char channel;
1053 int printerror;
1054
1055 /*
1056 * Clear our selection hardware as soon as possible.
1057 * We may have an entry in the waiting Q for this target,
1058 * that is affected by this busfree and we don't want to
1059 * go about selecting the target while we handle the event.
1060 */
1061 ahc_outb(ahc, SCSISEQ,
1062 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1063
1064 /*
1065 * Disable busfree interrupts and clear the busfree
1066 * interrupt status. We do this here so that several
1067 * bus transactions occur prior to clearing the SCSIINT
1068 * latch. It can take a bit for the clearing to take effect.
1069 */
1070 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1071 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1072
1073 /*
1074 * Look at what phase we were last in.
1075 * If its message out, chances are pretty good
1076 * that the busfree was in response to one of
1077 * our abort requests.
1078 */
1079 lastphase = ahc_inb(ahc, LASTPHASE);
1080 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1081 saved_lun = ahc_inb(ahc, SAVED_LUN);
1082 target = SCSIID_TARGET(ahc, saved_scsiid);
1083 initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1084 channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1085 printerror = 1;
1086
1087 if (lastphase == P_MESGOUT) {
1088 struct ahc_devinfo devinfo;
1089 u_int tag;
1090
1091 ahc_fetch_devinfo(ahc, &devinfo);
1092 tag = SCB_LIST_NULL;
1093 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
1094 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
1095 if (ahc->msgout_buf[ahc->msgout_index - 1]
1096 == MSG_ABORT_TAG)
1097 tag = scb->hscb->tag;
1098 ahc_print_path(ahc, scb);
1099 printf("SCB %d - Abort%s Completed.\n",
1100 scb->hscb->tag, tag == SCB_LIST_NULL ?
1101 "" : " Tag");
1102 ahc_abort_scbs(ahc, target, channel,
1103 saved_lun, tag,
1104 ROLE_INITIATOR,
1105 CAM_REQ_ABORTED);
1106 printerror = 0;
1107 } else if (ahc_sent_msg(ahc, AHCMSG_1B,
1108 MSG_BUS_DEV_RESET, TRUE)) {
1109 struct ahc_devinfo devinfo;
1110#ifdef __FreeBSD__
1111 /*
1112 * Don't mark the user's request for this BDR
1113 * as completing with CAM_BDR_SENT. CAM3
1114 * specifies CAM_REQ_CMP.
1115 */
1116 if (scb != NULL
1117 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
1118 && ahc_match_scb(ahc, scb, target, channel,
1119 CAM_LUN_WILDCARD,
1120 SCB_LIST_NULL,
1121 ROLE_INITIATOR)) {
1122 ahc_set_transaction_status(scb, CAM_REQ_CMP);
1123 }
1124#endif
1125 ahc_compile_devinfo(&devinfo,
1126 initiator_role_id,
1127 target,
1128 CAM_LUN_WILDCARD,
1129 channel,
1130 ROLE_INITIATOR);
1131 ahc_handle_devreset(ahc, &devinfo,
1132 CAM_BDR_SENT,
1133 "Bus Device Reset",
1134 /*verbose_level*/0);
1135 printerror = 0;
1136 } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1137 MSG_EXT_PPR, FALSE)) {
1138 struct ahc_initiator_tinfo *tinfo;
1132 struct tmode_tstate *tstate;
1139 struct ahc_tmode_tstate *tstate;
1133
1134 /*
1135 * PPR Rejected. Try non-ppr negotiation
1136 * and retry command.
1137 */
1138 tinfo = ahc_fetch_transinfo(ahc,
1139 devinfo.channel,
1140 devinfo.our_scsiid,
1141 devinfo.target,
1142 &tstate);
1143 tinfo->current.transport_version = 2;
1144 tinfo->goal.transport_version = 2;
1145 tinfo->goal.ppr_options = 0;
1146 ahc_qinfifo_requeue_tail(ahc, scb);
1147 printerror = 0;
1148 } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1149 MSG_EXT_WDTR, FALSE)
1150 || ahc_sent_msg(ahc, AHCMSG_EXT,
1151 MSG_EXT_SDTR, FALSE)) {
1152 /*
1153 * Negotiation Rejected. Go-async and
1154 * retry command.
1155 */
1156 ahc_set_width(ahc, &devinfo,
1157 MSG_EXT_WDTR_BUS_8_BIT,
1158 AHC_TRANS_CUR|AHC_TRANS_GOAL,
1159 /*paused*/TRUE);
1160 ahc_set_syncrate(ahc, &devinfo,
1161 /*syncrate*/NULL,
1162 /*period*/0, /*offset*/0,
1163 /*ppr_options*/0,
1164 AHC_TRANS_CUR|AHC_TRANS_GOAL,
1165 /*paused*/TRUE);
1166 ahc_qinfifo_requeue_tail(ahc, scb);
1167 printerror = 0;
1168 }
1169 }
1170 if (printerror != 0) {
1171 u_int i;
1172
1173 if (scb != NULL) {
1174 u_int tag;
1175
1176 if ((scb->hscb->control & TAG_ENB) != 0)
1177 tag = scb->hscb->tag;
1178 else
1179 tag = SCB_LIST_NULL;
1180 ahc_print_path(ahc, scb);
1181 ahc_abort_scbs(ahc, target, channel,
1182 SCB_GET_LUN(scb), tag,
1183 ROLE_INITIATOR,
1184 CAM_UNEXP_BUSFREE);
1185 } else {
1186 /*
1187 * We had not fully identified this connection,
1188 * so we cannot abort anything.
1189 */
1190 printf("%s: ", ahc_name(ahc));
1191 }
1192 for (i = 0; i < num_phases; i++) {
1193 if (lastphase == ahc_phase_table[i].phase)
1194 break;
1195 }
1196 printf("Unexpected busfree %s\n"
1197 "SEQADDR == 0x%x\n",
1198 ahc_phase_table[i].phasemsg,
1199 ahc_inb(ahc, SEQADDR0)
1200 | (ahc_inb(ahc, SEQADDR1) << 8));
1201 }
1202 ahc_clear_msg_state(ahc);
1203 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1204 ahc_restart(ahc);
1205 } else if ((status & SELTO) != 0) {
1206 u_int scbptr;
1207
1208 /* Stop the selection */
1209 ahc_outb(ahc, SCSISEQ, 0);
1210
1211 /* No more pending messages */
1212 ahc_clear_msg_state(ahc);
1213
1214 /* Clear interrupt state */
1215 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1216
1217 /*
1218 * Although the driver does not care about the
1219 * 'Selection in Progress' status bit, the busy
1220 * LED does. SELINGO is only cleared by a sucessful
1221 * selection, so we must manually clear it to insure
1222 * the LED turns off just incase no future successful
1223 * selections occur (e.g. no devices on the bus).
1224 */
1225 ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1226
1227 scbptr = ahc_inb(ahc, WAITING_SCBH);
1228 ahc_outb(ahc, SCBPTR, scbptr);
1229 scb_index = ahc_inb(ahc, SCB_TAG);
1230
1231 scb = ahc_lookup_scb(ahc, scb_index);
1232 if (scb == NULL) {
1233 printf("%s: ahc_intr - referenced scb not "
1234 "valid during SELTO scb(%d, %d)\n",
1235 ahc_name(ahc), scbptr, scb_index);
1236 } else {
1237 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1238 ahc_freeze_devq(ahc, scb);
1239 }
1240 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1241 ahc_restart(ahc);
1242 } else {
1243 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1244 ahc_name(ahc), status);
1245 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1246 }
1247}
1248
1249#define AHC_MAX_STEPS 2000
1250void
1251ahc_clear_critical_section(struct ahc_softc *ahc)
1252{
1253 int stepping;
1254 int steps;
1255 u_int simode0;
1256 u_int simode1;
1257
1258 if (ahc->num_critical_sections == 0)
1259 return;
1260
1261 stepping = FALSE;
1262 steps = 0;
1263 simode0 = 0;
1264 simode1 = 0;
1265 for (;;) {
1266 struct cs *cs;
1267 u_int seqaddr;
1268 u_int i;
1269
1270 seqaddr = ahc_inb(ahc, SEQADDR0)
1271 | (ahc_inb(ahc, SEQADDR1) << 8);
1272
1273 cs = ahc->critical_sections;
1274 for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
1275
1276 if (cs->begin < seqaddr && cs->end >= seqaddr)
1277 break;
1278 }
1279
1280 if (i == ahc->num_critical_sections)
1281 break;
1282
1283 if (steps > AHC_MAX_STEPS) {
1284 printf("%s: Infinite loop in critical section\n",
1285 ahc_name(ahc));
1286 ahc_dump_card_state(ahc);
1287 panic("critical section loop");
1288 }
1289
1290 steps++;
1291 if (stepping == FALSE) {
1292
1293 /*
1294 * Disable all interrupt sources so that the
1295 * sequencer will not be stuck by a pausing
1296 * interrupt condition while we attempt to
1297 * leave a critical section.
1298 */
1299 simode0 = ahc_inb(ahc, SIMODE0);
1300 ahc_outb(ahc, SIMODE0, 0);
1301 simode1 = ahc_inb(ahc, SIMODE1);
1302 ahc_outb(ahc, SIMODE1, 0);
1303 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1304 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
1305 stepping = TRUE;
1306 }
1307 ahc_outb(ahc, HCNTRL, ahc->unpause);
1308 do {
1309 ahc_delay(200);
1310 } while (!ahc_is_paused(ahc));
1311 }
1312 if (stepping) {
1313 ahc_outb(ahc, SIMODE0, simode0);
1314 ahc_outb(ahc, SIMODE1, simode1);
1315 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
1316 }
1317}
1318
1319/*
1320 * Clear any pending interrupt status.
1321 */
1322void
1323ahc_clear_intstat(struct ahc_softc *ahc)
1324{
1325 /* Clear any interrupt conditions this may have caused */
1326 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
1327 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
1328 CLRREQINIT);
1329 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
1330 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1331}
1332
1333/**************************** Debugging Routines ******************************/
1334void
1335ahc_print_scb(struct scb *scb)
1336{
1337 int i;
1338
1339 struct hardware_scb *hscb = scb->hscb;
1340
1341 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1342 scb,
1343 hscb->control,
1344 hscb->scsiid,
1345 hscb->lun,
1346 hscb->cdb_len);
1347 i = 0;
1348 printf("Shared Data: %#02x %#02x %#02x %#02x\n",
1349 hscb->shared_data.cdb[i++],
1350 hscb->shared_data.cdb[i++],
1351 hscb->shared_data.cdb[i++],
1352 hscb->shared_data.cdb[i++]);
1353 printf(" %#02x %#02x %#02x %#02x\n",
1354 hscb->shared_data.cdb[i++],
1355 hscb->shared_data.cdb[i++],
1356 hscb->shared_data.cdb[i++],
1357 hscb->shared_data.cdb[i++]);
1358 printf(" %#02x %#02x %#02x %#02x\n",
1359 hscb->shared_data.cdb[i++],
1360 hscb->shared_data.cdb[i++],
1361 hscb->shared_data.cdb[i++],
1362 hscb->shared_data.cdb[i++]);
1363 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1364 ahc_le32toh(hscb->dataptr),
1365 ahc_le32toh(hscb->datacnt),
1366 ahc_le32toh(hscb->sgptr),
1367 hscb->tag);
1368 if (scb->sg_count > 0) {
1369 for (i = 0; i < scb->sg_count; i++) {
1370 printf("sg[%d] - Addr 0x%x : Length %d\n",
1371 i,
1372 ahc_le32toh(scb->sg_list[i].addr),
1373 ahc_le32toh(scb->sg_list[i].len));
1374 }
1375 }
1376}
1377
1378/************************* Transfer Negotiation *******************************/
1379/*
1380 * Allocate per target mode instance (ID we respond to as a target)
1381 * transfer negotiation data structures.
1382 */
1140
1141 /*
1142 * PPR Rejected. Try non-ppr negotiation
1143 * and retry command.
1144 */
1145 tinfo = ahc_fetch_transinfo(ahc,
1146 devinfo.channel,
1147 devinfo.our_scsiid,
1148 devinfo.target,
1149 &tstate);
1150 tinfo->current.transport_version = 2;
1151 tinfo->goal.transport_version = 2;
1152 tinfo->goal.ppr_options = 0;
1153 ahc_qinfifo_requeue_tail(ahc, scb);
1154 printerror = 0;
1155 } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1156 MSG_EXT_WDTR, FALSE)
1157 || ahc_sent_msg(ahc, AHCMSG_EXT,
1158 MSG_EXT_SDTR, FALSE)) {
1159 /*
1160 * Negotiation Rejected. Go-async and
1161 * retry command.
1162 */
1163 ahc_set_width(ahc, &devinfo,
1164 MSG_EXT_WDTR_BUS_8_BIT,
1165 AHC_TRANS_CUR|AHC_TRANS_GOAL,
1166 /*paused*/TRUE);
1167 ahc_set_syncrate(ahc, &devinfo,
1168 /*syncrate*/NULL,
1169 /*period*/0, /*offset*/0,
1170 /*ppr_options*/0,
1171 AHC_TRANS_CUR|AHC_TRANS_GOAL,
1172 /*paused*/TRUE);
1173 ahc_qinfifo_requeue_tail(ahc, scb);
1174 printerror = 0;
1175 }
1176 }
1177 if (printerror != 0) {
1178 u_int i;
1179
1180 if (scb != NULL) {
1181 u_int tag;
1182
1183 if ((scb->hscb->control & TAG_ENB) != 0)
1184 tag = scb->hscb->tag;
1185 else
1186 tag = SCB_LIST_NULL;
1187 ahc_print_path(ahc, scb);
1188 ahc_abort_scbs(ahc, target, channel,
1189 SCB_GET_LUN(scb), tag,
1190 ROLE_INITIATOR,
1191 CAM_UNEXP_BUSFREE);
1192 } else {
1193 /*
1194 * We had not fully identified this connection,
1195 * so we cannot abort anything.
1196 */
1197 printf("%s: ", ahc_name(ahc));
1198 }
1199 for (i = 0; i < num_phases; i++) {
1200 if (lastphase == ahc_phase_table[i].phase)
1201 break;
1202 }
1203 printf("Unexpected busfree %s\n"
1204 "SEQADDR == 0x%x\n",
1205 ahc_phase_table[i].phasemsg,
1206 ahc_inb(ahc, SEQADDR0)
1207 | (ahc_inb(ahc, SEQADDR1) << 8));
1208 }
1209 ahc_clear_msg_state(ahc);
1210 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1211 ahc_restart(ahc);
1212 } else if ((status & SELTO) != 0) {
1213 u_int scbptr;
1214
1215 /* Stop the selection */
1216 ahc_outb(ahc, SCSISEQ, 0);
1217
1218 /* No more pending messages */
1219 ahc_clear_msg_state(ahc);
1220
1221 /* Clear interrupt state */
1222 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1223
1224 /*
1225 * Although the driver does not care about the
1226 * 'Selection in Progress' status bit, the busy
1227 * LED does. SELINGO is only cleared by a sucessful
1228 * selection, so we must manually clear it to insure
1229 * the LED turns off just incase no future successful
1230 * selections occur (e.g. no devices on the bus).
1231 */
1232 ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1233
1234 scbptr = ahc_inb(ahc, WAITING_SCBH);
1235 ahc_outb(ahc, SCBPTR, scbptr);
1236 scb_index = ahc_inb(ahc, SCB_TAG);
1237
1238 scb = ahc_lookup_scb(ahc, scb_index);
1239 if (scb == NULL) {
1240 printf("%s: ahc_intr - referenced scb not "
1241 "valid during SELTO scb(%d, %d)\n",
1242 ahc_name(ahc), scbptr, scb_index);
1243 } else {
1244 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1245 ahc_freeze_devq(ahc, scb);
1246 }
1247 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1248 ahc_restart(ahc);
1249 } else {
1250 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1251 ahc_name(ahc), status);
1252 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1253 }
1254}
1255
1256#define AHC_MAX_STEPS 2000
1257void
1258ahc_clear_critical_section(struct ahc_softc *ahc)
1259{
1260 int stepping;
1261 int steps;
1262 u_int simode0;
1263 u_int simode1;
1264
1265 if (ahc->num_critical_sections == 0)
1266 return;
1267
1268 stepping = FALSE;
1269 steps = 0;
1270 simode0 = 0;
1271 simode1 = 0;
1272 for (;;) {
1273 struct cs *cs;
1274 u_int seqaddr;
1275 u_int i;
1276
1277 seqaddr = ahc_inb(ahc, SEQADDR0)
1278 | (ahc_inb(ahc, SEQADDR1) << 8);
1279
1280 cs = ahc->critical_sections;
1281 for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
1282
1283 if (cs->begin < seqaddr && cs->end >= seqaddr)
1284 break;
1285 }
1286
1287 if (i == ahc->num_critical_sections)
1288 break;
1289
1290 if (steps > AHC_MAX_STEPS) {
1291 printf("%s: Infinite loop in critical section\n",
1292 ahc_name(ahc));
1293 ahc_dump_card_state(ahc);
1294 panic("critical section loop");
1295 }
1296
1297 steps++;
1298 if (stepping == FALSE) {
1299
1300 /*
1301 * Disable all interrupt sources so that the
1302 * sequencer will not be stuck by a pausing
1303 * interrupt condition while we attempt to
1304 * leave a critical section.
1305 */
1306 simode0 = ahc_inb(ahc, SIMODE0);
1307 ahc_outb(ahc, SIMODE0, 0);
1308 simode1 = ahc_inb(ahc, SIMODE1);
1309 ahc_outb(ahc, SIMODE1, 0);
1310 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1311 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
1312 stepping = TRUE;
1313 }
1314 ahc_outb(ahc, HCNTRL, ahc->unpause);
1315 do {
1316 ahc_delay(200);
1317 } while (!ahc_is_paused(ahc));
1318 }
1319 if (stepping) {
1320 ahc_outb(ahc, SIMODE0, simode0);
1321 ahc_outb(ahc, SIMODE1, simode1);
1322 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
1323 }
1324}
1325
1326/*
1327 * Clear any pending interrupt status.
1328 */
1329void
1330ahc_clear_intstat(struct ahc_softc *ahc)
1331{
1332 /* Clear any interrupt conditions this may have caused */
1333 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
1334 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
1335 CLRREQINIT);
1336 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
1337 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1338}
1339
1340/**************************** Debugging Routines ******************************/
1341void
1342ahc_print_scb(struct scb *scb)
1343{
1344 int i;
1345
1346 struct hardware_scb *hscb = scb->hscb;
1347
1348 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1349 scb,
1350 hscb->control,
1351 hscb->scsiid,
1352 hscb->lun,
1353 hscb->cdb_len);
1354 i = 0;
1355 printf("Shared Data: %#02x %#02x %#02x %#02x\n",
1356 hscb->shared_data.cdb[i++],
1357 hscb->shared_data.cdb[i++],
1358 hscb->shared_data.cdb[i++],
1359 hscb->shared_data.cdb[i++]);
1360 printf(" %#02x %#02x %#02x %#02x\n",
1361 hscb->shared_data.cdb[i++],
1362 hscb->shared_data.cdb[i++],
1363 hscb->shared_data.cdb[i++],
1364 hscb->shared_data.cdb[i++]);
1365 printf(" %#02x %#02x %#02x %#02x\n",
1366 hscb->shared_data.cdb[i++],
1367 hscb->shared_data.cdb[i++],
1368 hscb->shared_data.cdb[i++],
1369 hscb->shared_data.cdb[i++]);
1370 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1371 ahc_le32toh(hscb->dataptr),
1372 ahc_le32toh(hscb->datacnt),
1373 ahc_le32toh(hscb->sgptr),
1374 hscb->tag);
1375 if (scb->sg_count > 0) {
1376 for (i = 0; i < scb->sg_count; i++) {
1377 printf("sg[%d] - Addr 0x%x : Length %d\n",
1378 i,
1379 ahc_le32toh(scb->sg_list[i].addr),
1380 ahc_le32toh(scb->sg_list[i].len));
1381 }
1382 }
1383}
1384
1385/************************* Transfer Negotiation *******************************/
1386/*
1387 * Allocate per target mode instance (ID we respond to as a target)
1388 * transfer negotiation data structures.
1389 */
1383static struct tmode_tstate *
1390static struct ahc_tmode_tstate *
1384ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1385{
1391ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1392{
1386 struct tmode_tstate *master_tstate;
1387 struct tmode_tstate *tstate;
1393 struct ahc_tmode_tstate *master_tstate;
1394 struct ahc_tmode_tstate *tstate;
1388 int i;
1389
1390 master_tstate = ahc->enabled_targets[ahc->our_id];
1391 if (channel == 'B') {
1392 scsi_id += 8;
1393 master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1394 }
1395 if (ahc->enabled_targets[scsi_id] != NULL
1396 && ahc->enabled_targets[scsi_id] != master_tstate)
1397 panic("%s: ahc_alloc_tstate - Target already allocated",
1398 ahc_name(ahc));
1399 tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
1400 if (tstate == NULL)
1401 return (NULL);
1402
1403 /*
1404 * If we have allocated a master tstate, copy user settings from
1405 * the master tstate (taken from SRAM or the EEPROM) for this
1406 * channel, but reset our current and goal settings to async/narrow
1407 * until an initiator talks to us.
1408 */
1409 if (master_tstate != NULL) {
1410 memcpy(tstate, master_tstate, sizeof(*tstate));
1411 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
1412 tstate->ultraenb = 0;
1413 for (i = 0; i < 16; i++) {
1414 memset(&tstate->transinfo[i].current, 0,
1415 sizeof(tstate->transinfo[i].current));
1416 memset(&tstate->transinfo[i].goal, 0,
1417 sizeof(tstate->transinfo[i].goal));
1418 }
1419 } else
1420 memset(tstate, 0, sizeof(*tstate));
1421 ahc->enabled_targets[scsi_id] = tstate;
1422 return (tstate);
1423}
1424
#ifdef AHC_TARGET_MODE
/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.  This span in the diff dump
 * contained interleaved old/new copies (including a stray repeat of
 * ahc_alloc_tstate's body); this is the reconstructed new version.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct ahc_tmode_tstate *tstate;

	/*
	 * Don't clean up our "master" tstate.
	 * It has our default user settings.
	 */
	if (((channel == 'B' && scsi_id == ahc->our_id_b)
	  || (channel == 'A' && scsi_id == ahc->our_id))
	 && force == FALSE)
		return;

	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	ahc->enabled_targets[scsi_id] = NULL;
}
#endif
1450
1451/*
1452 * Called when we have an active connection to a target on the bus,
1453 * this function finds the nearest syncrate to the input period limited
1454 * by the capabilities of the bus connectivity of and sync settings for
1455 * the target.
1456 */
1457struct ahc_syncrate *
1458ahc_devlimited_syncrate(struct ahc_softc *ahc,
1459 struct ahc_initiator_tinfo *tinfo,
1460 u_int *period, u_int *ppr_options, role_t role) {
1461 struct ahc_transinfo *transinfo;
1462 u_int maxsync;
1463
1464 if ((ahc->features & AHC_ULTRA2) != 0) {
1465 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1466 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1467 maxsync = AHC_SYNCRATE_DT;
1468 } else {
1469 maxsync = AHC_SYNCRATE_ULTRA;
1470 /* Can't do DT on an SE bus */
1471 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1472 }
1473 } else if ((ahc->features & AHC_ULTRA) != 0
1474 && (ahc->flags & AHC_ULTRA_DISABLED) == 0) {
1475 maxsync = AHC_SYNCRATE_ULTRA;
1476 } else {
1477 maxsync = AHC_SYNCRATE_FAST;
1478 }
1479 /*
1480 * Never allow a value higher than our current goal
1481 * period otherwise we may allow a target initiated
1482 * negotiation to go above the limit as set by the
1483 * user. In the case of an initiator initiated
1484 * sync negotiation, we limit based on the user
1485 * setting. This allows the system to still accept
1486 * incoming negotiations even if target initiated
1487 * negotiation is not performed.
1488 */
1489 if (role == ROLE_TARGET)
1490 transinfo = &tinfo->user;
1491 else
1492 transinfo = &tinfo->goal;
1493 *ppr_options &= transinfo->ppr_options;
1494 if (transinfo->period == 0) {
1495 *period = 0;
1496 *ppr_options = 0;
1497 return (NULL);
1498 }
1499 *period = MAX(*period, transinfo->period);
1500 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1501}
1502
1503/*
1504 * Look up the valid period to SCSIRATE conversion in our table.
1505 * Return the period and offset that should be sent to the target
1506 * if this was the beginning of an SDTR.
1507 */
1508struct ahc_syncrate *
1509ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1510 u_int *ppr_options, u_int maxsync)
1511{
1512 struct ahc_syncrate *syncrate;
1513
1514 if ((ahc->features & AHC_DT) == 0)
1515 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1516
1517 for (syncrate = &ahc_syncrates[maxsync];
1518 syncrate->rate != NULL;
1519 syncrate++) {
1520
1521 /*
1522 * The Ultra2 table doesn't go as low
1523 * as for the Fast/Ultra cards.
1524 */
1525 if ((ahc->features & AHC_ULTRA2) != 0
1526 && (syncrate->sxfr_u2 == 0))
1527 break;
1528
1529 /* Skip any DT entries if DT is not available */
1530 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1531 && (syncrate->sxfr_u2 & DT_SXFR) != 0)
1532 continue;
1533
1534 if (*period <= syncrate->period) {
1535 /*
1536 * When responding to a target that requests
1537 * sync, the requested rate may fall between
1538 * two rates that we can output, but still be
1539 * a rate that we can receive. Because of this,
1540 * we want to respond to the target with
1541 * the same rate that it sent to us even
1542 * if the period we use to send data to it
1543 * is lower. Only lower the response period
1544 * if we must.
1545 */
1546 if (syncrate == &ahc_syncrates[maxsync])
1547 *period = syncrate->period;
1548
1549 /*
1550 * At some speeds, we only support
1551 * ST transfers.
1552 */
1553 if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1554 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1555 break;
1556 }
1557 }
1558
1559 if ((*period == 0)
1560 || (syncrate->rate == NULL)
1561 || ((ahc->features & AHC_ULTRA2) != 0
1562 && (syncrate->sxfr_u2 == 0))) {
1563 /* Use asynchronous transfers. */
1564 *period = 0;
1565 syncrate = NULL;
1566 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1567 }
1568 return (syncrate);
1569}
1570
1571/*
1572 * Convert from an entry in our syncrate table to the SCSI equivalent
1573 * sync "period" factor.
1574 */
1575u_int
1576ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1577{
1578 struct ahc_syncrate *syncrate;
1579
1580 if ((ahc->features & AHC_ULTRA2) != 0)
1581 scsirate &= SXFR_ULTRA2;
1582 else
1583 scsirate &= SXFR;
1584
1585 syncrate = &ahc_syncrates[maxsync];
1586 while (syncrate->rate != NULL) {
1587
1588 if ((ahc->features & AHC_ULTRA2) != 0) {
1589 if (syncrate->sxfr_u2 == 0)
1590 break;
1591 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1592 return (syncrate->period);
1593 } else if (scsirate == (syncrate->sxfr & SXFR)) {
1594 return (syncrate->period);
1595 }
1596 syncrate++;
1597 }
1598 return (0); /* async */
1599}
1600
1601/*
1602 * Truncate the given synchronous offset to a value the
1603 * current adapter type and syncrate are capable of.
1604 */
1605void
1606ahc_validate_offset(struct ahc_softc *ahc,
1607 struct ahc_initiator_tinfo *tinfo,
1608 struct ahc_syncrate *syncrate,
1609 u_int *offset, int wide, role_t role)
1610{
1611 u_int maxoffset;
1612
1613 /* Limit offset to what we can do */
1614 if (syncrate == NULL) {
1615 maxoffset = 0;
1616 } else if ((ahc->features & AHC_ULTRA2) != 0) {
1617 maxoffset = MAX_OFFSET_ULTRA2;
1618 } else {
1619 if (wide)
1620 maxoffset = MAX_OFFSET_16BIT;
1621 else
1622 maxoffset = MAX_OFFSET_8BIT;
1623 }
1624 *offset = MIN(*offset, maxoffset);
1625 if (tinfo != NULL) {
1626 if (role == ROLE_TARGET)
1627 *offset = MIN(*offset, tinfo->user.offset);
1628 else
1629 *offset = MIN(*offset, tinfo->goal.offset);
1630 }
1631}
1632
1633/*
1634 * Truncate the given transfer width parameter to a value the
1635 * current adapter type is capable of.
1636 */
1637void
1638ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1639 u_int *bus_width, role_t role)
1640{
1641 switch (*bus_width) {
1642 default:
1643 if (ahc->features & AHC_WIDE) {
1644 /* Respond Wide */
1645 *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1646 break;
1647 }
1648 /* FALLTHROUGH */
1649 case MSG_EXT_WDTR_BUS_8_BIT:
1650 *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1651 break;
1652 }
1653 if (tinfo != NULL) {
1654 if (role == ROLE_TARGET)
1655 *bus_width = MIN(tinfo->user.width, *bus_width);
1656 else
1657 *bus_width = MIN(tinfo->goal.width, *bus_width);
1658 }
1659}
1660
1661/*
1662 * Update the bitmask of targets for which the controller should
1663 * negotiate with at the next convenient oportunity. This currently
1664 * means the next time we send the initial identify messages for
1665 * a new transaction.
1666 */
1447 || (channel == 'A' && scsi_id == ahc->our_id))
1448 && force == FALSE)
1449 return;
1450
1451 if (channel == 'B')
1452 scsi_id += 8;
1453 tstate = ahc->enabled_targets[scsi_id];
1454 if (tstate != NULL)
1455 free(tstate, M_DEVBUF);
1456 ahc->enabled_targets[scsi_id] = NULL;
1457}
1458#endif
1459
1460/*
1461 * Called when we have an active connection to a target on the bus,
1462 * this function finds the nearest syncrate to the input period limited
1463 * by the capabilities of the bus connectivity of and sync settings for
1464 * the target.
1465 */
1466struct ahc_syncrate *
1467ahc_devlimited_syncrate(struct ahc_softc *ahc,
1468 struct ahc_initiator_tinfo *tinfo,
1469 u_int *period, u_int *ppr_options, role_t role) {
1470 struct ahc_transinfo *transinfo;
1471 u_int maxsync;
1472
1473 if ((ahc->features & AHC_ULTRA2) != 0) {
1474 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1475 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1476 maxsync = AHC_SYNCRATE_DT;
1477 } else {
1478 maxsync = AHC_SYNCRATE_ULTRA;
1479 /* Can't do DT on an SE bus */
1480 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1481 }
1482 } else if ((ahc->features & AHC_ULTRA) != 0
1483 && (ahc->flags & AHC_ULTRA_DISABLED) == 0) {
1484 maxsync = AHC_SYNCRATE_ULTRA;
1485 } else {
1486 maxsync = AHC_SYNCRATE_FAST;
1487 }
1488 /*
1489 * Never allow a value higher than our current goal
1490 * period otherwise we may allow a target initiated
1491 * negotiation to go above the limit as set by the
1492 * user. In the case of an initiator initiated
1493 * sync negotiation, we limit based on the user
1494 * setting. This allows the system to still accept
1495 * incoming negotiations even if target initiated
1496 * negotiation is not performed.
1497 */
1498 if (role == ROLE_TARGET)
1499 transinfo = &tinfo->user;
1500 else
1501 transinfo = &tinfo->goal;
1502 *ppr_options &= transinfo->ppr_options;
1503 if (transinfo->period == 0) {
1504 *period = 0;
1505 *ppr_options = 0;
1506 return (NULL);
1507 }
1508 *period = MAX(*period, transinfo->period);
1509 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1510}
1511
1512/*
1513 * Look up the valid period to SCSIRATE conversion in our table.
1514 * Return the period and offset that should be sent to the target
1515 * if this was the beginning of an SDTR.
1516 */
1517struct ahc_syncrate *
1518ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1519 u_int *ppr_options, u_int maxsync)
1520{
1521 struct ahc_syncrate *syncrate;
1522
1523 if ((ahc->features & AHC_DT) == 0)
1524 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1525
1526 for (syncrate = &ahc_syncrates[maxsync];
1527 syncrate->rate != NULL;
1528 syncrate++) {
1529
1530 /*
1531 * The Ultra2 table doesn't go as low
1532 * as for the Fast/Ultra cards.
1533 */
1534 if ((ahc->features & AHC_ULTRA2) != 0
1535 && (syncrate->sxfr_u2 == 0))
1536 break;
1537
1538 /* Skip any DT entries if DT is not available */
1539 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1540 && (syncrate->sxfr_u2 & DT_SXFR) != 0)
1541 continue;
1542
1543 if (*period <= syncrate->period) {
1544 /*
1545 * When responding to a target that requests
1546 * sync, the requested rate may fall between
1547 * two rates that we can output, but still be
1548 * a rate that we can receive. Because of this,
1549 * we want to respond to the target with
1550 * the same rate that it sent to us even
1551 * if the period we use to send data to it
1552 * is lower. Only lower the response period
1553 * if we must.
1554 */
1555 if (syncrate == &ahc_syncrates[maxsync])
1556 *period = syncrate->period;
1557
1558 /*
1559 * At some speeds, we only support
1560 * ST transfers.
1561 */
1562 if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1563 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1564 break;
1565 }
1566 }
1567
1568 if ((*period == 0)
1569 || (syncrate->rate == NULL)
1570 || ((ahc->features & AHC_ULTRA2) != 0
1571 && (syncrate->sxfr_u2 == 0))) {
1572 /* Use asynchronous transfers. */
1573 *period = 0;
1574 syncrate = NULL;
1575 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1576 }
1577 return (syncrate);
1578}
1579
1580/*
1581 * Convert from an entry in our syncrate table to the SCSI equivalent
1582 * sync "period" factor.
1583 */
1584u_int
1585ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1586{
1587 struct ahc_syncrate *syncrate;
1588
1589 if ((ahc->features & AHC_ULTRA2) != 0)
1590 scsirate &= SXFR_ULTRA2;
1591 else
1592 scsirate &= SXFR;
1593
1594 syncrate = &ahc_syncrates[maxsync];
1595 while (syncrate->rate != NULL) {
1596
1597 if ((ahc->features & AHC_ULTRA2) != 0) {
1598 if (syncrate->sxfr_u2 == 0)
1599 break;
1600 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1601 return (syncrate->period);
1602 } else if (scsirate == (syncrate->sxfr & SXFR)) {
1603 return (syncrate->period);
1604 }
1605 syncrate++;
1606 }
1607 return (0); /* async */
1608}
1609
1610/*
1611 * Truncate the given synchronous offset to a value the
1612 * current adapter type and syncrate are capable of.
1613 */
1614void
1615ahc_validate_offset(struct ahc_softc *ahc,
1616 struct ahc_initiator_tinfo *tinfo,
1617 struct ahc_syncrate *syncrate,
1618 u_int *offset, int wide, role_t role)
1619{
1620 u_int maxoffset;
1621
1622 /* Limit offset to what we can do */
1623 if (syncrate == NULL) {
1624 maxoffset = 0;
1625 } else if ((ahc->features & AHC_ULTRA2) != 0) {
1626 maxoffset = MAX_OFFSET_ULTRA2;
1627 } else {
1628 if (wide)
1629 maxoffset = MAX_OFFSET_16BIT;
1630 else
1631 maxoffset = MAX_OFFSET_8BIT;
1632 }
1633 *offset = MIN(*offset, maxoffset);
1634 if (tinfo != NULL) {
1635 if (role == ROLE_TARGET)
1636 *offset = MIN(*offset, tinfo->user.offset);
1637 else
1638 *offset = MIN(*offset, tinfo->goal.offset);
1639 }
1640}
1641
1642/*
1643 * Truncate the given transfer width parameter to a value the
1644 * current adapter type is capable of.
1645 */
1646void
1647ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1648 u_int *bus_width, role_t role)
1649{
1650 switch (*bus_width) {
1651 default:
1652 if (ahc->features & AHC_WIDE) {
1653 /* Respond Wide */
1654 *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1655 break;
1656 }
1657 /* FALLTHROUGH */
1658 case MSG_EXT_WDTR_BUS_8_BIT:
1659 *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1660 break;
1661 }
1662 if (tinfo != NULL) {
1663 if (role == ROLE_TARGET)
1664 *bus_width = MIN(tinfo->user.width, *bus_width);
1665 else
1666 *bus_width = MIN(tinfo->goal.width, *bus_width);
1667 }
1668}
1669
1670/*
1671 * Update the bitmask of targets for which the controller should
1672 * negotiate with at the next convenient oportunity. This currently
1673 * means the next time we send the initial identify messages for
1674 * a new transaction.
1675 */
1667void
1668ahc_update_target_msg_request(struct ahc_softc *ahc,
1669 struct ahc_devinfo *devinfo,
1670 struct ahc_initiator_tinfo *tinfo,
1671 int force, int paused)
1676int
1677ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1678 struct ahc_tmode_tstate *tstate,
1679 struct ahc_initiator_tinfo *tinfo, int force)
1672{
1680{
1673 u_int targ_msg_req_orig;
1681 u_int auto_negotiate_orig;
1674
1682
1675 targ_msg_req_orig = ahc->targ_msg_req;
1683 auto_negotiate_orig = tstate->auto_negotiate;
1676 if (tinfo->current.period != tinfo->goal.period
1677 || tinfo->current.width != tinfo->goal.width
1678 || tinfo->current.offset != tinfo->goal.offset
1679 || tinfo->current.ppr_options != tinfo->goal.ppr_options
1680 || (force
1681 && (tinfo->goal.period != 0
1682 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1683 || tinfo->goal.ppr_options != 0)))
1684 if (tinfo->current.period != tinfo->goal.period
1685 || tinfo->current.width != tinfo->goal.width
1686 || tinfo->current.offset != tinfo->goal.offset
1687 || tinfo->current.ppr_options != tinfo->goal.ppr_options
1688 || (force
1689 && (tinfo->goal.period != 0
1690 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1691 || tinfo->goal.ppr_options != 0)))
1684 ahc->targ_msg_req |= devinfo->target_mask;
1692 tstate->auto_negotiate |= devinfo->target_mask;
1685 else
1693 else
1686 ahc->targ_msg_req &= ~devinfo->target_mask;
1694 tstate->auto_negotiate &= ~devinfo->target_mask;
1687
1695
1688 if (ahc->targ_msg_req != targ_msg_req_orig) {
1689 /* Update the message request bit for this target */
1690 if (!paused)
1691 ahc_pause(ahc);
1692
1693 ahc_outb(ahc, TARGET_MSG_REQUEST,
1694 ahc->targ_msg_req & 0xFF);
1695 ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
1696 (ahc->targ_msg_req >> 8) & 0xFF);
1697
1698 if (!paused)
1699 ahc_unpause(ahc);
1700 }
1696 return (auto_negotiate_orig != tstate->auto_negotiate);
1701}
1702
1703/*
1704 * Update the user/goal/current tables of synchronous negotiation
1705 * parameters as well as, in the case of a current or active update,
1706 * any data structures on the host controller. In the case of an
1707 * active update, the specified target is currently talking to us on
1708 * the bus, so the transfer parameter update must take effect
1709 * immediately.
1710 */
1711void
1712ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1713 struct ahc_syncrate *syncrate, u_int period,
1714 u_int offset, u_int ppr_options, u_int type, int paused)
1715{
1716 struct ahc_initiator_tinfo *tinfo;
1697}
1698
1699/*
1700 * Update the user/goal/current tables of synchronous negotiation
1701 * parameters as well as, in the case of a current or active update,
1702 * any data structures on the host controller. In the case of an
1703 * active update, the specified target is currently talking to us on
1704 * the bus, so the transfer parameter update must take effect
1705 * immediately.
1706 */
1707void
1708ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1709 struct ahc_syncrate *syncrate, u_int period,
1710 u_int offset, u_int ppr_options, u_int type, int paused)
1711{
1712 struct ahc_initiator_tinfo *tinfo;
1717 struct tmode_tstate *tstate;
1713 struct ahc_tmode_tstate *tstate;
1718 u_int old_period;
1719 u_int old_offset;
1720 u_int old_ppr;
1714 u_int old_period;
1715 u_int old_offset;
1716 u_int old_ppr;
1721 int active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1717 int active;
1718 int update_needed;
1722
1719
1720 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1721 update_needed = 0;
1722
1723 if (syncrate == NULL) {
1724 period = 0;
1725 offset = 0;
1726 }
1727
1728 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1729 devinfo->target, &tstate);
1730
1731 if ((type & AHC_TRANS_USER) != 0) {
1732 tinfo->user.period = period;
1733 tinfo->user.offset = offset;
1734 tinfo->user.ppr_options = ppr_options;
1735 }
1736
1737 if ((type & AHC_TRANS_GOAL) != 0) {
1738 tinfo->goal.period = period;
1739 tinfo->goal.offset = offset;
1740 tinfo->goal.ppr_options = ppr_options;
1741 }
1742
1743 old_period = tinfo->current.period;
1744 old_offset = tinfo->current.offset;
1745 old_ppr = tinfo->current.ppr_options;
1746
1747 if ((type & AHC_TRANS_CUR) != 0
1748 && (old_period != period
1749 || old_offset != offset
1750 || old_ppr != ppr_options)) {
1751 u_int scsirate;
1752
1723 if (syncrate == NULL) {
1724 period = 0;
1725 offset = 0;
1726 }
1727
1728 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1729 devinfo->target, &tstate);
1730
1731 if ((type & AHC_TRANS_USER) != 0) {
1732 tinfo->user.period = period;
1733 tinfo->user.offset = offset;
1734 tinfo->user.ppr_options = ppr_options;
1735 }
1736
1737 if ((type & AHC_TRANS_GOAL) != 0) {
1738 tinfo->goal.period = period;
1739 tinfo->goal.offset = offset;
1740 tinfo->goal.ppr_options = ppr_options;
1741 }
1742
1743 old_period = tinfo->current.period;
1744 old_offset = tinfo->current.offset;
1745 old_ppr = tinfo->current.ppr_options;
1746
1747 if ((type & AHC_TRANS_CUR) != 0
1748 && (old_period != period
1749 || old_offset != offset
1750 || old_ppr != ppr_options)) {
1751 u_int scsirate;
1752
1753 update_needed++;
1753 scsirate = tinfo->scsirate;
1754 if ((ahc->features & AHC_ULTRA2) != 0) {
1755
1756 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1757 if (syncrate != NULL) {
1758 scsirate |= syncrate->sxfr_u2;
1759 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1760 scsirate |= ENABLE_CRC;
1761 else
1762 scsirate |= SINGLE_EDGE;
1763 }
1764 } else {
1765
1766 scsirate &= ~(SXFR|SOFS);
1767 /*
1768 * Ensure Ultra mode is set properly for
1769 * this target.
1770 */
1771 tstate->ultraenb &= ~devinfo->target_mask;
1772 if (syncrate != NULL) {
1773 if (syncrate->sxfr & ULTRA_SXFR) {
1774 tstate->ultraenb |=
1775 devinfo->target_mask;
1776 }
1777 scsirate |= syncrate->sxfr & SXFR;
1778 scsirate |= offset & SOFS;
1779 }
1780 if (active) {
1781 u_int sxfrctl0;
1782
1783 sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1784 sxfrctl0 &= ~FAST20;
1785 if (tstate->ultraenb & devinfo->target_mask)
1786 sxfrctl0 |= FAST20;
1787 ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1788 }
1789 }
1790 if (active) {
1791 ahc_outb(ahc, SCSIRATE, scsirate);
1792 if ((ahc->features & AHC_ULTRA2) != 0)
1793 ahc_outb(ahc, SCSIOFFSET, offset);
1794 }
1795
1796 tinfo->scsirate = scsirate;
1797 tinfo->current.period = period;
1798 tinfo->current.offset = offset;
1799 tinfo->current.ppr_options = ppr_options;
1800
1754 scsirate = tinfo->scsirate;
1755 if ((ahc->features & AHC_ULTRA2) != 0) {
1756
1757 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1758 if (syncrate != NULL) {
1759 scsirate |= syncrate->sxfr_u2;
1760 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1761 scsirate |= ENABLE_CRC;
1762 else
1763 scsirate |= SINGLE_EDGE;
1764 }
1765 } else {
1766
1767 scsirate &= ~(SXFR|SOFS);
1768 /*
1769 * Ensure Ultra mode is set properly for
1770 * this target.
1771 */
1772 tstate->ultraenb &= ~devinfo->target_mask;
1773 if (syncrate != NULL) {
1774 if (syncrate->sxfr & ULTRA_SXFR) {
1775 tstate->ultraenb |=
1776 devinfo->target_mask;
1777 }
1778 scsirate |= syncrate->sxfr & SXFR;
1779 scsirate |= offset & SOFS;
1780 }
1781 if (active) {
1782 u_int sxfrctl0;
1783
1784 sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1785 sxfrctl0 &= ~FAST20;
1786 if (tstate->ultraenb & devinfo->target_mask)
1787 sxfrctl0 |= FAST20;
1788 ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1789 }
1790 }
1791 if (active) {
1792 ahc_outb(ahc, SCSIRATE, scsirate);
1793 if ((ahc->features & AHC_ULTRA2) != 0)
1794 ahc_outb(ahc, SCSIOFFSET, offset);
1795 }
1796
1797 tinfo->scsirate = scsirate;
1798 tinfo->current.period = period;
1799 tinfo->current.offset = offset;
1800 tinfo->current.ppr_options = ppr_options;
1801
1801 /* Update the syncrates in any pending scbs */
1802 ahc_update_pending_syncrates(ahc);
1803
1804 ahc_send_async(ahc, devinfo->channel, devinfo->target,
1805 CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
1806 if (bootverbose) {
1807 if (offset != 0) {
1808 printf("%s: target %d synchronous at %sMHz%s, "
1809 "offset = 0x%x\n", ahc_name(ahc),
1810 devinfo->target, syncrate->rate,
1811 (ppr_options & MSG_EXT_PPR_DT_REQ)
1812 ? " DT" : "", offset);
1813 } else {
1814 printf("%s: target %d using "
1815 "asynchronous transfers\n",
1816 ahc_name(ahc), devinfo->target);
1817 }
1818 }
1819 }
1820
1802 ahc_send_async(ahc, devinfo->channel, devinfo->target,
1803 CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
1804 if (bootverbose) {
1805 if (offset != 0) {
1806 printf("%s: target %d synchronous at %sMHz%s, "
1807 "offset = 0x%x\n", ahc_name(ahc),
1808 devinfo->target, syncrate->rate,
1809 (ppr_options & MSG_EXT_PPR_DT_REQ)
1810 ? " DT" : "", offset);
1811 } else {
1812 printf("%s: target %d using "
1813 "asynchronous transfers\n",
1814 ahc_name(ahc), devinfo->target);
1815 }
1816 }
1817 }
1818
1821 ahc_update_target_msg_request(ahc, devinfo, tinfo,
1822 /*force*/FALSE,
1823 paused);
1819 update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
1820 tinfo, /*force*/FALSE);
1821
1822 if (update_needed)
1823 ahc_update_pending_scbs(ahc);
1824}
1825
1826/*
1827 * Update the user/goal/current tables of wide negotiation
1828 * parameters as well as, in the case of a current or active update,
1829 * any data structures on the host controller. In the case of an
1830 * active update, the specified target is currently talking to us on
1831 * the bus, so the transfer parameter update must take effect
1832 * immediately.
1833 */
1834void
1835ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1836 u_int width, u_int type, int paused)
1837{
1824}
1825
1826/*
1827 * Update the user/goal/current tables of wide negotiation
1828 * parameters as well as, in the case of a current or active update,
1829 * any data structures on the host controller. In the case of an
1830 * active update, the specified target is currently talking to us on
1831 * the bus, so the transfer parameter update must take effect
1832 * immediately.
1833 */
1834void
1835ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1836 u_int width, u_int type, int paused)
1837{
1838 struct ahc_initiator_tinfo *tinfo;
1839 struct tmode_tstate *tstate;
1840 u_int oldwidth;
1841 int active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1838 struct ahc_initiator_tinfo *tinfo;
1839 struct ahc_tmode_tstate *tstate;
1840 u_int oldwidth;
1841 int active;
1842 int update_needed;
1842
1843
1844 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1845 update_needed = 0;
1843 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1844 devinfo->target, &tstate);
1845
1846 if ((type & AHC_TRANS_USER) != 0)
1847 tinfo->user.width = width;
1848
1849 if ((type & AHC_TRANS_GOAL) != 0)
1850 tinfo->goal.width = width;
1851
1852 oldwidth = tinfo->current.width;
1853 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
1854 u_int scsirate;
1855
1846 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1847 devinfo->target, &tstate);
1848
1849 if ((type & AHC_TRANS_USER) != 0)
1850 tinfo->user.width = width;
1851
1852 if ((type & AHC_TRANS_GOAL) != 0)
1853 tinfo->goal.width = width;
1854
1855 oldwidth = tinfo->current.width;
1856 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
1857 u_int scsirate;
1858
1859 update_needed++;
1856 scsirate = tinfo->scsirate;
1857 scsirate &= ~WIDEXFER;
1858 if (width == MSG_EXT_WDTR_BUS_16_BIT)
1859 scsirate |= WIDEXFER;
1860
1861 tinfo->scsirate = scsirate;
1862
1863 if (active)
1864 ahc_outb(ahc, SCSIRATE, scsirate);
1865
1866 tinfo->current.width = width;
1867
1868 ahc_send_async(ahc, devinfo->channel, devinfo->target,
1869 CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
1870 if (bootverbose) {
1871 printf("%s: target %d using %dbit transfers\n",
1872 ahc_name(ahc), devinfo->target,
1873 8 * (0x01 << width));
1874 }
1875 }
1876
1860 scsirate = tinfo->scsirate;
1861 scsirate &= ~WIDEXFER;
1862 if (width == MSG_EXT_WDTR_BUS_16_BIT)
1863 scsirate |= WIDEXFER;
1864
1865 tinfo->scsirate = scsirate;
1866
1867 if (active)
1868 ahc_outb(ahc, SCSIRATE, scsirate);
1869
1870 tinfo->current.width = width;
1871
1872 ahc_send_async(ahc, devinfo->channel, devinfo->target,
1873 CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
1874 if (bootverbose) {
1875 printf("%s: target %d using %dbit transfers\n",
1876 ahc_name(ahc), devinfo->target,
1877 8 * (0x01 << width));
1878 }
1879 }
1880
1877 ahc_update_target_msg_request(ahc, devinfo, tinfo,
1878 /*force*/FALSE, paused);
1881 update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
1882 tinfo, /*force*/FALSE);
1883 if (update_needed)
1884 ahc_update_pending_scbs(ahc);
1879}
1880
1881/*
1882 * Update the current state of tagged queuing for a given target.
1883 */
1884void
1885ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable)
1886{
1887 struct ahc_initiator_tinfo *tinfo;
1885}
1886
1887/*
1888 * Update the current state of tagged queuing for a given target.
1889 */
1890void
1891ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable)
1892{
1893 struct ahc_initiator_tinfo *tinfo;
1888 struct tmode_tstate *tstate;
1894 struct ahc_tmode_tstate *tstate;
1889 uint16_t orig_tagenable;
1890
1891 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1892 devinfo->target, &tstate);
1893
1894 orig_tagenable = tstate->tagenable;
1895 if (enable)
1896 tstate->tagenable |= devinfo->target_mask;
1897 else
1898 tstate->tagenable &= ~devinfo->target_mask;
1899
1900 if (orig_tagenable != tstate->tagenable) {
1901 ahc_platform_set_tags(ahc, devinfo, enable);
1902 ahc_send_async(ahc, devinfo->channel, devinfo->target,
1903 devinfo->lun, AC_TRANSFER_NEG);
1904 }
1905
1906}
1907
1908/*
1909 * When the transfer settings for a connection change, update any
1910 * in-transit SCBs to contain the new data so the hardware will
1911 * be set correctly during future (re)selections.
1912 */
1913static void
1895 uint16_t orig_tagenable;
1896
1897 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1898 devinfo->target, &tstate);
1899
1900 orig_tagenable = tstate->tagenable;
1901 if (enable)
1902 tstate->tagenable |= devinfo->target_mask;
1903 else
1904 tstate->tagenable &= ~devinfo->target_mask;
1905
1906 if (orig_tagenable != tstate->tagenable) {
1907 ahc_platform_set_tags(ahc, devinfo, enable);
1908 ahc_send_async(ahc, devinfo->channel, devinfo->target,
1909 devinfo->lun, AC_TRANSFER_NEG);
1910 }
1911
1912}
1913
1914/*
1915 * When the transfer settings for a connection change, update any
1916 * in-transit SCBs to contain the new data so the hardware will
1917 * be set correctly during future (re)selections.
1918 */
1919static void
1914ahc_update_pending_syncrates(struct ahc_softc *ahc)
1920ahc_update_pending_scbs(struct ahc_softc *ahc)
1915{
1916 struct scb *pending_scb;
1917 int pending_scb_count;
1918 int i;
1919 u_int saved_scbptr;
1920
1921 /*
1922 * Traverse the pending SCB list and ensure that all of the
1923 * SCBs there have the proper settings.
1924 */
1925 pending_scb_count = 0;
1926 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
1927 struct ahc_devinfo devinfo;
1928 struct hardware_scb *pending_hscb;
1929 struct ahc_initiator_tinfo *tinfo;
1921{
1922 struct scb *pending_scb;
1923 int pending_scb_count;
1924 int i;
1925 u_int saved_scbptr;
1926
1927 /*
1928 * Traverse the pending SCB list and ensure that all of the
1929 * SCBs there have the proper settings.
1930 */
1931 pending_scb_count = 0;
1932 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
1933 struct ahc_devinfo devinfo;
1934 struct hardware_scb *pending_hscb;
1935 struct ahc_initiator_tinfo *tinfo;
1930 struct tmode_tstate *tstate;
1936 struct ahc_tmode_tstate *tstate;
1931
1932 ahc_scb_devinfo(ahc, &devinfo, pending_scb);
1933 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
1934 devinfo.our_scsiid,
1935 devinfo.target, &tstate);
1936 pending_hscb = pending_scb->hscb;
1937 pending_hscb->control &= ~ULTRAENB;
1938 if ((tstate->ultraenb & devinfo.target_mask) != 0)
1939 pending_hscb->control |= ULTRAENB;
1940 pending_hscb->scsirate = tinfo->scsirate;
1941 pending_hscb->scsioffset = tinfo->current.offset;
1937
1938 ahc_scb_devinfo(ahc, &devinfo, pending_scb);
1939 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
1940 devinfo.our_scsiid,
1941 devinfo.target, &tstate);
1942 pending_hscb = pending_scb->hscb;
1943 pending_hscb->control &= ~ULTRAENB;
1944 if ((tstate->ultraenb & devinfo.target_mask) != 0)
1945 pending_hscb->control |= ULTRAENB;
1946 pending_hscb->scsirate = tinfo->scsirate;
1947 pending_hscb->scsioffset = tinfo->current.offset;
1948 if ((tstate->auto_negotiate & devinfo.target_mask) == 0
1949 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
1950 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
1951 pending_hscb->control &= ~MK_MESSAGE;
1952 }
1942 pending_scb_count++;
1943 }
1944
1945 if (pending_scb_count == 0)
1946 return;
1947
1948 saved_scbptr = ahc_inb(ahc, SCBPTR);
1949 /* Ensure that the hscbs down on the card match the new information */
1950 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
1951 struct hardware_scb *pending_hscb;
1952 u_int control;
1953 u_int scb_tag;
1954
1955 ahc_outb(ahc, SCBPTR, i);
1956 scb_tag = ahc_inb(ahc, SCB_TAG);
1957 pending_scb = ahc_lookup_scb(ahc, scb_tag);
1958 if (pending_scb == NULL)
1959 continue;
1960
1961 pending_hscb = pending_scb->hscb;
1962 control = ahc_inb(ahc, SCB_CONTROL);
1953 pending_scb_count++;
1954 }
1955
1956 if (pending_scb_count == 0)
1957 return;
1958
1959 saved_scbptr = ahc_inb(ahc, SCBPTR);
1960 /* Ensure that the hscbs down on the card match the new information */
1961 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
1962 struct hardware_scb *pending_hscb;
1963 u_int control;
1964 u_int scb_tag;
1965
1966 ahc_outb(ahc, SCBPTR, i);
1967 scb_tag = ahc_inb(ahc, SCB_TAG);
1968 pending_scb = ahc_lookup_scb(ahc, scb_tag);
1969 if (pending_scb == NULL)
1970 continue;
1971
1972 pending_hscb = pending_scb->hscb;
1973 control = ahc_inb(ahc, SCB_CONTROL);
1963 control &= ~ULTRAENB;
1964 if ((pending_hscb->control & ULTRAENB) != 0)
1965 control |= ULTRAENB;
1974 control &= ~(ULTRAENB|MK_MESSAGE);
1975 control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
1966 ahc_outb(ahc, SCB_CONTROL, control);
1967 ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
1968 ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
1969 }
1970 ahc_outb(ahc, SCBPTR, saved_scbptr);
1971}
1972
1973/**************************** Pathing Information *****************************/
1974static void
1975ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1976{
1977 u_int saved_scsiid;
1978 role_t role;
1979 int our_id;
1980
1981 if (ahc_inb(ahc, SSTAT0) & TARGET)
1982 role = ROLE_TARGET;
1983 else
1984 role = ROLE_INITIATOR;
1985
1986 if (role == ROLE_TARGET
1987 && (ahc->features & AHC_MULTI_TID) != 0
1988 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
1989 /* We were selected, so pull our id from TARGIDIN */
1990 our_id = ahc_inb(ahc, TARGIDIN) & OID;
1991 } else if ((ahc->features & AHC_ULTRA2) != 0)
1992 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
1993 else
1994 our_id = ahc_inb(ahc, SCSIID) & OID;
1995
1996 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1997 ahc_compile_devinfo(devinfo,
1998 our_id,
1999 SCSIID_TARGET(ahc, saved_scsiid),
2000 ahc_inb(ahc, SAVED_LUN),
2001 SCSIID_CHANNEL(ahc, saved_scsiid),
2002 role);
2003}
2004
2005struct ahc_phase_table_entry*
2006ahc_lookup_phase_entry(int phase)
2007{
2008 struct ahc_phase_table_entry *entry;
2009 struct ahc_phase_table_entry *last_entry;
2010
2011 /*
2012 * num_phases doesn't include the default entry which
2013 * will be returned if the phase doesn't match.
2014 */
2015 last_entry = &ahc_phase_table[num_phases];
2016 for (entry = ahc_phase_table; entry < last_entry; entry++) {
2017 if (phase == entry->phase)
2018 break;
2019 }
2020 return (entry);
2021}
2022
2023void
2024ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2025 u_int lun, char channel, role_t role)
2026{
2027 devinfo->our_scsiid = our_id;
2028 devinfo->target = target;
2029 devinfo->lun = lun;
2030 devinfo->target_offset = target;
2031 devinfo->channel = channel;
2032 devinfo->role = role;
2033 if (channel == 'B')
2034 devinfo->target_offset += 8;
2035 devinfo->target_mask = (0x01 << devinfo->target_offset);
2036}
2037
2038static void
2039ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2040 struct scb *scb)
2041{
2042 role_t role;
2043 int our_id;
2044
2045 our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
2046 role = ROLE_INITIATOR;
2047 if ((scb->hscb->control & TARGET_SCB) != 0)
2048 role = ROLE_TARGET;
2049 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
2050 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
2051}
2052
2053
2054/************************ Message Phase Processing ****************************/
2055/*
2056 * When an initiator transaction with the MK_MESSAGE flag either reconnects
2057 * or enters the initial message out phase, we are interrupted. Fill our
2058 * outgoing message buffer with the appropriate message and beging handing
2059 * the message phase(s) manually.
2060 */
2061static void
2062ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2063 struct scb *scb)
2064{
2065 /*
2066 * To facilitate adding multiple messages together,
2067 * each routine should increment the index and len
2068 * variables instead of setting them explicitly.
2069 */
2070 ahc->msgout_index = 0;
2071 ahc->msgout_len = 0;
2072
2073 if ((scb->flags & SCB_DEVICE_RESET) == 0
2074 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
2075 u_int identify_msg;
2076
2077 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
2078 if ((scb->hscb->control & DISCENB) != 0)
2079 identify_msg |= MSG_IDENTIFY_DISCFLAG;
2080 ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
2081 ahc->msgout_len++;
2082
2083 if ((scb->hscb->control & TAG_ENB) != 0) {
2084 ahc->msgout_buf[ahc->msgout_index++] =
2085 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
2086 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
2087 ahc->msgout_len += 2;
2088 }
2089 }
2090
2091 if (scb->flags & SCB_DEVICE_RESET) {
2092 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
2093 ahc->msgout_len++;
2094 ahc_print_path(ahc, scb);
2095 printf("Bus Device Reset Message Sent\n");
2096 /*
2097 * Clear our selection hardware in advance of
2098 * the busfree. We may have an entry in the waiting
2099 * Q for this target, and we don't want to go about
2100 * selecting while we handle the busfree and blow it
2101 * away.
2102 */
2103 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2104 } else if ((scb->flags & SCB_ABORT) != 0) {
2105 if ((scb->hscb->control & TAG_ENB) != 0)
2106 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
2107 else
2108 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2109 ahc->msgout_len++;
2110 ahc_print_path(ahc, scb);
2111 printf("Abort%s Message Sent\n",
2112 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
2113 /*
2114 * Clear our selection hardware in advance of
2115 * the busfree. We may have an entry in the waiting
2116 * Q for this target, and we don't want to go about
2117 * selecting while we handle the busfree and blow it
2118 * away.
2119 */
2120 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
1976 ahc_outb(ahc, SCB_CONTROL, control);
1977 ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
1978 ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
1979 }
1980 ahc_outb(ahc, SCBPTR, saved_scbptr);
1981}
1982
1983/**************************** Pathing Information *****************************/
1984static void
1985ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1986{
1987 u_int saved_scsiid;
1988 role_t role;
1989 int our_id;
1990
1991 if (ahc_inb(ahc, SSTAT0) & TARGET)
1992 role = ROLE_TARGET;
1993 else
1994 role = ROLE_INITIATOR;
1995
1996 if (role == ROLE_TARGET
1997 && (ahc->features & AHC_MULTI_TID) != 0
1998 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
1999 /* We were selected, so pull our id from TARGIDIN */
2000 our_id = ahc_inb(ahc, TARGIDIN) & OID;
2001 } else if ((ahc->features & AHC_ULTRA2) != 0)
2002 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
2003 else
2004 our_id = ahc_inb(ahc, SCSIID) & OID;
2005
2006 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
2007 ahc_compile_devinfo(devinfo,
2008 our_id,
2009 SCSIID_TARGET(ahc, saved_scsiid),
2010 ahc_inb(ahc, SAVED_LUN),
2011 SCSIID_CHANNEL(ahc, saved_scsiid),
2012 role);
2013}
2014
2015struct ahc_phase_table_entry*
2016ahc_lookup_phase_entry(int phase)
2017{
2018 struct ahc_phase_table_entry *entry;
2019 struct ahc_phase_table_entry *last_entry;
2020
2021 /*
2022 * num_phases doesn't include the default entry which
2023 * will be returned if the phase doesn't match.
2024 */
2025 last_entry = &ahc_phase_table[num_phases];
2026 for (entry = ahc_phase_table; entry < last_entry; entry++) {
2027 if (phase == entry->phase)
2028 break;
2029 }
2030 return (entry);
2031}
2032
2033void
2034ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2035 u_int lun, char channel, role_t role)
2036{
2037 devinfo->our_scsiid = our_id;
2038 devinfo->target = target;
2039 devinfo->lun = lun;
2040 devinfo->target_offset = target;
2041 devinfo->channel = channel;
2042 devinfo->role = role;
2043 if (channel == 'B')
2044 devinfo->target_offset += 8;
2045 devinfo->target_mask = (0x01 << devinfo->target_offset);
2046}
2047
2048static void
2049ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2050 struct scb *scb)
2051{
2052 role_t role;
2053 int our_id;
2054
2055 our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
2056 role = ROLE_INITIATOR;
2057 if ((scb->hscb->control & TARGET_SCB) != 0)
2058 role = ROLE_TARGET;
2059 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
2060 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
2061}
2062
2063
2064/************************ Message Phase Processing ****************************/
/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
2071static void
2072ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2073 struct scb *scb)
2074{
2075 /*
2076 * To facilitate adding multiple messages together,
2077 * each routine should increment the index and len
2078 * variables instead of setting them explicitly.
2079 */
2080 ahc->msgout_index = 0;
2081 ahc->msgout_len = 0;
2082
2083 if ((scb->flags & SCB_DEVICE_RESET) == 0
2084 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
2085 u_int identify_msg;
2086
2087 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
2088 if ((scb->hscb->control & DISCENB) != 0)
2089 identify_msg |= MSG_IDENTIFY_DISCFLAG;
2090 ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
2091 ahc->msgout_len++;
2092
2093 if ((scb->hscb->control & TAG_ENB) != 0) {
2094 ahc->msgout_buf[ahc->msgout_index++] =
2095 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
2096 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
2097 ahc->msgout_len += 2;
2098 }
2099 }
2100
2101 if (scb->flags & SCB_DEVICE_RESET) {
2102 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
2103 ahc->msgout_len++;
2104 ahc_print_path(ahc, scb);
2105 printf("Bus Device Reset Message Sent\n");
2106 /*
2107 * Clear our selection hardware in advance of
2108 * the busfree. We may have an entry in the waiting
2109 * Q for this target, and we don't want to go about
2110 * selecting while we handle the busfree and blow it
2111 * away.
2112 */
2113 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2114 } else if ((scb->flags & SCB_ABORT) != 0) {
2115 if ((scb->hscb->control & TAG_ENB) != 0)
2116 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
2117 else
2118 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2119 ahc->msgout_len++;
2120 ahc_print_path(ahc, scb);
2121 printf("Abort%s Message Sent\n",
2122 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
2123 /*
2124 * Clear our selection hardware in advance of
2125 * the busfree. We may have an entry in the waiting
2126 * Q for this target, and we don't want to go about
2127 * selecting while we handle the busfree and blow it
2128 * away.
2129 */
2130 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2121 } else if ((ahc->targ_msg_req & devinfo->target_mask) != 0
2122 || (scb->flags & SCB_NEGOTIATE) != 0) {
2131 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
2123 ahc_build_transfer_msg(ahc, devinfo);
2124 } else {
2125 printf("ahc_intr: AWAITING_MSG for an SCB that "
2126 "does not have a waiting message\n");
2127 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2128 devinfo->target_mask);
2129 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2130 "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
2131 ahc_inb(ahc, MSG_OUT), scb->flags);
2132 }
2133
2134 /*
2135 * Clear the MK_MESSAGE flag from the SCB so we aren't
2136 * asked to send this message again.
2137 */
2138 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
2132 ahc_build_transfer_msg(ahc, devinfo);
2133 } else {
2134 printf("ahc_intr: AWAITING_MSG for an SCB that "
2135 "does not have a waiting message\n");
2136 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2137 devinfo->target_mask);
2138 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2139 "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
2140 ahc_inb(ahc, MSG_OUT), scb->flags);
2141 }
2142
2143 /*
2144 * Clear the MK_MESSAGE flag from the SCB so we aren't
2145 * asked to send this message again.
2146 */
2147 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
2148 scb->hscb->control &= ~MK_MESSAGE;
2139 ahc->msgout_index = 0;
2140 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2141}
2142
2143/*
2144 * Build an appropriate transfer negotiation message for the
2145 * currently active target.
2146 */
2147static void
2148ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2149{
2150 /*
2151 * We need to initiate transfer negotiations.
2152 * If our current and goal settings are identical,
2153 * we want to renegotiate due to a check condition.
2154 */
2155 struct ahc_initiator_tinfo *tinfo;
2149 ahc->msgout_index = 0;
2150 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2151}
2152
2153/*
2154 * Build an appropriate transfer negotiation message for the
2155 * currently active target.
2156 */
2157static void
2158ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2159{
2160 /*
2161 * We need to initiate transfer negotiations.
2162 * If our current and goal settings are identical,
2163 * we want to renegotiate due to a check condition.
2164 */
2165 struct ahc_initiator_tinfo *tinfo;
2156 struct tmode_tstate *tstate;
2166 struct ahc_tmode_tstate *tstate;
2157 struct ahc_syncrate *rate;
2158 int dowide;
2159 int dosync;
2160 int doppr;
2161 int use_ppr;
2162 u_int period;
2163 u_int ppr_options;
2164 u_int offset;
2165
2166 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2167 devinfo->target, &tstate);
2168 dowide = tinfo->current.width != tinfo->goal.width;
2169 dosync = tinfo->current.period != tinfo->goal.period;
2170 doppr = tinfo->current.ppr_options != tinfo->goal.ppr_options;
2171
2172 if (!dowide && !dosync && !doppr) {
2173 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
2174 dosync = tinfo->goal.period != 0;
2175 doppr = tinfo->goal.ppr_options != 0;
2176 }
2177
2178 if (!dowide && !dosync && !doppr) {
2179 panic("ahc_intr: AWAITING_MSG for negotiation, "
2180 "but no negotiation needed\n");
2181 }
2182
2183 use_ppr = (tinfo->current.transport_version >= 3) || doppr;
2184 /* Target initiated PPR is not allowed in the SCSI spec */
2185 if (devinfo->role == ROLE_TARGET)
2186 use_ppr = 0;
2187
2188 /*
2189 * Both the PPR message and SDTR message require the
2190 * goal syncrate to be limited to what the target device
2191 * is capable of handling (based on whether an LVD->SE
2192 * expander is on the bus), so combine these two cases.
2193 * Regardless, guarantee that if we are using WDTR and SDTR
2194 * messages that WDTR comes first.
2195 */
2196 if (use_ppr || (dosync && !dowide)) {
2197
2198 period = tinfo->goal.period;
2199 ppr_options = tinfo->goal.ppr_options;
2200 if (use_ppr == 0)
2201 ppr_options = 0;
2202 rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2203 &ppr_options, devinfo->role);
2204 offset = tinfo->goal.offset;
2205 ahc_validate_offset(ahc, tinfo, rate, &offset,
2206 use_ppr ? tinfo->goal.width
2207 : tinfo->current.width,
2208 devinfo->role);
2209 if (use_ppr) {
2210 ahc_construct_ppr(ahc, devinfo, period, offset,
2211 tinfo->goal.width, ppr_options);
2212 } else {
2213 ahc_construct_sdtr(ahc, devinfo, period, offset);
2214 }
2215 } else {
2216 ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
2217 }
2218}
2219
2220/*
2221 * Build a synchronous negotiation message in our message
2222 * buffer based on the input parameters.
2223 */
2224static void
2225ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2226 u_int period, u_int offset)
2227{
2228 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2229 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
2230 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
2231 ahc->msgout_buf[ahc->msgout_index++] = period;
2232 ahc->msgout_buf[ahc->msgout_index++] = offset;
2233 ahc->msgout_len += 5;
2234 if (bootverbose) {
2235 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2236 ahc_name(ahc), devinfo->channel, devinfo->target,
2237 devinfo->lun, period, offset);
2238 }
2239}
2240
2241/*
2242 * Build a wide negotiateion message in our message
2243 * buffer based on the input parameters.
2244 */
2245static void
2246ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2247 u_int bus_width)
2248{
2249 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2250 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
2251 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
2252 ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2253 ahc->msgout_len += 4;
2254 if (bootverbose) {
2255 printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2256 ahc_name(ahc), devinfo->channel, devinfo->target,
2257 devinfo->lun, bus_width);
2258 }
2259}
2260
2261/*
2262 * Build a parallel protocol request message in our message
2263 * buffer based on the input parameters.
2264 */
2265static void
2266ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2267 u_int period, u_int offset, u_int bus_width,
2268 u_int ppr_options)
2269{
2270 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2271 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
2272 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
2273 ahc->msgout_buf[ahc->msgout_index++] = period;
2274 ahc->msgout_buf[ahc->msgout_index++] = 0;
2275 ahc->msgout_buf[ahc->msgout_index++] = offset;
2276 ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2277 ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
2278 ahc->msgout_len += 8;
2279 if (bootverbose) {
2280 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2281 "offset %x, ppr_options %x\n", ahc_name(ahc),
2282 devinfo->channel, devinfo->target, devinfo->lun,
2283 bus_width, period, offset, ppr_options);
2284 }
2285}
2286
/*
 * Clear any active message state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	/* Tell the sequencer no host message is pending. */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
}
2305
/*
 * Manual message loop handler.
 *
 * Invoked when the sequencer hands a message phase to the host.  The
 * ahc->msg_type field selects which direction/role we are running; on
 * exit RETURN_1 tells the sequencer to either continue the message
 * loop (CONT_MSG_LOOP) or leave it (EXIT_MSG_LOOP).
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	u_int	bus_phase;
	int	end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		/* We are the initiator, sourcing message bytes. */
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this message is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* A parity error on msgin takes priority over our message. */
		if (ahc->send_msg_perror) {
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		/* We are the initiator, sinking message bytes. */
		int phasemis;
		int message_done;

		phasemis = bus_phase != P_MESGIN;

		if (phasemis) {
			ahc->msgin_index = 0;
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0)
				ahc_outb(ahc, SCSISIGO,
					 ahc_inb(ahc, SCSISIGO) | ATNO);
		} else 
			ahc->msgin_index++;

		/* Ack the byte */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
		ahc_inb(ahc, SCSIDATL);
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		/* We are the target, sourcing message bytes. */
		int msgdone;
		int msgout_request;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this message is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
			ahc->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahc_inb(ahc, SCSIDATL);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		/* We are the target, sinking message bytes. */
		int lastbyte;
		int msgdone;

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}
		
		ahc->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 *     and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}
2555
2556/*
2557 * See if we sent a particular extended message to the target.
2558 * If "full" is true, return true only if the target saw the full
2559 * message. If "full" is false, return true if the target saw at
2560 * least the first byte of the message.
2561 */
2562static int
2563ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
2564{
2565 int found;
2566 u_int index;
2567
2568 found = FALSE;
2569 index = 0;
2570
2571 while (index < ahc->msgout_len) {
2572 if (ahc->msgout_buf[index] == MSG_EXTENDED) {
2573 u_int end_index;
2574
2575 end_index = index + 1 + ahc->msgout_buf[index + 1];
2576 if (ahc->msgout_buf[index+2] == msgval
2577 && type == AHCMSG_EXT) {
2578
2579 if (full) {
2580 if (ahc->msgout_index > end_index)
2581 found = TRUE;
2582 } else if (ahc->msgout_index > index)
2583 found = TRUE;
2584 }
2585 index = end_index;
2586 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
2587 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
2588
2589 /* Skip tag type and tag id or residue param*/
2590 index += 2;
2591 } else {
2592 /* Single byte message */
2593 if (type == AHCMSG_1B
2594 && ahc->msgout_buf[index] == msgval
2595 && ahc->msgout_index > index)
2596 found = TRUE;
2597 index++;
2598 }
2599
2600 if (found)
2601 break;
2602 }
2603 return (found);
2604}
2605
2606/*
2607 * Wait for a complete incomming message, parse it, and respond accordingly.
2608 */
2609static int
2610ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2611{
2612 struct ahc_initiator_tinfo *tinfo;
2167 struct ahc_syncrate *rate;
2168 int dowide;
2169 int dosync;
2170 int doppr;
2171 int use_ppr;
2172 u_int period;
2173 u_int ppr_options;
2174 u_int offset;
2175
2176 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2177 devinfo->target, &tstate);
2178 dowide = tinfo->current.width != tinfo->goal.width;
2179 dosync = tinfo->current.period != tinfo->goal.period;
2180 doppr = tinfo->current.ppr_options != tinfo->goal.ppr_options;
2181
2182 if (!dowide && !dosync && !doppr) {
2183 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
2184 dosync = tinfo->goal.period != 0;
2185 doppr = tinfo->goal.ppr_options != 0;
2186 }
2187
2188 if (!dowide && !dosync && !doppr) {
2189 panic("ahc_intr: AWAITING_MSG for negotiation, "
2190 "but no negotiation needed\n");
2191 }
2192
2193 use_ppr = (tinfo->current.transport_version >= 3) || doppr;
2194 /* Target initiated PPR is not allowed in the SCSI spec */
2195 if (devinfo->role == ROLE_TARGET)
2196 use_ppr = 0;
2197
2198 /*
2199 * Both the PPR message and SDTR message require the
2200 * goal syncrate to be limited to what the target device
2201 * is capable of handling (based on whether an LVD->SE
2202 * expander is on the bus), so combine these two cases.
2203 * Regardless, guarantee that if we are using WDTR and SDTR
2204 * messages that WDTR comes first.
2205 */
2206 if (use_ppr || (dosync && !dowide)) {
2207
2208 period = tinfo->goal.period;
2209 ppr_options = tinfo->goal.ppr_options;
2210 if (use_ppr == 0)
2211 ppr_options = 0;
2212 rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2213 &ppr_options, devinfo->role);
2214 offset = tinfo->goal.offset;
2215 ahc_validate_offset(ahc, tinfo, rate, &offset,
2216 use_ppr ? tinfo->goal.width
2217 : tinfo->current.width,
2218 devinfo->role);
2219 if (use_ppr) {
2220 ahc_construct_ppr(ahc, devinfo, period, offset,
2221 tinfo->goal.width, ppr_options);
2222 } else {
2223 ahc_construct_sdtr(ahc, devinfo, period, offset);
2224 }
2225 } else {
2226 ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
2227 }
2228}
2229
/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 *
 * NOTE(review): this file is a two-revision diff rendering; this is the
 * second (newer-revision) copy of ahc_construct_sdtr.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int period, u_int offset)
{
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_len += 5;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}
2250
/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 *
 * NOTE(review): this file is a two-revision diff rendering; this is the
 * second (newer-revision) copy of ahc_construct_wdtr.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int bus_width)
{
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_len += 4;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}
2270
/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 *
 * NOTE(review): this file is a two-revision diff rendering; this is the
 * second (newer-revision) copy of ahc_construct_ppr.
 */
static void
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	/* Reserved byte in the PPR message format. */
	ahc->msgout_buf[ahc->msgout_index++] = 0;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
	ahc->msgout_len += 8;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}
2296
/*
 * Clear any active message state.
 *
 * NOTE(review): this file is a two-revision diff rendering; this is the
 * second (newer-revision) copy of ahc_clear_msg_state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	/* Tell the sequencer no host message is pending. */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
}
2315
2316/*
2317 * Manual message loop handler.
2318 */
2319static void
2320ahc_handle_message_phase(struct ahc_softc *ahc)
2321{
2322 struct ahc_devinfo devinfo;
2323 u_int bus_phase;
2324 int end_session;
2325
2326 ahc_fetch_devinfo(ahc, &devinfo);
2327 end_session = FALSE;
2328 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2329
2330reswitch:
2331 switch (ahc->msg_type) {
2332 case MSG_TYPE_INITIATOR_MSGOUT:
2333 {
2334 int lastbyte;
2335 int phasemis;
2336 int msgdone;
2337
2338 if (ahc->msgout_len == 0)
2339 panic("HOST_MSG_LOOP interrupt with no active message");
2340
2341 phasemis = bus_phase != P_MESGOUT;
2342 if (phasemis) {
2343 if (bus_phase == P_MESGIN) {
2344 /*
2345 * Change gears and see if
2346 * this messages is of interest to
2347 * us or should be passed back to
2348 * the sequencer.
2349 */
2350 ahc_outb(ahc, CLRSINT1, CLRATNO);
2351 ahc->send_msg_perror = FALSE;
2352 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
2353 ahc->msgin_index = 0;
2354 goto reswitch;
2355 }
2356 end_session = TRUE;
2357 break;
2358 }
2359
2360 if (ahc->send_msg_perror) {
2361 ahc_outb(ahc, CLRSINT1, CLRATNO);
2362 ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2363 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
2364 break;
2365 }
2366
2367 msgdone = ahc->msgout_index == ahc->msgout_len;
2368 if (msgdone) {
2369 /*
2370 * The target has requested a retry.
2371 * Re-assert ATN, reset our message index to
2372 * 0, and try again.
2373 */
2374 ahc->msgout_index = 0;
2375 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
2376 }
2377
2378 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
2379 if (lastbyte) {
2380 /* Last byte is signified by dropping ATN */
2381 ahc_outb(ahc, CLRSINT1, CLRATNO);
2382 }
2383
2384 /*
2385 * Clear our interrupt status and present
2386 * the next byte on the bus.
2387 */
2388 ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2389 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2390 break;
2391 }
2392 case MSG_TYPE_INITIATOR_MSGIN:
2393 {
2394 int phasemis;
2395 int message_done;
2396
2397 phasemis = bus_phase != P_MESGIN;
2398
2399 if (phasemis) {
2400 ahc->msgin_index = 0;
2401 if (bus_phase == P_MESGOUT
2402 && (ahc->send_msg_perror == TRUE
2403 || (ahc->msgout_len != 0
2404 && ahc->msgout_index == 0))) {
2405 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2406 goto reswitch;
2407 }
2408 end_session = TRUE;
2409 break;
2410 }
2411
2412 /* Pull the byte in without acking it */
2413 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
2414
2415 message_done = ahc_parse_msg(ahc, &devinfo);
2416
2417 if (message_done) {
2418 /*
2419 * Clear our incoming message buffer in case there
2420 * is another message following this one.
2421 */
2422 ahc->msgin_index = 0;
2423
2424 /*
2425 * If this message illicited a response,
2426 * assert ATN so the target takes us to the
2427 * message out phase.
2428 */
2429 if (ahc->msgout_len != 0)
2430 ahc_outb(ahc, SCSISIGO,
2431 ahc_inb(ahc, SCSISIGO) | ATNO);
2432 } else
2433 ahc->msgin_index++;
2434
2435 /* Ack the byte */
2436 ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2437 ahc_inb(ahc, SCSIDATL);
2438 break;
2439 }
2440 case MSG_TYPE_TARGET_MSGIN:
2441 {
2442 int msgdone;
2443 int msgout_request;
2444
2445 if (ahc->msgout_len == 0)
2446 panic("Target MSGIN with no active message");
2447
2448 /*
2449 * If we interrupted a mesgout session, the initiator
2450 * will not know this until our first REQ. So, we
2451 * only honor mesgout requests after we've sent our
2452 * first byte.
2453 */
2454 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
2455 && ahc->msgout_index > 0)
2456 msgout_request = TRUE;
2457 else
2458 msgout_request = FALSE;
2459
2460 if (msgout_request) {
2461
2462 /*
2463 * Change gears and see if
2464 * this messages is of interest to
2465 * us or should be passed back to
2466 * the sequencer.
2467 */
2468 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2469 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2470 ahc->msgin_index = 0;
2471 /* Dummy read to REQ for first byte */
2472 ahc_inb(ahc, SCSIDATL);
2473 ahc_outb(ahc, SXFRCTL0,
2474 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2475 break;
2476 }
2477
2478 msgdone = ahc->msgout_index == ahc->msgout_len;
2479 if (msgdone) {
2480 ahc_outb(ahc, SXFRCTL0,
2481 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2482 end_session = TRUE;
2483 break;
2484 }
2485
2486 /*
2487 * Present the next byte on the bus.
2488 */
2489 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2490 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2491 break;
2492 }
2493 case MSG_TYPE_TARGET_MSGOUT:
2494 {
2495 int lastbyte;
2496 int msgdone;
2497
2498 /*
2499 * The initiator signals that this is
2500 * the last byte by dropping ATN.
2501 */
2502 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2503
2504 /*
2505 * Read the latched byte, but turn off SPIOEN first
2506 * so that we don't inadvertantly cause a REQ for the
2507 * next byte.
2508 */
2509 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2510 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2511 msgdone = ahc_parse_msg(ahc, &devinfo);
2512 if (msgdone == MSGLOOP_TERMINATED) {
2513 /*
2514 * The message is *really* done in that it caused
2515 * us to go to bus free. The sequencer has already
2516 * been reset at this point, so pull the ejection
2517 * handle.
2518 */
2519 return;
2520 }
2521
2522 ahc->msgin_index++;
2523
2524 /*
2525 * XXX Read spec about initiator dropping ATN too soon
2526 * and use msgdone to detect it.
2527 */
2528 if (msgdone == MSGLOOP_MSGCOMPLETE) {
2529 ahc->msgin_index = 0;
2530
2531 /*
2532 * If this message illicited a response, transition
2533 * to the Message in phase and send it.
2534 */
2535 if (ahc->msgout_len != 0) {
2536 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2537 ahc_outb(ahc, SXFRCTL0,
2538 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2539 ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
2540 ahc->msgin_index = 0;
2541 break;
2542 }
2543 }
2544
2545 if (lastbyte)
2546 end_session = TRUE;
2547 else {
2548 /* Ask for the next byte. */
2549 ahc_outb(ahc, SXFRCTL0,
2550 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2551 }
2552
2553 break;
2554 }
2555 default:
2556 panic("Unknown REQINIT message type");
2557 }
2558
2559 if (end_session) {
2560 ahc_clear_msg_state(ahc);
2561 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
2562 } else
2563 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
2564}
2565
/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 *
 * NOTE(review): this file is a two-revision diff rendering; this is the
 * second (newer-revision) copy of ahc_sent_msg.
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* Length byte + preamble locate the next message. */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {

				if (full) {
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}
2615
2616/*
2617 * Wait for a complete incomming message, parse it, and respond accordingly.
2618 */
2619static int
2620ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2621{
2622 struct ahc_initiator_tinfo *tinfo;
2613 struct tmode_tstate *tstate;
2623 struct ahc_tmode_tstate *tstate;
2614 int reject;
2615 int done;
2616 int response;
2617 u_int targ_scsirate;
2618
2619 done = MSGLOOP_IN_PROG;
2620 response = FALSE;
2621 reject = FALSE;
2622 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2623 devinfo->target, &tstate);
2624 targ_scsirate = tinfo->scsirate;
2625
2626 /*
2627 * Parse as much of the message as is availible,
2628 * rejecting it if we don't support it. When
2629 * the entire message is availible and has been
2630 * handled, return MSGLOOP_MSGCOMPLETE, indicating
2631 * that we have parsed an entire message.
2632 *
2633 * In the case of extended messages, we accept the length
2634 * byte outright and perform more checking once we know the
2635 * extended message type.
2636 */
2637 switch (ahc->msgin_buf[0]) {
2638 case MSG_MESSAGE_REJECT:
2639 response = ahc_handle_msg_reject(ahc, devinfo);
2640 /* FALLTHROUGH */
2641 case MSG_NOOP:
2642 done = MSGLOOP_MSGCOMPLETE;
2643 break;
2644 case MSG_EXTENDED:
2645 {
2646 /* Wait for enough of the message to begin validation */
2647 if (ahc->msgin_index < 2)
2648 break;
2649 switch (ahc->msgin_buf[2]) {
2650 case MSG_EXT_SDTR:
2651 {
2652 struct ahc_syncrate *syncrate;
2653 u_int period;
2654 u_int ppr_options;
2655 u_int offset;
2656 u_int saved_offset;
2657
2658 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
2659 reject = TRUE;
2660 break;
2661 }
2662
2663 /*
2664 * Wait until we have both args before validating
2665 * and acting on this message.
2666 *
2667 * Add one to MSG_EXT_SDTR_LEN to account for
2668 * the extended message preamble.
2669 */
2670 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
2671 break;
2672
2673 period = ahc->msgin_buf[3];
2674 ppr_options = 0;
2675 saved_offset = offset = ahc->msgin_buf[4];
2676 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2677 &ppr_options,
2678 devinfo->role);
2679 ahc_validate_offset(ahc, tinfo, syncrate, &offset,
2680 targ_scsirate & WIDEXFER,
2681 devinfo->role);
2682 if (bootverbose) {
2683 printf("(%s:%c:%d:%d): Received "
2684 "SDTR period %x, offset %x\n\t"
2685 "Filtered to period %x, offset %x\n",
2686 ahc_name(ahc), devinfo->channel,
2687 devinfo->target, devinfo->lun,
2688 ahc->msgin_buf[3], saved_offset,
2689 period, offset);
2690 }
2691 ahc_set_syncrate(ahc, devinfo,
2692 syncrate, period,
2693 offset, ppr_options,
2694 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2695 /*paused*/TRUE);
2696
2697 /*
2698 * See if we initiated Sync Negotiation
2699 * and didn't have to fall down to async
2700 * transfers.
2701 */
2702 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
2703 /* We started it */
2704 if (saved_offset != offset) {
2705 /* Went too low - force async */
2706 reject = TRUE;
2707 }
2708 } else {
2709 /*
2710 * Send our own SDTR in reply
2711 */
2624 int reject;
2625 int done;
2626 int response;
2627 u_int targ_scsirate;
2628
2629 done = MSGLOOP_IN_PROG;
2630 response = FALSE;
2631 reject = FALSE;
2632 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2633 devinfo->target, &tstate);
2634 targ_scsirate = tinfo->scsirate;
2635
2636 /*
2637 * Parse as much of the message as is availible,
2638 * rejecting it if we don't support it. When
2639 * the entire message is availible and has been
2640 * handled, return MSGLOOP_MSGCOMPLETE, indicating
2641 * that we have parsed an entire message.
2642 *
2643 * In the case of extended messages, we accept the length
2644 * byte outright and perform more checking once we know the
2645 * extended message type.
2646 */
2647 switch (ahc->msgin_buf[0]) {
2648 case MSG_MESSAGE_REJECT:
2649 response = ahc_handle_msg_reject(ahc, devinfo);
2650 /* FALLTHROUGH */
2651 case MSG_NOOP:
2652 done = MSGLOOP_MSGCOMPLETE;
2653 break;
2654 case MSG_EXTENDED:
2655 {
2656 /* Wait for enough of the message to begin validation */
2657 if (ahc->msgin_index < 2)
2658 break;
2659 switch (ahc->msgin_buf[2]) {
2660 case MSG_EXT_SDTR:
2661 {
2662 struct ahc_syncrate *syncrate;
2663 u_int period;
2664 u_int ppr_options;
2665 u_int offset;
2666 u_int saved_offset;
2667
2668 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
2669 reject = TRUE;
2670 break;
2671 }
2672
2673 /*
2674 * Wait until we have both args before validating
2675 * and acting on this message.
2676 *
2677 * Add one to MSG_EXT_SDTR_LEN to account for
2678 * the extended message preamble.
2679 */
2680 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
2681 break;
2682
2683 period = ahc->msgin_buf[3];
2684 ppr_options = 0;
2685 saved_offset = offset = ahc->msgin_buf[4];
2686 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2687 &ppr_options,
2688 devinfo->role);
2689 ahc_validate_offset(ahc, tinfo, syncrate, &offset,
2690 targ_scsirate & WIDEXFER,
2691 devinfo->role);
2692 if (bootverbose) {
2693 printf("(%s:%c:%d:%d): Received "
2694 "SDTR period %x, offset %x\n\t"
2695 "Filtered to period %x, offset %x\n",
2696 ahc_name(ahc), devinfo->channel,
2697 devinfo->target, devinfo->lun,
2698 ahc->msgin_buf[3], saved_offset,
2699 period, offset);
2700 }
2701 ahc_set_syncrate(ahc, devinfo,
2702 syncrate, period,
2703 offset, ppr_options,
2704 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2705 /*paused*/TRUE);
2706
2707 /*
2708 * See if we initiated Sync Negotiation
2709 * and didn't have to fall down to async
2710 * transfers.
2711 */
2712 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
2713 /* We started it */
2714 if (saved_offset != offset) {
2715 /* Went too low - force async */
2716 reject = TRUE;
2717 }
2718 } else {
2719 /*
2720 * Send our own SDTR in reply
2721 */
2712 if (bootverbose) {
2722 if (bootverbose
2723 && devinfo->role == ROLE_INITIATOR) {
2713 printf("(%s:%c:%d:%d): Target "
2714 "Initiated SDTR\n",
2715 ahc_name(ahc), devinfo->channel,
2716 devinfo->target, devinfo->lun);
2717 }
2718 ahc->msgout_index = 0;
2719 ahc->msgout_len = 0;
2720 ahc_construct_sdtr(ahc, devinfo,
2721 period, offset);
2722 ahc->msgout_index = 0;
2723 response = TRUE;
2724 }
2725 done = MSGLOOP_MSGCOMPLETE;
2726 break;
2727 }
2728 case MSG_EXT_WDTR:
2729 {
2730 u_int bus_width;
2731 u_int saved_width;
2732 u_int sending_reply;
2733
2734 sending_reply = FALSE;
2735 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
2736 reject = TRUE;
2737 break;
2738 }
2739
2740 /*
2741 * Wait until we have our arg before validating
2742 * and acting on this message.
2743 *
2744 * Add one to MSG_EXT_WDTR_LEN to account for
2745 * the extended message preamble.
2746 */
2747 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
2748 break;
2749
2750 bus_width = ahc->msgin_buf[3];
2751 saved_width = bus_width;
2752 ahc_validate_width(ahc, tinfo, &bus_width,
2753 devinfo->role);
2754 if (bootverbose) {
2755 printf("(%s:%c:%d:%d): Received WDTR "
2756 "%x filtered to %x\n",
2757 ahc_name(ahc), devinfo->channel,
2758 devinfo->target, devinfo->lun,
2759 saved_width, bus_width);
2760 }
2761
2762 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
2763 /*
2764 * Don't send a WDTR back to the
2765 * target, since we asked first.
2766 * If the width went higher than our
2767 * request, reject it.
2768 */
2769 if (saved_width > bus_width) {
2770 reject = TRUE;
2771 printf("(%s:%c:%d:%d): requested %dBit "
2772 "transfers. Rejecting...\n",
2773 ahc_name(ahc), devinfo->channel,
2774 devinfo->target, devinfo->lun,
2775 8 * (0x01 << bus_width));
2776 bus_width = 0;
2777 }
2778 } else {
2779 /*
2780 * Send our own WDTR in reply
2781 */
2724 printf("(%s:%c:%d:%d): Target "
2725 "Initiated SDTR\n",
2726 ahc_name(ahc), devinfo->channel,
2727 devinfo->target, devinfo->lun);
2728 }
2729 ahc->msgout_index = 0;
2730 ahc->msgout_len = 0;
2731 ahc_construct_sdtr(ahc, devinfo,
2732 period, offset);
2733 ahc->msgout_index = 0;
2734 response = TRUE;
2735 }
2736 done = MSGLOOP_MSGCOMPLETE;
2737 break;
2738 }
2739 case MSG_EXT_WDTR:
2740 {
2741 u_int bus_width;
2742 u_int saved_width;
2743 u_int sending_reply;
2744
2745 sending_reply = FALSE;
2746 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
2747 reject = TRUE;
2748 break;
2749 }
2750
2751 /*
2752 * Wait until we have our arg before validating
2753 * and acting on this message.
2754 *
2755 * Add one to MSG_EXT_WDTR_LEN to account for
2756 * the extended message preamble.
2757 */
2758 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
2759 break;
2760
2761 bus_width = ahc->msgin_buf[3];
2762 saved_width = bus_width;
2763 ahc_validate_width(ahc, tinfo, &bus_width,
2764 devinfo->role);
2765 if (bootverbose) {
2766 printf("(%s:%c:%d:%d): Received WDTR "
2767 "%x filtered to %x\n",
2768 ahc_name(ahc), devinfo->channel,
2769 devinfo->target, devinfo->lun,
2770 saved_width, bus_width);
2771 }
2772
2773 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
2774 /*
2775 * Don't send a WDTR back to the
2776 * target, since we asked first.
2777 * If the width went higher than our
2778 * request, reject it.
2779 */
2780 if (saved_width > bus_width) {
2781 reject = TRUE;
2782 printf("(%s:%c:%d:%d): requested %dBit "
2783 "transfers. Rejecting...\n",
2784 ahc_name(ahc), devinfo->channel,
2785 devinfo->target, devinfo->lun,
2786 8 * (0x01 << bus_width));
2787 bus_width = 0;
2788 }
2789 } else {
2790 /*
2791 * Send our own WDTR in reply
2792 */
2782 if (bootverbose) {
2793 if (bootverbose
2794 && devinfo->role == ROLE_INITIATOR) {
2783 printf("(%s:%c:%d:%d): Target "
2784 "Initiated WDTR\n",
2785 ahc_name(ahc), devinfo->channel,
2786 devinfo->target, devinfo->lun);
2787 }
2788 ahc->msgout_index = 0;
2789 ahc->msgout_len = 0;
2790 ahc_construct_wdtr(ahc, devinfo, bus_width);
2791 ahc->msgout_index = 0;
2792 response = TRUE;
2793 sending_reply = TRUE;
2794 }
2795 ahc_set_width(ahc, devinfo, bus_width,
2796 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2797 /*paused*/TRUE);
2798 /* After a wide message, we are async */
2799 ahc_set_syncrate(ahc, devinfo,
2800 /*syncrate*/NULL, /*period*/0,
2801 /*offset*/0, /*ppr_options*/0,
2802 AHC_TRANS_ACTIVE, /*paused*/TRUE);
2803 if (sending_reply == FALSE && reject == FALSE) {
2804
2805 if (tinfo->goal.period) {
2806 ahc->msgout_index = 0;
2807 ahc->msgout_len = 0;
2808 ahc_build_transfer_msg(ahc, devinfo);
2809 ahc->msgout_index = 0;
2810 response = TRUE;
2811 }
2812 }
2813 done = MSGLOOP_MSGCOMPLETE;
2814 break;
2815 }
2816 case MSG_EXT_PPR:
2817 {
2818 struct ahc_syncrate *syncrate;
2819 u_int period;
2820 u_int offset;
2821 u_int bus_width;
2822 u_int ppr_options;
2823 u_int saved_width;
2824 u_int saved_offset;
2825 u_int saved_ppr_options;
2826
2827 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
2828 reject = TRUE;
2829 break;
2830 }
2831
2832 /*
2833 * Wait until we have all args before validating
2834 * and acting on this message.
2835 *
2836 * Add one to MSG_EXT_PPR_LEN to account for
2837 * the extended message preamble.
2838 */
2839 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
2840 break;
2841
2842 period = ahc->msgin_buf[3];
2843 offset = ahc->msgin_buf[5];
2844 bus_width = ahc->msgin_buf[6];
2845 saved_width = bus_width;
2846 ppr_options = ahc->msgin_buf[7];
2847 /*
2848 * According to the spec, a DT only
2849 * period factor with no DT option
2850 * set implies async.
2851 */
2852 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2853 && period == 9)
2854 offset = 0;
2855 saved_ppr_options = ppr_options;
2856 saved_offset = offset;
2857
2858 /*
2859 * Mask out any options we don't support
2860 * on any controller. Transfer options are
2861 * only available if we are negotiating wide.
2862 */
2863 ppr_options &= MSG_EXT_PPR_DT_REQ;
2864 if (bus_width == 0)
2865 ppr_options = 0;
2866
2867 ahc_validate_width(ahc, tinfo, &bus_width,
2868 devinfo->role);
2869 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2870 &ppr_options,
2871 devinfo->role);
2872 ahc_validate_offset(ahc, tinfo, syncrate,
2873 &offset, bus_width,
2874 devinfo->role);
2875
2876 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
2877 /*
2878 * If we are unable to do any of the
2879 * requested options (we went too low),
2880 * then we'll have to reject the message.
2881 */
2882 if (saved_width > bus_width
2883 || saved_offset != offset
2884 || saved_ppr_options != ppr_options) {
2885 reject = TRUE;
2886 period = 0;
2887 offset = 0;
2888 bus_width = 0;
2889 ppr_options = 0;
2890 syncrate = NULL;
2891 }
2892 } else {
2893 if (devinfo->role != ROLE_TARGET)
2894 printf("(%s:%c:%d:%d): Target "
2895 "Initiated PPR\n",
2896 ahc_name(ahc), devinfo->channel,
2897 devinfo->target, devinfo->lun);
2898 else
2899 printf("(%s:%c:%d:%d): Initiator "
2900 "Initiated PPR\n",
2901 ahc_name(ahc), devinfo->channel,
2902 devinfo->target, devinfo->lun);
2903 ahc->msgout_index = 0;
2904 ahc->msgout_len = 0;
2905 ahc_construct_ppr(ahc, devinfo, period, offset,
2906 bus_width, ppr_options);
2907 ahc->msgout_index = 0;
2908 response = TRUE;
2909 }
2910 if (bootverbose) {
2911 printf("(%s:%c:%d:%d): Received PPR width %x, "
2912 "period %x, offset %x,options %x\n"
2913 "\tFiltered to width %x, period %x, "
2914 "offset %x, options %x\n",
2915 ahc_name(ahc), devinfo->channel,
2916 devinfo->target, devinfo->lun,
2917 saved_width, ahc->msgin_buf[3],
2918 saved_offset, saved_ppr_options,
2919 bus_width, period, offset, ppr_options);
2920 }
2921 ahc_set_width(ahc, devinfo, bus_width,
2922 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2923 /*paused*/TRUE);
2924 ahc_set_syncrate(ahc, devinfo,
2925 syncrate, period,
2926 offset, ppr_options,
2927 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2928 /*paused*/TRUE);
2929 done = MSGLOOP_MSGCOMPLETE;
2930 break;
2931 }
2932 default:
2933 /* Unknown extended message. Reject it. */
2934 reject = TRUE;
2935 break;
2936 }
2937 break;
2938 }
2939 case MSG_BUS_DEV_RESET:
2940 ahc_handle_devreset(ahc, devinfo,
2941 CAM_BDR_SENT,
2942 "Bus Device Reset Received",
2943 /*verbose_level*/0);
2944 ahc_restart(ahc);
2945 done = MSGLOOP_TERMINATED;
2946 break;
2947 case MSG_ABORT_TAG:
2948 case MSG_ABORT:
2949 case MSG_CLEAR_QUEUE:
2950#ifdef AHC_TARGET_MODE
2951 /* Target mode messages */
2952 if (devinfo->role != ROLE_TARGET) {
2953 reject = TRUE;
2954 break;
2955 }
2956 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
2957 devinfo->lun,
2958 ahc->msgin_buf[0] == MSG_ABORT_TAG
2959 ? SCB_LIST_NULL
2960 : ahc_inb(ahc, INITIATOR_TAG),
2961 ROLE_TARGET, CAM_REQ_ABORTED);
2962
2963 tstate = ahc->enabled_targets[devinfo->our_scsiid];
2964 if (tstate != NULL) {
2795 printf("(%s:%c:%d:%d): Target "
2796 "Initiated WDTR\n",
2797 ahc_name(ahc), devinfo->channel,
2798 devinfo->target, devinfo->lun);
2799 }
2800 ahc->msgout_index = 0;
2801 ahc->msgout_len = 0;
2802 ahc_construct_wdtr(ahc, devinfo, bus_width);
2803 ahc->msgout_index = 0;
2804 response = TRUE;
2805 sending_reply = TRUE;
2806 }
2807 ahc_set_width(ahc, devinfo, bus_width,
2808 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2809 /*paused*/TRUE);
2810 /* After a wide message, we are async */
2811 ahc_set_syncrate(ahc, devinfo,
2812 /*syncrate*/NULL, /*period*/0,
2813 /*offset*/0, /*ppr_options*/0,
2814 AHC_TRANS_ACTIVE, /*paused*/TRUE);
2815 if (sending_reply == FALSE && reject == FALSE) {
2816
2817 if (tinfo->goal.period) {
2818 ahc->msgout_index = 0;
2819 ahc->msgout_len = 0;
2820 ahc_build_transfer_msg(ahc, devinfo);
2821 ahc->msgout_index = 0;
2822 response = TRUE;
2823 }
2824 }
2825 done = MSGLOOP_MSGCOMPLETE;
2826 break;
2827 }
2828 case MSG_EXT_PPR:
2829 {
2830 struct ahc_syncrate *syncrate;
2831 u_int period;
2832 u_int offset;
2833 u_int bus_width;
2834 u_int ppr_options;
2835 u_int saved_width;
2836 u_int saved_offset;
2837 u_int saved_ppr_options;
2838
2839 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
2840 reject = TRUE;
2841 break;
2842 }
2843
2844 /*
2845 * Wait until we have all args before validating
2846 * and acting on this message.
2847 *
2848 * Add one to MSG_EXT_PPR_LEN to account for
2849 * the extended message preamble.
2850 */
2851 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
2852 break;
2853
2854 period = ahc->msgin_buf[3];
2855 offset = ahc->msgin_buf[5];
2856 bus_width = ahc->msgin_buf[6];
2857 saved_width = bus_width;
2858 ppr_options = ahc->msgin_buf[7];
2859 /*
2860 * According to the spec, a DT only
2861 * period factor with no DT option
2862 * set implies async.
2863 */
2864 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2865 && period == 9)
2866 offset = 0;
2867 saved_ppr_options = ppr_options;
2868 saved_offset = offset;
2869
2870 /*
2871 * Mask out any options we don't support
2872 * on any controller. Transfer options are
2873 * only available if we are negotiating wide.
2874 */
2875 ppr_options &= MSG_EXT_PPR_DT_REQ;
2876 if (bus_width == 0)
2877 ppr_options = 0;
2878
2879 ahc_validate_width(ahc, tinfo, &bus_width,
2880 devinfo->role);
2881 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2882 &ppr_options,
2883 devinfo->role);
2884 ahc_validate_offset(ahc, tinfo, syncrate,
2885 &offset, bus_width,
2886 devinfo->role);
2887
2888 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
2889 /*
2890 * If we are unable to do any of the
2891 * requested options (we went too low),
2892 * then we'll have to reject the message.
2893 */
2894 if (saved_width > bus_width
2895 || saved_offset != offset
2896 || saved_ppr_options != ppr_options) {
2897 reject = TRUE;
2898 period = 0;
2899 offset = 0;
2900 bus_width = 0;
2901 ppr_options = 0;
2902 syncrate = NULL;
2903 }
2904 } else {
2905 if (devinfo->role != ROLE_TARGET)
2906 printf("(%s:%c:%d:%d): Target "
2907 "Initiated PPR\n",
2908 ahc_name(ahc), devinfo->channel,
2909 devinfo->target, devinfo->lun);
2910 else
2911 printf("(%s:%c:%d:%d): Initiator "
2912 "Initiated PPR\n",
2913 ahc_name(ahc), devinfo->channel,
2914 devinfo->target, devinfo->lun);
2915 ahc->msgout_index = 0;
2916 ahc->msgout_len = 0;
2917 ahc_construct_ppr(ahc, devinfo, period, offset,
2918 bus_width, ppr_options);
2919 ahc->msgout_index = 0;
2920 response = TRUE;
2921 }
2922 if (bootverbose) {
2923 printf("(%s:%c:%d:%d): Received PPR width %x, "
2924 "period %x, offset %x,options %x\n"
2925 "\tFiltered to width %x, period %x, "
2926 "offset %x, options %x\n",
2927 ahc_name(ahc), devinfo->channel,
2928 devinfo->target, devinfo->lun,
2929 saved_width, ahc->msgin_buf[3],
2930 saved_offset, saved_ppr_options,
2931 bus_width, period, offset, ppr_options);
2932 }
2933 ahc_set_width(ahc, devinfo, bus_width,
2934 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2935 /*paused*/TRUE);
2936 ahc_set_syncrate(ahc, devinfo,
2937 syncrate, period,
2938 offset, ppr_options,
2939 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2940 /*paused*/TRUE);
2941 done = MSGLOOP_MSGCOMPLETE;
2942 break;
2943 }
2944 default:
2945 /* Unknown extended message. Reject it. */
2946 reject = TRUE;
2947 break;
2948 }
2949 break;
2950 }
2951 case MSG_BUS_DEV_RESET:
2952 ahc_handle_devreset(ahc, devinfo,
2953 CAM_BDR_SENT,
2954 "Bus Device Reset Received",
2955 /*verbose_level*/0);
2956 ahc_restart(ahc);
2957 done = MSGLOOP_TERMINATED;
2958 break;
2959 case MSG_ABORT_TAG:
2960 case MSG_ABORT:
2961 case MSG_CLEAR_QUEUE:
2962#ifdef AHC_TARGET_MODE
2963 /* Target mode messages */
2964 if (devinfo->role != ROLE_TARGET) {
2965 reject = TRUE;
2966 break;
2967 }
2968 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
2969 devinfo->lun,
2970 ahc->msgin_buf[0] == MSG_ABORT_TAG
2971 ? SCB_LIST_NULL
2972 : ahc_inb(ahc, INITIATOR_TAG),
2973 ROLE_TARGET, CAM_REQ_ABORTED);
2974
2975 tstate = ahc->enabled_targets[devinfo->our_scsiid];
2976 if (tstate != NULL) {
2965 struct tmode_lstate* lstate;
2977 struct ahc_tmode_lstate* lstate;
2966
2967 lstate = tstate->enabled_luns[devinfo->lun];
2968 if (lstate != NULL) {
2969 ahc_queue_lstate_event(ahc, lstate,
2970 devinfo->our_scsiid,
2971 ahc->msgin_buf[0],
2972 /*arg*/0);
2973 ahc_send_lstate_events(ahc, lstate);
2974 }
2975 }
2976 done = MSGLOOP_MSGCOMPLETE;
2977 break;
2978#endif
2979 case MSG_TERM_IO_PROC:
2980 default:
2981 reject = TRUE;
2982 break;
2983 }
2984
2985 if (reject) {
2986 /*
2987 * Setup to reject the message.
2988 */
2989 ahc->msgout_index = 0;
2990 ahc->msgout_len = 1;
2991 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
2992 done = MSGLOOP_MSGCOMPLETE;
2993 response = TRUE;
2994 }
2995
2996 if (done != MSGLOOP_IN_PROG && !response)
2997 /* Clear the outgoing message buffer */
2998 ahc->msgout_len = 0;
2999
3000 return (done);
3001}
3002
3003/*
3004 * Process a message reject message.
3005 */
3006static int
3007ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3008{
3009 /*
3010 * What we care about here is if we had an
3011 * outstanding SDTR or WDTR message for this
3012 * target. If we did, this is a signal that
3013 * the target is refusing negotiation.
3014 */
3015 struct scb *scb;
3016 struct ahc_initiator_tinfo *tinfo;
2978
2979 lstate = tstate->enabled_luns[devinfo->lun];
2980 if (lstate != NULL) {
2981 ahc_queue_lstate_event(ahc, lstate,
2982 devinfo->our_scsiid,
2983 ahc->msgin_buf[0],
2984 /*arg*/0);
2985 ahc_send_lstate_events(ahc, lstate);
2986 }
2987 }
2988 done = MSGLOOP_MSGCOMPLETE;
2989 break;
2990#endif
2991 case MSG_TERM_IO_PROC:
2992 default:
2993 reject = TRUE;
2994 break;
2995 }
2996
2997 if (reject) {
2998 /*
2999 * Setup to reject the message.
3000 */
3001 ahc->msgout_index = 0;
3002 ahc->msgout_len = 1;
3003 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
3004 done = MSGLOOP_MSGCOMPLETE;
3005 response = TRUE;
3006 }
3007
3008 if (done != MSGLOOP_IN_PROG && !response)
3009 /* Clear the outgoing message buffer */
3010 ahc->msgout_len = 0;
3011
3012 return (done);
3013}
3014
3015/*
3016 * Process a message reject message.
3017 */
3018static int
3019ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3020{
3021 /*
3022 * What we care about here is if we had an
3023 * outstanding SDTR or WDTR message for this
3024 * target. If we did, this is a signal that
3025 * the target is refusing negotiation.
3026 */
3027 struct scb *scb;
3028 struct ahc_initiator_tinfo *tinfo;
3017 struct tmode_tstate *tstate;
3029 struct ahc_tmode_tstate *tstate;
3018 u_int scb_index;
3019 u_int last_msg;
3020 int response = 0;
3021
3022 scb_index = ahc_inb(ahc, SCB_TAG);
3023 scb = ahc_lookup_scb(ahc, scb_index);
3024 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3025 devinfo->our_scsiid,
3026 devinfo->target, &tstate);
3027 /* Might be necessary */
3028 last_msg = ahc_inb(ahc, LAST_MSG);
3029
3030 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
3031 /*
3032 * Target does not support the PPR message.
3033 * Attempt to negotiate SPI-2 style.
3034 */
3035 if (bootverbose) {
3036 printf("(%s:%c:%d:%d): PPR Rejected. "
3037 "Trying WDTR/SDTR\n",
3038 ahc_name(ahc), devinfo->channel,
3039 devinfo->target, devinfo->lun);
3040 }
3041 tinfo->goal.ppr_options = 0;
3042 tinfo->current.transport_version = 2;
3043 tinfo->goal.transport_version = 2;
3044 ahc->msgout_index = 0;
3045 ahc->msgout_len = 0;
3046 ahc_build_transfer_msg(ahc, devinfo);
3047 ahc->msgout_index = 0;
3048 response = 1;
3049 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
3050
3051 /* note 8bit xfers */
3052 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
3053 "8bit transfers\n", ahc_name(ahc),
3054 devinfo->channel, devinfo->target, devinfo->lun);
3055 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3056 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3057 /*paused*/TRUE);
3058 /*
3059 * No need to clear the sync rate. If the target
3060 * did not accept the command, our syncrate is
3061 * unaffected. If the target started the negotiation,
3062 * but rejected our response, we already cleared the
3063 * sync rate before sending our WDTR.
3064 */
3065 if (tinfo->goal.period) {
3066
3067 /* Start the sync negotiation */
3068 ahc->msgout_index = 0;
3069 ahc->msgout_len = 0;
3070 ahc_build_transfer_msg(ahc, devinfo);
3071 ahc->msgout_index = 0;
3072 response = 1;
3073 }
3074 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
3075 /* note asynch xfers and clear flag */
3076 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
3077 /*offset*/0, /*ppr_options*/0,
3078 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3079 /*paused*/TRUE);
3080 printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
3081 "Using asynchronous transfers\n",
3082 ahc_name(ahc), devinfo->channel,
3083 devinfo->target, devinfo->lun);
3084 } else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) {
3085
3086 printf("(%s:%c:%d:%d): refuses tagged commands. Performing "
3087 "non-tagged I/O\n", ahc_name(ahc),
3088 devinfo->channel, devinfo->target, devinfo->lun);
3089 ahc_set_tags(ahc, devinfo, FALSE);
3090
3091 /*
3092 * Resend the identify for this CCB as the target
3093 * may believe that the selection is invalid otherwise.
3094 */
3095 ahc_outb(ahc, SCB_CONTROL,
3096 ahc_inb(ahc, SCB_CONTROL) & ~MSG_SIMPLE_Q_TAG);
3097 scb->hscb->control &= ~MSG_SIMPLE_Q_TAG;
3098 ahc_set_transaction_tag(scb, /*enabled*/FALSE,
3099 /*type*/MSG_SIMPLE_Q_TAG);
3100 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3101 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
3102
3103 /*
3104 * This transaction is now at the head of
3105 * the untagged queue for this target.
3106 */
3107 if ((ahc->flags & AHC_SCB_BTT) == 0) {
3108 struct scb_tailq *untagged_q;
3109
3110 untagged_q =
3111 &(ahc->untagged_queues[devinfo->target_offset]);
3112 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3113 scb->flags |= SCB_UNTAGGEDQ;
3114 }
3115 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3116 scb->hscb->tag);
3117
3118 /*
3119 * Requeue all tagged commands for this target
3120 * currently in our posession so they can be
3121 * converted to untagged commands.
3122 */
3123 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3124 SCB_GET_CHANNEL(ahc, scb),
3125 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3126 ROLE_INITIATOR, CAM_REQUEUE_REQ,
3127 SEARCH_COMPLETE);
3128 } else {
3129 /*
3130 * Otherwise, we ignore it.
3131 */
3132 printf("%s:%c:%d: Message reject for %x -- ignored\n",
3133 ahc_name(ahc), devinfo->channel, devinfo->target,
3134 last_msg);
3135 }
3136 return (response);
3137}
3138
3139/*
3140 * Process an ingnore wide residue message.
3141 */
3142static void
3143ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3144{
3145 u_int scb_index;
3146 struct scb *scb;
3147
3148 scb_index = ahc_inb(ahc, SCB_TAG);
3149 scb = ahc_lookup_scb(ahc, scb_index);
3150 /*
3151 * XXX Actually check data direction in the sequencer?
3152 * Perhaps add datadir to some spare bits in the hscb?
3153 */
3154 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3155 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3156 /*
3157 * Ignore the message if we haven't
3158 * seen an appropriate data phase yet.
3159 */
3160 } else {
3161 /*
3162 * If the residual occurred on the last
3163 * transfer and the transfer request was
3164 * expected to end on an odd count, do
3165 * nothing. Otherwise, subtract a byte
3166 * and update the residual count accordingly.
3167 */
3168 uint32_t sgptr;
3169
3170 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3171 if ((sgptr & SG_LIST_NULL) != 0
3172 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3173 /*
3174 * If the residual occurred on the last
3175 * transfer and the transfer request was
3176 * expected to end on an odd count, do
3177 * nothing.
3178 */
3179 } else {
3180 struct ahc_dma_seg *sg;
3181 uint32_t data_cnt;
3182 uint32_t data_addr;
3183
3184 /* Pull in the rest of the sgptr */
3185 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3186 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3187 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3188 sgptr &= SG_PTR_MASK;
3189 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3190 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3191 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3192
3193 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3194 | (ahc_inb(ahc, SHADDR + 2) << 16)
3195 | (ahc_inb(ahc, SHADDR + 1) << 8)
3196 | (ahc_inb(ahc, SHADDR));
3197
3198 data_cnt += 1;
3199 data_addr -= 1;
3200
3201 sg = ahc_sg_bus_to_virt(scb, sgptr);
3202 /*
3203 * The residual sg ptr points to the next S/G
3204 * to load so we must go back one.
3205 */
3206 sg--;
3207 if (sg != scb->sg_list
3208 && (sg->len & AHC_SG_LEN_MASK) < data_cnt) {
3209
3210 sg--;
3211 data_cnt = 1 | (sg->len & AHC_DMA_LAST_SEG);
3212 data_addr = sg->addr
3213 + (sg->len & AHC_SG_LEN_MASK) - 1;
3214
3215 /*
3216 * Increment sg so it points to the
3217 * "next" sg.
3218 */
3219 sg++;
3220 sgptr = ahc_sg_virt_to_bus(scb, sg);
3221 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
3222 sgptr >> 24);
3223 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
3224 sgptr >> 16);
3225 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
3226 sgptr >> 8);
3227 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3228 }
3229
3230/* XXX What about high address byte??? */
3231 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
3232 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
3233 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
3234 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3235
3236/* XXX Perhaps better to just keep the saved address in sram */
3237 if ((ahc->features & AHC_ULTRA2) != 0) {
3238 ahc_outb(ahc, HADDR + 3, data_addr >> 24);
3239 ahc_outb(ahc, HADDR + 2, data_addr >> 16);
3240 ahc_outb(ahc, HADDR + 1, data_addr >> 8);
3241 ahc_outb(ahc, HADDR, data_addr);
3242 ahc_outb(ahc, DFCNTRL, PRELOADEN);
3243 ahc_outb(ahc, SXFRCTL0,
3244 ahc_inb(ahc, SXFRCTL0) | CLRCHN);
3245 } else {
3246 ahc_outb(ahc, HADDR + 3, data_addr >> 24);
3247 ahc_outb(ahc, HADDR + 2, data_addr >> 16);
3248 ahc_outb(ahc, HADDR + 1, data_addr >> 8);
3249 ahc_outb(ahc, HADDR, data_addr);
3250 }
3251 }
3252 }
3253}
3254
3255/*
3256 * Handle the effects of issuing a bus device reset message.
3257 */
3258static void
3259ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3260 cam_status status, char *message, int verbose_level)
3261{
3262#ifdef AHC_TARGET_MODE
3030 u_int scb_index;
3031 u_int last_msg;
3032 int response = 0;
3033
3034 scb_index = ahc_inb(ahc, SCB_TAG);
3035 scb = ahc_lookup_scb(ahc, scb_index);
3036 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3037 devinfo->our_scsiid,
3038 devinfo->target, &tstate);
3039 /* Might be necessary */
3040 last_msg = ahc_inb(ahc, LAST_MSG);
3041
3042 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
3043 /*
3044 * Target does not support the PPR message.
3045 * Attempt to negotiate SPI-2 style.
3046 */
3047 if (bootverbose) {
3048 printf("(%s:%c:%d:%d): PPR Rejected. "
3049 "Trying WDTR/SDTR\n",
3050 ahc_name(ahc), devinfo->channel,
3051 devinfo->target, devinfo->lun);
3052 }
3053 tinfo->goal.ppr_options = 0;
3054 tinfo->current.transport_version = 2;
3055 tinfo->goal.transport_version = 2;
3056 ahc->msgout_index = 0;
3057 ahc->msgout_len = 0;
3058 ahc_build_transfer_msg(ahc, devinfo);
3059 ahc->msgout_index = 0;
3060 response = 1;
3061 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
3062
3063 /* note 8bit xfers */
3064 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
3065 "8bit transfers\n", ahc_name(ahc),
3066 devinfo->channel, devinfo->target, devinfo->lun);
3067 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3068 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3069 /*paused*/TRUE);
3070 /*
3071 * No need to clear the sync rate. If the target
3072 * did not accept the command, our syncrate is
3073 * unaffected. If the target started the negotiation,
3074 * but rejected our response, we already cleared the
3075 * sync rate before sending our WDTR.
3076 */
3077 if (tinfo->goal.period) {
3078
3079 /* Start the sync negotiation */
3080 ahc->msgout_index = 0;
3081 ahc->msgout_len = 0;
3082 ahc_build_transfer_msg(ahc, devinfo);
3083 ahc->msgout_index = 0;
3084 response = 1;
3085 }
3086 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
3087 /* note asynch xfers and clear flag */
3088 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
3089 /*offset*/0, /*ppr_options*/0,
3090 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3091 /*paused*/TRUE);
3092 printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
3093 "Using asynchronous transfers\n",
3094 ahc_name(ahc), devinfo->channel,
3095 devinfo->target, devinfo->lun);
3096 } else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) {
3097
3098 printf("(%s:%c:%d:%d): refuses tagged commands. Performing "
3099 "non-tagged I/O\n", ahc_name(ahc),
3100 devinfo->channel, devinfo->target, devinfo->lun);
3101 ahc_set_tags(ahc, devinfo, FALSE);
3102
3103 /*
3104 * Resend the identify for this CCB as the target
3105 * may believe that the selection is invalid otherwise.
3106 */
3107 ahc_outb(ahc, SCB_CONTROL,
3108 ahc_inb(ahc, SCB_CONTROL) & ~MSG_SIMPLE_Q_TAG);
3109 scb->hscb->control &= ~MSG_SIMPLE_Q_TAG;
3110 ahc_set_transaction_tag(scb, /*enabled*/FALSE,
3111 /*type*/MSG_SIMPLE_Q_TAG);
3112 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3113 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
3114
3115 /*
3116 * This transaction is now at the head of
3117 * the untagged queue for this target.
3118 */
3119 if ((ahc->flags & AHC_SCB_BTT) == 0) {
3120 struct scb_tailq *untagged_q;
3121
3122 untagged_q =
3123 &(ahc->untagged_queues[devinfo->target_offset]);
3124 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3125 scb->flags |= SCB_UNTAGGEDQ;
3126 }
3127 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3128 scb->hscb->tag);
3129
3130 /*
3131 * Requeue all tagged commands for this target
3132 * currently in our posession so they can be
3133 * converted to untagged commands.
3134 */
3135 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3136 SCB_GET_CHANNEL(ahc, scb),
3137 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3138 ROLE_INITIATOR, CAM_REQUEUE_REQ,
3139 SEARCH_COMPLETE);
3140 } else {
3141 /*
3142 * Otherwise, we ignore it.
3143 */
3144 printf("%s:%c:%d: Message reject for %x -- ignored\n",
3145 ahc_name(ahc), devinfo->channel, devinfo->target,
3146 last_msg);
3147 }
3148 return (response);
3149}
3150
3151/*
3152 * Process an ingnore wide residue message.
3153 */
3154static void
3155ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3156{
3157 u_int scb_index;
3158 struct scb *scb;
3159
3160 scb_index = ahc_inb(ahc, SCB_TAG);
3161 scb = ahc_lookup_scb(ahc, scb_index);
3162 /*
3163 * XXX Actually check data direction in the sequencer?
3164 * Perhaps add datadir to some spare bits in the hscb?
3165 */
3166 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3167 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3168 /*
3169 * Ignore the message if we haven't
3170 * seen an appropriate data phase yet.
3171 */
3172 } else {
3173 /*
3174 * If the residual occurred on the last
3175 * transfer and the transfer request was
3176 * expected to end on an odd count, do
3177 * nothing. Otherwise, subtract a byte
3178 * and update the residual count accordingly.
3179 */
3180 uint32_t sgptr;
3181
3182 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3183 if ((sgptr & SG_LIST_NULL) != 0
3184 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3185 /*
3186 * If the residual occurred on the last
3187 * transfer and the transfer request was
3188 * expected to end on an odd count, do
3189 * nothing.
3190 */
3191 } else {
3192 struct ahc_dma_seg *sg;
3193 uint32_t data_cnt;
3194 uint32_t data_addr;
3195
3196 /* Pull in the rest of the sgptr */
3197 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3198 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3199 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3200 sgptr &= SG_PTR_MASK;
3201 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3202 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3203 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3204
3205 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3206 | (ahc_inb(ahc, SHADDR + 2) << 16)
3207 | (ahc_inb(ahc, SHADDR + 1) << 8)
3208 | (ahc_inb(ahc, SHADDR));
3209
3210 data_cnt += 1;
3211 data_addr -= 1;
3212
3213 sg = ahc_sg_bus_to_virt(scb, sgptr);
3214 /*
3215 * The residual sg ptr points to the next S/G
3216 * to load so we must go back one.
3217 */
3218 sg--;
3219 if (sg != scb->sg_list
3220 && (sg->len & AHC_SG_LEN_MASK) < data_cnt) {
3221
3222 sg--;
3223 data_cnt = 1 | (sg->len & AHC_DMA_LAST_SEG);
3224 data_addr = sg->addr
3225 + (sg->len & AHC_SG_LEN_MASK) - 1;
3226
3227 /*
3228 * Increment sg so it points to the
3229 * "next" sg.
3230 */
3231 sg++;
3232 sgptr = ahc_sg_virt_to_bus(scb, sg);
3233 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
3234 sgptr >> 24);
3235 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
3236 sgptr >> 16);
3237 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
3238 sgptr >> 8);
3239 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3240 }
3241
3242/* XXX What about high address byte??? */
3243 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
3244 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
3245 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
3246 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3247
3248/* XXX Perhaps better to just keep the saved address in sram */
3249 if ((ahc->features & AHC_ULTRA2) != 0) {
3250 ahc_outb(ahc, HADDR + 3, data_addr >> 24);
3251 ahc_outb(ahc, HADDR + 2, data_addr >> 16);
3252 ahc_outb(ahc, HADDR + 1, data_addr >> 8);
3253 ahc_outb(ahc, HADDR, data_addr);
3254 ahc_outb(ahc, DFCNTRL, PRELOADEN);
3255 ahc_outb(ahc, SXFRCTL0,
3256 ahc_inb(ahc, SXFRCTL0) | CLRCHN);
3257 } else {
3258 ahc_outb(ahc, HADDR + 3, data_addr >> 24);
3259 ahc_outb(ahc, HADDR + 2, data_addr >> 16);
3260 ahc_outb(ahc, HADDR + 1, data_addr >> 8);
3261 ahc_outb(ahc, HADDR, data_addr);
3262 }
3263 }
3264 }
3265}
3266
3267/*
3268 * Handle the effects of issuing a bus device reset message.
3269 */
3270static void
3271ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3272 cam_status status, char *message, int verbose_level)
3273{
3274#ifdef AHC_TARGET_MODE
3263 struct tmode_tstate* tstate;
3275 struct ahc_tmode_tstate* tstate;
3264 u_int lun;
3265#endif
3266 int found;
3267
3268 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3269 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3270 status);
3271
3272#ifdef AHC_TARGET_MODE
3273 /*
3274 * Send an immediate notify ccb to all target mord peripheral
3275 * drivers affected by this action.
3276 */
3277 tstate = ahc->enabled_targets[devinfo->our_scsiid];
3278 if (tstate != NULL) {
3279 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
3276 u_int lun;
3277#endif
3278 int found;
3279
3280 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3281 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3282 status);
3283
3284#ifdef AHC_TARGET_MODE
3285 /*
3286 * Send an immediate notify ccb to all target mord peripheral
3287 * drivers affected by this action.
3288 */
3289 tstate = ahc->enabled_targets[devinfo->our_scsiid];
3290 if (tstate != NULL) {
3291 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
3280 struct tmode_lstate* lstate;
3292 struct ahc_tmode_lstate* lstate;
3281
3282 lstate = tstate->enabled_luns[lun];
3283 if (lstate == NULL)
3284 continue;
3285
3286 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
3287 MSG_BUS_DEV_RESET, /*arg*/0);
3288 ahc_send_lstate_events(ahc, lstate);
3289 }
3290 }
3291#endif
3292
3293 /*
3294 * Go back to async/narrow transfers and renegotiate.
3295 */
3296 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3297 AHC_TRANS_CUR, /*paused*/TRUE);
3298 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
3299 /*period*/0, /*offset*/0, /*ppr_options*/0,
3300 AHC_TRANS_CUR, /*paused*/TRUE);
3301
3302 ahc_send_async(ahc, devinfo->channel, devinfo->target,
3303 CAM_LUN_WILDCARD, AC_SENT_BDR);
3304
3305 if (message != NULL
3306 && (verbose_level <= bootverbose))
3307 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
3308 message, devinfo->channel, devinfo->target, found);
3309}
3310
3311#ifdef AHC_TARGET_MODE
3293
3294 lstate = tstate->enabled_luns[lun];
3295 if (lstate == NULL)
3296 continue;
3297
3298 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
3299 MSG_BUS_DEV_RESET, /*arg*/0);
3300 ahc_send_lstate_events(ahc, lstate);
3301 }
3302 }
3303#endif
3304
3305 /*
3306 * Go back to async/narrow transfers and renegotiate.
3307 */
3308 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3309 AHC_TRANS_CUR, /*paused*/TRUE);
3310 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
3311 /*period*/0, /*offset*/0, /*ppr_options*/0,
3312 AHC_TRANS_CUR, /*paused*/TRUE);
3313
3314 ahc_send_async(ahc, devinfo->channel, devinfo->target,
3315 CAM_LUN_WILDCARD, AC_SENT_BDR);
3316
3317 if (message != NULL
3318 && (verbose_level <= bootverbose))
3319 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
3320 message, devinfo->channel, devinfo->target, found);
3321}
3322
3323#ifdef AHC_TARGET_MODE
3312void
3313ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3324static void
3325ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3326 struct scb *scb)
3314{
3327{
3328
3315 /*
3316 * To facilitate adding multiple messages together,
3317 * each routine should increment the index and len
3318 * variables instead of setting them explicitly.
3319 */
3320 ahc->msgout_index = 0;
3321 ahc->msgout_len = 0;
3322
3329 /*
3330 * To facilitate adding multiple messages together,
3331 * each routine should increment the index and len
3332 * variables instead of setting them explicitly.
3333 */
3334 ahc->msgout_index = 0;
3335 ahc->msgout_len = 0;
3336
3323 if ((ahc->targ_msg_req & devinfo->target_mask) != 0)
3337 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
3324 ahc_build_transfer_msg(ahc, devinfo);
3325 else
3326 panic("ahc_intr: AWAITING target message with no message");
3327
3328 ahc->msgout_index = 0;
3329 ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3330}
3331#endif
3332/**************************** Initialization **********************************/
3333/*
3334 * Allocate a controller structure for a new device
3335 * and perform initial initializion.
3336 */
3337struct ahc_softc *
3338ahc_alloc(void *platform_arg, char *name)
3339{
3340 struct ahc_softc *ahc;
3341 int i;
3342
3343#ifndef __FreeBSD__
3344 ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
3345 if (!ahc) {
3346 printf("aic7xxx: cannot malloc softc!\n");
3347 free(name, M_DEVBUF);
3348 return NULL;
3349 }
3350#else
3351 ahc = device_get_softc((device_t)platform_arg);
3352#endif
3353 memset(ahc, 0, sizeof(*ahc));
3354 LIST_INIT(&ahc->pending_scbs);
3355 /* We don't know our unit number until the OSM sets it */
3356 ahc->name = name;
3338 ahc_build_transfer_msg(ahc, devinfo);
3339 else
3340 panic("ahc_intr: AWAITING target message with no message");
3341
3342 ahc->msgout_index = 0;
3343 ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3344}
3345#endif
3346/**************************** Initialization **********************************/
3347/*
3348 * Allocate a controller structure for a new device
3349 * and perform initial initializion.
3350 */
3351struct ahc_softc *
3352ahc_alloc(void *platform_arg, char *name)
3353{
3354 struct ahc_softc *ahc;
3355 int i;
3356
3357#ifndef __FreeBSD__
3358 ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
3359 if (!ahc) {
3360 printf("aic7xxx: cannot malloc softc!\n");
3361 free(name, M_DEVBUF);
3362 return NULL;
3363 }
3364#else
3365 ahc = device_get_softc((device_t)platform_arg);
3366#endif
3367 memset(ahc, 0, sizeof(*ahc));
3368 LIST_INIT(&ahc->pending_scbs);
3369 /* We don't know our unit number until the OSM sets it */
3370 ahc->name = name;
3371 ahc->unit = -1;
3357 for (i = 0; i < 16; i++)
3358 TAILQ_INIT(&ahc->untagged_queues[i]);
3359 if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3360 ahc_free(ahc);
3361 ahc = NULL;
3362 }
3363 return (ahc);
3364}
3365
3366int
3367ahc_softc_init(struct ahc_softc *ahc, struct ahc_probe_config *config)
3368{
3369
3370 ahc->chip = config->chip;
3371 ahc->features = config->features;
3372 ahc->bugs = config->bugs;
3373 ahc->flags = config->flags;
3374 ahc->channel = config->channel;
3372 for (i = 0; i < 16; i++)
3373 TAILQ_INIT(&ahc->untagged_queues[i]);
3374 if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3375 ahc_free(ahc);
3376 ahc = NULL;
3377 }
3378 return (ahc);
3379}
3380
3381int
3382ahc_softc_init(struct ahc_softc *ahc, struct ahc_probe_config *config)
3383{
3384
3385 ahc->chip = config->chip;
3386 ahc->features = config->features;
3387 ahc->bugs = config->bugs;
3388 ahc->flags = config->flags;
3389 ahc->channel = config->channel;
3375 ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN;
3390 ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS);
3376 ahc->description = config->description;
3377 /* The IRQMS bit is only valid on VL and EISA chips */
3378 if ((ahc->chip & AHC_PCI) != 0)
3379 ahc->unpause &= ~IRQMS;
3380 ahc->pause = ahc->unpause | PAUSE;
3381 /* XXX The shared scb data stuff should be deprecated */
3382 if (ahc->scb_data == NULL) {
3383 ahc->scb_data = malloc(sizeof(*ahc->scb_data),
3384 M_DEVBUF, M_NOWAIT);
3385 if (ahc->scb_data == NULL)
3386 return (ENOMEM);
3387 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
3388 }
3389
3390 return (0);
3391}
3392
3393void
3394ahc_softc_insert(struct ahc_softc *ahc)
3395{
3396 struct ahc_softc *list_ahc;
3397
3398#if AHC_PCI_CONFIG > 0
3399 /*
3400 * Second Function PCI devices need to inherit some
3401 * settings from function 0.
3402 */
3403 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
3404 && (ahc->features & AHC_MULTI_FUNC) != 0) {
3405 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3406 ahc_dev_softc_t list_pci;
3407 ahc_dev_softc_t pci;
3408
3409 list_pci = list_ahc->dev_softc;
3410 pci = ahc->dev_softc;
3411 if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
3412 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
3413 struct ahc_softc *master;
3414 struct ahc_softc *slave;
3415
3416 if (ahc_get_pci_function(list_pci) == 0) {
3417 master = list_ahc;
3418 slave = ahc;
3419 } else {
3420 master = ahc;
3421 slave = list_ahc;
3422 }
3423 slave->flags &= ~AHC_BIOS_ENABLED;
3424 slave->flags |=
3425 master->flags & AHC_BIOS_ENABLED;
3426 slave->flags &= ~AHC_PRIMARY_CHANNEL;
3427 slave->flags |=
3428 master->flags & AHC_PRIMARY_CHANNEL;
3429 break;
3430 }
3431 }
3432 }
3433#endif
3434
3435 /*
3436 * Insertion sort into our list of softcs.
3437 */
3438 list_ahc = TAILQ_FIRST(&ahc_tailq);
3439 while (list_ahc != NULL
3440 && ahc_softc_comp(list_ahc, ahc) <= 0)
3441 list_ahc = TAILQ_NEXT(list_ahc, links);
3442 if (list_ahc != NULL)
3443 TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
3444 else
3445 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
3446 ahc->init_level++;
3447}
3448
3449void
3450ahc_set_unit(struct ahc_softc *ahc, int unit)
3451{
3452 ahc->unit = unit;
3453}
3454
3455void
3456ahc_set_name(struct ahc_softc *ahc, char *name)
3457{
3458 if (ahc->name != NULL)
3459 free(ahc->name, M_DEVBUF);
3460 ahc->name = name;
3461}
3462
3463void
3464ahc_free(struct ahc_softc *ahc)
3465{
3466 int i;
3467
3468 ahc_fini_scbdata(ahc);
3469 switch (ahc->init_level) {
3470 default:
3471 case 5:
3472 ahc_shutdown(ahc);
3473 TAILQ_REMOVE(&ahc_tailq, ahc, links);
3474 /* FALLTHROUGH */
3475 case 4:
3476 ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
3477 ahc->shared_data_dmamap);
3478 /* FALLTHROUGH */
3479 case 3:
3480 ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
3481 ahc->shared_data_dmamap);
3482 ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
3483 ahc->shared_data_dmamap);
3484 /* FALLTHROUGH */
3485 case 2:
3486 ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
3487 case 1:
3488#ifndef __linux__
3489 ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
3490#endif
3491 break;
3492 case 0:
3493 break;
3494 }
3495
3496#ifndef __linux__
3497 ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
3498#endif
3499 ahc_platform_free(ahc);
3500 for (i = 0; i < AHC_NUM_TARGETS; i++) {
3391 ahc->description = config->description;
3392 /* The IRQMS bit is only valid on VL and EISA chips */
3393 if ((ahc->chip & AHC_PCI) != 0)
3394 ahc->unpause &= ~IRQMS;
3395 ahc->pause = ahc->unpause | PAUSE;
3396 /* XXX The shared scb data stuff should be deprecated */
3397 if (ahc->scb_data == NULL) {
3398 ahc->scb_data = malloc(sizeof(*ahc->scb_data),
3399 M_DEVBUF, M_NOWAIT);
3400 if (ahc->scb_data == NULL)
3401 return (ENOMEM);
3402 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
3403 }
3404
3405 return (0);
3406}
3407
3408void
3409ahc_softc_insert(struct ahc_softc *ahc)
3410{
3411 struct ahc_softc *list_ahc;
3412
3413#if AHC_PCI_CONFIG > 0
3414 /*
3415 * Second Function PCI devices need to inherit some
3416 * settings from function 0.
3417 */
3418 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
3419 && (ahc->features & AHC_MULTI_FUNC) != 0) {
3420 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3421 ahc_dev_softc_t list_pci;
3422 ahc_dev_softc_t pci;
3423
3424 list_pci = list_ahc->dev_softc;
3425 pci = ahc->dev_softc;
3426 if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
3427 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
3428 struct ahc_softc *master;
3429 struct ahc_softc *slave;
3430
3431 if (ahc_get_pci_function(list_pci) == 0) {
3432 master = list_ahc;
3433 slave = ahc;
3434 } else {
3435 master = ahc;
3436 slave = list_ahc;
3437 }
3438 slave->flags &= ~AHC_BIOS_ENABLED;
3439 slave->flags |=
3440 master->flags & AHC_BIOS_ENABLED;
3441 slave->flags &= ~AHC_PRIMARY_CHANNEL;
3442 slave->flags |=
3443 master->flags & AHC_PRIMARY_CHANNEL;
3444 break;
3445 }
3446 }
3447 }
3448#endif
3449
3450 /*
3451 * Insertion sort into our list of softcs.
3452 */
3453 list_ahc = TAILQ_FIRST(&ahc_tailq);
3454 while (list_ahc != NULL
3455 && ahc_softc_comp(list_ahc, ahc) <= 0)
3456 list_ahc = TAILQ_NEXT(list_ahc, links);
3457 if (list_ahc != NULL)
3458 TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
3459 else
3460 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
3461 ahc->init_level++;
3462}
3463
3464void
3465ahc_set_unit(struct ahc_softc *ahc, int unit)
3466{
3467 ahc->unit = unit;
3468}
3469
3470void
3471ahc_set_name(struct ahc_softc *ahc, char *name)
3472{
3473 if (ahc->name != NULL)
3474 free(ahc->name, M_DEVBUF);
3475 ahc->name = name;
3476}
3477
3478void
3479ahc_free(struct ahc_softc *ahc)
3480{
3481 int i;
3482
3483 ahc_fini_scbdata(ahc);
3484 switch (ahc->init_level) {
3485 default:
3486 case 5:
3487 ahc_shutdown(ahc);
3488 TAILQ_REMOVE(&ahc_tailq, ahc, links);
3489 /* FALLTHROUGH */
3490 case 4:
3491 ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
3492 ahc->shared_data_dmamap);
3493 /* FALLTHROUGH */
3494 case 3:
3495 ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
3496 ahc->shared_data_dmamap);
3497 ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
3498 ahc->shared_data_dmamap);
3499 /* FALLTHROUGH */
3500 case 2:
3501 ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
3502 case 1:
3503#ifndef __linux__
3504 ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
3505#endif
3506 break;
3507 case 0:
3508 break;
3509 }
3510
3511#ifndef __linux__
3512 ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
3513#endif
3514 ahc_platform_free(ahc);
3515 for (i = 0; i < AHC_NUM_TARGETS; i++) {
3501 struct tmode_tstate *tstate;
3516 struct ahc_tmode_tstate *tstate;
3502
3503 tstate = ahc->enabled_targets[i];
3504 if (tstate != NULL) {
3505#if AHC_TARGET_MODE
3506 int j;
3507
3508 for (j = 0; j < AHC_NUM_LUNS; j++) {
3517
3518 tstate = ahc->enabled_targets[i];
3519 if (tstate != NULL) {
3520#if AHC_TARGET_MODE
3521 int j;
3522
3523 for (j = 0; j < AHC_NUM_LUNS; j++) {
3509 struct tmode_lstate *lstate;
3524 struct ahc_tmode_lstate *lstate;
3510
3511 lstate = tstate->enabled_luns[j];
3512 if (lstate != NULL) {
3513 xpt_free_path(lstate->path);
3514 free(lstate, M_DEVBUF);
3515 }
3516 }
3517#endif
3518 free(tstate, M_DEVBUF);
3519 }
3520 }
3521#if AHC_TARGET_MODE
3522 if (ahc->black_hole != NULL) {
3523 xpt_free_path(ahc->black_hole->path);
3524 free(ahc->black_hole, M_DEVBUF);
3525 }
3526#endif
3527 if (ahc->name != NULL)
3528 free(ahc->name, M_DEVBUF);
3529#ifndef __FreeBSD__
3530 free(ahc, M_DEVBUF);
3531#endif
3532 return;
3533}
3534
3535void
3536ahc_shutdown(void *arg)
3537{
3538 struct ahc_softc *ahc;
3539 int i;
3540
3541 ahc = (struct ahc_softc *)arg;
3542
3543 /* This will reset most registers to 0, but not all */
3544 ahc_reset(ahc);
3545 ahc_outb(ahc, SCSISEQ, 0);
3546 ahc_outb(ahc, SXFRCTL0, 0);
3547 ahc_outb(ahc, DSPCISTATUS, 0);
3548
3549 for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++)
3550 ahc_outb(ahc, i, 0);
3551}
3552
3553/*
3554 * Reset the controller and record some information about it
3555 * that is only availabel just after a reset.
3556 */
3557int
3558ahc_reset(struct ahc_softc *ahc)
3559{
3560 u_int sblkctl;
3561 u_int sxfrctl1_a, sxfrctl1_b;
3562 int wait;
3563
3564 /*
3565 * Preserve the value of the SXFRCTL1 register for all channels.
3566 * It contains settings that affect termination and we don't want
3567 * to disturb the integrity of the bus.
3568 */
3569 ahc_pause(ahc);
3570 sxfrctl1_b = 0;
3571 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
3572 u_int sblkctl;
3573
3574 /*
3575 * Save channel B's settings in case this chip
3576 * is setup for TWIN channel operation.
3577 */
3578 sblkctl = ahc_inb(ahc, SBLKCTL);
3579 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3580 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
3581 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3582 }
3583 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
3584
3585 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
3586
3587 /*
3588 * Ensure that the reset has finished
3589 */
3590 wait = 1000;
3591 do {
3592 ahc_delay(1000);
3593 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
3594
3595 if (wait == 0) {
3596 printf("%s: WARNING - Failed chip reset! "
3597 "Trying to initialize anyway.\n", ahc_name(ahc));
3598 }
3599 ahc_outb(ahc, HCNTRL, ahc->pause);
3600
3601 /* Determine channel configuration */
3602 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
3603 /* No Twin Channel PCI cards */
3604 if ((ahc->chip & AHC_PCI) != 0)
3605 sblkctl &= ~SELBUSB;
3606 switch (sblkctl) {
3607 case 0:
3608 /* Single Narrow Channel */
3609 break;
3610 case 2:
3611 /* Wide Channel */
3612 ahc->features |= AHC_WIDE;
3613 break;
3614 case 8:
3615 /* Twin Channel */
3616 ahc->features |= AHC_TWIN;
3617 break;
3618 default:
3619 printf(" Unsupported adapter type. Ignoring\n");
3620 return(-1);
3621 }
3622
3623 /*
3624 * Reload sxfrctl1.
3625 *
3626 * We must always initialize STPWEN to 1 before we
3627 * restore the saved values. STPWEN is initialized
3628 * to a tri-state condition which can only be cleared
3629 * by turning it on.
3630 */
3631 if ((ahc->features & AHC_TWIN) != 0) {
3632 u_int sblkctl;
3633
3634 sblkctl = ahc_inb(ahc, SBLKCTL);
3635 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3636 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
3637 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3638 }
3639 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
3640
3641#ifdef AHC_DUMP_SEQ
3642 if (ahc->init_level == 0)
3643 ahc_dumpseq(ahc);
3644#endif
3645
3646 return (0);
3647}
3648
3649/*
3650 * Determine the number of SCBs available on the controller
3651 */
3652int
3653ahc_probe_scbs(struct ahc_softc *ahc) {
3654 int i;
3655
3656 for (i = 0; i < AHC_SCB_MAX; i++) {
3657
3658 ahc_outb(ahc, SCBPTR, i);
3659 ahc_outb(ahc, SCB_BASE, i);
3660 if (ahc_inb(ahc, SCB_BASE) != i)
3661 break;
3662 ahc_outb(ahc, SCBPTR, 0);
3663 if (ahc_inb(ahc, SCB_BASE) != 0)
3664 break;
3665 }
3666 return (i);
3667}
3668
3669void
3670ahc_init_probe_config(struct ahc_probe_config *probe_config)
3671{
3672 probe_config->description = NULL;
3673 probe_config->channel = 'A';
3674 probe_config->channel_b = 'B';
3675 probe_config->chip = AHC_NONE;
3676 probe_config->features = AHC_FENONE;
3677 probe_config->bugs = AHC_BUGNONE;
3678 probe_config->flags = AHC_FNONE;
3679}
3680
3681static void
3682ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3683{
3684 bus_addr_t *baddr;
3685
3686 baddr = (bus_addr_t *)arg;
3687 *baddr = segs->ds_addr;
3688}
3689
3690static void
3691ahc_build_free_scb_list(struct ahc_softc *ahc)
3692{
3693 int i;
3694
3695 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
3696 ahc_outb(ahc, SCBPTR, i);
3697
3698 /* Clear the control byte. */
3699 ahc_outb(ahc, SCB_CONTROL, 0);
3700
3701 /* Set the next pointer */
3702 if ((ahc->flags & AHC_PAGESCBS) != 0)
3703 ahc_outb(ahc, SCB_NEXT, i+1);
3704 else
3705 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3706
3707 /* Make the tag number invalid */
3708 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
3709 }
3710
3711 /* Make sure that the last SCB terminates the free list */
3712 ahc_outb(ahc, SCBPTR, i-1);
3713 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3714
3715 /* Ensure we clear the 0 SCB's control byte. */
3716 ahc_outb(ahc, SCBPTR, 0);
3717 ahc_outb(ahc, SCB_CONTROL, 0);
3718}
3719
3720static int
3721ahc_init_scbdata(struct ahc_softc *ahc)
3722{
3723 struct scb_data *scb_data;
3724
3725 scb_data = ahc->scb_data;
3726 SLIST_INIT(&scb_data->free_scbs);
3727 SLIST_INIT(&scb_data->sg_maps);
3728
3729 /* Allocate SCB resources */
3730 scb_data->scbarray =
3731 (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
3732 M_DEVBUF, M_NOWAIT);
3733 if (scb_data->scbarray == NULL)
3734 return (ENOMEM);
3735 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX);
3736
3737 /* Determine the number of hardware SCBs and initialize them */
3738
3739 scb_data->maxhscbs = ahc_probe_scbs(ahc);
3740 if ((ahc->flags & AHC_PAGESCBS) != 0) {
3741 /* SCB 0 heads the free list */
3742 ahc_outb(ahc, FREE_SCBH, 0);
3743 } else {
3744 ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
3745 }
3746
3747 if (ahc->scb_data->maxhscbs == 0) {
3748 printf("%s: No SCB space found\n", ahc_name(ahc));
3749 return (ENXIO);
3750 }
3751
3752 ahc_build_free_scb_list(ahc);
3753
3754 /*
3755 * Create our DMA tags. These tags define the kinds of device
3756 * accessible memory allocations and memory mappings we will
3757 * need to perform during normal operation.
3758 *
3759 * Unless we need to further restrict the allocation, we rely
3760 * on the restrictions of the parent dmat, hence the common
3761 * use of MAXADDR and MAXSIZE.
3762 */
3763
3764 /* DMA tag for our hardware scb structures */
3765 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3766 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3767 /*highaddr*/BUS_SPACE_MAXADDR,
3768 /*filter*/NULL, /*filterarg*/NULL,
3769 AHC_SCB_MAX * sizeof(struct hardware_scb),
3770 /*nsegments*/1,
3771 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3772 /*flags*/0, &scb_data->hscb_dmat) != 0) {
3773 goto error_exit;
3774 }
3775
3776 scb_data->init_level++;
3777
3778 /* Allocation for our ccbs */
3779 if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
3780 (void **)&scb_data->hscbs,
3781 BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
3782 goto error_exit;
3783 }
3784
3785 scb_data->init_level++;
3786
3787 /* And permanently map them */
3788 ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
3789 scb_data->hscbs,
3790 AHC_SCB_MAX * sizeof(struct hardware_scb),
3791 ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
3792
3793 scb_data->init_level++;
3794
3795 /* DMA tag for our sense buffers */
3796 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3797 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3798 /*highaddr*/BUS_SPACE_MAXADDR,
3799 /*filter*/NULL, /*filterarg*/NULL,
3800 AHC_SCB_MAX * sizeof(struct scsi_sense_data),
3801 /*nsegments*/1,
3802 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3803 /*flags*/0, &scb_data->sense_dmat) != 0) {
3804 goto error_exit;
3805 }
3806
3807 scb_data->init_level++;
3808
3809 /* Allocate them */
3810 if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
3811 (void **)&scb_data->sense,
3812 BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
3813 goto error_exit;
3814 }
3815
3816 scb_data->init_level++;
3817
3818 /* And permanently map them */
3819 ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
3820 scb_data->sense,
3821 AHC_SCB_MAX * sizeof(struct scsi_sense_data),
3822 ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
3823
3824 scb_data->init_level++;
3825
3826 /* DMA tag for our S/G structures. We allocate in page sized chunks */
3827 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3828 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3829 /*highaddr*/BUS_SPACE_MAXADDR,
3830 /*filter*/NULL, /*filterarg*/NULL,
3831 PAGE_SIZE, /*nsegments*/1,
3832 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3833 /*flags*/0, &scb_data->sg_dmat) != 0) {
3834 goto error_exit;
3835 }
3836
3837 scb_data->init_level++;
3838
3839 /* Perform initial CCB allocation */
3840 memset(scb_data->hscbs, 0, AHC_SCB_MAX * sizeof(struct hardware_scb));
3841 ahc_alloc_scbs(ahc);
3842
3843 if (scb_data->numscbs == 0) {
3844 printf("%s: ahc_init_scbdata - "
3845 "Unable to allocate initial scbs\n",
3846 ahc_name(ahc));
3847 goto error_exit;
3848 }
3849
3850 /*
3851 * Tell the sequencer which SCB will be the next one it receives.
3852 */
3853 ahc->next_queued_scb = ahc_get_scb(ahc);
3854 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
3855
3856 /*
3857 * Note that we were successfull
3858 */
3859 return (0);
3860
3861error_exit:
3862
3863 return (ENOMEM);
3864}
3865
3866static void
3867ahc_fini_scbdata(struct ahc_softc *ahc)
3868{
3869 struct scb_data *scb_data;
3870
3871 scb_data = ahc->scb_data;
3872 if (scb_data == NULL)
3873 return;
3874
3875 switch (scb_data->init_level) {
3876 default:
3877 case 7:
3878 {
3879 struct sg_map_node *sg_map;
3880
3881 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
3882 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
3883 ahc_dmamap_unload(ahc, scb_data->sg_dmat,
3884 sg_map->sg_dmamap);
3885 ahc_dmamem_free(ahc, scb_data->sg_dmat,
3886 sg_map->sg_vaddr,
3887 sg_map->sg_dmamap);
3888 free(sg_map, M_DEVBUF);
3889 }
3890 ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
3891 }
3892 case 6:
3893 ahc_dmamap_unload(ahc, scb_data->sense_dmat,
3894 scb_data->sense_dmamap);
3895 case 5:
3896 ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
3897 scb_data->sense_dmamap);
3898 ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
3899 scb_data->sense_dmamap);
3900 case 4:
3901 ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
3902 case 3:
3903 ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
3904 scb_data->hscb_dmamap);
3905 case 2:
3906 ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
3907 scb_data->hscb_dmamap);
3908 ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
3909 scb_data->hscb_dmamap);
3910 case 1:
3911 ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
3912 break;
3913 case 0:
3914 break;
3915 }
3916 if (scb_data->scbarray != NULL)
3917 free(scb_data->scbarray, M_DEVBUF);
3918}
3919
3920void
3921ahc_alloc_scbs(struct ahc_softc *ahc)
3922{
3923 struct scb_data *scb_data;
3924 struct scb *next_scb;
3925 struct sg_map_node *sg_map;
3926 bus_addr_t physaddr;
3927 struct ahc_dma_seg *segs;
3928 int newcount;
3929 int i;
3930
3931 scb_data = ahc->scb_data;
3932 if (scb_data->numscbs >= AHC_SCB_MAX)
3933 /* Can't allocate any more */
3934 return;
3935
3936 next_scb = &scb_data->scbarray[scb_data->numscbs];
3937
3938 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
3939
3940 if (sg_map == NULL)
3941 return;
3942
3943 /* Allocate S/G space for the next batch of SCBS */
3944 if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
3945 (void **)&sg_map->sg_vaddr,
3946 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
3947 free(sg_map, M_DEVBUF);
3948 return;
3949 }
3950
3951 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
3952
3953 ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
3954 sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
3955 &sg_map->sg_physaddr, /*flags*/0);
3956
3957 segs = sg_map->sg_vaddr;
3958 physaddr = sg_map->sg_physaddr;
3959
3960 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
3961 for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) {
3962 struct scb_platform_data *pdata;
3963#ifndef __linux__
3964 int error;
3965#endif
3966 pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
3967 M_DEVBUF, M_NOWAIT);
3968 if (pdata == NULL)
3969 break;
3970 next_scb->platform_data = pdata;
3971 next_scb->sg_list = segs;
3972 /*
3973 * The sequencer always starts with the second entry.
3974 * The first entry is embedded in the scb.
3975 */
3976 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
3977 next_scb->ahc_softc = ahc;
3978 next_scb->flags = SCB_FREE;
3979#ifndef __linux__
3980 error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
3981 &next_scb->dmamap);
3982 if (error != 0)
3983 break;
3984#endif
3985 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
3986 next_scb->hscb->tag = ahc->scb_data->numscbs;
3987 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
3988 next_scb, links.sle);
3989 segs += AHC_NSEG;
3990 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
3991 next_scb++;
3992 ahc->scb_data->numscbs++;
3993 }
3994}
3995
3996void
3997ahc_controller_info(struct ahc_softc *ahc, char *buf)
3998{
3999 int len;
4000
4001 len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
4002 buf += len;
4003 if ((ahc->features & AHC_TWIN) != 0)
4004 len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
4005 "B SCSI Id=%d, primary %c, ",
4006 ahc->our_id, ahc->our_id_b,
4007 (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
4008 else {
4009 const char *type;
4010
4011 if ((ahc->features & AHC_WIDE) != 0) {
4012 type = "Wide";
4013 } else {
4014 type = "Single";
4015 }
4016 len = sprintf(buf, "%s Channel %c, SCSI Id=%d, ",
4017 type, ahc->channel, ahc->our_id);
4018 }
4019 buf += len;
4020
4021 if ((ahc->flags & AHC_PAGESCBS) != 0)
4022 sprintf(buf, "%d/%d SCBs",
4023 ahc->scb_data->maxhscbs, AHC_SCB_MAX);
4024 else
4025 sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
4026}
4027
4028/*
4029 * Start the board, ready for normal operation
4030 */
4031int
4032ahc_init(struct ahc_softc *ahc)
4033{
4034 int max_targ;
4035 int i;
4036 int term;
4037 u_int scsi_conf;
4038 u_int scsiseq_template;
4039 u_int ultraenb;
4040 u_int discenable;
4041 u_int tagenable;
4042 size_t driver_data_size;
4043 uint32_t physaddr;
4044
4045#ifdef AHC_DEBUG_SEQUENCER
4046 ahc->flags |= AHC_SEQUENCER_DEBUG;
4047#endif
4048
4049#ifdef AHC_PRINT_SRAM
4050 printf("Scratch Ram:");
4051 for (i = 0x20; i < 0x5f; i++) {
4052 if (((i % 8) == 0) && (i != 0)) {
4053 printf ("\n ");
4054 }
4055 printf (" 0x%x", ahc_inb(ahc, i));
4056 }
4057 if ((ahc->features & AHC_MORE_SRAM) != 0) {
4058 for (i = 0x70; i < 0x7f; i++) {
4059 if (((i % 8) == 0) && (i != 0)) {
4060 printf ("\n ");
4061 }
4062 printf (" 0x%x", ahc_inb(ahc, i));
4063 }
4064 }
4065 printf ("\n");
4066#endif
4067 max_targ = 15;
4068
4069 /*
4070 * Assume we have a board at this stage and it has been reset.
4071 */
4072 if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4073 ahc->our_id = ahc->our_id_b = 7;
4074
4075 /*
4076 * Default to allowing initiator operations.
4077 */
4078 ahc->flags |= AHC_INITIATORROLE;
4079
4080 /*
4081 * Only allow target mode features if this unit has them enabled.
4082 */
4083 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
4084 ahc->features &= ~AHC_TARGETMODE;
4085
4086#ifndef __linux__
4087 /* DMA tag for mapping buffers into device visible space. */
4088 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4089 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
4090 /*highaddr*/BUS_SPACE_MAXADDR,
4091 /*filter*/NULL, /*filterarg*/NULL,
4092 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG,
4093 /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4094 /*flags*/BUS_DMA_ALLOCNOW,
4095 &ahc->buffer_dmat) != 0) {
4096 return (ENOMEM);
4097 }
4098#endif
4099
4100 ahc->init_level++;
4101
4102 /*
4103 * DMA tag for our command fifos and other data in system memory
4104 * the card's sequencer must be able to access. For initiator
4105 * roles, we need to allocate space for the the qinfifo and qoutfifo.
4106 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4107 * When providing for the target mode role, we must additionally
4108 * provide space for the incoming target command fifo and an extra
4109 * byte to deal with a dma bug in some chip versions.
4110 */
4111 driver_data_size = 2 * 256 * sizeof(uint8_t);
4112 if ((ahc->features & AHC_TARGETMODE) != 0)
4113 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4114 + /*DMA WideOdd Bug Buffer*/1;
4115 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4116 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
4117 /*highaddr*/BUS_SPACE_MAXADDR,
4118 /*filter*/NULL, /*filterarg*/NULL,
4119 driver_data_size,
4120 /*nsegments*/1,
4121 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4122 /*flags*/0, &ahc->shared_data_dmat) != 0) {
4123 return (ENOMEM);
4124 }
4125
4126 ahc->init_level++;
4127
4128 /* Allocation of driver data */
4129 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
4130 (void **)&ahc->qoutfifo,
4131 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4132 return (ENOMEM);
4133 }
4134
4135 ahc->init_level++;
4136
4137 /* And permanently map it in */
4138 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
4139 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
4140 &ahc->shared_data_busaddr, /*flags*/0);
4141
4142 if ((ahc->features & AHC_TARGETMODE) != 0) {
4143 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4144 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
4145 ahc->dma_bug_buf = ahc->shared_data_busaddr
4146 + driver_data_size - 1;
4147 /* All target command blocks start out invalid. */
4148 for (i = 0; i < AHC_TMODE_CMDS; i++)
4149 ahc->targetcmds[i].cmd_valid = 0;
4150 ahc->tqinfifonext = 1;
4151 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4152 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4153 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4154 }
4155 ahc->qinfifo = &ahc->qoutfifo[256];
4156
4157 ahc->init_level++;
4158
4159 /* Allocate SCB data now that buffer_dmat is initialized */
4160 if (ahc->scb_data->maxhscbs == 0)
4161 if (ahc_init_scbdata(ahc) != 0)
4162 return (ENOMEM);
4163
4164 /*
4165 * Allocate a tstate to house information for our
4166 * initiator presence on the bus as well as the user
4167 * data for any target mode initiator.
4168 */
4169 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
3525
3526 lstate = tstate->enabled_luns[j];
3527 if (lstate != NULL) {
3528 xpt_free_path(lstate->path);
3529 free(lstate, M_DEVBUF);
3530 }
3531 }
3532#endif
3533 free(tstate, M_DEVBUF);
3534 }
3535 }
3536#if AHC_TARGET_MODE
3537 if (ahc->black_hole != NULL) {
3538 xpt_free_path(ahc->black_hole->path);
3539 free(ahc->black_hole, M_DEVBUF);
3540 }
3541#endif
3542 if (ahc->name != NULL)
3543 free(ahc->name, M_DEVBUF);
3544#ifndef __FreeBSD__
3545 free(ahc, M_DEVBUF);
3546#endif
3547 return;
3548}
3549
3550void
3551ahc_shutdown(void *arg)
3552{
3553 struct ahc_softc *ahc;
3554 int i;
3555
3556 ahc = (struct ahc_softc *)arg;
3557
3558 /* This will reset most registers to 0, but not all */
3559 ahc_reset(ahc);
3560 ahc_outb(ahc, SCSISEQ, 0);
3561 ahc_outb(ahc, SXFRCTL0, 0);
3562 ahc_outb(ahc, DSPCISTATUS, 0);
3563
3564 for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++)
3565 ahc_outb(ahc, i, 0);
3566}
3567
3568/*
3569 * Reset the controller and record some information about it
3570 * that is only availabel just after a reset.
3571 */
3572int
3573ahc_reset(struct ahc_softc *ahc)
3574{
3575 u_int sblkctl;
3576 u_int sxfrctl1_a, sxfrctl1_b;
3577 int wait;
3578
3579 /*
3580 * Preserve the value of the SXFRCTL1 register for all channels.
3581 * It contains settings that affect termination and we don't want
3582 * to disturb the integrity of the bus.
3583 */
3584 ahc_pause(ahc);
3585 sxfrctl1_b = 0;
3586 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
3587 u_int sblkctl;
3588
3589 /*
3590 * Save channel B's settings in case this chip
3591 * is setup for TWIN channel operation.
3592 */
3593 sblkctl = ahc_inb(ahc, SBLKCTL);
3594 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3595 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
3596 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3597 }
3598 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
3599
3600 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
3601
3602 /*
3603 * Ensure that the reset has finished
3604 */
3605 wait = 1000;
3606 do {
3607 ahc_delay(1000);
3608 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
3609
3610 if (wait == 0) {
3611 printf("%s: WARNING - Failed chip reset! "
3612 "Trying to initialize anyway.\n", ahc_name(ahc));
3613 }
3614 ahc_outb(ahc, HCNTRL, ahc->pause);
3615
3616 /* Determine channel configuration */
3617 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
3618 /* No Twin Channel PCI cards */
3619 if ((ahc->chip & AHC_PCI) != 0)
3620 sblkctl &= ~SELBUSB;
3621 switch (sblkctl) {
3622 case 0:
3623 /* Single Narrow Channel */
3624 break;
3625 case 2:
3626 /* Wide Channel */
3627 ahc->features |= AHC_WIDE;
3628 break;
3629 case 8:
3630 /* Twin Channel */
3631 ahc->features |= AHC_TWIN;
3632 break;
3633 default:
3634 printf(" Unsupported adapter type. Ignoring\n");
3635 return(-1);
3636 }
3637
3638 /*
3639 * Reload sxfrctl1.
3640 *
3641 * We must always initialize STPWEN to 1 before we
3642 * restore the saved values. STPWEN is initialized
3643 * to a tri-state condition which can only be cleared
3644 * by turning it on.
3645 */
3646 if ((ahc->features & AHC_TWIN) != 0) {
3647 u_int sblkctl;
3648
3649 sblkctl = ahc_inb(ahc, SBLKCTL);
3650 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3651 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
3652 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3653 }
3654 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
3655
3656#ifdef AHC_DUMP_SEQ
3657 if (ahc->init_level == 0)
3658 ahc_dumpseq(ahc);
3659#endif
3660
3661 return (0);
3662}
3663
3664/*
3665 * Determine the number of SCBs available on the controller
3666 */
3667int
3668ahc_probe_scbs(struct ahc_softc *ahc) {
3669 int i;
3670
3671 for (i = 0; i < AHC_SCB_MAX; i++) {
3672
3673 ahc_outb(ahc, SCBPTR, i);
3674 ahc_outb(ahc, SCB_BASE, i);
3675 if (ahc_inb(ahc, SCB_BASE) != i)
3676 break;
3677 ahc_outb(ahc, SCBPTR, 0);
3678 if (ahc_inb(ahc, SCB_BASE) != 0)
3679 break;
3680 }
3681 return (i);
3682}
3683
3684void
3685ahc_init_probe_config(struct ahc_probe_config *probe_config)
3686{
3687 probe_config->description = NULL;
3688 probe_config->channel = 'A';
3689 probe_config->channel_b = 'B';
3690 probe_config->chip = AHC_NONE;
3691 probe_config->features = AHC_FENONE;
3692 probe_config->bugs = AHC_BUGNONE;
3693 probe_config->flags = AHC_FNONE;
3694}
3695
3696static void
3697ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3698{
3699 bus_addr_t *baddr;
3700
3701 baddr = (bus_addr_t *)arg;
3702 *baddr = segs->ds_addr;
3703}
3704
3705static void
3706ahc_build_free_scb_list(struct ahc_softc *ahc)
3707{
3708 int i;
3709
3710 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
3711 ahc_outb(ahc, SCBPTR, i);
3712
3713 /* Clear the control byte. */
3714 ahc_outb(ahc, SCB_CONTROL, 0);
3715
3716 /* Set the next pointer */
3717 if ((ahc->flags & AHC_PAGESCBS) != 0)
3718 ahc_outb(ahc, SCB_NEXT, i+1);
3719 else
3720 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3721
3722 /* Make the tag number invalid */
3723 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
3724 }
3725
3726 /* Make sure that the last SCB terminates the free list */
3727 ahc_outb(ahc, SCBPTR, i-1);
3728 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3729
3730 /* Ensure we clear the 0 SCB's control byte. */
3731 ahc_outb(ahc, SCBPTR, 0);
3732 ahc_outb(ahc, SCB_CONTROL, 0);
3733}
3734
/*
 * Allocate and initialize the controller's SCB bookkeeping: the
 * kernel SCB array, the hardware SCB DMA area, the sense buffer DMA
 * area, and the page-sized S/G chunk DMA tag, then perform the
 * initial SCB allocation.
 *
 * scb_data->init_level is incremented after each resource is
 * acquired so that ahc_fini_scbdata() can unwind exactly as far as
 * initialization progressed.
 *
 * Returns 0 on success, ENXIO if the chip exposes no SCB space, and
 * ENOMEM on any allocation failure.  NOTE(review): the error_exit
 * path frees nothing here; it appears to rely on the caller invoking
 * ahc_fini_scbdata() (which switches on init_level) to release any
 * partially initialized state — confirm against callers.
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources (zeroed kernel-side SCB array). */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* SCB 0 heads the free list */
		ahc_outb(ahc, FREE_SCBH, 0);
	} else {
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	if (ahc->scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	ahc_build_free_scb_list(ahc);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures (init_level 1) */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our ccbs (init_level 2) */
	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them (init_level 3) */
	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers (init_level 4) */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them (init_level 5) */
	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them (init_level 6) */
	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/*
	 * DMA tag for our S/G structures.  We allocate in page sized
	 * chunks (init_level 7).
	 */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	memset(scb_data->hscbs, 0, AHC_SCB_MAX * sizeof(struct hardware_scb));
	ahc_alloc_scbs(ahc);

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	ahc->next_queued_scb = ahc_get_scb(ahc);
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:

	return (ENOMEM);
}
3880
/*
 * Tear down the SCB bookkeeping created by ahc_init_scbdata().
 *
 * scb_data->init_level records how far initialization progressed.
 * The switch below intentionally falls through from the highest
 * completed level down to level 1, releasing resources in the
 * reverse order of their acquisition.
 */
static void
ahc_fini_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	/* Nothing to do if SCB data was never attached. */
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		/* Unload, free, and discard every S/G chunk. */
		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			ahc_dmamem_free(ahc, scb_data->sg_dmat,
					sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
				  scb_data->sense_dmamap);
	/* FALLTHROUGH */
	case 5:
		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
				   scb_data->sense_dmamap);
	/* FALLTHROUGH */
	case 4:
		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
	/* FALLTHROUGH */
	case 3:
		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
	/* FALLTHROUGH */
	case 2:
		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
				   scb_data->hscb_dmamap);
	/* FALLTHROUGH */
	case 1:
		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	/* The kernel SCB array is malloc'd outside the init_level scheme. */
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}
3934
3935void
3936ahc_alloc_scbs(struct ahc_softc *ahc)
3937{
3938 struct scb_data *scb_data;
3939 struct scb *next_scb;
3940 struct sg_map_node *sg_map;
3941 bus_addr_t physaddr;
3942 struct ahc_dma_seg *segs;
3943 int newcount;
3944 int i;
3945
3946 scb_data = ahc->scb_data;
3947 if (scb_data->numscbs >= AHC_SCB_MAX)
3948 /* Can't allocate any more */
3949 return;
3950
3951 next_scb = &scb_data->scbarray[scb_data->numscbs];
3952
3953 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
3954
3955 if (sg_map == NULL)
3956 return;
3957
3958 /* Allocate S/G space for the next batch of SCBS */
3959 if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
3960 (void **)&sg_map->sg_vaddr,
3961 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
3962 free(sg_map, M_DEVBUF);
3963 return;
3964 }
3965
3966 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
3967
3968 ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
3969 sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
3970 &sg_map->sg_physaddr, /*flags*/0);
3971
3972 segs = sg_map->sg_vaddr;
3973 physaddr = sg_map->sg_physaddr;
3974
3975 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
3976 for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) {
3977 struct scb_platform_data *pdata;
3978#ifndef __linux__
3979 int error;
3980#endif
3981 pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
3982 M_DEVBUF, M_NOWAIT);
3983 if (pdata == NULL)
3984 break;
3985 next_scb->platform_data = pdata;
3986 next_scb->sg_list = segs;
3987 /*
3988 * The sequencer always starts with the second entry.
3989 * The first entry is embedded in the scb.
3990 */
3991 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
3992 next_scb->ahc_softc = ahc;
3993 next_scb->flags = SCB_FREE;
3994#ifndef __linux__
3995 error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
3996 &next_scb->dmamap);
3997 if (error != 0)
3998 break;
3999#endif
4000 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
4001 next_scb->hscb->tag = ahc->scb_data->numscbs;
4002 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
4003 next_scb, links.sle);
4004 segs += AHC_NSEG;
4005 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
4006 next_scb++;
4007 ahc->scb_data->numscbs++;
4008 }
4009}
4010
4011void
4012ahc_controller_info(struct ahc_softc *ahc, char *buf)
4013{
4014 int len;
4015
4016 len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
4017 buf += len;
4018 if ((ahc->features & AHC_TWIN) != 0)
4019 len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
4020 "B SCSI Id=%d, primary %c, ",
4021 ahc->our_id, ahc->our_id_b,
4022 (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
4023 else {
4024 const char *type;
4025
4026 if ((ahc->features & AHC_WIDE) != 0) {
4027 type = "Wide";
4028 } else {
4029 type = "Single";
4030 }
4031 len = sprintf(buf, "%s Channel %c, SCSI Id=%d, ",
4032 type, ahc->channel, ahc->our_id);
4033 }
4034 buf += len;
4035
4036 if ((ahc->flags & AHC_PAGESCBS) != 0)
4037 sprintf(buf, "%d/%d SCBs",
4038 ahc->scb_data->maxhscbs, AHC_SCB_MAX);
4039 else
4040 sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
4041}
4042
4043/*
4044 * Start the board, ready for normal operation
4045 */
4046int
4047ahc_init(struct ahc_softc *ahc)
4048{
4049 int max_targ;
4050 int i;
4051 int term;
4052 u_int scsi_conf;
4053 u_int scsiseq_template;
4054 u_int ultraenb;
4055 u_int discenable;
4056 u_int tagenable;
4057 size_t driver_data_size;
4058 uint32_t physaddr;
4059
4060#ifdef AHC_DEBUG_SEQUENCER
4061 ahc->flags |= AHC_SEQUENCER_DEBUG;
4062#endif
4063
4064#ifdef AHC_PRINT_SRAM
4065 printf("Scratch Ram:");
4066 for (i = 0x20; i < 0x5f; i++) {
4067 if (((i % 8) == 0) && (i != 0)) {
4068 printf ("\n ");
4069 }
4070 printf (" 0x%x", ahc_inb(ahc, i));
4071 }
4072 if ((ahc->features & AHC_MORE_SRAM) != 0) {
4073 for (i = 0x70; i < 0x7f; i++) {
4074 if (((i % 8) == 0) && (i != 0)) {
4075 printf ("\n ");
4076 }
4077 printf (" 0x%x", ahc_inb(ahc, i));
4078 }
4079 }
4080 printf ("\n");
4081#endif
4082 max_targ = 15;
4083
4084 /*
4085 * Assume we have a board at this stage and it has been reset.
4086 */
4087 if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4088 ahc->our_id = ahc->our_id_b = 7;
4089
4090 /*
4091 * Default to allowing initiator operations.
4092 */
4093 ahc->flags |= AHC_INITIATORROLE;
4094
4095 /*
4096 * Only allow target mode features if this unit has them enabled.
4097 */
4098 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
4099 ahc->features &= ~AHC_TARGETMODE;
4100
4101#ifndef __linux__
4102 /* DMA tag for mapping buffers into device visible space. */
4103 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4104 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
4105 /*highaddr*/BUS_SPACE_MAXADDR,
4106 /*filter*/NULL, /*filterarg*/NULL,
4107 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG,
4108 /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4109 /*flags*/BUS_DMA_ALLOCNOW,
4110 &ahc->buffer_dmat) != 0) {
4111 return (ENOMEM);
4112 }
4113#endif
4114
4115 ahc->init_level++;
4116
4117 /*
4118 * DMA tag for our command fifos and other data in system memory
4119 * the card's sequencer must be able to access. For initiator
4120 * roles, we need to allocate space for the the qinfifo and qoutfifo.
4121 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4122 * When providing for the target mode role, we must additionally
4123 * provide space for the incoming target command fifo and an extra
4124 * byte to deal with a dma bug in some chip versions.
4125 */
4126 driver_data_size = 2 * 256 * sizeof(uint8_t);
4127 if ((ahc->features & AHC_TARGETMODE) != 0)
4128 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4129 + /*DMA WideOdd Bug Buffer*/1;
4130 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4131 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
4132 /*highaddr*/BUS_SPACE_MAXADDR,
4133 /*filter*/NULL, /*filterarg*/NULL,
4134 driver_data_size,
4135 /*nsegments*/1,
4136 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4137 /*flags*/0, &ahc->shared_data_dmat) != 0) {
4138 return (ENOMEM);
4139 }
4140
4141 ahc->init_level++;
4142
4143 /* Allocation of driver data */
4144 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
4145 (void **)&ahc->qoutfifo,
4146 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4147 return (ENOMEM);
4148 }
4149
4150 ahc->init_level++;
4151
4152 /* And permanently map it in */
4153 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
4154 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
4155 &ahc->shared_data_busaddr, /*flags*/0);
4156
4157 if ((ahc->features & AHC_TARGETMODE) != 0) {
4158 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4159 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
4160 ahc->dma_bug_buf = ahc->shared_data_busaddr
4161 + driver_data_size - 1;
4162 /* All target command blocks start out invalid. */
4163 for (i = 0; i < AHC_TMODE_CMDS; i++)
4164 ahc->targetcmds[i].cmd_valid = 0;
4165 ahc->tqinfifonext = 1;
4166 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4167 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4168 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4169 }
4170 ahc->qinfifo = &ahc->qoutfifo[256];
4171
4172 ahc->init_level++;
4173
4174 /* Allocate SCB data now that buffer_dmat is initialized */
4175 if (ahc->scb_data->maxhscbs == 0)
4176 if (ahc_init_scbdata(ahc) != 0)
4177 return (ENOMEM);
4178
4179 /*
4180 * Allocate a tstate to house information for our
4181 * initiator presence on the bus as well as the user
4182 * data for any target mode initiator.
4183 */
4184 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4170 printf("%s: unable to allocate tmode_tstate. "
4185 printf("%s: unable to allocate ahc_tmode_tstate. "
4171 "Failing attach\n", ahc_name(ahc));
4186 "Failing attach\n", ahc_name(ahc));
4172 return (-1);
4187 return (ENOMEM);
4173 }
4174
4175 if ((ahc->features & AHC_TWIN) != 0) {
4176 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4188 }
4189
4190 if ((ahc->features & AHC_TWIN) != 0) {
4191 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4177 printf("%s: unable to allocate tmode_tstate. "
4192 printf("%s: unable to allocate ahc_tmode_tstate. "
4178 "Failing attach\n", ahc_name(ahc));
4193 "Failing attach\n", ahc_name(ahc));
4179 return (-1);
4194 return (ENOMEM);
4180 }
4181 }
4182
4183 ahc_outb(ahc, SEQ_FLAGS, 0);
4184 ahc_outb(ahc, SEQ_FLAGS2, 0);
4185
4186 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) {
4187 ahc->flags |= AHC_PAGESCBS;
4188 } else {
4189 ahc->flags &= ~AHC_PAGESCBS;
4190 }
4191
4192#ifdef AHC_DEBUG
4193 if (ahc_debug & AHC_SHOWMISC) {
4194 printf("%s: hardware scb %d bytes; kernel scb %d bytes; "
4195 "ahc_dma %d bytes\n",
4196 ahc_name(ahc),
4197 sizeof(struct hardware_scb),
4198 sizeof(struct scb),
4199 sizeof(struct ahc_dma_seg));
4200 }
4201#endif /* AHC_DEBUG */
4202
4203 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4204 if (ahc->features & AHC_TWIN) {
4205
4206 /*
4207 * The device is gated to channel B after a chip reset,
4208 * so set those values first
4209 */
4210 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4211 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4212 ahc_outb(ahc, SCSIID, ahc->our_id_b);
4213 scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4214 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4215 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
4216 if ((ahc->features & AHC_ULTRA2) != 0)
4217 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4218 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4219 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4220
4221 if ((scsi_conf & RESET_SCSI) != 0
4222 && (ahc->flags & AHC_INITIATORROLE) != 0)
4223 ahc->flags |= AHC_RESET_BUS_B;
4224
4225 /* Select Channel A */
4226 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4227 }
4228 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4229 if ((ahc->features & AHC_ULTRA2) != 0)
4230 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4231 else
4232 ahc_outb(ahc, SCSIID, ahc->our_id);
4233 scsi_conf = ahc_inb(ahc, SCSICONF);
4234 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4235 |term|ahc->seltime
4236 |ENSTIMER|ACTNEGEN);
4237 if ((ahc->features & AHC_ULTRA2) != 0)
4238 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4239 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4240 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4241
4242 if ((scsi_conf & RESET_SCSI) != 0
4243 && (ahc->flags & AHC_INITIATORROLE) != 0)
4244 ahc->flags |= AHC_RESET_BUS_A;
4245
4246 /*
4247 * Look at the information that board initialization or
4248 * the board bios has left us.
4249 */
4250 ultraenb = 0;
4251 tagenable = ALL_TARGETS_MASK;
4252
4253 /* Grab the disconnection disable table and invert it for our needs */
4254 if (ahc->flags & AHC_USEDEFAULTS) {
4255 printf("%s: Host Adapter Bios disabled. Using default SCSI "
4256 "device parameters\n", ahc_name(ahc));
4257 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4258 AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4259 discenable = ALL_TARGETS_MASK;
4260 if ((ahc->features & AHC_ULTRA) != 0)
4261 ultraenb = ALL_TARGETS_MASK;
4262 } else {
4263 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4264 | ahc_inb(ahc, DISC_DSB));
4265 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4266 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4267 | ahc_inb(ahc, ULTRA_ENB);
4268 }
4269 if ((ahc->flags & AHC_ULTRA_DISABLED) != 0)
4270 ultraenb = 0;
4271
4272 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4273 max_targ = 7;
4274
4275 for (i = 0; i <= max_targ; i++) {
4276 struct ahc_initiator_tinfo *tinfo;
4195 }
4196 }
4197
4198 ahc_outb(ahc, SEQ_FLAGS, 0);
4199 ahc_outb(ahc, SEQ_FLAGS2, 0);
4200
4201 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) {
4202 ahc->flags |= AHC_PAGESCBS;
4203 } else {
4204 ahc->flags &= ~AHC_PAGESCBS;
4205 }
4206
4207#ifdef AHC_DEBUG
4208 if (ahc_debug & AHC_SHOWMISC) {
4209 printf("%s: hardware scb %d bytes; kernel scb %d bytes; "
4210 "ahc_dma %d bytes\n",
4211 ahc_name(ahc),
4212 sizeof(struct hardware_scb),
4213 sizeof(struct scb),
4214 sizeof(struct ahc_dma_seg));
4215 }
4216#endif /* AHC_DEBUG */
4217
4218 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4219 if (ahc->features & AHC_TWIN) {
4220
4221 /*
4222 * The device is gated to channel B after a chip reset,
4223 * so set those values first
4224 */
4225 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4226 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4227 ahc_outb(ahc, SCSIID, ahc->our_id_b);
4228 scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4229 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4230 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
4231 if ((ahc->features & AHC_ULTRA2) != 0)
4232 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4233 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4234 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4235
4236 if ((scsi_conf & RESET_SCSI) != 0
4237 && (ahc->flags & AHC_INITIATORROLE) != 0)
4238 ahc->flags |= AHC_RESET_BUS_B;
4239
4240 /* Select Channel A */
4241 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4242 }
4243 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4244 if ((ahc->features & AHC_ULTRA2) != 0)
4245 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4246 else
4247 ahc_outb(ahc, SCSIID, ahc->our_id);
4248 scsi_conf = ahc_inb(ahc, SCSICONF);
4249 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4250 |term|ahc->seltime
4251 |ENSTIMER|ACTNEGEN);
4252 if ((ahc->features & AHC_ULTRA2) != 0)
4253 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4254 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4255 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4256
4257 if ((scsi_conf & RESET_SCSI) != 0
4258 && (ahc->flags & AHC_INITIATORROLE) != 0)
4259 ahc->flags |= AHC_RESET_BUS_A;
4260
4261 /*
4262 * Look at the information that board initialization or
4263 * the board bios has left us.
4264 */
4265 ultraenb = 0;
4266 tagenable = ALL_TARGETS_MASK;
4267
4268 /* Grab the disconnection disable table and invert it for our needs */
4269 if (ahc->flags & AHC_USEDEFAULTS) {
4270 printf("%s: Host Adapter Bios disabled. Using default SCSI "
4271 "device parameters\n", ahc_name(ahc));
4272 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4273 AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4274 discenable = ALL_TARGETS_MASK;
4275 if ((ahc->features & AHC_ULTRA) != 0)
4276 ultraenb = ALL_TARGETS_MASK;
4277 } else {
4278 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4279 | ahc_inb(ahc, DISC_DSB));
4280 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4281 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4282 | ahc_inb(ahc, ULTRA_ENB);
4283 }
4284 if ((ahc->flags & AHC_ULTRA_DISABLED) != 0)
4285 ultraenb = 0;
4286
4287 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4288 max_targ = 7;
4289
4290 for (i = 0; i <= max_targ; i++) {
4291 struct ahc_initiator_tinfo *tinfo;
4277 struct tmode_tstate *tstate;
4292 struct ahc_tmode_tstate *tstate;
4278 u_int our_id;
4279 u_int target_id;
4280 char channel;
4281
4282 channel = 'A';
4283 our_id = ahc->our_id;
4284 target_id = i;
4285 if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4286 channel = 'B';
4287 our_id = ahc->our_id_b;
4288 target_id = i % 8;
4289 }
4290 tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4291 target_id, &tstate);
4292 /* Default to async narrow across the board */
4293 memset(tinfo, 0, sizeof(*tinfo));
4294 if (ahc->flags & AHC_USEDEFAULTS) {
4295 if ((ahc->features & AHC_WIDE) != 0)
4296 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4297
4298 /*
4299 * These will be truncated when we determine the
4300 * connection type we have with the target.
4301 */
4302 tinfo->user.period = ahc_syncrates->period;
4303 tinfo->user.offset = ~0;
4304 } else {
4305 u_int scsirate;
4306 uint16_t mask;
4307
4308 /* Take the settings leftover in scratch RAM. */
4309 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4310 mask = (0x01 << i);
4311 if ((ahc->features & AHC_ULTRA2) != 0) {
4312 u_int offset;
4313 u_int maxsync;
4314
4315 if ((scsirate & SOFS) == 0x0F) {
4316 /*
4317 * Haven't negotiated yet,
4318 * so the format is different.
4319 */
4320 scsirate = (scsirate & SXFR) >> 4
4321 | (ultraenb & mask)
4322 ? 0x08 : 0x0
4323 | (scsirate & WIDEXFER);
4324 offset = MAX_OFFSET_ULTRA2;
4325 } else
4326 offset = ahc_inb(ahc, TARG_OFFSET + i);
4327 if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
4328 /* Set to the lowest sync rate, 5MHz */
4329 scsirate |= 0x1c;
4330 maxsync = AHC_SYNCRATE_ULTRA2;
4331 if ((ahc->features & AHC_DT) != 0)
4332 maxsync = AHC_SYNCRATE_DT;
4333 tinfo->user.period =
4334 ahc_find_period(ahc, scsirate, maxsync);
4335 if (offset == 0)
4336 tinfo->user.period = 0;
4337 else
4338 tinfo->user.offset = ~0;
4339 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4340 && (ahc->features & AHC_DT) != 0)
4341 tinfo->user.ppr_options =
4342 MSG_EXT_PPR_DT_REQ;
4343 } else if ((scsirate & SOFS) != 0) {
4344 if ((scsirate & SXFR) == 0x40
4345 && (ultraenb & mask) != 0) {
4346 /* Treat 10MHz as a non-ultra speed */
4347 scsirate &= ~SXFR;
4348 ultraenb &= ~mask;
4349 }
4350 tinfo->user.period =
4351 ahc_find_period(ahc, scsirate,
4352 (ultraenb & mask)
4353 ? AHC_SYNCRATE_ULTRA
4354 : AHC_SYNCRATE_FAST);
4355 if (tinfo->user.period != 0)
4356 tinfo->user.offset = ~0;
4357 }
4358 if (tinfo->user.period == 0)
4359 tinfo->user.offset = 0;
4360 if ((scsirate & WIDEXFER) != 0
4361 && (ahc->features & AHC_WIDE) != 0)
4362 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4363 tinfo->user.protocol_version = 4;
4364 if ((ahc->features & AHC_DT) != 0)
4365 tinfo->user.transport_version = 3;
4366 else
4367 tinfo->user.transport_version = 2;
4368 tinfo->goal.protocol_version = 2;
4369 tinfo->goal.transport_version = 2;
4370 tinfo->current.protocol_version = 2;
4371 tinfo->current.transport_version = 2;
4372 }
4373 tstate->ultraenb = ultraenb;
4374 tstate->discenable = discenable;
4375 tstate->tagenable = 0; /* Wait until the XPT says its okay */
4376 }
4377 ahc->user_discenable = discenable;
4378 ahc->user_tagenable = tagenable;
4379
4380 /* There are no untagged SCBs active yet. */
4381 for (i = 0; i < 16; i++) {
4382 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
4383 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4384 int lun;
4385
4386 /*
4387 * The SCB based BTT allows an entry per
4388 * target and lun pair.
4389 */
4390 for (lun = 1; lun < AHC_NUM_LUNS; lun++)
4391 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
4392 }
4393 }
4394
4395 /* All of our queues are empty */
4396 for (i = 0; i < 256; i++)
4397 ahc->qoutfifo[i] = SCB_LIST_NULL;
4398
4399 for (i = 0; i < 256; i++)
4400 ahc->qinfifo[i] = SCB_LIST_NULL;
4401
4402 if ((ahc->features & AHC_MULTI_TID) != 0) {
4403 ahc_outb(ahc, TARGID, 0);
4404 ahc_outb(ahc, TARGID + 1, 0);
4405 }
4406
4407 /*
4408 * Tell the sequencer where it can find our arrays in memory.
4409 */
4410 physaddr = ahc->scb_data->hscb_busaddr;
4411 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4412 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4413 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4414 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4415
4416 physaddr = ahc->shared_data_busaddr;
4417 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4418 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4419 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4420 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4421
4422 /*
4423 * Initialize the group code to command length table.
4424 * This overrides the values in TARG_SCSIRATE, so only
4425 * setup the table after we have processed that information.
4426 */
4427 ahc_outb(ahc, CMDSIZE_TABLE, 5);
4428 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4429 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4430 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4431 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4432 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4433 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4434 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4435
4436 /* Tell the sequencer of our initial queue positions */
4437 ahc_outb(ahc, KERNEL_QINPOS, 0);
4438 ahc_outb(ahc, QINPOS, 0);
4439 ahc_outb(ahc, QOUTPOS, 0);
4440
4293 u_int our_id;
4294 u_int target_id;
4295 char channel;
4296
4297 channel = 'A';
4298 our_id = ahc->our_id;
4299 target_id = i;
4300 if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4301 channel = 'B';
4302 our_id = ahc->our_id_b;
4303 target_id = i % 8;
4304 }
4305 tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4306 target_id, &tstate);
4307 /* Default to async narrow across the board */
4308 memset(tinfo, 0, sizeof(*tinfo));
4309 if (ahc->flags & AHC_USEDEFAULTS) {
4310 if ((ahc->features & AHC_WIDE) != 0)
4311 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4312
4313 /*
4314 * These will be truncated when we determine the
4315 * connection type we have with the target.
4316 */
4317 tinfo->user.period = ahc_syncrates->period;
4318 tinfo->user.offset = ~0;
4319 } else {
4320 u_int scsirate;
4321 uint16_t mask;
4322
4323 /* Take the settings leftover in scratch RAM. */
4324 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4325 mask = (0x01 << i);
4326 if ((ahc->features & AHC_ULTRA2) != 0) {
4327 u_int offset;
4328 u_int maxsync;
4329
4330 if ((scsirate & SOFS) == 0x0F) {
4331 /*
4332 * Haven't negotiated yet,
4333 * so the format is different.
4334 */
4335 scsirate = (scsirate & SXFR) >> 4
4336 | (ultraenb & mask)
4337 ? 0x08 : 0x0
4338 | (scsirate & WIDEXFER);
4339 offset = MAX_OFFSET_ULTRA2;
4340 } else
4341 offset = ahc_inb(ahc, TARG_OFFSET + i);
4342 if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
4343 /* Set to the lowest sync rate, 5MHz */
4344 scsirate |= 0x1c;
4345 maxsync = AHC_SYNCRATE_ULTRA2;
4346 if ((ahc->features & AHC_DT) != 0)
4347 maxsync = AHC_SYNCRATE_DT;
4348 tinfo->user.period =
4349 ahc_find_period(ahc, scsirate, maxsync);
4350 if (offset == 0)
4351 tinfo->user.period = 0;
4352 else
4353 tinfo->user.offset = ~0;
4354 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4355 && (ahc->features & AHC_DT) != 0)
4356 tinfo->user.ppr_options =
4357 MSG_EXT_PPR_DT_REQ;
4358 } else if ((scsirate & SOFS) != 0) {
4359 if ((scsirate & SXFR) == 0x40
4360 && (ultraenb & mask) != 0) {
4361 /* Treat 10MHz as a non-ultra speed */
4362 scsirate &= ~SXFR;
4363 ultraenb &= ~mask;
4364 }
4365 tinfo->user.period =
4366 ahc_find_period(ahc, scsirate,
4367 (ultraenb & mask)
4368 ? AHC_SYNCRATE_ULTRA
4369 : AHC_SYNCRATE_FAST);
4370 if (tinfo->user.period != 0)
4371 tinfo->user.offset = ~0;
4372 }
4373 if (tinfo->user.period == 0)
4374 tinfo->user.offset = 0;
4375 if ((scsirate & WIDEXFER) != 0
4376 && (ahc->features & AHC_WIDE) != 0)
4377 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4378 tinfo->user.protocol_version = 4;
4379 if ((ahc->features & AHC_DT) != 0)
4380 tinfo->user.transport_version = 3;
4381 else
4382 tinfo->user.transport_version = 2;
4383 tinfo->goal.protocol_version = 2;
4384 tinfo->goal.transport_version = 2;
4385 tinfo->current.protocol_version = 2;
4386 tinfo->current.transport_version = 2;
4387 }
4388 tstate->ultraenb = ultraenb;
4389 tstate->discenable = discenable;
4390 tstate->tagenable = 0; /* Wait until the XPT says its okay */
4391 }
4392 ahc->user_discenable = discenable;
4393 ahc->user_tagenable = tagenable;
4394
4395 /* There are no untagged SCBs active yet. */
4396 for (i = 0; i < 16; i++) {
4397 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
4398 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4399 int lun;
4400
4401 /*
4402 * The SCB based BTT allows an entry per
4403 * target and lun pair.
4404 */
4405 for (lun = 1; lun < AHC_NUM_LUNS; lun++)
4406 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
4407 }
4408 }
4409
4410 /* All of our queues are empty */
4411 for (i = 0; i < 256; i++)
4412 ahc->qoutfifo[i] = SCB_LIST_NULL;
4413
4414 for (i = 0; i < 256; i++)
4415 ahc->qinfifo[i] = SCB_LIST_NULL;
4416
4417 if ((ahc->features & AHC_MULTI_TID) != 0) {
4418 ahc_outb(ahc, TARGID, 0);
4419 ahc_outb(ahc, TARGID + 1, 0);
4420 }
4421
4422 /*
4423 * Tell the sequencer where it can find our arrays in memory.
4424 */
4425 physaddr = ahc->scb_data->hscb_busaddr;
4426 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4427 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4428 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4429 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4430
4431 physaddr = ahc->shared_data_busaddr;
4432 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4433 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4434 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4435 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4436
4437 /*
4438 * Initialize the group code to command length table.
4439 * This overrides the values in TARG_SCSIRATE, so only
4440 * setup the table after we have processed that information.
4441 */
4442 ahc_outb(ahc, CMDSIZE_TABLE, 5);
4443 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4444 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4445 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4446 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4447 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4448 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4449 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4450
4451 /* Tell the sequencer of our initial queue positions */
4452 ahc_outb(ahc, KERNEL_QINPOS, 0);
4453 ahc_outb(ahc, QINPOS, 0);
4454 ahc_outb(ahc, QOUTPOS, 0);
4455
4441 /* Don't have any special messages to send to targets */
4442 ahc_outb(ahc, TARGET_MSG_REQUEST, 0);
4443 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0);
4444
4445 /*
4446 * Use the built in queue management registers
4447 * if they are available.
4448 */
4449 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4450 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4451 ahc_outb(ahc, SDSCB_QOFF, 0);
4452 ahc_outb(ahc, SNSCB_QOFF, 0);
4453 ahc_outb(ahc, HNSCB_QOFF, 0);
4454 }
4455
4456
4457 /* We don't have any waiting selections */
4458 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4459
4460 /* Our disconnection list is empty too */
4461 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4462
4463 /* Message out buffer starts empty */
4464 ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4465
4466 /*
4467 * Setup the allowed SCSI Sequences based on operational mode.
 4468	 * If we are a target, we'll enable select in operations once
4469 * we've had a lun enabled.
4470 */
4471 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4472 if ((ahc->flags & AHC_INITIATORROLE) != 0)
4473 scsiseq_template |= ENRSELI;
4474 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4475
4476 /*
4477 * Load the Sequencer program and Enable the adapter
4478 * in "fast" mode.
4479 */
4480 if (bootverbose)
4481 printf("%s: Downloading Sequencer Program...",
4482 ahc_name(ahc));
4483
4484 ahc_loadseq(ahc);
4485
4486 if ((ahc->features & AHC_ULTRA2) != 0) {
4487 int wait;
4488
4489 /*
4490 * Wait for up to 500ms for our transceivers
4491 * to settle. If the adapter does not have
 4492	 * a cable attached, the transceivers may
4493 * never settle, so don't complain if we
4494 * fail here.
4495 */
4496 ahc_pause(ahc);
4497 for (wait = 5000;
4498 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4499 wait--)
4500 ahc_delay(100);
4501 ahc_unpause(ahc);
4502 }
4503 return (0);
4504}
4505
4456 /*
4457 * Use the built in queue management registers
4458 * if they are available.
4459 */
4460 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4461 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4462 ahc_outb(ahc, SDSCB_QOFF, 0);
4463 ahc_outb(ahc, SNSCB_QOFF, 0);
4464 ahc_outb(ahc, HNSCB_QOFF, 0);
4465 }
4466
4467
4468 /* We don't have any waiting selections */
4469 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4470
4471 /* Our disconnection list is empty too */
4472 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4473
4474 /* Message out buffer starts empty */
4475 ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4476
4477 /*
4478 * Setup the allowed SCSI Sequences based on operational mode.
 4479	 * If we are a target, we'll enable select in operations once
4480 * we've had a lun enabled.
4481 */
4482 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4483 if ((ahc->flags & AHC_INITIATORROLE) != 0)
4484 scsiseq_template |= ENRSELI;
4485 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4486
4487 /*
4488 * Load the Sequencer program and Enable the adapter
4489 * in "fast" mode.
4490 */
4491 if (bootverbose)
4492 printf("%s: Downloading Sequencer Program...",
4493 ahc_name(ahc));
4494
4495 ahc_loadseq(ahc);
4496
4497 if ((ahc->features & AHC_ULTRA2) != 0) {
4498 int wait;
4499
4500 /*
4501 * Wait for up to 500ms for our transceivers
4502 * to settle. If the adapter does not have
 4503	 * a cable attached, the transceivers may
4504 * never settle, so don't complain if we
4505 * fail here.
4506 */
4507 ahc_pause(ahc);
4508 for (wait = 5000;
4509 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4510 wait--)
4511 ahc_delay(100);
4512 ahc_unpause(ahc);
4513 }
4514 return (0);
4515}
4516
4517void
4518ahc_intr_enable(struct ahc_softc *ahc, int enable)
4519{
4520 u_int hcntrl;
4521
4522 hcntrl = ahc_inb(ahc, HCNTRL);
4523 hcntrl &= ~INTEN;
4524 ahc->pause &= ~INTEN;
4525 ahc->unpause &= ~INTEN;
4526 if (enable) {
4527 hcntrl |= INTEN;
4528 ahc->pause |= INTEN;
4529 ahc->unpause |= INTEN;
4530 }
4531 ahc_outb(ahc, HCNTRL, hcntrl);
4532}
4533
4506/*
4507 * Ensure that the card is paused in a location
4508 * outside of all critical sections and that all
4509 * pending work is completed prior to returning.
4510 * This routine should only be called from outside
4511 * an interrupt context.
4512 */
4513void
4514ahc_pause_and_flushwork(struct ahc_softc *ahc)
4515{
4516 int intstat;
4517 int maxloops;
4518
4519 maxloops = 1000;
4520 ahc->flags |= AHC_ALL_INTERRUPTS;
4521 intstat = 0;
4522 do {
4523 ahc_intr(ahc);
4524 ahc_pause(ahc);
4525 ahc_clear_critical_section(ahc);
4526 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
4527 break;
4528 maxloops--;
4529 } while (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) && --maxloops);
4530 if (maxloops == 0) {
4531 printf("Infinite interrupt loop, INTSTAT = %x",
4532 ahc_inb(ahc, INTSTAT));
4533 }
4534 ahc_platform_flushwork(ahc);
4535 ahc->flags &= ~AHC_ALL_INTERRUPTS;
4536}
4537
4538int
4539ahc_suspend(struct ahc_softc *ahc)
4540{
4541 uint8_t *ptr;
4542 int i;
4543
4544 ahc_pause_and_flushwork(ahc);
4545
4546 if (LIST_FIRST(&ahc->pending_scbs) != NULL)
4547 return (EBUSY);
4548
4549#if AHC_TARGET_MODE
4550 /*
4551 * XXX What about ATIOs that have not yet been serviced?
4552 * Perhaps we should just refuse to be suspended if we
4553 * are acting in a target role.
4554 */
4555 if (ahc->pending_device != NULL)
4556 return (EBUSY);
4557#endif
4558
4559 /* Save volatile registers */
4560 if ((ahc->features & AHC_TWIN) != 0) {
4561 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4562 ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ);
4563 ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4564 ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4565 ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0);
4566 ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1);
4567 ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER);
4568 ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL);
4569 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4570 }
4571 ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ);
4572 ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4573 ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4574 ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0);
4575 ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1);
4576 ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER);
4577 ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL);
4578
4579 if ((ahc->chip & AHC_PCI) != 0) {
4580 ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
4581 ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
4582 }
4583
4584 if ((ahc->features & AHC_DT) != 0) {
4585 u_int sfunct;
4586
4587 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4588 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4589 ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE);
4590 ahc_outb(ahc, SFUNCT, sfunct);
4591 ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1);
4592 }
4593
4594 if ((ahc->features & AHC_MULTI_FUNC) != 0)
4595 ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR);
4596
4597 if ((ahc->features & AHC_ULTRA2) != 0)
4598 ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
4599
4600 ptr = ahc->suspend_state.scratch_ram;
4601 for (i = 0; i < 64; i++)
4602 *ptr++ = ahc_inb(ahc, SRAM_BASE + i);
4603
4604 if ((ahc->features & AHC_MORE_SRAM) != 0) {
4605 for (i = 0; i < 16; i++)
4606 *ptr++ = ahc_inb(ahc, TARG_OFFSET + i);
4607 }
4608
4609 ptr = ahc->suspend_state.btt;
4610 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4611 for (i = 0;i < AHC_NUM_TARGETS; i++) {
4612 int j;
4613
4614 for (j = 0;j < AHC_NUM_LUNS; j++) {
4615 u_int tcl;
4616
4617 tcl = BUILD_TCL(i << 4, j);
4618 *ptr = ahc_index_busy_tcl(ahc, tcl);
4619 }
4620 }
4621 }
4622 ahc_shutdown(ahc);
4623 return (0);
4624}
4625
4626int
4627ahc_resume(struct ahc_softc *ahc)
4628{
4629 uint8_t *ptr;
4630 int i;
4631
4632 ahc_reset(ahc);
4633
4634 ahc_build_free_scb_list(ahc);
4635
4636 /* Restore volatile registers */
4637 if ((ahc->features & AHC_TWIN) != 0) {
4638 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4639 ahc_outb(ahc, SCSIID, ahc->our_id);
4640 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq);
4641 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0);
4642 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1);
4643 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0);
4644 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1);
4645 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer);
4646 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl);
4647 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4648 }
4649 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq);
4650 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0);
4651 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1);
4652 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0);
4653 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1);
4654 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer);
4655 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl);
4656 if ((ahc->features & AHC_ULTRA2) != 0)
4657 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4658 else
4659 ahc_outb(ahc, SCSIID, ahc->our_id);
4660
4661 if ((ahc->chip & AHC_PCI) != 0) {
4662 ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0);
4663 ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus);
4664 }
4665
4666 if ((ahc->features & AHC_DT) != 0) {
4667 u_int sfunct;
4668
4669 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4670 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4671 ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode);
4672 ahc_outb(ahc, SFUNCT, sfunct);
4673 ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1);
4674 }
4675
4676 if ((ahc->features & AHC_MULTI_FUNC) != 0)
4677 ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr);
4678
4679 if ((ahc->features & AHC_ULTRA2) != 0)
4680 ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh);
4681
4682 ptr = ahc->suspend_state.scratch_ram;
4683 for (i = 0; i < 64; i++)
4684 ahc_outb(ahc, SRAM_BASE + i, *ptr++);
4685
4686 if ((ahc->features & AHC_MORE_SRAM) != 0) {
4687 for (i = 0; i < 16; i++)
4688 ahc_outb(ahc, TARG_OFFSET + i, *ptr++);
4689 }
4690
4691 ptr = ahc->suspend_state.btt;
4692 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4693 for (i = 0;i < AHC_NUM_TARGETS; i++) {
4694 int j;
4695
4696 for (j = 0;j < AHC_NUM_LUNS; j++) {
4697 u_int tcl;
4698
4699 tcl = BUILD_TCL(i << 4, j);
4700 ahc_busy_tcl(ahc, tcl, *ptr);
4701 }
4702 }
4703 }
4704 return (0);
4705}
4706
4707/************************** Busy Target Table *********************************/
4708/*
4709 * Return the untagged transaction id for a given target/channel lun.
4710 * Optionally, clear the entry.
4711 */
4712u_int
4713ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
4714{
4715 u_int scbid;
4716 u_int target_offset;
4717
4718 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4719 u_int saved_scbptr;
4720
4721 saved_scbptr = ahc_inb(ahc, SCBPTR);
4722 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4723 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
4724 ahc_outb(ahc, SCBPTR, saved_scbptr);
4725 } else {
4726 target_offset = TCL_TARGET_OFFSET(tcl);
4727 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
4728 }
4729
4730 return (scbid);
4731}
4732
4733void
4734ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
4735{
4736 u_int target_offset;
4737
4738 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4739 u_int saved_scbptr;
4740
4741 saved_scbptr = ahc_inb(ahc, SCBPTR);
4742 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4743 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
4744 ahc_outb(ahc, SCBPTR, saved_scbptr);
4745 } else {
4746 target_offset = TCL_TARGET_OFFSET(tcl);
4747 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
4748 }
4749}
4750
4751void
4752ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
4753{
4754 u_int target_offset;
4755
4756 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4757 u_int saved_scbptr;
4758
4759 saved_scbptr = ahc_inb(ahc, SCBPTR);
4760 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4761 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
4762 ahc_outb(ahc, SCBPTR, saved_scbptr);
4763 } else {
4764 target_offset = TCL_TARGET_OFFSET(tcl);
4765 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
4766 }
4767}
4768
4769/************************** SCB and SCB queue management **********************/
4770int
4771ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
4772 char channel, int lun, u_int tag, role_t role)
4773{
4774 int targ = SCB_GET_TARGET(ahc, scb);
4775 char chan = SCB_GET_CHANNEL(ahc, scb);
4776 int slun = SCB_GET_LUN(scb);
4777 int match;
4778
4779 match = ((chan == channel) || (channel == ALL_CHANNELS));
4780 if (match != 0)
4781 match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
4782 if (match != 0)
4783 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
4784 if (match != 0) {
4785#if AHC_TARGET_MODE
4786 int group;
4787
4788 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
4789 if (role == ROLE_INITIATOR) {
4790 match = (group != XPT_FC_GROUP_TMODE)
4791 && ((tag == scb->hscb->tag)
4792 || (tag == SCB_LIST_NULL));
4793 } else if (role == ROLE_TARGET) {
4794 match = (group == XPT_FC_GROUP_TMODE)
4795 && ((tag == scb->io_ctx->csio.tag_id)
4796 || (tag == SCB_LIST_NULL));
4797 }
4798#else /* !AHC_TARGET_MODE */
4799 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
4800#endif /* AHC_TARGET_MODE */
4801 }
4802
4803 return match;
4804}
4805
4806void
4807ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
4808{
4809 int target;
4810 char channel;
4811 int lun;
4812
4813 target = SCB_GET_TARGET(ahc, scb);
4814 lun = SCB_GET_LUN(scb);
4815 channel = SCB_GET_CHANNEL(ahc, scb);
4816
4817 ahc_search_qinfifo(ahc, target, channel, lun,
4818 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
4819 CAM_REQUEUE_REQ, SEARCH_COMPLETE);
4820
4821 ahc_platform_freeze_devq(ahc, scb);
4822}
4823
4824void
4825ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
4826{
4827 struct scb *prev_scb;
4828
4829 prev_scb = NULL;
4830 if (ahc_qinfifo_count(ahc) != 0) {
4831 u_int prev_tag;
4832 uint8_t prev_pos;
4833
4834 prev_pos = ahc->qinfifonext - 1;
4835 prev_tag = ahc->qinfifo[prev_pos];
4836 prev_scb = ahc_lookup_scb(ahc, prev_tag);
4837 }
4838 ahc_qinfifo_requeue(ahc, prev_scb, scb);
4839 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4840 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4841 } else {
4842 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4843 }
4844}
4845
4846static void
4847ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
4848 struct scb *scb)
4849{
4850 if (prev_scb == NULL)
4851 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
4852 else
4853 prev_scb->hscb->next = scb->hscb->tag;
4854 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
4855 scb->hscb->next = ahc->next_queued_scb->hscb->tag;
4856}
4857
4858static int
4859ahc_qinfifo_count(struct ahc_softc *ahc)
4860{
4861 u_int8_t qinpos;
4862 u_int8_t diff;
4863
4864 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4865 qinpos = ahc_inb(ahc, SNSCB_QOFF);
4866 ahc_outb(ahc, SNSCB_QOFF, qinpos);
4867 } else
4868 qinpos = ahc_inb(ahc, QINPOS);
4869 diff = ahc->qinfifonext - qinpos;
4870 return (diff);
4871}
4872
4873int
4874ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
4875 int lun, u_int tag, role_t role, uint32_t status,
4876 ahc_search_action action)
4877{
4878 struct scb *scb;
4879 struct scb *prev_scb;
4880 uint8_t qinstart;
4881 uint8_t qinpos;
4882 uint8_t qintail;
4883 uint8_t next, prev;
4884 uint8_t curscbptr;
4885 int found;
4886 int maxtarget;
4887 int i;
4888 int have_qregs;
4889
4890 qintail = ahc->qinfifonext;
4891 have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
4892 if (have_qregs) {
4893 qinstart = ahc_inb(ahc, SNSCB_QOFF);
4894 ahc_outb(ahc, SNSCB_QOFF, qinstart);
4895 } else
4896 qinstart = ahc_inb(ahc, QINPOS);
4897 qinpos = qinstart;
4898 next = ahc_inb(ahc, NEXT_QUEUED_SCB);
4899 found = 0;
4900 prev_scb = NULL;
4901
4902 if (action == SEARCH_COMPLETE) {
4903 /*
4904 * Don't attempt to run any queued untagged transactions
4905 * until we are done with the abort process.
4906 */
4907 ahc_freeze_untagged_queues(ahc);
4908 }
4909
4910 /*
4911 * Start with an empty queue. Entries that are not chosen
4912 * for removal will be re-added to the queue as we go.
4913 */
4914 ahc->qinfifonext = qinpos;
4915 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
4916
4917 while (qinpos != qintail) {
4918 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
4919 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
4920 /*
4921 * We found an scb that needs to be acted on.
4922 */
4923 found++;
4924 switch (action) {
4925 case SEARCH_COMPLETE:
4926 {
4927 cam_status ostat;
4928 cam_status cstat;
4929
4930 ostat = ahc_get_transaction_status(scb);
4931 if (ostat == CAM_REQ_INPROG)
4932 ahc_set_transaction_status(scb,
4933 status);
4934 cstat = ahc_get_transaction_status(scb);
4935 if (cstat != CAM_REQ_CMP)
4936 ahc_freeze_scb(scb);
4937 if ((scb->flags & SCB_ACTIVE) == 0)
4938 printf("Inactive SCB in qinfifo\n");
4939 ahc_done(ahc, scb);
4940
4941 /* FALLTHROUGH */
4942 case SEARCH_REMOVE:
4943 break;
4944 }
4945 case SEARCH_COUNT:
4946 ahc_qinfifo_requeue(ahc, prev_scb, scb);
4947 prev_scb = scb;
4948 break;
4949 }
4950 } else {
4951 ahc_qinfifo_requeue(ahc, prev_scb, scb);
4952 prev_scb = scb;
4953 }
4954 qinpos++;
4955 }
4956
4957 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4958 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4959 } else {
4960 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4961 }
4962
4963 if (action != SEARCH_COUNT
4964 && (found != 0)
4965 && (qinstart != ahc->qinfifonext)) {
4966 /*
4967 * The sequencer may be in the process of dmaing
4968 * down the SCB at the beginning of the queue.
4969 * This could be problematic if either the first,
4970 * or the second SCB is removed from the queue
4971 * (the first SCB includes a pointer to the "next"
4972 * SCB to dma). If we have removed any entries, swap
4973 * the first element in the queue with the next HSCB
4974 * so the sequencer will notice that NEXT_QUEUED_SCB
4975 * has changed during its dma attempt and will retry
4976 * the DMA.
4977 */
4978 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
4979
4980 /*
4981 * ahc_swap_with_next_hscb forces our next pointer to
4982 * point to the reserved SCB for future commands. Save
4983 * and restore our original next pointer to maintain
4984 * queue integrity.
4985 */
4986 next = scb->hscb->next;
4987 ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
4988 ahc_swap_with_next_hscb(ahc, scb);
4989 scb->hscb->next = next;
4990 ahc->qinfifo[qinstart] = scb->hscb->tag;
4991
4992 /* Tell the card about the new head of the qinfifo. */
4993 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
4994
4995 /* Fixup the tail "next" pointer. */
4996 qintail = ahc->qinfifonext - 1;
4997 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
4998 scb->hscb->next = ahc->next_queued_scb->hscb->tag;
4999 }
5000
5001 /*
5002 * Search waiting for selection list.
5003 */
5004 curscbptr = ahc_inb(ahc, SCBPTR);
5005 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */
5006 prev = SCB_LIST_NULL;
5007
5008 while (next != SCB_LIST_NULL) {
5009 uint8_t scb_index;
5010
5011 ahc_outb(ahc, SCBPTR, next);
5012 scb_index = ahc_inb(ahc, SCB_TAG);
5013 if (scb_index >= ahc->scb_data->numscbs) {
5014 printf("Waiting List inconsistency. "
5015 "SCB index == %d, yet numscbs == %d.",
5016 scb_index, ahc->scb_data->numscbs);
5017 ahc_dump_card_state(ahc);
5018 panic("for safety");
5019 }
5020 scb = ahc_lookup_scb(ahc, scb_index);
5021 if (ahc_match_scb(ahc, scb, target, channel,
5022 lun, SCB_LIST_NULL, role)) {
5023 /*
5024 * We found an scb that needs to be acted on.
5025 */
5026 found++;
5027 switch (action) {
5028 case SEARCH_COMPLETE:
5029 {
5030 cam_status ostat;
5031 cam_status cstat;
5032
5033 ostat = ahc_get_transaction_status(scb);
5034 if (ostat == CAM_REQ_INPROG)
5035 ahc_set_transaction_status(scb,
5036 status);
5037 cstat = ahc_get_transaction_status(scb);
5038 if (cstat != CAM_REQ_CMP)
5039 ahc_freeze_scb(scb);
5040 if ((scb->flags & SCB_ACTIVE) == 0)
5041 printf("Inactive SCB in Waiting List\n");
5042 ahc_done(ahc, scb);
5043 /* FALLTHROUGH */
5044 }
5045 case SEARCH_REMOVE:
5046 next = ahc_rem_wscb(ahc, next, prev);
5047 break;
5048 case SEARCH_COUNT:
5049 prev = next;
5050 next = ahc_inb(ahc, SCB_NEXT);
5051 break;
5052 }
5053 } else {
5054
5055 prev = next;
5056 next = ahc_inb(ahc, SCB_NEXT);
5057 }
5058 }
5059 ahc_outb(ahc, SCBPTR, curscbptr);
5060
5061 /*
5062 * And lastly, the untagged holding queues.
5063 */
5064 i = 0;
5065 if ((ahc->flags & AHC_SCB_BTT) == 0) {
5066
5067 maxtarget = 16;
5068 if (target != CAM_TARGET_WILDCARD) {
5069
5070 i = target;
5071 if (channel == 'B')
5072 i += 8;
5073 maxtarget = i + 1;
5074 }
5075 } else {
5076 maxtarget = 0;
5077 }
5078
5079 for (; i < maxtarget; i++) {
5080 struct scb_tailq *untagged_q;
5081 struct scb *next_scb;
5082
5083 untagged_q = &(ahc->untagged_queues[i]);
5084 next_scb = TAILQ_FIRST(untagged_q);
5085 while (next_scb != NULL) {
5086
5087 scb = next_scb;
5088 next_scb = TAILQ_NEXT(scb, links.tqe);
5089
5090 /*
5091 * The head of the list may be the currently
5092 * active untagged command for a device.
5093 * We're only searching for commands that
5094 * have not been started. A transaction
5095 * marked active but still in the qinfifo
5096 * is removed by the qinfifo scanning code
5097 * above.
5098 */
5099 if ((scb->flags & SCB_ACTIVE) != 0)
5100 continue;
5101
5102 if (ahc_match_scb(ahc, scb, target, channel,
5103 lun, SCB_LIST_NULL, role)) {
5104 /*
5105 * We found an scb that needs to be acted on.
5106 */
5107 found++;
5108 switch (action) {
5109 case SEARCH_COMPLETE:
5110 {
5111 cam_status ostat;
5112 cam_status cstat;
5113
5114 ostat = ahc_get_transaction_status(scb);
5115 if (ostat == CAM_REQ_INPROG)
5116 ahc_set_transaction_status(scb,
5117 status);
5118 cstat = ahc_get_transaction_status(scb);
5119 if (cstat != CAM_REQ_CMP)
5120 ahc_freeze_scb(scb);
5121 if ((scb->flags & SCB_ACTIVE) == 0)
5122 printf("Inactive SCB in untaggedQ\n");
5123 ahc_done(ahc, scb);
5124 break;
5125 }
5126 case SEARCH_REMOVE:
5127 TAILQ_REMOVE(untagged_q, scb,
5128 links.tqe);
5129 break;
5130 case SEARCH_COUNT:
5131 break;
5132 }
5133 }
5134 }
5135 }
5136
5137 if (action == SEARCH_COMPLETE)
5138 ahc_release_untagged_queues(ahc);
5139 return (found);
5140}
5141
5142int
5143ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
5144 int lun, u_int tag, int stop_on_first, int remove,
5145 int save_state)
5146{
5147 struct scb *scbp;
5148 u_int next;
5149 u_int prev;
5150 u_int count;
5151 u_int active_scb;
5152
5153 count = 0;
5154 next = ahc_inb(ahc, DISCONNECTED_SCBH);
5155 prev = SCB_LIST_NULL;
5156
5157 if (save_state) {
5158 /* restore this when we're done */
5159 active_scb = ahc_inb(ahc, SCBPTR);
5160 } else
5161 /* Silence compiler */
5162 active_scb = SCB_LIST_NULL;
5163
5164 while (next != SCB_LIST_NULL) {
5165 u_int scb_index;
5166
5167 ahc_outb(ahc, SCBPTR, next);
5168 scb_index = ahc_inb(ahc, SCB_TAG);
5169 if (scb_index >= ahc->scb_data->numscbs) {
5170 printf("Disconnected List inconsistency. "
5171 "SCB index == %d, yet numscbs == %d.",
5172 scb_index, ahc->scb_data->numscbs);
5173 ahc_dump_card_state(ahc);
5174 panic("for safety");
5175 }
5176
5177 if (next == prev) {
5178 panic("Disconnected List Loop. "
5179 "cur SCBPTR == %x, prev SCBPTR == %x.",
5180 next, prev);
5181 }
5182 scbp = ahc_lookup_scb(ahc, scb_index);
5183 if (ahc_match_scb(ahc, scbp, target, channel, lun,
5184 tag, ROLE_INITIATOR)) {
5185 count++;
5186 if (remove) {
5187 next =
5188 ahc_rem_scb_from_disc_list(ahc, prev, next);
5189 } else {
5190 prev = next;
5191 next = ahc_inb(ahc, SCB_NEXT);
5192 }
5193 if (stop_on_first)
5194 break;
5195 } else {
5196 prev = next;
5197 next = ahc_inb(ahc, SCB_NEXT);
5198 }
5199 }
5200 if (save_state)
5201 ahc_outb(ahc, SCBPTR, active_scb);
5202 return (count);
5203}
5204
5205/*
5206 * Remove an SCB from the on chip list of disconnected transactions.
5207 * This is empty/unused if we are not performing SCB paging.
5208 */
5209static u_int
5210ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
5211{
5212 u_int next;
5213
5214 ahc_outb(ahc, SCBPTR, scbptr);
5215 next = ahc_inb(ahc, SCB_NEXT);
5216
5217 ahc_outb(ahc, SCB_CONTROL, 0);
5218
5219 ahc_add_curscb_to_free_list(ahc);
5220
5221 if (prev != SCB_LIST_NULL) {
5222 ahc_outb(ahc, SCBPTR, prev);
5223 ahc_outb(ahc, SCB_NEXT, next);
5224 } else
5225 ahc_outb(ahc, DISCONNECTED_SCBH, next);
5226
5227 return (next);
5228}
5229
5230/*
5231 * Add the SCB as selected by SCBPTR onto the on chip list of
5232 * free hardware SCBs. This list is empty/unused if we are not
5233 * performing SCB paging.
5234 */
5235static void
5236ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5237{
5238 /*
5239 * Invalidate the tag so that our abort
5240 * routines don't think it's active.
5241 */
5242 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
5243
5244 if ((ahc->flags & AHC_PAGESCBS) != 0) {
5245 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
5246 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
5247 }
5248}
5249
5250/*
5251 * Manipulate the waiting for selection list and return the
5252 * scb that follows the one that we remove.
5253 */
5254static u_int
5255ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5256{
5257 u_int curscb, next;
5258
5259 /*
5260 * Select the SCB we want to abort and
5261 * pull the next pointer out of it.
5262 */
5263 curscb = ahc_inb(ahc, SCBPTR);
5264 ahc_outb(ahc, SCBPTR, scbpos);
5265 next = ahc_inb(ahc, SCB_NEXT);
5266
5267 /* Clear the necessary fields */
5268 ahc_outb(ahc, SCB_CONTROL, 0);
5269
5270 ahc_add_curscb_to_free_list(ahc);
5271
5272 /* update the waiting list */
5273 if (prev == SCB_LIST_NULL) {
5274 /* First in the list */
5275 ahc_outb(ahc, WAITING_SCBH, next);
5276
5277 /*
5278 * Ensure we aren't attempting to perform
5279 * selection for this entry.
5280 */
5281 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
5282 } else {
5283 /*
5284 * Select the scb that pointed to us
5285 * and update its next pointer.
5286 */
5287 ahc_outb(ahc, SCBPTR, prev);
5288 ahc_outb(ahc, SCB_NEXT, next);
5289 }
5290
5291 /*
5292 * Point us back at the original scb position.
5293 */
5294 ahc_outb(ahc, SCBPTR, curscb);
5295 return next;
5296}
5297
5298/******************************** Error Handling ******************************/
5299/*
5300 * Abort all SCBs that match the given description (target/channel/lun/tag),
5301 * setting their status to the passed in status if the status has not already
5302 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
5303 * is paused before it is called.
5304 */
5305int
5306ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5307 int lun, u_int tag, role_t role, uint32_t status)
5308{
5309 struct scb *scbp;
5310 struct scb *scbp_next;
5311 u_int active_scb;
5312 int i, j;
5313 int maxtarget;
5314 int minlun;
5315 int maxlun;
5316
5317 int found;
5318
5319 /*
5320 * Don't attempt to run any queued untagged transactions
5321 * until we are done with the abort process.
5322 */
5323 ahc_freeze_untagged_queues(ahc);
5324
5325 /* restore this when we're done */
5326 active_scb = ahc_inb(ahc, SCBPTR);
5327
5328 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
5329 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5330
5331 /*
5332 * Clean out the busy target table for any untagged commands.
5333 */
5334 i = 0;
5335 maxtarget = 16;
5336 if (target != CAM_TARGET_WILDCARD) {
5337 i = target;
5338 if (channel == 'B')
5339 i += 8;
5340 maxtarget = i + 1;
5341 }
5342
5343 if (lun == CAM_LUN_WILDCARD) {
5344
5345 /*
5346 * Unless we are using an SCB based
5347 * busy targets table, there is only
5348 * one table entry for all luns of
5349 * a target.
5350 */
5351 minlun = 0;
5352 maxlun = 1;
5353 if ((ahc->flags & AHC_SCB_BTT) != 0)
5354 maxlun = AHC_NUM_LUNS;
5355 } else {
5356 minlun = lun;
5357 maxlun = lun + 1;
5358 }
5359
5360 for (;i < maxtarget; i++) {
5361 for (j = minlun;j < maxlun; j++)
5362 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
5363 }
5364
5365 /*
5366 * Go through the disconnected list and remove any entries we
5367 * have queued for completion, 0'ing their control byte too.
5368 * We save the active SCB and restore it ourselves, so there
5369 * is no reason for this search to restore it too.
5370 */
5371 ahc_search_disc_list(ahc, target, channel, lun, tag,
5372 /*stop_on_first*/FALSE, /*remove*/TRUE,
5373 /*save_state*/FALSE);
5374
5375 /*
5376 * Go through the hardware SCB array looking for commands that
5377 * were active but not on any list.
5378 */
5379 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
5380 u_int scbid;
5381
5382 ahc_outb(ahc, SCBPTR, i);
5383 scbid = ahc_inb(ahc, SCB_TAG);
5384 scbp = ahc_lookup_scb(ahc, scbid);
5385 if (scbp != NULL
5386 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
5387 ahc_add_curscb_to_free_list(ahc);
5388 }
5389
5390 /*
5391 * Go through the pending CCB list and look for
5392 * commands for this target that are still active.
5393 * These are other tagged commands that were
5394 * disconnected when the reset occured.
5395 */
5396 scbp_next = LIST_FIRST(&ahc->pending_scbs);
5397 while (scbp_next != NULL) {
5398 scbp = scbp_next;
5399 scbp_next = LIST_NEXT(scbp, pending_links);
5400 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5401 cam_status ostat;
5402
5403 ostat = ahc_get_transaction_status(scbp);
5404 if (ostat == CAM_REQ_INPROG)
5405 ahc_set_transaction_status(scbp, status);
5406 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
5407 ahc_freeze_scb(scbp);
5408 if ((scbp->flags & SCB_ACTIVE) == 0)
5409 printf("Inactive SCB on pending list\n");
5410 ahc_done(ahc, scbp);
5411 found++;
5412 }
5413 }
5414 ahc_outb(ahc, SCBPTR, active_scb);
5415 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
5416 ahc_release_untagged_queues(ahc);
5417 return found;
5418}
5419
5420static void
5421ahc_reset_current_bus(struct ahc_softc *ahc)
5422{
5423 uint8_t scsiseq;
5424
5425 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5426 scsiseq = ahc_inb(ahc, SCSISEQ);
5427 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
5428 ahc_delay(AHC_BUSRESET_DELAY);
5429 /* Turn off the bus reset */
5430 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
5431
5432 ahc_clear_intstat(ahc);
5433
5434 /* Re-enable reset interrupts */
5435 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
5436}
5437
5438int
5439ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5440{
5441 struct ahc_devinfo devinfo;
5442 u_int initiator, target, max_scsiid;
5443 u_int sblkctl;
5444 int found;
5445 int restart_needed;
5446 char cur_channel;
5447
5448 ahc->pending_device = NULL;
5449
5450 ahc_compile_devinfo(&devinfo,
5451 CAM_TARGET_WILDCARD,
5452 CAM_TARGET_WILDCARD,
5453 CAM_LUN_WILDCARD,
5454 channel, ROLE_UNKNOWN);
5455 ahc_pause(ahc);
5456
5457 /* Make sure the sequencer is in a safe location. */
5458 ahc_clear_critical_section(ahc);
5459
5460 /*
5461 * Run our command complete fifos to ensure that we perform
5462 * completion processing on any commands that 'completed'
5463 * before the reset occurred.
5464 */
5465 ahc_run_qoutfifo(ahc);
5466#if AHC_TARGET_MODE
5467 if ((ahc->flags & AHC_TARGETROLE) != 0) {
5468 ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5469 }
5470#endif
5471
5472 /*
5473 * Reset the bus if we are initiating this reset
5474 */
5475 sblkctl = ahc_inb(ahc, SBLKCTL);
5476 cur_channel = 'A';
5477 if ((ahc->features & AHC_TWIN) != 0
5478 && ((sblkctl & SELBUSB) != 0))
5479 cur_channel = 'B';
5480 if (cur_channel != channel) {
5481 /* Case 1: Command for another bus is active
5482 * Stealthily reset the other bus without
5483 * upsetting the current bus.
5484 */
5485 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5486 ahc_outb(ahc, SIMODE1,
5487 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
5488 ahc_outb(ahc, SCSISEQ,
5489 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
5490 if (initiate_reset)
5491 ahc_reset_current_bus(ahc);
5492 ahc_clear_intstat(ahc);
5493 ahc_outb(ahc, SBLKCTL, sblkctl);
5494 restart_needed = FALSE;
5495 } else {
5496 /* Case 2: A command from this bus is active or we're idle */
5497 ahc_clear_msg_state(ahc);
5498 ahc_outb(ahc, SIMODE1,
5499 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
5500 ahc_outb(ahc, SCSISEQ,
5501 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
5502 if (initiate_reset)
5503 ahc_reset_current_bus(ahc);
5504 ahc_clear_intstat(ahc);
5505 restart_needed = TRUE;
5506 }
5507
5508 /*
5509 * Clean up all the state information for the
5510 * pending transactions on this bus.
5511 */
5512 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
5513 CAM_LUN_WILDCARD, SCB_LIST_NULL,
5514 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
5515
5516 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
5517
5518#ifdef AHC_TARGET_MODE
5519 /*
5520 * Send an immediate notify ccb to all target more peripheral
5521 * drivers affected by this action.
5522 */
5523 for (target = 0; target <= max_scsiid; target++) {
4534/*
4535 * Ensure that the card is paused in a location
4536 * outside of all critical sections and that all
4537 * pending work is completed prior to returning.
4538 * This routine should only be called from outside
4539 * an interrupt context.
4540 */
4541void
4542ahc_pause_and_flushwork(struct ahc_softc *ahc)
4543{
4544 int intstat;
4545 int maxloops;
4546
4547 maxloops = 1000;
4548 ahc->flags |= AHC_ALL_INTERRUPTS;
4549 intstat = 0;
4550 do {
4551 ahc_intr(ahc);
4552 ahc_pause(ahc);
4553 ahc_clear_critical_section(ahc);
4554 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
4555 break;
4556 maxloops--;
4557 } while (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) && --maxloops);
4558 if (maxloops == 0) {
4559 printf("Infinite interrupt loop, INTSTAT = %x",
4560 ahc_inb(ahc, INTSTAT));
4561 }
4562 ahc_platform_flushwork(ahc);
4563 ahc->flags &= ~AHC_ALL_INTERRUPTS;
4564}
4565
4566int
4567ahc_suspend(struct ahc_softc *ahc)
4568{
4569 uint8_t *ptr;
4570 int i;
4571
4572 ahc_pause_and_flushwork(ahc);
4573
4574 if (LIST_FIRST(&ahc->pending_scbs) != NULL)
4575 return (EBUSY);
4576
4577#if AHC_TARGET_MODE
4578 /*
4579 * XXX What about ATIOs that have not yet been serviced?
4580 * Perhaps we should just refuse to be suspended if we
4581 * are acting in a target role.
4582 */
4583 if (ahc->pending_device != NULL)
4584 return (EBUSY);
4585#endif
4586
4587 /* Save volatile registers */
4588 if ((ahc->features & AHC_TWIN) != 0) {
4589 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4590 ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ);
4591 ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4592 ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4593 ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0);
4594 ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1);
4595 ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER);
4596 ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL);
4597 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4598 }
4599 ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ);
4600 ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4601 ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4602 ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0);
4603 ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1);
4604 ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER);
4605 ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL);
4606
4607 if ((ahc->chip & AHC_PCI) != 0) {
4608 ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
4609 ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
4610 }
4611
4612 if ((ahc->features & AHC_DT) != 0) {
4613 u_int sfunct;
4614
4615 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4616 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4617 ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE);
4618 ahc_outb(ahc, SFUNCT, sfunct);
4619 ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1);
4620 }
4621
4622 if ((ahc->features & AHC_MULTI_FUNC) != 0)
4623 ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR);
4624
4625 if ((ahc->features & AHC_ULTRA2) != 0)
4626 ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
4627
4628 ptr = ahc->suspend_state.scratch_ram;
4629 for (i = 0; i < 64; i++)
4630 *ptr++ = ahc_inb(ahc, SRAM_BASE + i);
4631
4632 if ((ahc->features & AHC_MORE_SRAM) != 0) {
4633 for (i = 0; i < 16; i++)
4634 *ptr++ = ahc_inb(ahc, TARG_OFFSET + i);
4635 }
4636
4637 ptr = ahc->suspend_state.btt;
4638 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4639 for (i = 0;i < AHC_NUM_TARGETS; i++) {
4640 int j;
4641
4642 for (j = 0;j < AHC_NUM_LUNS; j++) {
4643 u_int tcl;
4644
4645 tcl = BUILD_TCL(i << 4, j);
4646 *ptr = ahc_index_busy_tcl(ahc, tcl);
4647 }
4648 }
4649 }
4650 ahc_shutdown(ahc);
4651 return (0);
4652}
4653
4654int
4655ahc_resume(struct ahc_softc *ahc)
4656{
4657 uint8_t *ptr;
4658 int i;
4659
4660 ahc_reset(ahc);
4661
4662 ahc_build_free_scb_list(ahc);
4663
4664 /* Restore volatile registers */
4665 if ((ahc->features & AHC_TWIN) != 0) {
4666 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4667 ahc_outb(ahc, SCSIID, ahc->our_id);
4668 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq);
4669 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0);
4670 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1);
4671 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0);
4672 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1);
4673 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer);
4674 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl);
4675 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4676 }
4677 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq);
4678 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0);
4679 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1);
4680 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0);
4681 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1);
4682 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer);
4683 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl);
4684 if ((ahc->features & AHC_ULTRA2) != 0)
4685 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4686 else
4687 ahc_outb(ahc, SCSIID, ahc->our_id);
4688
4689 if ((ahc->chip & AHC_PCI) != 0) {
4690 ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0);
4691 ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus);
4692 }
4693
4694 if ((ahc->features & AHC_DT) != 0) {
4695 u_int sfunct;
4696
4697 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4698 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4699 ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode);
4700 ahc_outb(ahc, SFUNCT, sfunct);
4701 ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1);
4702 }
4703
4704 if ((ahc->features & AHC_MULTI_FUNC) != 0)
4705 ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr);
4706
4707 if ((ahc->features & AHC_ULTRA2) != 0)
4708 ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh);
4709
4710 ptr = ahc->suspend_state.scratch_ram;
4711 for (i = 0; i < 64; i++)
4712 ahc_outb(ahc, SRAM_BASE + i, *ptr++);
4713
4714 if ((ahc->features & AHC_MORE_SRAM) != 0) {
4715 for (i = 0; i < 16; i++)
4716 ahc_outb(ahc, TARG_OFFSET + i, *ptr++);
4717 }
4718
4719 ptr = ahc->suspend_state.btt;
4720 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4721 for (i = 0;i < AHC_NUM_TARGETS; i++) {
4722 int j;
4723
4724 for (j = 0;j < AHC_NUM_LUNS; j++) {
4725 u_int tcl;
4726
4727 tcl = BUILD_TCL(i << 4, j);
4728 ahc_busy_tcl(ahc, tcl, *ptr);
4729 }
4730 }
4731 }
4732 return (0);
4733}
4734
4735/************************** Busy Target Table *********************************/
4736/*
4737 * Return the untagged transaction id for a given target/channel lun.
4738 * Optionally, clear the entry.
4739 */
4740u_int
4741ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
4742{
4743 u_int scbid;
4744 u_int target_offset;
4745
4746 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4747 u_int saved_scbptr;
4748
4749 saved_scbptr = ahc_inb(ahc, SCBPTR);
4750 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4751 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
4752 ahc_outb(ahc, SCBPTR, saved_scbptr);
4753 } else {
4754 target_offset = TCL_TARGET_OFFSET(tcl);
4755 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
4756 }
4757
4758 return (scbid);
4759}
4760
4761void
4762ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
4763{
4764 u_int target_offset;
4765
4766 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4767 u_int saved_scbptr;
4768
4769 saved_scbptr = ahc_inb(ahc, SCBPTR);
4770 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4771 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
4772 ahc_outb(ahc, SCBPTR, saved_scbptr);
4773 } else {
4774 target_offset = TCL_TARGET_OFFSET(tcl);
4775 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
4776 }
4777}
4778
4779void
4780ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
4781{
4782 u_int target_offset;
4783
4784 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4785 u_int saved_scbptr;
4786
4787 saved_scbptr = ahc_inb(ahc, SCBPTR);
4788 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4789 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
4790 ahc_outb(ahc, SCBPTR, saved_scbptr);
4791 } else {
4792 target_offset = TCL_TARGET_OFFSET(tcl);
4793 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
4794 }
4795}
4796
4797/************************** SCB and SCB queue management **********************/
4798int
4799ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
4800 char channel, int lun, u_int tag, role_t role)
4801{
4802 int targ = SCB_GET_TARGET(ahc, scb);
4803 char chan = SCB_GET_CHANNEL(ahc, scb);
4804 int slun = SCB_GET_LUN(scb);
4805 int match;
4806
4807 match = ((chan == channel) || (channel == ALL_CHANNELS));
4808 if (match != 0)
4809 match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
4810 if (match != 0)
4811 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
4812 if (match != 0) {
4813#if AHC_TARGET_MODE
4814 int group;
4815
4816 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
4817 if (role == ROLE_INITIATOR) {
4818 match = (group != XPT_FC_GROUP_TMODE)
4819 && ((tag == scb->hscb->tag)
4820 || (tag == SCB_LIST_NULL));
4821 } else if (role == ROLE_TARGET) {
4822 match = (group == XPT_FC_GROUP_TMODE)
4823 && ((tag == scb->io_ctx->csio.tag_id)
4824 || (tag == SCB_LIST_NULL));
4825 }
4826#else /* !AHC_TARGET_MODE */
4827 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
4828#endif /* AHC_TARGET_MODE */
4829 }
4830
4831 return match;
4832}
4833
4834void
4835ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
4836{
4837 int target;
4838 char channel;
4839 int lun;
4840
4841 target = SCB_GET_TARGET(ahc, scb);
4842 lun = SCB_GET_LUN(scb);
4843 channel = SCB_GET_CHANNEL(ahc, scb);
4844
4845 ahc_search_qinfifo(ahc, target, channel, lun,
4846 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
4847 CAM_REQUEUE_REQ, SEARCH_COMPLETE);
4848
4849 ahc_platform_freeze_devq(ahc, scb);
4850}
4851
4852void
4853ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
4854{
4855 struct scb *prev_scb;
4856
4857 prev_scb = NULL;
4858 if (ahc_qinfifo_count(ahc) != 0) {
4859 u_int prev_tag;
4860 uint8_t prev_pos;
4861
4862 prev_pos = ahc->qinfifonext - 1;
4863 prev_tag = ahc->qinfifo[prev_pos];
4864 prev_scb = ahc_lookup_scb(ahc, prev_tag);
4865 }
4866 ahc_qinfifo_requeue(ahc, prev_scb, scb);
4867 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4868 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4869 } else {
4870 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4871 }
4872}
4873
4874static void
4875ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
4876 struct scb *scb)
4877{
4878 if (prev_scb == NULL)
4879 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
4880 else
4881 prev_scb->hscb->next = scb->hscb->tag;
4882 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
4883 scb->hscb->next = ahc->next_queued_scb->hscb->tag;
4884}
4885
4886static int
4887ahc_qinfifo_count(struct ahc_softc *ahc)
4888{
4889 u_int8_t qinpos;
4890 u_int8_t diff;
4891
4892 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4893 qinpos = ahc_inb(ahc, SNSCB_QOFF);
4894 ahc_outb(ahc, SNSCB_QOFF, qinpos);
4895 } else
4896 qinpos = ahc_inb(ahc, QINPOS);
4897 diff = ahc->qinfifonext - qinpos;
4898 return (diff);
4899}
4900
4901int
4902ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
4903 int lun, u_int tag, role_t role, uint32_t status,
4904 ahc_search_action action)
4905{
4906 struct scb *scb;
4907 struct scb *prev_scb;
4908 uint8_t qinstart;
4909 uint8_t qinpos;
4910 uint8_t qintail;
4911 uint8_t next, prev;
4912 uint8_t curscbptr;
4913 int found;
4914 int maxtarget;
4915 int i;
4916 int have_qregs;
4917
4918 qintail = ahc->qinfifonext;
4919 have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
4920 if (have_qregs) {
4921 qinstart = ahc_inb(ahc, SNSCB_QOFF);
4922 ahc_outb(ahc, SNSCB_QOFF, qinstart);
4923 } else
4924 qinstart = ahc_inb(ahc, QINPOS);
4925 qinpos = qinstart;
4926 next = ahc_inb(ahc, NEXT_QUEUED_SCB);
4927 found = 0;
4928 prev_scb = NULL;
4929
4930 if (action == SEARCH_COMPLETE) {
4931 /*
4932 * Don't attempt to run any queued untagged transactions
4933 * until we are done with the abort process.
4934 */
4935 ahc_freeze_untagged_queues(ahc);
4936 }
4937
4938 /*
4939 * Start with an empty queue. Entries that are not chosen
4940 * for removal will be re-added to the queue as we go.
4941 */
4942 ahc->qinfifonext = qinpos;
4943 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
4944
4945 while (qinpos != qintail) {
4946 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
4947 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
4948 /*
4949 * We found an scb that needs to be acted on.
4950 */
4951 found++;
4952 switch (action) {
4953 case SEARCH_COMPLETE:
4954 {
4955 cam_status ostat;
4956 cam_status cstat;
4957
4958 ostat = ahc_get_transaction_status(scb);
4959 if (ostat == CAM_REQ_INPROG)
4960 ahc_set_transaction_status(scb,
4961 status);
4962 cstat = ahc_get_transaction_status(scb);
4963 if (cstat != CAM_REQ_CMP)
4964 ahc_freeze_scb(scb);
4965 if ((scb->flags & SCB_ACTIVE) == 0)
4966 printf("Inactive SCB in qinfifo\n");
4967 ahc_done(ahc, scb);
4968
4969 /* FALLTHROUGH */
4970 case SEARCH_REMOVE:
4971 break;
4972 }
4973 case SEARCH_COUNT:
4974 ahc_qinfifo_requeue(ahc, prev_scb, scb);
4975 prev_scb = scb;
4976 break;
4977 }
4978 } else {
4979 ahc_qinfifo_requeue(ahc, prev_scb, scb);
4980 prev_scb = scb;
4981 }
4982 qinpos++;
4983 }
4984
4985 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4986 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4987 } else {
4988 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4989 }
4990
4991 if (action != SEARCH_COUNT
4992 && (found != 0)
4993 && (qinstart != ahc->qinfifonext)) {
4994 /*
4995 * The sequencer may be in the process of dmaing
4996 * down the SCB at the beginning of the queue.
4997 * This could be problematic if either the first,
4998 * or the second SCB is removed from the queue
4999 * (the first SCB includes a pointer to the "next"
5000 * SCB to dma). If we have removed any entries, swap
5001 * the first element in the queue with the next HSCB
5002 * so the sequencer will notice that NEXT_QUEUED_SCB
5003 * has changed during its dma attempt and will retry
5004 * the DMA.
5005 */
5006 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
5007
5008 /*
5009 * ahc_swap_with_next_hscb forces our next pointer to
5010 * point to the reserved SCB for future commands. Save
5011 * and restore our original next pointer to maintain
5012 * queue integrity.
5013 */
5014 next = scb->hscb->next;
5015 ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
5016 ahc_swap_with_next_hscb(ahc, scb);
5017 scb->hscb->next = next;
5018 ahc->qinfifo[qinstart] = scb->hscb->tag;
5019
5020 /* Tell the card about the new head of the qinfifo. */
5021 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5022
5023 /* Fixup the tail "next" pointer. */
5024 qintail = ahc->qinfifonext - 1;
5025 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
5026 scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5027 }
5028
5029 /*
5030 * Search waiting for selection list.
5031 */
5032 curscbptr = ahc_inb(ahc, SCBPTR);
5033 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */
5034 prev = SCB_LIST_NULL;
5035
5036 while (next != SCB_LIST_NULL) {
5037 uint8_t scb_index;
5038
5039 ahc_outb(ahc, SCBPTR, next);
5040 scb_index = ahc_inb(ahc, SCB_TAG);
5041 if (scb_index >= ahc->scb_data->numscbs) {
5042 printf("Waiting List inconsistency. "
5043 "SCB index == %d, yet numscbs == %d.",
5044 scb_index, ahc->scb_data->numscbs);
5045 ahc_dump_card_state(ahc);
5046 panic("for safety");
5047 }
5048 scb = ahc_lookup_scb(ahc, scb_index);
5049 if (ahc_match_scb(ahc, scb, target, channel,
5050 lun, SCB_LIST_NULL, role)) {
5051 /*
5052 * We found an scb that needs to be acted on.
5053 */
5054 found++;
5055 switch (action) {
5056 case SEARCH_COMPLETE:
5057 {
5058 cam_status ostat;
5059 cam_status cstat;
5060
5061 ostat = ahc_get_transaction_status(scb);
5062 if (ostat == CAM_REQ_INPROG)
5063 ahc_set_transaction_status(scb,
5064 status);
5065 cstat = ahc_get_transaction_status(scb);
5066 if (cstat != CAM_REQ_CMP)
5067 ahc_freeze_scb(scb);
5068 if ((scb->flags & SCB_ACTIVE) == 0)
5069 printf("Inactive SCB in Waiting List\n");
5070 ahc_done(ahc, scb);
5071 /* FALLTHROUGH */
5072 }
5073 case SEARCH_REMOVE:
5074 next = ahc_rem_wscb(ahc, next, prev);
5075 break;
5076 case SEARCH_COUNT:
5077 prev = next;
5078 next = ahc_inb(ahc, SCB_NEXT);
5079 break;
5080 }
5081 } else {
5082
5083 prev = next;
5084 next = ahc_inb(ahc, SCB_NEXT);
5085 }
5086 }
5087 ahc_outb(ahc, SCBPTR, curscbptr);
5088
5089 /*
5090 * And lastly, the untagged holding queues.
5091 */
5092 i = 0;
5093 if ((ahc->flags & AHC_SCB_BTT) == 0) {
5094
5095 maxtarget = 16;
5096 if (target != CAM_TARGET_WILDCARD) {
5097
5098 i = target;
5099 if (channel == 'B')
5100 i += 8;
5101 maxtarget = i + 1;
5102 }
5103 } else {
5104 maxtarget = 0;
5105 }
5106
5107 for (; i < maxtarget; i++) {
5108 struct scb_tailq *untagged_q;
5109 struct scb *next_scb;
5110
5111 untagged_q = &(ahc->untagged_queues[i]);
5112 next_scb = TAILQ_FIRST(untagged_q);
5113 while (next_scb != NULL) {
5114
5115 scb = next_scb;
5116 next_scb = TAILQ_NEXT(scb, links.tqe);
5117
5118 /*
5119 * The head of the list may be the currently
5120 * active untagged command for a device.
5121 * We're only searching for commands that
5122 * have not been started. A transaction
5123 * marked active but still in the qinfifo
5124 * is removed by the qinfifo scanning code
5125 * above.
5126 */
5127 if ((scb->flags & SCB_ACTIVE) != 0)
5128 continue;
5129
5130 if (ahc_match_scb(ahc, scb, target, channel,
5131 lun, SCB_LIST_NULL, role)) {
5132 /*
5133 * We found an scb that needs to be acted on.
5134 */
5135 found++;
5136 switch (action) {
5137 case SEARCH_COMPLETE:
5138 {
5139 cam_status ostat;
5140 cam_status cstat;
5141
5142 ostat = ahc_get_transaction_status(scb);
5143 if (ostat == CAM_REQ_INPROG)
5144 ahc_set_transaction_status(scb,
5145 status);
5146 cstat = ahc_get_transaction_status(scb);
5147 if (cstat != CAM_REQ_CMP)
5148 ahc_freeze_scb(scb);
5149 if ((scb->flags & SCB_ACTIVE) == 0)
5150 printf("Inactive SCB in untaggedQ\n");
5151 ahc_done(ahc, scb);
5152 break;
5153 }
5154 case SEARCH_REMOVE:
5155 TAILQ_REMOVE(untagged_q, scb,
5156 links.tqe);
5157 break;
5158 case SEARCH_COUNT:
5159 break;
5160 }
5161 }
5162 }
5163 }
5164
5165 if (action == SEARCH_COMPLETE)
5166 ahc_release_untagged_queues(ahc);
5167 return (found);
5168}
5169
5170int
5171ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
5172 int lun, u_int tag, int stop_on_first, int remove,
5173 int save_state)
5174{
5175 struct scb *scbp;
5176 u_int next;
5177 u_int prev;
5178 u_int count;
5179 u_int active_scb;
5180
5181 count = 0;
5182 next = ahc_inb(ahc, DISCONNECTED_SCBH);
5183 prev = SCB_LIST_NULL;
5184
5185 if (save_state) {
5186 /* restore this when we're done */
5187 active_scb = ahc_inb(ahc, SCBPTR);
5188 } else
5189 /* Silence compiler */
5190 active_scb = SCB_LIST_NULL;
5191
5192 while (next != SCB_LIST_NULL) {
5193 u_int scb_index;
5194
5195 ahc_outb(ahc, SCBPTR, next);
5196 scb_index = ahc_inb(ahc, SCB_TAG);
5197 if (scb_index >= ahc->scb_data->numscbs) {
5198 printf("Disconnected List inconsistency. "
5199 "SCB index == %d, yet numscbs == %d.",
5200 scb_index, ahc->scb_data->numscbs);
5201 ahc_dump_card_state(ahc);
5202 panic("for safety");
5203 }
5204
5205 if (next == prev) {
5206 panic("Disconnected List Loop. "
5207 "cur SCBPTR == %x, prev SCBPTR == %x.",
5208 next, prev);
5209 }
5210 scbp = ahc_lookup_scb(ahc, scb_index);
5211 if (ahc_match_scb(ahc, scbp, target, channel, lun,
5212 tag, ROLE_INITIATOR)) {
5213 count++;
5214 if (remove) {
5215 next =
5216 ahc_rem_scb_from_disc_list(ahc, prev, next);
5217 } else {
5218 prev = next;
5219 next = ahc_inb(ahc, SCB_NEXT);
5220 }
5221 if (stop_on_first)
5222 break;
5223 } else {
5224 prev = next;
5225 next = ahc_inb(ahc, SCB_NEXT);
5226 }
5227 }
5228 if (save_state)
5229 ahc_outb(ahc, SCBPTR, active_scb);
5230 return (count);
5231}
5232
5233/*
5234 * Remove an SCB from the on chip list of disconnected transactions.
5235 * This is empty/unused if we are not performing SCB paging.
5236 */
5237static u_int
5238ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
5239{
5240 u_int next;
5241
5242 ahc_outb(ahc, SCBPTR, scbptr);
5243 next = ahc_inb(ahc, SCB_NEXT);
5244
5245 ahc_outb(ahc, SCB_CONTROL, 0);
5246
5247 ahc_add_curscb_to_free_list(ahc);
5248
5249 if (prev != SCB_LIST_NULL) {
5250 ahc_outb(ahc, SCBPTR, prev);
5251 ahc_outb(ahc, SCB_NEXT, next);
5252 } else
5253 ahc_outb(ahc, DISCONNECTED_SCBH, next);
5254
5255 return (next);
5256}
5257
5258/*
5259 * Add the SCB as selected by SCBPTR onto the on chip list of
5260 * free hardware SCBs. This list is empty/unused if we are not
5261 * performing SCB paging.
5262 */
5263static void
5264ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5265{
5266 /*
5267 * Invalidate the tag so that our abort
5268 * routines don't think it's active.
5269 */
5270 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
5271
5272 if ((ahc->flags & AHC_PAGESCBS) != 0) {
5273 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
5274 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
5275 }
5276}
5277
5278/*
5279 * Manipulate the waiting for selection list and return the
5280 * scb that follows the one that we remove.
5281 */
5282static u_int
5283ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5284{
5285 u_int curscb, next;
5286
5287 /*
5288 * Select the SCB we want to abort and
5289 * pull the next pointer out of it.
5290 */
5291 curscb = ahc_inb(ahc, SCBPTR);
5292 ahc_outb(ahc, SCBPTR, scbpos);
5293 next = ahc_inb(ahc, SCB_NEXT);
5294
5295 /* Clear the necessary fields */
5296 ahc_outb(ahc, SCB_CONTROL, 0);
5297
5298 ahc_add_curscb_to_free_list(ahc);
5299
5300 /* update the waiting list */
5301 if (prev == SCB_LIST_NULL) {
5302 /* First in the list */
5303 ahc_outb(ahc, WAITING_SCBH, next);
5304
5305 /*
5306 * Ensure we aren't attempting to perform
5307 * selection for this entry.
5308 */
5309 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
5310 } else {
5311 /*
5312 * Select the scb that pointed to us
5313 * and update its next pointer.
5314 */
5315 ahc_outb(ahc, SCBPTR, prev);
5316 ahc_outb(ahc, SCB_NEXT, next);
5317 }
5318
5319 /*
5320 * Point us back at the original scb position.
5321 */
5322 ahc_outb(ahc, SCBPTR, curscb);
5323 return next;
5324}
5325
5326/******************************** Error Handling ******************************/
5327/*
5328 * Abort all SCBs that match the given description (target/channel/lun/tag),
5329 * setting their status to the passed in status if the status has not already
5330 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
5331 * is paused before it is called.
5332 */
5333int
5334ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5335 int lun, u_int tag, role_t role, uint32_t status)
5336{
5337 struct scb *scbp;
5338 struct scb *scbp_next;
5339 u_int active_scb;
5340 int i, j;
5341 int maxtarget;
5342 int minlun;
5343 int maxlun;
5344
5345 int found;
5346
5347 /*
5348 * Don't attempt to run any queued untagged transactions
5349 * until we are done with the abort process.
5350 */
5351 ahc_freeze_untagged_queues(ahc);
5352
5353 /* restore this when we're done */
5354 active_scb = ahc_inb(ahc, SCBPTR);
5355
5356 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
5357 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5358
5359 /*
5360 * Clean out the busy target table for any untagged commands.
5361 */
5362 i = 0;
5363 maxtarget = 16;
5364 if (target != CAM_TARGET_WILDCARD) {
5365 i = target;
5366 if (channel == 'B')
5367 i += 8;
5368 maxtarget = i + 1;
5369 }
5370
5371 if (lun == CAM_LUN_WILDCARD) {
5372
5373 /*
5374 * Unless we are using an SCB based
5375 * busy targets table, there is only
5376 * one table entry for all luns of
5377 * a target.
5378 */
5379 minlun = 0;
5380 maxlun = 1;
5381 if ((ahc->flags & AHC_SCB_BTT) != 0)
5382 maxlun = AHC_NUM_LUNS;
5383 } else {
5384 minlun = lun;
5385 maxlun = lun + 1;
5386 }
5387
5388 for (;i < maxtarget; i++) {
5389 for (j = minlun;j < maxlun; j++)
5390 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
5391 }
5392
5393 /*
5394 * Go through the disconnected list and remove any entries we
5395 * have queued for completion, 0'ing their control byte too.
5396 * We save the active SCB and restore it ourselves, so there
5397 * is no reason for this search to restore it too.
5398 */
5399 ahc_search_disc_list(ahc, target, channel, lun, tag,
5400 /*stop_on_first*/FALSE, /*remove*/TRUE,
5401 /*save_state*/FALSE);
5402
5403 /*
5404 * Go through the hardware SCB array looking for commands that
5405 * were active but not on any list.
5406 */
5407 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
5408 u_int scbid;
5409
5410 ahc_outb(ahc, SCBPTR, i);
5411 scbid = ahc_inb(ahc, SCB_TAG);
5412 scbp = ahc_lookup_scb(ahc, scbid);
5413 if (scbp != NULL
5414 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
5415 ahc_add_curscb_to_free_list(ahc);
5416 }
5417
5418 /*
5419 * Go through the pending CCB list and look for
5420 * commands for this target that are still active.
5421 * These are other tagged commands that were
5422 * disconnected when the reset occured.
5423 */
5424 scbp_next = LIST_FIRST(&ahc->pending_scbs);
5425 while (scbp_next != NULL) {
5426 scbp = scbp_next;
5427 scbp_next = LIST_NEXT(scbp, pending_links);
5428 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5429 cam_status ostat;
5430
5431 ostat = ahc_get_transaction_status(scbp);
5432 if (ostat == CAM_REQ_INPROG)
5433 ahc_set_transaction_status(scbp, status);
5434 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
5435 ahc_freeze_scb(scbp);
5436 if ((scbp->flags & SCB_ACTIVE) == 0)
5437 printf("Inactive SCB on pending list\n");
5438 ahc_done(ahc, scbp);
5439 found++;
5440 }
5441 }
5442 ahc_outb(ahc, SCBPTR, active_scb);
5443 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
5444 ahc_release_untagged_queues(ahc);
5445 return found;
5446}
5447
5448static void
5449ahc_reset_current_bus(struct ahc_softc *ahc)
5450{
5451 uint8_t scsiseq;
5452
5453 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5454 scsiseq = ahc_inb(ahc, SCSISEQ);
5455 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
5456 ahc_delay(AHC_BUSRESET_DELAY);
5457 /* Turn off the bus reset */
5458 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
5459
5460 ahc_clear_intstat(ahc);
5461
5462 /* Re-enable reset interrupts */
5463 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
5464}
5465
5466int
5467ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5468{
5469 struct ahc_devinfo devinfo;
5470 u_int initiator, target, max_scsiid;
5471 u_int sblkctl;
5472 int found;
5473 int restart_needed;
5474 char cur_channel;
5475
5476 ahc->pending_device = NULL;
5477
5478 ahc_compile_devinfo(&devinfo,
5479 CAM_TARGET_WILDCARD,
5480 CAM_TARGET_WILDCARD,
5481 CAM_LUN_WILDCARD,
5482 channel, ROLE_UNKNOWN);
5483 ahc_pause(ahc);
5484
5485 /* Make sure the sequencer is in a safe location. */
5486 ahc_clear_critical_section(ahc);
5487
5488 /*
5489 * Run our command complete fifos to ensure that we perform
5490 * completion processing on any commands that 'completed'
5491 * before the reset occurred.
5492 */
5493 ahc_run_qoutfifo(ahc);
5494#if AHC_TARGET_MODE
5495 if ((ahc->flags & AHC_TARGETROLE) != 0) {
5496 ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5497 }
5498#endif
5499
5500 /*
5501 * Reset the bus if we are initiating this reset
5502 */
5503 sblkctl = ahc_inb(ahc, SBLKCTL);
5504 cur_channel = 'A';
5505 if ((ahc->features & AHC_TWIN) != 0
5506 && ((sblkctl & SELBUSB) != 0))
5507 cur_channel = 'B';
5508 if (cur_channel != channel) {
5509 /* Case 1: Command for another bus is active
5510 * Stealthily reset the other bus without
5511 * upsetting the current bus.
5512 */
5513 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5514 ahc_outb(ahc, SIMODE1,
5515 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
5516 ahc_outb(ahc, SCSISEQ,
5517 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
5518 if (initiate_reset)
5519 ahc_reset_current_bus(ahc);
5520 ahc_clear_intstat(ahc);
5521 ahc_outb(ahc, SBLKCTL, sblkctl);
5522 restart_needed = FALSE;
5523 } else {
5524 /* Case 2: A command from this bus is active or we're idle */
5525 ahc_clear_msg_state(ahc);
5526 ahc_outb(ahc, SIMODE1,
5527 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
5528 ahc_outb(ahc, SCSISEQ,
5529 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
5530 if (initiate_reset)
5531 ahc_reset_current_bus(ahc);
5532 ahc_clear_intstat(ahc);
5533 restart_needed = TRUE;
5534 }
5535
5536 /*
5537 * Clean up all the state information for the
5538 * pending transactions on this bus.
5539 */
5540 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
5541 CAM_LUN_WILDCARD, SCB_LIST_NULL,
5542 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
5543
5544 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
5545
5546#ifdef AHC_TARGET_MODE
5547 /*
5548 * Send an immediate notify ccb to all target more peripheral
5549 * drivers affected by this action.
5550 */
5551 for (target = 0; target <= max_scsiid; target++) {
5524 struct tmode_tstate* tstate;
5552 struct ahc_tmode_tstate* tstate;
5525 u_int lun;
5526
5527 tstate = ahc->enabled_targets[target];
5528 if (tstate == NULL)
5529 continue;
5530 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
5553 u_int lun;
5554
5555 tstate = ahc->enabled_targets[target];
5556 if (tstate == NULL)
5557 continue;
5558 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
5531 struct tmode_lstate* lstate;
5559 struct ahc_tmode_lstate* lstate;
5532
5533 lstate = tstate->enabled_luns[lun];
5534 if (lstate == NULL)
5535 continue;
5536
5537 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
5538 EVENT_TYPE_BUS_RESET, /*arg*/0);
5539 ahc_send_lstate_events(ahc, lstate);
5540 }
5541 }
5542#endif
5543 /* Notify the XPT that a bus reset occurred */
5544 ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
5545 CAM_LUN_WILDCARD, AC_BUS_RESET);
5546
5547 /*
5548 * Revert to async/narrow transfers until we renegotiate.
5549 */
5550 for (target = 0; target <= max_scsiid; target++) {
5551
5552 if (ahc->enabled_targets[target] == NULL)
5553 continue;
5554 for (initiator = 0; initiator <= max_scsiid; initiator++) {
5555 struct ahc_devinfo devinfo;
5556
5557 ahc_compile_devinfo(&devinfo, target, initiator,
5558 CAM_LUN_WILDCARD,
5559 channel, ROLE_UNKNOWN);
5560 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5561 AHC_TRANS_CUR, /*paused*/TRUE);
5562 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
5563 /*period*/0, /*offset*/0,
5564 /*ppr_options*/0, AHC_TRANS_CUR,
5565 /*paused*/TRUE);
5566 }
5567 }
5568
5569 if (restart_needed)
5570 ahc_restart(ahc);
5571 else
5572 ahc_unpause(ahc);
5573 return found;
5574}
5575
5576
5577/***************************** Residual Processing ****************************/
5578/*
5579 * Calculate the residual for a just completed SCB.
5580 */
5581static void
5582ahc_calc_residual(struct scb *scb)
5583{
5584 struct hardware_scb *hscb;
5585 struct status_pkt *spkt;
5586 uint32_t sgptr;
5587 uint32_t resid_sgptr;
5588 uint32_t resid;
5589
5590 /*
5591 * 5 cases.
5592 * 1) No residual.
5593 * SG_RESID_VALID clear in sgptr.
5594 * 2) Transferless command
5595 * 3) Never performed any transfers.
5596 * sgptr has SG_FULL_RESID set.
5597 * 4) No residual but target did not
5598 * save data pointers after the
5599 * last transfer, so sgptr was
5600 * never updated.
5601 * 5) We have a partial residual.
5602 * Use residual_sgptr to determine
5603 * where we are.
5604 */
5605
5606 hscb = scb->hscb;
5607 sgptr = ahc_le32toh(hscb->sgptr);
5608 if ((sgptr & SG_RESID_VALID) == 0)
5609 /* Case 1 */
5610 return;
5611 sgptr &= ~SG_RESID_VALID;
5612
5613 if ((sgptr & SG_LIST_NULL) != 0)
5614 /* Case 2 */
5615 return;
5616
5617 spkt = &hscb->shared_data.status;
5618 resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
5619 if ((sgptr & SG_FULL_RESID) != 0) {
5620 /* Case 3 */
5621 resid = ahc_get_transfer_length(scb);
5622 } else if ((resid_sgptr & SG_LIST_NULL) != 0) {
5623 /* Case 4 */
5624 return;
5625 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
5626 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
5627 } else {
5628 struct ahc_dma_seg *sg;
5629
5630 /*
5631 * Remainder of the SG where the transfer
5632 * stopped.
5633 */
5634 resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
5635 sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
5636
5637 /* The residual sg_ptr always points to the next sg */
5638 sg--;
5639
5640 /*
5641 * Add up the contents of all residual
5642 * SG segments that are after the SG where
5643 * the transfer stopped.
5644 */
5645 while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
5646 sg++;
5647 resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
5648 }
5649 }
5650 if ((scb->flags & SCB_SENSE) == 0)
5651 ahc_set_residual(scb, resid);
5652 else
5653 ahc_set_sense_residual(scb, resid);
5654
5655#ifdef AHC_DEBUG
5656 if (ahc_debug & AHC_SHOWMISC) {
5657 ahc_print_path(ahc, scb);
5658 printf("Handled Residual of %d bytes\n", resid);
5659 }
5660#endif
5661}
5662
5663/******************************* Target Mode **********************************/
5664#ifdef AHC_TARGET_MODE
5665/*
5666 * Add a target mode event to this lun's queue
5667 */
5668static void
5560
5561 lstate = tstate->enabled_luns[lun];
5562 if (lstate == NULL)
5563 continue;
5564
5565 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
5566 EVENT_TYPE_BUS_RESET, /*arg*/0);
5567 ahc_send_lstate_events(ahc, lstate);
5568 }
5569 }
5570#endif
5571 /* Notify the XPT that a bus reset occurred */
5572 ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
5573 CAM_LUN_WILDCARD, AC_BUS_RESET);
5574
5575 /*
5576 * Revert to async/narrow transfers until we renegotiate.
5577 */
5578 for (target = 0; target <= max_scsiid; target++) {
5579
5580 if (ahc->enabled_targets[target] == NULL)
5581 continue;
5582 for (initiator = 0; initiator <= max_scsiid; initiator++) {
5583 struct ahc_devinfo devinfo;
5584
5585 ahc_compile_devinfo(&devinfo, target, initiator,
5586 CAM_LUN_WILDCARD,
5587 channel, ROLE_UNKNOWN);
5588 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5589 AHC_TRANS_CUR, /*paused*/TRUE);
5590 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
5591 /*period*/0, /*offset*/0,
5592 /*ppr_options*/0, AHC_TRANS_CUR,
5593 /*paused*/TRUE);
5594 }
5595 }
5596
5597 if (restart_needed)
5598 ahc_restart(ahc);
5599 else
5600 ahc_unpause(ahc);
5601 return found;
5602}
5603
5604
5605/***************************** Residual Processing ****************************/
5606/*
5607 * Calculate the residual for a just completed SCB.
5608 */
5609static void
5610ahc_calc_residual(struct scb *scb)
5611{
5612 struct hardware_scb *hscb;
5613 struct status_pkt *spkt;
5614 uint32_t sgptr;
5615 uint32_t resid_sgptr;
5616 uint32_t resid;
5617
5618 /*
5619 * 5 cases.
5620 * 1) No residual.
5621 * SG_RESID_VALID clear in sgptr.
5622 * 2) Transferless command
5623 * 3) Never performed any transfers.
5624 * sgptr has SG_FULL_RESID set.
5625 * 4) No residual but target did not
5626 * save data pointers after the
5627 * last transfer, so sgptr was
5628 * never updated.
5629 * 5) We have a partial residual.
5630 * Use residual_sgptr to determine
5631 * where we are.
5632 */
5633
5634 hscb = scb->hscb;
5635 sgptr = ahc_le32toh(hscb->sgptr);
5636 if ((sgptr & SG_RESID_VALID) == 0)
5637 /* Case 1 */
5638 return;
5639 sgptr &= ~SG_RESID_VALID;
5640
5641 if ((sgptr & SG_LIST_NULL) != 0)
5642 /* Case 2 */
5643 return;
5644
5645 spkt = &hscb->shared_data.status;
5646 resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
5647 if ((sgptr & SG_FULL_RESID) != 0) {
5648 /* Case 3 */
5649 resid = ahc_get_transfer_length(scb);
5650 } else if ((resid_sgptr & SG_LIST_NULL) != 0) {
5651 /* Case 4 */
5652 return;
5653 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
5654 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
5655 } else {
5656 struct ahc_dma_seg *sg;
5657
5658 /*
5659 * Remainder of the SG where the transfer
5660 * stopped.
5661 */
5662 resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
5663 sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
5664
5665 /* The residual sg_ptr always points to the next sg */
5666 sg--;
5667
5668 /*
5669 * Add up the contents of all residual
5670 * SG segments that are after the SG where
5671 * the transfer stopped.
5672 */
5673 while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
5674 sg++;
5675 resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
5676 }
5677 }
5678 if ((scb->flags & SCB_SENSE) == 0)
5679 ahc_set_residual(scb, resid);
5680 else
5681 ahc_set_sense_residual(scb, resid);
5682
5683#ifdef AHC_DEBUG
5684 if (ahc_debug & AHC_SHOWMISC) {
5685 ahc_print_path(ahc, scb);
5686 printf("Handled Residual of %d bytes\n", resid);
5687 }
5688#endif
5689}
5690
5691/******************************* Target Mode **********************************/
5692#ifdef AHC_TARGET_MODE
5693/*
5694 * Add a target mode event to this lun's queue
5695 */
5696static void
5669ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate,
5697ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
5670 u_int initiator_id, u_int event_type, u_int event_arg)
5671{
5672 struct ahc_tmode_event *event;
5673 int pending;
5674
5675 xpt_freeze_devq(lstate->path, /*count*/1);
5676 if (lstate->event_w_idx >= lstate->event_r_idx)
5677 pending = lstate->event_w_idx - lstate->event_r_idx;
5678 else
5679 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
5680 - (lstate->event_r_idx - lstate->event_w_idx);
5681
5682 if (event_type == EVENT_TYPE_BUS_RESET
5683 || event_type == MSG_BUS_DEV_RESET) {
5684 /*
5685 * Any earlier events are irrelevant, so reset our buffer.
5686 * This has the effect of allowing us to deal with reset
5687 * floods (an external device holding down the reset line)
5688 * without losing the event that is really interesting.
5689 */
5690 lstate->event_r_idx = 0;
5691 lstate->event_w_idx = 0;
5692 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
5693 }
5694
5695 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
5696 xpt_print_path(lstate->path);
5697 printf("immediate event %x:%x lost\n",
5698 lstate->event_buffer[lstate->event_r_idx].event_type,
5699 lstate->event_buffer[lstate->event_r_idx].event_arg);
5700 lstate->event_r_idx++;
5701 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5702 lstate->event_r_idx = 0;
5703 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
5704 }
5705
5706 event = &lstate->event_buffer[lstate->event_w_idx];
5707 event->initiator_id = initiator_id;
5708 event->event_type = event_type;
5709 event->event_arg = event_arg;
5710 lstate->event_w_idx++;
5711 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5712 lstate->event_w_idx = 0;
5713}
5714
5715/*
5716 * Send any target mode events queued up waiting
5717 * for immediate notify resources.
5718 */
5719void
5698 u_int initiator_id, u_int event_type, u_int event_arg)
5699{
5700 struct ahc_tmode_event *event;
5701 int pending;
5702
5703 xpt_freeze_devq(lstate->path, /*count*/1);
5704 if (lstate->event_w_idx >= lstate->event_r_idx)
5705 pending = lstate->event_w_idx - lstate->event_r_idx;
5706 else
5707 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
5708 - (lstate->event_r_idx - lstate->event_w_idx);
5709
5710 if (event_type == EVENT_TYPE_BUS_RESET
5711 || event_type == MSG_BUS_DEV_RESET) {
5712 /*
5713 * Any earlier events are irrelevant, so reset our buffer.
5714 * This has the effect of allowing us to deal with reset
5715 * floods (an external device holding down the reset line)
5716 * without losing the event that is really interesting.
5717 */
5718 lstate->event_r_idx = 0;
5719 lstate->event_w_idx = 0;
5720 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
5721 }
5722
5723 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
5724 xpt_print_path(lstate->path);
5725 printf("immediate event %x:%x lost\n",
5726 lstate->event_buffer[lstate->event_r_idx].event_type,
5727 lstate->event_buffer[lstate->event_r_idx].event_arg);
5728 lstate->event_r_idx++;
5729 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5730 lstate->event_r_idx = 0;
5731 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
5732 }
5733
5734 event = &lstate->event_buffer[lstate->event_w_idx];
5735 event->initiator_id = initiator_id;
5736 event->event_type = event_type;
5737 event->event_arg = event_arg;
5738 lstate->event_w_idx++;
5739 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5740 lstate->event_w_idx = 0;
5741}
5742
5743/*
5744 * Send any target mode events queued up waiting
5745 * for immediate notify resources.
5746 */
5747void
5720ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate)
5748ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
5721{
5722 struct ccb_hdr *ccbh;
5723 struct ccb_immed_notify *inot;
5724
5725 while (lstate->event_r_idx != lstate->event_w_idx
5726 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
5727 struct ahc_tmode_event *event;
5728
5729 event = &lstate->event_buffer[lstate->event_r_idx];
5730 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
5731 inot = (struct ccb_immed_notify *)ccbh;
5732 switch (event->event_type) {
5733 case EVENT_TYPE_BUS_RESET:
5734 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
5735 break;
5736 default:
5737 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
5738 inot->message_args[0] = event->event_type;
5739 inot->message_args[1] = event->event_arg;
5740 break;
5741 }
5742 inot->initiator_id = event->initiator_id;
5743 inot->sense_len = 0;
5744 xpt_done((union ccb *)inot);
5745 lstate->event_r_idx++;
5746 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5747 lstate->event_r_idx = 0;
5748 }
5749}
5750#endif
5751
5752/******************** Sequencer Program Patching/Download *********************/
5753
5754#ifdef AHC_DUMP_SEQ
5755void
5756ahc_dumpseq(struct ahc_softc* ahc)
5757{
5758 int i;
5759 int max_prog;
5760
5761 if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
5762 max_prog = 448;
5763 else if ((ahc->features & AHC_ULTRA2) != 0)
5764 max_prog = 768;
5765 else
5766 max_prog = 512;
5767
5768 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5769 ahc_outb(ahc, SEQADDR0, 0);
5770 ahc_outb(ahc, SEQADDR1, 0);
5771 for (i = 0; i < max_prog; i++) {
5772 uint8_t ins_bytes[4];
5773
5774 ahc_insb(ahc, SEQRAM, ins_bytes, 4);
5775 printf("0x%08x\n", ins_bytes[0] << 24
5776 | ins_bytes[1] << 16
5777 | ins_bytes[2] << 8
5778 | ins_bytes[3]);
5779 }
5780}
5781#endif
5782
5783static void
5784ahc_loadseq(struct ahc_softc *ahc)
5785{
5786 struct cs cs_table[num_critical_sections];
5787 u_int begin_set[num_critical_sections];
5788 u_int end_set[num_critical_sections];
5789 struct patch *cur_patch;
5790 u_int cs_count;
5791 u_int cur_cs;
5792 u_int i;
5793 int downloaded;
5794 u_int skip_addr;
5795 u_int sg_prefetch_cnt;
5796 uint8_t download_consts[7];
5797
5798 /*
5799 * Start out with 0 critical sections
5800 * that apply to this firmware load.
5801 */
5802 cs_count = 0;
5803 cur_cs = 0;
5804 memset(begin_set, 0, sizeof(begin_set));
5805 memset(end_set, 0, sizeof(end_set));
5806
5807 /* Setup downloadable constant table */
5808 download_consts[QOUTFIFO_OFFSET] = 0;
5809 if (ahc->targetcmds != NULL)
5810 download_consts[QOUTFIFO_OFFSET] += 32;
5811 download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
5812 download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
5813 download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
5814 sg_prefetch_cnt = ahc->pci_cachesize;
5815 if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
5816 sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
5817 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
5818 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
5819 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
5820
5821 cur_patch = patches;
5822 downloaded = 0;
5823 skip_addr = 0;
5824 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5825 ahc_outb(ahc, SEQADDR0, 0);
5826 ahc_outb(ahc, SEQADDR1, 0);
5827
5828 for (i = 0; i < sizeof(seqprog)/4; i++) {
5829 if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
5830 /*
5831 * Don't download this instruction as it
5832 * is in a patch that was removed.
5833 */
5834 continue;
5835 }
5836 /*
5837 * Move through the CS table until we find a CS
5838 * that might apply to this instruction.
5839 */
5840 for (; cur_cs < num_critical_sections; cur_cs++) {
5841 if (critical_sections[cur_cs].end <= i) {
5842 if (begin_set[cs_count] == TRUE
5843 && end_set[cs_count] == FALSE) {
5844 cs_table[cs_count].end = downloaded;
5845 end_set[cs_count] = TRUE;
5846 cs_count++;
5847 }
5848 continue;
5849 }
5850 if (critical_sections[cur_cs].begin <= i
5851 && begin_set[cs_count] == FALSE) {
5852 cs_table[cs_count].begin = downloaded;
5853 begin_set[cs_count] = TRUE;
5854 }
5855 break;
5856 }
5857 ahc_download_instr(ahc, i, download_consts);
5858 downloaded++;
5859 }
5860
5861 ahc->num_critical_sections = cs_count;
5862 if (cs_count != 0) {
5863
5864 cs_count *= sizeof(struct cs);
5865 ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
5866 if (ahc->critical_sections == NULL)
5867 panic("ahc_loadseq: Could not malloc");
5868 memcpy(ahc->critical_sections, cs_table, cs_count);
5869 }
5870 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
5871 ahc_restart(ahc);
5872
5873 if (bootverbose)
5874 printf(" %d instructions downloaded\n", downloaded);
5875}
5876
5877static int
5878ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
5879 u_int start_instr, u_int *skip_addr)
5880{
5881 struct patch *cur_patch;
5882 struct patch *last_patch;
5883 u_int num_patches;
5884
5885 num_patches = sizeof(patches)/sizeof(struct patch);
5886 last_patch = &patches[num_patches];
5887 cur_patch = *start_patch;
5888
5889 while (cur_patch < last_patch && start_instr == cur_patch->begin) {
5890
5891 if (cur_patch->patch_func(ahc) == 0) {
5892
5893 /* Start rejecting code */
5894 *skip_addr = start_instr + cur_patch->skip_instr;
5895 cur_patch += cur_patch->skip_patch;
5896 } else {
5897 /* Accepted this patch. Advance to the next
5898 * one and wait for our intruction pointer to
5899 * hit this point.
5900 */
5901 cur_patch++;
5902 }
5903 }
5904
5905 *start_patch = cur_patch;
5906 if (start_instr < *skip_addr)
5907 /* Still skipping */
5908 return (0);
5909
5910 return (1);
5911}
5912
5913static void
5914ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
5915{
5916 union ins_formats instr;
5917 struct ins_format1 *fmt1_ins;
5918 struct ins_format3 *fmt3_ins;
5919 u_int opcode;
5920
5921 /*
5922 * The firmware is always compiled into a little endian format.
5923 */
5924 instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
5925
5926 fmt1_ins = &instr.format1;
5927 fmt3_ins = NULL;
5928
5929 /* Pull the opcode */
5930 opcode = instr.format1.opcode;
5931 switch (opcode) {
5932 case AIC_OP_JMP:
5933 case AIC_OP_JC:
5934 case AIC_OP_JNC:
5935 case AIC_OP_CALL:
5936 case AIC_OP_JNE:
5937 case AIC_OP_JNZ:
5938 case AIC_OP_JE:
5939 case AIC_OP_JZ:
5940 {
5941 struct patch *cur_patch;
5942 int address_offset;
5943 u_int address;
5944 u_int skip_addr;
5945 u_int i;
5946
5947 fmt3_ins = &instr.format3;
5948 address_offset = 0;
5949 address = fmt3_ins->address;
5950 cur_patch = patches;
5951 skip_addr = 0;
5952
5953 for (i = 0; i < address;) {
5954
5955 ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
5956
5957 if (skip_addr > i) {
5958 int end_addr;
5959
5960 end_addr = MIN(address, skip_addr);
5961 address_offset += end_addr - i;
5962 i = skip_addr;
5963 } else {
5964 i++;
5965 }
5966 }
5967 address -= address_offset;
5968 fmt3_ins->address = address;
5969 /* FALLTHROUGH */
5970 }
5971 case AIC_OP_OR:
5972 case AIC_OP_AND:
5973 case AIC_OP_XOR:
5974 case AIC_OP_ADD:
5975 case AIC_OP_ADC:
5976 case AIC_OP_BMOV:
5977 if (fmt1_ins->parity != 0) {
5978 fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
5979 }
5980 fmt1_ins->parity = 0;
5981 if ((ahc->features & AHC_CMD_CHAN) == 0
5982 && opcode == AIC_OP_BMOV) {
5983 /*
5984 * Block move was added at the same time
5985 * as the command channel. Verify that
5986 * this is only a move of a single element
5987 * and convert the BMOV to a MOV
5988 * (AND with an immediate of FF).
5989 */
5990 if (fmt1_ins->immediate != 1)
5991 panic("%s: BMOV not supported\n",
5992 ahc_name(ahc));
5993 fmt1_ins->opcode = AIC_OP_AND;
5994 fmt1_ins->immediate = 0xff;
5995 }
5996 /* FALLTHROUGH */
5997 case AIC_OP_ROL:
5998 if ((ahc->features & AHC_ULTRA2) != 0) {
5999 int i, count;
6000
6001 /* Calculate odd parity for the instruction */
6002 for (i = 0, count = 0; i < 31; i++) {
6003 uint32_t mask;
6004
6005 mask = 0x01 << i;
6006 if ((instr.integer & mask) != 0)
6007 count++;
6008 }
6009 if ((count & 0x01) == 0)
6010 instr.format1.parity = 1;
6011 } else {
6012 /* Compress the instruction for older sequencers */
6013 if (fmt3_ins != NULL) {
6014 instr.integer =
6015 fmt3_ins->immediate
6016 | (fmt3_ins->source << 8)
6017 | (fmt3_ins->address << 16)
6018 | (fmt3_ins->opcode << 25);
6019 } else {
6020 instr.integer =
6021 fmt1_ins->immediate
6022 | (fmt1_ins->source << 8)
6023 | (fmt1_ins->destination << 16)
6024 | (fmt1_ins->ret << 24)
6025 | (fmt1_ins->opcode << 25);
6026 }
6027 }
6028 /* The sequencer is a little endian cpu */
6029 instr.integer = ahc_htole32(instr.integer);
6030 ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
6031 break;
6032 default:
6033 panic("Unknown opcode encountered in seq program");
6034 break;
6035 }
6036}
6037
6038void
6039ahc_dump_card_state(struct ahc_softc *ahc)
6040{
6041 struct scb *scb;
6042 struct scb_tailq *untagged_q;
6043 int target;
6044 int maxtarget;
6045 int i;
5749{
5750 struct ccb_hdr *ccbh;
5751 struct ccb_immed_notify *inot;
5752
5753 while (lstate->event_r_idx != lstate->event_w_idx
5754 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
5755 struct ahc_tmode_event *event;
5756
5757 event = &lstate->event_buffer[lstate->event_r_idx];
5758 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
5759 inot = (struct ccb_immed_notify *)ccbh;
5760 switch (event->event_type) {
5761 case EVENT_TYPE_BUS_RESET:
5762 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
5763 break;
5764 default:
5765 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
5766 inot->message_args[0] = event->event_type;
5767 inot->message_args[1] = event->event_arg;
5768 break;
5769 }
5770 inot->initiator_id = event->initiator_id;
5771 inot->sense_len = 0;
5772 xpt_done((union ccb *)inot);
5773 lstate->event_r_idx++;
5774 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5775 lstate->event_r_idx = 0;
5776 }
5777}
5778#endif
5779
5780/******************** Sequencer Program Patching/Download *********************/
5781
5782#ifdef AHC_DUMP_SEQ
5783void
5784ahc_dumpseq(struct ahc_softc* ahc)
5785{
5786 int i;
5787 int max_prog;
5788
5789 if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
5790 max_prog = 448;
5791 else if ((ahc->features & AHC_ULTRA2) != 0)
5792 max_prog = 768;
5793 else
5794 max_prog = 512;
5795
5796 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5797 ahc_outb(ahc, SEQADDR0, 0);
5798 ahc_outb(ahc, SEQADDR1, 0);
5799 for (i = 0; i < max_prog; i++) {
5800 uint8_t ins_bytes[4];
5801
5802 ahc_insb(ahc, SEQRAM, ins_bytes, 4);
5803 printf("0x%08x\n", ins_bytes[0] << 24
5804 | ins_bytes[1] << 16
5805 | ins_bytes[2] << 8
5806 | ins_bytes[3]);
5807 }
5808}
5809#endif
5810
/*
 * Download the sequencer firmware image into on-chip program RAM.
 *
 * Instructions belonging to option "patches" whose predicate rejects
 * this controller are skipped, and the critical-section table is
 * re-based from source addresses to *downloaded* addresses so the
 * kernel can later tell whether the sequencer is executing inside a
 * region it must not interrupt.  Panics if the critical-section table
 * cannot be allocated.
 */
static void
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	int	downloaded;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	/* Target command space, when present, precedes the qoutfifo. */
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	/*
	 * Size S/G prefetches to the PCI cacheline, but always cover
	 * at least two segments.
	 */
	sg_prefetch_cnt = ahc->pci_cachesize;
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	/* Put the sequencer into program RAM load mode at address 0. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				/*
				 * Close out the section being tracked,
				 * recording its end in downloaded-address
				 * terms.
				 */
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	/* Leave load mode and restart the freshly downloaded program. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
	ahc_restart(ahc);

	if (bootverbose)
		printf(" %d instructions downloaded\n", downloaded);
}
5904
5905static int
5906ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
5907 u_int start_instr, u_int *skip_addr)
5908{
5909 struct patch *cur_patch;
5910 struct patch *last_patch;
5911 u_int num_patches;
5912
5913 num_patches = sizeof(patches)/sizeof(struct patch);
5914 last_patch = &patches[num_patches];
5915 cur_patch = *start_patch;
5916
5917 while (cur_patch < last_patch && start_instr == cur_patch->begin) {
5918
5919 if (cur_patch->patch_func(ahc) == 0) {
5920
5921 /* Start rejecting code */
5922 *skip_addr = start_instr + cur_patch->skip_instr;
5923 cur_patch += cur_patch->skip_patch;
5924 } else {
5925 /* Accepted this patch. Advance to the next
5926 * one and wait for our intruction pointer to
5927 * hit this point.
5928 */
5929 cur_patch++;
5930 }
5931 }
5932
5933 *start_patch = cur_patch;
5934 if (start_instr < *skip_addr)
5935 /* Still skipping */
5936 return (0);
5937
5938 return (1);
5939}
5940
/*
 * Translate one instruction from the compiled-in firmware image and
 * write it to SEQRAM.
 *
 * Jump-class instructions have their target addresses re-based to
 * account for instructions removed by rejected patches.  Immediate
 * operands flagged by the assembler (via the parity bit) are replaced
 * with values from the downloadable constant table.  Ultra2 parts get
 * odd parity computed into the written word; older parts have the
 * instruction re-packed into their narrower encoding.  Panics on an
 * unknown opcode or an unconvertible BMOV.
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Walk the program up to the jump target, totaling the
		 * instructions removed by patches so the target can be
		 * re-based to its downloaded address.
		 */
		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				/* Skipped run; count only the part below
				 * the target. */
				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * The parity bit doubles as the assembler's "immediate
		 * is an index into the download constant table" flag;
		 * resolve it now and clear the bit.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel.  Verify that
			 * this is only a move of a single element
			 * and convert the BMOV to a MOV
			 * (AND with an immediate of FF).
			 */
			if (fmt1_ins->immediate != 1)
				panic("%s: BMOV not supported\n",
				      ahc_name(ahc));
			fmt1_ins->opcode = AIC_OP_AND;
			fmt1_ins->immediate = 0xff;
		}
		/* FALLTHROUGH */
	case AIC_OP_ROL:
		if ((ahc->features & AHC_ULTRA2) != 0) {
			int i, count;

			/* Calculate odd parity for the instruction */
			/*
			 * NOTE(review): only bits 0-30 are counted --
			 * presumably bit 31 is the parity bit itself;
			 * confirm against the ins_format bit layout.
			 */
			for (i = 0, count = 0; i < 31; i++) {
				uint32_t mask;

				mask = 0x01 << i;
				if ((instr.integer & mask) != 0)
					count++;
			}
			if ((count & 0x01) == 0)
				instr.format1.parity = 1;
		} else {
			/* Compress the instruction for older sequencers */
			if (fmt3_ins != NULL) {
				instr.integer =
					fmt3_ins->immediate
				      | (fmt3_ins->source << 8)
				      | (fmt3_ins->address << 16)
				      |	(fmt3_ins->opcode << 25);
			} else {
				instr.integer =
					fmt1_ins->immediate
				      | (fmt1_ins->source << 8)
				      | (fmt1_ins->destination << 16)
				      |	(fmt1_ins->ret << 24)
				      |	(fmt1_ins->opcode << 25);
			}
		}
		/* The sequencer is a little endian cpu */
		instr.integer = ahc_htole32(instr.integer);
		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
		break;
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}
6065
6066void
6067ahc_dump_card_state(struct ahc_softc *ahc)
6068{
6069 struct scb *scb;
6070 struct scb_tailq *untagged_q;
6071 int target;
6072 int maxtarget;
6073 int i;
6074 uint8_t last_phase;
6046 uint8_t qinpos;
6047 uint8_t qintail;
6048 uint8_t qoutpos;
6049 uint8_t scb_index;
6050 uint8_t saved_scbptr;
6051
6052 saved_scbptr = ahc_inb(ahc, SCBPTR);
6053
6075 uint8_t qinpos;
6076 uint8_t qintail;
6077 uint8_t qoutpos;
6078 uint8_t scb_index;
6079 uint8_t saved_scbptr;
6080
6081 saved_scbptr = ahc_inb(ahc, SCBPTR);
6082
6054 printf("%s: Dumping Card State at SEQADDR 0x%x\n",
6055 ahc_name(ahc),
6083 last_phase = ahc_inb(ahc, LASTPHASE);
6084 printf("%s: Dumping Card State %s, at SEQADDR 0x%x\n",
6085 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
6056 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6086 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6057
6058 printf("SCSISEQ = 0x%x, SBLKCTL = 0x%x, SSTAT0 0x%x\n",
6059 ahc_inb(ahc, SCSISEQ), ahc_inb(ahc, SBLKCTL),
6060 ahc_inb(ahc, SSTAT0));
6087 printf("SCSISEQ = 0x%x, SBLKCTL = 0x%x\n",
6088 ahc_inb(ahc, SCSISEQ), ahc_inb(ahc, SBLKCTL));
6089 printf(" DFCNTRL = 0x%x, DFSTATUS = 0x%x\n",
6090 ahc_inb(ahc, DFCNTRL), ahc_inb(ahc, DFSTATUS));
6091 printf("LASTPHASE = 0x%x, SCSISIGI = 0x%x, SXFRCTL0 = 0x%x\n",
6092 last_phase, ahc_inb(ahc, SCSISIGI), ahc_inb(ahc, SXFRCTL0));
6093 printf("SSTAT0 = 0x%x, SSTAT1 = 0x%x\n",
6094 ahc_inb(ahc, SSTAT0), ahc_inb(ahc, SSTAT1));
6095 if ((ahc->features & AHC_DT) != 0)
6096 printf("SCSIPHASE = 0x%x\n", ahc_inb(ahc, SCSIPHASE));
6097 printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
6098 ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6099 ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6100 ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6101 ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));
6061 printf("SCB count = %d\n", ahc->scb_data->numscbs);
6062 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
6063 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
6064 /* QINFIFO */
6065 printf("QINFIFO entries: ");
6066 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6067 qinpos = ahc_inb(ahc, SNSCB_QOFF);
6068 ahc_outb(ahc, SNSCB_QOFF, qinpos);
6069 } else
6070 qinpos = ahc_inb(ahc, QINPOS);
6071 qintail = ahc->qinfifonext;
6072 while (qinpos != qintail) {
6073 printf("%d ", ahc->qinfifo[qinpos]);
6074 qinpos++;
6075 }
6076 printf("\n");
6077
6078 printf("Waiting Queue entries: ");
6079 scb_index = ahc_inb(ahc, WAITING_SCBH);
6080 i = 0;
6081 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6082 ahc_outb(ahc, SCBPTR, scb_index);
6083 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6084 scb_index = ahc_inb(ahc, SCB_NEXT);
6085 }
6086 printf("\n");
6087
6088 printf("Disconnected Queue entries: ");
6089 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
6090 i = 0;
6091 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6092 ahc_outb(ahc, SCBPTR, scb_index);
6093 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6094 scb_index = ahc_inb(ahc, SCB_NEXT);
6095 }
6096 printf("\n");
6097
6098 printf("QOUTFIFO entries: ");
6099 qoutpos = ahc->qoutfifonext;
6100 i = 0;
6101 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
6102 printf("%d ", ahc->qoutfifo[qoutpos]);
6103 qoutpos++;
6104 }
6105 printf("\n");
6106
6107 printf("Sequencer Free SCB List: ");
6108 scb_index = ahc_inb(ahc, FREE_SCBH);
6109 i = 0;
6110 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6111 ahc_outb(ahc, SCBPTR, scb_index);
6112 printf("%d ", scb_index);
6113 scb_index = ahc_inb(ahc, SCB_NEXT);
6114 }
6115 printf("\n");
6116
6117 printf("Pending list: ");
6118 i = 0;
6119 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6120 if (i++ > 256)
6121 break;
6122 printf("%d ", scb->hscb->tag);
6123 }
6124 printf("\n");
6125
6126 printf("Kernel Free SCB list: ");
6127 i = 0;
6128 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
6129 if (i++ > 256)
6130 break;
6131 printf("%d ", scb->hscb->tag);
6132 }
6133 printf("\n");
6134
6135 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
6136 for (target = 0; target <= maxtarget; target++) {
6137 untagged_q = &ahc->untagged_queues[target];
6138 if (TAILQ_FIRST(untagged_q) == NULL)
6139 continue;
6140 printf("Untagged Q(%d): ", target);
6141 i = 0;
6142 TAILQ_FOREACH(scb, untagged_q, links.tqe) {
6143 if (i++ > 256)
6144 break;
6145 printf("%d ", scb->hscb->tag);
6146 }
6147 printf("\n");
6148 }
6149
6150 ahc_platform_dump_card_state(ahc);
6151 ahc_outb(ahc, SCBPTR, saved_scbptr);
6152}
6153
6154/************************* Target Mode ****************************************/
6155#ifdef AHC_TARGET_MODE
6156cam_status
6157ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
6102 printf("SCB count = %d\n", ahc->scb_data->numscbs);
6103 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
6104 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
6105 /* QINFIFO */
6106 printf("QINFIFO entries: ");
6107 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6108 qinpos = ahc_inb(ahc, SNSCB_QOFF);
6109 ahc_outb(ahc, SNSCB_QOFF, qinpos);
6110 } else
6111 qinpos = ahc_inb(ahc, QINPOS);
6112 qintail = ahc->qinfifonext;
6113 while (qinpos != qintail) {
6114 printf("%d ", ahc->qinfifo[qinpos]);
6115 qinpos++;
6116 }
6117 printf("\n");
6118
6119 printf("Waiting Queue entries: ");
6120 scb_index = ahc_inb(ahc, WAITING_SCBH);
6121 i = 0;
6122 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6123 ahc_outb(ahc, SCBPTR, scb_index);
6124 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6125 scb_index = ahc_inb(ahc, SCB_NEXT);
6126 }
6127 printf("\n");
6128
6129 printf("Disconnected Queue entries: ");
6130 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
6131 i = 0;
6132 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6133 ahc_outb(ahc, SCBPTR, scb_index);
6134 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6135 scb_index = ahc_inb(ahc, SCB_NEXT);
6136 }
6137 printf("\n");
6138
6139 printf("QOUTFIFO entries: ");
6140 qoutpos = ahc->qoutfifonext;
6141 i = 0;
6142 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
6143 printf("%d ", ahc->qoutfifo[qoutpos]);
6144 qoutpos++;
6145 }
6146 printf("\n");
6147
6148 printf("Sequencer Free SCB List: ");
6149 scb_index = ahc_inb(ahc, FREE_SCBH);
6150 i = 0;
6151 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6152 ahc_outb(ahc, SCBPTR, scb_index);
6153 printf("%d ", scb_index);
6154 scb_index = ahc_inb(ahc, SCB_NEXT);
6155 }
6156 printf("\n");
6157
6158 printf("Pending list: ");
6159 i = 0;
6160 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6161 if (i++ > 256)
6162 break;
6163 printf("%d ", scb->hscb->tag);
6164 }
6165 printf("\n");
6166
6167 printf("Kernel Free SCB list: ");
6168 i = 0;
6169 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
6170 if (i++ > 256)
6171 break;
6172 printf("%d ", scb->hscb->tag);
6173 }
6174 printf("\n");
6175
6176 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
6177 for (target = 0; target <= maxtarget; target++) {
6178 untagged_q = &ahc->untagged_queues[target];
6179 if (TAILQ_FIRST(untagged_q) == NULL)
6180 continue;
6181 printf("Untagged Q(%d): ", target);
6182 i = 0;
6183 TAILQ_FOREACH(scb, untagged_q, links.tqe) {
6184 if (i++ > 256)
6185 break;
6186 printf("%d ", scb->hscb->tag);
6187 }
6188 printf("\n");
6189 }
6190
6191 ahc_platform_dump_card_state(ahc);
6192 ahc_outb(ahc, SCBPTR, saved_scbptr);
6193}
6194
6195/************************* Target Mode ****************************************/
6196#ifdef AHC_TARGET_MODE
6197cam_status
6198ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
6158 struct tmode_tstate **tstate, struct tmode_lstate **lstate,
6199 struct ahc_tmode_tstate **tstate,
6200 struct ahc_tmode_lstate **lstate,
6159 int notfound_failure)
6160{
6161
6162 if ((ahc->features & AHC_TARGETMODE) == 0)
6163 return (CAM_REQ_INVALID);
6164
6165 /*
6166 * Handle the 'black hole' device that sucks up
6167 * requests to unattached luns on enabled targets.
6168 */
6169 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
6170 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
6171 *tstate = NULL;
6172 *lstate = ahc->black_hole;
6173 } else {
6174 u_int max_id;
6175
6176 max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
6177 if (ccb->ccb_h.target_id > max_id)
6178 return (CAM_TID_INVALID);
6179
6180 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
6181 return (CAM_LUN_INVALID);
6182
6183 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
6184 *lstate = NULL;
6185 if (*tstate != NULL)
6186 *lstate =
6187 (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
6188 }
6189
6190 if (notfound_failure != 0 && *lstate == NULL)
6191 return (CAM_PATH_INVALID);
6192
6193 return (CAM_REQ_CMP);
6194}
6195
6196void
6197ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6198{
6201 int notfound_failure)
6202{
6203
6204 if ((ahc->features & AHC_TARGETMODE) == 0)
6205 return (CAM_REQ_INVALID);
6206
6207 /*
6208 * Handle the 'black hole' device that sucks up
6209 * requests to unattached luns on enabled targets.
6210 */
6211 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
6212 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
6213 *tstate = NULL;
6214 *lstate = ahc->black_hole;
6215 } else {
6216 u_int max_id;
6217
6218 max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
6219 if (ccb->ccb_h.target_id > max_id)
6220 return (CAM_TID_INVALID);
6221
6222 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
6223 return (CAM_LUN_INVALID);
6224
6225 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
6226 *lstate = NULL;
6227 if (*tstate != NULL)
6228 *lstate =
6229 (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
6230 }
6231
6232 if (notfound_failure != 0 && *lstate == NULL)
6233 return (CAM_PATH_INVALID);
6234
6235 return (CAM_REQ_CMP);
6236}
6237
6238void
6239ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6240{
6199 struct tmode_tstate *tstate;
6200 struct tmode_lstate *lstate;
6241 struct ahc_tmode_tstate *tstate;
6242 struct ahc_tmode_lstate *lstate;
6201 struct ccb_en_lun *cel;
6202 cam_status status;
6203 u_int target;
6204 u_int lun;
6205 u_int target_mask;
6206 u_long s;
6207 char channel;
6208
6209 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
6210 /*notfound_failure*/FALSE);
6211
6212 if (status != CAM_REQ_CMP) {
6213 ccb->ccb_h.status = status;
6214 return;
6215 }
6216
6217 if ((ahc->features & AHC_MULTIROLE) != 0) {
6218 u_int our_id;
6219
6220 if (cam_sim_bus(sim) == 0)
6221 our_id = ahc->our_id;
6222 else
6223 our_id = ahc->our_id_b;
6224
6225 if (ccb->ccb_h.target_id != our_id) {
6226 if ((ahc->features & AHC_MULTI_TID) != 0
6227 && (ahc->flags & AHC_INITIATORROLE) != 0) {
6228 /*
6229 * Only allow additional targets if
6230 * the initiator role is disabled.
6231 * The hardware cannot handle a re-select-in
6232 * on the initiator id during a re-select-out
6233 * on a different target id.
6234 */
6235 status = CAM_TID_INVALID;
6236 } else if ((ahc->flags & AHC_INITIATORROLE) != 0
6237 || ahc->enabled_luns > 0) {
6238 /*
6239 * Only allow our target id to change
6240 * if the initiator role is not configured
6241 * and there are no enabled luns which
6242 * are attached to the currently registered
6243 * scsi id.
6244 */
6245 status = CAM_TID_INVALID;
6246 }
6247 }
6248 }
6249
6250 if (status != CAM_REQ_CMP) {
6251 ccb->ccb_h.status = status;
6252 return;
6253 }
6254
6255 /*
6256 * We now have an id that is valid.
6257 * If we aren't in target mode, switch modes.
6258 */
6259 if ((ahc->flags & AHC_TARGETROLE) == 0
6260 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
6261 u_long s;
6262
6263 printf("Configuring Target Mode\n");
6264 ahc_lock(ahc, &s);
6265 if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
6266 ccb->ccb_h.status = CAM_BUSY;
6267 ahc_unlock(ahc, &s);
6268 return;
6269 }
6270 ahc->flags |= AHC_TARGETROLE;
6271 if ((ahc->features & AHC_MULTIROLE) == 0)
6272 ahc->flags &= ~AHC_INITIATORROLE;
6273 ahc_pause(ahc);
6274 ahc_loadseq(ahc);
6275 ahc_unlock(ahc, &s);
6276 }
6277 cel = &ccb->cel;
6278 target = ccb->ccb_h.target_id;
6279 lun = ccb->ccb_h.target_lun;
6280 channel = SIM_CHANNEL(ahc, sim);
6281 target_mask = 0x01 << target;
6282 if (channel == 'B')
6283 target_mask <<= 8;
6284
6285 if (cel->enable != 0) {
6286 u_int scsiseq;
6287
6288 /* Are we already enabled?? */
6289 if (lstate != NULL) {
6290 xpt_print_path(ccb->ccb_h.path);
6291 printf("Lun already enabled\n");
6292 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
6293 return;
6294 }
6295
6296 if (cel->grp6_len != 0
6297 || cel->grp7_len != 0) {
6298 /*
6299 * Don't (yet?) support vendor
6300 * specific commands.
6301 */
6302 ccb->ccb_h.status = CAM_REQ_INVALID;
6303 printf("Non-zero Group Codes\n");
6304 return;
6305 }
6306
6307 /*
6308 * Seems to be okay.
6309 * Setup our data structures.
6310 */
6311 if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
6312 tstate = ahc_alloc_tstate(ahc, target, channel);
6313 if (tstate == NULL) {
6314 xpt_print_path(ccb->ccb_h.path);
6315 printf("Couldn't allocate tstate\n");
6316 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6317 return;
6318 }
6319 }
6320 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
6321 if (lstate == NULL) {
6322 xpt_print_path(ccb->ccb_h.path);
6323 printf("Couldn't allocate lstate\n");
6324 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6325 return;
6326 }
6327 memset(lstate, 0, sizeof(*lstate));
6328 status = xpt_create_path(&lstate->path, /*periph*/NULL,
6329 xpt_path_path_id(ccb->ccb_h.path),
6330 xpt_path_target_id(ccb->ccb_h.path),
6331 xpt_path_lun_id(ccb->ccb_h.path));
6332 if (status != CAM_REQ_CMP) {
6333 free(lstate, M_DEVBUF);
6334 xpt_print_path(ccb->ccb_h.path);
6335 printf("Couldn't allocate path\n");
6336 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6337 return;
6338 }
6339 SLIST_INIT(&lstate->accept_tios);
6340 SLIST_INIT(&lstate->immed_notifies);
6341 ahc_lock(ahc, &s);
6342 ahc_pause(ahc);
6343 if (target != CAM_TARGET_WILDCARD) {
6344 tstate->enabled_luns[lun] = lstate;
6345 ahc->enabled_luns++;
6346
6347 if ((ahc->features & AHC_MULTI_TID) != 0) {
6348 u_int targid_mask;
6349
6350 targid_mask = ahc_inb(ahc, TARGID)
6351 | (ahc_inb(ahc, TARGID + 1) << 8);
6352
6353 targid_mask |= target_mask;
6354 ahc_outb(ahc, TARGID, targid_mask);
6355 ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
6356
6357 ahc_update_scsiid(ahc, targid_mask);
6358 } else {
6359 u_int our_id;
6360 char channel;
6361
6362 channel = SIM_CHANNEL(ahc, sim);
6363 our_id = SIM_SCSI_ID(ahc, sim);
6364
6365 /*
6366 * This can only happen if selections
6367 * are not enabled
6368 */
6369 if (target != our_id) {
6370 u_int sblkctl;
6371 char cur_channel;
6372 int swap;
6373
6374 sblkctl = ahc_inb(ahc, SBLKCTL);
6375 cur_channel = (sblkctl & SELBUSB)
6376 ? 'B' : 'A';
6377 if ((ahc->features & AHC_TWIN) == 0)
6378 cur_channel = 'A';
6379 swap = cur_channel != channel;
6380 if (channel == 'A')
6381 ahc->our_id = target;
6382 else
6383 ahc->our_id_b = target;
6384
6385 if (swap)
6386 ahc_outb(ahc, SBLKCTL,
6387 sblkctl ^ SELBUSB);
6388
6389 ahc_outb(ahc, SCSIID, target);
6390
6391 if (swap)
6392 ahc_outb(ahc, SBLKCTL, sblkctl);
6393 }
6394 }
6395 } else
6396 ahc->black_hole = lstate;
6397 /* Allow select-in operations */
6398 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
6399 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6400 scsiseq |= ENSELI;
6401 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6402 scsiseq = ahc_inb(ahc, SCSISEQ);
6403 scsiseq |= ENSELI;
6404 ahc_outb(ahc, SCSISEQ, scsiseq);
6405 }
6406 ahc_unpause(ahc);
6407 ahc_unlock(ahc, &s);
6408 ccb->ccb_h.status = CAM_REQ_CMP;
6409 xpt_print_path(ccb->ccb_h.path);
6410 printf("Lun now enabled for target mode\n");
6411 } else {
6412 struct scb *scb;
6413 int i, empty;
6414
6415 if (lstate == NULL) {
6416 ccb->ccb_h.status = CAM_LUN_INVALID;
6417 return;
6418 }
6419
6420 ahc_lock(ahc, &s);
6421
6422 ccb->ccb_h.status = CAM_REQ_CMP;
6423 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6424 struct ccb_hdr *ccbh;
6425
6426 ccbh = &scb->io_ctx->ccb_h;
6427 if (ccbh->func_code == XPT_CONT_TARGET_IO
6428 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
6429 printf("CTIO pending\n");
6430 ccb->ccb_h.status = CAM_REQ_INVALID;
6431 ahc_unlock(ahc, &s);
6432 return;
6433 }
6434 }
6435
6436 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
6437 printf("ATIOs pending\n");
6438 ccb->ccb_h.status = CAM_REQ_INVALID;
6439 }
6440
6441 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
6442 printf("INOTs pending\n");
6443 ccb->ccb_h.status = CAM_REQ_INVALID;
6444 }
6445
6446 if (ccb->ccb_h.status != CAM_REQ_CMP) {
6447 ahc_unlock(ahc, &s);
6448 return;
6449 }
6450
6451 xpt_print_path(ccb->ccb_h.path);
6452 printf("Target mode disabled\n");
6453 xpt_free_path(lstate->path);
6454 free(lstate, M_DEVBUF);
6455
6456 ahc_pause(ahc);
6457 /* Can we clean up the target too? */
6458 if (target != CAM_TARGET_WILDCARD) {
6459 tstate->enabled_luns[lun] = NULL;
6460 ahc->enabled_luns--;
6461 for (empty = 1, i = 0; i < 8; i++)
6462 if (tstate->enabled_luns[i] != NULL) {
6463 empty = 0;
6464 break;
6465 }
6466
6467 if (empty) {
6468 ahc_free_tstate(ahc, target, channel,
6469 /*force*/FALSE);
6470 if (ahc->features & AHC_MULTI_TID) {
6471 u_int targid_mask;
6472
6473 targid_mask = ahc_inb(ahc, TARGID)
6474 | (ahc_inb(ahc, TARGID + 1)
6475 << 8);
6476
6477 targid_mask &= ~target_mask;
6478 ahc_outb(ahc, TARGID, targid_mask);
6479 ahc_outb(ahc, TARGID+1,
6480 (targid_mask >> 8));
6481 ahc_update_scsiid(ahc, targid_mask);
6482 }
6483 }
6484 } else {
6485
6486 ahc->black_hole = NULL;
6487
6488 /*
6489 * We can't allow selections without
6490 * our black hole device.
6491 */
6492 empty = TRUE;
6493 }
6494 if (ahc->enabled_luns == 0) {
6495 /* Disallow select-in */
6496 u_int scsiseq;
6497
6498 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6499 scsiseq &= ~ENSELI;
6500 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6501 scsiseq = ahc_inb(ahc, SCSISEQ);
6502 scsiseq &= ~ENSELI;
6503 ahc_outb(ahc, SCSISEQ, scsiseq);
6504
6505 if ((ahc->features & AHC_MULTIROLE) == 0) {
6506 printf("Configuring Initiator Mode\n");
6507 ahc->flags &= ~AHC_TARGETROLE;
6508 ahc->flags |= AHC_INITIATORROLE;
6509 ahc_pause(ahc);
6510 ahc_loadseq(ahc);
6511 }
6512 }
6513 ahc_unpause(ahc);
6514 ahc_unlock(ahc, &s);
6515 }
6516}
6517
6518static void
6519ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
6520{
6521 u_int scsiid_mask;
6522 u_int scsiid;
6523
6524 if ((ahc->features & AHC_MULTI_TID) == 0)
6525 panic("ahc_update_scsiid called on non-multitid unit\n");
6526
6527 /*
6528 * Since we will rely on the the TARGID mask
6529 * for selection enables, ensure that OID
6530 * in SCSIID is not set to some other ID
6531 * that we don't want to allow selections on.
6532 */
6533 if ((ahc->features & AHC_ULTRA2) != 0)
6534 scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
6535 else
6536 scsiid = ahc_inb(ahc, SCSIID);
6537 scsiid_mask = 0x1 << (scsiid & OID);
6538 if ((targid_mask & scsiid_mask) == 0) {
6539 u_int our_id;
6540
6541 /* ffs counts from 1 */
6542 our_id = ffs(targid_mask);
6543 if (our_id == 0)
6544 our_id = ahc->our_id;
6545 else
6546 our_id--;
6547 scsiid &= TID;
6548 scsiid |= our_id;
6549 }
6550 if ((ahc->features & AHC_ULTRA2) != 0)
6551 ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
6552 else
6553 ahc_outb(ahc, SCSIID, scsiid);
6554}
6555
/*
 * Drain the target mode incoming command queue, handing each valid
 * entry to ahc_handle_target_cmd() until the queue empties or a
 * command cannot currently be accepted.  Our consumer index is only
 * published back to the sequencer periodically to limit register
 * traffic.  "paused" tells us whether the caller already holds the
 * chip paused; it is forced TRUE on auto-access-pause hardware.
 */
void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
	struct target_cmd *cmd;

	/*
	 * If the card supports auto-access pause,
	 * we can access the card directly regardless
	 * of whether it is paused or not.
	 */
	if ((ahc->features & AHC_AUTOPAUSE) != 0)
		paused = TRUE;

	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahc_handle_target_cmd(ahc, cmd) != 0)
			break;

		/* Consume the entry and hand it back to the chip. */
		ahc->tqinfifonext++;
		cmd->cmd_valid = 0;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
				u_int hs_mailbox;

				/* Publish our index via the host mailbox. */
				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
				hs_mailbox &= ~HOST_TQINPOS;
				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
			} else {
				/*
				 * Pause the chip around the update --
				 * presumably KERNEL_TQINPOS is unsafe to
				 * write while the sequencer runs; confirm
				 * against the register definitions.
				 */
				if (!paused)
					ahc_pause(ahc);
				ahc_outb(ahc, KERNEL_TQINPOS,
					 ahc->tqinfifonext & HOST_TQINPOS);
				if (!paused)
					ahc_unpause(ahc);
			}
		}
	}
}
6604
6605static int
6606ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
6607{
6243 struct ccb_en_lun *cel;
6244 cam_status status;
6245 u_int target;
6246 u_int lun;
6247 u_int target_mask;
6248 u_long s;
6249 char channel;
6250
6251 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
6252 /*notfound_failure*/FALSE);
6253
6254 if (status != CAM_REQ_CMP) {
6255 ccb->ccb_h.status = status;
6256 return;
6257 }
6258
6259 if ((ahc->features & AHC_MULTIROLE) != 0) {
6260 u_int our_id;
6261
6262 if (cam_sim_bus(sim) == 0)
6263 our_id = ahc->our_id;
6264 else
6265 our_id = ahc->our_id_b;
6266
6267 if (ccb->ccb_h.target_id != our_id) {
6268 if ((ahc->features & AHC_MULTI_TID) != 0
6269 && (ahc->flags & AHC_INITIATORROLE) != 0) {
6270 /*
6271 * Only allow additional targets if
6272 * the initiator role is disabled.
6273 * The hardware cannot handle a re-select-in
6274 * on the initiator id during a re-select-out
6275 * on a different target id.
6276 */
6277 status = CAM_TID_INVALID;
6278 } else if ((ahc->flags & AHC_INITIATORROLE) != 0
6279 || ahc->enabled_luns > 0) {
6280 /*
6281 * Only allow our target id to change
6282 * if the initiator role is not configured
6283 * and there are no enabled luns which
6284 * are attached to the currently registered
6285 * scsi id.
6286 */
6287 status = CAM_TID_INVALID;
6288 }
6289 }
6290 }
6291
6292 if (status != CAM_REQ_CMP) {
6293 ccb->ccb_h.status = status;
6294 return;
6295 }
6296
6297 /*
6298 * We now have an id that is valid.
6299 * If we aren't in target mode, switch modes.
6300 */
6301 if ((ahc->flags & AHC_TARGETROLE) == 0
6302 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
6303 u_long s;
6304
6305 printf("Configuring Target Mode\n");
6306 ahc_lock(ahc, &s);
6307 if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
6308 ccb->ccb_h.status = CAM_BUSY;
6309 ahc_unlock(ahc, &s);
6310 return;
6311 }
6312 ahc->flags |= AHC_TARGETROLE;
6313 if ((ahc->features & AHC_MULTIROLE) == 0)
6314 ahc->flags &= ~AHC_INITIATORROLE;
6315 ahc_pause(ahc);
6316 ahc_loadseq(ahc);
6317 ahc_unlock(ahc, &s);
6318 }
6319 cel = &ccb->cel;
6320 target = ccb->ccb_h.target_id;
6321 lun = ccb->ccb_h.target_lun;
6322 channel = SIM_CHANNEL(ahc, sim);
6323 target_mask = 0x01 << target;
6324 if (channel == 'B')
6325 target_mask <<= 8;
6326
6327 if (cel->enable != 0) {
6328 u_int scsiseq;
6329
6330 /* Are we already enabled?? */
6331 if (lstate != NULL) {
6332 xpt_print_path(ccb->ccb_h.path);
6333 printf("Lun already enabled\n");
6334 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
6335 return;
6336 }
6337
6338 if (cel->grp6_len != 0
6339 || cel->grp7_len != 0) {
6340 /*
6341 * Don't (yet?) support vendor
6342 * specific commands.
6343 */
6344 ccb->ccb_h.status = CAM_REQ_INVALID;
6345 printf("Non-zero Group Codes\n");
6346 return;
6347 }
6348
6349 /*
6350 * Seems to be okay.
6351 * Setup our data structures.
6352 */
6353 if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
6354 tstate = ahc_alloc_tstate(ahc, target, channel);
6355 if (tstate == NULL) {
6356 xpt_print_path(ccb->ccb_h.path);
6357 printf("Couldn't allocate tstate\n");
6358 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6359 return;
6360 }
6361 }
6362 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
6363 if (lstate == NULL) {
6364 xpt_print_path(ccb->ccb_h.path);
6365 printf("Couldn't allocate lstate\n");
6366 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6367 return;
6368 }
6369 memset(lstate, 0, sizeof(*lstate));
6370 status = xpt_create_path(&lstate->path, /*periph*/NULL,
6371 xpt_path_path_id(ccb->ccb_h.path),
6372 xpt_path_target_id(ccb->ccb_h.path),
6373 xpt_path_lun_id(ccb->ccb_h.path));
6374 if (status != CAM_REQ_CMP) {
6375 free(lstate, M_DEVBUF);
6376 xpt_print_path(ccb->ccb_h.path);
6377 printf("Couldn't allocate path\n");
6378 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6379 return;
6380 }
6381 SLIST_INIT(&lstate->accept_tios);
6382 SLIST_INIT(&lstate->immed_notifies);
6383 ahc_lock(ahc, &s);
6384 ahc_pause(ahc);
6385 if (target != CAM_TARGET_WILDCARD) {
6386 tstate->enabled_luns[lun] = lstate;
6387 ahc->enabled_luns++;
6388
6389 if ((ahc->features & AHC_MULTI_TID) != 0) {
6390 u_int targid_mask;
6391
6392 targid_mask = ahc_inb(ahc, TARGID)
6393 | (ahc_inb(ahc, TARGID + 1) << 8);
6394
6395 targid_mask |= target_mask;
6396 ahc_outb(ahc, TARGID, targid_mask);
6397 ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
6398
6399 ahc_update_scsiid(ahc, targid_mask);
6400 } else {
6401 u_int our_id;
6402 char channel;
6403
6404 channel = SIM_CHANNEL(ahc, sim);
6405 our_id = SIM_SCSI_ID(ahc, sim);
6406
6407 /*
6408 * This can only happen if selections
6409 * are not enabled
6410 */
6411 if (target != our_id) {
6412 u_int sblkctl;
6413 char cur_channel;
6414 int swap;
6415
6416 sblkctl = ahc_inb(ahc, SBLKCTL);
6417 cur_channel = (sblkctl & SELBUSB)
6418 ? 'B' : 'A';
6419 if ((ahc->features & AHC_TWIN) == 0)
6420 cur_channel = 'A';
6421 swap = cur_channel != channel;
6422 if (channel == 'A')
6423 ahc->our_id = target;
6424 else
6425 ahc->our_id_b = target;
6426
6427 if (swap)
6428 ahc_outb(ahc, SBLKCTL,
6429 sblkctl ^ SELBUSB);
6430
6431 ahc_outb(ahc, SCSIID, target);
6432
6433 if (swap)
6434 ahc_outb(ahc, SBLKCTL, sblkctl);
6435 }
6436 }
6437 } else
6438 ahc->black_hole = lstate;
6439 /* Allow select-in operations */
6440 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
6441 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6442 scsiseq |= ENSELI;
6443 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6444 scsiseq = ahc_inb(ahc, SCSISEQ);
6445 scsiseq |= ENSELI;
6446 ahc_outb(ahc, SCSISEQ, scsiseq);
6447 }
6448 ahc_unpause(ahc);
6449 ahc_unlock(ahc, &s);
6450 ccb->ccb_h.status = CAM_REQ_CMP;
6451 xpt_print_path(ccb->ccb_h.path);
6452 printf("Lun now enabled for target mode\n");
6453 } else {
6454 struct scb *scb;
6455 int i, empty;
6456
6457 if (lstate == NULL) {
6458 ccb->ccb_h.status = CAM_LUN_INVALID;
6459 return;
6460 }
6461
6462 ahc_lock(ahc, &s);
6463
6464 ccb->ccb_h.status = CAM_REQ_CMP;
6465 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6466 struct ccb_hdr *ccbh;
6467
6468 ccbh = &scb->io_ctx->ccb_h;
6469 if (ccbh->func_code == XPT_CONT_TARGET_IO
6470 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
6471 printf("CTIO pending\n");
6472 ccb->ccb_h.status = CAM_REQ_INVALID;
6473 ahc_unlock(ahc, &s);
6474 return;
6475 }
6476 }
6477
6478 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
6479 printf("ATIOs pending\n");
6480 ccb->ccb_h.status = CAM_REQ_INVALID;
6481 }
6482
6483 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
6484 printf("INOTs pending\n");
6485 ccb->ccb_h.status = CAM_REQ_INVALID;
6486 }
6487
6488 if (ccb->ccb_h.status != CAM_REQ_CMP) {
6489 ahc_unlock(ahc, &s);
6490 return;
6491 }
6492
6493 xpt_print_path(ccb->ccb_h.path);
6494 printf("Target mode disabled\n");
6495 xpt_free_path(lstate->path);
6496 free(lstate, M_DEVBUF);
6497
6498 ahc_pause(ahc);
6499 /* Can we clean up the target too? */
6500 if (target != CAM_TARGET_WILDCARD) {
6501 tstate->enabled_luns[lun] = NULL;
6502 ahc->enabled_luns--;
6503 for (empty = 1, i = 0; i < 8; i++)
6504 if (tstate->enabled_luns[i] != NULL) {
6505 empty = 0;
6506 break;
6507 }
6508
6509 if (empty) {
6510 ahc_free_tstate(ahc, target, channel,
6511 /*force*/FALSE);
6512 if (ahc->features & AHC_MULTI_TID) {
6513 u_int targid_mask;
6514
6515 targid_mask = ahc_inb(ahc, TARGID)
6516 | (ahc_inb(ahc, TARGID + 1)
6517 << 8);
6518
6519 targid_mask &= ~target_mask;
6520 ahc_outb(ahc, TARGID, targid_mask);
6521 ahc_outb(ahc, TARGID+1,
6522 (targid_mask >> 8));
6523 ahc_update_scsiid(ahc, targid_mask);
6524 }
6525 }
6526 } else {
6527
6528 ahc->black_hole = NULL;
6529
6530 /*
6531 * We can't allow selections without
6532 * our black hole device.
6533 */
6534 empty = TRUE;
6535 }
6536 if (ahc->enabled_luns == 0) {
6537 /* Disallow select-in */
6538 u_int scsiseq;
6539
6540 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6541 scsiseq &= ~ENSELI;
6542 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6543 scsiseq = ahc_inb(ahc, SCSISEQ);
6544 scsiseq &= ~ENSELI;
6545 ahc_outb(ahc, SCSISEQ, scsiseq);
6546
6547 if ((ahc->features & AHC_MULTIROLE) == 0) {
6548 printf("Configuring Initiator Mode\n");
6549 ahc->flags &= ~AHC_TARGETROLE;
6550 ahc->flags |= AHC_INITIATORROLE;
6551 ahc_pause(ahc);
6552 ahc_loadseq(ahc);
6553 }
6554 }
6555 ahc_unpause(ahc);
6556 ahc_unlock(ahc, &s);
6557 }
6558}
6559
/*
 * Keep the controller's selection ID (the OID field of SCSIID) consistent
 * with the TARGID selection-enable mask on AHC_MULTI_TID hardware, so the
 * chip never answers selections for an ID that is not enabled in TARGID.
 */
6560static void
6561ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
6562{
6563	u_int scsiid_mask;
6564	u_int scsiid;
6565
	/* Only multi-target-ID capable chips have a TARGID mask to honor. */
6566	if ((ahc->features & AHC_MULTI_TID) == 0)
6567		panic("ahc_update_scsiid called on non-multitid unit\n");
6568
6569	/*
6570	 * Since we will rely on the TARGID mask
6571	 * for selection enables, ensure that OID
6572	 * in SCSIID is not set to some other ID
6573	 * that we don't want to allow selections on.
6574	 */
	/* Ultra2 chips expose SCSIID at a different register offset. */
6575	if ((ahc->features & AHC_ULTRA2) != 0)
6576		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
6577	else
6578		scsiid = ahc_inb(ahc, SCSIID);
6579	scsiid_mask = 0x1 << (scsiid & OID);
6580	if ((targid_mask & scsiid_mask) == 0) {
6581		u_int our_id;
6582
6583		/* ffs counts from 1 */
		/*
		 * Adopt the lowest ID enabled in the mask; if the mask is
		 * empty, fall back to the controller's configured ID.
		 */
6584		our_id = ffs(targid_mask);
6585		if (our_id == 0)
6586			our_id = ahc->our_id;
6587		else
6588			our_id--;
		/* Preserve the TID bits, replace only our OID. */
6589		scsiid &= TID;
6590		scsiid |= our_id;
6591	}
6592	if ((ahc->features & AHC_ULTRA2) != 0)
6593		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
6594	else
6595		ahc_outb(ahc, SCSIID, scsiid);
6596}
6597
/*
 * Drain the target-mode incoming command FIFO, handing each valid entry
 * to ahc_handle_target_cmd().  Stops early (leaving the entry valid and
 * unconsumed) when the handler reports it lacks the resources to accept
 * the command.
 */
6598void
6599ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
6600{
6601	struct target_cmd *cmd;
6602
6603	/*
6604	 * If the card supports auto-access pause,
6605	 * we can access the card directly regardless
6606	 * of whether it is paused or not.
6607	 */
6608	if ((ahc->features & AHC_AUTOPAUSE) != 0)
6609		paused = TRUE;
6610
6611	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
6612
6613		/*
6614		 * Only advance through the queue if we
6615		 * have the resources to process the command.
6616		 */
6617		if (ahc_handle_target_cmd(ahc, cmd) != 0)
6618			break;
6619
6620		ahc->tqinfifonext++;
6621		cmd->cmd_valid = 0;
6622
6623		/*
6624		 * Lazily update our position in the target mode incoming
6625		 * command queue as seen by the sequencer.
6626		 */
6627		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			/*
			 * HS_MAILBOX-capable chips let us post the new
			 * position without pausing the sequencer.
			 */
6628			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
6629				u_int hs_mailbox;
6630
6631				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
6632				hs_mailbox &= ~HOST_TQINPOS;
6633				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
6634				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
6635			} else {
				/*
				 * Otherwise the sequencer must be paused
				 * around the KERNEL_TQINPOS update.
				 */
6636				if (!paused)
6637					ahc_pause(ahc);
6638				ahc_outb(ahc, KERNEL_TQINPOS,
6639					 ahc->tqinfifonext & HOST_TQINPOS);
6640				if (!paused)
6641					ahc_unpause(ahc);
6642			}
6643		}
6644	}
6645}
6646
/*
 * Deliver one incoming target-mode command to the peripheral driver that
 * has the addressed lun enabled, or to the "black hole" catch-all lstate
 * when no lun matches.  Returns 1 (and marks the tqinfifo blocked) when
 * no ATIO is available to carry the command; returns 0 on success.
 */
6647static int
6648ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
6649{
	/*
	 * NOTE(review): the next four lines are diff residue -- 6608/6609
	 * are the rev 74434 declarations and 6650/6651 their rev 74972
	 * replacements; only the ahc_tmode_* pair exists in the real file.
	 */
6608	struct tmode_tstate *tstate;
6609	struct tmode_lstate *lstate;
6650	struct ahc_tmode_tstate *tstate;
6651	struct ahc_tmode_lstate *lstate;
6610	struct ccb_accept_tio *atio;
6611	uint8_t *byte;
6612	int initiator;
6613	int target;
6614	int lun;
6615
	/* Decode who selected us, which of our IDs was selected, and the lun. */
6616	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
6617	target = SCSIID_OUR_ID(cmd->scsiid);
6618	lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);
6619
6620	byte = cmd->bytes;
6621	tstate = ahc->enabled_targets[target];
6622	lstate = NULL;
6623	if (tstate != NULL)
6624		lstate = tstate->enabled_luns[lun];
6625
6626	/*
6627	 * Commands for disabled luns go to the black hole driver.
6628	 */
6629	if (lstate == NULL)
6630		lstate = ahc->black_hole;
6631
6632	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
6633	if (atio == NULL) {
		/* No ATIO to carry this command; stall the incoming queue. */
6634		ahc->flags |= AHC_TQINFIFO_BLOCKED;
6635		/*
6636		 * Wait for more ATIOs from the peripheral driver for this lun.
6637		 */
6638		return (1);
6639	} else
6640		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
6641#if 0
6642	printf("Incoming command from %d for %d:%d%s\n",
6643	       initiator, target, lun,
6644	       lstate == ahc->black_hole ? "(Black Holed)" : "");
6645#endif
6646	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
6647
6648	if (lstate == ahc->black_hole) {
		/* Fill in the wildcards */
6649		atio->ccb_h.target_id = target;
6650		atio->ccb_h.target_lun = lun;
6651	}
6652
6653	/*
6654	 * Package it up and send it off to
6655	 * whomever has this lun enabled.
6656	 */
6657	atio->sense_len = 0;
6658	atio->init_id = initiator;
	/* 0xFF in the first byte marks an untagged command. */
6660	if (byte[0] != 0xFF) {
6661		/* Tag was included */
6662		atio->tag_action = *byte++;
6663		atio->tag_id = *byte++;
6664		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
6665	} else {
6666		atio->ccb_h.flags = 0;
6667	}
	/*
	 * NOTE(review): skips one more byte before the CDB -- presumably
	 * per the struct target_cmd layout; verify against aic7xxx.h.
	 */
6668	byte++;
6669
6670	/* Okay.  Now determine the cdb size based on the command code */
6671	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
6672	case 0:
6673		atio->cdb_len = 6;
6674		break;
6675	case 1:
6676	case 2:
6677		atio->cdb_len = 10;
6678		break;
6679	case 4:
6680		atio->cdb_len = 16;
6681		break;
6682	case 5:
6683		atio->cdb_len = 12;
6684		break;
6685	case 3:
6686	default:
6687		/* Only copy the opcode. */
6688		atio->cdb_len = 1;
6689		printf("Reserved or VU command code type encountered\n");
6690		break;
6691	}
6692
6693	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
6694
6695	atio->ccb_h.status |= CAM_CDB_RECVD;
6696
6697	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
6698		/*
6699		 * We weren't allowed to disconnect.
6700		 * We're hanging on the bus until a
6701		 * continue target I/O comes in response
6702		 * to this accept tio.
6703		 */
6704#if 0
6705		printf("Received Immediate Command %d:%d:%d - %p\n",
6706		       initiator, target, lun, ahc->pending_device);
6707#endif
6708		ahc->pending_device = lstate;
6709		ahc_freeze_ccb((union ccb *)atio);
6710		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
6711	}
6712	xpt_done((union ccb*)atio);
6713	return (0);
6714}
6715
6716#endif
6652 struct ccb_accept_tio *atio;
6653 uint8_t *byte;
6654 int initiator;
6655 int target;
6656 int lun;
6657
6658 initiator = SCSIID_TARGET(ahc, cmd->scsiid);
6659 target = SCSIID_OUR_ID(cmd->scsiid);
6660 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);
6661
6662 byte = cmd->bytes;
6663 tstate = ahc->enabled_targets[target];
6664 lstate = NULL;
6665 if (tstate != NULL)
6666 lstate = tstate->enabled_luns[lun];
6667
6668 /*
6669 * Commands for disabled luns go to the black hole driver.
6670 */
6671 if (lstate == NULL)
6672 lstate = ahc->black_hole;
6673
6674 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
6675 if (atio == NULL) {
6676 ahc->flags |= AHC_TQINFIFO_BLOCKED;
6677 /*
6678 * Wait for more ATIOs from the peripheral driver for this lun.
6679 */
6680 return (1);
6681 } else
6682 ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
6683#if 0
6684 printf("Incoming command from %d for %d:%d%s\n",
6685 initiator, target, lun,
6686 lstate == ahc->black_hole ? "(Black Holed)" : "");
6687#endif
6688 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
6689
6690 if (lstate == ahc->black_hole) {
6691 /* Fill in the wildcards */
6692 atio->ccb_h.target_id = target;
6693 atio->ccb_h.target_lun = lun;
6694 }
6695
6696 /*
6697 * Package it up and send it off to
6698 * whomever has this lun enabled.
6699 */
6700 atio->sense_len = 0;
6701 atio->init_id = initiator;
6702 if (byte[0] != 0xFF) {
6703 /* Tag was included */
6704 atio->tag_action = *byte++;
6705 atio->tag_id = *byte++;
6706 atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
6707 } else {
6708 atio->ccb_h.flags = 0;
6709 }
6710 byte++;
6711
6712 /* Okay. Now determine the cdb size based on the command code */
6713 switch (*byte >> CMD_GROUP_CODE_SHIFT) {
6714 case 0:
6715 atio->cdb_len = 6;
6716 break;
6717 case 1:
6718 case 2:
6719 atio->cdb_len = 10;
6720 break;
6721 case 4:
6722 atio->cdb_len = 16;
6723 break;
6724 case 5:
6725 atio->cdb_len = 12;
6726 break;
6727 case 3:
6728 default:
6729 /* Only copy the opcode. */
6730 atio->cdb_len = 1;
6731 printf("Reserved or VU command code type encountered\n");
6732 break;
6733 }
6734
6735 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
6736
6737 atio->ccb_h.status |= CAM_CDB_RECVD;
6738
6739 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
6740 /*
6741 * We weren't allowed to disconnect.
6742 * We're hanging on the bus until a
6743 * continue target I/O comes in response
6744 * to this accept tio.
6745 */
6746#if 0
6747 printf("Received Immediate Command %d:%d:%d - %p\n",
6748 initiator, target, lun, ahc->pending_device);
6749#endif
6750 ahc->pending_device = lstate;
6751 ahc_freeze_ccb((union ccb *)atio);
6752 atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
6753 }
6754 xpt_done((union ccb*)atio);
6755 return (0);
6756}
6757
6758#endif