Deleted Added
full compact
aic7xxx_osm.c (66800) aic7xxx_osm.c (66845)
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id$
32 *
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id$
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 66800 2000-10-08 03:37:52Z gibbs $
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 66845 2000-10-09 01:46:01Z gibbs $
34 */
35
36#include <dev/aic7xxx/aic7xxx_freebsd.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#include <sys/eventhandler.h>
40
41#ifndef AHC_TMODE_ENABLE
42#define AHC_TMODE_ENABLE 0
43#endif
44
45#define ccb_scb_ptr spriv_ptr0
46#define ccb_ahc_ptr spriv_ptr1
47
48#ifdef AHC_DEBUG
49static int ahc_debug = AHC_DEBUG;
50#endif
51
52static void ahc_freebsd_intr(void *arg);
53
54#if UNUSED
55static void ahc_dump_targcmd(struct target_cmd *cmd);
56#endif
57static void ahc_action(struct cam_sim *sim, union ccb *ccb);
58static void ahc_get_tran_settings(struct ahc_softc *ahc,
59 int our_id, char channel,
60 struct ccb_trans_settings *cts);
61static void ahc_async(void *callback_arg, uint32_t code,
62 struct cam_path *path, void *arg);
63static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
64 int nsegments, int error);
65static void ahc_poll(struct cam_sim *sim);
66static void ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
67 struct ccb_scsiio *csio, struct scb *scb);
68static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
69 union ccb *ccb);
70static int ahc_create_path(struct ahc_softc *ahc,
71 char channel, u_int target, u_int lun,
72 struct cam_path **path);
73
74static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
75
76static int
77ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
78 u_int lun, struct cam_path **path)
79{
80 path_id_t path_id;
81
82 if (channel == 'B')
83 path_id = cam_sim_path(ahc->platform_data->sim_b);
84 else
85 path_id = cam_sim_path(ahc->platform_data->sim);
86
87 return (xpt_create_path(path, /*periph*/NULL,
88 path_id, target, lun));
89}
90
91/*
92 * Attach all the sub-devices we can find
93 */
94int
95ahc_attach(struct ahc_softc *ahc)
96{
97 char ahc_info[256];
98 struct ccb_setasync csa;
99 struct cam_devq *devq;
100 int bus_id;
101 int bus_id2;
102 struct cam_sim *sim;
103 struct cam_sim *sim2;
104 struct cam_path *path;
105 struct cam_path *path2;
106 long s;
107 int count;
108 int error;
109
110 count = 0;
111 sim = NULL;
112 sim2 = NULL;
113
114 ahc_controller_info(ahc, ahc_info);
115 printf("%s\n", ahc_info);
116 ahc_lock(ahc, &s);
117 /* Hook up our interrupt handler */
118 if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
119 INTR_TYPE_CAM, ahc_freebsd_intr, ahc,
120 &ahc->platform_data->ih)) != 0) {
121 device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
122 error);
123 goto fail;
124 }
125
126 /*
127 * Attach secondary channel first if the user has
128 * declared it the primary channel.
129 */
130 if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
131 bus_id = 1;
132 bus_id2 = 0;
133 } else {
134 bus_id = 0;
135 bus_id2 = 1;
136 }
137
138 /*
139 * Create the device queue for our SIM(s).
140 */
141 devq = cam_simq_alloc(AHC_SCB_MAX - 1);
142 if (devq == NULL)
143 goto fail;
144
145 /*
146 * Construct our first channel SIM entry
147 */
148 sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
149 device_get_unit(ahc->dev_softc),
150 1, AHC_SCB_MAX - 1, devq);
151 if (sim == NULL) {
152 cam_simq_free(devq);
153 goto fail;
154 }
155
156 if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
157 cam_sim_free(sim, /*free_devq*/TRUE);
158 sim = NULL;
159 goto fail;
160 }
161
162 if (xpt_create_path(&path, /*periph*/NULL,
163 cam_sim_path(sim), CAM_TARGET_WILDCARD,
164 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
165 xpt_bus_deregister(cam_sim_path(sim));
166 cam_sim_free(sim, /*free_devq*/TRUE);
167 sim = NULL;
168 goto fail;
169 }
170
171 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
172 csa.ccb_h.func_code = XPT_SASYNC_CB;
173 csa.event_enable = AC_LOST_DEVICE;
174 csa.callback = ahc_async;
175 csa.callback_arg = sim;
176 xpt_action((union ccb *)&csa);
177 count++;
178
179 if (ahc->features & AHC_TWIN) {
180 sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
181 ahc, device_get_unit(ahc->dev_softc), 1,
182 AHC_SCB_MAX - 1, devq);
183
184 if (sim2 == NULL) {
185 printf("ahc_attach: Unable to attach second "
186 "bus due to resource shortage");
187 goto fail;
188 }
189
190 if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
191 printf("ahc_attach: Unable to attach second "
192 "bus due to resource shortage");
193 /*
194 * We do not want to destroy the device queue
195 * because the first bus is using it.
196 */
197 cam_sim_free(sim2, /*free_devq*/FALSE);
198 goto fail;
199 }
200
201 if (xpt_create_path(&path2, /*periph*/NULL,
202 cam_sim_path(sim2),
203 CAM_TARGET_WILDCARD,
204 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
205 xpt_bus_deregister(cam_sim_path(sim2));
206 cam_sim_free(sim2, /*free_devq*/FALSE);
207 sim2 = NULL;
208 goto fail;
209 }
210 xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
211 csa.ccb_h.func_code = XPT_SASYNC_CB;
212 csa.event_enable = AC_LOST_DEVICE;
213 csa.callback = ahc_async;
214 csa.callback_arg = sim2;
215 xpt_action((union ccb *)&csa);
216 count++;
217 }
218
219fail:
220 if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
221 ahc->platform_data->sim_b = sim;
222 ahc->platform_data->path_b = path;
223 ahc->platform_data->sim = sim2;
224 ahc->platform_data->path = path2;
225 } else {
226 ahc->platform_data->sim = sim;
227 ahc->platform_data->path = path;
228 ahc->platform_data->sim_b = sim2;
229 ahc->platform_data->path_b = path2;
230 }
231 ahc_unlock(ahc, &s);
232
233 if (count != 0)
234 /* We have to wait until after any system dumps... */
235 EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
236 ahc, SHUTDOWN_PRI_DEFAULT);
237
238 return (count);
239}
240
/*
 * Catch an interrupt from the adapter
 */
void
ahc_freebsd_intr(void *arg)
{
	struct ahc_softc *softc;

	softc = (struct ahc_softc *)arg;
	ahc_intr(softc);
}
252
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 *
 * Removes the SCB from the pending list, cancels its timeout,
 * performs the post-DMA sync/unload, harvests autosense data when
 * present, and completes the CCB back to CAM via xpt_done().
 * NOTE(review): appears to be called with the driver lock already
 * held (no ahc_lock here) — confirm against callers.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	/*
	 * Untagged I/O is serialized through a per-target driver queue
	 * when the chip lacks per-target SCB ranges (AHC_SCB_BTT);
	 * dequeue this command and kick off the next untagged one.
	 */
	if (ccb->ccb_h.func_code == XPT_SCSI_IO
	 && ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
	  || ccb->csio.tag_action == CAM_TAG_ACTION_NONE)
	 && (ahc->features & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;

		untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* The command has completed; its timeout must not fire. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	/* Target-mode continue I/Os take a short completion path. */
	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			/* Shadows the outer ccb only within this loop. */
			union ccb *ccb;

			ccb = list_scb->io_ctx;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb,
				    (ccb->ccb_h.timeout * hz)/1000);
		}

		/*
		 * Ensure that we didn't put a second instance of this
		 * SCB into the QINFIFO.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), scb->hscb->tag,
				   ROLE_INITIATOR, /*status*/0,
				   SEARCH_REMOVE);
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       &ahc->scb_data->sense[scb->hscb->tag],
		       (scb->sg_list->len & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
364
/*
 * CAM SIM action entry point: dispatch an incoming CCB on its
 * func_code.  Each case either queues work to the controller or
 * completes the CCB immediately via xpt_done().  The local 's' holds
 * saved lock state for the ahc_lock/ahc_unlock pairs guarding state
 * shared with the interrupt path.
 */
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ahc_softc *ahc;
	struct tmode_lstate *lstate;
	u_int target_id;
	u_int our_id;
	long s;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
	{
		struct tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			/* Queue the ATIO for use by incoming commands. */
			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct scb *scb;
		struct hardware_scb *hscb;

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {

			/*
			 * No free SCBs: freeze the SIM queue and ask
			 * CAM to requeue this CCB later.
			 */
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;
		ccb->ccb_h.ccb_ahc_ptr = ahc;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			/* A BDR carries no CDB; deliver it as a message. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate) {
					scb->flags |= SCB_TARGET_IMMEDIATE;
					ahc->pending_device = NULL;
				}
				hscb->control |= TARGET_SCB;
				tdata->target_phases = IDENTIFY_SEEN;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct tmode_tstate *tstate;
		struct tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
#ifdef AHC_NEW_TRAN_SETTINGS
		struct ahc_devinfo devinfo;
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ahc_initiator_tinfo *tinfo;
		struct tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		/*
		 * CURRENT_SETTINGS updates the negotiation goal;
		 * USER_SETTINGS updates the sysadmin defaults.
		 */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->current.protocol_version =
			    cts->protocol_version;
			tinfo->current.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, &spi->bus_width);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		/*
		 * Fields the caller did not mark valid are backfilled
		 * from the stored settings before rate validation.
		 */
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}

		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, syncrate, &spi->sync_offset,
					    spi->bus_width);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#else
		/* Legacy (pre-AHC_NEW_TRAN_SETTINGS) flat-flags variant. */
		struct ahc_devinfo devinfo;
		struct ccb_trans_settings *cts;
		struct ahc_initiator_tinfo *tinfo;
		struct tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int update_type;
		long s;		/* intentionally shadows the outer 's' */

		cts = &ccb->cts;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			ahc_validate_width(ahc, &cts->bus_width);
			ahc_set_width(ahc, &devinfo, cts->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_offset = tinfo->user.offset;
			else
				cts->sync_offset = tinfo->goal.offset;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_period = tinfo->user.period;
			else
				cts->sync_period = tinfo->goal.period;
		}

		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int ppr_options;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* Periods <= 9 (12.5ns) imply DT transfers. */
			ppr_options = 0;
			if (cts->sync_period <= 9)
				ppr_options = MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
						     &ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, syncrate, &cts->sync_offset,
					    MSG_EXT_WDTR_BUS_8_BIT);

			/* We use a period of 0 to represent async */
			if (cts->sync_offset == 0) {
				cts->sync_period = 0;
				ppr_options = 0;
			}

			if (ppr_options == MSG_EXT_PPR_DT_REQ
			 && tinfo->user.transport_version >= 3) {
				tinfo->goal.transport_version =
				    tinfo->user.transport_version;
				tinfo->current.transport_version =
				    tinfo->user.transport_version;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 cts->sync_period, cts->sync_offset,
					 ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#endif
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		uint32_t size_mb;
		uint32_t secs_per_cylinder;
		int extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			? ahc->flags & AHC_EXTENDED_TRANS_B
			: ahc->flags & AHC_EXTENDED_TRANS_A;

		/* Extended translation for disks > 1GB: 255 heads/63 spt. */
		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->flags & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE)
			      ? 0 : PIM_NOINITIATOR;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = 64;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
883
/*
 * Fill a ccb_trans_settings with either the current (active) or the
 * user (default) transfer parameters for a target.  Two bodies are
 * maintained: the AHC_NEW_TRAN_SETTINGS variant populates the
 * proto/xport specific sub-structures; the legacy variant uses the
 * flat CCB_TRANS_* flag/valid fields.
 * NOTE(review): the new-settings path takes no lock of its own — the
 * only visible caller (ahc_action, XPT_GET_TRAN_SETTINGS) already
 * holds ahc_lock; the legacy path saves/restores its own.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct ahc_devinfo devinfo;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_spi *spi;
	struct ahc_initiator_tinfo *targ_info;
	struct tmode_tstate *tstate;
	struct ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Select which stored parameter set to report. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	/* Disc/TQ are per-device; only valid for a concrete LUN. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct ahc_devinfo devinfo;
	struct ahc_initiator_tinfo *targ_info;
	struct tmode_tstate *tstate;
	struct ahc_transinfo *tinfo;
	long s;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Select which stored parameter set to report. */
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	ahc_lock(ahc, &s);

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	ahc_unlock(ahc, &s);

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	/* Disc/TQ are per-device; only valid for a concrete LUN. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
1001
1002static void
1003ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1004{
1005 struct ahc_softc *ahc;
1006 struct cam_sim *sim;
1007
1008 sim = (struct cam_sim *)callback_arg;
1009 ahc = (struct ahc_softc *)cam_sim_softc(sim);
1010 switch (code) {
1011 case AC_LOST_DEVICE:
1012 {
1013 struct ahc_devinfo devinfo;
1014 long s;
1015
1016 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1017 xpt_path_target_id(path),
1018 xpt_path_lun_id(path),
1019 SIM_CHANNEL(ahc, sim),
1020 ROLE_UNKNOWN);
1021
1022 /*
1023 * Revert to async/narrow transfers
1024 * for the next device.
1025 */
1026 ahc_lock(ahc, &s);
1027 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1028 AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1029 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1030 /*period*/0, /*offset*/0, /*ppr_options*/0,
1031 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1032 /*paused*/FALSE);
1033 ahc_unlock(ahc, &s);
1034 break;
1035 }
1036 default:
1037 break;
1038 }
1039}
1040
/*
 * ahc_execute_scb:
 *
 *	Completion callback handed to bus_dmamap_load() (also invoked
 *	directly with a pre-built segment list, or with nsegments == 0
 *	when there is no data phase).  Copies the DMA segments into the
 *	SCB's SG list, fills out the hardware SCB, arms the CCB timeout,
 *	and queues the SCB to the sequencer.
 */
1041static void
1042ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
1043		 int error)
1044{
1045	struct	 scb *scb;
1046	union	 ccb *ccb;
1047	struct	 ahc_softc *ahc;
1048	struct	 ahc_initiator_tinfo *tinfo;
1049	struct	 tmode_tstate *tstate;
1050	u_int	 mask;
1051	long	 s;
1052
1053	scb = (struct scb *)arg;
1054	ccb = scb->io_ctx;
1055	ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr;
1056
	/* A failed mapping aborts the transaction before it ever starts. */
1057	if (error != 0) {
1058		if (error == EFBIG)
1059			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
1060		else
1061			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
1062		if (nsegments != 0)
1063			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
1064		ahc_lock(ahc, &s);
1065		ahc_free_scb(ahc, scb);
1066		ahc_unlock(ahc, &s);
1067		xpt_done(ccb);
1068		return;
1069	}
1070	if (nsegments != 0) {
1071		struct	  ahc_dma_seg *sg;
1072		bus_dma_segment_t *end_seg;
1073		bus_dmasync_op_t op;
1074
1075		end_seg = dm_segs + nsegments;
1076
1077		/* Copy the segments into our SG list */
1078		sg = scb->sg_list;
1079		while (dm_segs < end_seg) {
1080			sg->addr = dm_segs->ds_addr;
1081/* XXX Add in the 5th byte of the address later. */
1082			sg->len = dm_segs->ds_len;
1083			sg++;
1084			dm_segs++;
1085		}
1086
1087		/*
1088		 * Note where to find the SG entries in bus space.
1089		 * We also set the full residual flag which the
1090		 * sequencer will clear as soon as a data transfer
1091		 * occurs.
1092		 */
1093		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
1094
1095		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1096			op = BUS_DMASYNC_PREREAD;
1097		else
1098			op = BUS_DMASYNC_PREWRITE;
1099
1100		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
1101
1102		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1103			struct target_data *tdata;
1104
1105			tdata = &scb->hscb->shared_data.tdata;
1106			tdata->target_phases |= DPHASE_PENDING;
1107			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1108				tdata->data_phase = P_DATAOUT;
1109			else
1110				tdata->data_phase = P_DATAIN;
1111
1112			/*
1113			 * If the transfer is of an odd length and in the
1114			 * "in" direction (scsi->HostBus), then it may
1115			 * trigger a bug in the 'WideODD' feature of
1116			 * non-Ultra2 chips. Force the total data-length
1117			 * to be even by adding an extra, 1 byte, SG,
1118			 * element. We do this even if we are not currently
1119			 * negotiated wide as negotiation could occur before
1120			 * this command is executed.
1121			 */
1122			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
1123			 && (ccb->csio.dxfer_len & 0x1) != 0
1124			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1125
1126				nsegments++;
1127				if (nsegments > AHC_NSEG) {
1128
					/* No room for the pad segment. */
1129					ahc_set_transaction_status(scb,
1130					    CAM_REQ_TOO_BIG);
1131					bus_dmamap_unload(ahc->buffer_dmat,
1132							  scb->dmamap);
1133					ahc_lock(ahc, &s);
1134					ahc_free_scb(ahc, scb);
1135					ahc_unlock(ahc, &s);
1136					xpt_done(ccb);
1137					return;
1138				}
1139				sg->addr = ahc->dma_bug_buf;
1140				sg->len = 1;
1141				sg++;
1142			}
1143		}
		/* Mark the final SG element so the sequencer stops there. */
1144		sg--;
1145		sg->len |= AHC_DMA_LAST_SEG;
1146
1147		/* Copy the first SG into the "current" data pointer area */
1148		scb->hscb->dataptr = scb->sg_list->addr;
1149		scb->hscb->datacnt = scb->sg_list->len;
1150	} else {
		/* No data phase: null SG pointer, zero data pointer/count. */
1151		scb->hscb->sgptr = SG_LIST_NULL;
1152		scb->hscb->dataptr = 0;
1153		scb->hscb->datacnt = 0;
1154	}
1155
1156	scb->sg_count = nsegments;
1157
1158	ahc_lock(ahc, &s);
1159
1160	/*
1161	 * Last time we need to check if this SCB needs to
1162	 * be aborted.
1163	 */
1164	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
1165		if (nsegments != 0)
1166			bus_dmamap_unload(ahc->buffer_dmat,
1167					  scb->dmamap);
1168		ahc_free_scb(ahc, scb);
1169		ahc_unlock(ahc, &s);
1170		xpt_done(ccb);
1171		return;
1172	}
1173
	/* Apply the current negotiation state to the hardware SCB. */
1174	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
1175				    SCSIID_OUR_ID(scb->hscb->scsiid),
1176				    ccb->ccb_h.target_id, &tstate);
1177
1178	mask = SCB_GET_TARGET_MASK(ahc, scb);
1179	scb->hscb->scsirate = tinfo->scsirate;
1180	scb->hscb->scsioffset = tinfo->current.offset;
1181	if ((tstate->ultraenb & mask) != 0)
1182		scb->hscb->control |= ULTRAENB;
1183
1184	if ((tstate->discenable & mask) != 0
1185	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1186		scb->hscb->control |= DISCENB;
1187
1188	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1189	 && (tinfo->current.width != 0 || tinfo->current.period != 0)) {
1190		scb->flags |= SCB_NEGOTIATE;
1191		scb->hscb->control |= MK_MESSAGE;
1192	}
1193
1194	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
1195
1196	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1197
	/* Arm the watchdog; CAM_TIME_DEFAULT maps to 5 seconds here. */
1198	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1199		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1200			ccb->ccb_h.timeout = 5 * 1000;
1201		ccb->ccb_h.timeout_ch =
1202		    timeout(ahc_timeout, (caddr_t)scb,
1203			    (ccb->ccb_h.timeout * hz) / 1000);
1204	}
1205
1206	/*
1207	 * We only allow one untagged transaction
1208	 * per target in the initiator role unless
1209	 * we are storing a full busy target *lun*
1210	 * table in SCB space.
1211	 */
1212	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
1213	 && (ahc->features & AHC_SCB_BTT) == 0) {
1214		struct scb_tailq *untagged_q;
1215
1216		untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
1217		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
1218		if (TAILQ_FIRST(untagged_q) != scb) {
			/*
			 * Another untagged command is already active on
			 * this target; ours waits on the queue and is
			 * started from ahc_run_untagged_queue().
			 */
1219			ahc_unlock(ahc, &s);
1220			return;
1221		}
1222	}
1223	scb->flags |= SCB_ACTIVE;
1224
1225	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Hand the SCB directly to the paused sequencer. */
1226		pause_sequencer(ahc);
1227		if ((ahc->flags & AHC_PAGESCBS) == 0)
1228			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
1229		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
1230		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
1231		unpause_sequencer(ahc);
1232	} else {
1233		ahc_queue_scb(ahc, scb);
1234	}
1235
1236	ahc_unlock(ahc, &s);
1237}
1238
/*
 * Polled-mode entry point for CAM: service any pending controller
 * interrupts by calling the core interrupt routine directly.
 */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *softc;

	softc = (struct ahc_softc *)cam_sim_softc(sim);
	ahc_intr(softc);
}
1244
/*
 * ahc_setup_data:
 *
 *	Copy the CDB into the hardware SCB and map the data buffer(s)
 *	for DMA.  Every path ends by invoking ahc_execute_scb(), either
 *	directly or as the bus_dmamap_load() callback.
 */
1245static void
1246ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
1247	       struct ccb_scsiio *csio, struct scb *scb)
1248{
1249	struct hardware_scb *hscb;
1250	struct ccb_hdr *ccb_h;
1251
1252	hscb = scb->hscb;
1253	ccb_h = &csio->ccb_h;
1254
1255	if (ccb_h->func_code == XPT_SCSI_IO) {
1256		hscb->cdb_len = csio->cdb_len;
1257		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/*
			 * Reject CDBs that exceed our 32 byte buffer or
			 * that are passed by physical address.
			 */
1259			if (hscb->cdb_len > sizeof(hscb->cdb32)
1260			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
1261				u_long s;
1262
1263				ahc_set_transaction_status(scb,
1264							   CAM_REQ_INVALID);
1265				ahc_lock(ahc, &s);
1266				ahc_free_scb(ahc, scb);
1267				ahc_unlock(ahc, &s);
1268				xpt_done((union ccb *)csio);
1269				return;
1270			}
			/* CDBs longer than 12 bytes live in the cdb32 area. */
1271			if (hscb->cdb_len > 12) {
1272				memcpy(hscb->cdb32,
1273				       csio->cdb_io.cdb_ptr,
1274				       hscb->cdb_len);
1275				hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
1276			} else {
1277				memcpy(hscb->shared_data.cdb,
1278				       csio->cdb_io.cdb_ptr,
1279				       hscb->cdb_len);
1280			}
1281		} else {
			/* CDB embedded in the CCB rather than a pointer. */
1282			if (hscb->cdb_len > 12) {
1283				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
1284				       hscb->cdb_len);
1285				hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
1286			} else {
1287				memcpy(hscb->shared_data.cdb,
1288				       csio->cdb_io.cdb_bytes,
1289				       hscb->cdb_len);
1290			}
1291		}
1292	}
1293
1294	/* Only use S/G if there is a transfer */
1295	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1296		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1297			/* We've been given a pointer to a single buffer */
1298			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1299				int s;
1300				int error;
1301
				/* Map the virtual buffer; the callback
				 * (ahc_execute_scb) may run immediately
				 * or later if mapping is deferred. */
1302				s = splsoftvm();
1303				error = bus_dmamap_load(ahc->buffer_dmat,
1304							scb->dmamap,
1305							csio->data_ptr,
1306							csio->dxfer_len,
1307							ahc_execute_scb,
1308							scb, /*flags*/0);
1309				if (error == EINPROGRESS) {
1310					/*
1311					 * So as to maintain ordering,
1312					 * freeze the controller queue
1313					 * until our mapping is
1314					 * returned.
1315					 */
1316					xpt_freeze_simq(sim,
1317							/*count*/1);
1318					scb->io_ctx->ccb_h.status |=
1319					    CAM_RELEASE_SIMQ;
1320				}
1321				splx(s);
1322			} else {
1323				struct bus_dma_segment seg;
1324
1325				/* Pointer to physical buffer */
1326				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
1327					panic("ahc_setup_data - Transfer size "
1328					      "larger than can device max");
1329
1330				seg.ds_addr = (bus_addr_t)csio->data_ptr;
1331				seg.ds_len = csio->dxfer_len;
1332				ahc_execute_scb(scb, &seg, 1, 0);
1333			}
1334		} else {
1335			struct bus_dma_segment *segs;
1336
1337			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
1338				panic("ahc_setup_data - Physical segment "
1339				      "pointers unsupported");
1340
1341			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
1342				panic("ahc_setup_data - Virtual segment "
1343				      "addresses unsupported");
1344
1345			/* Just use the segments provided */
1346			segs = (struct bus_dma_segment *)csio->data_ptr;
1347			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
1348		}
1349	} else {
		/* No data phase at all. */
1350		ahc_execute_scb(scb, NULL, 0, 0);
1351	}
1352}
1353
1354static void
1355ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
1356
1357 if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1358 struct scb *list_scb;
1359
1360 scb->flags |= SCB_RECOVERY_SCB;
1361
1362 /*
1363 * Take all queued, but not sent SCBs out of the equation.
1364 * Also ensure that no new CCBs are queued to us while we
1365 * try to fix this problem.
1366 */
1367 if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1368 xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
1369 scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1370 }
1371
1372 /*
1373 * Go through all of our pending SCBs and remove
1374 * any scheduled timeouts for them. We will reschedule
1375 * them after we've successfully fixed this problem.
1376 */
1377 LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
1378 union ccb *ccb;
1379
1380 ccb = list_scb->io_ctx;
1381 untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
1382 }
1383 }
1384}
1385
1386void
1387ahc_timeout(void *arg)
1388{
1389 struct scb *scb;
1390 struct ahc_softc *ahc;
1391 long s;
1392 int found;
1393 u_int last_phase;
1394 int target;
1395 int lun;
1396 int i;
1397 char channel;
1398
1399 scb = (struct scb *)arg;
1400 ahc = (struct ahc_softc *)scb->io_ctx->ccb_h.ccb_ahc_ptr;
1401
1402 ahc_lock(ahc, &s);
1403
1404 /*
1405 * Ensure that the card doesn't do anything
1406 * behind our back. Also make sure that we
1407 * didn't "just" miss an interrupt that would
1408 * affect this timeout.
1409 */
1410 do {
1411 ahc_intr(ahc);
1412 pause_sequencer(ahc);
1413 } while (ahc_inb(ahc, INTSTAT) & INT_PEND);
1414
1415 /* Make sure the sequencer is in a safe location. */
1416 ahc_clear_critical_section(ahc);
1417
1418 ahc_print_path(ahc, scb);
1419 if ((scb->flags & SCB_ACTIVE) == 0) {
1420 /* Previous timeout took care of me already */
1421 printf("Timedout SCB %d handled by another timeout\n",
1422 scb->hscb->tag);
1423 unpause_sequencer(ahc);
1424 ahc_unlock(ahc, &s);
1425 return;
1426 }
1427
1428 target = SCB_GET_TARGET(ahc, scb);
1429 channel = SCB_GET_CHANNEL(ahc, scb);
1430 lun = SCB_GET_LUN(scb);
1431
1432 printf("SCB 0x%x - timed out ", scb->hscb->tag);
1433 /*
1434 * Take a snapshot of the bus state and print out
1435 * some information so we can track down driver bugs.
1436 */
1437 last_phase = ahc_inb(ahc, LASTPHASE);
1438
1439 for (i = 0; i < num_phases; i++) {
1440 if (last_phase == phase_table[i].phase)
1441 break;
1442 }
1443 printf("%s", phase_table[i].phasemsg);
1444
1445 printf(", SEQADDR == 0x%x\n",
1446 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
1447
1448 if (scb->sg_count > 0) {
1449 for (i = 0; i < scb->sg_count; i++) {
1450 printf("sg[%d] - Addr 0x%x : Length %d\n",
1451 i,
1452 scb->sg_list[i].addr,
1453 scb->sg_list[i].len & AHC_SG_LEN_MASK);
1454 }
1455 }
1456 if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
1457 /*
1458 * Been down this road before.
1459 * Do a full bus reset.
1460 */
1461bus_reset:
1462 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
1463 found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
1464 printf("%s: Issued Channel %c Bus Reset. "
1465 "%d SCBs aborted\n", ahc_name(ahc), channel, found);
1466 } else {
1467 /*
1468 * If we are a target, transition to bus free and report
1469 * the timeout.
1470 *
1471 * The target/initiator that is holding up the bus may not
1472 * be the same as the one that triggered this timeout
1473 * (different commands have different timeout lengths).
1474	 * If the bus is idle and we are acting as the initiator
1475 * for this request, queue a BDR message to the timed out
1476 * target. Otherwise, if the timed out transaction is
1477 * active:
1478 * Initiator transaction:
1479 * Stuff the message buffer with a BDR message and assert
1480 * ATN in the hopes that the target will let go of the bus
1481 * and go to the mesgout phase. If this fails, we'll
1482 * get another timeout 2 seconds later which will attempt
1483 * a bus reset.
1484 *
1485 * Target transaction:
1486 * Transition to BUS FREE and report the error.
1487 * It's good to be the target!
1488 */
1489 u_int active_scb_index;
1490
1491 active_scb_index = ahc_inb(ahc, SCB_TAG);
1492
1493 if (last_phase != P_BUSFREE
1494 && (active_scb_index < ahc->scb_data->numscbs)) {
1495 struct scb *active_scb;
1496
1497 /*
1498 * If the active SCB is not from our device,
1499 * assume that another device is hogging the bus
1500			 * and wait for its timeout to expire before
1501 * taking additional action.
1502 */
1503 active_scb = ahc_lookup_scb(ahc, active_scb_index);
1504 if (active_scb->hscb->scsiid != scb->hscb->scsiid
1505 || active_scb->hscb->lun != scb->hscb->lun) {
1506 struct ccb_hdr *ccbh;
1507 u_int newtimeout;
1508
1509 ahc_print_path(ahc, scb);
1510 printf("Other SCB Timeout%s",
1511 (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
1512 ? " again\n" : "\n");
1513 scb->flags |= SCB_OTHERTCL_TIMEOUT;
1514 newtimeout =
1515 MAX(active_scb->io_ctx->ccb_h.timeout,
1516 scb->io_ctx->ccb_h.timeout);
1517 ccbh = &scb->io_ctx->ccb_h;
1518 scb->io_ctx->ccb_h.timeout_ch =
1519 timeout(ahc_timeout, scb,
1520 (newtimeout * hz) / 1000);
1521 ahc_unlock(ahc, &s);
1522 return;
1523 }
1524
1525 /* It's us */
1526 if ((scb->hscb->control & TARGET_SCB) != 0) {
1527
1528 /*
1529 * Send back any queued up transactions
1530 * and properly record the error condition.
1531 */
1532 ahc_freeze_devq(ahc, scb);
1533 ahc_set_transaction_status(scb,
1534 CAM_CMD_TIMEOUT);
1535 ahc_freeze_scb(scb);
1536 ahc_done(ahc, scb);
1537
1538 /* Will clear us from the bus */
1539 restart_sequencer(ahc);
1540 ahc_unlock(ahc, &s);
1541 return;
1542 }
1543
1544 ahc_set_recoveryscb(ahc, active_scb);
34 */
35
36#include <dev/aic7xxx/aic7xxx_freebsd.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#include <sys/eventhandler.h>
40
41#ifndef AHC_TMODE_ENABLE
42#define AHC_TMODE_ENABLE 0
43#endif
44
45#define ccb_scb_ptr spriv_ptr0
46#define ccb_ahc_ptr spriv_ptr1
47
48#ifdef AHC_DEBUG
49static int ahc_debug = AHC_DEBUG;
50#endif
51
52static void ahc_freebsd_intr(void *arg);
53
54#if UNUSED
55static void ahc_dump_targcmd(struct target_cmd *cmd);
56#endif
57static void ahc_action(struct cam_sim *sim, union ccb *ccb);
58static void ahc_get_tran_settings(struct ahc_softc *ahc,
59 int our_id, char channel,
60 struct ccb_trans_settings *cts);
61static void ahc_async(void *callback_arg, uint32_t code,
62 struct cam_path *path, void *arg);
63static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
64 int nsegments, int error);
65static void ahc_poll(struct cam_sim *sim);
66static void ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
67 struct ccb_scsiio *csio, struct scb *scb);
68static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
69 union ccb *ccb);
70static int ahc_create_path(struct ahc_softc *ahc,
71 char channel, u_int target, u_int lun,
72 struct cam_path **path);
73
74static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
75
76static int
77ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
78 u_int lun, struct cam_path **path)
79{
80 path_id_t path_id;
81
82 if (channel == 'B')
83 path_id = cam_sim_path(ahc->platform_data->sim_b);
84 else
85 path_id = cam_sim_path(ahc->platform_data->sim);
86
87 return (xpt_create_path(path, /*periph*/NULL,
88 path_id, target, lun));
89}
90
91/*
92 * Attach all the sub-devices we can find
93 *
 * Hooks up the interrupt handler, then registers one CAM SIM/path per
 * channel (two for AHC_TWIN controllers) and an AC_LOST_DEVICE async
 * callback for each.  Returns the number of buses attached (0 .. 2);
 * on partial failure, whatever was attached so far is kept.
93 */
94int
95ahc_attach(struct ahc_softc *ahc)
96{
97	char	ahc_info[256];
98	struct ccb_setasync csa;
99	struct cam_devq *devq;
100	int bus_id;
101	int bus_id2;
102	struct cam_sim *sim;
103	struct cam_sim *sim2;
104	struct cam_path *path;
105	struct cam_path *path2;
106	long s;
107	int count;
108	int error;
109
110	count = 0;
111	sim = NULL;
112	sim2 = NULL;
113
114	ahc_controller_info(ahc, ahc_info);
115	printf("%s\n", ahc_info);
116	ahc_lock(ahc, &s);
117	/* Hook up our interrupt handler */
118	if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
119				    INTR_TYPE_CAM, ahc_freebsd_intr, ahc,
120				    &ahc->platform_data->ih)) != 0) {
121		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
122			      error);
123		goto fail;
124	}
125
126	/*
127	 * Attach secondary channel first if the user has
128	 * declared it the primary channel.
129	 */
130	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
131		bus_id = 1;
132		bus_id2 = 0;
133	} else {
134		bus_id = 0;
135		bus_id2 = 1;
136	}
137
138	/*
139	 * Create the device queue for our SIM(s).
140	 */
141	devq = cam_simq_alloc(AHC_SCB_MAX - 1);
142	if (devq == NULL)
143		goto fail;
144
145	/*
146	 * Construct our first channel SIM entry
147	 */
148	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
149			    device_get_unit(ahc->dev_softc),
150			    1, AHC_SCB_MAX - 1, devq);
151	if (sim == NULL) {
152		cam_simq_free(devq);
153		goto fail;
154	}
155
156	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
157		cam_sim_free(sim, /*free_devq*/TRUE);
158		sim = NULL;
159		goto fail;
160	}
161
162	if (xpt_create_path(&path, /*periph*/NULL,
163			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
164			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
165		xpt_bus_deregister(cam_sim_path(sim));
166		cam_sim_free(sim, /*free_devq*/TRUE);
167		sim = NULL;
168		goto fail;
169	}
170
	/* Ask CAM to notify us of lost devices on this bus. */
171	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
172	csa.ccb_h.func_code = XPT_SASYNC_CB;
173	csa.event_enable = AC_LOST_DEVICE;
174	csa.callback = ahc_async;
175	csa.callback_arg = sim;
176	xpt_action((union ccb *)&csa);
177	count++;
178
179	if (ahc->features & AHC_TWIN) {
		/* Second channel shares the devq with the first. */
180		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
181				    ahc, device_get_unit(ahc->dev_softc), 1,
182				    AHC_SCB_MAX - 1, devq);
183
184		if (sim2 == NULL) {
185			printf("ahc_attach: Unable to attach second "
186			       "bus due to resource shortage");
187			goto fail;
188		}
189
190		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
191			printf("ahc_attach: Unable to attach second "
192			       "bus due to resource shortage");
193			/*
194			 * We do not want to destroy the device queue
195			 * because the first bus is using it.
196			 */
197			cam_sim_free(sim2, /*free_devq*/FALSE);
198			goto fail;
199		}
200
201		if (xpt_create_path(&path2, /*periph*/NULL,
202				    cam_sim_path(sim2),
203				    CAM_TARGET_WILDCARD,
204				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
205			xpt_bus_deregister(cam_sim_path(sim2));
206			cam_sim_free(sim2, /*free_devq*/FALSE);
207			sim2 = NULL;
208			goto fail;
209		}
210		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
211		csa.ccb_h.func_code = XPT_SASYNC_CB;
212		csa.event_enable = AC_LOST_DEVICE;
213		csa.callback = ahc_async;
214		csa.callback_arg = sim2;
215		xpt_action((union ccb *)&csa);
216		count++;
217	}
218
219fail:
	/*
	 * Record whatever SIMs/paths we managed to create, honoring the
	 * user's primary-channel preference for the first/second slots.
	 */
220	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
221		ahc->platform_data->sim_b = sim;
222		ahc->platform_data->path_b = path;
223		ahc->platform_data->sim = sim2;
224		ahc->platform_data->path = path2;
225	} else {
226		ahc->platform_data->sim = sim;
227		ahc->platform_data->path = path;
228		ahc->platform_data->sim_b = sim2;
229		ahc->platform_data->path_b = path2;
230	}
231	ahc_unlock(ahc, &s);
232
233	if (count != 0)
234		/* We have to wait until after any system dumps... */
235		EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
236				      ahc, SHUTDOWN_PRI_DEFAULT);
237
238	return (count);
239}
240
/*
 * Interrupt handler registered with bus_setup_intr(); simply forwards
 * the interrupt to the bus independent core routine.
 */
void
ahc_freebsd_intr(void *arg)
{
	ahc_intr((struct ahc_softc *)arg);
}
252
253/*
254 * We have an SCB which has been processed by the
255 * adapter; now we look to see how the operation
256 * went.  Completes the CCB back to CAM.
257 */
258void
259ahc_done(struct ahc_softc *ahc, struct scb *scb)
260{
261	union ccb *ccb;
262
263	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
264		  ("ahc_done - scb %d\n", scb->hscb->tag));
265
266	ccb = scb->io_ctx;
267	LIST_REMOVE(scb, pending_links);
	/*
	 * Untagged initiator commands occupy the per-target untagged
	 * queue (see ahc_execute_scb); release our slot and kick off
	 * the next waiter, unless the chip keeps a full busy-lun table.
	 */
268	if (ccb->ccb_h.func_code == XPT_SCSI_IO
269	 && ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
270	  || ccb->csio.tag_action == CAM_TAG_ACTION_NONE)
271	 && (ahc->features & AHC_SCB_BTT) == 0) {
272		struct scb_tailq *untagged_q;
273
274		untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id];
275		TAILQ_REMOVE(untagged_q, scb, links.tqe);
276		ahc_run_untagged_queue(ahc, untagged_q);
277	}
278
279	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);
280
	/* Sync and unload the data buffer mapping, if any. */
281	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
282		bus_dmasync_op_t op;
283
284		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
285			op = BUS_DMASYNC_POSTREAD;
286		else
287			op = BUS_DMASYNC_POSTWRITE;
288		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
289		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
290	}
291
292	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
293		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
294			ccb->ccb_h.status |= CAM_REQ_CMP;
295		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
296		ahc_free_scb(ahc, scb);
297		xpt_done(ccb);
298		return;
299	}
300
301	/*
302	 * If the recovery SCB completes, we have to be
303	 * out of our timeout.
304	 */
305	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
306		struct	scb *list_scb;
307
308		/*
309		 * We were able to complete the command successfully,
310		 * so reinstate the timeouts for all other pending
311		 * commands.
312		 */
313		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
314			union ccb *ccb;
315
316			ccb = list_scb->io_ctx;
317			ccb->ccb_h.timeout_ch =
318			    timeout(ahc_timeout, list_scb,
319				    (ccb->ccb_h.timeout * hz)/1000);
320		}
321
322		/*
323		 * Ensure that we didn't put a second instance of this
324		 * SCB into the QINFIFO.
325		 */
326		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
327				   SCB_GET_CHANNEL(ahc, scb),
328				   SCB_GET_LUN(scb), scb->hscb->tag,
329				   ROLE_INITIATOR, /*status*/0,
330				   SEARCH_REMOVE);
		/* Report recovery completions as command timeouts. */
331		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
332		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
333			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
334		ahc_print_path(ahc, scb);
335		printf("no longer in timeout, status = %x\n",
336		       ccb->ccb_h.status);
337	}
338
339	/* Don't clobber any existing error state */
340	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
341		ccb->ccb_h.status |= CAM_REQ_CMP;
342	} else if ((scb->flags & SCB_SENSE) != 0) {
343		/*
344		 * We performed autosense retrieval.
345		 *
346		 * Zero any sense not transferred by the
347		 * device. The SCSI spec mandates that any
348		 * untransfered data should be assumed to be
349		 * zero. Complete the 'bounce' of sense information
350		 * through buffers accessible via bus-space by
351		 * copying it into the clients csio.
352		 */
353		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
354		memcpy(&ccb->csio.sense_data,
355		       &ahc->scb_data->sense[scb->hscb->tag],
356		       (scb->sg_list->len & AHC_SG_LEN_MASK)
357		       - ccb->csio.sense_resid);
358		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
359	}
360	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
361	ahc_free_scb(ahc, scb);
362	xpt_done(ccb);
363}
364
365static void
366ahc_action(struct cam_sim *sim, union ccb *ccb)
367{
368 struct ahc_softc *ahc;
369 struct tmode_lstate *lstate;
370 u_int target_id;
371 u_int our_id;
372 long s;
373
374 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));
375
376 ahc = (struct ahc_softc *)cam_sim_softc(sim);
377
378 target_id = ccb->ccb_h.target_id;
379 our_id = SIM_SCSI_ID(ahc, sim);
380
381 switch (ccb->ccb_h.func_code) {
382 /* Common cases first */
383 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
384 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
385 {
386 struct tmode_tstate *tstate;
387 cam_status status;
388
389 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
390 &lstate, TRUE);
391
392 if (status != CAM_REQ_CMP) {
393 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
394 /* Response from the black hole device */
395 tstate = NULL;
396 lstate = ahc->black_hole;
397 } else {
398 ccb->ccb_h.status = status;
399 xpt_done(ccb);
400 break;
401 }
402 }
403 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
404
405 ahc_lock(ahc, &s);
406 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
407 sim_links.sle);
408 ccb->ccb_h.status = CAM_REQ_INPROG;
409 if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
410 ahc_run_tqinfifo(ahc, /*paused*/FALSE);
411 ahc_unlock(ahc, &s);
412 break;
413 }
414
415 /*
416 * The target_id represents the target we attempt to
417 * select. In target mode, this is the initiator of
418 * the original command.
419 */
420 our_id = target_id;
421 target_id = ccb->csio.init_id;
422 /* FALLTHROUGH */
423 }
424 case XPT_SCSI_IO: /* Execute the requested I/O operation */
425 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
426 {
427 struct scb *scb;
428 struct hardware_scb *hscb;
429
430 /*
431 * get an scb to use.
432 */
433 ahc_lock(ahc, &s);
434 if ((scb = ahc_get_scb(ahc)) == NULL) {
435
436 ahc->flags |= AHC_RESOURCE_SHORTAGE;
437 ahc_unlock(ahc, &s);
438 xpt_freeze_simq(sim, /*count*/1);
439 ccb->ccb_h.status = CAM_REQUEUE_REQ;
440 xpt_done(ccb);
441 return;
442 }
443 ahc_unlock(ahc, &s);
444
445 hscb = scb->hscb;
446
447 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
448 ("start scb(%p)\n", scb));
449 scb->io_ctx = ccb;
450 /*
451 * So we can find the SCB when an abort is requested
452 */
453 ccb->ccb_h.ccb_scb_ptr = scb;
454 ccb->ccb_h.ccb_ahc_ptr = ahc;
455
456 /*
457 * Put all the arguments for the xfer in the scb
458 */
459 hscb->control = 0;
460 hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
461 hscb->lun = ccb->ccb_h.target_lun;
462 if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
463 hscb->cdb_len = 0;
464 scb->flags |= SCB_DEVICE_RESET;
465 hscb->control |= MK_MESSAGE;
466 ahc_execute_scb(scb, NULL, 0, 0);
467 } else {
468 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
469 struct target_data *tdata;
470
471 tdata = &hscb->shared_data.tdata;
472 if (ahc->pending_device == lstate) {
473 scb->flags |= SCB_TARGET_IMMEDIATE;
474 ahc->pending_device = NULL;
475 }
476 hscb->control |= TARGET_SCB;
477 tdata->target_phases = IDENTIFY_SEEN;
478 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
479 tdata->target_phases |= SPHASE_PENDING;
480 tdata->scsi_status =
481 ccb->csio.scsi_status;
482 }
483 tdata->initiator_tag = ccb->csio.tag_id;
484 }
485 if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
486 hscb->control |= ccb->csio.tag_action;
487
488 ahc_setup_data(ahc, sim, &ccb->csio, scb);
489 }
490 break;
491 }
492 case XPT_NOTIFY_ACK:
493 case XPT_IMMED_NOTIFY:
494 {
495 struct tmode_tstate *tstate;
496 struct tmode_lstate *lstate;
497 cam_status status;
498
499 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
500 &lstate, TRUE);
501
502 if (status != CAM_REQ_CMP) {
503 ccb->ccb_h.status = status;
504 xpt_done(ccb);
505 break;
506 }
507 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
508 sim_links.sle);
509 ccb->ccb_h.status = CAM_REQ_INPROG;
510 ahc_send_lstate_events(ahc, lstate);
511 break;
512 }
513 case XPT_EN_LUN: /* Enable LUN as a target */
514 ahc_handle_en_lun(ahc, sim, ccb);
515 xpt_done(ccb);
516 break;
517 case XPT_ABORT: /* Abort the specified CCB */
518 {
519 ahc_abort_ccb(ahc, sim, ccb);
520 break;
521 }
522 case XPT_SET_TRAN_SETTINGS:
523 {
524#ifdef AHC_NEW_TRAN_SETTINGS
525 struct ahc_devinfo devinfo;
526 struct ccb_trans_settings *cts;
527 struct ccb_trans_settings_scsi *scsi;
528 struct ccb_trans_settings_spi *spi;
529 struct ahc_initiator_tinfo *tinfo;
530 struct tmode_tstate *tstate;
531 uint16_t *discenable;
532 uint16_t *tagenable;
533 u_int update_type;
534
535 cts = &ccb->cts;
536 scsi = &cts->proto_specific.scsi;
537 spi = &cts->xport_specific.spi;
538 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
539 cts->ccb_h.target_id,
540 cts->ccb_h.target_lun,
541 SIM_CHANNEL(ahc, sim),
542 ROLE_UNKNOWN);
543 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
544 devinfo.our_scsiid,
545 devinfo.target, &tstate);
546 update_type = 0;
547 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
548 update_type |= AHC_TRANS_GOAL;
549 discenable = &tstate->discenable;
550 tagenable = &tstate->tagenable;
551 tinfo->current.protocol_version =
552 cts->protocol_version;
553 tinfo->current.transport_version =
554 cts->transport_version;
555 tinfo->goal.protocol_version =
556 cts->protocol_version;
557 tinfo->goal.transport_version =
558 cts->transport_version;
559 } else if (cts->type == CTS_TYPE_USER_SETTINGS) {
560 update_type |= AHC_TRANS_USER;
561 discenable = &ahc->user_discenable;
562 tagenable = &ahc->user_tagenable;
563 tinfo->user.protocol_version =
564 cts->protocol_version;
565 tinfo->user.transport_version =
566 cts->transport_version;
567 } else {
568 ccb->ccb_h.status = CAM_REQ_INVALID;
569 xpt_done(ccb);
570 break;
571 }
572
573 ahc_lock(ahc, &s);
574
575 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
576 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
577 *discenable |= devinfo.target_mask;
578 else
579 *discenable &= ~devinfo.target_mask;
580 }
581
582 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
583 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
584 *tagenable |= devinfo.target_mask;
585 else
586 *tagenable &= ~devinfo.target_mask;
587 }
588
589 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
590 ahc_validate_width(ahc, &spi->bus_width);
591 ahc_set_width(ahc, &devinfo, spi->bus_width,
592 update_type, /*paused*/FALSE);
593 }
594
595 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
596 if (update_type == AHC_TRANS_USER)
597 spi->ppr_options = tinfo->user.ppr_options;
598 else
599 spi->ppr_options = tinfo->goal.ppr_options;
600 }
601
602 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
603 if (update_type == AHC_TRANS_USER)
604 spi->sync_offset = tinfo->user.offset;
605 else
606 spi->sync_offset = tinfo->goal.offset;
607 }
608
609 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
610 if (update_type == AHC_TRANS_USER)
611 spi->sync_period = tinfo->user.period;
612 else
613 spi->sync_period = tinfo->goal.period;
614 }
615
616 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
617 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
618 struct ahc_syncrate *syncrate;
619 u_int maxsync;
620
621 if ((ahc->features & AHC_ULTRA2) != 0)
622 maxsync = AHC_SYNCRATE_DT;
623 else if ((ahc->features & AHC_ULTRA) != 0)
624 maxsync = AHC_SYNCRATE_ULTRA;
625 else
626 maxsync = AHC_SYNCRATE_FAST;
627
628 syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
629 &spi->ppr_options,
630 maxsync);
631 ahc_validate_offset(ahc, syncrate, &spi->sync_offset,
632 spi->bus_width);
633
634 /* We use a period of 0 to represent async */
635 if (spi->sync_offset == 0) {
636 spi->sync_period = 0;
637 spi->ppr_options = 0;
638 }
639
640 ahc_set_syncrate(ahc, &devinfo, syncrate,
641 spi->sync_period, spi->sync_offset,
642 spi->ppr_options, update_type,
643 /*paused*/FALSE);
644 }
645 ahc_unlock(ahc, &s);
646 ccb->ccb_h.status = CAM_REQ_CMP;
647 xpt_done(ccb);
648#else
649 struct ahc_devinfo devinfo;
650 struct ccb_trans_settings *cts;
651 struct ahc_initiator_tinfo *tinfo;
652 struct tmode_tstate *tstate;
653 uint16_t *discenable;
654 uint16_t *tagenable;
655 u_int update_type;
656 long s;
657
658 cts = &ccb->cts;
659 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
660 cts->ccb_h.target_id,
661 cts->ccb_h.target_lun,
662 SIM_CHANNEL(ahc, sim),
663 ROLE_UNKNOWN);
664 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
665 devinfo.our_scsiid,
666 devinfo.target, &tstate);
667 update_type = 0;
668 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
669 update_type |= AHC_TRANS_GOAL;
670 discenable = &tstate->discenable;
671 tagenable = &tstate->tagenable;
672 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
673 update_type |= AHC_TRANS_USER;
674 discenable = &ahc->user_discenable;
675 tagenable = &ahc->user_tagenable;
676 } else {
677 ccb->ccb_h.status = CAM_REQ_INVALID;
678 xpt_done(ccb);
679 break;
680 }
681
682 ahc_lock(ahc, &s);
683
684 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
685 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
686 *discenable |= devinfo.target_mask;
687 else
688 *discenable &= ~devinfo.target_mask;
689 }
690
691 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
692 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
693 *tagenable |= devinfo.target_mask;
694 else
695 *tagenable &= ~devinfo.target_mask;
696 }
697
698 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
699 ahc_validate_width(ahc, &cts->bus_width);
700 ahc_set_width(ahc, &devinfo, cts->bus_width,
701 update_type, /*paused*/FALSE);
702 }
703
704 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
705 if (update_type == AHC_TRANS_USER)
706 cts->sync_offset = tinfo->user.offset;
707 else
708 cts->sync_offset = tinfo->goal.offset;
709 }
710
711 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
712 if (update_type == AHC_TRANS_USER)
713 cts->sync_period = tinfo->user.period;
714 else
715 cts->sync_period = tinfo->goal.period;
716 }
717
718 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
719 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
720 struct ahc_syncrate *syncrate;
721 u_int ppr_options;
722 u_int maxsync;
723
724 if ((ahc->features & AHC_ULTRA2) != 0)
725 maxsync = AHC_SYNCRATE_DT;
726 else if ((ahc->features & AHC_ULTRA) != 0)
727 maxsync = AHC_SYNCRATE_ULTRA;
728 else
729 maxsync = AHC_SYNCRATE_FAST;
730
731 ppr_options = 0;
732 if (cts->sync_period <= 9)
733 ppr_options = MSG_EXT_PPR_DT_REQ;
734
735 syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
736 &ppr_options,
737 maxsync);
738 ahc_validate_offset(ahc, syncrate, &cts->sync_offset,
739 MSG_EXT_WDTR_BUS_8_BIT);
740
741 /* We use a period of 0 to represent async */
742 if (cts->sync_offset == 0) {
743 cts->sync_period = 0;
744 ppr_options = 0;
745 }
746
747 if (ppr_options == MSG_EXT_PPR_DT_REQ
748 && tinfo->user.transport_version >= 3) {
749 tinfo->goal.transport_version =
750 tinfo->user.transport_version;
751 tinfo->current.transport_version =
752 tinfo->user.transport_version;
753 }
754
755 ahc_set_syncrate(ahc, &devinfo, syncrate,
756 cts->sync_period, cts->sync_offset,
757 ppr_options, update_type,
758 /*paused*/FALSE);
759 }
760 ahc_unlock(ahc, &s);
761 ccb->ccb_h.status = CAM_REQ_CMP;
762 xpt_done(ccb);
763#endif
764 break;
765 }
766 case XPT_GET_TRAN_SETTINGS:
767 /* Get default/user set transfer settings for the target */
768 {
769
770 ahc_lock(ahc, &s);
771 ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
772 SIM_CHANNEL(ahc, sim), &ccb->cts);
773 ahc_unlock(ahc, &s);
774 xpt_done(ccb);
775 break;
776 }
777 case XPT_CALC_GEOMETRY:
778 {
779 struct ccb_calc_geometry *ccg;
780 uint32_t size_mb;
781 uint32_t secs_per_cylinder;
782 int extended;
783
784 ccg = &ccb->ccg;
785 size_mb = ccg->volume_size
786 / ((1024L * 1024L) / ccg->block_size);
787 extended = SIM_IS_SCSIBUS_B(ahc, sim)
788 ? ahc->flags & AHC_EXTENDED_TRANS_B
789 : ahc->flags & AHC_EXTENDED_TRANS_A;
790
791 if (size_mb > 1024 && extended) {
792 ccg->heads = 255;
793 ccg->secs_per_track = 63;
794 } else {
795 ccg->heads = 64;
796 ccg->secs_per_track = 32;
797 }
798 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
799 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
800 ccb->ccb_h.status = CAM_REQ_CMP;
801 xpt_done(ccb);
802 break;
803 }
804 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
805 {
806 int found;
807
808 ahc_lock(ahc, &s);
809 found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
810 /*initiate reset*/TRUE);
811 ahc_unlock(ahc, &s);
812 if (bootverbose) {
813 xpt_print_path(SIM_PATH(ahc, sim));
814 printf("SCSI bus reset delivered. "
815 "%d SCBs aborted.\n", found);
816 }
817 ccb->ccb_h.status = CAM_REQ_CMP;
818 xpt_done(ccb);
819 break;
820 }
821 case XPT_TERM_IO: /* Terminate the I/O process */
822 /* XXX Implement */
823 ccb->ccb_h.status = CAM_REQ_INVALID;
824 xpt_done(ccb);
825 break;
826 case XPT_PATH_INQ: /* Path routing inquiry */
827 {
828 struct ccb_pathinq *cpi = &ccb->cpi;
829
830 cpi->version_num = 1; /* XXX??? */
831 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
832 if ((ahc->features & AHC_WIDE) != 0)
833 cpi->hba_inquiry |= PI_WIDE_16;
834 if ((ahc->flags & AHC_TARGETMODE) != 0) {
835 cpi->target_sprt = PIT_PROCESSOR
836 | PIT_DISCONNECT
837 | PIT_TERM_IO;
838 } else {
839 cpi->target_sprt = 0;
840 }
841 cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE)
842 ? 0 : PIM_NOINITIATOR;
843 cpi->hba_eng_cnt = 0;
844 cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
845 cpi->max_lun = 64;
846 if (SIM_IS_SCSIBUS_B(ahc, sim)) {
847 cpi->initiator_id = ahc->our_id_b;
848 if ((ahc->flags & AHC_RESET_BUS_B) == 0)
849 cpi->hba_misc |= PIM_NOBUSRESET;
850 } else {
851 cpi->initiator_id = ahc->our_id;
852 if ((ahc->flags & AHC_RESET_BUS_A) == 0)
853 cpi->hba_misc |= PIM_NOBUSRESET;
854 }
855 cpi->bus_id = cam_sim_bus(sim);
856 cpi->base_transfer_speed = 3300;
857 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
858 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
859 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
860 cpi->unit_number = cam_sim_unit(sim);
861#ifdef AHC_NEW_TRAN_SETTINGS
862 cpi->protocol = PROTO_SCSI;
863 cpi->protocol_version = SCSI_REV_2;
864 cpi->transport = XPORT_SPI;
865 cpi->transport_version = 2;
866 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
867 if ((ahc->features & AHC_DT) != 0) {
868 cpi->transport_version = 3;
869 cpi->xport_specific.spi.ppr_options =
870 SID_SPI_CLOCK_DT_ST;
871 }
872#endif
873 cpi->ccb_h.status = CAM_REQ_CMP;
874 xpt_done(ccb);
875 break;
876 }
877 default:
878 ccb->ccb_h.status = CAM_REQ_INVALID;
879 xpt_done(ccb);
880 break;
881 }
882}
883
/*
 * Fill out a CAM transfer-settings CCB for the device addressed by
 * cts, reporting either the currently negotiated parameters or the
 * user/default parameters.  Sync period/offset, bus width, and the
 * disconnect/tagged-queuing enables are returned and the CCB is
 * marked CAM_REQ_CMP.  The caller is expected to hold/handle locking
 * for the new-settings variant; the legacy variant locks internally.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);
	
	/* Select which copy of the negotiation data to report. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;
	
	/* Report disconnect/tag enables from the matching bitmasks. */
	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;
	
	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	/*
	 * Disconnect/TQ are per-lun concepts; only report them when
	 * a specific lun (not the wildcard) was addressed.
	 */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;
	long	s;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);
	
	/* Select which copy of the negotiation data to report. */
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;
	
	/* Lock out the interrupt handler while sampling shared state. */
	ahc_lock(ahc, &s);

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;
	
	ahc_unlock(ahc, &s);

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	/* Disc/TQ validity is per-lun; skip it for the wildcard lun. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
1001
1002static void
1003ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1004{
1005 struct ahc_softc *ahc;
1006 struct cam_sim *sim;
1007
1008 sim = (struct cam_sim *)callback_arg;
1009 ahc = (struct ahc_softc *)cam_sim_softc(sim);
1010 switch (code) {
1011 case AC_LOST_DEVICE:
1012 {
1013 struct ahc_devinfo devinfo;
1014 long s;
1015
1016 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1017 xpt_path_target_id(path),
1018 xpt_path_lun_id(path),
1019 SIM_CHANNEL(ahc, sim),
1020 ROLE_UNKNOWN);
1021
1022 /*
1023 * Revert to async/narrow transfers
1024 * for the next device.
1025 */
1026 ahc_lock(ahc, &s);
1027 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1028 AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1029 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1030 /*period*/0, /*offset*/0, /*ppr_options*/0,
1031 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1032 /*paused*/FALSE);
1033 ahc_unlock(ahc, &s);
1034 break;
1035 }
1036 default:
1037 break;
1038 }
1039}
1040
/*
 * bus_dma(9) callback invoked once the data buffer for an SCB has
 * been mapped (or the mapping attempt failed).  Copies the DMA
 * segments into the SCB's hardware S/G list, finishes hardware SCB
 * setup (rate/offset/control bits), arms the command timeout, and
 * queues the SCB to the controller.
 *
 * arg        - the struct scb being set up (cast from void *).
 * dm_segs    - array of nsegments DMA segments; may be NULL when
 *              nsegments is 0 (no data phase).
 * error      - bus_dmamap_load error; EFBIG maps to CAM_REQ_TOO_BIG.
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	scb *scb;
	union	ccb *ccb;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	tmode_tstate *tstate;
	u_int	mask;
	long	s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr;

	/* Mapping failed: fail the CCB and release the SCB. */
	if (error != 0) {
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			sg->addr = dm_segs->ds_addr;
/* XXX Add in the 5th byte of the address later. */
			sg->len = dm_segs->ds_len;
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			/* Target mode: record the pending data phase. */
			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				/* Pad with a 1-byte dummy segment. */
				sg->addr = ahc->dma_bug_buf;
				sg->len = 1;
				sg++;
			}
		}
		/* Mark the last segment for the sequencer. */
		sg--;
		sg->len |= AHC_DMA_LAST_SEG;

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data phase for this command. */
		scb->hscb->sgptr = SG_LIST_NULL;
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat,
					  scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	/* Pull the negotiated rate/offset for this target. */
	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    ccb->ccb_h.target_id, &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->current.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	/* Request (re)negotiation if the caller asked for it. */
	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->current.width != 0 || tinfo->current.period != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	/* Arm the per-command timeout (default 5 seconds). */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb,
			    (ccb->ccb_h.timeout * hz) / 1000);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->features & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;

		untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		/* Not at the head: leave it queued until its turn comes. */
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Hand the SCB directly to the paused sequencer. */
		pause_sequencer(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
		unpause_sequencer(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}
1238
/*
 * Polled-mode entry point: run the interrupt handler once for this
 * SIM's controller.
 */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)cam_sim_softc(sim);
	ahc_intr(ahc);
}
1244
/*
 * Finish building the hardware SCB for a CCB: copy or reference the
 * CDB, then map the data buffer for DMA.  ahc_execute_scb() is the
 * completion callback for the mapping and performs the actual queuing
 * of the command (it may be invoked synchronously from here).
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/*
			 * Oversized or physically-addressed CDB
			 * pointers are not supported; fail the request.
			 */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/*
			 * CDBs longer than 12 bytes live in the cdb32
			 * area and are referenced via their bus address;
			 * shorter ones are embedded in the shared data.
			 */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are inline in the CCB. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase: queue immediately with an empty S/G list. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1353
1354static void
1355ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
1356
1357 if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1358 struct scb *list_scb;
1359
1360 scb->flags |= SCB_RECOVERY_SCB;
1361
1362 /*
1363 * Take all queued, but not sent SCBs out of the equation.
1364 * Also ensure that no new CCBs are queued to us while we
1365 * try to fix this problem.
1366 */
1367 if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1368 xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
1369 scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1370 }
1371
1372 /*
1373 * Go through all of our pending SCBs and remove
1374 * any scheduled timeouts for them. We will reschedule
1375 * them after we've successfully fixed this problem.
1376 */
1377 LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
1378 union ccb *ccb;
1379
1380 ccb = list_scb->io_ctx;
1381 untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
1382 }
1383 }
1384}
1385
1386void
1387ahc_timeout(void *arg)
1388{
1389 struct scb *scb;
1390 struct ahc_softc *ahc;
1391 long s;
1392 int found;
1393 u_int last_phase;
1394 int target;
1395 int lun;
1396 int i;
1397 char channel;
1398
1399 scb = (struct scb *)arg;
1400 ahc = (struct ahc_softc *)scb->io_ctx->ccb_h.ccb_ahc_ptr;
1401
1402 ahc_lock(ahc, &s);
1403
1404 /*
1405 * Ensure that the card doesn't do anything
1406 * behind our back. Also make sure that we
1407 * didn't "just" miss an interrupt that would
1408 * affect this timeout.
1409 */
1410 do {
1411 ahc_intr(ahc);
1412 pause_sequencer(ahc);
1413 } while (ahc_inb(ahc, INTSTAT) & INT_PEND);
1414
1415 /* Make sure the sequencer is in a safe location. */
1416 ahc_clear_critical_section(ahc);
1417
1418 ahc_print_path(ahc, scb);
1419 if ((scb->flags & SCB_ACTIVE) == 0) {
1420 /* Previous timeout took care of me already */
1421 printf("Timedout SCB %d handled by another timeout\n",
1422 scb->hscb->tag);
1423 unpause_sequencer(ahc);
1424 ahc_unlock(ahc, &s);
1425 return;
1426 }
1427
1428 target = SCB_GET_TARGET(ahc, scb);
1429 channel = SCB_GET_CHANNEL(ahc, scb);
1430 lun = SCB_GET_LUN(scb);
1431
1432 printf("SCB 0x%x - timed out ", scb->hscb->tag);
1433 /*
1434 * Take a snapshot of the bus state and print out
1435 * some information so we can track down driver bugs.
1436 */
1437 last_phase = ahc_inb(ahc, LASTPHASE);
1438
1439 for (i = 0; i < num_phases; i++) {
1440 if (last_phase == phase_table[i].phase)
1441 break;
1442 }
1443 printf("%s", phase_table[i].phasemsg);
1444
1445 printf(", SEQADDR == 0x%x\n",
1446 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
1447
1448 if (scb->sg_count > 0) {
1449 for (i = 0; i < scb->sg_count; i++) {
1450 printf("sg[%d] - Addr 0x%x : Length %d\n",
1451 i,
1452 scb->sg_list[i].addr,
1453 scb->sg_list[i].len & AHC_SG_LEN_MASK);
1454 }
1455 }
1456 if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
1457 /*
1458 * Been down this road before.
1459 * Do a full bus reset.
1460 */
1461bus_reset:
1462 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
1463 found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
1464 printf("%s: Issued Channel %c Bus Reset. "
1465 "%d SCBs aborted\n", ahc_name(ahc), channel, found);
1466 } else {
1467 /*
1468 * If we are a target, transition to bus free and report
1469 * the timeout.
1470 *
1471 * The target/initiator that is holding up the bus may not
1472 * be the same as the one that triggered this timeout
1473 * (different commands have different timeout lengths).
1474 * If the bus is idle and we are actiing as the initiator
1475 * for this request, queue a BDR message to the timed out
1476 * target. Otherwise, if the timed out transaction is
1477 * active:
1478 * Initiator transaction:
1479 * Stuff the message buffer with a BDR message and assert
1480 * ATN in the hopes that the target will let go of the bus
1481 * and go to the mesgout phase. If this fails, we'll
1482 * get another timeout 2 seconds later which will attempt
1483 * a bus reset.
1484 *
1485 * Target transaction:
1486 * Transition to BUS FREE and report the error.
1487 * It's good to be the target!
1488 */
1489 u_int active_scb_index;
1490
1491 active_scb_index = ahc_inb(ahc, SCB_TAG);
1492
1493 if (last_phase != P_BUSFREE
1494 && (active_scb_index < ahc->scb_data->numscbs)) {
1495 struct scb *active_scb;
1496
1497 /*
1498 * If the active SCB is not from our device,
1499 * assume that another device is hogging the bus
1500 * and wait for it's timeout to expire before
1501 * taking additional action.
1502 */
1503 active_scb = ahc_lookup_scb(ahc, active_scb_index);
1504 if (active_scb->hscb->scsiid != scb->hscb->scsiid
1505 || active_scb->hscb->lun != scb->hscb->lun) {
1506 struct ccb_hdr *ccbh;
1507 u_int newtimeout;
1508
1509 ahc_print_path(ahc, scb);
1510 printf("Other SCB Timeout%s",
1511 (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
1512 ? " again\n" : "\n");
1513 scb->flags |= SCB_OTHERTCL_TIMEOUT;
1514 newtimeout =
1515 MAX(active_scb->io_ctx->ccb_h.timeout,
1516 scb->io_ctx->ccb_h.timeout);
1517 ccbh = &scb->io_ctx->ccb_h;
1518 scb->io_ctx->ccb_h.timeout_ch =
1519 timeout(ahc_timeout, scb,
1520 (newtimeout * hz) / 1000);
1521 ahc_unlock(ahc, &s);
1522 return;
1523 }
1524
1525 /* It's us */
1526 if ((scb->hscb->control & TARGET_SCB) != 0) {
1527
1528 /*
1529 * Send back any queued up transactions
1530 * and properly record the error condition.
1531 */
1532 ahc_freeze_devq(ahc, scb);
1533 ahc_set_transaction_status(scb,
1534 CAM_CMD_TIMEOUT);
1535 ahc_freeze_scb(scb);
1536 ahc_done(ahc, scb);
1537
1538 /* Will clear us from the bus */
1539 restart_sequencer(ahc);
1540 ahc_unlock(ahc, &s);
1541 return;
1542 }
1543
1544 ahc_set_recoveryscb(ahc, active_scb);
1545 ahc_outb(ahc, MSG_OUT, MSG_BUS_DEV_RESET);
1545 ahc_outb(ahc, MSG_OUT, HOST_MSG);
1546 ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
1547 ahc_print_path(ahc, active_scb);
1548 printf("BDR message in message buffer\n");
1549 active_scb->flags |= SCB_DEVICE_RESET;
1550 active_scb->io_ctx->ccb_h.timeout_ch =
1551 timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
1552 unpause_sequencer(ahc);
1553 } else {
1554 int disconnected;
1555
1556 /* XXX Shouldn't panic. Just punt instead */
1557 if ((scb->hscb->control & TARGET_SCB) != 0)
1558 panic("Timed-out target SCB but bus idle");
1559
1560 if (last_phase != P_BUSFREE
1561 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
1562 /* XXX What happened to the SCB? */
1563 /* Hung target selection. Goto busfree */
1564 printf("%s: Hung target selection\n",
1565 ahc_name(ahc));
1566 restart_sequencer(ahc);
1567 ahc_unlock(ahc, &s);
1568 return;
1569 }
1570
1571 if (ahc_search_qinfifo(ahc, target, channel, lun,
1572 scb->hscb->tag, ROLE_INITIATOR,
1573 /*status*/0, SEARCH_COUNT) > 0) {
1574 disconnected = FALSE;
1575 } else {
1576 disconnected = TRUE;
1577 }
1578
1579 if (disconnected) {
1580 struct scb *prev_scb;
1581
1582 ahc_set_recoveryscb(ahc, scb);
1583 /*
1546 ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
1547 ahc_print_path(ahc, active_scb);
1548 printf("BDR message in message buffer\n");
1549 active_scb->flags |= SCB_DEVICE_RESET;
1550 active_scb->io_ctx->ccb_h.timeout_ch =
1551 timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
1552 unpause_sequencer(ahc);
1553 } else {
1554 int disconnected;
1555
1556 /* XXX Shouldn't panic. Just punt instead */
1557 if ((scb->hscb->control & TARGET_SCB) != 0)
1558 panic("Timed-out target SCB but bus idle");
1559
1560 if (last_phase != P_BUSFREE
1561 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
1562 /* XXX What happened to the SCB? */
1563 /* Hung target selection. Goto busfree */
1564 printf("%s: Hung target selection\n",
1565 ahc_name(ahc));
1566 restart_sequencer(ahc);
1567 ahc_unlock(ahc, &s);
1568 return;
1569 }
1570
1571 if (ahc_search_qinfifo(ahc, target, channel, lun,
1572 scb->hscb->tag, ROLE_INITIATOR,
1573 /*status*/0, SEARCH_COUNT) > 0) {
1574 disconnected = FALSE;
1575 } else {
1576 disconnected = TRUE;
1577 }
1578
1579 if (disconnected) {
1580 struct scb *prev_scb;
1581
1582 ahc_set_recoveryscb(ahc, scb);
1583 /*
1584 * Simply set the MK_MESSAGE control bit.
1585 */
1586 scb->hscb->control |= MK_MESSAGE;
1587 scb->flags |= SCB_QUEUED_MSG
1588 | SCB_DEVICE_RESET;
1589
1590 /*
1591 * Actually re-queue this SCB in an attempt
1592 * to select the device before it reconnects.
1593 * In either case (selection or reselection),
1594 * we will now issue a target reset to the
1595 * timed-out device.
1596 *
1584 * Actually re-queue this SCB in an attempt
1585 * to select the device before it reconnects.
1586 * In either case (selection or reselection),
1587 * we will now issue a target reset to the
1588 * timed-out device.
1589 *
1590 * Set the MK_MESSAGE control bit indicating
1591 * that we desire to send a message. We
1592 * also set the disconnected flag since
1593 * in the paging case there is no guarantee
1594 * that our SCB control byte matches the
1595 * version on the card. We don't want the
1596 * sequencer to abort the command thinking
1597 * an unsolicited reselection occurred.
1598 */
1599 scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
1600 scb->flags |= SCB_DEVICE_RESET;
1601
1602 /*
1597 * Remove any cached copy of this SCB in the
1598 * disconnected list in preparation for the
1599 * queuing of our abort SCB. We use the
1600 * same element in the SCB, SCB_NEXT, for
1601 * both the qinfifo and the disconnected list.
1602 */
1603 ahc_search_disc_list(ahc, target, channel,
1604 lun, scb->hscb->tag,
1605 /*stop_on_first*/TRUE,
1606 /*remove*/TRUE,
1603 * Remove any cached copy of this SCB in the
1604 * disconnected list in preparation for the
1605 * queuing of our abort SCB. We use the
1606 * same element in the SCB, SCB_NEXT, for
1607 * both the qinfifo and the disconnected list.
1608 */
1609 ahc_search_disc_list(ahc, target, channel,
1610 lun, scb->hscb->tag,
1611 /*stop_on_first*/TRUE,
1612 /*remove*/TRUE,
1607 /*save_state*/TRUE);
1613 /*save_state*/FALSE);
1608
1609 /*
1614
1615 /*
1616 * In the non-paging case, the sequencer will
1617 * never re-reference the in-core SCB.
1618 * To make sure we are notified during
1619 * reslection, set the MK_MESSAGE flag in
1620 * the card's copy of the SCB.
1621 */
1622 if ((ahc->flags & AHC_PAGESCBS) != 0) {
1623 ahc_outb(ahc, SCBPTR, scb->hscb->tag);
1624 ahc_outb(ahc, SCB_CONTROL,
1625 ahc_inb(ahc, SCB_CONTROL)
1626 | MK_MESSAGE);
1627 }
1628
1629 /*
1610 * Clear out any entries in the QINFIFO first
1611 * so we are the next SCB for this target
1612 * to run.
1613 */
1614 ahc_search_qinfifo(ahc,
1615 SCB_GET_TARGET(ahc, scb),
1616 channel, SCB_GET_LUN(scb),
1617 SCB_LIST_NULL,
1618 ROLE_INITIATOR,
1619 CAM_REQUEUE_REQ,
1620 SEARCH_COMPLETE);
1621 ahc_print_path(ahc, scb);
1622 printf("Queuing a BDR SCB\n");
1623 prev_scb = NULL;
1624 if (ahc_qinfifo_count(ahc) != 0) {
1625 u_int prev_tag;
1626
1627 prev_tag =
1628 ahc->qinfifo[ahc->qinfifonext - 1];
1629 prev_scb = ahc_lookup_scb(ahc,
1630 prev_tag);
1631 }
1632 ahc_qinfifo_requeue(ahc, prev_scb, scb);
1630 * Clear out any entries in the QINFIFO first
1631 * so we are the next SCB for this target
1632 * to run.
1633 */
1634 ahc_search_qinfifo(ahc,
1635 SCB_GET_TARGET(ahc, scb),
1636 channel, SCB_GET_LUN(scb),
1637 SCB_LIST_NULL,
1638 ROLE_INITIATOR,
1639 CAM_REQUEUE_REQ,
1640 SEARCH_COMPLETE);
1641 ahc_print_path(ahc, scb);
1642 printf("Queuing a BDR SCB\n");
1643 prev_scb = NULL;
1644 if (ahc_qinfifo_count(ahc) != 0) {
1645 u_int prev_tag;
1646
1647 prev_tag =
1648 ahc->qinfifo[ahc->qinfifonext - 1];
1649 prev_scb = ahc_lookup_scb(ahc,
1650 prev_tag);
1651 }
1652 ahc_qinfifo_requeue(ahc, prev_scb, scb);
1653 ahc_outb(ahc, SCBPTR, active_scb_index);
1633 scb->io_ctx->ccb_h.timeout_ch =
1634 timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
1635 unpause_sequencer(ahc);
1636 } else {
1637 /* Go "immediatly" to the bus reset */
1638 /* This shouldn't happen */
1639 ahc_set_recoveryscb(ahc, scb);
1640 ahc_print_path(ahc, scb);
1641 printf("SCB %d: Immediate reset. "
1642 "Flags = 0x%x\n", scb->hscb->tag,
1643 scb->flags);
1644 goto bus_reset;
1645 }
1646 }
1647 }
1648 ahc_unlock(ahc, &s);
1649}
1650
/*
 * Handle an XPT_ABORT request.  Only target-mode CCBs (accept-TIO,
 * immediate-notify) that are still sitting on one of our pending
 * lists can be aborted here; anything already in flight, and normal
 * SCSI I/O, is not yet supported.  The abort-request CCB itself is
 * always completed via xpt_done() with the appropriate status.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct tmode_tstate *tstate;
		struct tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Pick the pending list the CCB would be waiting on. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Walk the singly-linked list and unlink the
			 * CCB if present (head case handled separately
			 * since SLIST has no O(1) arbitrary removal).
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* list == NULL (XPT_CONT_TARGET_IO): treat like SCSI I/O. */
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1732
1733void
1734ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1735 u_int lun, ac_code code)
1736{
1737 struct ccb_trans_settings cts;
1738 struct cam_path *path;
1739 void *arg;
1740 int error;
1741
1742 arg = NULL;
1743 error = ahc_create_path(ahc, channel, target, lun, &path);
1744
1745 if (error != CAM_REQ_CMP)
1746 return;
1747
1748 switch (code) {
1749 case AC_TRANSFER_NEG:
1750#ifdef AHC_NEW_TRAN_SETTINGS
1751 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1752#else
1753 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1754#endif
1755 cts.ccb_h.path = path;
1756 cts.ccb_h.target_id = target;
1757 cts.ccb_h.target_lun = lun;
1758 ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1759 : ahc->our_id_b,
1760 channel, &cts);
1761 arg = &cts;
1762 break;
1763 case AC_SENT_BDR:
1764 case AC_BUS_RESET:
1765 break;
1766 default:
1767 panic("ahc_send_async: Unexpected async event");
1768 }
1769 xpt_async(code, path, arg);
1770}
1771
/*
 * Platform hook invoked when tagged queuing is enabled or disabled
 * for a device.  FreeBSD keeps no extra per-device tag state, so this
 * is intentionally a no-op.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1777
1778int
1779ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1780{
1781 ahc->platform_data =
1782 malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
1783 if (ahc->platform_data == NULL)
1784 return (ENOMEM);
1785 memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
1786 return (0);
1787}
1788
1789void
1790ahc_platform_free(struct ahc_softc *ahc)
1791{
1792 if (ahc->platform_data != NULL) {
1793 if (ahc->platform_data->regs != NULL)
1794 bus_release_resource(ahc->dev_softc,
1795 ahc->platform_data->regs_res_type,
1796 ahc->platform_data->regs_res_id,
1797 ahc->platform_data->regs);
1798
1799 if (ahc->platform_data->irq != NULL)
1800 bus_release_resource(ahc->dev_softc,
1801 ahc->platform_data->irq_res_type,
1802 0, ahc->platform_data->irq);
1803
1804 free(ahc->platform_data, M_DEVBUF);
1805 }
1806}
1807
/*
 * Ordering predicate for sorting softcs.  FreeBSD attaches the
 * controllers in probe order and performs no sorting, so every pair
 * compares equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}
1814
#if UNUSED
/*
 * Debug helper: hex-dump the bytes of a received target-mode command,
 * eight values per line, from initiator_channel through the end of
 * the structure.
 */
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *cur;
	uint8_t *end;
	int col;

	cur = &cmd->initiator_channel;
	/* One element past cmd bounds the dump. */
	end = &cmd[1].initiator_channel;

	col = 0;
	while (cur < end) {
		if (col == 0)
			printf("\t");
		printf("%#x", *cur++);
		col++;
		if (col == 8) {
			printf("\n");
			col = 0;
		} else {
			printf(", ");
		}
	}
}
#endif
1654 scb->io_ctx->ccb_h.timeout_ch =
1655 timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
1656 unpause_sequencer(ahc);
1657 } else {
1658 /* Go "immediatly" to the bus reset */
1659 /* This shouldn't happen */
1660 ahc_set_recoveryscb(ahc, scb);
1661 ahc_print_path(ahc, scb);
1662 printf("SCB %d: Immediate reset. "
1663 "Flags = 0x%x\n", scb->hscb->tag,
1664 scb->flags);
1665 goto bus_reset;
1666 }
1667 }
1668 }
1669 ahc_unlock(ahc, &s);
1670}
1671
/*
 * Service an XPT_ABORT request: try to abort the CCB referenced by
 * ccb->cab.abort_ccb and report the outcome in ccb->ccb_h.status.
 * Target-mode CCBs still sitting in our software queues can be
 * unlinked and completed directly; CCBs already in flight are not
 * yet handled (see the XXX below).
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct tmode_tstate *tstate;
		struct tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		/* Locate the target/lun state the CCB belongs to. */
		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Select the software queue the CCB would be waiting on. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Walk the singly linked list by hand so we can
			 * unlink the CCB once we find it.
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						/* Splice nextelm out. */
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				/* Complete the aborted CCB back to CAM. */
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1753
1754void
1755ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1756 u_int lun, ac_code code)
1757{
1758 struct ccb_trans_settings cts;
1759 struct cam_path *path;
1760 void *arg;
1761 int error;
1762
1763 arg = NULL;
1764 error = ahc_create_path(ahc, channel, target, lun, &path);
1765
1766 if (error != CAM_REQ_CMP)
1767 return;
1768
1769 switch (code) {
1770 case AC_TRANSFER_NEG:
1771#ifdef AHC_NEW_TRAN_SETTINGS
1772 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1773#else
1774 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1775#endif
1776 cts.ccb_h.path = path;
1777 cts.ccb_h.target_id = target;
1778 cts.ccb_h.target_lun = lun;
1779 ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1780 : ahc->our_id_b,
1781 channel, &cts);
1782 arg = &cts;
1783 break;
1784 case AC_SENT_BDR:
1785 case AC_BUS_RESET:
1786 break;
1787 default:
1788 panic("ahc_send_async: Unexpected async event");
1789 }
1790 xpt_async(code, path, arg);
1791}
1792
/*
 * Platform hook invoked when tagged queuing is enabled or disabled
 * for a device.  FreeBSD keeps no extra per-device tag state, so this
 * is intentionally a no-op.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1798
1799int
1800ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1801{
1802 ahc->platform_data =
1803 malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
1804 if (ahc->platform_data == NULL)
1805 return (ENOMEM);
1806 memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
1807 return (0);
1808}
1809
1810void
1811ahc_platform_free(struct ahc_softc *ahc)
1812{
1813 if (ahc->platform_data != NULL) {
1814 if (ahc->platform_data->regs != NULL)
1815 bus_release_resource(ahc->dev_softc,
1816 ahc->platform_data->regs_res_type,
1817 ahc->platform_data->regs_res_id,
1818 ahc->platform_data->regs);
1819
1820 if (ahc->platform_data->irq != NULL)
1821 bus_release_resource(ahc->dev_softc,
1822 ahc->platform_data->irq_res_type,
1823 0, ahc->platform_data->irq);
1824
1825 free(ahc->platform_data, M_DEVBUF);
1826 }
1827}
1828
/*
 * Ordering predicate for sorting softcs.  FreeBSD attaches the
 * controllers in probe order and performs no sorting, so every pair
 * compares equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}
1835
#if UNUSED
/*
 * Debug helper: hex-dump the bytes of a received target-mode command,
 * eight values per line, from initiator_channel through the end of
 * the structure.
 */
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *cur;
	uint8_t *end;
	int col;

	cur = &cmd->initiator_channel;
	/* One element past cmd bounds the dump. */
	end = &cmd[1].initiator_channel;

	col = 0;
	while (cur < end) {
		if (col == 0)
			printf("\t");
		printf("%#x", *cur++);
		col++;
		if (col == 8) {
			printf("\n");
			col = 0;
		} else {
			printf(", ");
		}
	}
}
#endif