/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 *
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * i386/eisa/adv_eisa.c	ABP742, ABP752
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 *			ABP970, ABP970U
 *
 * Copyright (c) 1996-1998 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: advansys.c,v 1.4 1998/10/15 23:47:14 gibbs Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>

u_long adv_unit;

static void     adv_action(struct cam_sim *sim, union ccb *ccb);
static void     adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
                                int nsegments, int error);
static void     adv_poll(struct cam_sim *sim);
static void     adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
                adv_alloc_ccb_info(struct adv_softc *adv);
static void     adv_destroy_ccb_info(struct adv_softc *adv,
                                     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
                adv_get_ccb_info(struct adv_softc *adv);
static __inline void    adv_free_ccb_info(struct adv_softc *adv,
                                          struct adv_ccb_info *cinfo);

struct adv_softc *advsoftcs[NADV];   /* XXX Config should handle this */

static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
        struct adv_ccb_info *cinfo;
        int opri;

        opri = splcam();
        if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
                SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
        } else {
                cinfo = adv_alloc_ccb_info(adv);
        }
        splx(opri);

        return (cinfo);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
        int opri;

        opri = splcam();
        cinfo->state = ACCB_FREE;
        SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
        splx(opri);
}

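/*
 * Callback for bus_dmamap_load() of single segment, contiguous
 * allocations (e.g. our sense buffer array).  The physical address
 * of the first (and only) segment is recorded through arg.
 */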
void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t* physaddr;

        physaddr = (bus_addr_t*)arg;
        *physaddr = segs->ds_addr;
}

char *
adv_name(struct adv_softc *adv)
{
        static char name[10];

        sprintf(name, "adv%d", adv->unit);
        return (name);
}

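/*
 * Main CAM entry point.  The XPT layer hands us CCBs through this
 * function and we dispatch on ccb_h.func_code.  SCSI I/O requests
 * are mapped for DMA (possibly deferring completion to
 * adv_execute_ccb()), while transfer-setting, geometry, bus reset,
 * and path inquiry requests are serviced immediately.
 */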
static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
        struct adv_softc *adv;

        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

        adv = (struct adv_softc *)cam_sim_softc(sim);

        switch (ccb->ccb_h.func_code) {
        /* Common cases first */
        case XPT_SCSI_IO:       /* Execute the requested I/O operation */
        {
                struct ccb_hdr *ccb_h;
                struct ccb_scsiio *csio;
                struct adv_ccb_info *cinfo;

                ccb_h = &ccb->ccb_h;
                csio = &ccb->csio;
                cinfo = adv_get_ccb_info(adv);
                if (cinfo == NULL)
                        panic("XXX Handle CCB info error!!!");

                ccb_h->ccb_cinfo_ptr = cinfo;

                /* Only use S/G if there is a transfer */
                if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
                        if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
                                /*
                                 * We've been given a pointer
                                 * to a single buffer
                                 */
                                if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
                                        int s;
                                        int error;

                                        s = splsoftvm();
                                        error =
                                            bus_dmamap_load(adv->buffer_dmat,
                                                            cinfo->dmamap,
                                                            csio->data_ptr,
                                                            csio->dxfer_len,
                                                            adv_execute_ccb,
                                                            csio, /*flags*/0);
                                        if (error == EINPROGRESS) {
                                                /*
                                                 * So as to maintain ordering,
                                                 * freeze the controller queue
                                                 * until our mapping is
                                                 * returned.
                                                 */
                                                xpt_freeze_simq(adv->sim,
                                                                /*count*/1);
                                                cinfo->state |=
                                                    ACCB_RELEASE_SIMQ;
                                        }
                                        splx(s);
                                } else {
                                        struct bus_dma_segment seg;

                                        /* Pointer to physical buffer */
                                        seg.ds_addr =
                                            (bus_addr_t)csio->data_ptr;
                                        seg.ds_len = csio->dxfer_len;
                                        adv_execute_ccb(csio, &seg, 1, 0);
                                }
                        } else {
                                struct bus_dma_segment *segs;
                                if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
                                        panic("adv_setup_data - Physical "
                                              "segment pointers unsupported");

                                if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
                                        panic("adv_setup_data - Virtual "
                                              "segment addresses unsupported");

                                /* Just use the segments provided */
                                segs = (struct bus_dma_segment *)csio->data_ptr;
                                adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
                        }
                } else {
                        adv_execute_ccb(ccb, NULL, 0, 0);
                }
                break;
        }
        case XPT_RESET_DEV:     /* Bus Device Reset the specified SCSI device */
        case XPT_TARGET_IO:     /* Execute target I/O request */
        case XPT_ACCEPT_TARGET_IO:      /* Accept Host Target Mode CDB */
        case XPT_CONT_TARGET_IO:        /* Continue Host Target I/O Connection*/
        case XPT_EN_LUN:                /* Enable LUN as a target */
        case XPT_ABORT:                 /* Abort the specified CCB */
                /* XXX Implement */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        case XPT_SET_TRAN_SETTINGS:
        {
                struct ccb_trans_settings *cts;
                target_bit_vector targ_mask;
                struct adv_target_transinfo *tconf;
                u_int update_type;
                int s;

                cts = &ccb->cts;
                targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
                tconf = &adv->tinfo[cts->ccb_h.target_id];
                update_type = 0;
                if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
                        update_type |= ADV_TRANS_GOAL;
                if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
                        update_type |= ADV_TRANS_USER;

                s = splcam();

                if ((update_type & ADV_TRANS_GOAL) != 0) {
                        if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
                                if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
                                        adv->disc_enable |= targ_mask;
                                else
                                        adv->disc_enable &= ~targ_mask;
                                adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
                                                 adv->disc_enable);
                        }

                        if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
                                if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
                                        adv->cmd_qng_enabled |= targ_mask;
                                else
                                        adv->cmd_qng_enabled &= ~targ_mask;
                        }
                }

                if ((update_type & ADV_TRANS_USER) != 0) {
                        if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
                                if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
                                        adv->user_disc_enable |= targ_mask;
                                else
                                        adv->user_disc_enable &= ~targ_mask;
                        }

                        if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
                                if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
                                        adv->user_cmd_qng_enabled |= targ_mask;
                                else
                                        adv->user_cmd_qng_enabled &= ~targ_mask;
                        }
                }

                if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) {
                        if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
                                cts->sync_offset = 0;

                        adv_period_offset_to_sdtr(adv, &cts->sync_period,
                                                  &cts->sync_offset,
                                                  cts->ccb_h.target_id);

                        adv_set_syncrate(adv, /*struct cam_path */NULL,
                                         cts->ccb_h.target_id, cts->sync_period,
                                         cts->sync_offset, update_type);
                }
                splx(s);
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_GET_TRAN_SETTINGS:
        /* Get default/user set transfer settings for the target */
        {
                struct ccb_trans_settings *cts;
                struct adv_transinfo *tconf;
                target_bit_vector target_mask;
                int s;

                cts = &ccb->cts;
                target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

                cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

                s = splcam();
                if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
                        tconf = &adv->tinfo[cts->ccb_h.target_id].current;
                        if ((adv->disc_enable & target_mask) != 0)
                                cts->flags |= CCB_TRANS_DISC_ENB;
                        if ((adv->cmd_qng_enabled & target_mask) != 0)
                                cts->flags |= CCB_TRANS_TAG_ENB;
                } else {
                        tconf = &adv->tinfo[cts->ccb_h.target_id].user;
                        if ((adv->user_disc_enable & target_mask) != 0)
                                cts->flags |= CCB_TRANS_DISC_ENB;
                        if ((adv->user_cmd_qng_enabled & target_mask) != 0)
                                cts->flags |= CCB_TRANS_TAG_ENB;
                }

                cts->sync_period = tconf->period;
                cts->sync_offset = tconf->offset;
                splx(s);

                cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
                cts->valid = CCB_TRANS_SYNC_RATE_VALID
                           | CCB_TRANS_SYNC_OFFSET_VALID
                           | CCB_TRANS_BUS_WIDTH_VALID
                           | CCB_TRANS_DISC_VALID
                           | CCB_TRANS_TQ_VALID;
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_CALC_GEOMETRY:
        {
                struct ccb_calc_geometry *ccg;
                u_int32_t size_mb;
                u_int32_t secs_per_cylinder;
                int extended;

                ccg = &ccb->ccg;
                size_mb = ccg->volume_size
                        / ((1024L * 1024L) / ccg->block_size);
                extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;

                if (size_mb > 1024 && extended) {
                        ccg->heads = 255;
                        ccg->secs_per_track = 63;
                } else {
                        ccg->heads = 64;
                        ccg->secs_per_track = 32;
                }
                secs_per_cylinder = ccg->heads * ccg->secs_per_track;
                ccg->cylinders = ccg->volume_size / secs_per_cylinder;
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_RESET_BUS:             /* Reset the specified SCSI bus */
        {
                int s;

                s = splcam();
                adv_stop_execution(adv);
                adv_reset_bus(adv);
                adv_start_execution(adv);
                splx(s);

                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_TERM_IO:               /* Terminate the I/O process */
                /* XXX Implement */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        case XPT_PATH_INQ:              /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                cpi->version_num = 1; /* XXX??? */
                cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
                cpi->target_sprt = 0;
                cpi->hba_misc = 0;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 7;
                cpi->max_lun = 7;
                cpi->initiator_id = adv->scsi_id;
                cpi->bus_id = cam_sim_bus(sim);
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
                strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                cpi->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        }
}

/*
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)

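/*
 * Called by bus_dmamap_load() (or directly by adv_action() when no
 * deferred mapping is required) once the data buffer is mapped.
 * Builds an adv_scsi_q describing the request, honors any abort that
 * raced in while the mapping was deferred, and hands the request to
 * the microcode via adv_execute_scsi_queue().
 */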
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
                int nsegments, int error)
{
        struct ccb_scsiio *csio;
        struct ccb_hdr *ccb_h;
        struct cam_sim *sim;
        struct adv_softc *adv;
        struct adv_ccb_info *cinfo;
        struct adv_scsi_q scsiq;
        struct adv_sg_head sghead;
        int s;

        csio = (struct ccb_scsiio *)arg;
        ccb_h = &csio->ccb_h;
        sim = xpt_path_sim(ccb_h->path);
        adv = (struct adv_softc *)cam_sim_softc(sim);
        cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

        if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
                if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
                        /* XXX Need phystovirt!!!! */
                        /* How about pmap_kenter??? */
                        scsiq.cdbptr = csio->cdb_io.cdb_ptr;
                } else {
                        scsiq.cdbptr = csio->cdb_io.cdb_ptr;
                }
        } else {
                scsiq.cdbptr = csio->cdb_io.cdb_bytes;
        }
        /*
         * Build up the request
         */
        scsiq.q1.status = 0;
        scsiq.q1.q_no = 0;
        scsiq.q1.cntl = 0;
        scsiq.q1.sg_queue_cnt = 0;
        scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
        scsiq.q1.target_lun = ccb_h->target_lun;
        scsiq.q1.sense_len = csio->sense_len;
        scsiq.q1.extra_bytes = 0;
        scsiq.q2.ccb_ptr = (u_int32_t)csio;
        scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
                                              ccb_h->target_lun);
        scsiq.q2.flag = 0;
        scsiq.q2.cdb_len = csio->cdb_len;
        if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
                scsiq.q2.tag_code = csio->tag_action;
        else
                scsiq.q2.tag_code = 0;
        scsiq.q2.vm_id = 0;

        if (nsegments != 0) {
                bus_dmasync_op_t op;

                scsiq.q1.data_addr = dm_segs->ds_addr;
                scsiq.q1.data_cnt = dm_segs->ds_len;
                if (nsegments > 1) {
                        scsiq.q1.cntl |= QC_SG_HEAD;
                        sghead.entry_cnt
                            = sghead.entry_to_copy
                            = nsegments;
                        sghead.res = 0;
                        sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
                        scsiq.sg_head = &sghead;
                } else {
                        scsiq.sg_head = NULL;
                }
                if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
                        op = BUS_DMASYNC_PREREAD;
                else
                        op = BUS_DMASYNC_PREWRITE;
                bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
        } else {
                scsiq.q1.data_addr = 0;
                scsiq.q1.data_cnt = 0;
                scsiq.sg_head = NULL;
        }

        s = splcam();

        /*
         * Last time we need to check if this SCB needs to
         * be aborted.
         */
        if (ccb_h->status != CAM_REQ_INPROG) {
                if (nsegments != 0) {
                        bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
                }
                if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0) {
                        ccb_h->status |= CAM_RELEASE_SIMQ;
                }
                adv_free_ccb_info(adv, cinfo);
                xpt_done((union ccb *)csio);
                splx(s);
                return;
        }

        if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
                /* Temporary resource shortage */
                if (nsegments != 0) {
                        bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
                }
                ccb_h->status = CAM_REQUEUE_REQ;
                if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0)
                        ccb_h->status |= CAM_RELEASE_SIMQ;

                /* Unfreeze when resources are available */
                xpt_freeze_simq(adv->sim, /*count*/1);

                adv_free_ccb_info(adv, cinfo);
                xpt_done((union ccb *)csio);
                splx(s);
                return;
        }
        cinfo->state |= ACCB_ACTIVE;
        ccb_h->status |= CAM_SIM_QUEUED;
        LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
        /* Schedule our timeout */
        ccb_h->timeout_ch =
            timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
        splx(s);
}

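/*
 * Allocate a CCB info structure and its DMA map.  Called from
 * adv_get_ccb_info() when the free list is empty.  Returns NULL on
 * allocation failure.
 */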
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
        int error;
        struct adv_ccb_info *cinfo;

        cinfo = malloc(sizeof(*cinfo), M_DEVBUF, M_NOWAIT);
        if (cinfo == NULL) {
                printf("%s: Can't malloc CCB info\n", adv_name(adv));
                return (NULL);
        }
        cinfo->state = ACCB_FREE;
        error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
                                  &cinfo->dmamap);
        if (error != 0) {
                printf("%s: Unable to allocate CCB info "
                       "dmamap - error %d\n", adv_name(adv), error);
                free(cinfo, M_DEVBUF);
                cinfo = NULL;
        }
        return (cinfo);
}
570
571static void
572adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
573{
574 bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
575 free(cinfo, M_DEVBUF);
576}
577
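/*
 * Timeout recovery.  The first timeout on a connection freezes the
 * SIM queue and attempts an abort of the offending CCB; if the abort
 * itself times out, we escalate to a full bus reset.
 */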
void
adv_timeout(void *arg)
{
        int s;
        union ccb *ccb;
        struct adv_softc *adv;
        struct adv_ccb_info *cinfo;

        ccb = (union ccb *)arg;
        adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
        cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

        xpt_print_path(ccb->ccb_h.path);
        printf("Timed out\n");

        s = splcam();
        /* Have we been taken care of already?? */
        if (cinfo == NULL || cinfo->state == ACCB_FREE) {
                splx(s);
                return;
        }

        adv_stop_execution(adv);

        if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
                struct ccb_hdr *ccb_h;

                /*
                 * In order to simplify the recovery process, we ask the XPT
                 * layer to halt the queue of new transactions and we traverse
                 * the list of pending CCBs and remove their timeouts.  This
                 * means that the driver attempts to clear only one error
                 * condition at a time.  In general, timeouts that occur
                 * close together are related anyway, so there is no benefit
                 * in attempting to handle errors in parallel.  Timeouts will
                 * be reinstated when the recovery process ends.
                 */
                if ((cinfo->state & ACCB_RELEASE_SIMQ) == 0) {
                        xpt_freeze_simq(adv->sim, /*count*/1);
                        cinfo->state |= ACCB_RELEASE_SIMQ;
                }

                /* This CCB is the CCB representing our recovery actions */
                cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

                ccb_h = LIST_FIRST(&adv->pending_ccbs);
                while (ccb_h != NULL) {
                        untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
                        ccb_h = LIST_NEXT(ccb_h, sim_links.le);
                }

                /* XXX Should send a BDR */
                /* Attempt an abort as our first tack */
                xpt_print_path(ccb->ccb_h.path);
                printf("Attempting abort\n");
                adv_abort_ccb(adv, ccb->ccb_h.target_id,
                              ccb->ccb_h.target_lun, ccb,
                              CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
                ccb->ccb_h.timeout_ch =
                    timeout(adv_timeout, ccb, 2 * hz);
        } else {
                /* Our attempt to perform an abort failed, go for a reset */
                xpt_print_path(ccb->ccb_h.path);
                printf("Resetting bus\n");
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
                adv_reset_bus(adv);
        }
        adv_start_execution(adv);
        splx(s);
}

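/*
 * Allocate and minimally initialize a softc for the given unit.
 * Called from the bus specific probe and attach code.
 */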
struct adv_softc *
adv_alloc(int unit, bus_space_tag_t tag, bus_space_handle_t bsh)
{
        struct adv_softc *adv;

        if (unit >= NADV) {
                printf("adv: unit number (%d) too high\n", unit);
                return NULL;
        }

        /*
         * Allocate a storage area for us
         */
        if (advsoftcs[unit]) {
                printf("adv%d: memory already allocated\n", unit);
                return NULL;
        }

        adv = malloc(sizeof(struct adv_softc), M_DEVBUF, M_NOWAIT);
        if (!adv) {
                printf("adv%d: cannot malloc!\n", unit);
                return NULL;
        }
        bzero(adv, sizeof(struct adv_softc));
        LIST_INIT(&adv->pending_ccbs);
        SLIST_INIT(&adv->free_ccb_infos);
        advsoftcs[unit] = adv;
        adv->unit = unit;
        adv->tag = tag;
        adv->bsh = bsh;

        return(adv);
}

void
adv_free(struct adv_softc *adv)
{
        switch (adv->init_level) {
        case 5:
        {
                struct adv_ccb_info *cinfo;

                while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
                        SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
                        adv_destroy_ccb_info(adv, cinfo);
                }

                bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
        }
        case 4:
                bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
                                adv->sense_dmamap);
        case 3:
                bus_dma_tag_destroy(adv->sense_dmat);
        case 2:
                bus_dma_tag_destroy(adv->buffer_dmat);
        case 1:
                bus_dma_tag_destroy(adv->parent_dmat);
        case 0:
                break;
        }
        free(adv, M_DEVBUF);
}

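/*
 * One-time controller initialization: halt the microcode engine,
 * read (and sanity check) the EEPROM configuration or fall back to
 * defaults on a checksum mismatch, download the LRAM microcode, and
 * establish per-target async transfer settings and queue depths.
 */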
int
adv_init(struct adv_softc *adv)
{
        struct adv_eeprom_config eeprom_config;
        int checksum, i;
        u_int16_t config_lsw;
        u_int16_t config_msw;

        adv_reset_chip_and_scsi_bus(adv);
        adv_lib_init(adv);

        /*
         * Stop script execution.
         */
        adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
        adv_stop_execution(adv);
        if (adv_is_chip_halted(adv) == 0) {
                printf("adv%d: Unable to halt adapter. Initialization "
                       "failed\n", adv->unit);
                return (1);
        }
        ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
        if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
                printf("adv%d: Unable to set program counter. Initialization "
                       "failed\n", adv->unit);
                return (1);
        }

        config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
        config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

        if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
                config_msw &= (~(ADV_CFG_MSW_CLR_MASK));
                /*
                 * XXX The Linux code flags this as an error,
                 * but what should we report to the user???
                 * It seems that clearing the config register
                 * makes this error recoverable.
                 */
                ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
        }

        /* Suck in the configuration from the EEProm */
        checksum = adv_get_eeprom_config(adv, &eeprom_config);

        eeprom_config.cfg_msw &= (~(ADV_CFG_MSW_CLR_MASK));

        if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
                /*
                 * XXX The Linux code sets a warning level for this
                 * condition, yet nothing of meaning is printed to
                 * the user.  What does this mean???
                 */
                if (adv->chip_version == 3) {
                        if (eeprom_config.cfg_lsw != config_lsw) {
                                eeprom_config.cfg_lsw =
                                    ADV_INW(adv, ADV_CONFIG_LSW);
                        }
                        if (eeprom_config.cfg_msw != config_msw) {
                                eeprom_config.cfg_msw =
                                    ADV_INW(adv, ADV_CONFIG_MSW);
                        }
                }
        }
        eeprom_config.cfg_lsw |= ADV_CFG_LSW_HOST_INT_ON;
        if (adv_test_external_lram(adv) == 0) {
                /*
                 * XXX What about non PCI cards with no
                 * external LRAM????
                 */
                if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
                        eeprom_config.max_total_qng =
                            ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
                        eeprom_config.max_tag_qng =
                            ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
                } else {
                        eeprom_config.cfg_msw |= 0x0800;
                        config_msw |= 0x0800;
                        ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
                        eeprom_config.max_total_qng =
                            ADV_MAX_PCI_INRAM_TOTAL_QNG;
                        eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
                }
                adv->max_openings = eeprom_config.max_total_qng;
        }
        if (checksum == eeprom_config.chksum) {
                /* Range/Sanity checking */
                if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
                        eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
                }
                if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
                        eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
                }
                if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
                        eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
                }
                if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
                        eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
                }
                adv->max_openings = eeprom_config.max_total_qng;

                adv->user_disc_enable = eeprom_config.disc_enable;
                adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
                adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
                adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
                EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
                adv->control = eeprom_config.cntl;
                for (i = 0; i <= ADV_MAX_TID; i++)
                        adv_sdtr_to_period_offset(adv,
                                                  eeprom_config.sdtr_data[i],
                                                  &adv->tinfo[i].user.period,
                                                  &adv->tinfo[i].user.offset,
                                                  i);
        } else {
                u_int8_t sync_data;

                printf("adv%d: Warning: EEPROM checksum mismatch. "
                       "Using default device parameters\n", adv->unit);

                /* Set reasonable defaults since we can't read the EEPROM */
                adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
                adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
                adv->disc_enable = TARGET_BIT_VECTOR_SET;
                adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
                adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
                adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
                adv->scsi_id = 7;

                sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
                for (i = 0; i <= ADV_MAX_TID; i++)
                        adv_sdtr_to_period_offset(adv, sync_data,
                                                  &adv->tinfo[i].user.period,
                                                  &adv->tinfo[i].user.offset,
                                                  i);
        }

        if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
                printf("%s: WARNING! Failure writing to EEPROM.\n",
                       adv_name(adv));

        adv_set_chip_scsiid(adv, adv->scsi_id);
        if (adv_init_lram_and_mcode(adv))
                return (1);

        adv->disc_enable = adv->user_disc_enable;

        adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
        for (i = 0; i <= ADV_MAX_TID; i++) {
                /*
                 * Start off in async mode.
                 */
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 i, /*period*/0, /*offset*/0,
                                 ADV_TRANS_CUR);
                /*
                 * Enable the use of tagged commands on all targets.
                 * This allows the kernel driver to make up its own mind
                 * as it sees fit to tag queue instead of having the
                 * firmware try and second guess the tag_code settings.
                 */
                adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
                                 adv->max_openings);
        }
        adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
        adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
        printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
               adv->unit, (adv->type & ADV_ULTRA) ? "Ultra SCSI" : "SCSI",
               adv->scsi_id, adv->max_openings);
        return (0);
}

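/*
 * Interrupt handler.  While an interrupt is pending we flag LRAM as
 * "in ISR", ack the interrupt, then either service a halted chip
 * (error recovery) or drain the done queue.  The LRAM address and
 * chip control register are restored before returning.
 */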
void
adv_intr(void *arg)
{
        struct adv_softc *adv;
        u_int16_t chipstat;
        u_int16_t saved_ram_addr;
        u_int8_t ctrl_reg;
        u_int8_t saved_ctrl_reg;
        u_int8_t host_flag;

        adv = (struct adv_softc *)arg;

        ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
        saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
                                       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
                                       ADV_CC_TEST));

        if ((chipstat = ADV_INW(adv, ADV_CHIP_STATUS)) & ADV_CSW_INT_PENDING) {

                saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
                host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
                adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
                                 host_flag | ADV_HOST_FLAG_IN_ISR);

                adv_ack_interrupt(adv);

                if ((chipstat & ADV_CSW_HALTED)
                 && (ctrl_reg & ADV_CC_SINGLE_STEP)) {
                        adv_isr_chip_halted(adv);
                        saved_ctrl_reg &= ~ADV_CC_HALT;
                } else {
                        adv_run_doneq(adv);
                }
                ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
                if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
                        panic("adv_intr: Unable to set LRAM addr");
#endif
                adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
        }

        ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}

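/*
 * Walk the firmware's done queue, copying out completion status for
 * each finished request, returning its queue entries (including any
 * S/G continuation queues) to the free pool, and completing the
 * associated CCB via adv_done().
 */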
void
adv_run_doneq(struct adv_softc *adv)
{
        struct adv_q_done_info scsiq;
        u_int doneq_head;
        u_int done_qno;

        doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
        done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
                                   + ADV_SCSIQ_B_FWD);
        while (done_qno != ADV_QLINK_END) {
                union ccb* ccb;
                u_int done_qaddr;
                u_int sg_queue_cnt;
                int aborted;

                done_qaddr = ADV_QNO_TO_QADDR(done_qno);

                /* Pull status from this request */
                sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
                                                   adv->max_dma_count);

                /* Mark it as free */
                adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
                                 scsiq.q_status & ~(QS_READY|QS_ABORTED));

                /* Process request based on retrieved info */
                if ((scsiq.cntl & QC_SG_HEAD) != 0) {
                        u_int i;

                        /*
                         * S/G based request.  Free all of the queue
                         * structures that contained S/G information.
                         */
                        for (i = 0; i < sg_queue_cnt; i++) {
                                done_qno = adv_read_lram_8(adv, done_qaddr
                                                           + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
                                if (done_qno == ADV_QLINK_END) {
                                        panic("adv_qdone: Corrupted SG "
                                              "list encountered");
                                }
#endif
                                done_qaddr = ADV_QNO_TO_QADDR(done_qno);

                                /* Mark SG queue as free */
                                adv_write_lram_8(adv, done_qaddr
                                                 + ADV_SCSIQ_B_STATUS, QS_FREE);
                        }
                } else
                        sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
                if (adv->cur_active < (sg_queue_cnt + 1))
                        panic("adv_qdone: Attempting to free more "
                              "queues than are active");
#endif
                adv->cur_active -= sg_queue_cnt + 1;

                aborted = (scsiq.q_status & QS_ABORTED) != 0;

                if ((scsiq.q_status != QS_DONE)
                 && (scsiq.q_status & QS_ABORTED) == 0)
                        panic("adv_qdone: completed scsiq with unknown status");

                scsiq.remain_bytes += scsiq.extra_bytes;

                if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
                    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
                        if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
                                scsiq.d3.done_stat = QD_NO_ERROR;
                                scsiq.d3.host_stat = QHSTA_NO_ERROR;
                        }
                }

                ccb = (union ccb *)scsiq.d2.ccb_ptr;
                ccb->csio.resid = scsiq.remain_bytes;
                adv_done(adv, (union ccb *)scsiq.d2.ccb_ptr,
                         scsiq.d3.done_stat, scsiq.d3.host_stat,
                         scsiq.d3.scsi_stat, scsiq.q_no);

                doneq_head = done_qno;
                done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
        }
        adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}

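/*
 * Final completion processing for a CCB: sync and unload its DMA
 * map, translate controller status into CAM status, reinstate
 * pending timeouts if this was the recovery CCB, and hand the CCB
 * back to the XPT layer.
 */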
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
         u_int host_stat, u_int scsi_status, u_int q_no)
{
        struct adv_ccb_info *cinfo;

        cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
        /*
         * Null this out so that we catch driver bugs that cause a
         * ccb to be completed twice.
         */
        ccb->ccb_h.ccb_cinfo_ptr = NULL;
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
                bus_dmasync_op_t op;

                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
                        op = BUS_DMASYNC_POSTREAD;
                else
                        op = BUS_DMASYNC_POSTWRITE;
                bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
                bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
        }

        switch (done_stat) {
        case QD_NO_ERROR:
                switch (host_stat) {
                case QHSTA_NO_ERROR:
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case QHSTA_M_SEL_TIMEOUT:
                        ccb->ccb_h.status = CAM_SEL_TIMEOUT;
                        break;
                default:
                        xpt_print_path(ccb->ccb_h.path);
                        printf("adv_done - queue done without error, "
                               "unknown host status %x\n", host_stat);
                        /* XXX Can I get more explicit information here? */
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        break;
                }
                break;

        case QD_WITH_ERROR:
                switch (host_stat) {
                case QHSTA_NO_ERROR:
                        ccb->csio.scsi_status = scsi_status;
                        switch (scsi_status) {
                        case SCSI_STATUS_CHECK_COND:
                        case SCSI_STATUS_CMD_TERMINATED:
                                ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                                /* Structure copy */
                                ccb->csio.sense_data =
                                    adv->sense_buffers[q_no - 1];
                                /* FALLTHROUGH */
                        case SCSI_STATUS_BUSY:
                        case SCSI_STATUS_RESERV_CONFLICT:
                        case SCSI_STATUS_QUEUE_FULL:
                        case SCSI_STATUS_COND_MET:
                        case SCSI_STATUS_INTERMED:
                        case SCSI_STATUS_INTERMED_COND_MET:
                                ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
                                break;
                        case SCSI_STATUS_OK:
                                ccb->ccb_h.status |= CAM_REQ_CMP;
                                break;
                        }
                        break;
                case QHSTA_M_SEL_TIMEOUT:
                        ccb->ccb_h.status = CAM_SEL_TIMEOUT;
                        break;
                default:
                        xpt_print_path(ccb->ccb_h.path);
                        printf("adv_done - queue done with error, "
                               "unknown host status %x\n", host_stat);
                        /* XXX Can I get more explicit information here? */
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        break;
                }
                break;
        case QD_ABORTED_BY_HOST:
                /* Don't clobber any more explicit error codes we've set */
                if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
                        ccb->ccb_h.status = CAM_REQ_ABORTED;
                break;

        default:
                xpt_print_path(ccb->ccb_h.path);
                printf("adv_done - queue done with unknown status %x:%x\n",
                       done_stat, host_stat);
                ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                break;
        }
        if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0)
                ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        else if (adv->openings_needed > 0) {
                int openings;

                openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
                if (openings >= adv->openings_needed) {
                        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
                        adv->openings_needed = 0;
                }
        }
        /* Remove from the pending list */
        LIST_REMOVE(&ccb->ccb_h, sim_links.le);

        untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
        if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
                /*
                 * We now traverse our list of pending CCBs and reinstate
                 * their timeouts.
                 */
                struct ccb_hdr *ccb_h;

                ccb_h = LIST_FIRST(&adv->pending_ccbs);
                while (ccb_h != NULL) {
                        ccb_h->timeout_ch =
                            timeout(adv_timeout, (caddr_t)ccb_h,
                                    (ccb_h->timeout * hz) / 1000);
                        ccb_h = LIST_NEXT(ccb_h, sim_links.le);
                }
                printf("%s: No longer in timeout\n", adv_name(adv));
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
         && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
                xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                ccb->ccb_h.status |= CAM_DEV_QFRZN;
        }
        adv_free_ccb_info(adv, cinfo);
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        xpt_done(ccb);
}

/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
        adv_intr(cam_sim_softc(sim));
}

/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(struct adv_softc *adv)
{
        struct ccb_setasync csa;
        struct cam_devq *devq;

        /*
         * Create our DMA tags.  These tags define the kinds of device
         * accessible memory allocations and memory mappings we will
         * need to perform during normal operation.
         *
         * Unless we need to further restrict the allocation, we rely
         * on the restrictions of the parent dmat, hence the common
         * use of MAXADDR and MAXSIZE.
         */

        /* DMA tag for mapping buffers into device visible space. */
        if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/0, /*boundary*/0,
                               /*lowaddr*/BUS_SPACE_MAXADDR,
                               /*highaddr*/BUS_SPACE_MAXADDR,
                               /*filter*/NULL, /*filterarg*/NULL,
                               /*maxsize*/MAXBSIZE,
                               /*nsegments*/ADV_MAX_SG_LIST,
                               /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                               /*flags*/BUS_DMA_ALLOCNOW,
                               &adv->buffer_dmat) != 0) {
                goto error_exit;
        }
        adv->init_level++;

        /* DMA tag for our sense buffers */
        if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/0, /*boundary*/0,
                               /*lowaddr*/BUS_SPACE_MAXADDR,
                               /*highaddr*/BUS_SPACE_MAXADDR,
                               /*filter*/NULL, /*filterarg*/NULL,
                               sizeof(struct scsi_sense_data)*adv->max_openings,
                               /*nsegments*/1,
                               /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                               /*flags*/0, &adv->sense_dmat) != 0) {
                goto error_exit;
        }

        adv->init_level++;

        /* Allocation for our sense buffers */
        if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
                             BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
                goto error_exit;
        }

        adv->init_level++;

        /* And permanently map them */
        bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
                        adv->sense_buffers,
                        sizeof(struct scsi_sense_data)*adv->max_openings,
                        adv_map, &adv->sense_physbase, /*flags*/0);

        adv->init_level++;

        /*
         * Fire up the chip
         */
        if (adv_start_chip(adv) != 1) {
                printf("adv%d: Unable to start on board processor. Aborting.\n",
                       adv->unit);
                return (0);
        }

        /*
         * Create the device queue for our SIM.
         */
        devq = cam_simq_alloc(adv->max_openings);
        if (devq == NULL)
                return (0);

        /*
         * Construct our SIM entry.
         */
        adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
                                 1, adv->max_openings, devq);
        if (adv->sim == NULL)
                return (0);

        /*
         * Register the bus.
         *
         * XXX Twin Channel EISA Cards???
         */
        if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
                cam_sim_free(adv->sim, /*free devq*/TRUE);
                return (0);
        }

        if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
                            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
            == CAM_REQ_CMP) {
                xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
                csa.ccb_h.func_code = XPT_SASYNC_CB;
                csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
                csa.callback = advasync;
                csa.callback_arg = adv;
                xpt_action((union ccb *)&csa);
        }
        return (1);

error_exit:
        return (0);
}