advansys.c: diff of r195534 (old) against r241492 (new)
1/*-
2 * Generic driver for the Advanced Systems Inc. SCSI controllers
3 * Product specific probe and attach routines can be found in:
4 *
5 * i386/isa/adv_isa.c ABP5140, ABP542, ABP5150, ABP842, ABP852
6 * i386/eisa/adv_eisa.c ABP742, ABP752
7 * pci/adv_pci.c ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
8 * ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,

--- 32 unchanged lines hidden (view full) ---

41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that redistributions of source
44 * code retain the above copyright notice and this comment without
45 * modification.
46 */
47
48#include <sys/cdefs.h>
49__FBSDID("$FreeBSD: head/sys/dev/advansys/advansys.c 195534 2009-07-10 08:18:08Z scottl $");
49__FBSDID("$FreeBSD: head/sys/dev/advansys/advansys.c 241492 2012-10-12 21:31:44Z jhb $");
50
51#include <sys/param.h>
52#include <sys/conf.h>
52#include <sys/systm.h>
53#include <sys/malloc.h>
54#include <sys/kernel.h>
55#include <sys/lock.h>
56#include <sys/module.h>
57#include <sys/mutex.h>
58
59#include <machine/bus.h>

--- 14 unchanged lines hidden (view full) ---

74#include <vm/vm_param.h>
75#include <vm/pmap.h>
76
77#include <dev/advansys/advansys.h>
78
79static void adv_action(struct cam_sim *sim, union ccb *ccb);
80static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
81 int nsegments, int error);
83static void adv_intr_locked(struct adv_softc *adv);
82static void adv_poll(struct cam_sim *sim);
83static void adv_run_doneq(struct adv_softc *adv);
84static struct adv_ccb_info *
85 adv_alloc_ccb_info(struct adv_softc *adv);
86static void adv_destroy_ccb_info(struct adv_softc *adv,
87 struct adv_ccb_info *cinfo);
88static __inline struct adv_ccb_info *
89 adv_get_ccb_info(struct adv_softc *adv);
90static __inline void adv_free_ccb_info(struct adv_softc *adv,
91 struct adv_ccb_info *cinfo);
92static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
93static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb);
94static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb);
95
96static __inline struct adv_ccb_info *
97adv_get_ccb_info(struct adv_softc *adv)
98{
99 struct adv_ccb_info *cinfo;
100 int opri;
101
102 opri = splcam();
103 if (!dumping)
104 mtx_assert(&adv->lock, MA_OWNED);
103 if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
104 SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
105 } else {
106 cinfo = adv_alloc_ccb_info(adv);
107 }
108 splx(opri);
109
110 return (cinfo);
111}
112
113static __inline void
114adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
115{
116 int opri;
117
118 opri = splcam();
118 if (!dumping)
119 mtx_assert(&adv->lock, MA_OWNED);
119 cinfo->state = ACCB_FREE;
120 SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
121 splx(opri);
122}
123
124static __inline void
125adv_set_state(struct adv_softc *adv, adv_state state)
126{
127 if (adv->state == 0)
128 xpt_freeze_simq(adv->sim, /*count*/1);
129 adv->state |= state;

--- 4 unchanged lines hidden (view full) ---

134{
135 if (adv->state != 0)
136 adv_clear_state_really(adv, ccb);
137}
138
139static void
140adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
141{
142
143 if (!dumping)
144 mtx_assert(&adv->lock, MA_OWNED);
142 if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
143 adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
144 if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
145 int openings;
146
147 openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
148 if (openings >= adv->openings_needed) {
149 adv->state &= ~ADV_RESOURCE_SHORTAGE;

--- 9 unchanged lines hidden (view full) ---

159 struct ccb_hdr *ccb_h;
160
161 /*
162 * We now traverse our list of pending CCBs
163 * and reinstate their timeouts.
164 */
165 ccb_h = LIST_FIRST(&adv->pending_ccbs);
166 while (ccb_h != NULL) {
167 ccb_h->timeout_ch =
168 timeout(adv_timeout, (caddr_t)ccb_h,
169 (ccb_h->timeout * hz) / 1000);
170 cinfo = ccb_h->ccb_cinfo_ptr;
171 callout_reset(&cinfo->timer,
172 ccb_h->timeout * hz / 1000, adv_timeout,
173 ccb_h);
170 ccb_h = LIST_NEXT(ccb_h, sim_links.le);
171 }
172 adv->state &= ~ADV_IN_TIMEOUT;
173 printf("%s: No longer in timeout\n", adv_name(adv));
177 device_printf(adv->dev, "No longer in timeout\n");
174 }
175 }
176 if (adv->state == 0)
177 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
178}
179
180void
181adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
182{
183 bus_addr_t* physaddr;
184
185 physaddr = (bus_addr_t*)arg;
186 *physaddr = segs->ds_addr;
187}
188
189char *
190adv_name(struct adv_softc *adv)
191{
192 static char name[10];
193
194 snprintf(name, sizeof(name), "adv%d", adv->unit);
195 return (name);
196}
197
198static void
199adv_action(struct cam_sim *sim, union ccb *ccb)
200{
201 struct adv_softc *adv;
202
203 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));
204
205 adv = (struct adv_softc *)cam_sim_softc(sim);
201 mtx_assert(&adv->lock, MA_OWNED);
206
207 switch (ccb->ccb_h.func_code) {
208 /* Common cases first */
209 case XPT_SCSI_IO: /* Execute the requested I/O operation */
210 {
211 struct ccb_hdr *ccb_h;
212 struct ccb_scsiio *csio;
213 struct adv_ccb_info *cinfo;

--- 10 unchanged lines hidden (view full) ---

224 /* Only use S/G if there is a transfer */
225 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
226 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
227 /*
228 * We've been given a pointer
229 * to a single buffer
230 */
231 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
232 int s;
233 int error;
234
235 s = splsoftvm();
236 error =
237 bus_dmamap_load(adv->buffer_dmat,
238 cinfo->dmamap,
239 csio->data_ptr,
240 csio->dxfer_len,
241 adv_execute_ccb,
242 csio, /*flags*/0);
243 if (error == EINPROGRESS) {
244 /*
245 * So as to maintain ordering,
246 * freeze the controller queue
247 * until our mapping is
248 * returned.
249 */
250 adv_set_state(adv,
251 ADV_BUSDMA_BLOCK);
252 }
253 splx(s);
254 } else {
255 struct bus_dma_segment seg;
256
257 /* Pointer to physical buffer */
258 seg.ds_addr =
259 (bus_addr_t)csio->data_ptr;
260 seg.ds_len = csio->dxfer_len;
261 adv_execute_ccb(csio, &seg, 1, 0);

--- 32 unchanged lines hidden (view full) ---

294 case XPT_SET_TRAN_SETTINGS:
295 {
296 struct ccb_trans_settings_scsi *scsi;
297 struct ccb_trans_settings_spi *spi;
298 struct ccb_trans_settings *cts;
299 target_bit_vector targ_mask;
300 struct adv_transinfo *tconf;
301 u_int update_type;
302 int s;
303
304 cts = &ccb->cts;
305 targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
306 update_type = 0;
307
308 /*
309 * The user must specify which type of settings he wishes
310 * to change.

--- 4 unchanged lines hidden (view full) ---

315 } else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) {
316 tconf = &adv->tinfo[cts->ccb_h.target_id].user;
317 update_type |= ADV_TRANS_USER;
318 } else {
319 ccb->ccb_h.status = CAM_REQ_INVALID;
320 break;
321 }
322
323 s = splcam();
324 scsi = &cts->proto_specific.scsi;
325 spi = &cts->xport_specific.spi;
326 if ((update_type & ADV_TRANS_GOAL) != 0) {
327 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
328 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
329 adv->disc_enable |= targ_mask;
330 else
331 adv->disc_enable &= ~targ_mask;

--- 50 unchanged lines hidden (view full) ---

382 &spi->sync_offset,
383 cts->ccb_h.target_id);
384
385 adv_set_syncrate(adv, /*struct cam_path */NULL,
386 cts->ccb_h.target_id, spi->sync_period,
387 spi->sync_offset, update_type);
388 }
389
390 splx(s);
391 ccb->ccb_h.status = CAM_REQ_CMP;
392 xpt_done(ccb);
393 break;
394 }
395 case XPT_GET_TRAN_SETTINGS:
396 /* Get default/user set transfer settings for the target */
397 {
398 struct ccb_trans_settings_scsi *scsi;
399 struct ccb_trans_settings_spi *spi;
400 struct ccb_trans_settings *cts;
401 struct adv_transinfo *tconf;
402 target_bit_vector target_mask;
403 int s;
404
405 cts = &ccb->cts;
406 target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
407
408 scsi = &cts->proto_specific.scsi;
409 spi = &cts->xport_specific.spi;
410
411 cts->protocol = PROTO_SCSI;
412 cts->protocol_version = SCSI_REV_2;
413 cts->transport = XPORT_SPI;
414 cts->transport_version = 2;
415
416 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
417 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
418
419 s = splcam();
420 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
421 tconf = &adv->tinfo[cts->ccb_h.target_id].current;
422 if ((adv->disc_enable & target_mask) != 0)
423 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
424 if ((adv->cmd_qng_enabled & target_mask) != 0)
425 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
426 } else {
427 tconf = &adv->tinfo[cts->ccb_h.target_id].user;
428 if ((adv->user_disc_enable & target_mask) != 0)
429 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
430 if ((adv->user_cmd_qng_enabled & target_mask) != 0)
431 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
432 }
433 spi->sync_period = tconf->period;
434 spi->sync_offset = tconf->offset;
435 splx(s);
436 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
437 spi->valid = CTS_SPI_VALID_SYNC_RATE
438 | CTS_SPI_VALID_SYNC_OFFSET
439 | CTS_SPI_VALID_BUS_WIDTH
440 | CTS_SPI_VALID_DISC;
441 scsi->valid = CTS_SCSI_VALID_TQ;
442 ccb->ccb_h.status = CAM_REQ_CMP;
443 xpt_done(ccb);

--- 5 unchanged lines hidden (view full) ---

449
450 extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
451 cam_calc_geometry(&ccb->ccg, extended);
452 xpt_done(ccb);
453 break;
454 }
455 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
456 {
457 int s;
458
459 s = splcam();
460 adv_stop_execution(adv);
461 adv_reset_bus(adv, /*initiate_reset*/TRUE);
462 adv_start_execution(adv);
463 splx(s);
464
465 ccb->ccb_h.status = CAM_REQ_CMP;
466 xpt_done(ccb);
467 break;
468 }
469 case XPT_TERM_IO: /* Terminate the I/O process */
470 /* XXX Implement */
471 ccb->ccb_h.status = CAM_REQ_INVALID;

--- 44 unchanged lines hidden (view full) ---

516{
517 struct ccb_scsiio *csio;
518 struct ccb_hdr *ccb_h;
519 struct cam_sim *sim;
520 struct adv_softc *adv;
521 struct adv_ccb_info *cinfo;
522 struct adv_scsi_q scsiq;
523 struct adv_sg_head sghead;
524 int s;
525
526 csio = (struct ccb_scsiio *)arg;
527 ccb_h = &csio->ccb_h;
528 sim = xpt_path_sim(ccb_h->path);
529 adv = (struct adv_softc *)cam_sim_softc(sim);
530 cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;
514 if (!dumping)
515 mtx_assert(&adv->lock, MA_OWNED);
531
532 /*
533 * Setup our done routine to release the simq on
534 * the next ccb that completes.
535 */
536 if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
537 adv->state |= ADV_BUSDMA_BLOCK_CLEARED;
538

--- 52 unchanged lines hidden (view full) ---

591 op = BUS_DMASYNC_PREWRITE;
592 bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
593 } else {
594 scsiq.q1.data_addr = 0;
595 scsiq.q1.data_cnt = 0;
596 scsiq.sg_head = NULL;
597 }
598
599 s = splcam();
600
601 /*
602 * Last time we need to check if this SCB needs to
603 * be aborted.
604 */
605 if (ccb_h->status != CAM_REQ_INPROG) {
606 if (nsegments != 0)
607 bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
608 adv_clear_state(adv, (union ccb *)csio);
609 adv_free_ccb_info(adv, cinfo);
610 xpt_done((union ccb *)csio);
611 splx(s);
612 return;
613 }
614
615 if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
616 /* Temporary resource shortage */
617 adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
618 if (nsegments != 0)
619 bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
620 csio->ccb_h.status = CAM_REQUEUE_REQ;
621 adv_clear_state(adv, (union ccb *)csio);
622 adv_free_ccb_info(adv, cinfo);
623 xpt_done((union ccb *)csio);
624 splx(s);
625 return;
626 }
627 cinfo->state |= ACCB_ACTIVE;
628 ccb_h->status |= CAM_SIM_QUEUED;
629 LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
630 /* Schedule our timeout */
631 ccb_h->timeout_ch =
632 timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
633 splx(s);
612 callout_reset(&cinfo->timer, ccb_h->timeout * hz /1000, adv_timeout,
613 csio);
634}
635
636static struct adv_ccb_info *
637adv_alloc_ccb_info(struct adv_softc *adv)
638{
639 int error;
640 struct adv_ccb_info *cinfo;
641
642 cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
643 cinfo->state = ACCB_FREE;
624 callout_init_mtx(&cinfo->timer, &adv->lock, 0);
644 error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
645 &cinfo->dmamap);
646 if (error != 0) {
647 printf("%s: Unable to allocate CCB info "
648 "dmamap - error %d\n", adv_name(adv), error);
628 device_printf(adv->dev, "Unable to allocate CCB info "
629 "dmamap - error %d\n", error);
649 return (NULL);
650 }
651 adv->ccb_infos_allocated++;
652 return (cinfo);
653}
654
655static void
656adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
657{
639
640 callout_drain(&cinfo->timer);
658 bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
659}
660
661void
662adv_timeout(void *arg)
663{
664 int s;
665 union ccb *ccb;
666 struct adv_softc *adv;
667 struct adv_ccb_info *cinfo;
649 struct adv_ccb_info *cinfo, *cinfo2;
668
669 ccb = (union ccb *)arg;
670 adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
671 cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
654 mtx_assert(&adv->lock, MA_OWNED);
672
673 xpt_print_path(ccb->ccb_h.path);
674 printf("Timed out\n");
675
676 s = splcam();
677 /* Have we been taken care of already?? */
678 if (cinfo == NULL || cinfo->state == ACCB_FREE) {
679 splx(s);
680 return;
681 }
682
683 adv_stop_execution(adv);
684
685 if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
686 struct ccb_hdr *ccb_h;
687

--- 9 unchanged lines hidden (view full) ---

697 */
698 adv_set_state(adv, ADV_IN_TIMEOUT);
699
700 /* This CCB is the CCB representing our recovery actions */
701 cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;
702
703 ccb_h = LIST_FIRST(&adv->pending_ccbs);
704 while (ccb_h != NULL) {
705 untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
686 cinfo2 = ccb_h->ccb_cinfo_ptr;
687 callout_stop(&cinfo2->timer);
706 ccb_h = LIST_NEXT(ccb_h, sim_links.le);
707 }
708
709 /* XXX Should send a BDR */
710 /* Attempt an abort as our first tact */
711 xpt_print_path(ccb->ccb_h.path);
712 printf("Attempting abort\n");
713 adv_abort_ccb(adv, ccb->ccb_h.target_id,
714 ccb->ccb_h.target_lun, ccb,
715 CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
716 ccb->ccb_h.timeout_ch =
717 timeout(adv_timeout, ccb, 2 * hz);
698 callout_reset(&cinfo->timer, 2 * hz, adv_timeout, ccb);
718 } else {
719 /* Our attempt to perform an abort failed, go for a reset */
720 xpt_print_path(ccb->ccb_h.path);
721 printf("Resetting bus\n");
722 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
723 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
724 adv_reset_bus(adv, /*initiate_reset*/TRUE);
725 }
726 adv_start_execution(adv);
727 splx(s);
728}
729
730struct adv_softc *
731adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
711adv_alloc(device_t dev, struct resource *res, long offset)
732{
733 struct adv_softc *adv = device_get_softc(dev);
734
735 /*
736 * Allocate a storage area for us
737 */
738 LIST_INIT(&adv->pending_ccbs);
739 SLIST_INIT(&adv->free_ccb_infos);
740 adv->dev = dev;
741 adv->unit = device_get_unit(dev);
742 adv->tag = tag;
743 adv->bsh = bsh;
721 adv->res = res;
722 adv->reg_off = offset;
723 mtx_init(&adv->lock, "adv", NULL, MTX_DEF);
744
745 return(adv);
746}
747
748void
749adv_free(struct adv_softc *adv)
750{
751 switch (adv->init_level) {

--- 16 unchanged lines hidden (view full) ---

768 case 3:
769 bus_dma_tag_destroy(adv->buffer_dmat);
770 case 2:
771 bus_dma_tag_destroy(adv->parent_dmat);
772 case 1:
773 if (adv->ccb_infos != NULL)
774 free(adv->ccb_infos, M_DEVBUF);
775 case 0:
756 mtx_destroy(&adv->lock);
776 break;
777 }
778}
779
780int
781adv_init(struct adv_softc *adv)
782{
783 struct adv_eeprom_config eeprom_config;
784 int checksum, i;
785 int max_sync;
786 u_int16_t config_lsw;
787 u_int16_t config_msw;
788
770 mtx_lock(&adv->lock);
789 adv_lib_init(adv);
790
791 /*
792 * Stop script execution.
793 */
794 adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
795 adv_stop_execution(adv);
796 if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
797 printf("adv%d: Unable to halt adapter. Initialization"
798 "failed\n", adv->unit);
779 mtx_unlock(&adv->lock);
780 device_printf(adv->dev,
781 "Unable to halt adapter. Initialization failed\n");
799 return (1);
800 }
801 ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
802 if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
803 printf("adv%d: Unable to set program counter. Initialization"
804 "failed\n", adv->unit);
786 mtx_unlock(&adv->lock);
787 device_printf(adv->dev,
788 "Unable to set program counter. Initialization failed\n");
805 return (1);
806 }
807
808 config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
809 config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
810
811 if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
812 config_msw &= ~ADV_CFG_MSW_CLR_MASK;

--- 58 unchanged lines hidden (view full) ---

871 &adv->tinfo[i].user.offset,
872 i);
873 }
874 config_lsw = eeprom_config.cfg_lsw;
875 eeprom_config.cfg_msw = config_msw;
876 } else {
877 u_int8_t sync_data;
878
879 printf("adv%d: Warning EEPROM Checksum mismatch. "
880 "Using default device parameters\n", adv->unit);
863 device_printf(adv->dev, "Warning EEPROM Checksum mismatch. "
864 "Using default device parameters\n");
881
882 /* Set reasonable defaults since we can't read the EEPROM */
883 adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
884 adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
885 adv->disc_enable = TARGET_BIT_VECTOR_SET;
886 adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
887 adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
888 adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;

--- 46 unchanged lines hidden (view full) ---

935#if 0
936 /*
937 * Don't write the eeprom data back for now.
938 * I'd rather not mess up the user's card. We also don't
939 * fully sanitize the eeprom settings above for the write-back
940 * to be 100% correct.
941 */
942 if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
943 printf("%s: WARNING! Failure writing to EEPROM.\n",
944 adv_name(adv));
927 device_printf(adv->dev,
928 "WARNING! Failure writing to EEPROM.\n");
945#endif
946
947 adv_set_chip_scsiid(adv, adv->scsi_id);
948 if (adv_init_lram_and_mcode(adv))
932 if (adv_init_lram_and_mcode(adv)) {
933 mtx_unlock(&adv->lock);
949 return (1);
935 }
950
951 adv->disc_enable = adv->user_disc_enable;
952
953 adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
954 for (i = 0; i <= ADV_MAX_TID; i++) {
955 /*
956 * Start off in async mode.
957 */

--- 6 unchanged lines hidden (view full) ---

964 * as it sees fit to tag queue instead of having the
965 * firmware try and second guess the tag_code settins.
966 */
967 adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
968 adv->max_openings);
969 }
970 adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
971 adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
972 printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
973 adv->unit, (adv->type & ADV_ULTRA) && (max_sync == 0)
974 ? "Ultra SCSI" : "SCSI",
975 adv->scsi_id, adv->max_openings);
958 device_printf(adv->dev,
959 "AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
960 (adv->type & ADV_ULTRA) && (max_sync == 0)
961 ? "Ultra SCSI" : "SCSI",
962 adv->scsi_id, adv->max_openings);
963 mtx_unlock(&adv->lock);
976 return (0);
977}
978
979void
980adv_intr(void *arg)
981{
982 struct adv_softc *adv;
971
972 adv = arg;
973 mtx_lock(&adv->lock);
974 adv_intr_locked(adv);
975 mtx_unlock(&adv->lock);
976}
977
978void
979adv_intr_locked(struct adv_softc *adv)
980{
983 u_int16_t chipstat;
984 u_int16_t saved_ram_addr;
985 u_int8_t ctrl_reg;
986 u_int8_t saved_ctrl_reg;
987 u_int8_t host_flag;
988
989 adv = (struct adv_softc *)arg;
990
987 if (!dumping)
988 mtx_assert(&adv->lock, MA_OWNED);
991 chipstat = ADV_INW(adv, ADV_CHIP_STATUS);
992
993 /* Is it for us? */
994 if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)
995 return;
996
997 ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
998 saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
999 ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
1000 ADV_CC_TEST));
1001
1002 if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
1003 printf("Detected Bus Reset\n");
1001 device_printf(adv->dev, "Detected Bus Reset\n");
1004 adv_reset_bus(adv, /*initiate_reset*/FALSE);
1005 return;
1006 }
1007
1008 if ((chipstat & ADV_CSW_INT_PENDING) != 0) {
1009
1010 saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
1011 host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);

--- 111 unchanged lines hidden (view full) ---

1123
1124
1125void
1126adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
1127 u_int host_stat, u_int scsi_status, u_int q_no)
1128{
1129 struct adv_ccb_info *cinfo;
1130
1002 adv_reset_bus(adv, /*initiate_reset*/FALSE);
1003 return;
1004 }
1005
1006 if ((chipstat & ADV_CSW_INT_PENDING) != 0) {
1007
1008 saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
1009 host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);

--- 111 unchanged lines hidden (view full) ---

1121
1122
1123void
1124adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
1125 u_int host_stat, u_int scsi_status, u_int q_no)
1126{
1127 struct adv_ccb_info *cinfo;
1128
1129 if (!dumping)
1130 mtx_assert(&adv->lock, MA_OWNED);
1131 cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
1132 LIST_REMOVE(&ccb->ccb_h, sim_links.le);
1133 untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
1133 callout_stop(&cinfo->timer);
1134 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1135 bus_dmasync_op_t op;
1136
1137 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1138 op = BUS_DMASYNC_POSTREAD;
1139 else
1140 op = BUS_DMASYNC_POSTWRITE;
1141 bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);

--- 92 unchanged lines hidden (view full) ---

1234 case QHSTA_D_HOST_ABORT_FAILED:
1235 case QHSTA_D_EXE_SCSI_Q_FAILED:
1236 case QHSTA_D_ASPI_NO_BUF_POOL:
1237 case QHSTA_M_BAD_TAG_CODE:
1238 case QHSTA_D_LRAM_CMP_ERROR:
1239 case QHSTA_M_MICRO_CODE_ERROR_HALT:
1240 default:
1241 panic("%s: Unhandled Host status error %x",
1242 adv_name(adv), host_stat);
1242 device_get_nameunit(adv->dev), host_stat);
1243 /* NOTREACHED */
1244 }
1245 break;
1246
1247 case QD_ABORTED_BY_HOST:
1248 /* Don't clobber any, more explicit, error codes we've set */
1249 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
1250 ccb->ccb_h.status = CAM_REQ_ABORTED;

--- 24 unchanged lines hidden (view full) ---

1275
1276/*
1277 * Function to poll for command completion when
1278 * interrupts are disabled (crash dumps)
1279 */
1280static void
1281adv_poll(struct cam_sim *sim)
1282{
1283 adv_intr(cam_sim_softc(sim));
1283
1284 adv_intr_locked(cam_sim_softc(sim));
1284}
1285
1286/*
1287 * Attach all the sub-devices we can find
1288 */
1289int
1290adv_attach(adv)
1291 struct adv_softc *adv;

--- 53 unchanged lines hidden (view full) ---

1345 /* highaddr */ BUS_SPACE_MAXADDR,
1346 /* filter */ NULL,
1347 /* filterarg */ NULL,
1348 /* maxsize */ ADV_MAXPHYS,
1349 /* nsegments */ max_sg,
1350 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1351 /* flags */ BUS_DMA_ALLOCNOW,
1352 /* lockfunc */ busdma_lock_mutex,
1353 /* lockarg */ &Giant,
1354 /* lockarg */ &adv->lock,
1354 &adv->buffer_dmat) != 0) {
1355 return (ENXIO);
1356 }
1357 adv->init_level++;
1358
1359 /* DMA tag for our sense buffers */
1360 if (bus_dma_tag_create(
1361 /* parent */ adv->parent_dmat,

--- 4 unchanged lines hidden (view full) ---

1366 /* filter */ NULL,
1367 /* filterarg */ NULL,
1368 /* maxsize */ sizeof(struct scsi_sense_data) *
1369 adv->max_openings,
1370 /* nsegments */ 1,
1371 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1372 /* flags */ 0,
1373 /* lockfunc */ busdma_lock_mutex,
1374 /* lockarg */ &Giant,
1375 /* lockarg */ &adv->lock,
1375 &adv->sense_dmat) != 0) {
1376 return (ENXIO);
1377 }
1378
1379 adv->init_level++;
1380
1381 /* Allocation for our sense buffers */
1382 if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,

--- 10 unchanged lines hidden (view full) ---

1393 adv_map, &adv->sense_physbase, /*flags*/0);
1394
1395 adv->init_level++;
1396
1397 /*
1398 * Fire up the chip
1399 */
1400 if (adv_start_chip(adv) != 1) {
1401 printf("adv%d: Unable to start on board processor. Aborting.\n",
1402 adv->unit);
1402 device_printf(adv->dev,
1403 "Unable to start on board processor. Aborting.\n");
1403 return (ENXIO);
1404 }
1405
1406 /*
1407 * Create the device queue for our SIM.
1408 */
1409 devq = cam_simq_alloc(adv->max_openings);
1410 if (devq == NULL)
1411 return (ENOMEM);
1412
1413 /*
1414 * Construct our SIM entry.
1415 */
1416 adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
1417 &Giant, 1, adv->max_openings, devq);
1417 adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv,
1418 device_get_unit(adv->dev), &adv->lock, 1, adv->max_openings, devq);
1418 if (adv->sim == NULL)
1419 return (ENOMEM);
1420
1421 /*
1422 * Register the bus.
1423 *
1424 * XXX Twin Channel EISA Cards???
1425 */
1427 mtx_lock(&adv->lock);
1426 if (xpt_bus_register(adv->sim, adv->dev, 0) != CAM_SUCCESS) {
1427 cam_sim_free(adv->sim, /*free devq*/TRUE);
1430 mtx_unlock(&adv->lock);
1428 return (ENXIO);
1429 }
1430
1431 if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
1432 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
1433 != CAM_REQ_CMP) {
1434 xpt_bus_deregister(cam_sim_path(adv->sim));
1435 cam_sim_free(adv->sim, /*free devq*/TRUE);
1439 mtx_unlock(&adv->lock);
1436 return (ENXIO);
1437 }
1438
1439 xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
1440 csa.ccb_h.func_code = XPT_SASYNC_CB;
1441 csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
1442 csa.callback = advasync;
1443 csa.callback_arg = adv;
1444 xpt_action((union ccb *)&csa);
1449 mtx_unlock(&adv->lock);
1445 return (0);
1446}
1447MODULE_DEPEND(adv, cam, 1, 1, 1);
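
The change this revision applies throughout the driver is mechanical: the spl(9) critical sections (splcam()/splsoftvm()/splx()) give way to a per-softc mutex, and the timeout(9)/untimeout() handles give way to callout(9) timers initialized with callout_init_mtx() so the handler runs with the softc lock held. A minimal sketch of that conversion, using hypothetical example_* names rather than the driver's own structures, might look like this:

/*
 * Illustrative sketch only -- not part of advansys.c.  It shows the
 * conversion pattern applied by this revision: spl-protected sections
 * become a per-softc mutex, and timeout(9) handles become callout(9)
 * timers.  All example_* names are hypothetical.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct example_softc {
	struct mtx	lock;	/* replaces splcam()/splx() sections */
	struct callout	timer;	/* replaces struct callout_handle + timeout() */
};

static void
example_timeout(void *arg)
{
	struct example_softc *sc = arg;

	/* callout_init_mtx() guarantees the lock is held on entry. */
	mtx_assert(&sc->lock, MA_OWNED);
	/* ... recovery work ... */
}

static void
example_attach(struct example_softc *sc)
{
	mtx_init(&sc->lock, "example", NULL, MTX_DEF);
	callout_init_mtx(&sc->timer, &sc->lock, 0);
}

static void
example_start(struct example_softc *sc, int timo_ticks)
{
	mtx_lock(&sc->lock);
	/* old style: sc->timeout_ch = timeout(example_timeout, sc, timo_ticks); */
	callout_reset(&sc->timer, timo_ticks, example_timeout, sc);
	mtx_unlock(&sc->lock);
}

static void
example_detach(struct example_softc *sc)
{
	mtx_lock(&sc->lock);
	/* old style: untimeout(example_timeout, sc, sc->timeout_ch); */
	callout_stop(&sc->timer);
	mtx_unlock(&sc->lock);
	callout_drain(&sc->timer);	/* wait out any running handler, lock dropped */
	mtx_destroy(&sc->lock);
}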