isp_freebsd.c (104806) -> isp_freebsd.c (111815)
1/* $FreeBSD: head/sys/dev/isp/isp_freebsd.c 104806 2002-10-10 17:29:05Z mjacob $ */
1/* $FreeBSD: head/sys/dev/isp/isp_freebsd.c 111815 2003-03-03 12:15:54Z phk $ */
2/*
3 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4 *
5 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28#include <dev/isp/isp_freebsd.h>
29#include <sys/unistd.h>
30#include <sys/kthread.h>
31#include <machine/stdarg.h> /* for use by isp_prt below */
32#include <sys/conf.h>
33#include <sys/module.h>
34#include <sys/ioccom.h>
35#include <dev/isp/isp_ioctl.h>
36
37
38MODULE_VERSION(isp, 1);
39int isp_announced = 0;
40ispfwfunc *isp_get_firmware_p = NULL;
41
42static d_ioctl_t ispioctl;
43static void isp_intr_enable(void *);
44static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
45static void isp_poll(struct cam_sim *);
46static timeout_t isp_watchdog;
47static void isp_kthread(void *);
48static void isp_action(struct cam_sim *, union ccb *);
49
50
51#define ISP_CDEV_MAJOR 248
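/*
 * Character device entry points. Only open, close and ioctl do anything;
 * the rest are stubs. The per-instance device node is created in
 * isp_attach() and is the target of the ISP_* ioctls handled below.
 */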
52static struct cdevsw isp_cdevsw = {
53 /* open */ nullopen,
54 /* close */ nullclose,
55 /* read */ noread,
56 /* write */ nowrite,
57 /* ioctl */ ispioctl,
58 /* poll */ nopoll,
59 /* mmap */ nommap,
60 /* strategy */ nostrategy,
61 /* name */ "isp",
62 /* maj */ ISP_CDEV_MAJOR,
63 /* dump */ nodump,
64 /* psize */ nopsize,
65 /* flags */ D_TAPE,
53 .d_open = nullopen,
54 .d_close = nullclose,
55 .d_ioctl = ispioctl,
56 .d_name = "isp",
57 .d_maj = ISP_CDEV_MAJOR,
58 .d_flags = D_TAPE,
66};
67
68static struct ispsoftc *isplist = NULL;
69
70void
71isp_attach(struct ispsoftc *isp)
72{
73 int primary, secondary;
74 struct ccb_setasync csa;
75 struct cam_devq *devq;
76 struct cam_sim *sim;
77 struct cam_path *path;
78
79 /*
80 * Establish (in case of 12X0) which bus is the primary.
81 */
82
83 primary = 0;
84 secondary = 1;
85
86 /*
87 * Create the device queue for our SIM(s).
88 */
89 devq = cam_simq_alloc(isp->isp_maxcmds);
90 if (devq == NULL) {
91 return;
92 }
93
94 /*
95 * Construct our SIM entry.
96 */
97 ISPLOCK_2_CAMLOCK(isp);
98 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
99 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
100 if (sim == NULL) {
101 cam_simq_free(devq);
102 CAMLOCK_2_ISPLOCK(isp);
103 return;
104 }
105 CAMLOCK_2_ISPLOCK(isp);
106
107 isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
108 isp->isp_osinfo.ehook.ich_arg = isp;
109 ISPLOCK_2_CAMLOCK(isp);
110 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
111 cam_sim_free(sim, TRUE);
112 CAMLOCK_2_ISPLOCK(isp);
113 isp_prt(isp, ISP_LOGERR,
114 "could not establish interrupt enable hook");
115 return;
116 }
117
118 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
119 cam_sim_free(sim, TRUE);
120 CAMLOCK_2_ISPLOCK(isp);
121 return;
122 }
123
124 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
125 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
126 xpt_bus_deregister(cam_sim_path(sim));
127 cam_sim_free(sim, TRUE);
128 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
129 CAMLOCK_2_ISPLOCK(isp);
130 return;
131 }
132
133 xpt_setup_ccb(&csa.ccb_h, path, 5);
134 csa.ccb_h.func_code = XPT_SASYNC_CB;
135 csa.event_enable = AC_LOST_DEVICE;
136 csa.callback = isp_cam_async;
137 csa.callback_arg = sim;
138 xpt_action((union ccb *)&csa);
139 CAMLOCK_2_ISPLOCK(isp);
140 isp->isp_sim = sim;
141 isp->isp_path = path;
142 /*
143 * Create a kernel thread for fibre channel instances. We
144 * don't have dual channel FC cards.
145 */
146 if (IS_FC(isp)) {
147 ISPLOCK_2_CAMLOCK(isp);
148 /* XXX: LOCK VIOLATION */
149 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
150 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
151 RFHIGHPID, 0, "%s: fc_thrd",
152 device_get_nameunit(isp->isp_dev))) {
153 xpt_bus_deregister(cam_sim_path(sim));
154 cam_sim_free(sim, TRUE);
155 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
156 CAMLOCK_2_ISPLOCK(isp);
157 isp_prt(isp, ISP_LOGERR, "could not create kthread");
158 return;
159 }
160 CAMLOCK_2_ISPLOCK(isp);
161 }
162
163
164 /*
165 * If we have a second channel, construct SIM entry for that.
166 */
167 if (IS_DUALBUS(isp)) {
168 ISPLOCK_2_CAMLOCK(isp);
169 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
170 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
171 if (sim == NULL) {
172 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
173 xpt_free_path(isp->isp_path);
174 cam_simq_free(devq);
175 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
176 return;
177 }
178 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
179 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
180 xpt_free_path(isp->isp_path);
181 cam_sim_free(sim, TRUE);
182 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
183 CAMLOCK_2_ISPLOCK(isp);
184 return;
185 }
186
187 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
188 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
189 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
190 xpt_free_path(isp->isp_path);
191 xpt_bus_deregister(cam_sim_path(sim));
192 cam_sim_free(sim, TRUE);
193 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
194 CAMLOCK_2_ISPLOCK(isp);
195 return;
196 }
197
198 xpt_setup_ccb(&csa.ccb_h, path, 5);
199 csa.ccb_h.func_code = XPT_SASYNC_CB;
200 csa.event_enable = AC_LOST_DEVICE;
201 csa.callback = isp_cam_async;
202 csa.callback_arg = sim;
203 xpt_action((union ccb *)&csa);
204 CAMLOCK_2_ISPLOCK(isp);
205 isp->isp_sim2 = sim;
206 isp->isp_path2 = path;
207 }
208
209#ifdef ISP_TARGET_MODE
210 cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
211 cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
212 cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
213 cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
214#endif
215 /*
216 * Create device nodes
217 */
218 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
219 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
220
221 if (isp->isp_role != ISP_ROLE_NONE) {
222 isp->isp_state = ISP_RUNSTATE;
223 ENABLE_INTS(isp);
224 }
225 if (isplist == NULL) {
226 isplist = isp;
227 } else {
228 struct ispsoftc *tmp = isplist;
229 while (tmp->isp_osinfo.next) {
230 tmp = tmp->isp_osinfo.next;
231 }
232 tmp->isp_osinfo.next = isp;
233 }
234
235}
236
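/*
 * Freeze the CAM SIM queue while the Fibre Channel loop is down, so that
 * new commands are held by CAM instead of being started and timed out.
 * The SIMQFRZ_LOOPDOWN freeze is dropped again by isp_kthread() once the
 * loop comes back up (or once we decide to stop waiting for it).
 */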
237static INLINE void
238isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
239{
240 if (isp->isp_osinfo.simqfrozen == 0) {
241 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
242 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
243 ISPLOCK_2_CAMLOCK(isp);
244 xpt_freeze_simq(isp->isp_sim, 1);
245 CAMLOCK_2_ISPLOCK(isp);
246 } else {
247 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
248 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
249 }
250}
251
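/*
 * Control device ioctl handler; the minor number selects the HBA instance.
 * As a rough illustration only (not part of the driver), a userland
 * consumer of ISP_FC_GETHINFO might look like the sketch below; the
 * "/dev/isp0" path and the minimal error handling are assumptions made
 * purely for the example:
 *
 *	struct isp_hba_device hba;
 *	int fd = open("/dev/isp0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, ISP_FC_GETHINFO, &hba) == 0)
 *		printf("loopid %d, %d Gb\n", hba.fc_loopid, hba.fc_speed);
 */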
252static int
253ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
254{
255 struct ispsoftc *isp;
256 int retval = ENOTTY;
257
258 isp = isplist;
259 while (isp) {
260 if (minor(dev) == device_get_unit(isp->isp_dev)) {
261 break;
262 }
263 isp = isp->isp_osinfo.next;
264 }
265 if (isp == NULL)
266 return (ENXIO);
267
268 switch (cmd) {
269#ifdef ISP_FW_CRASH_DUMP
270 case ISP_GET_FW_CRASH_DUMP:
271 {
272 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
273 size_t sz;
274
275 retval = 0;
276 if (IS_2200(isp))
277 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
278 else
279 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
280 ISP_LOCK(isp);
281 if (ptr && *ptr) {
282 void *uaddr = *((void **) addr);
283 if (copyout(ptr, uaddr, sz)) {
284 retval = EFAULT;
285 } else {
286 *ptr = 0;
287 }
288 } else {
289 retval = ENXIO;
290 }
291 ISP_UNLOCK(isp);
292 break;
293 }
294
295 case ISP_FORCE_CRASH_DUMP:
296 ISP_LOCK(isp);
297 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
298 isp_fw_dump(isp);
299 isp_reinit(isp);
300 ISP_UNLOCK(isp);
301 retval = 0;
302 break;
303#endif
304 case ISP_SDBLEV:
305 {
306 int olddblev = isp->isp_dblev;
307 isp->isp_dblev = *(int *)addr;
308 *(int *)addr = olddblev;
309 retval = 0;
310 break;
311 }
312 case ISP_RESETHBA:
313 ISP_LOCK(isp);
314 isp_reinit(isp);
315 ISP_UNLOCK(isp);
316 retval = 0;
317 break;
318 case ISP_RESCAN:
319 if (IS_FC(isp)) {
320 ISP_LOCK(isp);
321 if (isp_fc_runstate(isp, 5 * 1000000)) {
322 retval = EIO;
323 } else {
324 retval = 0;
325 }
326 ISP_UNLOCK(isp);
327 }
328 break;
329 case ISP_FC_LIP:
330 if (IS_FC(isp)) {
331 ISP_LOCK(isp);
332 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
333 retval = EIO;
334 } else {
335 retval = 0;
336 }
337 ISP_UNLOCK(isp);
338 }
339 break;
340 case ISP_FC_GETDINFO:
341 {
342 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
343 struct lportdb *lp;
344
345 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
346 retval = EINVAL;
347 break;
348 }
349 ISP_LOCK(isp);
350 lp = &FCPARAM(isp)->portdb[ifc->loopid];
351 if (lp->valid) {
352 ifc->loopid = lp->loopid;
353 ifc->portid = lp->portid;
354 ifc->node_wwn = lp->node_wwn;
355 ifc->port_wwn = lp->port_wwn;
356 retval = 0;
357 } else {
358 retval = ENODEV;
359 }
360 ISP_UNLOCK(isp);
361 break;
362 }
363 case ISP_GET_STATS:
364 {
365 isp_stats_t *sp = (isp_stats_t *) addr;
366
367 MEMZERO(sp, sizeof (*sp));
368 sp->isp_stat_version = ISP_STATS_VERSION;
369 sp->isp_type = isp->isp_type;
370 sp->isp_revision = isp->isp_revision;
371 ISP_LOCK(isp);
372 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
373 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
374 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
375 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
376 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
377 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
378 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
379 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
380 ISP_UNLOCK(isp);
381 retval = 0;
382 break;
383 }
384 case ISP_CLR_STATS:
385 ISP_LOCK(isp);
386 isp->isp_intcnt = 0;
387 isp->isp_intbogus = 0;
388 isp->isp_intmboxc = 0;
389 isp->isp_intoasync = 0;
390 isp->isp_rsltccmplt = 0;
391 isp->isp_fphccmplt = 0;
392 isp->isp_rscchiwater = 0;
393 isp->isp_fpcchiwater = 0;
394 ISP_UNLOCK(isp);
395 retval = 0;
396 break;
397 case ISP_FC_GETHINFO:
398 {
399 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
400 MEMZERO(hba, sizeof (*hba));
401 ISP_LOCK(isp);
402 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
403 hba->fc_scsi_supported = 1;
404 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
405 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
406 hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
407 hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
408 ISP_UNLOCK(isp);
409 retval = 0;
410 break;
411 }
412 case ISP_GET_FC_PARAM:
413 {
414 struct isp_fc_param *f = (struct isp_fc_param *) addr;
415
416 if (!IS_FC(isp)) {
417 retval = EINVAL;
418 break;
419 }
420 f->parameter = 0;
421 if (strcmp(f->param_name, "framelength") == 0) {
422 f->parameter = FCPARAM(isp)->isp_maxfrmlen;
423 retval = 0;
424 break;
425 }
426 if (strcmp(f->param_name, "exec_throttle") == 0) {
427 f->parameter = FCPARAM(isp)->isp_execthrottle;
428 retval = 0;
429 break;
430 }
431 if (strcmp(f->param_name, "fullduplex") == 0) {
432 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
433 f->parameter = 1;
434 retval = 0;
435 break;
436 }
437 if (strcmp(f->param_name, "loopid") == 0) {
438 f->parameter = FCPARAM(isp)->isp_loopid;
439 retval = 0;
440 break;
441 }
442 retval = EINVAL;
443 break;
444 }
445 case ISP_SET_FC_PARAM:
446 {
447 struct isp_fc_param *f = (struct isp_fc_param *) addr;
448 u_int32_t param = f->parameter;
449
450 if (!IS_FC(isp)) {
451 retval = EINVAL;
452 break;
453 }
454 f->parameter = 0;
455 if (strcmp(f->param_name, "framelength") == 0) {
456 if (param != 512 && param != 1024 && param != 2048) {
457 retval = EINVAL;
458 break;
459 }
460 FCPARAM(isp)->isp_maxfrmlen = param;
461 retval = 0;
462 break;
463 }
464 if (strcmp(f->param_name, "exec_throttle") == 0) {
465 if (param < 16 || param > 255) {
466 retval = EINVAL;
467 break;
468 }
469 FCPARAM(isp)->isp_execthrottle = param;
470 retval = 0;
471 break;
472 }
473 if (strcmp(f->param_name, "fullduplex") == 0) {
474 if (param != 0 && param != 1) {
475 retval = EINVAL;
476 break;
477 }
478 if (param) {
479 FCPARAM(isp)->isp_fwoptions |=
480 ICBOPT_FULL_DUPLEX;
481 } else {
482 FCPARAM(isp)->isp_fwoptions &=
483 ~ICBOPT_FULL_DUPLEX;
484 }
485 retval = 0;
486 break;
487 }
488 if (strcmp(f->param_name, "loopid") == 0) {
489 if (param < 0 || param > 125) {
490 retval = EINVAL;
491 break;
492 }
493 FCPARAM(isp)->isp_loopid = param;
494 retval = 0;
495 break;
496 }
497 retval = EINVAL;
498 break;
499 }
500 default:
501 break;
502 }
503 return (retval);
504}
505
506static void
507isp_intr_enable(void *arg)
508{
509 struct ispsoftc *isp = arg;
510 if (isp->isp_role != ISP_ROLE_NONE) {
511 ENABLE_INTS(isp);
512 isp->isp_osinfo.intsok = 1;
513 }
514 /* Release our hook so that the boot can continue. */
515 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
516}
517
518/*
519 * Put the target mode functions here, because some are inlines
520 */
521
522#ifdef ISP_TARGET_MODE
523
524static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
525static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
526static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
527static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
528static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
529static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
530static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
531static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
532static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
533static cam_status
534create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
535static void destroy_lun_state(struct ispsoftc *, tstate_t *);
536static void isp_en_lun(struct ispsoftc *, union ccb *);
537static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
538static timeout_t isp_refire_putback_atio;
539static void isp_complete_ctio(union ccb *);
540static void isp_target_putback_atio(union ccb *);
541static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
542static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
543static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
544static int isp_handle_platform_ctio(struct ispsoftc *, void *);
545static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
546static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
547
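/*
 * Target mode lun bookkeeping: tstate_t structures hang off a small hash
 * table (lun_hash), chained singly and keyed on bus/lun via LUN_HASH_FUNC.
 * The hold count taken by get_lun_statep() keeps a tstate_t from being
 * destroyed while a caller is still using it.
 */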
548static INLINE int
549is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
550{
551 tstate_t *tptr;
552 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
553 if (tptr == NULL) {
554 return (0);
555 }
556 do {
557 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
558 return (1);
559 }
560 } while ((tptr = tptr->next) != NULL);
561 return (0);
562}
563
564static INLINE int
565are_any_luns_enabled(struct ispsoftc *isp, int port)
566{
567 int lo, hi;
568 if (IS_DUALBUS(isp)) {
569 lo = (port * (LUN_HASH_SIZE >> 1));
570 hi = lo + (LUN_HASH_SIZE >> 1);
571 } else {
572 lo = 0;
573 hi = LUN_HASH_SIZE;
574 }
575 for (; lo < hi; lo++) {
576 if (isp->isp_osinfo.lun_hash[lo]) {
577 return (1);
578 }
579 }
580 return (0);
581}
582
583static INLINE tstate_t *
584get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
585{
586 tstate_t *tptr = NULL;
587
588 if (lun == CAM_LUN_WILDCARD) {
589 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
590 tptr = &isp->isp_osinfo.tsdflt[bus];
591 tptr->hold++;
592 return (tptr);
593 }
594 } else {
595 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
596 if (tptr == NULL) {
597 return (NULL);
598 }
599 }
600
601 do {
602 if (tptr->lun == lun && tptr->bus == bus) {
603 tptr->hold++;
604 return (tptr);
605 }
606 } while ((tptr = tptr->next) != NULL);
607 return (tptr);
608}
609
610static INLINE void
611rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
612{
613 if (tptr->hold)
614 tptr->hold--;
615}
616
617static INLINE int
618isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
619{
620 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
621 isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
622#ifdef ISP_SMPLOCK
623 if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
624 return (-1);
625 }
626#else
627 if (tsleep(&isp->isp_osinfo.tgtcv0[bus], PZERO, "cv_isp", 0)) {
628 return (-1);
629 }
630#endif
631 }
632 isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
633 return (0);
634}
635
636static INLINE int
637isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
638{
639#ifdef ISP_SMPLOCK
640 if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
641 return (-1);
642 }
643#else
644 if (tsleep(&isp->isp_osinfo.tgtcv1[bus], PZERO, "cv_isp1", 0)) {
645 return (-1);
646 }
647#endif
648 return (0);
649}
650
651static INLINE void
652isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
653{
654 isp->isp_osinfo.rstatus[bus] = status;
655#ifdef ISP_SMPLOCK
656 cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
657#else
658 wakeup(&isp->isp_osinfo.tgtcv1[bus]);
659#endif
660}
661
662static INLINE void
663isp_vsema_rqe(struct ispsoftc *isp, int bus)
664{
665 if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
666 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
667#ifdef ISP_SMPLOCK
668 cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
669#else
670 wakeup(&isp->isp_osinfo.tgtcv0[bus]);
671#endif
672 }
673 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
674}
675
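/*
 * Find the ATIO private data slot matching a tag. Callers pass a tag of
 * zero to locate a free slot, since a slot's tag is cleared once the
 * exchange completes.
 */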
676static INLINE atio_private_data_t *
677isp_get_atpd(struct ispsoftc *isp, int tag)
678{
679 atio_private_data_t *atp;
680 for (atp = isp->isp_osinfo.atpdp;
681 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
682 if (atp->tag == tag)
683 return (atp);
684 }
685 return (NULL);
686}
687
688static cam_status
689create_lun_state(struct ispsoftc *isp, int bus,
690 struct cam_path *path, tstate_t **rslt)
691{
692 cam_status status;
693 lun_id_t lun;
694 int hfx;
695 tstate_t *tptr, *new;
696
697 lun = xpt_path_lun_id(path);
698 if (lun < 0) {
699 return (CAM_LUN_INVALID);
700 }
701 if (is_lun_enabled(isp, bus, lun)) {
702 return (CAM_LUN_ALRDY_ENA);
703 }
704 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
705 if (new == NULL) {
706 return (CAM_RESRC_UNAVAIL);
707 }
708
709 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
710 xpt_path_target_id(path), xpt_path_lun_id(path));
711 if (status != CAM_REQ_CMP) {
712 free(new, M_DEVBUF);
713 return (status);
714 }
715 new->bus = bus;
716 new->lun = lun;
717 SLIST_INIT(&new->atios);
718 SLIST_INIT(&new->inots);
719 new->hold = 1;
720
721 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
722 tptr = isp->isp_osinfo.lun_hash[hfx];
723 if (tptr == NULL) {
724 isp->isp_osinfo.lun_hash[hfx] = new;
725 } else {
726 while (tptr->next)
727 tptr = tptr->next;
728 tptr->next = new;
729 }
730 *rslt = new;
731 return (CAM_REQ_CMP);
732}
733
734static INLINE void
735destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
736{
737 int hfx;
738 tstate_t *lw, *pw;
739
740 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
741 if (tptr->hold) {
742 return;
743 }
744 pw = isp->isp_osinfo.lun_hash[hfx];
745 if (pw == NULL) {
746 return;
747 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
748 isp->isp_osinfo.lun_hash[hfx] = pw->next;
749 } else {
750 lw = pw;
751 pw = lw->next;
752 while (pw) {
753 if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
754 lw->next = pw->next;
755 break;
756 }
757 lw = pw;
758 pw = pw->next;
759 }
760 if (pw == NULL) {
761 return;
762 }
763 }
764 free(tptr, M_DEVBUF);
765}
766
767/*
768 * we enter with our locks held.
769 */
770static void
771isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
772{
773 const char lfmt[] = "Lun now %sabled for target mode on channel %d";
774 struct ccb_en_lun *cel = &ccb->cel;
775 tstate_t *tptr;
776 u_int16_t rstat;
777 int bus, cmd, av, wildcard;
778 lun_id_t lun;
779 target_id_t tgt;
780
781
782 bus = XS_CHANNEL(ccb) & 0x1;
783 tgt = ccb->ccb_h.target_id;
784 lun = ccb->ccb_h.target_lun;
785
786 /*
787 * Do some sanity checking first.
788 */
789
790 if ((lun != CAM_LUN_WILDCARD) &&
791 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
792 ccb->ccb_h.status = CAM_LUN_INVALID;
793 return;
794 }
795
796 if (IS_SCSI(isp)) {
797 sdparam *sdp = isp->isp_param;
798 sdp += bus;
799 if (tgt != CAM_TARGET_WILDCARD &&
800 tgt != sdp->isp_initiator_id) {
801 ccb->ccb_h.status = CAM_TID_INVALID;
802 return;
803 }
804 } else {
805 if (tgt != CAM_TARGET_WILDCARD &&
806 tgt != FCPARAM(isp)->isp_iid) {
807 ccb->ccb_h.status = CAM_TID_INVALID;
808 return;
809 }
810 /*
811 * This is as a good a place as any to check f/w capabilities.
812 */
813 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
814 isp_prt(isp, ISP_LOGERR,
815 "firmware does not support target mode");
816 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
817 return;
818 }
819 /*
820 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
821 * XXX: dorks with our already fragile enable/disable code.
822 */
823 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
824 isp_prt(isp, ISP_LOGERR,
825 "firmware not SCCLUN capable");
826 }
827 }
828
829 if (tgt == CAM_TARGET_WILDCARD) {
830 if (lun == CAM_LUN_WILDCARD) {
831 wildcard = 1;
832 } else {
833 ccb->ccb_h.status = CAM_LUN_INVALID;
834 return;
835 }
836 } else {
837 wildcard = 0;
838 }
839
840 /*
841 * Next check to see whether this is a target/lun wildcard action.
842 *
843 * If so, we know that we can accept commands for luns that haven't
844 * been enabled yet and send them upstream. Otherwise, we have to
845 * handle them locally (if we see them at all).
846 */
847
848 if (wildcard) {
849 tptr = &isp->isp_osinfo.tsdflt[bus];
850 if (cel->enable) {
851 if (isp->isp_osinfo.tmflags[bus] &
852 TM_WILDCARD_ENABLED) {
853 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
854 return;
855 }
856 ccb->ccb_h.status =
857 xpt_create_path(&tptr->owner, NULL,
858 xpt_path_path_id(ccb->ccb_h.path),
859 xpt_path_target_id(ccb->ccb_h.path),
860 xpt_path_lun_id(ccb->ccb_h.path));
861 if (ccb->ccb_h.status != CAM_REQ_CMP) {
862 return;
863 }
864 SLIST_INIT(&tptr->atios);
865 SLIST_INIT(&tptr->inots);
866 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
867 } else {
868 if ((isp->isp_osinfo.tmflags[bus] &
869 TM_WILDCARD_ENABLED) == 0) {
870 ccb->ccb_h.status = CAM_REQ_CMP;
871 return;
872 }
873 if (tptr->hold) {
874 ccb->ccb_h.status = CAM_SCSI_BUSY;
875 return;
876 }
877 xpt_free_path(tptr->owner);
878 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
879 }
880 }
881
882 /*
883 * Now check to see whether this bus needs to be
884 * enabled/disabled with respect to target mode.
885 */
886 av = bus << 31;
887 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
888 av |= ENABLE_TARGET_FLAG;
889 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
890 if (av) {
891 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
892 if (wildcard) {
893 isp->isp_osinfo.tmflags[bus] &=
894 ~TM_WILDCARD_ENABLED;
895 xpt_free_path(tptr->owner);
896 }
897 return;
898 }
899 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
900 isp_prt(isp, ISP_LOGINFO,
901 "Target Mode enabled on channel %d", bus);
902 } else if (cel->enable == 0 &&
903 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
904 if (are_any_luns_enabled(isp, bus)) {
905 ccb->ccb_h.status = CAM_SCSI_BUSY;
906 return;
907 }
908 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
909 if (av) {
910 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
911 return;
912 }
913 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
914 isp_prt(isp, ISP_LOGINFO,
915 "Target Mode disabled on channel %d", bus);
916 }
917
918 if (wildcard) {
919 ccb->ccb_h.status = CAM_REQ_CMP;
920 return;
921 }
922
923 if (cel->enable) {
924 ccb->ccb_h.status =
925 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
926 if (ccb->ccb_h.status != CAM_REQ_CMP) {
927 return;
928 }
929 } else {
930 tptr = get_lun_statep(isp, bus, lun);
931 if (tptr == NULL) {
932 ccb->ccb_h.status = CAM_LUN_INVALID;
933 return;
934 }
935 }
936
937 if (isp_psema_sig_rqe(isp, bus)) {
938 rls_lun_statep(isp, tptr);
939 if (cel->enable)
940 destroy_lun_state(isp, tptr);
941 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
942 return;
943 }
944
945 if (cel->enable) {
946 u_int32_t seq = isp->isp_osinfo.rollinfo++;
947 int c, n, ulun = lun;
948
949 cmd = RQSTYPE_ENABLE_LUN;
950 c = DFLT_CMND_CNT;
951 n = DFLT_INOT_CNT;
952 if (IS_FC(isp) && lun != 0) {
953 cmd = RQSTYPE_MODIFY_LUN;
954 n = 0;
955 /*
956 * For SCC firmware, we only deal with setting
957 * (enabling or modifying) lun 0.
958 */
959 ulun = 0;
960 }
961 rstat = LUN_ERR;
962 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
963 xpt_print_path(ccb->ccb_h.path);
964 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
965 goto out;
966 }
967 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
968 xpt_print_path(ccb->ccb_h.path);
969 isp_prt(isp, ISP_LOGERR,
970 "wait for ENABLE/MODIFY LUN timed out");
971 goto out;
972 }
973 rstat = isp->isp_osinfo.rstatus[bus];
974 if (rstat != LUN_OK) {
975 xpt_print_path(ccb->ccb_h.path);
976 isp_prt(isp, ISP_LOGERR,
977 "ENABLE/MODIFY LUN returned 0x%x", rstat);
978 goto out;
979 }
980 } else {
981 int c, n, ulun = lun;
982 u_int32_t seq;
983
984 rstat = LUN_ERR;
985 seq = isp->isp_osinfo.rollinfo++;
986 cmd = -RQSTYPE_MODIFY_LUN;
987
988 c = DFLT_CMND_CNT;
989 n = DFLT_INOT_CNT;
990 if (IS_FC(isp) && lun != 0) {
991 n = 0;
992 /*
993 * For SCC firmware, we only deal with setting
994 * (enabling or modifying) lun 0.
995 */
996 ulun = 0;
997 }
998 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
999 xpt_print_path(ccb->ccb_h.path);
1000 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1001 goto out;
1002 }
1003 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1004 xpt_print_path(ccb->ccb_h.path);
1005 isp_prt(isp, ISP_LOGERR,
1006 "wait for MODIFY LUN timed out");
1007 goto out;
1008 }
1009 rstat = isp->isp_osinfo.rstatus[bus];
1010 if (rstat != LUN_OK) {
1011 xpt_print_path(ccb->ccb_h.path);
1012 isp_prt(isp, ISP_LOGERR,
1013 "MODIFY LUN returned 0x%x", rstat);
1014 goto out;
1015 }
1016 if (IS_FC(isp) && lun) {
1017 goto out;
1018 }
1019
1020 seq = isp->isp_osinfo.rollinfo++;
1021
1022 rstat = LUN_ERR;
1023 cmd = -RQSTYPE_ENABLE_LUN;
1024 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
1025 xpt_print_path(ccb->ccb_h.path);
1026 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1027 goto out;
1028 }
1029 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1030 xpt_print_path(ccb->ccb_h.path);
1031 isp_prt(isp, ISP_LOGERR,
1032 "wait for DISABLE LUN timed out");
1033 goto out;
1034 }
1035 rstat = isp->isp_osinfo.rstatus[bus];
1036 if (rstat != LUN_OK) {
1037 xpt_print_path(ccb->ccb_h.path);
1038 isp_prt(isp, ISP_LOGWARN,
1039 "DISABLE LUN returned 0x%x", rstat);
1040 goto out;
1041 }
1042 if (are_any_luns_enabled(isp, bus) == 0) {
1043 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1044 if (av) {
1045 isp_prt(isp, ISP_LOGWARN,
1046 "disable target mode on channel %d failed",
1047 bus);
1048 goto out;
1049 }
1050 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1051 xpt_print_path(ccb->ccb_h.path);
1052 isp_prt(isp, ISP_LOGINFO,
1053 "Target Mode disabled on channel %d", bus);
1054 }
1055 }
1056
1057out:
1058 isp_vsema_rqe(isp, bus);
1059
1060 if (rstat != LUN_OK) {
1061 xpt_print_path(ccb->ccb_h.path);
1062 isp_prt(isp, ISP_LOGWARN,
1063 "lun %sable failed", (cel->enable) ? "en" : "dis");
1064 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1065 rls_lun_statep(isp, tptr);
1066 if (cel->enable)
1067 destroy_lun_state(isp, tptr);
1068 } else {
1069 xpt_print_path(ccb->ccb_h.path);
1070 isp_prt(isp, ISP_LOGINFO, lfmt,
1071 (cel->enable) ? "en" : "dis", bus);
1072 rls_lun_statep(isp, tptr);
1073 if (cel->enable == 0) {
1074 destroy_lun_state(isp, tptr);
1075 }
1076 ccb->ccb_h.status = CAM_REQ_CMP;
1077 }
1078}
1079
1080static cam_status
1081isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1082{
1083 tstate_t *tptr;
1084 struct ccb_hdr_slist *lp;
1085 struct ccb_hdr *curelm;
1086 int found;
1087 union ccb *accb = ccb->cab.abort_ccb;
1088
1089 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1090 if (IS_FC(isp) && (accb->ccb_h.target_id !=
1091 ((fcparam *) isp->isp_param)->isp_loopid)) {
1092 return (CAM_PATH_INVALID);
1093 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1094 ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1095 return (CAM_PATH_INVALID);
1096 }
1097 }
1098 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1099 if (tptr == NULL) {
1100 return (CAM_PATH_INVALID);
1101 }
1102 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1103 lp = &tptr->atios;
1104 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1105 lp = &tptr->inots;
1106 } else {
1107 rls_lun_statep(isp, tptr);
1108 return (CAM_UA_ABORT);
1109 }
1110 curelm = SLIST_FIRST(lp);
1111 found = 0;
1112 if (curelm == &accb->ccb_h) {
1113 found = 1;
1114 SLIST_REMOVE_HEAD(lp, sim_links.sle);
1115 } else {
1116 while(curelm != NULL) {
1117 struct ccb_hdr *nextelm;
1118
1119 nextelm = SLIST_NEXT(curelm, sim_links.sle);
1120 if (nextelm == &accb->ccb_h) {
1121 found = 1;
1122 SLIST_NEXT(curelm, sim_links.sle) =
1123 SLIST_NEXT(nextelm, sim_links.sle);
1124 break;
1125 }
1126 curelm = nextelm;
1127 }
1128 }
1129 rls_lun_statep(isp, tptr);
1130 if (found) {
1131 accb->ccb_h.status = CAM_REQ_ABORTED;
1132 return (CAM_REQ_CMP);
1133 }
1134 return(CAM_PATH_INVALID);
1135}
1136
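/*
 * Convert a continue-target-I/O ccb into a CTIO (parallel SCSI) or CTIO2
 * (Fibre Channel) request queue entry covering data movement, status and,
 * where the chosen mode allows it, sense data.
 */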
1137static cam_status
1138isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1139{
1140 void *qe;
1141 struct ccb_scsiio *cso = &ccb->csio;
1142 u_int16_t *hp, save_handle;
1143 u_int16_t nxti, optr;
1144 u_int8_t local[QENTRY_LEN];
1145
1146
1147 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1148 xpt_print_path(ccb->ccb_h.path);
1149 printf("Request Queue Overflow in isp_target_start_ctio\n");
1150 return (CAM_RESRC_UNAVAIL);
1151 }
1152 bzero(local, QENTRY_LEN);
1153
1154 /*
1155 * We're either moving data or completing a command here.
1156 */
1157
1158 if (IS_FC(isp)) {
1159 atio_private_data_t *atp;
1160 ct2_entry_t *cto = (ct2_entry_t *) local;
1161
1162 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1163 cto->ct_header.rqs_entry_count = 1;
1164 cto->ct_iid = cso->init_id;
1165 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1166 cto->ct_lun = ccb->ccb_h.target_lun;
1167 }
1168
1169 atp = isp_get_atpd(isp, cso->tag_id);
1170 if (atp == NULL) {
1171 isp_prt(isp, ISP_LOGERR,
1172 "cannot find private data adjunct for tag %x",
1173 cso->tag_id);
1174 return (-1);
1175 }
1176
1177 cto->ct_rxid = cso->tag_id;
1178 if (cso->dxfer_len == 0) {
1179 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1180 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1181 cto->ct_flags |= CT2_SENDSTATUS;
1182 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1183 cto->ct_resid =
1184 atp->orig_datalen - atp->bytes_xfered;
1185 if (cto->ct_resid < 0) {
1186 cto->rsp.m1.ct_scsi_status |=
1187 CT2_DATA_OVER;
1188 } else if (cto->ct_resid > 0) {
1189 cto->rsp.m1.ct_scsi_status |=
1190 CT2_DATA_UNDER;
1191 }
1192 }
1193 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1194 int m = min(cso->sense_len, MAXRESPLEN);
1195 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1196 cto->rsp.m1.ct_senselen = m;
1197 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1198 }
1199 } else {
1200 cto->ct_flags |= CT2_FLAG_MODE0;
1201 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1202 cto->ct_flags |= CT2_DATA_IN;
1203 } else {
1204 cto->ct_flags |= CT2_DATA_OUT;
1205 }
1206 cto->ct_reloff = atp->bytes_xfered;
1207 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1208 cto->ct_flags |= CT2_SENDSTATUS;
1209 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1210 cto->ct_resid =
1211 atp->orig_datalen -
1212 (atp->bytes_xfered + cso->dxfer_len);
1213 if (cto->ct_resid < 0) {
1214 cto->rsp.m0.ct_scsi_status |=
1215 CT2_DATA_OVER;
1216 } else if (cto->ct_resid > 0) {
1217 cto->rsp.m0.ct_scsi_status |=
1218 CT2_DATA_UNDER;
1219 }
1220 } else {
1221 atp->last_xframt = cso->dxfer_len;
1222 }
1223 /*
1224 * If we're sending data and status back together,
1225 * we can't also send back sense data as well.
1226 */
1227 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1228 }
1229
1230 if (cto->ct_flags & CT2_SENDSTATUS) {
1231 isp_prt(isp, ISP_LOGTDEBUG0,
1232 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1233 cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1234 cso->dxfer_len, cto->ct_resid);
1235 cto->ct_flags |= CT2_CCINCR;
1236 atp->state = ATPD_STATE_LAST_CTIO;
1237 } else
1238 atp->state = ATPD_STATE_CTIO;
1239 cto->ct_timeout = 10;
1240 hp = &cto->ct_syshandle;
1241 } else {
1242 ct_entry_t *cto = (ct_entry_t *) local;
1243
1244 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1245 cto->ct_header.rqs_entry_count = 1;
1246 cto->ct_iid = cso->init_id;
1247 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1248 cto->ct_tgt = ccb->ccb_h.target_id;
1249 cto->ct_lun = ccb->ccb_h.target_lun;
1250 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1251 if (AT_HAS_TAG(cso->tag_id)) {
1252 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1253 cto->ct_flags |= CT_TQAE;
1254 }
1255 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1256 cto->ct_flags |= CT_NODISC;
1257 }
1258 if (cso->dxfer_len == 0) {
1259 cto->ct_flags |= CT_NO_DATA;
1260 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1261 cto->ct_flags |= CT_DATA_IN;
1262 } else {
1263 cto->ct_flags |= CT_DATA_OUT;
1264 }
1265 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1266 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1267 cto->ct_scsi_status = cso->scsi_status;
1268 cto->ct_resid = cso->resid;
1269 isp_prt(isp, ISP_LOGTDEBUG0,
1270 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1271 cto->ct_fwhandle, cso->scsi_status, cso->resid,
1272 cso->tag_id);
1273 }
1274 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1275 cto->ct_timeout = 10;
1276 hp = &cto->ct_syshandle;
1277 }
1278
1279 if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1280 xpt_print_path(ccb->ccb_h.path);
1281 printf("No XFLIST pointers for isp_target_start_ctio\n");
1282 return (CAM_RESRC_UNAVAIL);
1283 }
1284
1285
1286 /*
1287 * Call the dma setup routines for this entry (and any subsequent
1288 * CTIOs) if there's data to move, and then tell the f/w it's got
1289 * new things to play with. As with isp_start's usage of DMA setup,
1290 * any swizzling is done in the machine dependent layer. Because
1291 * of this, we put the request onto the queue area first in native
1292 * format.
1293 */
1294
1295 save_handle = *hp;
1296
1297 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1298 case CMD_QUEUED:
1299 ISP_ADD_REQUEST(isp, nxti);
1300 return (CAM_REQ_INPROG);
1301
1302 case CMD_EAGAIN:
1303 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1304 isp_destroy_handle(isp, save_handle);
1305 return (CAM_RESRC_UNAVAIL);
1306
1307 default:
1308 isp_destroy_handle(isp, save_handle);
1309 return (XS_ERR(ccb));
1310 }
1311}
1312
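/*
 * Hand an ATIO resource back to the firmware once the exchange is done
 * (or could not be completed), then finish off the CTIO ccb. If the
 * request queue is full, the putback is retried from a timeout.
 */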
1313static void
1314isp_refire_putback_atio(void *arg)
1315{
1316 int s = splcam();
1317 isp_target_putback_atio(arg);
1318 splx(s);
1319}
1320
1321static void
1322isp_target_putback_atio(union ccb *ccb)
1323{
1324 struct ispsoftc *isp;
1325 struct ccb_scsiio *cso;
1326 u_int16_t nxti, optr;
1327 void *qe;
1328
1329 isp = XS_ISP(ccb);
1330
1331 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1332 (void) timeout(isp_refire_putback_atio, ccb, 10);
1333 isp_prt(isp, ISP_LOGWARN,
1334 "isp_target_putback_atio: Request Queue Overflow");
1335 return;
1336 }
1337 bzero(qe, QENTRY_LEN);
1338 cso = &ccb->csio;
1339 if (IS_FC(isp)) {
1340 at2_entry_t local, *at = &local;
1341 MEMZERO(at, sizeof (at2_entry_t));
1342 at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1343 at->at_header.rqs_entry_count = 1;
1344 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1345 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1346 } else {
1347 at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1348 }
1349 at->at_status = CT_OK;
1350 at->at_rxid = cso->tag_id;
1351 at->at_iid = cso->ccb_h.target_id;
1352 isp_put_atio2(isp, at, qe);
1353 } else {
1354 at_entry_t local, *at = &local;
1355 MEMZERO(at, sizeof (at_entry_t));
1356 at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1357 at->at_header.rqs_entry_count = 1;
1358 at->at_iid = cso->init_id;
1359 at->at_iid |= XS_CHANNEL(ccb) << 7;
1360 at->at_tgt = cso->ccb_h.target_id;
1361 at->at_lun = cso->ccb_h.target_lun;
1362 at->at_status = CT_OK;
1363 at->at_tag_val = AT_GET_TAG(cso->tag_id);
1364 at->at_handle = AT_GET_HANDLE(cso->tag_id);
1365 isp_put_atio(isp, at, qe);
1366 }
1367 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1368 ISP_ADD_REQUEST(isp, nxti);
1369 isp_complete_ctio(ccb);
1370}
1371
1372static void
1373isp_complete_ctio(union ccb *ccb)
1374{
1375 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1376 ccb->ccb_h.status |= CAM_REQ_CMP;
1377 }
1378 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1379 xpt_done(ccb);
1380}
1381
1382/*
1383 * Handle ATIO stuff that the generic code can't.
1384 * This means handling CDBs.
1385 */
1386
1387static int
1388isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1389{
1390 tstate_t *tptr;
1391 int status, bus, iswildcard;
1392 struct ccb_accept_tio *atiop;
1393
1394 /*
1395 * The firmware status (except for the QLTM_SVALID bit)
1396 * indicates why this ATIO was sent to us.
1397 *
1398 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1399 *
1400 * If the DISCONNECTS DISABLED bit is set in the flags field,
1401 * we're still connected on the SCSI bus.
1402 */
1403 status = aep->at_status;
1404 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1405 /*
1406 * Bus Phase Sequence error. We should have sense data
1407 * suggested by the f/w. I'm not sure quite yet what
1408 * to do about this for CAM.
1409 */
1410 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1411 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1412 return (0);
1413 }
1414 if ((status & ~QLTM_SVALID) != AT_CDB) {
1415 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1416 status);
1417 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1418 return (0);
1419 }
1420
1421 bus = GET_BUS_VAL(aep->at_iid);
1422 tptr = get_lun_statep(isp, bus, aep->at_lun);
1423 if (tptr == NULL) {
1424 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1425 iswildcard = 1;
1426 } else {
1427 iswildcard = 0;
1428 }
1429
1430 if (tptr == NULL) {
1431 /*
1432 * Because we can't autofeed sense data back with
1433 * a command for parallel SCSI, we can't give back
1434 * a CHECK CONDITION. We'll give back a BUSY status
1435 * instead. This works out okay because the only
1436 * time we should, in fact, get this, is in the
1437 * case that somebody configured us without the
1438 * blackhole driver, so they get what they deserve.
1439 */
1440 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1441 return (0);
1442 }
1443
1444 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1445 if (atiop == NULL) {
1446 /*
1447 * Because we can't autofeed sense data back with
1448 * a command for parallel SCSI, we can't give back
1449 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1450 * instead. This works out okay because the only time we
1451 * should, in fact, get this, is in the case that we've
1452 * run out of ATIOS.
1453 */
1454 xpt_print_path(tptr->owner);
1455 isp_prt(isp, ISP_LOGWARN,
1456 "no ATIOS for lun %d from initiator %d on channel %d",
1457 aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1458 if (aep->at_flags & AT_TQAE)
1459 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1460 else
1461 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1462 rls_lun_statep(isp, tptr);
1463 return (0);
1464 }
1465 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1466 if (iswildcard) {
1467 atiop->ccb_h.target_id = aep->at_tgt;
1468 atiop->ccb_h.target_lun = aep->at_lun;
1469 }
1470 if (aep->at_flags & AT_NODISC) {
1471 atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1472 } else {
1473 atiop->ccb_h.flags = 0;
1474 }
1475
1476 if (status & QLTM_SVALID) {
1477 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1478 atiop->sense_len = amt;
1479 MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1480 } else {
1481 atiop->sense_len = 0;
1482 }
1483
1484 atiop->init_id = GET_IID_VAL(aep->at_iid);
1485 atiop->cdb_len = aep->at_cdblen;
1486 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1487 atiop->ccb_h.status = CAM_CDB_RECVD;
1488 /*
1489 * Construct a tag 'id' based upon tag value (which may be 0..255)
1490 * and the handle (which we have to preserve).
1491 */
1492 AT_MAKE_TAGID(atiop->tag_id, aep);
1493 if (aep->at_flags & AT_TQAE) {
1494 atiop->tag_action = aep->at_tag_type;
1495 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1496 }
1497 xpt_done((union ccb*)atiop);
1498 isp_prt(isp, ISP_LOGTDEBUG0,
1499 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1500 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1501 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1502 aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1503 "nondisc" : "disconnecting");
1504 rls_lun_statep(isp, tptr);
1505 return (0);
1506}
1507
1508static int
1509isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1510{
1511 lun_id_t lun;
1512 tstate_t *tptr;
1513 struct ccb_accept_tio *atiop;
1514 atio_private_data_t *atp;
1515
1516 /*
1517 * The firmware status (except for the QLTM_SVALID bit)
1518 * indicates why this ATIO was sent to us.
1519 *
1520 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1521 */
1522 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1523 isp_prt(isp, ISP_LOGWARN,
1524 "bogus atio (0x%x) leaked to platform", aep->at_status);
1525 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1526 return (0);
1527 }
1528
1529 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1530 lun = aep->at_scclun;
1531 } else {
1532 lun = aep->at_lun;
1533 }
1534 tptr = get_lun_statep(isp, 0, lun);
1535 if (tptr == NULL) {
1536 isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1537 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1538 }
1539
1540 if (tptr == NULL) {
1541 /*
1542 * What we'd like to know is whether or not we have a listener
1543 * upstream that really hasn't configured yet. If we do, then
1544 * we can give a more sensible reply here. If not, then we can
1545 * reject this out of hand.
1546 *
1547 * Choices for what to send were
1548 *
1549 * Not Ready, Unit Not Self-Configured Yet
1550 * (0x2,0x3e,0x00)
1551 *
1552 * for the former and
1553 *
1554 * Illegal Request, Logical Unit Not Supported
1555 * (0x5,0x25,0x00)
1556 *
1557 * for the latter.
1558 *
1559 * We used to decide whether there was at least one listener
1560 * based upon whether the black hole driver was configured.
1561 * However, recent config(8) changes have made this hard to do
1562 * at this time.
1563 *
1564 */
1565 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1566 return (0);
1567 }
1568
1569 atp = isp_get_atpd(isp, 0);
1570 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1571 if (atiop == NULL || atp == NULL) {
1572 /*
1573 * Because we can't autofeed sense data back with
1574 * a command for parallel SCSI, we can't give back
1575 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1576 * instead. This works out okay because the only time we
1577 * should, in fact, get this, is in the case that we've
1578 * run out of ATIOS.
1579 */
1580 xpt_print_path(tptr->owner);
1581 isp_prt(isp, ISP_LOGWARN,
1582 "no %s for lun %d from initiator %d",
1583 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1584 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1585 rls_lun_statep(isp, tptr);
1586 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1587 return (0);
1588 }
1589 atp->state = ATPD_STATE_ATIO;
1590 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1591 tptr->atio_count--;
1592 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1593 lun, tptr->atio_count);
1594
1595 if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1596 atiop->ccb_h.target_id =
1597 ((fcparam *)isp->isp_param)->isp_loopid;
1598 atiop->ccb_h.target_lun = lun;
1599 }
1600 /*
1601 * We don't get 'suggested' sense data as we do with SCSI cards.
1602 */
1603 atiop->sense_len = 0;
1604
1605 atiop->init_id = aep->at_iid;
1606 atiop->cdb_len = ATIO2_CDBLEN;
1607 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1608 atiop->ccb_h.status = CAM_CDB_RECVD;
1609 atiop->tag_id = aep->at_rxid;
1610 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1611 case ATIO2_TC_ATTR_SIMPLEQ:
1612 atiop->tag_action = MSG_SIMPLE_Q_TAG;
1613 break;
1614 case ATIO2_TC_ATTR_HEADOFQ:
1615 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1616 break;
1617 case ATIO2_TC_ATTR_ORDERED:
1618 atiop->tag_action = MSG_ORDERED_Q_TAG;
1619 break;
1620 case ATIO2_TC_ATTR_ACAQ: /* ?? */
1621 case ATIO2_TC_ATTR_UNTAGGED:
1622 default:
1623 atiop->tag_action = 0;
1624 break;
1625 }
1626 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1627
1628 atp->tag = atiop->tag_id;
1629 atp->lun = lun;
1630 atp->orig_datalen = aep->at_datalen;
1631 atp->last_xframt = 0;
1632 atp->bytes_xfered = 0;
1633 atp->state = ATPD_STATE_CAM;
1634 xpt_done((union ccb*)atiop);
1635
1636 isp_prt(isp, ISP_LOGTDEBUG0,
1637 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1638 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1639 lun, aep->at_taskflags, aep->at_datalen);
1640 rls_lun_statep(isp, tptr);
1641 return (0);
1642}
1643
1644static int
1645isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1646{
1647 union ccb *ccb;
1648 int sentstatus, ok, notify_cam, resid = 0;
1649 u_int16_t tval;
1650
1651 /*
1652 * CTIO and CTIO2 are close enough....
1653 */
1654
1655 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1656 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1657 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1658
1659 if (IS_FC(isp)) {
1660 ct2_entry_t *ct = arg;
1661 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1662 if (atp == NULL) {
1663 isp_prt(isp, ISP_LOGERR,
1664 "cannot find adjunct for %x after I/O",
1665 ct->ct_rxid);
1666 return (0);
1667 }
1668 sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1669 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1670 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1671 ccb->ccb_h.status |= CAM_SENT_SENSE;
1672 }
1673 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1674 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1675 resid = ct->ct_resid;
1676 atp->bytes_xfered += (atp->last_xframt - resid);
1677 atp->last_xframt = 0;
1678 }
1679 if (sentstatus || !ok) {
1680 atp->tag = 0;
1681 }
1682 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1683 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1684 ct->ct_rxid, ct->ct_status, ct->ct_flags,
1685 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1686 resid, sentstatus? "FIN" : "MID");
1687 tval = ct->ct_rxid;
1688
1689 /* XXX: should really come after isp_complete_ctio */
1690 atp->state = ATPD_STATE_PDON;
1691 } else {
1692 ct_entry_t *ct = arg;
1693 sentstatus = ct->ct_flags & CT_SENDSTATUS;
1694 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1695 /*
1696 * We *ought* to be able to get back to the original ATIO
1697 * here, but for some reason this gets lost. It's just as
1698 * well because it's squirrelled away as part of periph
1699 * private data.
1700 *
1701 * We can live without it as long as we continue to use
1702 * the auto-replenish feature for CTIOs.
1703 */
1704 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1705 if (ct->ct_status & QLTM_SVALID) {
1706 char *sp = (char *)ct;
1707 sp += CTIO_SENSE_OFFSET;
1708 ccb->csio.sense_len =
1709 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1710 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1711 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1712 }
1713 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1714 resid = ct->ct_resid;
1715 }
1716 isp_prt(isp, ISP_LOGTDEBUG0,
1717 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1718 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1719 ct->ct_status, ct->ct_flags, resid,
1720 sentstatus? "FIN" : "MID");
1721 tval = ct->ct_fwhandle;
1722 }
1723 ccb->csio.resid += resid;
1724
1725 /*
1726 * We're here either because intermediate data transfers are done
1727 * and/or the final status CTIO (which may have joined with a
1728 * Data Transfer) is done.
1729 *
1730 * In any case, for this platform, the upper layers figure out
1731 * what to do next, so all we do here is collect status and
1732 * pass information along. Any DMA handles have already been
1733 * freed.
1734 */
1735 if (notify_cam == 0) {
1736 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
1737 return (0);
1738 }
1739
1740 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1741 (sentstatus)? " FINAL " : "MIDTERM ", tval);
1742
1743 if (!ok) {
1744 isp_target_putback_atio(ccb);
1745 } else {
1746 isp_complete_ctio(ccb);
1747
1748 }
1749 return (0);
1750}
1751
1752static int
1753isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1754{
1755 return (0); /* XXXX */
1756}
1757
1758static int
1759isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1760{
1761
1762 switch (inp->in_status) {
1763 case IN_PORT_LOGOUT:
1764 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1765 inp->in_iid);
1766 break;
1767 case IN_PORT_CHANGED:
1768 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1769 inp->in_iid);
1770 break;
1771 case IN_GLOBAL_LOGO:
1772 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1773 break;
1774 case IN_ABORT_TASK:
1775 {
1776 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1777 struct ccb_immed_notify *inot = NULL;
1778
1779 if (atp) {
1780 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1781 if (tptr) {
1782 inot = (struct ccb_immed_notify *)
1783 SLIST_FIRST(&tptr->inots);
1784 if (inot) {
1785 SLIST_REMOVE_HEAD(&tptr->inots,
1786 sim_links.sle);
1787 }
1788 }
1789 isp_prt(isp, ISP_LOGWARN,
1790 "abort task RX_ID %x IID %d state %d",
1791 inp->in_seqid, inp->in_iid, atp->state);
1792 } else {
1793 isp_prt(isp, ISP_LOGWARN,
1794 "abort task RX_ID %x from iid %d, state unknown",
1795 inp->in_seqid, inp->in_iid);
1796 }
1797 if (inot) {
1798 inot->initiator_id = inp->in_iid;
1799 inot->sense_len = 0;
1800 inot->message_args[0] = MSG_ABORT_TAG;
1801 inot->message_args[1] = inp->in_seqid & 0xff;
1802 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1803 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1804 xpt_done((union ccb *)inot);
1805 }
1806 break;
1807 }
1808 default:
1809 break;
1810 }
1811 return (0);
1812}
1813#endif
1814
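/*
 * CAM async event callback. For AC_LOST_DEVICE on parallel SCSI we push
 * the stored NVRAM/default transfer settings back to the chip for that
 * target, so a device that later reappears there renegotiates from a
 * known starting point.
 */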
1815static void
1816isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1817{
1818 struct cam_sim *sim;
1819 struct ispsoftc *isp;
1820
1821 sim = (struct cam_sim *)cbarg;
1822 isp = (struct ispsoftc *) cam_sim_softc(sim);
1823 switch (code) {
1824 case AC_LOST_DEVICE:
1825 if (IS_SCSI(isp)) {
1826 u_int16_t oflags, nflags;
1827 sdparam *sdp = isp->isp_param;
1828 int tgt;
1829
1830 tgt = xpt_path_target_id(path);
1831 if (tgt >= 0) {
1832 sdp += cam_sim_bus(sim);
1833 ISP_LOCK(isp);
1834 nflags = sdp->isp_devparam[tgt].nvrm_flags;
1835#ifndef ISP_TARGET_MODE
1836 nflags &= DPARM_SAFE_DFLT;
1837 if (isp->isp_loaded_fw) {
1838 nflags |= DPARM_NARROW | DPARM_ASYNC;
1839 }
1840#else
1841 nflags = DPARM_DEFAULT;
1842#endif
1843 oflags = sdp->isp_devparam[tgt].goal_flags;
1844 sdp->isp_devparam[tgt].goal_flags = nflags;
1845 sdp->isp_devparam[tgt].dev_update = 1;
1846 isp->isp_update |= (1 << cam_sim_bus(sim));
1847 (void) isp_control(isp,
1848 ISPCTL_UPDATE_PARAMS, NULL);
1849 sdp->isp_devparam[tgt].goal_flags = oflags;
1850 ISP_UNLOCK(isp);
1851 }
1852 }
1853 break;
1854 default:
1855 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1856 break;
1857 }
1858}
1859
1860static void
1861isp_poll(struct cam_sim *sim)
1862{
1863 struct ispsoftc *isp = cam_sim_softc(sim);
1864 u_int16_t isr, sema, mbox;
1865
1866 ISP_LOCK(isp);
1867 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1868 isp_intr(isp, isr, sema, mbox);
1869 }
1870 ISP_UNLOCK(isp);
1871}
1872
1873
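/*
 * Watchdog run when a command's timeout fires. If the command is still
 * outstanding we first push a SYNC_ALL marker and re-arm the timer to
 * allow one more grace period; if it is still outstanding when that
 * expires we abort it, release its DMA resources and complete it with
 * CAM_CMD_TIMEOUT.
 */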
1874static void
1875isp_watchdog(void *arg)
1876{
1877 XS_T *xs = arg;
1878 struct ispsoftc *isp = XS_ISP(xs);
1879 u_int32_t handle;
1880 int iok;
1881
1882 /*
1883 * We've decided this command is dead. Make sure we're not trying
1884 * to kill a command that's already dead by getting its handle and
1885 * seeing whether it's still alive.
1886 */
1887 ISP_LOCK(isp);
1888 iok = isp->isp_osinfo.intsok;
1889 isp->isp_osinfo.intsok = 0;
1890 handle = isp_find_handle(isp, xs);
1891 if (handle) {
1892 u_int16_t isr, sema, mbox;
1893
1894 if (XS_CMD_DONE_P(xs)) {
1895 isp_prt(isp, ISP_LOGDEBUG1,
1896 "watchdog found done cmd (handle 0x%x)", handle);
1897 ISP_UNLOCK(isp);
1898 return;
1899 }
1900
1901 if (XS_CMD_WDOG_P(xs)) {
1902 isp_prt(isp, ISP_LOGDEBUG2,
1903 "recursive watchdog (handle 0x%x)", handle);
1904 ISP_UNLOCK(isp);
1905 return;
1906 }
1907
1908 XS_CMD_S_WDOG(xs);
1909 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1910 isp_intr(isp, isr, sema, mbox);
1911 }
1912 if (XS_CMD_DONE_P(xs)) {
1913 isp_prt(isp, ISP_LOGDEBUG2,
1914 "watchdog cleanup for handle 0x%x", handle);
1915 xpt_done((union ccb *) xs);
1916 } else if (XS_CMD_GRACE_P(xs)) {
1917 /*
1918 * Make sure the command is *really* dead before we
1919 * release the handle (and DMA resources) for reuse.
1920 */
1921 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1922
1923 /*
1924 			 * After this point, the command is really dead.
1925 */
1926 if (XS_XFRLEN(xs)) {
1927 ISP_DMAFREE(isp, xs, handle);
1928 }
1929 isp_destroy_handle(isp, handle);
1930 xpt_print_path(xs->ccb_h.path);
1931 isp_prt(isp, ISP_LOGWARN,
1932 "watchdog timeout for handle 0x%x", handle);
1933 XS_SETERR(xs, CAM_CMD_TIMEOUT);
1934 XS_CMD_C_WDOG(xs);
1935 isp_done(xs);
1936 } else {
1937 u_int16_t nxti, optr;
1938 			ispreq_t local, *mp = &local, *qe;
1939
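			/*
			 * First expiration for this command: clear the
			 * watchdog flag, rearm the watchdog, mark the
			 * command as living on 'grace' time, and push a
			 * SYNC_ALL marker through the request queue to
			 * flush out any completions the firmware may be
			 * sitting on.  If the command is still around
			 * when the watchdog fires again, it gets aborted
			 * above.
			 */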
1940 XS_CMD_C_WDOG(xs);
1941 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1942 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1943 ISP_UNLOCK(isp);
1944 return;
1945 }
1946 XS_CMD_S_GRACE(xs);
1947 MEMZERO((void *) mp, sizeof (*mp));
1948 mp->req_header.rqs_entry_count = 1;
1949 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1950 mp->req_modifier = SYNC_ALL;
1951 mp->req_target = XS_CHANNEL(xs) << 7;
1952 isp_put_request(isp, mp, qe);
1953 ISP_ADD_REQUEST(isp, nxti);
1954 }
1955 } else {
1956 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1957 }
1958 isp->isp_osinfo.intsok = iok;
1959 ISP_UNLOCK(isp);
1960}
1961
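/*
 * Fibre Channel state recovery thread.  It waits for loop/fabric state
 * to settle (isp_fc_runstate), releases the SIMQ if we had frozen it for
 * a loopdown condition, and then sleeps until it is kicked again via
 * kthread_cv (by a Change Notify or a deferred command).
 */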
1962static void
1963isp_kthread(void *arg)
1964{
1965 struct ispsoftc *isp = arg;
1966
1967#ifdef ISP_SMPLOCK
1968 mtx_lock(&isp->isp_lock);
1969#else
1970 mtx_lock(&Giant);
1971#endif
1972 /*
1973 	 * The first loop is for the case where we have not yet
1974 	 * gotten good fibre channel state.
1975 */
1976 for (;;) {
1977 int wasfrozen;
1978
1979 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1980 while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1981 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1982 if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1983 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1984 if (FCPARAM(isp)->loop_seen_once == 0 ||
1985 isp->isp_osinfo.ktmature == 0) {
1986 break;
1987 }
1988 }
1989#ifdef ISP_SMPLOCK
1990 msleep(isp_kthread, &isp->isp_lock,
1991 PRIBIO, "isp_fcthrd", hz);
1992#else
1993 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1994#endif
1995 }
1996
1997 /*
1998 * Even if we didn't get good loop state we may be
1999 * unfreezing the SIMQ so that we can kill off
2000 * commands (if we've never seen loop before, for example).
2001 */
2002 isp->isp_osinfo.ktmature = 1;
2003 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2004 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2005 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2006 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2007 ISPLOCK_2_CAMLOCK(isp);
2008 xpt_release_simq(isp->isp_sim, 1);
2009 CAMLOCK_2_ISPLOCK(isp);
2010 }
2011 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
2012#ifdef ISP_SMPLOCK
2013 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2014#else
2015 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
2016#endif
2017 }
2018}
2019
2020static void
2021isp_action(struct cam_sim *sim, union ccb *ccb)
2022{
2023 int bus, tgt, error;
2024 struct ispsoftc *isp;
2025 struct ccb_trans_settings *cts;
2026
2027 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2028
2029 isp = (struct ispsoftc *)cam_sim_softc(sim);
2030 ccb->ccb_h.sim_priv.entries[0].field = 0;
2031 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2032 if (isp->isp_state != ISP_RUNSTATE &&
2033 ccb->ccb_h.func_code == XPT_SCSI_IO) {
2034 CAMLOCK_2_ISPLOCK(isp);
2035 isp_init(isp);
2036 if (isp->isp_state != ISP_INITSTATE) {
2037 ISP_UNLOCK(isp);
2038 /*
2039 * Lie. Say it was a selection timeout.
2040 */
2041 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2042 xpt_freeze_devq(ccb->ccb_h.path, 1);
2043 xpt_done(ccb);
2044 return;
2045 }
2046 isp->isp_state = ISP_RUNSTATE;
2047 ISPLOCK_2_CAMLOCK(isp);
2048 }
2049 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2050
2051
2052 switch (ccb->ccb_h.func_code) {
2053 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2054 /*
2055 * Do a couple of preliminary checks...
2056 */
2057 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2058 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2059 ccb->ccb_h.status = CAM_REQ_INVALID;
2060 xpt_done(ccb);
2061 break;
2062 }
2063 }
2064#ifdef DIAGNOSTIC
2065 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2066 ccb->ccb_h.status = CAM_PATH_INVALID;
2067 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2068 ccb->ccb_h.status = CAM_PATH_INVALID;
2069 }
2070 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2071 isp_prt(isp, ISP_LOGERR,
2072 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2073 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2074 xpt_done(ccb);
2075 break;
2076 }
2077#endif
2078 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2079 CAMLOCK_2_ISPLOCK(isp);
2080 error = isp_start((XS_T *) ccb);
2081 switch (error) {
2082 case CMD_QUEUED:
2083 ccb->ccb_h.status |= CAM_SIM_QUEUED;
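			/*
			 * Convert the CAM timeout (milliseconds, or the
			 * 60 second CAM_TIME_DEFAULT) into ticks, add a
			 * couple of seconds of slack, and clamp the
			 * result so it fits the int that timeout() takes.
			 */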
2084 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2085 u_int64_t ticks = (u_int64_t) hz;
2086 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2087 ticks = 60 * 1000 * ticks;
2088 else
2089 ticks = ccb->ccb_h.timeout * hz;
2090 ticks = ((ticks + 999) / 1000) + hz + hz;
2091 if (ticks >= 0x80000000) {
2092 isp_prt(isp, ISP_LOGERR,
2093 "timeout overflow");
2094 ticks = 0x7fffffff;
2095 }
2096 ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2097 (caddr_t)ccb, (int)ticks);
2098 } else {
2099 callout_handle_init(&ccb->ccb_h.timeout_ch);
2100 }
2101 ISPLOCK_2_CAMLOCK(isp);
2102 break;
2103 case CMD_RQLATER:
2104 /*
2105 * This can only happen for Fibre Channel
2106 */
2107 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2108 if (FCPARAM(isp)->loop_seen_once == 0 &&
2109 isp->isp_osinfo.ktmature) {
2110 ISPLOCK_2_CAMLOCK(isp);
2111 XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2112 xpt_done(ccb);
2113 break;
2114 }
2115#ifdef ISP_SMPLOCK
2116 cv_signal(&isp->isp_osinfo.kthread_cv);
2117#else
2118 wakeup(&isp->isp_osinfo.kthread_cv);
2119#endif
2120 isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2121 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2122 ISPLOCK_2_CAMLOCK(isp);
2123 xpt_done(ccb);
2124 break;
2125 case CMD_EAGAIN:
2126 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2127 ISPLOCK_2_CAMLOCK(isp);
2128 xpt_done(ccb);
2129 break;
2130 case CMD_COMPLETE:
2131 isp_done((struct ccb_scsiio *) ccb);
2132 ISPLOCK_2_CAMLOCK(isp);
2133 break;
2134 default:
2135 isp_prt(isp, ISP_LOGERR,
2136 "What's this? 0x%x at %d in file %s",
2137 error, __LINE__, __FILE__);
2138 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2139 xpt_done(ccb);
2140 ISPLOCK_2_CAMLOCK(isp);
2141 }
2142 break;
2143
2144#ifdef ISP_TARGET_MODE
2145 case XPT_EN_LUN: /* Enable LUN as a target */
2146 {
2147 int iok;
2148 CAMLOCK_2_ISPLOCK(isp);
2149 iok = isp->isp_osinfo.intsok;
2150 isp->isp_osinfo.intsok = 0;
2151 isp_en_lun(isp, ccb);
2152 isp->isp_osinfo.intsok = iok;
2153 ISPLOCK_2_CAMLOCK(isp);
2154 xpt_done(ccb);
2155 break;
2156 }
2157 case XPT_NOTIFY_ACK: /* recycle notify ack */
2158 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
2159 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2160 {
2161 tstate_t *tptr =
2162 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2163 if (tptr == NULL) {
2164 ccb->ccb_h.status = CAM_LUN_INVALID;
2165 xpt_done(ccb);
2166 break;
2167 }
2168 ccb->ccb_h.sim_priv.entries[0].field = 0;
2169 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2170 ccb->ccb_h.flags = 0;
2171
2172 CAMLOCK_2_ISPLOCK(isp);
2173 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2174 /*
2175 			 * Note that the command itself may not be done yet;
2176 			 * it may not even have had the first CTIO sent.
2177 */
2178 tptr->atio_count++;
2179 isp_prt(isp, ISP_LOGTDEBUG0,
2180 "Put FREE ATIO2, lun %d, count now %d",
2181 ccb->ccb_h.target_lun, tptr->atio_count);
2182 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2183 sim_links.sle);
2184 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2185 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2186 sim_links.sle);
2187 } else {
2188 ;
2189 }
2190 rls_lun_statep(isp, tptr);
2191 ccb->ccb_h.status = CAM_REQ_INPROG;
2192 ISPLOCK_2_CAMLOCK(isp);
2193 break;
2194 }
2195 case XPT_CONT_TARGET_IO:
2196 {
2197 CAMLOCK_2_ISPLOCK(isp);
2198 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2199 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2200 isp_prt(isp, ISP_LOGWARN,
2201 "XPT_CONT_TARGET_IO: status 0x%x",
2202 ccb->ccb_h.status);
2203 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2204 ISPLOCK_2_CAMLOCK(isp);
2205 xpt_done(ccb);
2206 } else {
2207 ISPLOCK_2_CAMLOCK(isp);
2208 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2209 }
2210 break;
2211 }
2212#endif
2213 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2214
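		/*
		 * The bus number is encoded in the upper 16 bits of the
		 * word handed to ISPCTL_RESET_DEV.
		 */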
2215 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2216 tgt = ccb->ccb_h.target_id;
2217 tgt |= (bus << 16);
2218
2219 CAMLOCK_2_ISPLOCK(isp);
2220 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2221 ISPLOCK_2_CAMLOCK(isp);
2222 if (error) {
2223 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2224 } else {
2225 ccb->ccb_h.status = CAM_REQ_CMP;
2226 }
2227 xpt_done(ccb);
2228 break;
2229 case XPT_ABORT: /* Abort the specified CCB */
2230 {
2231 union ccb *accb = ccb->cab.abort_ccb;
2232 CAMLOCK_2_ISPLOCK(isp);
2233 switch (accb->ccb_h.func_code) {
2234#ifdef ISP_TARGET_MODE
2235 case XPT_ACCEPT_TARGET_IO:
2236 case XPT_IMMED_NOTIFY:
2237 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2238 break;
2239 case XPT_CONT_TARGET_IO:
2240 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2241 ccb->ccb_h.status = CAM_UA_ABORT;
2242 break;
2243#endif
2244 case XPT_SCSI_IO:
2245 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2246 if (error) {
2247 ccb->ccb_h.status = CAM_UA_ABORT;
2248 } else {
2249 ccb->ccb_h.status = CAM_REQ_CMP;
2250 }
2251 break;
2252 default:
2253 ccb->ccb_h.status = CAM_REQ_INVALID;
2254 break;
2255 }
2256 ISPLOCK_2_CAMLOCK(isp);
2257 xpt_done(ccb);
2258 break;
2259 }
2260#ifdef CAM_NEW_TRAN_CODE
2261#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2262#else
2263#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
2264#endif
2265 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2266 cts = &ccb->cts;
2267 if (!IS_CURRENT_SETTINGS(cts)) {
2268 ccb->ccb_h.status = CAM_REQ_INVALID;
2269 xpt_done(ccb);
2270 break;
2271 }
2272 tgt = cts->ccb_h.target_id;
2273 CAMLOCK_2_ISPLOCK(isp);
2274 if (IS_SCSI(isp)) {
2275#ifndef CAM_NEW_TRAN_CODE
2276 sdparam *sdp = isp->isp_param;
2277 u_int16_t *dptr;
2278
2279 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2280
2281 sdp += bus;
2282 /*
2283 * We always update (internally) from goal_flags
2284 * so any request to change settings just gets
2285 * vectored to that location.
2286 */
2287 dptr = &sdp->isp_devparam[tgt].goal_flags;
2288
2289 /*
2290 			 * Note that these operations affect the
2291 			 * goal flags (goal_flags), not the current
2292 			 * state flags. Then we mark things so that
2293 			 * the next operation to this HBA will cause
2294 			 * the update to occur.
2295 */
2296 if (cts->valid & CCB_TRANS_DISC_VALID) {
2297 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2298 *dptr |= DPARM_DISC;
2299 } else {
2300 *dptr &= ~DPARM_DISC;
2301 }
2302 }
2303 if (cts->valid & CCB_TRANS_TQ_VALID) {
2304 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2305 *dptr |= DPARM_TQING;
2306 } else {
2307 *dptr &= ~DPARM_TQING;
2308 }
2309 }
2310 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2311 switch (cts->bus_width) {
2312 case MSG_EXT_WDTR_BUS_16_BIT:
2313 *dptr |= DPARM_WIDE;
2314 break;
2315 default:
2316 *dptr &= ~DPARM_WIDE;
2317 }
2318 }
2319 /*
2320 * Any SYNC RATE of nonzero and SYNC_OFFSET
2321 * of nonzero will cause us to go to the
2322 * selected (from NVRAM) maximum value for
2323 * this device. At a later point, we'll
2324 * allow finer control.
2325 */
2326 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2327 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2328 (cts->sync_offset > 0)) {
2329 *dptr |= DPARM_SYNC;
2330 } else {
2331 *dptr &= ~DPARM_SYNC;
2332 }
2333 *dptr |= DPARM_SAFE_DFLT;
2334#else
2335 struct ccb_trans_settings_scsi *scsi =
2336 &cts->proto_specific.scsi;
2337 struct ccb_trans_settings_spi *spi =
2338 &cts->xport_specific.spi;
2339 sdparam *sdp = isp->isp_param;
2340 u_int16_t *dptr;
2341
2342 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2343 sdp += bus;
2344 /*
2345 * We always update (internally) from goal_flags
2346 * so any request to change settings just gets
2347 * vectored to that location.
2348 */
2349 dptr = &sdp->isp_devparam[tgt].goal_flags;
2350
2351 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2352 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2353 *dptr |= DPARM_DISC;
2354 else
2355 *dptr &= ~DPARM_DISC;
2356 }
2357
2358 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2359 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2360 *dptr |= DPARM_TQING;
2361 else
2362 *dptr &= ~DPARM_TQING;
2363 }
2364
2365 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2366 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2367 *dptr |= DPARM_WIDE;
2368 else
2369 *dptr &= ~DPARM_WIDE;
2370 }
2371
2372 /*
2373 * XXX: FIX ME
2374 */
2375 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2376 (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2377 (spi->sync_period && spi->sync_offset)) {
2378 *dptr |= DPARM_SYNC;
2379 /*
2380 * XXX: CHECK FOR LEGALITY
2381 */
2382 sdp->isp_devparam[tgt].goal_period =
2383 spi->sync_period;
2384 sdp->isp_devparam[tgt].goal_offset =
2385 spi->sync_offset;
2386 } else {
2387 *dptr &= ~DPARM_SYNC;
2388 }
2389#endif
2390 isp_prt(isp, ISP_LOGDEBUG0,
2391 "SET bus %d targ %d to flags %x off %x per %x",
2392 bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2393 sdp->isp_devparam[tgt].goal_offset,
2394 sdp->isp_devparam[tgt].goal_period);
2395 sdp->isp_devparam[tgt].dev_update = 1;
2396 isp->isp_update |= (1 << bus);
2397 }
2398 ISPLOCK_2_CAMLOCK(isp);
2399 ccb->ccb_h.status = CAM_REQ_CMP;
2400 xpt_done(ccb);
2401 break;
2402 case XPT_GET_TRAN_SETTINGS:
2403 cts = &ccb->cts;
2404 tgt = cts->ccb_h.target_id;
2405 CAMLOCK_2_ISPLOCK(isp);
2406 if (IS_FC(isp)) {
2407#ifndef CAM_NEW_TRAN_CODE
2408 /*
2409 * a lot of normal SCSI things don't make sense.
2410 */
2411 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2412 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2413 /*
2414 * How do you measure the width of a high
2415 * speed serial bus? Well, in bytes.
2416 *
2417 * Offset and period make no sense, though, so we set
2418 * (above) a 'base' transfer speed to be gigabit.
2419 */
2420 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2421#else
2422 fcparam *fcp = isp->isp_param;
2423 struct ccb_trans_settings_fc *fc =
2424 &cts->xport_specific.fc;
2425
2426 cts->protocol = PROTO_SCSI;
2427 cts->protocol_version = SCSI_REV_2;
2428 cts->transport = XPORT_FC;
2429 cts->transport_version = 0;
2430
2431 fc->valid = CTS_FC_VALID_SPEED;
2432 if (fcp->isp_gbspeed == 2)
2433 fc->bitrate = 200000;
2434 else
2435 fc->bitrate = 100000;
2436 if (tgt > 0 && tgt < MAX_FC_TARG) {
2437 struct lportdb *lp = &fcp->portdb[tgt];
2438 fc->wwnn = lp->node_wwn;
2439 fc->wwpn = lp->port_wwn;
2440 fc->port = lp->portid;
2441 fc->valid |= CTS_FC_VALID_WWNN |
2442 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2443 }
2444#endif
2445 } else {
2446#ifdef CAM_NEW_TRAN_CODE
2447 struct ccb_trans_settings_scsi *scsi =
2448 &cts->proto_specific.scsi;
2449 struct ccb_trans_settings_spi *spi =
2450 &cts->xport_specific.spi;
2451#endif
2452 sdparam *sdp = isp->isp_param;
2453 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2454 u_int16_t dval, pval, oval;
2455
2456 sdp += bus;
2457
2458 if (IS_CURRENT_SETTINGS(cts)) {
2459 sdp->isp_devparam[tgt].dev_refresh = 1;
2460 isp->isp_update |= (1 << bus);
2461 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2462 NULL);
2463 dval = sdp->isp_devparam[tgt].actv_flags;
2464 oval = sdp->isp_devparam[tgt].actv_offset;
2465 pval = sdp->isp_devparam[tgt].actv_period;
2466 } else {
2467 dval = sdp->isp_devparam[tgt].nvrm_flags;
2468 oval = sdp->isp_devparam[tgt].nvrm_offset;
2469 pval = sdp->isp_devparam[tgt].nvrm_period;
2470 }
2471
2472#ifndef CAM_NEW_TRAN_CODE
2473 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2474
2475 if (dval & DPARM_DISC) {
2476 cts->flags |= CCB_TRANS_DISC_ENB;
2477 }
2478 if (dval & DPARM_TQING) {
2479 cts->flags |= CCB_TRANS_TAG_ENB;
2480 }
2481 if (dval & DPARM_WIDE) {
2482 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2483 } else {
2484 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2485 }
2486 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2487 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2488
2489 if ((dval & DPARM_SYNC) && oval != 0) {
2490 cts->sync_period = pval;
2491 cts->sync_offset = oval;
2492 cts->valid |=
2493 CCB_TRANS_SYNC_RATE_VALID |
2494 CCB_TRANS_SYNC_OFFSET_VALID;
2495 }
2496#else
2497 cts->protocol = PROTO_SCSI;
2498 cts->protocol_version = SCSI_REV_2;
2499 cts->transport = XPORT_SPI;
2500 cts->transport_version = 2;
2501
2502 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2503 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2504 if (dval & DPARM_DISC) {
2505 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2506 }
2507 if (dval & DPARM_TQING) {
2508 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2509 }
2510 if ((dval & DPARM_SYNC) && oval && pval) {
2511 spi->sync_offset = oval;
2512 spi->sync_period = pval;
2513 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2514 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2515 }
2516 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2517 if (dval & DPARM_WIDE) {
2518 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2519 } else {
2520 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2521 }
2522 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2523 scsi->valid = CTS_SCSI_VALID_TQ;
2524 spi->valid |= CTS_SPI_VALID_DISC;
2525 } else {
2526 scsi->valid = 0;
2527 }
2528#endif
2529 isp_prt(isp, ISP_LOGDEBUG0,
2530 "GET %s bus %d targ %d to flags %x off %x per %x",
2531 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2532 bus, tgt, dval, oval, pval);
2533 }
2534 ISPLOCK_2_CAMLOCK(isp);
2535 ccb->ccb_h.status = CAM_REQ_CMP;
2536 xpt_done(ccb);
2537 break;
2538
2539 case XPT_CALC_GEOMETRY:
2540 {
2541 struct ccb_calc_geometry *ccg;
2542 u_int32_t secs_per_cylinder;
2543 u_int32_t size_mb;
2544
2545 ccg = &ccb->ccg;
2546 if (ccg->block_size == 0) {
2547 isp_prt(isp, ISP_LOGERR,
2548 "%d.%d XPT_CALC_GEOMETRY block size 0?",
2549 ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2550 ccb->ccb_h.status = CAM_REQ_INVALID;
2551 xpt_done(ccb);
2552 break;
2553 }
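		/*
		 * Fake a CHS geometry: convert the volume size to
		 * megabytes and use 255 heads/63 sectors for anything
		 * over a gigabyte, 64 heads/32 sectors otherwise.
		 */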
2554 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2555 if (size_mb > 1024) {
2556 ccg->heads = 255;
2557 ccg->secs_per_track = 63;
2558 } else {
2559 ccg->heads = 64;
2560 ccg->secs_per_track = 32;
2561 }
2562 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2563 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2564 ccb->ccb_h.status = CAM_REQ_CMP;
2565 xpt_done(ccb);
2566 break;
2567 }
2568 case XPT_RESET_BUS: /* Reset the specified bus */
2569 bus = cam_sim_bus(sim);
2570 CAMLOCK_2_ISPLOCK(isp);
2571 error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2572 ISPLOCK_2_CAMLOCK(isp);
2573 if (error)
2574 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2575 else {
2576 if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2577 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2578 else if (isp->isp_path != NULL)
2579 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2580 ccb->ccb_h.status = CAM_REQ_CMP;
2581 }
2582 xpt_done(ccb);
2583 break;
2584
2585 case XPT_TERM_IO: /* Terminate the I/O process */
2586 ccb->ccb_h.status = CAM_REQ_INVALID;
2587 xpt_done(ccb);
2588 break;
2589
2590 case XPT_PATH_INQ: /* Path routing inquiry */
2591 {
2592 struct ccb_pathinq *cpi = &ccb->cpi;
2593
2594 cpi->version_num = 1;
2595#ifdef ISP_TARGET_MODE
2596 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2597#else
2598 cpi->target_sprt = 0;
2599#endif
2600 cpi->hba_eng_cnt = 0;
2601 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2602 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2603 cpi->bus_id = cam_sim_bus(sim);
2604 if (IS_FC(isp)) {
2605 cpi->hba_misc = PIM_NOBUSRESET;
2606 /*
2607 * Because our loop ID can shift from time to time,
2608 * make our initiator ID out of range of our bus.
2609 */
2610 cpi->initiator_id = cpi->max_target + 1;
2611
2612 /*
2613 * Set base transfer capabilities for Fibre Channel.
2614 * Technically not correct because we don't know
2615 			 * what media we're running on top of, but we'll
2616 * look good if we always say 100MB/s.
2617 */
2618 if (FCPARAM(isp)->isp_gbspeed == 2)
2619 cpi->base_transfer_speed = 200000;
2620 else
2621 cpi->base_transfer_speed = 100000;
2622 cpi->hba_inquiry = PI_TAG_ABLE;
2623#ifdef CAM_NEW_TRAN_CODE
2624 cpi->transport = XPORT_FC;
2625 cpi->transport_version = 0; /* WHAT'S THIS FOR? */
2626#endif
2627 } else {
2628 sdparam *sdp = isp->isp_param;
2629 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2630 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2631 cpi->hba_misc = 0;
2632 cpi->initiator_id = sdp->isp_initiator_id;
2633 cpi->base_transfer_speed = 3300;
2634#ifdef CAM_NEW_TRAN_CODE
2635 cpi->transport = XPORT_SPI;
2636 cpi->transport_version = 2; /* WHAT'S THIS FOR? */
2637#endif
2638 }
2639#ifdef CAM_NEW_TRAN_CODE
2640 cpi->protocol = PROTO_SCSI;
2641 cpi->protocol_version = SCSI_REV_2;
2642#endif
2643 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2644 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2645 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2646 cpi->unit_number = cam_sim_unit(sim);
2647 cpi->ccb_h.status = CAM_REQ_CMP;
2648 xpt_done(ccb);
2649 break;
2650 }
2651 default:
2652 ccb->ccb_h.status = CAM_REQ_INVALID;
2653 xpt_done(ccb);
2654 break;
2655 }
2656}
2657
2658#define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2659void
2660isp_done(struct ccb_scsiio *sccb)
2661{
2662 struct ispsoftc *isp = XS_ISP(sccb);
2663
2664 if (XS_NOERR(sccb))
2665 XS_SETERR(sccb, CAM_REQ_CMP);
2666
2667 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2668 (sccb->scsi_status != SCSI_STATUS_OK)) {
2669 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2670 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2671 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2672 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2673 } else {
2674 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2675 }
2676 }
2677
2678 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
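	/*
	 * If the command did not complete cleanly, freeze the device
	 * queue (once) and set CAM_DEV_QFRZN so the peripheral driver
	 * knows it has to release it.
	 */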
2679 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2680 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2681 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2682 xpt_freeze_devq(sccb->ccb_h.path, 1);
2683 isp_prt(isp, ISP_LOGDEBUG0,
2684 "freeze devq %d.%d cam sts %x scsi sts %x",
2685 sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2686 sccb->ccb_h.status, sccb->scsi_status);
2687 }
2688 }
2689
2690 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2691 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2692 xpt_print_path(sccb->ccb_h.path);
2693 isp_prt(isp, ISP_LOGINFO,
2694 "cam completion status 0x%x", sccb->ccb_h.status);
2695 }
2696
2697 XS_CMD_S_DONE(sccb);
2698 if (XS_CMD_WDOG_P(sccb) == 0) {
2699 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2700 if (XS_CMD_GRACE_P(sccb)) {
2701 isp_prt(isp, ISP_LOGDEBUG2,
2702 "finished command on borrowed time");
2703 }
2704 XS_CMD_S_CLEAR(sccb);
2705 ISPLOCK_2_CAMLOCK(isp);
2706 xpt_done((union ccb *) sccb);
2707 CAMLOCK_2_ISPLOCK(isp);
2708 }
2709}
2710
2711int
2712isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2713{
2714 int bus, rv = 0;
2715 switch (cmd) {
2716 case ISPASYNC_NEW_TGT_PARAMS:
2717 {
2718#ifdef CAM_NEW_TRAN_CODE
2719 struct ccb_trans_settings_scsi *scsi;
2720 struct ccb_trans_settings_spi *spi;
2721#endif
2722 int flags, tgt;
2723 sdparam *sdp = isp->isp_param;
2724 struct ccb_trans_settings cts;
2725 struct cam_path *tmppath;
2726
2727 bzero(&cts, sizeof (struct ccb_trans_settings));
2728
2729 tgt = *((int *)arg);
2730 bus = (tgt >> 16) & 0xffff;
2731 tgt &= 0xffff;
2732 sdp += bus;
2733 ISPLOCK_2_CAMLOCK(isp);
2734 if (xpt_create_path(&tmppath, NULL,
2735 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2736 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2737 CAMLOCK_2_ISPLOCK(isp);
2738 isp_prt(isp, ISP_LOGWARN,
2739 "isp_async cannot make temp path for %d.%d",
2740 tgt, bus);
2741 rv = -1;
2742 break;
2743 }
2744 CAMLOCK_2_ISPLOCK(isp);
2745 flags = sdp->isp_devparam[tgt].actv_flags;
2746#ifdef CAM_NEW_TRAN_CODE
2747 cts.type = CTS_TYPE_CURRENT_SETTINGS;
2748 cts.protocol = PROTO_SCSI;
2749 cts.transport = XPORT_SPI;
2750
2751 scsi = &cts.proto_specific.scsi;
2752 spi = &cts.xport_specific.spi;
2753
2754 if (flags & DPARM_TQING) {
2755 scsi->valid |= CTS_SCSI_VALID_TQ;
2756 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2757 spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2758 }
2759
2760 if (flags & DPARM_DISC) {
2761 spi->valid |= CTS_SPI_VALID_DISC;
2762 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2763 }
2764 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2765 if (flags & DPARM_WIDE) {
2766 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2767 } else {
2768 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2769 }
2770 if (flags & DPARM_SYNC) {
2771 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2772 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2773 spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2774 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2775 }
2776#else
2777 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2778 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2779 if (flags & DPARM_DISC) {
2780 cts.flags |= CCB_TRANS_DISC_ENB;
2781 }
2782 if (flags & DPARM_TQING) {
2783 cts.flags |= CCB_TRANS_TAG_ENB;
2784 }
2785 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2786 		cts.bus_width = (flags & DPARM_WIDE)?
2787 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2788 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2789 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2790 if (flags & DPARM_SYNC) {
2791 cts.valid |=
2792 CCB_TRANS_SYNC_RATE_VALID |
2793 CCB_TRANS_SYNC_OFFSET_VALID;
2794 }
2795#endif
2796 isp_prt(isp, ISP_LOGDEBUG2,
2797 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2798 bus, tgt, sdp->isp_devparam[tgt].actv_period,
2799 sdp->isp_devparam[tgt].actv_offset, flags);
2800 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2801 ISPLOCK_2_CAMLOCK(isp);
2802 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2803 xpt_free_path(tmppath);
2804 CAMLOCK_2_ISPLOCK(isp);
2805 break;
2806 }
2807 case ISPASYNC_BUS_RESET:
2808 bus = *((int *)arg);
2809 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2810 bus);
2811 if (bus > 0 && isp->isp_path2) {
2812 ISPLOCK_2_CAMLOCK(isp);
2813 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2814 CAMLOCK_2_ISPLOCK(isp);
2815 } else if (isp->isp_path) {
2816 ISPLOCK_2_CAMLOCK(isp);
2817 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2818 CAMLOCK_2_ISPLOCK(isp);
2819 }
2820 break;
2821 case ISPASYNC_LIP:
2822 if (isp->isp_path) {
2823 isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2824 }
2825 isp_prt(isp, ISP_LOGINFO, "LIP Received");
2826 break;
2827 case ISPASYNC_LOOP_RESET:
2828 if (isp->isp_path) {
2829 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2830 }
2831 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2832 break;
2833 case ISPASYNC_LOOP_DOWN:
2834 if (isp->isp_path) {
2835 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2836 }
2837 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2838 break;
2839 case ISPASYNC_LOOP_UP:
2840 /*
2841 * Now we just note that Loop has come up. We don't
2842 * actually do anything because we're waiting for a
2843 * Change Notify before activating the FC cleanup
2844 * thread to look at the state of the loop again.
2845 */
2846 isp_prt(isp, ISP_LOGINFO, "Loop UP");
2847 break;
2848 case ISPASYNC_PROMENADE:
2849 {
2850 struct cam_path *tmppath;
2851 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2852 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2853 static const char *roles[4] = {
2854 "(none)", "Target", "Initiator", "Target/Initiator"
2855 };
2856 fcparam *fcp = isp->isp_param;
2857 int tgt = *((int *) arg);
2858 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2859 struct lportdb *lp = &fcp->portdb[tgt];
2860
2861 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2862 roles[lp->roles & 0x3],
2863 (lp->valid)? "Arrived" : "Departed",
2864 (u_int32_t) (lp->port_wwn >> 32),
2865 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2866 (u_int32_t) (lp->node_wwn >> 32),
2867 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2868
2869 ISPLOCK_2_CAMLOCK(isp);
2870 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2871 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2872 CAMLOCK_2_ISPLOCK(isp);
2873 break;
2874 }
2875 /*
2876 * Policy: only announce targets.
2877 */
2878 if (lp->roles & is_tgt_mask) {
2879 if (lp->valid) {
2880 xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2881 } else {
2882 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2883 }
2884 }
2885 xpt_free_path(tmppath);
2886 CAMLOCK_2_ISPLOCK(isp);
2887 break;
2888 }
2889 case ISPASYNC_CHANGE_NOTIFY:
2890 if (arg == ISPASYNC_CHANGE_PDB) {
2891 isp_prt(isp, ISP_LOGINFO,
2892 "Port Database Changed");
2893 } else if (arg == ISPASYNC_CHANGE_SNS) {
2894 isp_prt(isp, ISP_LOGINFO,
2895 "Name Server Database Changed");
2896 }
2897#ifdef ISP_SMPLOCK
2898 cv_signal(&isp->isp_osinfo.kthread_cv);
2899#else
2900 wakeup(&isp->isp_osinfo.kthread_cv);
2901#endif
2902 break;
2903 case ISPASYNC_FABRIC_DEV:
2904 {
2905 int target, base, lim;
2906 fcparam *fcp = isp->isp_param;
2907 struct lportdb *lp = NULL;
2908 struct lportdb *clp = (struct lportdb *) arg;
2909 char *pt;
2910
2911 switch (clp->port_type) {
2912 case 1:
2913 pt = " N_Port";
2914 break;
2915 case 2:
2916 pt = " NL_Port";
2917 break;
2918 case 3:
2919 pt = "F/NL_Port";
2920 break;
2921 case 0x7f:
2922 pt = " Nx_Port";
2923 break;
2924 case 0x81:
2925 pt = " F_port";
2926 break;
2927 case 0x82:
2928 pt = " FL_Port";
2929 break;
2930 case 0x84:
2931 pt = " E_port";
2932 break;
2933 default:
2934 pt = " ";
2935 break;
2936 }
2937
2938 isp_prt(isp, ISP_LOGINFO,
2939 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2940
2941 /*
2942 * If we don't have an initiator role we bail.
2943 *
2944 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2945 */
2946
2947 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2948 break;
2949 }
2950
2951 /*
2952 * Is this entry for us? If so, we bail.
2953 */
2954
2955 if (fcp->isp_portid == clp->portid) {
2956 break;
2957 }
2958
2959 /*
2960 * Else, the default policy is to find room for it in
2961 * our local port database. Later, when we execute
2962 		 * the call to isp_pdb_sync, either this newly arrived
2963 		 * or an already logged in device will be (re)announced.
2964 */
2965
2966 if (fcp->isp_topo == TOPO_FL_PORT)
2967 base = FC_SNS_ID+1;
2968 else
2969 base = 0;
2970
2971 if (fcp->isp_topo == TOPO_N_PORT)
2972 lim = 1;
2973 else
2974 lim = MAX_FC_TARG;
2975
2976 /*
2977 * Is it already in our list?
2978 */
2979 for (target = base; target < lim; target++) {
2980 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2981 continue;
2982 }
2983 lp = &fcp->portdb[target];
2984 if (lp->port_wwn == clp->port_wwn &&
2985 lp->node_wwn == clp->node_wwn) {
2986 lp->fabric_dev = 1;
2987 break;
2988 }
2989 }
2990 if (target < lim) {
2991 break;
2992 }
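		/*
		 * Not already known- find the first unused slot (one
		 * with a zero port WWN), again skipping the reserved
		 * loop and SNS IDs, and remember the device there.
		 */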
2993 for (target = base; target < lim; target++) {
2994 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2995 continue;
2996 }
2997 lp = &fcp->portdb[target];
2998 if (lp->port_wwn == 0) {
2999 break;
3000 }
3001 }
3002 if (target == lim) {
3003 isp_prt(isp, ISP_LOGWARN,
3004 "out of space for fabric devices");
3005 break;
3006 }
3007 lp->port_type = clp->port_type;
3008 lp->fc4_type = clp->fc4_type;
3009 lp->node_wwn = clp->node_wwn;
3010 lp->port_wwn = clp->port_wwn;
3011 lp->portid = clp->portid;
3012 lp->fabric_dev = 1;
3013 break;
3014 }
3015#ifdef ISP_TARGET_MODE
3016 case ISPASYNC_TARGET_MESSAGE:
3017 {
3018 tmd_msg_t *mp = arg;
3019 isp_prt(isp, ISP_LOGALL,
3020 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3021 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3022 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3023 mp->nt_msg[0]);
3024 break;
3025 }
3026 case ISPASYNC_TARGET_EVENT:
3027 {
3028 tmd_event_t *ep = arg;
3029 isp_prt(isp, ISP_LOGALL,
3030 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
3031 break;
3032 }
3033 case ISPASYNC_TARGET_ACTION:
3034 switch (((isphdr_t *)arg)->rqs_entry_type) {
3035 default:
3036 isp_prt(isp, ISP_LOGWARN,
3037 "event 0x%x for unhandled target action",
3038 ((isphdr_t *)arg)->rqs_entry_type);
3039 break;
3040 case RQSTYPE_NOTIFY:
3041 if (IS_SCSI(isp)) {
3042 rv = isp_handle_platform_notify_scsi(isp,
3043 (in_entry_t *) arg);
3044 } else {
3045 rv = isp_handle_platform_notify_fc(isp,
3046 (in_fcentry_t *) arg);
3047 }
3048 break;
3049 case RQSTYPE_ATIO:
3050 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3051 break;
3052 case RQSTYPE_ATIO2:
3053 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3054 break;
3055 case RQSTYPE_CTIO2:
3056 case RQSTYPE_CTIO:
3057 rv = isp_handle_platform_ctio(isp, arg);
3058 break;
3059 case RQSTYPE_ENABLE_LUN:
3060 case RQSTYPE_MODIFY_LUN:
3061 if (IS_DUALBUS(isp)) {
3062 bus =
3063 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3064 } else {
3065 bus = 0;
3066 }
3067 isp_cv_signal_rqe(isp, bus,
3068 ((lun_entry_t *)arg)->le_status);
3069 break;
3070 }
3071 break;
3072#endif
3073 case ISPASYNC_FW_CRASH:
3074 {
3075 u_int16_t mbox1, mbox6;
3076 mbox1 = ISP_READ(isp, OUTMAILBOX1);
3077 if (IS_DUALBUS(isp)) {
3078 mbox6 = ISP_READ(isp, OUTMAILBOX6);
3079 } else {
3080 mbox6 = 0;
3081 }
3082 isp_prt(isp, ISP_LOGERR,
3083 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3084 mbox6, mbox1);
3085#ifdef ISP_FW_CRASH_DUMP
3086 /*
3087 * XXX: really need a thread to do this right.
3088 */
3089 if (IS_FC(isp)) {
3090 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3091 FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3092 isp_freeze_loopdown(isp, "f/w crash");
3093 isp_fw_dump(isp);
3094 }
3095 isp_reinit(isp);
3096 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3097#endif
3098 break;
3099 }
3100 case ISPASYNC_UNHANDLED_RESPONSE:
3101 break;
3102 default:
3103 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3104 break;
3105 }
3106 return (rv);
3107}
3108
3109
3110/*
3111 * Locks are held before coming here.
3112 */
3113void
3114isp_uninit(struct ispsoftc *isp)
3115{
3116 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3117 DISABLE_INTS(isp);
3118}
3119
3120void
3121isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3122{
3123 va_list ap;
3124 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3125 return;
3126 }
3127 printf("%s: ", device_get_nameunit(isp->isp_dev));
3128 va_start(ap, fmt);
3129 vprintf(fmt, ap);
3130 va_end(ap);
3131 printf("\n");
3132}
59};
60
61static struct ispsoftc *isplist = NULL;
62
63void
64isp_attach(struct ispsoftc *isp)
65{
66 int primary, secondary;
67 struct ccb_setasync csa;
68 struct cam_devq *devq;
69 struct cam_sim *sim;
70 struct cam_path *path;
71
72 /*
73 * Establish (in case of 12X0) which bus is the primary.
74 */
75
76 primary = 0;
77 secondary = 1;
78
79 /*
80 * Create the device queue for our SIM(s).
81 */
82 devq = cam_simq_alloc(isp->isp_maxcmds);
83 if (devq == NULL) {
84 return;
85 }
86
87 /*
88 * Construct our SIM entry.
89 */
90 ISPLOCK_2_CAMLOCK(isp);
91 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
92 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
93 if (sim == NULL) {
94 cam_simq_free(devq);
95 CAMLOCK_2_ISPLOCK(isp);
96 return;
97 }
98 CAMLOCK_2_ISPLOCK(isp);
99
100 isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
101 isp->isp_osinfo.ehook.ich_arg = isp;
102 ISPLOCK_2_CAMLOCK(isp);
103 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
104 cam_sim_free(sim, TRUE);
105 CAMLOCK_2_ISPLOCK(isp);
106 isp_prt(isp, ISP_LOGERR,
107 "could not establish interrupt enable hook");
108 return;
109 }
110
111 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
112 cam_sim_free(sim, TRUE);
113 CAMLOCK_2_ISPLOCK(isp);
114 return;
115 }
116
117 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
118 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
119 xpt_bus_deregister(cam_sim_path(sim));
120 cam_sim_free(sim, TRUE);
121 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
122 CAMLOCK_2_ISPLOCK(isp);
123 return;
124 }
125
126 xpt_setup_ccb(&csa.ccb_h, path, 5);
127 csa.ccb_h.func_code = XPT_SASYNC_CB;
128 csa.event_enable = AC_LOST_DEVICE;
129 csa.callback = isp_cam_async;
130 csa.callback_arg = sim;
131 xpt_action((union ccb *)&csa);
132 CAMLOCK_2_ISPLOCK(isp);
133 isp->isp_sim = sim;
134 isp->isp_path = path;
135 /*
136 * Create a kernel thread for fibre channel instances. We
137 * don't have dual channel FC cards.
138 */
139 if (IS_FC(isp)) {
140 ISPLOCK_2_CAMLOCK(isp);
141 /* XXX: LOCK VIOLATION */
142 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
143 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
144 RFHIGHPID, 0, "%s: fc_thrd",
145 device_get_nameunit(isp->isp_dev))) {
146 xpt_bus_deregister(cam_sim_path(sim));
147 cam_sim_free(sim, TRUE);
148 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
149 CAMLOCK_2_ISPLOCK(isp);
150 isp_prt(isp, ISP_LOGERR, "could not create kthread");
151 return;
152 }
153 CAMLOCK_2_ISPLOCK(isp);
154 }
155
156
157 /*
158 * If we have a second channel, construct SIM entry for that.
159 */
160 if (IS_DUALBUS(isp)) {
161 ISPLOCK_2_CAMLOCK(isp);
162 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
163 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
164 if (sim == NULL) {
165 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
166 xpt_free_path(isp->isp_path);
167 cam_simq_free(devq);
168 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
169 return;
170 }
171 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
172 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
173 xpt_free_path(isp->isp_path);
174 cam_sim_free(sim, TRUE);
175 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
176 CAMLOCK_2_ISPLOCK(isp);
177 return;
178 }
179
180 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
181 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
182 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
183 xpt_free_path(isp->isp_path);
184 xpt_bus_deregister(cam_sim_path(sim));
185 cam_sim_free(sim, TRUE);
186 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
187 CAMLOCK_2_ISPLOCK(isp);
188 return;
189 }
190
191 xpt_setup_ccb(&csa.ccb_h, path, 5);
192 csa.ccb_h.func_code = XPT_SASYNC_CB;
193 csa.event_enable = AC_LOST_DEVICE;
194 csa.callback = isp_cam_async;
195 csa.callback_arg = sim;
196 xpt_action((union ccb *)&csa);
197 CAMLOCK_2_ISPLOCK(isp);
198 isp->isp_sim2 = sim;
199 isp->isp_path2 = path;
200 }
201
202#ifdef ISP_TARGET_MODE
203 cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
204 cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
205 cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
206 cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
207#endif
208 /*
209 * Create device nodes
210 */
211 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
212 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
213
214 if (isp->isp_role != ISP_ROLE_NONE) {
215 isp->isp_state = ISP_RUNSTATE;
216 ENABLE_INTS(isp);
217 }
218 if (isplist == NULL) {
219 isplist = isp;
220 } else {
221 struct ispsoftc *tmp = isplist;
222 while (tmp->isp_osinfo.next) {
223 tmp = tmp->isp_osinfo.next;
224 }
225 tmp->isp_osinfo.next = isp;
226 }
227
228}
229
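/*
 * Freeze the SIMQ because the loop has gone away.  Only the first caller
 * actually freezes the queue; later callers just make sure the
 * SIMQFRZ_LOOPDOWN reason bit stays set.  The FC kthread releases the
 * queue again once loop state settles.
 */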
230static INLINE void
231isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
232{
233 if (isp->isp_osinfo.simqfrozen == 0) {
234 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
235 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
236 ISPLOCK_2_CAMLOCK(isp);
237 xpt_freeze_simq(isp->isp_sim, 1);
238 CAMLOCK_2_ISPLOCK(isp);
239 } else {
240 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
241 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
242 }
243}
244
245static int
246ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
247{
248 struct ispsoftc *isp;
249 int retval = ENOTTY;
250
251 isp = isplist;
252 while (isp) {
253 if (minor(dev) == device_get_unit(isp->isp_dev)) {
254 break;
255 }
256 isp = isp->isp_osinfo.next;
257 }
258 if (isp == NULL)
259 return (ENXIO);
260
261 switch (cmd) {
262#ifdef ISP_FW_CRASH_DUMP
263 case ISP_GET_FW_CRASH_DUMP:
264 {
265 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
266 size_t sz;
267
268 retval = 0;
269 if (IS_2200(isp))
270 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
271 else
272 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
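		/*
		 * The first word of the dump area doubles as a 'dump
		 * present' flag; it is cleared after a successful
		 * copyout so the dump is only handed up once.
		 */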
273 ISP_LOCK(isp);
274 if (ptr && *ptr) {
275 void *uaddr = *((void **) addr);
276 if (copyout(ptr, uaddr, sz)) {
277 retval = EFAULT;
278 } else {
279 *ptr = 0;
280 }
281 } else {
282 retval = ENXIO;
283 }
284 ISP_UNLOCK(isp);
285 break;
286 }
287
288 case ISP_FORCE_CRASH_DUMP:
289 ISP_LOCK(isp);
290 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
291 isp_fw_dump(isp);
292 isp_reinit(isp);
293 ISP_UNLOCK(isp);
294 retval = 0;
295 break;
296#endif
297 case ISP_SDBLEV:
298 {
299 int olddblev = isp->isp_dblev;
300 isp->isp_dblev = *(int *)addr;
301 *(int *)addr = olddblev;
302 retval = 0;
303 break;
304 }
305 case ISP_RESETHBA:
306 ISP_LOCK(isp);
307 isp_reinit(isp);
308 ISP_UNLOCK(isp);
309 retval = 0;
310 break;
311 case ISP_RESCAN:
312 if (IS_FC(isp)) {
313 ISP_LOCK(isp);
314 if (isp_fc_runstate(isp, 5 * 1000000)) {
315 retval = EIO;
316 } else {
317 retval = 0;
318 }
319 ISP_UNLOCK(isp);
320 }
321 break;
322 case ISP_FC_LIP:
323 if (IS_FC(isp)) {
324 ISP_LOCK(isp);
325 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
326 retval = EIO;
327 } else {
328 retval = 0;
329 }
330 ISP_UNLOCK(isp);
331 }
332 break;
333 case ISP_FC_GETDINFO:
334 {
335 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
336 struct lportdb *lp;
337
338 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
339 retval = EINVAL;
340 break;
341 }
342 ISP_LOCK(isp);
343 lp = &FCPARAM(isp)->portdb[ifc->loopid];
344 if (lp->valid) {
345 ifc->loopid = lp->loopid;
346 ifc->portid = lp->portid;
347 ifc->node_wwn = lp->node_wwn;
348 ifc->port_wwn = lp->port_wwn;
349 retval = 0;
350 } else {
351 retval = ENODEV;
352 }
353 ISP_UNLOCK(isp);
354 break;
355 }
356 case ISP_GET_STATS:
357 {
358 isp_stats_t *sp = (isp_stats_t *) addr;
359
360 MEMZERO(sp, sizeof (*sp));
361 sp->isp_stat_version = ISP_STATS_VERSION;
362 sp->isp_type = isp->isp_type;
363 sp->isp_revision = isp->isp_revision;
364 ISP_LOCK(isp);
365 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
366 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
367 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
368 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
369 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
370 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
371 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
372 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
373 ISP_UNLOCK(isp);
374 retval = 0;
375 break;
376 }
377 case ISP_CLR_STATS:
378 ISP_LOCK(isp);
379 isp->isp_intcnt = 0;
380 isp->isp_intbogus = 0;
381 isp->isp_intmboxc = 0;
382 isp->isp_intoasync = 0;
383 isp->isp_rsltccmplt = 0;
384 isp->isp_fphccmplt = 0;
385 isp->isp_rscchiwater = 0;
386 isp->isp_fpcchiwater = 0;
387 ISP_UNLOCK(isp);
388 retval = 0;
389 break;
390 case ISP_FC_GETHINFO:
391 {
392 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
393 MEMZERO(hba, sizeof (*hba));
394 ISP_LOCK(isp);
395 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
396 hba->fc_scsi_supported = 1;
397 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
398 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
399 hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
400 hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
401 ISP_UNLOCK(isp);
402 retval = 0;
403 break;
404 }
405 case ISP_GET_FC_PARAM:
406 {
407 struct isp_fc_param *f = (struct isp_fc_param *) addr;
408
409 if (!IS_FC(isp)) {
410 retval = EINVAL;
411 break;
412 }
413 f->parameter = 0;
414 if (strcmp(f->param_name, "framelength") == 0) {
415 f->parameter = FCPARAM(isp)->isp_maxfrmlen;
416 retval = 0;
417 break;
418 }
419 if (strcmp(f->param_name, "exec_throttle") == 0) {
420 f->parameter = FCPARAM(isp)->isp_execthrottle;
421 retval = 0;
422 break;
423 }
424 if (strcmp(f->param_name, "fullduplex") == 0) {
425 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
426 f->parameter = 1;
427 retval = 0;
428 break;
429 }
430 if (strcmp(f->param_name, "loopid") == 0) {
431 f->parameter = FCPARAM(isp)->isp_loopid;
432 retval = 0;
433 break;
434 }
435 retval = EINVAL;
436 break;
437 }
438 case ISP_SET_FC_PARAM:
439 {
440 struct isp_fc_param *f = (struct isp_fc_param *) addr;
441 u_int32_t param = f->parameter;
442
443 if (!IS_FC(isp)) {
444 retval = EINVAL;
445 break;
446 }
447 f->parameter = 0;
448 if (strcmp(f->param_name, "framelength") == 0) {
449 			if (param != 512 && param != 1024 && param != 2048) {
450 retval = EINVAL;
451 break;
452 }
453 FCPARAM(isp)->isp_maxfrmlen = param;
454 retval = 0;
455 break;
456 }
457 if (strcmp(f->param_name, "exec_throttle") == 0) {
458 if (param < 16 || param > 255) {
459 retval = EINVAL;
460 break;
461 }
462 FCPARAM(isp)->isp_execthrottle = param;
463 retval = 0;
464 break;
465 }
466 if (strcmp(f->param_name, "fullduplex") == 0) {
467 if (param != 0 && param != 1) {
468 retval = EINVAL;
469 break;
470 }
471 if (param) {
472 FCPARAM(isp)->isp_fwoptions |=
473 ICBOPT_FULL_DUPLEX;
474 } else {
475 FCPARAM(isp)->isp_fwoptions &=
476 ~ICBOPT_FULL_DUPLEX;
477 }
478 retval = 0;
479 break;
480 }
481 if (strcmp(f->param_name, "loopid") == 0) {
482 if (param < 0 || param > 125) {
483 retval = EINVAL;
484 break;
485 }
486 FCPARAM(isp)->isp_loopid = param;
487 retval = 0;
488 break;
489 }
490 retval = EINVAL;
491 break;
492 }
493 default:
494 break;
495 }
496 return (retval);
497}
498
499static void
500isp_intr_enable(void *arg)
501{
502 struct ispsoftc *isp = arg;
503 if (isp->isp_role != ISP_ROLE_NONE) {
504 ENABLE_INTS(isp);
505 isp->isp_osinfo.intsok = 1;
506 }
507 /* Release our hook so that the boot can continue. */
508 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
509}
510
511/*
512 * Put the target mode functions here, because some are inlines
513 */
514
515#ifdef ISP_TARGET_MODE
516
517static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
518static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
519static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
520static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
521static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
522static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
523static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
524static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
525static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
526static cam_status
527create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
528static void destroy_lun_state(struct ispsoftc *, tstate_t *);
529static void isp_en_lun(struct ispsoftc *, union ccb *);
530static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
531static timeout_t isp_refire_putback_atio;
532static void isp_complete_ctio(union ccb *);
533static void isp_target_putback_atio(union ccb *);
534static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
535static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
536static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
537static int isp_handle_platform_ctio(struct ispsoftc *, void *);
538static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
539static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
540
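/*
 * Per-lun target mode state (tstate_t) lives in a small hash table keyed
 * by (bus, lun) via LUN_HASH_FUNC and chained through the 'next' pointer.
 * The 'hold' field is a simple reference count: it is taken by
 * get_lun_statep()/create_lun_state() and dropped by rls_lun_statep(),
 * and destroy_lun_state() will not free an entry that is still held.
 */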
541static INLINE int
542is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
543{
544 tstate_t *tptr;
545 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
546 if (tptr == NULL) {
547 return (0);
548 }
549 do {
550 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
551 return (1);
552 }
553 } while ((tptr = tptr->next) != NULL);
554 return (0);
555}
556
557static INLINE int
558are_any_luns_enabled(struct ispsoftc *isp, int port)
559{
560 int lo, hi;
561 if (IS_DUALBUS(isp)) {
562 lo = (port * (LUN_HASH_SIZE >> 1));
563 hi = lo + (LUN_HASH_SIZE >> 1);
564 } else {
565 lo = 0;
566 hi = LUN_HASH_SIZE;
567 }
568 	for (; lo < hi; lo++) {
569 if (isp->isp_osinfo.lun_hash[lo]) {
570 return (1);
571 }
572 }
573 return (0);
574}
575
576static INLINE tstate_t *
577get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
578{
579 tstate_t *tptr = NULL;
580
581 if (lun == CAM_LUN_WILDCARD) {
582 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
583 tptr = &isp->isp_osinfo.tsdflt[bus];
584 tptr->hold++;
585 return (tptr);
586 }
587 } else {
588 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
589 if (tptr == NULL) {
590 return (NULL);
591 }
592 }
593
594 do {
595 if (tptr->lun == lun && tptr->bus == bus) {
596 tptr->hold++;
597 return (tptr);
598 }
599 } while ((tptr = tptr->next) != NULL);
600 return (tptr);
601}
602
603static INLINE void
604rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
605{
606 if (tptr->hold)
607 tptr->hold--;
608}
609
610static INLINE int
611isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
612{
613 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
614 isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
615#ifdef ISP_SMPLOCK
616 if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
617 return (-1);
618 }
619#else
620 if (tsleep(&isp->isp_osinfo.tgtcv0[bus], PZERO, "cv_isp", 0)) {
621 return (-1);
622 }
623#endif
624 isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
625 }
626 return (0);
627}
628
629static INLINE int
630isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
631{
632#ifdef ISP_SMPLOCK
633 if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
634 return (-1);
635 }
636#else
637 if (tsleep(&isp->isp_osinfo.tgtcv1[bus], PZERO, "cv_isp1", 0)) {
638 return (-1);
639 }
640#endif
641 return (0);
642}
643
644static INLINE void
645isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
646{
647 isp->isp_osinfo.rstatus[bus] = status;
648#ifdef ISP_SMPLOCK
649 cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
650#else
651 wakeup(&isp->isp_osinfo.tgtcv1[bus]);
652#endif
653}
654
655static INLINE void
656isp_vsema_rqe(struct ispsoftc *isp, int bus)
657{
658 if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
659 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
660#ifdef ISP_SMPLOCK
661 cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
662#else
663 		wakeup(&isp->isp_osinfo.tgtcv0[bus]);
664#endif
665 }
666 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
667}
668
669static INLINE atio_private_data_t *
670isp_get_atpd(struct ispsoftc *isp, int tag)
671{
672 atio_private_data_t *atp;
673 for (atp = isp->isp_osinfo.atpdp;
674 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
675 if (atp->tag == tag)
676 return (atp);
677 }
678 return (NULL);
679}
680
681static cam_status
682create_lun_state(struct ispsoftc *isp, int bus,
683 struct cam_path *path, tstate_t **rslt)
684{
685 cam_status status;
686 lun_id_t lun;
687 int hfx;
688 tstate_t *tptr, *new;
689
690 lun = xpt_path_lun_id(path);
691 if (lun < 0) {
692 return (CAM_LUN_INVALID);
693 }
694 if (is_lun_enabled(isp, bus, lun)) {
695 return (CAM_LUN_ALRDY_ENA);
696 }
697 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
698 if (new == NULL) {
699 return (CAM_RESRC_UNAVAIL);
700 }
701
702 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
703 xpt_path_target_id(path), xpt_path_lun_id(path));
704 if (status != CAM_REQ_CMP) {
705 free(new, M_DEVBUF);
706 return (status);
707 }
708 new->bus = bus;
709 new->lun = lun;
710 SLIST_INIT(&new->atios);
711 SLIST_INIT(&new->inots);
712 new->hold = 1;
713
714 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
715 tptr = isp->isp_osinfo.lun_hash[hfx];
716 if (tptr == NULL) {
717 isp->isp_osinfo.lun_hash[hfx] = new;
718 } else {
719 while (tptr->next)
720 tptr = tptr->next;
721 tptr->next = new;
722 }
723 *rslt = new;
724 return (CAM_REQ_CMP);
725}
726
727static INLINE void
728destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
729{
730 int hfx;
731 tstate_t *lw, *pw;
732
733 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
734 if (tptr->hold) {
735 return;
736 }
737 pw = isp->isp_osinfo.lun_hash[hfx];
738 if (pw == NULL) {
739 return;
740 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
741 isp->isp_osinfo.lun_hash[hfx] = pw->next;
742 } else {
743 lw = pw;
744 pw = lw->next;
745 while (pw) {
746 if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
747 lw->next = pw->next;
748 break;
749 }
750 lw = pw;
751 pw = pw->next;
752 }
753 if (pw == NULL) {
754 return;
755 }
756 }
757 free(tptr, M_DEVBUF);
758}
759
760/*
761 * we enter with our locks held.
762 */
763static void
764isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
765{
766 const char lfmt[] = "Lun now %sabled for target mode on channel %d";
767 struct ccb_en_lun *cel = &ccb->cel;
768 tstate_t *tptr;
769 u_int16_t rstat;
770 int bus, cmd, av, wildcard;
771 lun_id_t lun;
772 target_id_t tgt;
773
774
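	/*
	 * Overall flow: validate the target/lun, handle the wildcard
	 * (bus level) enable/disable case, toggle target mode on the
	 * channel as needed, and then, for a specific lun, issue the
	 * ENABLE/MODIFY LUN commands and wait for the firmware's answer.
	 */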
775 bus = XS_CHANNEL(ccb) & 0x1;
776 tgt = ccb->ccb_h.target_id;
777 lun = ccb->ccb_h.target_lun;
778
779 /*
780 * Do some sanity checking first.
781 */
782
783 if ((lun != CAM_LUN_WILDCARD) &&
784 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
785 ccb->ccb_h.status = CAM_LUN_INVALID;
786 return;
787 }
788
789 if (IS_SCSI(isp)) {
790 sdparam *sdp = isp->isp_param;
791 sdp += bus;
792 if (tgt != CAM_TARGET_WILDCARD &&
793 tgt != sdp->isp_initiator_id) {
794 ccb->ccb_h.status = CAM_TID_INVALID;
795 return;
796 }
797 } else {
798 if (tgt != CAM_TARGET_WILDCARD &&
799 tgt != FCPARAM(isp)->isp_iid) {
800 ccb->ccb_h.status = CAM_TID_INVALID;
801 return;
802 }
803 /*
804 		 * This is as good a place as any to check f/w capabilities.
805 */
806 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
807 isp_prt(isp, ISP_LOGERR,
808 "firmware does not support target mode");
809 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
810 return;
811 }
812 /*
813 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
814 		 * XXX: dork with our already fragile enable/disable code.
815 */
816 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
817 isp_prt(isp, ISP_LOGERR,
818 "firmware not SCCLUN capable");
819 }
820 }
821
822 if (tgt == CAM_TARGET_WILDCARD) {
823 if (lun == CAM_LUN_WILDCARD) {
824 wildcard = 1;
825 } else {
826 ccb->ccb_h.status = CAM_LUN_INVALID;
827 return;
828 }
829 } else {
830 wildcard = 0;
831 }
832
833 /*
834 * Next check to see whether this is a target/lun wildcard action.
835 *
836 * If so, we know that we can accept commands for luns that haven't
837 * been enabled yet and send them upstream. Otherwise, we have to
838 * handle them locally (if we see them at all).
839 */
840
841 if (wildcard) {
842 tptr = &isp->isp_osinfo.tsdflt[bus];
843 if (cel->enable) {
844 if (isp->isp_osinfo.tmflags[bus] &
845 TM_WILDCARD_ENABLED) {
846 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
847 return;
848 }
849 ccb->ccb_h.status =
850 xpt_create_path(&tptr->owner, NULL,
851 xpt_path_path_id(ccb->ccb_h.path),
852 xpt_path_target_id(ccb->ccb_h.path),
853 xpt_path_lun_id(ccb->ccb_h.path));
854 if (ccb->ccb_h.status != CAM_REQ_CMP) {
855 return;
856 }
857 SLIST_INIT(&tptr->atios);
858 SLIST_INIT(&tptr->inots);
859 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
860 } else {
861 if ((isp->isp_osinfo.tmflags[bus] &
862 TM_WILDCARD_ENABLED) == 0) {
863 ccb->ccb_h.status = CAM_REQ_CMP;
864 return;
865 }
866 if (tptr->hold) {
867 ccb->ccb_h.status = CAM_SCSI_BUSY;
868 return;
869 }
870 xpt_free_path(tptr->owner);
871 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
872 }
873 }
874
875 /*
876 * Now check to see whether this bus needs to be
877 * enabled/disabled with respect to target mode.
878 */
879 av = bus << 31;
880 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
881 av |= ENABLE_TARGET_FLAG;
882 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
883 if (av) {
884 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
885 if (wildcard) {
886 isp->isp_osinfo.tmflags[bus] &=
887 ~TM_WILDCARD_ENABLED;
888 xpt_free_path(tptr->owner);
889 }
890 return;
891 }
892 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
893 isp_prt(isp, ISP_LOGINFO,
894 "Target Mode enabled on channel %d", bus);
895 } else if (cel->enable == 0 &&
896 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
897 if (are_any_luns_enabled(isp, bus)) {
898 ccb->ccb_h.status = CAM_SCSI_BUSY;
899 return;
900 }
901 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
902 if (av) {
903 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
904 return;
905 }
906 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
907 isp_prt(isp, ISP_LOGINFO,
908 "Target Mode disabled on channel %d", bus);
909 }
910
911 if (wildcard) {
912 ccb->ccb_h.status = CAM_REQ_CMP;
913 return;
914 }
915
916 if (cel->enable) {
917 ccb->ccb_h.status =
918 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
919 if (ccb->ccb_h.status != CAM_REQ_CMP) {
920 return;
921 }
922 } else {
923 tptr = get_lun_statep(isp, bus, lun);
924 if (tptr == NULL) {
925 ccb->ccb_h.status = CAM_LUN_INVALID;
926 return;
927 }
928 }
929
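	/*
	 * Grab the per-bus semaphore that serializes lun enable/disable
	 * exchanges with the firmware; it is released at the 'out' label
	 * below via isp_vsema_rqe.
	 */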
930 if (isp_psema_sig_rqe(isp, bus)) {
931 rls_lun_statep(isp, tptr);
932 if (cel->enable)
933 destroy_lun_state(isp, tptr);
934 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
935 return;
936 }
937
938 if (cel->enable) {
939 u_int32_t seq = isp->isp_osinfo.rollinfo++;
940 int c, n, ulun = lun;
941
942 cmd = RQSTYPE_ENABLE_LUN;
943 c = DFLT_CMND_CNT;
944 n = DFLT_INOT_CNT;
945 if (IS_FC(isp) && lun != 0) {
946 cmd = RQSTYPE_MODIFY_LUN;
947 n = 0;
948 /*
949 * For SCC firmware, we only deal with setting
950 * (enabling or modifying) lun 0.
951 */
952 ulun = 0;
953 }
954 rstat = LUN_ERR;
955 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
956 xpt_print_path(ccb->ccb_h.path);
957 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
958 goto out;
959 }
960 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
961 xpt_print_path(ccb->ccb_h.path);
962 isp_prt(isp, ISP_LOGERR,
963 "wait for ENABLE/MODIFY LUN timed out");
964 goto out;
965 }
966 rstat = isp->isp_osinfo.rstatus[bus];
967 if (rstat != LUN_OK) {
968 xpt_print_path(ccb->ccb_h.path);
969 isp_prt(isp, ISP_LOGERR,
970 "ENABLE/MODIFY LUN returned 0x%x", rstat);
971 goto out;
972 }
973 } else {
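		/*
		 * Disable path: issue a MODIFY LUN and then a DISABLE LUN
		 * (the negated command codes tell isp_lun_cmd this is a
		 * disable); for FC non-zero luns only the MODIFY is needed.
		 * If no luns remain enabled on this bus afterwards, target
		 * mode is turned off for the whole channel as well.
		 */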
974 int c, n, ulun = lun;
975 u_int32_t seq;
976
977 rstat = LUN_ERR;
978 seq = isp->isp_osinfo.rollinfo++;
979 cmd = -RQSTYPE_MODIFY_LUN;
980
981 c = DFLT_CMND_CNT;
982 n = DFLT_INOT_CNT;
983 if (IS_FC(isp) && lun != 0) {
984 n = 0;
985 /*
986 * For SCC firmware, we only deal with setting
987 * (enabling or modifying) lun 0.
988 */
989 ulun = 0;
990 }
991 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
992 xpt_print_path(ccb->ccb_h.path);
993 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
994 goto out;
995 }
996 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
997 xpt_print_path(ccb->ccb_h.path);
998 isp_prt(isp, ISP_LOGERR,
999 "wait for MODIFY LUN timed out");
1000 goto out;
1001 }
1002 rstat = isp->isp_osinfo.rstatus[bus];
1003 if (rstat != LUN_OK) {
1004 xpt_print_path(ccb->ccb_h.path);
1005 isp_prt(isp, ISP_LOGERR,
1006 "MODIFY LUN returned 0x%x", rstat);
1007 goto out;
1008 }
1009 if (IS_FC(isp) && lun) {
1010 goto out;
1011 }
1012
1013 seq = isp->isp_osinfo.rollinfo++;
1014
1015 rstat = LUN_ERR;
1016 cmd = -RQSTYPE_ENABLE_LUN;
1017 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
1018 xpt_print_path(ccb->ccb_h.path);
1019 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1020 goto out;
1021 }
1022 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1023 xpt_print_path(ccb->ccb_h.path);
1024 isp_prt(isp, ISP_LOGERR,
1025 "wait for DISABLE LUN timed out");
1026 goto out;
1027 }
1028 rstat = isp->isp_osinfo.rstatus[bus];
1029 if (rstat != LUN_OK) {
1030 xpt_print_path(ccb->ccb_h.path);
1031 isp_prt(isp, ISP_LOGWARN,
1032 "DISABLE LUN returned 0x%x", rstat);
1033 goto out;
1034 }
1035 if (are_any_luns_enabled(isp, bus) == 0) {
1036 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1037 if (av) {
1038 isp_prt(isp, ISP_LOGWARN,
1039 "disable target mode on channel %d failed",
1040 bus);
1041 goto out;
1042 }
1043 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1044 xpt_print_path(ccb->ccb_h.path);
1045 isp_prt(isp, ISP_LOGINFO,
1046 "Target Mode disabled on channel %d", bus);
1047 }
1048 }
1049
1050out:
1051 isp_vsema_rqe(isp, bus);
1052
1053 if (rstat != LUN_OK) {
1054 xpt_print_path(ccb->ccb_h.path);
1055 isp_prt(isp, ISP_LOGWARN,
1056 "lun %sable failed", (cel->enable) ? "en" : "dis");
1057 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1058 rls_lun_statep(isp, tptr);
1059 if (cel->enable)
1060 destroy_lun_state(isp, tptr);
1061 } else {
1062 xpt_print_path(ccb->ccb_h.path);
1063 isp_prt(isp, ISP_LOGINFO, lfmt,
1064 (cel->enable) ? "en" : "dis", bus);
1065 rls_lun_statep(isp, tptr);
1066 if (cel->enable == 0) {
1067 destroy_lun_state(isp, tptr);
1068 }
1069 ccb->ccb_h.status = CAM_REQ_CMP;
1070 }
1071}
1072
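/*
 * Abort a queued-up target mode CCB (an accepted ATIO or an Immediate
 * Notify sitting on one of our free lists). We check that the CCB
 * refers to us (or the wildcard target), look up the lun state, unlink
 * the CCB from the appropriate list if we find it and mark it
 * CAM_REQ_ABORTED; anything we cannot find gets CAM_PATH_INVALID.
 */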
1073static cam_status
1074isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1075{
1076 tstate_t *tptr;
1077 struct ccb_hdr_slist *lp;
1078 struct ccb_hdr *curelm;
1079 int found;
1080 union ccb *accb = ccb->cab.abort_ccb;
1081
1082 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1083 if (IS_FC(isp) && (accb->ccb_h.target_id !=
1084 ((fcparam *) isp->isp_param)->isp_loopid)) {
1085 return (CAM_PATH_INVALID);
1086 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1087 ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1088 return (CAM_PATH_INVALID);
1089 }
1090 }
1091 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1092 if (tptr == NULL) {
1093 return (CAM_PATH_INVALID);
1094 }
1095 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1096 lp = &tptr->atios;
1097 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1098 lp = &tptr->inots;
1099 } else {
1100 rls_lun_statep(isp, tptr);
1101 return (CAM_UA_ABORT);
1102 }
1103 curelm = SLIST_FIRST(lp);
1104 found = 0;
1105 if (curelm == &accb->ccb_h) {
1106 found = 1;
1107 SLIST_REMOVE_HEAD(lp, sim_links.sle);
1108 } else {
1109 while(curelm != NULL) {
1110 struct ccb_hdr *nextelm;
1111
1112 nextelm = SLIST_NEXT(curelm, sim_links.sle);
1113 if (nextelm == &accb->ccb_h) {
1114 found = 1;
1115 SLIST_NEXT(curelm, sim_links.sle) =
1116 SLIST_NEXT(nextelm, sim_links.sle);
1117 break;
1118 }
1119 curelm = nextelm;
1120 }
1121 }
1122 rls_lun_statep(isp, tptr);
1123 if (found) {
1124 accb->ccb_h.status = CAM_REQ_ABORTED;
1125 return (CAM_REQ_CMP);
1126 }
1127 return(CAM_PATH_INVALID);
1128}
1129
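/*
 * Start a CTIO (parallel SCSI) or CTIO2 (Fibre Channel) for an
 * XPT_CONT_TARGET_IO CCB. We build the queue entry in local storage,
 * register a handle for the CCB, and then run the platform DMA setup
 * code, which copies the (appropriately swizzled) entry onto the
 * request queue so the firmware can be told about it.
 */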
1130static cam_status
1131isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1132{
1133 void *qe;
1134 struct ccb_scsiio *cso = &ccb->csio;
1135 u_int16_t *hp, save_handle;
1136 u_int16_t nxti, optr;
1137 u_int8_t local[QENTRY_LEN];
1138
1139
1140 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1141 xpt_print_path(ccb->ccb_h.path);
1142 printf("Request Queue Overflow in isp_target_start_ctio\n");
1143 return (CAM_RESRC_UNAVAIL);
1144 }
1145 bzero(local, QENTRY_LEN);
1146
1147 /*
1148 * We're either moving data or completing a command here.
1149 */
1150
1151 if (IS_FC(isp)) {
1152 atio_private_data_t *atp;
1153 ct2_entry_t *cto = (ct2_entry_t *) local;
1154
1155 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1156 cto->ct_header.rqs_entry_count = 1;
1157 cto->ct_iid = cso->init_id;
1158 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1159 cto->ct_lun = ccb->ccb_h.target_lun;
1160 }
1161
1162 atp = isp_get_atpd(isp, cso->tag_id);
1163 if (atp == NULL) {
1164 isp_prt(isp, ISP_LOGERR,
1165 "cannot find private data adjunct for tag %x",
1166 cso->tag_id);
1167 return (-1);
1168 }
1169
1170 cto->ct_rxid = cso->tag_id;
1171 if (cso->dxfer_len == 0) {
1172 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1173 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1174 cto->ct_flags |= CT2_SENDSTATUS;
1175 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1176 cto->ct_resid =
1177 atp->orig_datalen - atp->bytes_xfered;
1178 if (cto->ct_resid < 0) {
1179 cto->rsp.m1.ct_scsi_status |=
1180 CT2_DATA_OVER;
1181 } else if (cto->ct_resid > 0) {
1182 cto->rsp.m1.ct_scsi_status |=
1183 CT2_DATA_UNDER;
1184 }
1185 }
1186 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1187 int m = min(cso->sense_len, MAXRESPLEN);
1188 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1189 cto->rsp.m1.ct_senselen = m;
1190 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1191 }
1192 } else {
1193 cto->ct_flags |= CT2_FLAG_MODE0;
1194 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1195 cto->ct_flags |= CT2_DATA_IN;
1196 } else {
1197 cto->ct_flags |= CT2_DATA_OUT;
1198 }
1199 cto->ct_reloff = atp->bytes_xfered;
1200 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1201 cto->ct_flags |= CT2_SENDSTATUS;
1202 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1203 cto->ct_resid =
1204 atp->orig_datalen -
1205 (atp->bytes_xfered + cso->dxfer_len);
1206 if (cto->ct_resid < 0) {
1207 cto->rsp.m0.ct_scsi_status |=
1208 CT2_DATA_OVER;
1209 } else if (cto->ct_resid > 0) {
1210 cto->rsp.m0.ct_scsi_status |=
1211 CT2_DATA_UNDER;
1212 }
1213 } else {
1214 atp->last_xframt = cso->dxfer_len;
1215 }
1216 /*
1217 * If we're sending data and status back together,
1218 * we can't also send back sense data as well.
1219 */
1220 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1221 }
1222
1223 if (cto->ct_flags & CT2_SENDSTATUS) {
1224 isp_prt(isp, ISP_LOGTDEBUG0,
1225 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1226 cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1227 cso->dxfer_len, cto->ct_resid);
1228 cto->ct_flags |= CT2_CCINCR;
1229 atp->state = ATPD_STATE_LAST_CTIO;
1230 } else
1231 atp->state = ATPD_STATE_CTIO;
1232 cto->ct_timeout = 10;
1233 hp = &cto->ct_syshandle;
1234 } else {
1235 ct_entry_t *cto = (ct_entry_t *) local;
1236
1237 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1238 cto->ct_header.rqs_entry_count = 1;
1239 cto->ct_iid = cso->init_id;
1240 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1241 cto->ct_tgt = ccb->ccb_h.target_id;
1242 cto->ct_lun = ccb->ccb_h.target_lun;
1243 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1244 if (AT_HAS_TAG(cso->tag_id)) {
1245 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1246 cto->ct_flags |= CT_TQAE;
1247 }
1248 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1249 cto->ct_flags |= CT_NODISC;
1250 }
1251 if (cso->dxfer_len == 0) {
1252 cto->ct_flags |= CT_NO_DATA;
1253 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1254 cto->ct_flags |= CT_DATA_IN;
1255 } else {
1256 cto->ct_flags |= CT_DATA_OUT;
1257 }
1258 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1259 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1260 cto->ct_scsi_status = cso->scsi_status;
1261 cto->ct_resid = cso->resid;
1262 isp_prt(isp, ISP_LOGTDEBUG0,
1263 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1264 cto->ct_fwhandle, cso->scsi_status, cso->resid,
1265 cso->tag_id);
1266 }
1267 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1268 cto->ct_timeout = 10;
1269 hp = &cto->ct_syshandle;
1270 }
1271
1272 if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1273 xpt_print_path(ccb->ccb_h.path);
1274 printf("No XFLIST pointers for isp_target_start_ctio\n");
1275 return (CAM_RESRC_UNAVAIL);
1276 }
1277
1278
1279 /*
1280 * Call the dma setup routines for this entry (and any subsequent
1281 * CTIOs) if there's data to move, and then tell the f/w it's got
1282 * new things to play with. As with isp_start's usage of DMA setup,
1283 * any swizzling is done in the machine dependent layer. Because
1284 * of this, we put the request onto the queue area first in native
1285 * format.
1286 */
1287
1288 save_handle = *hp;
1289
1290 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1291 case CMD_QUEUED:
1292 ISP_ADD_REQUEST(isp, nxti);
1293 return (CAM_REQ_INPROG);
1294
1295 case CMD_EAGAIN:
1296 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1297 isp_destroy_handle(isp, save_handle);
1298 return (CAM_RESRC_UNAVAIL);
1299
1300 default:
1301 isp_destroy_handle(isp, save_handle);
1302 return (XS_ERR(ccb));
1303 }
1304}
1305
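/*
 * Hand an ATIO resource back to the firmware once we're done with it.
 * isp_refire_putback_atio is a timeout wrapper (run at splcam) used to
 * retry when the request queue is momentarily full; the real work of
 * rebuilding the ATIO/ATIO2 queue entry from the CCB and completing the
 * CTIO is done in isp_target_putback_atio.
 */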
1306static void
1307isp_refire_putback_atio(void *arg)
1308{
1309 int s = splcam();
1310 isp_target_putback_atio(arg);
1311 splx(s);
1312}
1313
1314static void
1315isp_target_putback_atio(union ccb *ccb)
1316{
1317 struct ispsoftc *isp;
1318 struct ccb_scsiio *cso;
1319 u_int16_t nxti, optr;
1320 void *qe;
1321
1322 isp = XS_ISP(ccb);
1323
1324 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1325 (void) timeout(isp_refire_putback_atio, ccb, 10);
1326 isp_prt(isp, ISP_LOGWARN,
1327 "isp_target_putback_atio: Request Queue Overflow");
1328 return;
1329 }
1330 bzero(qe, QENTRY_LEN);
1331 cso = &ccb->csio;
1332 if (IS_FC(isp)) {
1333 at2_entry_t local, *at = &local;
1334 MEMZERO(at, sizeof (at2_entry_t));
1335 at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1336 at->at_header.rqs_entry_count = 1;
1337 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1338 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1339 } else {
1340 at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1341 }
1342 at->at_status = CT_OK;
1343 at->at_rxid = cso->tag_id;
1344 at->at_iid = cso->ccb_h.target_id;
1345 isp_put_atio2(isp, at, qe);
1346 } else {
1347 at_entry_t local, *at = &local;
1348 MEMZERO(at, sizeof (at_entry_t));
1349 at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1350 at->at_header.rqs_entry_count = 1;
1351 at->at_iid = cso->init_id;
1352 at->at_iid |= XS_CHANNEL(ccb) << 7;
1353 at->at_tgt = cso->ccb_h.target_id;
1354 at->at_lun = cso->ccb_h.target_lun;
1355 at->at_status = CT_OK;
1356 at->at_tag_val = AT_GET_TAG(cso->tag_id);
1357 at->at_handle = AT_GET_HANDLE(cso->tag_id);
1358 isp_put_atio(isp, at, qe);
1359 }
1360 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1361 ISP_ADD_REQUEST(isp, nxti);
1362 isp_complete_ctio(ccb);
1363}
1364
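/*
 * Complete a CTIO CCB back to CAM: mark it CAM_REQ_CMP if it's still
 * nominally in progress, clear CAM_SIM_QUEUED and call xpt_done.
 */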
1365static void
1366isp_complete_ctio(union ccb *ccb)
1367{
1368 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1369 ccb->ccb_h.status |= CAM_REQ_CMP;
1370 }
1371 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1372 xpt_done(ccb);
1373}
1374
1375/*
1376 * Handle ATIO stuff that the generic code can't.
1377 * This means handling CDBs.
1378 */
1379
1380static int
1381isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1382{
1383 tstate_t *tptr;
1384 int status, bus, iswildcard;
1385 struct ccb_accept_tio *atiop;
1386
1387 /*
1388 * The firmware status (except for the QLTM_SVALID bit)
1389 * indicates why this ATIO was sent to us.
1390 *
1391	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1392 *
1393 * If the DISCONNECTS DISABLED bit is set in the flags field,
1394 * we're still connected on the SCSI bus.
1395 */
1396 status = aep->at_status;
1397 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1398 /*
1399 * Bus Phase Sequence error. We should have sense data
1400		 * suggested by the f/w. I'm not quite sure yet what
1401 * to do about this for CAM.
1402 */
1403 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1404 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1405 return (0);
1406 }
1407 if ((status & ~QLTM_SVALID) != AT_CDB) {
1408 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1409 status);
1410 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1411 return (0);
1412 }
1413
1414 bus = GET_BUS_VAL(aep->at_iid);
1415 tptr = get_lun_statep(isp, bus, aep->at_lun);
1416 if (tptr == NULL) {
1417 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1418 iswildcard = 1;
1419 } else {
1420 iswildcard = 0;
1421 }
1422
1423 if (tptr == NULL) {
1424 /*
1425 * Because we can't autofeed sense data back with
1426 * a command for parallel SCSI, we can't give back
1427 * a CHECK CONDITION. We'll give back a BUSY status
1428 * instead. This works out okay because the only
1429 * time we should, in fact, get this, is in the
1430 * case that somebody configured us without the
1431 * blackhole driver, so they get what they deserve.
1432 */
1433 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1434 return (0);
1435 }
1436
1437 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1438 if (atiop == NULL) {
1439 /*
1440 * Because we can't autofeed sense data back with
1441 * a command for parallel SCSI, we can't give back
1442 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1443 * instead. This works out okay because the only time we
1444 * should, in fact, get this, is in the case that we've
1445 * run out of ATIOS.
1446 */
1447 xpt_print_path(tptr->owner);
1448 isp_prt(isp, ISP_LOGWARN,
1449 "no ATIOS for lun %d from initiator %d on channel %d",
1450 aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1451 if (aep->at_flags & AT_TQAE)
1452 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1453 else
1454 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1455 rls_lun_statep(isp, tptr);
1456 return (0);
1457 }
1458 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1459 if (iswildcard) {
1460 atiop->ccb_h.target_id = aep->at_tgt;
1461 atiop->ccb_h.target_lun = aep->at_lun;
1462 }
1463 if (aep->at_flags & AT_NODISC) {
1464 atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1465 } else {
1466 atiop->ccb_h.flags = 0;
1467 }
1468
1469 if (status & QLTM_SVALID) {
1470 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1471 atiop->sense_len = amt;
1472 MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1473 } else {
1474 atiop->sense_len = 0;
1475 }
1476
1477 atiop->init_id = GET_IID_VAL(aep->at_iid);
1478 atiop->cdb_len = aep->at_cdblen;
1479 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1480 atiop->ccb_h.status = CAM_CDB_RECVD;
1481 /*
1482 * Construct a tag 'id' based upon tag value (which may be 0..255)
1483 * and the handle (which we have to preserve).
1484 */
1485 AT_MAKE_TAGID(atiop->tag_id, aep);
1486 if (aep->at_flags & AT_TQAE) {
1487 atiop->tag_action = aep->at_tag_type;
1488 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1489 }
1490 xpt_done((union ccb*)atiop);
1491 isp_prt(isp, ISP_LOGTDEBUG0,
1492 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1493 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1494 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1495 aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1496 "nondisc" : "disconnecting");
1497 rls_lun_statep(isp, tptr);
1498 return (0);
1499}
1500
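/*
 * Handle an ATIO2 (Fibre Channel) that the generic code couldn't.
 * We find the lun state (falling back to the wildcard lun), pull a free
 * ATIO CCB and a private data adjunct, fill them in from the queue
 * entry and complete the ATIO up to CAM; if we're out of resources we
 * answer with QUEUE FULL.
 */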
1501static int
1502isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1503{
1504 lun_id_t lun;
1505 tstate_t *tptr;
1506 struct ccb_accept_tio *atiop;
1507 atio_private_data_t *atp;
1508
1509 /*
1510 * The firmware status (except for the QLTM_SVALID bit)
1511 * indicates why this ATIO was sent to us.
1512 *
1513	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1514 */
1515 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1516 isp_prt(isp, ISP_LOGWARN,
1517 "bogus atio (0x%x) leaked to platform", aep->at_status);
1518 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1519 return (0);
1520 }
1521
1522 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1523 lun = aep->at_scclun;
1524 } else {
1525 lun = aep->at_lun;
1526 }
1527 tptr = get_lun_statep(isp, 0, lun);
1528 if (tptr == NULL) {
1529 isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1530 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1531 }
1532
1533 if (tptr == NULL) {
1534 /*
1535 * What we'd like to know is whether or not we have a listener
1536 * upstream that really hasn't configured yet. If we do, then
1537 * we can give a more sensible reply here. If not, then we can
1538 * reject this out of hand.
1539 *
1540 * Choices for what to send were
1541 *
1542 * Not Ready, Unit Not Self-Configured Yet
1543 * (0x2,0x3e,0x00)
1544 *
1545 * for the former and
1546 *
1547 * Illegal Request, Logical Unit Not Supported
1548 * (0x5,0x25,0x00)
1549 *
1550 * for the latter.
1551 *
1552 * We used to decide whether there was at least one listener
1553 * based upon whether the black hole driver was configured.
1554 * However, recent config(8) changes have made this hard to do
1555 * at this time.
1556 *
1557 */
1558 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1559 return (0);
1560 }
1561
1562 atp = isp_get_atpd(isp, 0);
1563 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1564 if (atiop == NULL || atp == NULL) {
1565 /*
1566 * Because we can't autofeed sense data back with
1567 * a command for parallel SCSI, we can't give back
1568 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1569 * instead. This works out okay because the only time we
1570 * should, in fact, get this, is in the case that we've
1571 * run out of ATIOS.
1572 */
1573 xpt_print_path(tptr->owner);
1574 isp_prt(isp, ISP_LOGWARN,
1575 "no %s for lun %d from initiator %d",
1576		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPs" :
1577 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1578 rls_lun_statep(isp, tptr);
1579 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1580 return (0);
1581 }
1582 atp->state = ATPD_STATE_ATIO;
1583 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1584 tptr->atio_count--;
1585 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1586 lun, tptr->atio_count);
1587
1588 if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1589 atiop->ccb_h.target_id =
1590 ((fcparam *)isp->isp_param)->isp_loopid;
1591 atiop->ccb_h.target_lun = lun;
1592 }
1593 /*
1594 * We don't get 'suggested' sense data as we do with SCSI cards.
1595 */
1596 atiop->sense_len = 0;
1597
1598 atiop->init_id = aep->at_iid;
1599 atiop->cdb_len = ATIO2_CDBLEN;
1600 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1601 atiop->ccb_h.status = CAM_CDB_RECVD;
1602 atiop->tag_id = aep->at_rxid;
1603 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1604 case ATIO2_TC_ATTR_SIMPLEQ:
1605 atiop->tag_action = MSG_SIMPLE_Q_TAG;
1606 break;
1607 case ATIO2_TC_ATTR_HEADOFQ:
1608 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1609 break;
1610 case ATIO2_TC_ATTR_ORDERED:
1611 atiop->tag_action = MSG_ORDERED_Q_TAG;
1612 break;
1613 case ATIO2_TC_ATTR_ACAQ: /* ?? */
1614 case ATIO2_TC_ATTR_UNTAGGED:
1615 default:
1616 atiop->tag_action = 0;
1617 break;
1618 }
1619 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1620
1621 atp->tag = atiop->tag_id;
1622 atp->lun = lun;
1623 atp->orig_datalen = aep->at_datalen;
1624 atp->last_xframt = 0;
1625 atp->bytes_xfered = 0;
1626 atp->state = ATPD_STATE_CAM;
1627 xpt_done((union ccb*)atiop);
1628
1629 isp_prt(isp, ISP_LOGTDEBUG0,
1630 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1631 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1632 lun, aep->at_taskflags, aep->at_datalen);
1633 rls_lun_statep(isp, tptr);
1634 return (0);
1635}
1636
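/*
 * Handle a completed CTIO or CTIO2. We recover the original CCB from
 * the handle, collect residual and sense information, and then either
 * complete the CCB back to CAM or, on error, put the ATIO back to the
 * firmware. Intermediate (non-final) CTIOs are just noted and dropped.
 */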
1637static int
1638isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1639{
1640 union ccb *ccb;
1641 int sentstatus, ok, notify_cam, resid = 0;
1642 u_int16_t tval;
1643
1644 /*
1645 * CTIO and CTIO2 are close enough....
1646 */
1647
1648 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1649 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1650 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1651
1652 if (IS_FC(isp)) {
1653 ct2_entry_t *ct = arg;
1654 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1655 if (atp == NULL) {
1656 isp_prt(isp, ISP_LOGERR,
1657 "cannot find adjunct for %x after I/O",
1658 ct->ct_rxid);
1659 return (0);
1660 }
1661 sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1662 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1663 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1664 ccb->ccb_h.status |= CAM_SENT_SENSE;
1665 }
1666 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1667 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1668 resid = ct->ct_resid;
1669 atp->bytes_xfered += (atp->last_xframt - resid);
1670 atp->last_xframt = 0;
1671 }
1672 if (sentstatus || !ok) {
1673 atp->tag = 0;
1674 }
1675 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1676 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1677 ct->ct_rxid, ct->ct_status, ct->ct_flags,
1678 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1679 resid, sentstatus? "FIN" : "MID");
1680 tval = ct->ct_rxid;
1681
1682 /* XXX: should really come after isp_complete_ctio */
1683 atp->state = ATPD_STATE_PDON;
1684 } else {
1685 ct_entry_t *ct = arg;
1686 sentstatus = ct->ct_flags & CT_SENDSTATUS;
1687 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1688 /*
1689 * We *ought* to be able to get back to the original ATIO
1690 * here, but for some reason this gets lost. It's just as
1691 * well because it's squirrelled away as part of periph
1692 * private data.
1693 *
1694 * We can live without it as long as we continue to use
1695 * the auto-replenish feature for CTIOs.
1696 */
1697 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1698 if (ct->ct_status & QLTM_SVALID) {
1699 char *sp = (char *)ct;
1700 sp += CTIO_SENSE_OFFSET;
1701 ccb->csio.sense_len =
1702 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1703 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1704 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1705 }
1706 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1707 resid = ct->ct_resid;
1708 }
1709 isp_prt(isp, ISP_LOGTDEBUG0,
1710 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1711 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1712 ct->ct_status, ct->ct_flags, resid,
1713 sentstatus? "FIN" : "MID");
1714 tval = ct->ct_fwhandle;
1715 }
1716 ccb->csio.resid += resid;
1717
1718 /*
1719 * We're here either because intermediate data transfers are done
1720 * and/or the final status CTIO (which may have joined with a
1721 * Data Transfer) is done.
1722 *
1723 * In any case, for this platform, the upper layers figure out
1724 * what to do next, so all we do here is collect status and
1725 * pass information along. Any DMA handles have already been
1726 * freed.
1727 */
1728 if (notify_cam == 0) {
1729 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
1730 return (0);
1731 }
1732
1733 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1734 (sentstatus)? " FINAL " : "MIDTERM ", tval);
1735
1736 if (!ok) {
1737 isp_target_putback_atio(ccb);
1738 } else {
1739 isp_complete_ctio(ccb);
1740
1741 }
1742 return (0);
1743}
1744
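/*
 * Handle Immediate Notify entries. The parallel SCSI version is still
 * a stub; the Fibre Channel version logs port state changes and, for
 * ABORT TASK, tries to pass an immediate notify CCB up to CAM.
 */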
1745static int
1746isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1747{
1748 return (0); /* XXXX */
1749}
1750
1751static int
1752isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1753{
1754
1755 switch (inp->in_status) {
1756 case IN_PORT_LOGOUT:
1757 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1758 inp->in_iid);
1759 break;
1760 case IN_PORT_CHANGED:
1761 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1762 inp->in_iid);
1763 break;
1764 case IN_GLOBAL_LOGO:
1765 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1766 break;
1767 case IN_ABORT_TASK:
1768 {
1769 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1770 struct ccb_immed_notify *inot = NULL;
1771
1772 if (atp) {
1773 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1774 if (tptr) {
1775 inot = (struct ccb_immed_notify *)
1776 SLIST_FIRST(&tptr->inots);
1777 if (inot) {
1778 SLIST_REMOVE_HEAD(&tptr->inots,
1779 sim_links.sle);
1780 }
1781 }
1782 isp_prt(isp, ISP_LOGWARN,
1783 "abort task RX_ID %x IID %d state %d",
1784 inp->in_seqid, inp->in_iid, atp->state);
1785 } else {
1786 isp_prt(isp, ISP_LOGWARN,
1787 "abort task RX_ID %x from iid %d, state unknown",
1788 inp->in_seqid, inp->in_iid);
1789 }
1790 if (inot) {
1791 inot->initiator_id = inp->in_iid;
1792 inot->sense_len = 0;
1793 inot->message_args[0] = MSG_ABORT_TAG;
1794 inot->message_args[1] = inp->in_seqid & 0xff;
1795 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1796 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1797 xpt_done((union ccb *)inot);
1798 }
1799 break;
1800 }
1801 default:
1802 break;
1803 }
1804 return (0);
1805}
1806#endif
1807
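/*
 * CAM async callback. On AC_LOST_DEVICE for parallel SCSI we
 * temporarily point the target's goal flags at safe default values,
 * force a parameter update, and then restore the previous goal flags.
 */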
1808static void
1809isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1810{
1811 struct cam_sim *sim;
1812 struct ispsoftc *isp;
1813
1814 sim = (struct cam_sim *)cbarg;
1815 isp = (struct ispsoftc *) cam_sim_softc(sim);
1816 switch (code) {
1817 case AC_LOST_DEVICE:
1818 if (IS_SCSI(isp)) {
1819 u_int16_t oflags, nflags;
1820 sdparam *sdp = isp->isp_param;
1821 int tgt;
1822
1823 tgt = xpt_path_target_id(path);
1824 if (tgt >= 0) {
1825 sdp += cam_sim_bus(sim);
1826 ISP_LOCK(isp);
1827 nflags = sdp->isp_devparam[tgt].nvrm_flags;
1828#ifndef ISP_TARGET_MODE
1829 nflags &= DPARM_SAFE_DFLT;
1830 if (isp->isp_loaded_fw) {
1831 nflags |= DPARM_NARROW | DPARM_ASYNC;
1832 }
1833#else
1834 nflags = DPARM_DEFAULT;
1835#endif
1836 oflags = sdp->isp_devparam[tgt].goal_flags;
1837 sdp->isp_devparam[tgt].goal_flags = nflags;
1838 sdp->isp_devparam[tgt].dev_update = 1;
1839 isp->isp_update |= (1 << cam_sim_bus(sim));
1840 (void) isp_control(isp,
1841 ISPCTL_UPDATE_PARAMS, NULL);
1842 sdp->isp_devparam[tgt].goal_flags = oflags;
1843 ISP_UNLOCK(isp);
1844 }
1845 }
1846 break;
1847 default:
1848 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1849 break;
1850 }
1851}
1852
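/*
 * Polled completion entry point: read the interrupt status registers
 * and run the interrupt handler if anything is pending.
 */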
1853static void
1854isp_poll(struct cam_sim *sim)
1855{
1856 struct ispsoftc *isp = cam_sim_softc(sim);
1857 u_int16_t isr, sema, mbox;
1858
1859 ISP_LOCK(isp);
1860 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1861 isp_intr(isp, isr, sema, mbox);
1862 }
1863 ISP_UNLOCK(isp);
1864}
1865
1866
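/*
 * Per-command watchdog. When a command's timer fires we first poll for
 * a pending interrupt in case the command actually completed, then
 * either finish it, abort it for good (on the second pass, after the
 * grace period), or push a SYNC_ALL marker and give it one more period
 * of grace.
 */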
1867static void
1868isp_watchdog(void *arg)
1869{
1870 XS_T *xs = arg;
1871 struct ispsoftc *isp = XS_ISP(xs);
1872 u_int32_t handle;
1873 int iok;
1874
1875 /*
1876 * We've decided this command is dead. Make sure we're not trying
1877	 * to kill a command that's already dead by getting its handle
1878	 * and seeing whether it's still alive.
1879 */
1880 ISP_LOCK(isp);
1881 iok = isp->isp_osinfo.intsok;
1882 isp->isp_osinfo.intsok = 0;
1883 handle = isp_find_handle(isp, xs);
1884 if (handle) {
1885 u_int16_t isr, sema, mbox;
1886
1887 if (XS_CMD_DONE_P(xs)) {
1888 isp_prt(isp, ISP_LOGDEBUG1,
1889 "watchdog found done cmd (handle 0x%x)", handle);
1890 ISP_UNLOCK(isp);
1891 return;
1892 }
1893
1894 if (XS_CMD_WDOG_P(xs)) {
1895 isp_prt(isp, ISP_LOGDEBUG2,
1896 "recursive watchdog (handle 0x%x)", handle);
1897 ISP_UNLOCK(isp);
1898 return;
1899 }
1900
1901 XS_CMD_S_WDOG(xs);
1902 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1903 isp_intr(isp, isr, sema, mbox);
1904 }
1905 if (XS_CMD_DONE_P(xs)) {
1906 isp_prt(isp, ISP_LOGDEBUG2,
1907 "watchdog cleanup for handle 0x%x", handle);
1908 xpt_done((union ccb *) xs);
1909 } else if (XS_CMD_GRACE_P(xs)) {
1910 /*
1911 * Make sure the command is *really* dead before we
1912 * release the handle (and DMA resources) for reuse.
1913 */
1914 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1915
1916 /*
1917			 * After this point, the command is really dead.
1918 */
1919 if (XS_XFRLEN(xs)) {
1920 ISP_DMAFREE(isp, xs, handle);
1921 }
1922 isp_destroy_handle(isp, handle);
1923 xpt_print_path(xs->ccb_h.path);
1924 isp_prt(isp, ISP_LOGWARN,
1925 "watchdog timeout for handle 0x%x", handle);
1926 XS_SETERR(xs, CAM_CMD_TIMEOUT);
1927 XS_CMD_C_WDOG(xs);
1928 isp_done(xs);
1929 } else {
1930 u_int16_t nxti, optr;
1931 ispreq_t local, *mp= &local, *qe;
1932
1933 XS_CMD_C_WDOG(xs);
1934 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1935 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1936 ISP_UNLOCK(isp);
1937 return;
1938 }
1939 XS_CMD_S_GRACE(xs);
1940 MEMZERO((void *) mp, sizeof (*mp));
1941 mp->req_header.rqs_entry_count = 1;
1942 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1943 mp->req_modifier = SYNC_ALL;
1944 mp->req_target = XS_CHANNEL(xs) << 7;
1945 isp_put_request(isp, mp, qe);
1946 ISP_ADD_REQUEST(isp, nxti);
1947 }
1948 } else {
1949 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1950 }
1951 isp->isp_osinfo.intsok = iok;
1952 ISP_UNLOCK(isp);
1953}
1954
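/*
 * Fibre Channel support thread. It loops waiting for the loop to reach
 * a good state (or for it to become clear that it won't), releases any
 * SIMQ freeze we put in place while the loop was down, and then sleeps
 * until a change notification wakes it up again.
 */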
1955static void
1956isp_kthread(void *arg)
1957{
1958 struct ispsoftc *isp = arg;
1959
1960#ifdef ISP_SMPLOCK
1961 mtx_lock(&isp->isp_lock);
1962#else
1963 mtx_lock(&Giant);
1964#endif
1965 /*
1966	 * The first loop is for our use while we have yet to get
1967	 * good Fibre Channel state.
1968 */
1969 for (;;) {
1970 int wasfrozen;
1971
1972 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1973 while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1974 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1975 if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1976 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1977 if (FCPARAM(isp)->loop_seen_once == 0 ||
1978 isp->isp_osinfo.ktmature == 0) {
1979 break;
1980 }
1981 }
1982#ifdef ISP_SMPLOCK
1983 msleep(isp_kthread, &isp->isp_lock,
1984 PRIBIO, "isp_fcthrd", hz);
1985#else
1986 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1987#endif
1988 }
1989
1990 /*
1991 * Even if we didn't get good loop state we may be
1992 * unfreezing the SIMQ so that we can kill off
1993 * commands (if we've never seen loop before, for example).
1994 */
1995 isp->isp_osinfo.ktmature = 1;
1996 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1997 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1998 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1999 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2000 ISPLOCK_2_CAMLOCK(isp);
2001 xpt_release_simq(isp->isp_sim, 1);
2002 CAMLOCK_2_ISPLOCK(isp);
2003 }
2004 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
2005#ifdef ISP_SMPLOCK
2006 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2007#else
2008 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
2009#endif
2010 }
2011}
2012
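/*
 * Main CAM action entry point: dispatch incoming CCBs (SCSI I/O, target
 * mode resources, aborts, resets, transfer settings, path inquiry and
 * so on) to the core driver and complete them as appropriate.
 */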
2013static void
2014isp_action(struct cam_sim *sim, union ccb *ccb)
2015{
2016 int bus, tgt, error;
2017 struct ispsoftc *isp;
2018 struct ccb_trans_settings *cts;
2019
2020 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2021
2022 isp = (struct ispsoftc *)cam_sim_softc(sim);
2023 ccb->ccb_h.sim_priv.entries[0].field = 0;
2024 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2025 if (isp->isp_state != ISP_RUNSTATE &&
2026 ccb->ccb_h.func_code == XPT_SCSI_IO) {
2027 CAMLOCK_2_ISPLOCK(isp);
2028 isp_init(isp);
2029 if (isp->isp_state != ISP_INITSTATE) {
2030 ISP_UNLOCK(isp);
2031 /*
2032 * Lie. Say it was a selection timeout.
2033 */
2034 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2035 xpt_freeze_devq(ccb->ccb_h.path, 1);
2036 xpt_done(ccb);
2037 return;
2038 }
2039 isp->isp_state = ISP_RUNSTATE;
2040 ISPLOCK_2_CAMLOCK(isp);
2041 }
2042 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2043
2044
2045 switch (ccb->ccb_h.func_code) {
2046 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2047 /*
2048 * Do a couple of preliminary checks...
2049 */
2050 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2051 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2052 ccb->ccb_h.status = CAM_REQ_INVALID;
2053 xpt_done(ccb);
2054 break;
2055 }
2056 }
2057#ifdef DIAGNOSTIC
2058 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2059 ccb->ccb_h.status = CAM_PATH_INVALID;
2060 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2061 ccb->ccb_h.status = CAM_PATH_INVALID;
2062 }
2063 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2064 isp_prt(isp, ISP_LOGERR,
2065 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2066 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2067 xpt_done(ccb);
2068 break;
2069 }
2070#endif
2071 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2072 CAMLOCK_2_ISPLOCK(isp);
2073 error = isp_start((XS_T *) ccb);
2074 switch (error) {
2075 case CMD_QUEUED:
2076 ccb->ccb_h.status |= CAM_SIM_QUEUED;
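			/*
			 * Convert the CAM timeout (in milliseconds, with
			 * CAM_TIME_DEFAULT meaning one minute) into clock
			 * ticks, pad it with a couple of seconds of slack
			 * and arm the per-command watchdog.
			 */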
2077 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2078 u_int64_t ticks = (u_int64_t) hz;
2079 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2080 ticks = 60 * 1000 * ticks;
2081 else
2082 ticks = ccb->ccb_h.timeout * hz;
2083 ticks = ((ticks + 999) / 1000) + hz + hz;
2084 if (ticks >= 0x80000000) {
2085 isp_prt(isp, ISP_LOGERR,
2086 "timeout overflow");
2087 ticks = 0x7fffffff;
2088 }
2089 ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2090 (caddr_t)ccb, (int)ticks);
2091 } else {
2092 callout_handle_init(&ccb->ccb_h.timeout_ch);
2093 }
2094 ISPLOCK_2_CAMLOCK(isp);
2095 break;
2096 case CMD_RQLATER:
2097 /*
2098 * This can only happen for Fibre Channel
2099 */
2100 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2101 if (FCPARAM(isp)->loop_seen_once == 0 &&
2102 isp->isp_osinfo.ktmature) {
2103 ISPLOCK_2_CAMLOCK(isp);
2104 XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2105 xpt_done(ccb);
2106 break;
2107 }
2108#ifdef ISP_SMPLOCK
2109 cv_signal(&isp->isp_osinfo.kthread_cv);
2110#else
2111 wakeup(&isp->isp_osinfo.kthread_cv);
2112#endif
2113 isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2114 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2115 ISPLOCK_2_CAMLOCK(isp);
2116 xpt_done(ccb);
2117 break;
2118 case CMD_EAGAIN:
2119 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2120 ISPLOCK_2_CAMLOCK(isp);
2121 xpt_done(ccb);
2122 break;
2123 case CMD_COMPLETE:
2124 isp_done((struct ccb_scsiio *) ccb);
2125 ISPLOCK_2_CAMLOCK(isp);
2126 break;
2127 default:
2128 isp_prt(isp, ISP_LOGERR,
2129 "What's this? 0x%x at %d in file %s",
2130 error, __LINE__, __FILE__);
2131 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2132 xpt_done(ccb);
2133 ISPLOCK_2_CAMLOCK(isp);
2134 }
2135 break;
2136
2137#ifdef ISP_TARGET_MODE
2138 case XPT_EN_LUN: /* Enable LUN as a target */
2139 {
2140 int iok;
2141 CAMLOCK_2_ISPLOCK(isp);
2142 iok = isp->isp_osinfo.intsok;
2143 isp->isp_osinfo.intsok = 0;
2144 isp_en_lun(isp, ccb);
2145 isp->isp_osinfo.intsok = iok;
2146 ISPLOCK_2_CAMLOCK(isp);
2147 xpt_done(ccb);
2148 break;
2149 }
2150 case XPT_NOTIFY_ACK: /* recycle notify ack */
2151 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
2152 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2153 {
2154 tstate_t *tptr =
2155 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2156 if (tptr == NULL) {
2157 ccb->ccb_h.status = CAM_LUN_INVALID;
2158 xpt_done(ccb);
2159 break;
2160 }
2161 ccb->ccb_h.sim_priv.entries[0].field = 0;
2162 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2163 ccb->ccb_h.flags = 0;
2164
2165 CAMLOCK_2_ISPLOCK(isp);
2166 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2167 /*
2168 * Note that the command itself may not be done-
2169 * it may not even have had the first CTIO sent.
2170 */
2171 tptr->atio_count++;
2172 isp_prt(isp, ISP_LOGTDEBUG0,
2173 "Put FREE ATIO2, lun %d, count now %d",
2174 ccb->ccb_h.target_lun, tptr->atio_count);
2175 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2176 sim_links.sle);
2177 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2178 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2179 sim_links.sle);
2180 } else {
2181 ;
2182 }
2183 rls_lun_statep(isp, tptr);
2184 ccb->ccb_h.status = CAM_REQ_INPROG;
2185 ISPLOCK_2_CAMLOCK(isp);
2186 break;
2187 }
2188 case XPT_CONT_TARGET_IO:
2189 {
2190 CAMLOCK_2_ISPLOCK(isp);
2191 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2192 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2193 isp_prt(isp, ISP_LOGWARN,
2194 "XPT_CONT_TARGET_IO: status 0x%x",
2195 ccb->ccb_h.status);
2196 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2197 ISPLOCK_2_CAMLOCK(isp);
2198 xpt_done(ccb);
2199 } else {
2200 ISPLOCK_2_CAMLOCK(isp);
2201 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2202 }
2203 break;
2204 }
2205#endif
2206 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2207
2208 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2209 tgt = ccb->ccb_h.target_id;
2210 tgt |= (bus << 16);
2211
2212 CAMLOCK_2_ISPLOCK(isp);
2213 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2214 ISPLOCK_2_CAMLOCK(isp);
2215 if (error) {
2216 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2217 } else {
2218 ccb->ccb_h.status = CAM_REQ_CMP;
2219 }
2220 xpt_done(ccb);
2221 break;
2222 case XPT_ABORT: /* Abort the specified CCB */
2223 {
2224 union ccb *accb = ccb->cab.abort_ccb;
2225 CAMLOCK_2_ISPLOCK(isp);
2226 switch (accb->ccb_h.func_code) {
2227#ifdef ISP_TARGET_MODE
2228 case XPT_ACCEPT_TARGET_IO:
2229 case XPT_IMMED_NOTIFY:
2230 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2231 break;
2232 case XPT_CONT_TARGET_IO:
2233 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2234 ccb->ccb_h.status = CAM_UA_ABORT;
2235 break;
2236#endif
2237 case XPT_SCSI_IO:
2238 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2239 if (error) {
2240 ccb->ccb_h.status = CAM_UA_ABORT;
2241 } else {
2242 ccb->ccb_h.status = CAM_REQ_CMP;
2243 }
2244 break;
2245 default:
2246 ccb->ccb_h.status = CAM_REQ_INVALID;
2247 break;
2248 }
2249 ISPLOCK_2_CAMLOCK(isp);
2250 xpt_done(ccb);
2251 break;
2252 }
2253#ifdef CAM_NEW_TRAN_CODE
2254#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2255#else
2256#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
2257#endif
2258 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2259 cts = &ccb->cts;
2260 if (!IS_CURRENT_SETTINGS(cts)) {
2261 ccb->ccb_h.status = CAM_REQ_INVALID;
2262 xpt_done(ccb);
2263 break;
2264 }
2265 tgt = cts->ccb_h.target_id;
2266 CAMLOCK_2_ISPLOCK(isp);
2267 if (IS_SCSI(isp)) {
2268#ifndef CAM_NEW_TRAN_CODE
2269 sdparam *sdp = isp->isp_param;
2270 u_int16_t *dptr;
2271
2272 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2273
2274 sdp += bus;
2275 /*
2276 * We always update (internally) from goal_flags
2277 * so any request to change settings just gets
2278 * vectored to that location.
2279 */
2280 dptr = &sdp->isp_devparam[tgt].goal_flags;
2281
2282 /*
2283			 * Note that these operations affect the
2284			 * goal flags (goal_flags)- not
2285 * the current state flags. Then we mark
2286 * things so that the next operation to
2287 * this HBA will cause the update to occur.
2288 */
2289 if (cts->valid & CCB_TRANS_DISC_VALID) {
2290 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2291 *dptr |= DPARM_DISC;
2292 } else {
2293 *dptr &= ~DPARM_DISC;
2294 }
2295 }
2296 if (cts->valid & CCB_TRANS_TQ_VALID) {
2297 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2298 *dptr |= DPARM_TQING;
2299 } else {
2300 *dptr &= ~DPARM_TQING;
2301 }
2302 }
2303 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2304 switch (cts->bus_width) {
2305 case MSG_EXT_WDTR_BUS_16_BIT:
2306 *dptr |= DPARM_WIDE;
2307 break;
2308 default:
2309 *dptr &= ~DPARM_WIDE;
2310 }
2311 }
2312 /*
2313 * Any SYNC RATE of nonzero and SYNC_OFFSET
2314 * of nonzero will cause us to go to the
2315 * selected (from NVRAM) maximum value for
2316 * this device. At a later point, we'll
2317 * allow finer control.
2318 */
2319 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2320 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2321 (cts->sync_offset > 0)) {
2322 *dptr |= DPARM_SYNC;
2323 } else {
2324 *dptr &= ~DPARM_SYNC;
2325 }
2326 *dptr |= DPARM_SAFE_DFLT;
2327#else
2328 struct ccb_trans_settings_scsi *scsi =
2329 &cts->proto_specific.scsi;
2330 struct ccb_trans_settings_spi *spi =
2331 &cts->xport_specific.spi;
2332 sdparam *sdp = isp->isp_param;
2333 u_int16_t *dptr;
2334
2335 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2336 sdp += bus;
2337 /*
2338 * We always update (internally) from goal_flags
2339 * so any request to change settings just gets
2340 * vectored to that location.
2341 */
2342 dptr = &sdp->isp_devparam[tgt].goal_flags;
2343
2344 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2345 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2346 *dptr |= DPARM_DISC;
2347 else
2348 *dptr &= ~DPARM_DISC;
2349 }
2350
2351 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2352 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2353 *dptr |= DPARM_TQING;
2354 else
2355 *dptr &= ~DPARM_TQING;
2356 }
2357
2358 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2359 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2360 *dptr |= DPARM_WIDE;
2361 else
2362 *dptr &= ~DPARM_WIDE;
2363 }
2364
2365 /*
2366 * XXX: FIX ME
2367 */
2368 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2369 (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2370 (spi->sync_period && spi->sync_offset)) {
2371 *dptr |= DPARM_SYNC;
2372 /*
2373 * XXX: CHECK FOR LEGALITY
2374 */
2375 sdp->isp_devparam[tgt].goal_period =
2376 spi->sync_period;
2377 sdp->isp_devparam[tgt].goal_offset =
2378 spi->sync_offset;
2379 } else {
2380 *dptr &= ~DPARM_SYNC;
2381 }
2382#endif
2383 isp_prt(isp, ISP_LOGDEBUG0,
2384 "SET bus %d targ %d to flags %x off %x per %x",
2385 bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2386 sdp->isp_devparam[tgt].goal_offset,
2387 sdp->isp_devparam[tgt].goal_period);
2388 sdp->isp_devparam[tgt].dev_update = 1;
2389 isp->isp_update |= (1 << bus);
2390 }
2391 ISPLOCK_2_CAMLOCK(isp);
2392 ccb->ccb_h.status = CAM_REQ_CMP;
2393 xpt_done(ccb);
2394 break;
2395 case XPT_GET_TRAN_SETTINGS:
2396 cts = &ccb->cts;
2397 tgt = cts->ccb_h.target_id;
2398 CAMLOCK_2_ISPLOCK(isp);
2399 if (IS_FC(isp)) {
2400#ifndef CAM_NEW_TRAN_CODE
2401 /*
2402			 * A lot of normal SCSI things don't make sense.
2403 */
2404 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2405 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2406 /*
2407 * How do you measure the width of a high
2408 * speed serial bus? Well, in bytes.
2409 *
2410 * Offset and period make no sense, though, so we set
2411 * (above) a 'base' transfer speed to be gigabit.
2412 */
2413 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2414#else
2415 fcparam *fcp = isp->isp_param;
2416 struct ccb_trans_settings_fc *fc =
2417 &cts->xport_specific.fc;
2418
2419 cts->protocol = PROTO_SCSI;
2420 cts->protocol_version = SCSI_REV_2;
2421 cts->transport = XPORT_FC;
2422 cts->transport_version = 0;
2423
2424 fc->valid = CTS_FC_VALID_SPEED;
2425 if (fcp->isp_gbspeed == 2)
2426 fc->bitrate = 200000;
2427 else
2428 fc->bitrate = 100000;
2429 if (tgt > 0 && tgt < MAX_FC_TARG) {
2430 struct lportdb *lp = &fcp->portdb[tgt];
2431 fc->wwnn = lp->node_wwn;
2432 fc->wwpn = lp->port_wwn;
2433 fc->port = lp->portid;
2434 fc->valid |= CTS_FC_VALID_WWNN |
2435 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2436 }
2437#endif
2438 } else {
2439#ifdef CAM_NEW_TRAN_CODE
2440 struct ccb_trans_settings_scsi *scsi =
2441 &cts->proto_specific.scsi;
2442 struct ccb_trans_settings_spi *spi =
2443 &cts->xport_specific.spi;
2444#endif
2445 sdparam *sdp = isp->isp_param;
2446 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2447 u_int16_t dval, pval, oval;
2448
2449 sdp += bus;
2450
2451 if (IS_CURRENT_SETTINGS(cts)) {
2452 sdp->isp_devparam[tgt].dev_refresh = 1;
2453 isp->isp_update |= (1 << bus);
2454 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2455 NULL);
2456 dval = sdp->isp_devparam[tgt].actv_flags;
2457 oval = sdp->isp_devparam[tgt].actv_offset;
2458 pval = sdp->isp_devparam[tgt].actv_period;
2459 } else {
2460 dval = sdp->isp_devparam[tgt].nvrm_flags;
2461 oval = sdp->isp_devparam[tgt].nvrm_offset;
2462 pval = sdp->isp_devparam[tgt].nvrm_period;
2463 }
2464
2465#ifndef CAM_NEW_TRAN_CODE
2466 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2467
2468 if (dval & DPARM_DISC) {
2469 cts->flags |= CCB_TRANS_DISC_ENB;
2470 }
2471 if (dval & DPARM_TQING) {
2472 cts->flags |= CCB_TRANS_TAG_ENB;
2473 }
2474 if (dval & DPARM_WIDE) {
2475 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2476 } else {
2477 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2478 }
2479 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2480 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2481
2482 if ((dval & DPARM_SYNC) && oval != 0) {
2483 cts->sync_period = pval;
2484 cts->sync_offset = oval;
2485 cts->valid |=
2486 CCB_TRANS_SYNC_RATE_VALID |
2487 CCB_TRANS_SYNC_OFFSET_VALID;
2488 }
2489#else
2490 cts->protocol = PROTO_SCSI;
2491 cts->protocol_version = SCSI_REV_2;
2492 cts->transport = XPORT_SPI;
2493 cts->transport_version = 2;
2494
2495 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2496 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2497 if (dval & DPARM_DISC) {
2498 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2499 }
2500 if (dval & DPARM_TQING) {
2501 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2502 }
2503 if ((dval & DPARM_SYNC) && oval && pval) {
2504 spi->sync_offset = oval;
2505 spi->sync_period = pval;
2506 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2507 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2508 }
2509 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2510 if (dval & DPARM_WIDE) {
2511 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2512 } else {
2513 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2514 }
2515 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2516 scsi->valid = CTS_SCSI_VALID_TQ;
2517 spi->valid |= CTS_SPI_VALID_DISC;
2518 } else {
2519 scsi->valid = 0;
2520 }
2521#endif
2522 isp_prt(isp, ISP_LOGDEBUG0,
2523 "GET %s bus %d targ %d to flags %x off %x per %x",
2524 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2525 bus, tgt, dval, oval, pval);
2526 }
2527 ISPLOCK_2_CAMLOCK(isp);
2528 ccb->ccb_h.status = CAM_REQ_CMP;
2529 xpt_done(ccb);
2530 break;
2531
2532 case XPT_CALC_GEOMETRY:
2533 {
2534 struct ccb_calc_geometry *ccg;
2535 u_int32_t secs_per_cylinder;
2536 u_int32_t size_mb;
2537
2538 ccg = &ccb->ccg;
2539 if (ccg->block_size == 0) {
2540 isp_prt(isp, ISP_LOGERR,
2541 "%d.%d XPT_CALC_GEOMETRY block size 0?",
2542 ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2543 ccb->ccb_h.status = CAM_REQ_INVALID;
2544 xpt_done(ccb);
2545 break;
2546 }
2547 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2548 if (size_mb > 1024) {
2549 ccg->heads = 255;
2550 ccg->secs_per_track = 63;
2551 } else {
2552 ccg->heads = 64;
2553 ccg->secs_per_track = 32;
2554 }
2555 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2556 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2557 ccb->ccb_h.status = CAM_REQ_CMP;
2558 xpt_done(ccb);
2559 break;
2560 }
2561 case XPT_RESET_BUS: /* Reset the specified bus */
2562 bus = cam_sim_bus(sim);
2563 CAMLOCK_2_ISPLOCK(isp);
2564 error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2565 ISPLOCK_2_CAMLOCK(isp);
2566 if (error)
2567 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2568 else {
2569 if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2570 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2571 else if (isp->isp_path != NULL)
2572 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2573 ccb->ccb_h.status = CAM_REQ_CMP;
2574 }
2575 xpt_done(ccb);
2576 break;
2577
2578 case XPT_TERM_IO: /* Terminate the I/O process */
2579 ccb->ccb_h.status = CAM_REQ_INVALID;
2580 xpt_done(ccb);
2581 break;
2582
2583 case XPT_PATH_INQ: /* Path routing inquiry */
2584 {
2585 struct ccb_pathinq *cpi = &ccb->cpi;
2586
2587 cpi->version_num = 1;
2588#ifdef ISP_TARGET_MODE
2589 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2590#else
2591 cpi->target_sprt = 0;
2592#endif
2593 cpi->hba_eng_cnt = 0;
2594 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2595 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2596 cpi->bus_id = cam_sim_bus(sim);
2597 if (IS_FC(isp)) {
2598 cpi->hba_misc = PIM_NOBUSRESET;
2599 /*
2600 * Because our loop ID can shift from time to time,
2601 * make our initiator ID out of range of our bus.
2602 */
2603 cpi->initiator_id = cpi->max_target + 1;
2604
2605 /*
2606 * Set base transfer capabilities for Fibre Channel.
2607 * Technically not correct because we don't know
2608 * what media we're running on top of- but we'll
2609 * look good if we always say 100MB/s.
2610 */
2611 if (FCPARAM(isp)->isp_gbspeed == 2)
2612 cpi->base_transfer_speed = 200000;
2613 else
2614 cpi->base_transfer_speed = 100000;
2615 cpi->hba_inquiry = PI_TAG_ABLE;
2616#ifdef CAM_NEW_TRAN_CODE
2617 cpi->transport = XPORT_FC;
2618 cpi->transport_version = 0; /* WHAT'S THIS FOR? */
2619#endif
2620 } else {
2621 sdparam *sdp = isp->isp_param;
2622 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2623 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2624 cpi->hba_misc = 0;
2625 cpi->initiator_id = sdp->isp_initiator_id;
2626 cpi->base_transfer_speed = 3300;
2627#ifdef CAM_NEW_TRAN_CODE
2628 cpi->transport = XPORT_SPI;
2629 cpi->transport_version = 2; /* WHAT'S THIS FOR? */
2630#endif
2631 }
2632#ifdef CAM_NEW_TRAN_CODE
2633 cpi->protocol = PROTO_SCSI;
2634 cpi->protocol_version = SCSI_REV_2;
2635#endif
2636 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2637 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2638 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2639 cpi->unit_number = cam_sim_unit(sim);
2640 cpi->ccb_h.status = CAM_REQ_CMP;
2641 xpt_done(ccb);
2642 break;
2643 }
2644 default:
2645 ccb->ccb_h.status = CAM_REQ_INVALID;
2646 xpt_done(ccb);
2647 break;
2648 }
2649}
2650
2651#define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
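/*
 * Command completion from the core driver: fold the SCSI status into
 * the CAM status, freeze the device queue on errors, and, unless the
 * watchdog is currently working on this command, cancel the watchdog
 * and hand the CCB back to CAM.
 */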
2652void
2653isp_done(struct ccb_scsiio *sccb)
2654{
2655 struct ispsoftc *isp = XS_ISP(sccb);
2656
2657 if (XS_NOERR(sccb))
2658 XS_SETERR(sccb, CAM_REQ_CMP);
2659
2660 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2661 (sccb->scsi_status != SCSI_STATUS_OK)) {
2662 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2663 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2664 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2665 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2666 } else {
2667 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2668 }
2669 }
2670
2671 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2672 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2673 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2674 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2675 xpt_freeze_devq(sccb->ccb_h.path, 1);
2676 isp_prt(isp, ISP_LOGDEBUG0,
2677 "freeze devq %d.%d cam sts %x scsi sts %x",
2678 sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2679 sccb->ccb_h.status, sccb->scsi_status);
2680 }
2681 }
2682
2683 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2684 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2685 xpt_print_path(sccb->ccb_h.path);
2686 isp_prt(isp, ISP_LOGINFO,
2687 "cam completion status 0x%x", sccb->ccb_h.status);
2688 }
2689
2690 XS_CMD_S_DONE(sccb);
2691 if (XS_CMD_WDOG_P(sccb) == 0) {
2692 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2693 if (XS_CMD_GRACE_P(sccb)) {
2694 isp_prt(isp, ISP_LOGDEBUG2,
2695 "finished command on borrowed time");
2696 }
2697 XS_CMD_S_CLEAR(sccb);
2698 ISPLOCK_2_CAMLOCK(isp);
2699 xpt_done((union ccb *) sccb);
2700 CAMLOCK_2_ISPLOCK(isp);
2701 }
2702}
2703
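/*
 * Asynchronous event handler called by the core driver: propagate
 * transfer negotiation changes, bus resets, loop state changes, port
 * database/name server changes and fabric device announcements up to
 * CAM (and wake the FC support thread where appropriate).
 */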
2704int
2705isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2706{
2707 int bus, rv = 0;
2708 switch (cmd) {
2709 case ISPASYNC_NEW_TGT_PARAMS:
2710 {
2711#ifdef CAM_NEW_TRAN_CODE
2712 struct ccb_trans_settings_scsi *scsi;
2713 struct ccb_trans_settings_spi *spi;
2714#endif
2715 int flags, tgt;
2716 sdparam *sdp = isp->isp_param;
2717 struct ccb_trans_settings cts;
2718 struct cam_path *tmppath;
2719
2720 bzero(&cts, sizeof (struct ccb_trans_settings));
2721
2722 tgt = *((int *)arg);
2723 bus = (tgt >> 16) & 0xffff;
2724 tgt &= 0xffff;
2725 sdp += bus;
2726 ISPLOCK_2_CAMLOCK(isp);
2727 if (xpt_create_path(&tmppath, NULL,
2728 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2729 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2730 CAMLOCK_2_ISPLOCK(isp);
2731 isp_prt(isp, ISP_LOGWARN,
2732 "isp_async cannot make temp path for %d.%d",
2733 tgt, bus);
2734 rv = -1;
2735 break;
2736 }
2737 CAMLOCK_2_ISPLOCK(isp);
2738 flags = sdp->isp_devparam[tgt].actv_flags;
2739#ifdef CAM_NEW_TRAN_CODE
2740 cts.type = CTS_TYPE_CURRENT_SETTINGS;
2741 cts.protocol = PROTO_SCSI;
2742 cts.transport = XPORT_SPI;
2743
2744 scsi = &cts.proto_specific.scsi;
2745 spi = &cts.xport_specific.spi;
2746
2747 if (flags & DPARM_TQING) {
2748 scsi->valid |= CTS_SCSI_VALID_TQ;
2749 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2750 spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2751 }
2752
2753 if (flags & DPARM_DISC) {
2754 spi->valid |= CTS_SPI_VALID_DISC;
2755 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2756 }
2757		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2758 if (flags & DPARM_WIDE) {
2759 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2760 } else {
2761 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2762 }
2763 if (flags & DPARM_SYNC) {
2764 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2765 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2766 spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2767 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2768 }
2769#else
2770 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2771 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2772 if (flags & DPARM_DISC) {
2773 cts.flags |= CCB_TRANS_DISC_ENB;
2774 }
2775 if (flags & DPARM_TQING) {
2776 cts.flags |= CCB_TRANS_TAG_ENB;
2777 }
2778 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2779 cts.bus_width = (flags & DPARM_WIDE)?
2780		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2781 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2782 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2783 if (flags & DPARM_SYNC) {
2784 cts.valid |=
2785 CCB_TRANS_SYNC_RATE_VALID |
2786 CCB_TRANS_SYNC_OFFSET_VALID;
2787 }
2788#endif
2789 isp_prt(isp, ISP_LOGDEBUG2,
2790 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2791 bus, tgt, sdp->isp_devparam[tgt].actv_period,
2792 sdp->isp_devparam[tgt].actv_offset, flags);
2793 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2794 ISPLOCK_2_CAMLOCK(isp);
2795 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2796 xpt_free_path(tmppath);
2797 CAMLOCK_2_ISPLOCK(isp);
2798 break;
2799 }
2800 case ISPASYNC_BUS_RESET:
2801 bus = *((int *)arg);
2802 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2803 bus);
2804 if (bus > 0 && isp->isp_path2) {
2805 ISPLOCK_2_CAMLOCK(isp);
2806 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2807 CAMLOCK_2_ISPLOCK(isp);
2808 } else if (isp->isp_path) {
2809 ISPLOCK_2_CAMLOCK(isp);
2810 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2811 CAMLOCK_2_ISPLOCK(isp);
2812 }
2813 break;
2814 case ISPASYNC_LIP:
2815 if (isp->isp_path) {
2816 isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2817 }
2818 isp_prt(isp, ISP_LOGINFO, "LIP Received");
2819 break;
2820 case ISPASYNC_LOOP_RESET:
2821 if (isp->isp_path) {
2822 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2823 }
2824 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2825 break;
2826 case ISPASYNC_LOOP_DOWN:
2827 if (isp->isp_path) {
2828 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2829 }
2830 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2831 break;
2832 case ISPASYNC_LOOP_UP:
2833 /*
2834 * Now we just note that Loop has come up. We don't
2835 * actually do anything because we're waiting for a
2836 * Change Notify before activating the FC cleanup
2837 * thread to look at the state of the loop again.
2838 */
2839 isp_prt(isp, ISP_LOGINFO, "Loop UP");
2840 break;
2841 case ISPASYNC_PROMENADE:
2842 {
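		/*
		 * A port on the local loop has arrived or departed.  Log
		 * its particulars; CAM is told about it below, and only
		 * for devices claiming the target role.
		 */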
2843 struct cam_path *tmppath;
2844 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2845 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2846 static const char *roles[4] = {
2847 "(none)", "Target", "Initiator", "Target/Initiator"
2848 };
2849 fcparam *fcp = isp->isp_param;
2850 int tgt = *((int *) arg);
2851 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2852 struct lportdb *lp = &fcp->portdb[tgt];
2853
2854 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2855 roles[lp->roles & 0x3],
2856 (lp->valid)? "Arrived" : "Departed",
2857 (u_int32_t) (lp->port_wwn >> 32),
2858 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2859 (u_int32_t) (lp->node_wwn >> 32),
2860 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2861
2862 ISPLOCK_2_CAMLOCK(isp);
2863 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2864 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2865 CAMLOCK_2_ISPLOCK(isp);
2866 break;
2867 }
2868 /*
2869 * Policy: only announce targets.
2870 */
2871 if (lp->roles & is_tgt_mask) {
2872 if (lp->valid) {
2873 xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2874 } else {
2875 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2876 }
2877 }
2878 xpt_free_path(tmppath);
2879 CAMLOCK_2_ISPLOCK(isp);
2880 break;
2881 }
2882 case ISPASYNC_CHANGE_NOTIFY:
2883 if (arg == ISPASYNC_CHANGE_PDB) {
2884 isp_prt(isp, ISP_LOGINFO,
2885 "Port Database Changed");
2886 } else if (arg == ISPASYNC_CHANGE_SNS) {
2887 isp_prt(isp, ISP_LOGINFO,
2888 "Name Server Database Changed");
2889 }
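		/*
		 * Kick the Fibre Channel support kthread (isp_kthread) so
		 * that it re-evaluates the state of the loop and fabric.
		 * With ISP_SMPLOCK this is a condition variable; otherwise
		 * a plain sleep/wakeup channel is used.
		 */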
2890#ifdef ISP_SMPLOCK
2891 cv_signal(&isp->isp_osinfo.kthread_cv);
2892#else
2893 wakeup(&isp->isp_osinfo.kthread_cv);
2894#endif
2895 break;
2896 case ISPASYNC_FABRIC_DEV:
2897 {
2898 int target, base, lim;
2899 fcparam *fcp = isp->isp_param;
2900 struct lportdb *lp = NULL;
2901 struct lportdb *clp = (struct lportdb *) arg;
2902 char *pt;
2903
2904 switch (clp->port_type) {
2905 case 1:
2906 pt = " N_Port";
2907 break;
2908 case 2:
2909 pt = " NL_Port";
2910 break;
2911 case 3:
2912 pt = "F/NL_Port";
2913 break;
2914 case 0x7f:
2915 pt = " Nx_Port";
2916 break;
2917 case 0x81:
2918			pt = " F_Port";
2919 break;
2920 case 0x82:
2921 pt = " FL_Port";
2922 break;
2923 case 0x84:
2924			pt = " E_Port";
2925 break;
2926 default:
2927 pt = " ";
2928 break;
2929 }
2930
2931 isp_prt(isp, ISP_LOGINFO,
2932 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2933
2934 /*
2935 * If we don't have an initiator role we bail.
2936 *
2937 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2938 */
2939
2940 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2941 break;
2942 }
2943
2944 /*
2945 * Is this entry for us? If so, we bail.
2946 */
2947
2948 if (fcp->isp_portid == clp->portid) {
2949 break;
2950 }
2951
2952 /*
2953 * Else, the default policy is to find room for it in
2954		 * our local port database. Later, when isp_pdb_sync
2955		 * runs, this newly arrived (or already logged in)
2956		 * device will be (re)announced.
2957 */
2958
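		/*
		 * Pick the range of local port database slots to search:
		 * on a public loop (FL_Port topology) start just above the
		 * reserved SNS handle, and a point-to-point (N_Port)
		 * topology leaves room for only a single entry.
		 */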
2959 if (fcp->isp_topo == TOPO_FL_PORT)
2960 base = FC_SNS_ID+1;
2961 else
2962 base = 0;
2963
2964 if (fcp->isp_topo == TOPO_N_PORT)
2965 lim = 1;
2966 else
2967 lim = MAX_FC_TARG;
2968
2969 /*
2970 * Is it already in our list?
2971 */
2972 for (target = base; target < lim; target++) {
2973 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2974 continue;
2975 }
2976 lp = &fcp->portdb[target];
2977 if (lp->port_wwn == clp->port_wwn &&
2978 lp->node_wwn == clp->node_wwn) {
2979 lp->fabric_dev = 1;
2980 break;
2981 }
2982 }
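		/*
		 * If the scan stopped early the device was already known;
		 * it has just been flagged as a fabric device above.
		 */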
2983 if (target < lim) {
2984 break;
2985 }
2986 for (target = base; target < lim; target++) {
2987 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2988 continue;
2989 }
2990 lp = &fcp->portdb[target];
2991 if (lp->port_wwn == 0) {
2992 break;
2993 }
2994 }
2995 if (target == lim) {
2996 isp_prt(isp, ISP_LOGWARN,
2997 "out of space for fabric devices");
2998 break;
2999 }
3000 lp->port_type = clp->port_type;
3001 lp->fc4_type = clp->fc4_type;
3002 lp->node_wwn = clp->node_wwn;
3003 lp->port_wwn = clp->port_wwn;
3004 lp->portid = clp->portid;
3005 lp->fabric_dev = 1;
3006 break;
3007 }
3008#ifdef ISP_TARGET_MODE
3009 case ISPASYNC_TARGET_MESSAGE:
3010 {
3011 tmd_msg_t *mp = arg;
3012 isp_prt(isp, ISP_LOGALL,
3013 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3014 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3015 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3016 mp->nt_msg[0]);
3017 break;
3018 }
3019 case ISPASYNC_TARGET_EVENT:
3020 {
3021 tmd_event_t *ep = arg;
3022 isp_prt(isp, ISP_LOGALL,
3023 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
3024 break;
3025 }
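	/*
	 * Dispatch a target mode response queue entry to the matching
	 * platform handler, keyed on the entry header type.
	 */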
3026 case ISPASYNC_TARGET_ACTION:
3027 switch (((isphdr_t *)arg)->rqs_entry_type) {
3028 default:
3029 isp_prt(isp, ISP_LOGWARN,
3030 "event 0x%x for unhandled target action",
3031 ((isphdr_t *)arg)->rqs_entry_type);
3032 break;
3033 case RQSTYPE_NOTIFY:
3034 if (IS_SCSI(isp)) {
3035 rv = isp_handle_platform_notify_scsi(isp,
3036 (in_entry_t *) arg);
3037 } else {
3038 rv = isp_handle_platform_notify_fc(isp,
3039 (in_fcentry_t *) arg);
3040 }
3041 break;
3042 case RQSTYPE_ATIO:
3043 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3044 break;
3045 case RQSTYPE_ATIO2:
3046 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3047 break;
3048 case RQSTYPE_CTIO2:
3049 case RQSTYPE_CTIO:
3050 rv = isp_handle_platform_ctio(isp, arg);
3051 break;
3052 case RQSTYPE_ENABLE_LUN:
3053 case RQSTYPE_MODIFY_LUN:
3054 if (IS_DUALBUS(isp)) {
3055 bus =
3056 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3057 } else {
3058 bus = 0;
3059 }
3060 isp_cv_signal_rqe(isp, bus,
3061 ((lun_entry_t *)arg)->le_status);
3062 break;
3063 }
3064 break;
3065#endif
3066 case ISPASYNC_FW_CRASH:
3067 {
3068 u_int16_t mbox1, mbox6;
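		/*
		 * OUTMAILBOX1 holds the RISC address of the fault; on
		 * dual-bus chips OUTMAILBOX6 identifies the bus.
		 */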
3069 mbox1 = ISP_READ(isp, OUTMAILBOX1);
3070 if (IS_DUALBUS(isp)) {
3071 mbox6 = ISP_READ(isp, OUTMAILBOX6);
3072 } else {
3073 mbox6 = 0;
3074 }
3075 isp_prt(isp, ISP_LOGERR,
3076 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3077 mbox6, mbox1);
3078#ifdef ISP_FW_CRASH_DUMP
3079 /*
3080 * XXX: really need a thread to do this right.
3081 */
3082 if (IS_FC(isp)) {
3083 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3084 FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3085 isp_freeze_loopdown(isp, "f/w crash");
3086 isp_fw_dump(isp);
3087 }
3088 isp_reinit(isp);
3089 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3090#endif
3091 break;
3092 }
3093 case ISPASYNC_UNHANDLED_RESPONSE:
3094 break;
3095 default:
3096 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3097 break;
3098 }
3099 return (rv);
3100}
3101
3102
3103/*
3104 * Locks are held before coming here.
3105 */
3106void
3107isp_uninit(struct ispsoftc *isp)
3108{
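	/* Reset the chip and mask its interrupts. */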
3109 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3110 DISABLE_INTS(isp);
3111}
3112
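/*
 * printf-style logging helper: messages are dropped unless the level is
 * ISP_LOGALL or overlaps this instance's isp_dblev debug mask.
 */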
3113void
3114isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3115{
3116 va_list ap;
3117 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3118 return;
3119 }
3120 printf("%s: ", device_get_nameunit(isp->isp_dev));
3121 va_start(ap, fmt);
3122 vprintf(fmt, ap);
3123 va_end(ap);
3124 printf("\n");
3125}