isp_freebsd.c: diff of revision 151834 (deleted lines) against revision 154704 (added lines)
1/*-
2 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
3 *
4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
4 * Copyright (c) 1997-2006 by Matthew Jacob
5 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice immediately at the beginning of the file, without modification,
11 * this list of conditions, and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/dev/isp/isp_freebsd.c 151834 2005-10-29 02:46:59Z mjacob $");
30__FBSDID("$FreeBSD: head/sys/dev/isp/isp_freebsd.c 154704 2006-01-23 06:23:37Z mjacob $");
30
31#include <dev/isp/isp_freebsd.h>
32#include <sys/unistd.h>
33#include <sys/kthread.h>
34#include <machine/stdarg.h> /* for use by isp_prt below */
35#include <sys/conf.h>
36#include <sys/module.h>
37#include <sys/ioccom.h>
38#include <dev/isp/isp_ioctl.h>
39
40
41MODULE_VERSION(isp, 1);
42MODULE_DEPEND(isp, cam, 1, 1, 1);
43int isp_announced = 0;
44ispfwfunc *isp_get_firmware_p = NULL;
45
46static d_ioctl_t ispioctl;
47static void isp_intr_enable(void *);
48static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
49static void isp_poll(struct cam_sim *);
50static timeout_t isp_watchdog;
51static void isp_kthread(void *);
52static void isp_action(struct cam_sim *, union ccb *);
53
54
55static struct cdevsw isp_cdevsw = {
56 .d_version = D_VERSION,
57 .d_flags = D_NEEDGIANT,
58 .d_ioctl = ispioctl,
59 .d_name = "isp",
60};
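/*
 * This cdevsw backs the per-instance control node created with make_dev()
 * in isp_attach(); ispioctl() below services the ISP_* management ioctls
 * (debug level, role, rescan, FC parameters, task management) and, being
 * marked D_NEEDGIANT, runs with Giant held.
 */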
61
62static struct ispsoftc *isplist = NULL;
63
64void
65isp_attach(struct ispsoftc *isp)
66{
67 int primary, secondary;
68 struct ccb_setasync csa;
69 struct cam_devq *devq;
70 struct cam_sim *sim;
71 struct cam_path *path;
72
73 /*
74 * Establish (in case of 12X0) which bus is the primary.
75 */
76
77 primary = 0;
78 secondary = 1;
79
80 /*
81 * Create the device queue for our SIM(s).
82 */
83 devq = cam_simq_alloc(isp->isp_maxcmds);
84 if (devq == NULL) {
85 return;
86 }
87
88 /*
89 * Construct our SIM entry.
90 */
91 ISPLOCK_2_CAMLOCK(isp);
92 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
93 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
94 if (sim == NULL) {
95 cam_simq_free(devq);
96 CAMLOCK_2_ISPLOCK(isp);
97 return;
98 }
99 CAMLOCK_2_ISPLOCK(isp);
100
101 isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
102 isp->isp_osinfo.ehook.ich_arg = isp;
103 ISPLOCK_2_CAMLOCK(isp);
104 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
105 cam_sim_free(sim, TRUE);
106 CAMLOCK_2_ISPLOCK(isp);
107 isp_prt(isp, ISP_LOGERR,
108 "could not establish interrupt enable hook");
109 return;
110 }
111
112 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
113 cam_sim_free(sim, TRUE);
114 CAMLOCK_2_ISPLOCK(isp);
115 return;
116 }
117
118 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
119 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
120 xpt_bus_deregister(cam_sim_path(sim));
121 cam_sim_free(sim, TRUE);
122 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
123 CAMLOCK_2_ISPLOCK(isp);
124 return;
125 }
126
127 xpt_setup_ccb(&csa.ccb_h, path, 5);
128 csa.ccb_h.func_code = XPT_SASYNC_CB;
129 csa.event_enable = AC_LOST_DEVICE;
130 csa.callback = isp_cam_async;
131 csa.callback_arg = sim;
132 xpt_action((union ccb *)&csa);
133 CAMLOCK_2_ISPLOCK(isp);
134 isp->isp_sim = sim;
135 isp->isp_path = path;
136 /*
137 * Create a kernel thread for fibre channel instances. We
138 * don't have dual channel FC cards.
139 */
140 if (IS_FC(isp)) {
141 ISPLOCK_2_CAMLOCK(isp);
142 /* XXX: LOCK VIOLATION */
143 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
144 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
145 RFHIGHPID, 0, "%s: fc_thrd",
146 device_get_nameunit(isp->isp_dev))) {
147 xpt_bus_deregister(cam_sim_path(sim));
148 cam_sim_free(sim, TRUE);
149 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
150 CAMLOCK_2_ISPLOCK(isp);
151 isp_prt(isp, ISP_LOGERR, "could not create kthread");
152 return;
153 }
154 CAMLOCK_2_ISPLOCK(isp);
155 }
156
157
158 /*
159 * If we have a second channel, construct SIM entry for that.
160 */
161 if (IS_DUALBUS(isp)) {
162 ISPLOCK_2_CAMLOCK(isp);
163 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
164 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
165 if (sim == NULL) {
166 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
167 xpt_free_path(isp->isp_path);
168 cam_simq_free(devq);
169 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
170 return;
171 }
172 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
173 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
174 xpt_free_path(isp->isp_path);
175 cam_sim_free(sim, TRUE);
176 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
177 CAMLOCK_2_ISPLOCK(isp);
178 return;
179 }
180
181 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
182 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
183 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
184 xpt_free_path(isp->isp_path);
185 xpt_bus_deregister(cam_sim_path(sim));
186 cam_sim_free(sim, TRUE);
187 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
188 CAMLOCK_2_ISPLOCK(isp);
189 return;
190 }
191
192 xpt_setup_ccb(&csa.ccb_h, path, 5);
193 csa.ccb_h.func_code = XPT_SASYNC_CB;
194 csa.event_enable = AC_LOST_DEVICE;
195 csa.callback = isp_cam_async;
196 csa.callback_arg = sim;
197 xpt_action((union ccb *)&csa);
198 CAMLOCK_2_ISPLOCK(isp);
199 isp->isp_sim2 = sim;
200 isp->isp_path2 = path;
201 }
202
203 /*
204 * Create device nodes
205 */
206 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
207 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
208
209 if (isp->isp_role != ISP_ROLE_NONE) {
210 isp->isp_state = ISP_RUNSTATE;
211 ENABLE_INTS(isp);
212 }
213 if (isplist == NULL) {
214 isplist = isp;
215 } else {
216 struct ispsoftc *tmp = isplist;
217 while (tmp->isp_osinfo.next) {
218 tmp = tmp->isp_osinfo.next;
219 }
220 tmp->isp_osinfo.next = isp;
221 }
222
223}
224
225static INLINE void
226isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
227{
228 if (isp->isp_osinfo.simqfrozen == 0) {
229 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
230 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
231 ISPLOCK_2_CAMLOCK(isp);
232 xpt_freeze_simq(isp->isp_sim, 1);
233 CAMLOCK_2_ISPLOCK(isp);
234 } else {
235 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
236 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
237 }
238}
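/*
 * Note on the helper above: SIMQFRZ_LOOPDOWN both records the reason for
 * the freeze and ensures only the first loop-down event takes a freeze
 * count on the SIM queue; later calls while simqfrozen is already nonzero
 * merely re-mark the flag instead of stacking additional xpt_freeze_simq()
 * counts.
 */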
239
240static int
241ispioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
242{
243 struct ispsoftc *isp;
244 int nr, retval = ENOTTY;
245
246 isp = isplist;
247 while (isp) {
248 if (minor(dev) == device_get_unit(isp->isp_dev)) {
249 break;
250 }
251 isp = isp->isp_osinfo.next;
252 }
253 if (isp == NULL)
254 return (ENXIO);
255
256 switch (cmd) {
257#ifdef ISP_FW_CRASH_DUMP
258 case ISP_GET_FW_CRASH_DUMP:
259 {
260 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
261 size_t sz;
262
263 retval = 0;
264 if (IS_2200(isp))
265 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
266 else
267 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
268 ISP_LOCK(isp);
269 if (ptr && *ptr) {
270 void *uaddr = *((void **) addr);
271 if (copyout(ptr, uaddr, sz)) {
272 retval = EFAULT;
273 } else {
274 *ptr = 0;
275 }
276 } else {
277 retval = ENXIO;
278 }
279 ISP_UNLOCK(isp);
280 break;
281 }
282
283 case ISP_FORCE_CRASH_DUMP:
284 ISP_LOCK(isp);
285 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
286 isp_fw_dump(isp);
287 isp_reinit(isp);
288 ISP_UNLOCK(isp);
289 retval = 0;
290 break;
291#endif
292 case ISP_SDBLEV:
293 {
294 int olddblev = isp->isp_dblev;
295 isp->isp_dblev = *(int *)addr;
296 *(int *)addr = olddblev;
297 retval = 0;
298 break;
299 }
300 case ISP_GETROLE:
301 *(int *)addr = isp->isp_role;
302 retval = 0;
303 break;
304 case ISP_SETROLE:
305 nr = *(int *)addr;
306 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
307 retval = EINVAL;
308 break;
309 }
310 *(int *)addr = isp->isp_role;
311 isp->isp_role = nr;
312 /* FALLTHROUGH */
313 case ISP_RESETHBA:
314 ISP_LOCK(isp);
315 isp_reinit(isp);
316 ISP_UNLOCK(isp);
317 retval = 0;
318 break;
319 case ISP_RESCAN:
320 if (IS_FC(isp)) {
321 ISP_LOCK(isp);
322 if (isp_fc_runstate(isp, 5 * 1000000)) {
323 retval = EIO;
324 } else {
325 retval = 0;
326 }
327 ISP_UNLOCK(isp);
328 }
329 break;
330 case ISP_FC_LIP:
331 if (IS_FC(isp)) {
332 ISP_LOCK(isp);
333 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
334 retval = EIO;
335 } else {
336 retval = 0;
337 }
338 ISP_UNLOCK(isp);
339 }
340 break;
341 case ISP_FC_GETDINFO:
342 {
343 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
344 struct lportdb *lp;
345
346 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
347 retval = EINVAL;
348 break;
349 }
350 ISP_LOCK(isp);
351 lp = &FCPARAM(isp)->portdb[ifc->loopid];
352 if (lp->valid) {
353 ifc->loopid = lp->loopid;
354 ifc->portid = lp->portid;
355 ifc->node_wwn = lp->node_wwn;
356 ifc->port_wwn = lp->port_wwn;
357 retval = 0;
358 } else {
359 retval = ENODEV;
360 }
361 ISP_UNLOCK(isp);
362 break;
363 }
364 case ISP_GET_STATS:
365 {
366 isp_stats_t *sp = (isp_stats_t *) addr;
367
368 MEMZERO(sp, sizeof (*sp));
369 sp->isp_stat_version = ISP_STATS_VERSION;
370 sp->isp_type = isp->isp_type;
371 sp->isp_revision = isp->isp_revision;
372 ISP_LOCK(isp);
373 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
374 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
375 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
376 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
377 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
378 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
379 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
380 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
381 ISP_UNLOCK(isp);
382 retval = 0;
383 break;
384 }
385 case ISP_CLR_STATS:
386 ISP_LOCK(isp);
387 isp->isp_intcnt = 0;
388 isp->isp_intbogus = 0;
389 isp->isp_intmboxc = 0;
390 isp->isp_intoasync = 0;
391 isp->isp_rsltccmplt = 0;
392 isp->isp_fphccmplt = 0;
393 isp->isp_rscchiwater = 0;
394 isp->isp_fpcchiwater = 0;
395 ISP_UNLOCK(isp);
396 retval = 0;
397 break;
398 case ISP_FC_GETHINFO:
399 {
400 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
401 MEMZERO(hba, sizeof (*hba));
402 ISP_LOCK(isp);
403 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
404 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
405 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
406 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
407 hba->fc_scsi_supported = 1;
408 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
409 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
410 hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
411 hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
412 hba->active_node_wwn = ISP_NODEWWN(isp);
413 hba->active_port_wwn = ISP_PORTWWN(isp);
414 ISP_UNLOCK(isp);
415 retval = 0;
416 break;
417 }
418 case ISP_GET_FC_PARAM:
419 {
420 struct isp_fc_param *f = (struct isp_fc_param *) addr;
421
422 if (!IS_FC(isp)) {
423 retval = EINVAL;
424 break;
425 }
426 f->parameter = 0;
427 if (strcmp(f->param_name, "framelength") == 0) {
428 f->parameter = FCPARAM(isp)->isp_maxfrmlen;
429 retval = 0;
430 break;
431 }
432 if (strcmp(f->param_name, "exec_throttle") == 0) {
433 f->parameter = FCPARAM(isp)->isp_execthrottle;
434 retval = 0;
435 break;
436 }
437 if (strcmp(f->param_name, "fullduplex") == 0) {
438 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
439 f->parameter = 1;
440 retval = 0;
441 break;
442 }
443 if (strcmp(f->param_name, "loopid") == 0) {
444 f->parameter = FCPARAM(isp)->isp_loopid;
445 retval = 0;
446 break;
447 }
448 retval = EINVAL;
449 break;
450 }
451 case ISP_SET_FC_PARAM:
452 {
453 struct isp_fc_param *f = (struct isp_fc_param *) addr;
454 u_int32_t param = f->parameter;
455
456 if (!IS_FC(isp)) {
457 retval = EINVAL;
458 break;
459 }
460 f->parameter = 0;
461 if (strcmp(f->param_name, "framelength") == 0) {
462 if (param != 512 && param != 1024 && param != 2048) {
463 retval = EINVAL;
464 break;
465 }
466 FCPARAM(isp)->isp_maxfrmlen = param;
467 retval = 0;
468 break;
469 }
470 if (strcmp(f->param_name, "exec_throttle") == 0) {
471 if (param < 16 || param > 255) {
472 retval = EINVAL;
473 break;
474 }
475 FCPARAM(isp)->isp_execthrottle = param;
476 retval = 0;
477 break;
478 }
479 if (strcmp(f->param_name, "fullduplex") == 0) {
480 if (param != 0 && param != 1) {
481 retval = EINVAL;
482 break;
483 }
484 if (param) {
485 FCPARAM(isp)->isp_fwoptions |=
486 ICBOPT_FULL_DUPLEX;
487 } else {
488 FCPARAM(isp)->isp_fwoptions &=
489 ~ICBOPT_FULL_DUPLEX;
490 }
491 retval = 0;
492 break;
493 }
494 if (strcmp(f->param_name, "loopid") == 0) {
495 if (param < 0 || param > 125) {
496 retval = EINVAL;
497 break;
498 }
499 FCPARAM(isp)->isp_loopid = param;
500 retval = 0;
501 break;
502 }
503 retval = EINVAL;
504 break;
505 }
506 case ISP_TSK_MGMT:
507 {
508 int needmarker;
509 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
511 u_int16_t loopid;
510 mbreg_t mbs;
511
512 if (IS_SCSI(isp)) {
513 retval = EINVAL;
514 break;
515 }
516
517 memset(&mbs, 0, sizeof (mbs));
518 needmarker = retval = 0;
519
521 loopid = fct->loopid;
522 if (IS_2KLOGIN(isp) == 0) {
523 loopid <<= 8;
524 }
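		/*
		 * With 2K-login capable firmware the 16-bit loop id is passed
		 * in the mailbox parameter as-is; otherwise the 8-bit id is
		 * shifted into the upper byte of mbs.param[1], which is the
		 * layout the task management mailbox commands below expect.
		 */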
520 switch (fct->action) {
521 case CLEAR_ACA:
522 mbs.param[0] = MBOX_CLEAR_ACA;
523 mbs.param[1] = fct->loopid << 8;
528 mbs.param[1] = loopid;
524 mbs.param[2] = fct->lun;
525 break;
526 case TARGET_RESET:
527 mbs.param[0] = MBOX_TARGET_RESET;
528 mbs.param[1] = fct->loopid << 8;
533 mbs.param[1] = loopid;
529 needmarker = 1;
530 break;
531 case LUN_RESET:
532 mbs.param[0] = MBOX_LUN_RESET;
533 mbs.param[1] = fct->loopid << 8;
538 mbs.param[1] = loopid;
534 mbs.param[2] = fct->lun;
535 needmarker = 1;
536 break;
537 case CLEAR_TASK_SET:
538 mbs.param[0] = MBOX_CLEAR_TASK_SET;
539 mbs.param[1] = fct->loopid << 8;
544 mbs.param[1] = loopid;
540 mbs.param[2] = fct->lun;
541 needmarker = 1;
542 break;
543 case ABORT_TASK_SET:
544 mbs.param[0] = MBOX_ABORT_TASK_SET;
545 mbs.param[1] = fct->loopid << 8;
550 mbs.param[1] = loopid;
546 mbs.param[2] = fct->lun;
547 needmarker = 1;
548 break;
549 default:
550 retval = EINVAL;
551 break;
552 }
553 if (retval == 0) {
554 ISP_LOCK(isp);
555 if (needmarker) {
556 isp->isp_sendmarker |= 1;
557 }
558 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
559 ISP_UNLOCK(isp);
560 if (retval)
561 retval = EIO;
562 }
563 break;
564 }
565 default:
566 break;
567 }
568 return (retval);
569}
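/*
 * Hypothetical userland sketch, not part of the driver: the ioctl names and
 * their int argument semantics come from the ispioctl() cases above, but the
 * program itself is illustrative only.  It assumes unit 0 (/dev/isp0, the
 * node made by make_dev() in isp_attach()) and that <dev/isp/isp_ioctl.h>
 * is reachable from the userland include path.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <dev/isp/isp_ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, role, level;

	fd = open("/dev/isp0", O_RDWR);
	if (fd < 0) {
		perror("/dev/isp0");
		return (1);
	}
	/* ISP_GETROLE hands back the current role through the int argument. */
	if (ioctl(fd, ISP_GETROLE, &role) == 0)
		printf("current role 0x%x\n", role);
	/* ISP_SDBLEV installs a new debug level and returns the old one. */
	level = 1;
	if (ioctl(fd, ISP_SDBLEV, &level) == 0)
		printf("previous debug level 0x%x\n", level);
	close(fd);
	return (0);
}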
570
571static void
572isp_intr_enable(void *arg)
573{
574 struct ispsoftc *isp = arg;
575 if (isp->isp_role != ISP_ROLE_NONE) {
576 ENABLE_INTS(isp);
577#if 0
578 isp->isp_osinfo.intsok = 1;
579#endif
580 }
581 /* Release our hook so that the boot can continue. */
582 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
583}
584
585/*
586 * Put the target mode functions here, because some are inlines
587 */
588
589#ifdef ISP_TARGET_MODE
590
591static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
592static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
593static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
594static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
595static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
596static cam_status
597create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
598static void destroy_lun_state(struct ispsoftc *, tstate_t *);
599static int isp_en_lun(struct ispsoftc *, union ccb *);
600static void isp_ledone(struct ispsoftc *, lun_entry_t *);
601static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
602static timeout_t isp_refire_putback_atio;
603static void isp_complete_ctio(union ccb *);
604static void isp_target_putback_atio(union ccb *);
605static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
606static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
607static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
608static int isp_handle_platform_ctio(struct ispsoftc *, void *);
609static void isp_handle_platform_ctio_fastpost(struct ispsoftc *, u_int32_t);
610static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
611static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
612
613static INLINE int
614is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
615{
616 tstate_t *tptr;
617 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
618 if (tptr == NULL) {
619 return (0);
620 }
621 do {
622 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
623 return (1);
624 }
625 } while ((tptr = tptr->next) != NULL);
626 return (0);
627}
628
629static INLINE int
630are_any_luns_enabled(struct ispsoftc *isp, int port)
631{
632 int lo, hi;
633 if (IS_DUALBUS(isp)) {
634 lo = (port * (LUN_HASH_SIZE >> 1));
635 hi = lo + (LUN_HASH_SIZE >> 1);
636 } else {
637 lo = 0;
638 hi = LUN_HASH_SIZE;
639 }
640 for (lo = 0; lo < hi; lo++) {
641 if (isp->isp_osinfo.lun_hash[lo]) {
642 return (1);
643 }
644 }
645 return (0);
646}
647
648static INLINE tstate_t *
649get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
650{
651 tstate_t *tptr = NULL;
652
653 if (lun == CAM_LUN_WILDCARD) {
654 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
655 tptr = &isp->isp_osinfo.tsdflt[bus];
656 tptr->hold++;
657 return (tptr);
658 }
659 return (NULL);
660 } else {
661 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
662 if (tptr == NULL) {
663 return (NULL);
664 }
665 }
666
667 do {
668 if (tptr->lun == lun && tptr->bus == bus) {
669 tptr->hold++;
670 return (tptr);
671 }
672 } while ((tptr = tptr->next) != NULL);
673 return (tptr);
674}
675
676static INLINE void
677rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
678{
679 if (tptr->hold)
680 tptr->hold--;
681}
682
683static INLINE atio_private_data_t *
684isp_get_atpd(struct ispsoftc *isp, int tag)
685{
686 atio_private_data_t *atp;
687 for (atp = isp->isp_osinfo.atpdp;
688 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
689 if (atp->tag == tag)
690 return (atp);
691 }
692 return (NULL);
693}
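/*
 * isp_get_atpd() above is a linear scan of the fixed ATPDPSIZE pool of
 * ATIO private-data adjuncts; callers must be prepared for a NULL return
 * when no entry carries the requested tag (see isp_target_start_ctio()).
 */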
694
695static cam_status
696create_lun_state(struct ispsoftc *isp, int bus,
697 struct cam_path *path, tstate_t **rslt)
698{
699 cam_status status;
700 lun_id_t lun;
701 int hfx;
702 tstate_t *tptr, *new;
703
704 lun = xpt_path_lun_id(path);
705 if (lun < 0) {
706 return (CAM_LUN_INVALID);
707 }
708 if (is_lun_enabled(isp, bus, lun)) {
709 return (CAM_LUN_ALRDY_ENA);
710 }
711 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
712 if (new == NULL) {
713 return (CAM_RESRC_UNAVAIL);
714 }
715
716 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
717 xpt_path_target_id(path), xpt_path_lun_id(path));
718 if (status != CAM_REQ_CMP) {
719 free(new, M_DEVBUF);
720 return (status);
721 }
722 new->bus = bus;
723 new->lun = lun;
724 SLIST_INIT(&new->atios);
725 SLIST_INIT(&new->inots);
726 new->hold = 1;
727
728 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
729 tptr = isp->isp_osinfo.lun_hash[hfx];
730 if (tptr == NULL) {
731 isp->isp_osinfo.lun_hash[hfx] = new;
732 } else {
733 while (tptr->next)
734 tptr = tptr->next;
735 tptr->next = new;
736 }
737 *rslt = new;
738 return (CAM_REQ_CMP);
739}
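/*
 * Note on the function above: a freshly created tstate_t is appended to its
 * LUN_HASH_FUNC() bucket chain and handed back with hold set to 1, so the
 * caller owns a reference and must drop it with rls_lun_statep() once the
 * enable sequence finishes (see isp_en_lun()/isp_ledone()).
 */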
740
741static INLINE void
742destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
743{
744 int hfx;
745 tstate_t *lw, *pw;
746
747 if (tptr->hold) {
748 return;
749 }
750 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
751 pw = isp->isp_osinfo.lun_hash[hfx];
752 if (pw == NULL) {
753 return;
754 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
755 isp->isp_osinfo.lun_hash[hfx] = pw->next;
756 } else {
757 lw = pw;
758 pw = lw->next;
759 while (pw) {
760 if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
761 lw->next = pw->next;
762 break;
763 }
764 lw = pw;
765 pw = pw->next;
766 }
767 if (pw == NULL) {
768 return;
769 }
770 }
771 free(tptr, M_DEVBUF);
772}
773
774/*
775 * Enable luns.
776 */
777static int
778isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
779{
780 struct ccb_en_lun *cel = &ccb->cel;
781 tstate_t *tptr;
782 u_int32_t seq;
783 int bus, cmd, av, wildcard, tm_on;
784 lun_id_t lun;
785 target_id_t tgt;
786
787 bus = XS_CHANNEL(ccb);
788 if (bus > 1) {
789 xpt_print_path(ccb->ccb_h.path);
790 printf("illegal bus %d\n", bus);
791 ccb->ccb_h.status = CAM_PATH_INVALID;
792 return (-1);
793 }
794 tgt = ccb->ccb_h.target_id;
795 lun = ccb->ccb_h.target_lun;
796
797 isp_prt(isp, ISP_LOGTDEBUG0,
798 "isp_en_lun: %sabling lun 0x%x on channel %d",
799 cel->enable? "en" : "dis", lun, bus);
800
801
802 if ((lun != CAM_LUN_WILDCARD) &&
803 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
804 ccb->ccb_h.status = CAM_LUN_INVALID;
805 return (-1);
806 }
807
808 if (IS_SCSI(isp)) {
809 sdparam *sdp = isp->isp_param;
810 sdp += bus;
811 if (tgt != CAM_TARGET_WILDCARD &&
812 tgt != sdp->isp_initiator_id) {
813 ccb->ccb_h.status = CAM_TID_INVALID;
814 return (-1);
815 }
816 } else {
817 /*
818 * There's really no point in doing this yet w/o multi-tid
819 * capability. Even then, it's problematic.
820 */
821#if 0
822 if (tgt != CAM_TARGET_WILDCARD &&
823 tgt != FCPARAM(isp)->isp_iid) {
824 ccb->ccb_h.status = CAM_TID_INVALID;
825 return (-1);
826 }
827#endif
828 /*
829 * This is as a good a place as any to check f/w capabilities.
830 */
831 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
832 isp_prt(isp, ISP_LOGERR,
833 "firmware does not support target mode");
834 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
835 return (-1);
836 }
837 /*
838 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
839 * XXX: dork with our already fragile enable/disable code.
840 */
841 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
842 isp_prt(isp, ISP_LOGERR,
843 "firmware not SCCLUN capable");
844 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
845 return (-1);
846 }
847 }
848
849 if (tgt == CAM_TARGET_WILDCARD) {
850 if (lun == CAM_LUN_WILDCARD) {
851 wildcard = 1;
852 } else {
853 ccb->ccb_h.status = CAM_LUN_INVALID;
854 return (-1);
855 }
856 } else {
857 wildcard = 0;
858 }
859
860 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0;
861
862 /*
863 * Next check to see whether this is a target/lun wildcard action.
864 *
865 * If so, we know that we can accept commands for luns that haven't
866 * been enabled yet and send them upstream. Otherwise, we have to
867 * handle them locally (if we see them at all).
868 */
869
870 if (wildcard) {
871 tptr = &isp->isp_osinfo.tsdflt[bus];
872 if (cel->enable) {
873 if (tm_on) {
874 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
875 return (-1);
876 }
877 ccb->ccb_h.status =
878 xpt_create_path(&tptr->owner, NULL,
879 xpt_path_path_id(ccb->ccb_h.path),
880 xpt_path_target_id(ccb->ccb_h.path),
881 xpt_path_lun_id(ccb->ccb_h.path));
882 if (ccb->ccb_h.status != CAM_REQ_CMP) {
883 return (-1);
884 }
885 SLIST_INIT(&tptr->atios);
886 SLIST_INIT(&tptr->inots);
887 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
888 } else {
889 if (tm_on == 0) {
890 ccb->ccb_h.status = CAM_REQ_CMP;
891 return (-1);
892 }
893 if (tptr->hold) {
894 ccb->ccb_h.status = CAM_SCSI_BUSY;
895 return (-1);
896 }
897 xpt_free_path(tptr->owner);
898 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
899 }
900 }
901
902 /*
903 * Now check to see whether this bus needs to be
904 * enabled/disabled with respect to target mode.
905 */
906 av = bus << 31;
907 if (cel->enable && tm_on == 0) {
908 av |= ENABLE_TARGET_FLAG;
909 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
910 if (av) {
911 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
912 if (wildcard) {
913 isp->isp_osinfo.tmflags[bus] &=
914 ~TM_WILDCARD_ENABLED;
915 xpt_free_path(tptr->owner);
916 }
917 return (-1);
918 }
919 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
920 isp_prt(isp, ISP_LOGINFO,
921 "Target Mode enabled on channel %d", bus);
922 } else if (cel->enable == 0 && tm_on && wildcard) {
923 if (are_any_luns_enabled(isp, bus)) {
924 ccb->ccb_h.status = CAM_SCSI_BUSY;
925 return (-1);
926 }
927 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
928 if (av) {
929 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
930 return (-1);
931 }
932 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
933 isp_prt(isp, ISP_LOGINFO,
934 "Target Mode disabled on channel %d", bus);
935 }
936
937 if (wildcard) {
938 ccb->ccb_h.status = CAM_REQ_CMP;
939 return (-1);
940 }
941
942 /*
943 * Find an empty slot
944 */
945 for (seq = 0; seq < NLEACT; seq++) {
946 if (isp->isp_osinfo.leact[seq] == 0) {
947 break;
948 }
949 }
950 if (seq >= NLEACT) {
951 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
952 return (-1);
953
954 }
955 isp->isp_osinfo.leact[seq] = ccb;
956
957 if (cel->enable) {
958 ccb->ccb_h.status =
959 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
960 if (ccb->ccb_h.status != CAM_REQ_CMP) {
961 isp->isp_osinfo.leact[seq] = 0;
962 return (-1);
963 }
964 } else {
965 tptr = get_lun_statep(isp, bus, lun);
966 if (tptr == NULL) {
967 ccb->ccb_h.status = CAM_LUN_INVALID;
968 return (-1);
969 }
970 }
971
972 if (cel->enable) {
973 int c, n, ulun = lun;
974
975 cmd = RQSTYPE_ENABLE_LUN;
976 c = DFLT_CMND_CNT;
977 n = DFLT_INOT_CNT;
978 if (IS_FC(isp) && lun != 0) {
979 cmd = RQSTYPE_MODIFY_LUN;
980 n = 0;
981 /*
982 * For SCC firmware, we only deal with setting
983 * (enabling or modifying) lun 0.
984 */
985 ulun = 0;
986 }
987 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
988 rls_lun_statep(isp, tptr);
989 ccb->ccb_h.status = CAM_REQ_INPROG;
990 return (seq);
991 }
992 } else {
993 int c, n, ulun = lun;
994
995 cmd = -RQSTYPE_MODIFY_LUN;
996 c = DFLT_CMND_CNT;
997 n = DFLT_INOT_CNT;
998 if (IS_FC(isp) && lun != 0) {
999 n = 0;
1000 /*
1001 * For SCC firmware, we only deal with setting
1002 * (enabling or modifying) lun 0.
1003 */
1004 ulun = 0;
1005 }
1006 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
1007 rls_lun_statep(isp, tptr);
1008 ccb->ccb_h.status = CAM_REQ_INPROG;
1009 return (seq);
1010 }
1011 }
1012 rls_lun_statep(isp, tptr);
1013 xpt_print_path(ccb->ccb_h.path);
1014 printf("isp_lun_cmd failed\n");
1015 isp->isp_osinfo.leact[seq] = 0;
1016 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1017 return (-1);
1018}
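/*
 * The enable/disable handshake above is asynchronous: isp_en_lun() queues an
 * ENABLE/MODIFY LUN request tagged with (seq + 1) via isp_lun_cmd() and parks
 * the CCB in leact[seq]; isp_ledone() below recovers the CCB from le_reserved
 * when the firmware response arrives and completes it.
 */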
1019
1020static void
1021isp_ledone(struct ispsoftc *isp, lun_entry_t *lep)
1022{
1023 const char lfmt[] = "lun %d now %sabled for target mode on channel %d";
1024 union ccb *ccb;
1025 u_int32_t seq;
1026 tstate_t *tptr;
1027 int av;
1028 struct ccb_en_lun *cel;
1029
1030 seq = lep->le_reserved - 1;
1031 if (seq >= NLEACT) {
1032 isp_prt(isp, ISP_LOGERR,
1033 "seq out of range (%u) in isp_ledone", seq);
1034 return;
1035 }
1036 ccb = isp->isp_osinfo.leact[seq];
1037 if (ccb == 0) {
1038 isp_prt(isp, ISP_LOGERR,
1039 "no ccb for seq %u in isp_ledone", seq);
1040 return;
1041 }
1042 cel = &ccb->cel;
1043 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
1044 if (tptr == NULL) {
1045 xpt_print_path(ccb->ccb_h.path);
1046 printf("null tptr in isp_ledone\n");
1047 isp->isp_osinfo.leact[seq] = 0;
1048 return;
1049 }
1050
1051 if (lep->le_status != LUN_OK) {
1052 xpt_print_path(ccb->ccb_h.path);
1053 printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status);
1054err:
1055 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1056 xpt_print_path(ccb->ccb_h.path);
1057 rls_lun_statep(isp, tptr);
1058 isp->isp_osinfo.leact[seq] = 0;
1059 ISPLOCK_2_CAMLOCK(isp);
1060 xpt_done(ccb);
1061 CAMLOCK_2_ISPLOCK(isp);
1062 return;
1063 } else {
1064 isp_prt(isp, ISP_LOGTDEBUG0,
1065 "isp_ledone: ENABLE/MODIFY done okay");
1066 }
1067
1068
1069 if (cel->enable) {
1070 ccb->ccb_h.status = CAM_REQ_CMP;
1071 isp_prt(isp, /* ISP_LOGINFO */ ISP_LOGALL, lfmt,
1072 XS_LUN(ccb), "en", XS_CHANNEL(ccb));
1073 rls_lun_statep(isp, tptr);
1074 isp->isp_osinfo.leact[seq] = 0;
1075 ISPLOCK_2_CAMLOCK(isp);
1076 xpt_done(ccb);
1077 CAMLOCK_2_ISPLOCK(isp);
1078 return;
1079 }
1080
1081 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) {
1082 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb),
1083 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) {
1084 xpt_print_path(ccb->ccb_h.path);
1085 printf("isp_ledone: isp_lun_cmd failed\n");
1086 goto err;
1087 }
1088 rls_lun_statep(isp, tptr);
1089 return;
1090 }
1091
1092 isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb));
1093 rls_lun_statep(isp, tptr);
1094 destroy_lun_state(isp, tptr);
1095 ccb->ccb_h.status = CAM_REQ_CMP;
1096 isp->isp_osinfo.leact[seq] = 0;
1097 ISPLOCK_2_CAMLOCK(isp);
1098 xpt_done(ccb);
1099 CAMLOCK_2_ISPLOCK(isp);
1100 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) {
1101 int bus = XS_CHANNEL(ccb);
1102 av = bus << 31;
1103 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1104 if (av) {
1105 isp_prt(isp, ISP_LOGWARN,
1106 "disable target mode on channel %d failed", bus);
1107 } else {
1108 isp_prt(isp, ISP_LOGINFO,
1109 "Target Mode disabled on channel %d", bus);
1110 }
1111 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1112 }
1113}
1114
1115
1116static cam_status
1117isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1118{
1119 tstate_t *tptr;
1120 struct ccb_hdr_slist *lp;
1121 struct ccb_hdr *curelm;
1122 int found, *ctr;
1123 union ccb *accb = ccb->cab.abort_ccb;
1124
1125 isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb);
1126 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1127 int badpath = 0;
1128 if (IS_FC(isp) && (accb->ccb_h.target_id !=
1129 ((fcparam *) isp->isp_param)->isp_loopid)) {
1130 badpath = 1;
1131 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1132 ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1133 badpath = 1;
1134 }
1135 if (badpath) {
1136 /*
1137 * Being restrictive about target ids is really about
1138 * making sure we're aborting for the right multi-tid
1139 * path. This doesn't really make much sense at present.
1140 */
1141#if 0
1142 return (CAM_PATH_INVALID);
1143#endif
1144 }
1145 }
1146 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1147 if (tptr == NULL) {
1148 isp_prt(isp, ISP_LOGTDEBUG0,
1149 "isp_abort_tgt_ccb: can't get statep");
1150 return (CAM_PATH_INVALID);
1151 }
1152 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1153 lp = &tptr->atios;
1154 ctr = &tptr->atio_count;
1155 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1156 lp = &tptr->inots;
1157 ctr = &tptr->inot_count;
1158 } else {
1159 rls_lun_statep(isp, tptr);
1160 isp_prt(isp, ISP_LOGTDEBUG0,
1161 "isp_abort_tgt_ccb: bad func %d\n", accb->ccb_h.func_code);
1162 return (CAM_UA_ABORT);
1163 }
1164 curelm = SLIST_FIRST(lp);
1165 found = 0;
1166 if (curelm == &accb->ccb_h) {
1167 found = 1;
1168 SLIST_REMOVE_HEAD(lp, sim_links.sle);
1169 } else {
1170 while(curelm != NULL) {
1171 struct ccb_hdr *nextelm;
1172
1173 nextelm = SLIST_NEXT(curelm, sim_links.sle);
1174 if (nextelm == &accb->ccb_h) {
1175 found = 1;
1176 SLIST_NEXT(curelm, sim_links.sle) =
1177 SLIST_NEXT(nextelm, sim_links.sle);
1178 break;
1179 }
1180 curelm = nextelm;
1181 }
1182 }
1183 rls_lun_statep(isp, tptr);
1184 if (found) {
1185 (*ctr)--;
1186 accb->ccb_h.status = CAM_REQ_ABORTED;
1187 xpt_done(accb);
1188 return (CAM_REQ_CMP);
1189 }
1190 isp_prt(isp, ISP_LOGTDEBUG0,
1191 "isp_abort_tgt_ccb: CCB %p not found\n", ccb);
1192 return (CAM_PATH_INVALID);
1193}
1194
1195static cam_status
1196isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1197{
1198 void *qe;
1199 struct ccb_scsiio *cso = &ccb->csio;
1200 u_int16_t *hp, save_handle;
1201 u_int16_t nxti, optr;
1202 u_int8_t local[QENTRY_LEN];
1203
1204
1205 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1206 xpt_print_path(ccb->ccb_h.path);
1207 printf("Request Queue Overflow in isp_target_start_ctio\n");
1208 return (CAM_RESRC_UNAVAIL);
1209 }
1210 bzero(local, QENTRY_LEN);
1211
1212 /*
1213 * We're either moving data or completing a command here.
1214 */
1215
1216 if (IS_FC(isp)) {
1217 atio_private_data_t *atp;
1218 ct2_entry_t *cto = (ct2_entry_t *) local;
1219
1220 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1221 cto->ct_header.rqs_entry_count = 1;
1222 cto->ct_iid = cso->init_id;
1223 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1224 cto->ct_lun = ccb->ccb_h.target_lun;
1225 }
1226
1227 atp = isp_get_atpd(isp, cso->tag_id);
1228 if (atp == NULL) {
1229 isp_prt(isp, ISP_LOGERR,
1230 "cannot find private data adjunct for tag %x",
1231 cso->tag_id);
1232 return (-1);
1233 }
1234
1235 cto->ct_rxid = cso->tag_id;
1236 if (cso->dxfer_len == 0) {
1237 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1238 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1239 cto->ct_flags |= CT2_SENDSTATUS;
1240 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1241 cto->ct_resid =
1242 atp->orig_datalen - atp->bytes_xfered;
1243 if (cto->ct_resid < 0) {
1244 cto->rsp.m1.ct_scsi_status |=
1245 CT2_DATA_OVER;
1246 } else if (cto->ct_resid > 0) {
1247 cto->rsp.m1.ct_scsi_status |=
1248 CT2_DATA_UNDER;
1249 }
1250 }
1251 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1252 int m = min(cso->sense_len, MAXRESPLEN);
1253 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1254 cto->rsp.m1.ct_senselen = m;
1255 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1256 }
1257 } else {
1258 cto->ct_flags |= CT2_FLAG_MODE0;
1259 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1260 cto->ct_flags |= CT2_DATA_IN;
1261 } else {
1262 cto->ct_flags |= CT2_DATA_OUT;
1263 }
1264 cto->ct_reloff = atp->bytes_xfered;
1265 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1266 cto->ct_flags |= CT2_SENDSTATUS;
1267 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1268 cto->ct_resid =
1269 atp->orig_datalen -
1270 (atp->bytes_xfered + cso->dxfer_len);
1271 if (cto->ct_resid < 0) {
1272 cto->rsp.m0.ct_scsi_status |=
1273 CT2_DATA_OVER;
1274 } else if (cto->ct_resid > 0) {
1275 cto->rsp.m0.ct_scsi_status |=
1276 CT2_DATA_UNDER;
1277 }
1278 } else {
1279 atp->last_xframt = cso->dxfer_len;
1280 }
1281 /*
1282 * If we're sending data and status back together,
1283 * we can't also send back sense data as well.
1284 */
1285 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1286 }
1287
1288 if (cto->ct_flags & CT2_SENDSTATUS) {
1289 isp_prt(isp, ISP_LOGTDEBUG0,
1290 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1291 cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1292 cso->dxfer_len, cto->ct_resid);
1293 cto->ct_flags |= CT2_CCINCR;
1294 atp->state = ATPD_STATE_LAST_CTIO;
1295 } else
1296 atp->state = ATPD_STATE_CTIO;
1297 cto->ct_timeout = 10;
1298 hp = &cto->ct_syshandle;
1299 } else {
1300 ct_entry_t *cto = (ct_entry_t *) local;
1301
1302 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1303 cto->ct_header.rqs_entry_count = 1;
1304 cto->ct_iid = cso->init_id;
1305 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1306 cto->ct_tgt = ccb->ccb_h.target_id;
1307 cto->ct_lun = ccb->ccb_h.target_lun;
1308 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1309 if (AT_HAS_TAG(cso->tag_id)) {
1310 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1311 cto->ct_flags |= CT_TQAE;
1312 }
1313 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1314 cto->ct_flags |= CT_NODISC;
1315 }
1316 if (cso->dxfer_len == 0) {
1317 cto->ct_flags |= CT_NO_DATA;
1318 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1319 cto->ct_flags |= CT_DATA_IN;
1320 } else {
1321 cto->ct_flags |= CT_DATA_OUT;
1322 }
1323 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1324 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1325 cto->ct_scsi_status = cso->scsi_status;
1326 cto->ct_resid = cso->resid;
1327 isp_prt(isp, ISP_LOGTDEBUG0,
1328 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1329 cto->ct_fwhandle, cso->scsi_status, cso->resid,
1330 cso->tag_id);
1331 }
1332 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1333 cto->ct_timeout = 10;
1334 hp = &cto->ct_syshandle;
1335 }
1336
1337 if (isp_save_xs_tgt(isp, ccb, hp)) {
1338 xpt_print_path(ccb->ccb_h.path);
1339 printf("No XFLIST pointers for isp_target_start_ctio\n");
1340 return (CAM_RESRC_UNAVAIL);
1341 }
1342
1343
1344 /*
1345 * Call the dma setup routines for this entry (and any subsequent
1346 * CTIOs) if there's data to move, and then tell the f/w it's got
1347 * new things to play with. As with isp_start's usage of DMA setup,
1348 * any swizzling is done in the machine dependent layer. Because
1349 * of this, we put the request onto the queue area first in native
1350 * format.
1351 */
1352
1353 save_handle = *hp;
1354
1355 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1356 case CMD_QUEUED:
1357 ISP_ADD_REQUEST(isp, nxti);
1358 return (CAM_REQ_INPROG);
1359
1360 case CMD_EAGAIN:
1361 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1362 isp_destroy_tgt_handle(isp, save_handle);
1363 return (CAM_RESRC_UNAVAIL);
1364
1365 default:
1366 isp_destroy_tgt_handle(isp, save_handle);
1367 return (XS_ERR(ccb));
1368 }
1369}
1370
1371static void
1372isp_refire_putback_atio(void *arg)
1373{
1374 int s = splcam();
1375 isp_target_putback_atio(arg);
1376 splx(s);
1377}
1378
1379static void
1380isp_target_putback_atio(union ccb *ccb)
1381{
1382 struct ispsoftc *isp;
1383 struct ccb_scsiio *cso;
1384 u_int16_t nxti, optr;
1385 void *qe;
1386
1387 isp = XS_ISP(ccb);
1388
1389 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1390 (void) timeout(isp_refire_putback_atio, ccb, 10);
1391 isp_prt(isp, ISP_LOGWARN,
1392 "isp_target_putback_atio: Request Queue Overflow");
1393 return;
1394 }
1395 bzero(qe, QENTRY_LEN);
1396 cso = &ccb->csio;
1397 if (IS_FC(isp)) {
1398 at2_entry_t local, *at = &local;
1399 MEMZERO(at, sizeof (at2_entry_t));
1400 at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1401 at->at_header.rqs_entry_count = 1;
1402 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1403 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1404 } else {
1405 at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1406 }
1407 at->at_status = CT_OK;
1408 at->at_rxid = cso->tag_id;
1409 at->at_iid = cso->ccb_h.target_id;
1410 isp_put_atio2(isp, at, qe);
1411 } else {
1412 at_entry_t local, *at = &local;
1413 MEMZERO(at, sizeof (at_entry_t));
1414 at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1415 at->at_header.rqs_entry_count = 1;
1416 at->at_iid = cso->init_id;
1417 at->at_iid |= XS_CHANNEL(ccb) << 7;
1418 at->at_tgt = cso->ccb_h.target_id;
1419 at->at_lun = cso->ccb_h.target_lun;
1420 at->at_status = CT_OK;
1421 at->at_tag_val = AT_GET_TAG(cso->tag_id);
1422 at->at_handle = AT_GET_HANDLE(cso->tag_id);
1423 isp_put_atio(isp, at, qe);
1424 }
1425 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1426 ISP_ADD_REQUEST(isp, nxti);
1427 isp_complete_ctio(ccb);
1428}
1429
1430static void
1431isp_complete_ctio(union ccb *ccb)
1432{
1433 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1434 ccb->ccb_h.status |= CAM_REQ_CMP;
1435 }
1436 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1437 xpt_done(ccb);
1438}
1439
1440/*
1441 * Handle ATIO stuff that the generic code can't.
1442 * This means handling CDBs.
1443 */
1444
1445static int
1446isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1447{
1448 tstate_t *tptr;
1449 int status, bus, iswildcard;
1450 struct ccb_accept_tio *atiop;
1451
1452 /*
1453 * The firmware status (except for the QLTM_SVALID bit)
1454 * indicates why this ATIO was sent to us.
1455 *
1456 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1457 *
1458 * If the DISCONNECTS DISABLED bit is set in the flags field,
1459 * we're still connected on the SCSI bus.
1460 */
1461 status = aep->at_status;
1462 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1463 /*
1464 * Bus Phase Sequence error. We should have sense data
1465 * suggested by the f/w. I'm not sure quite yet what
1466 * to do about this for CAM.
1467 */
1468 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1469 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1470 return (0);
1471 }
1472 if ((status & ~QLTM_SVALID) != AT_CDB) {
1473 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1474 status);
1475 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1476 return (0);
1477 }
1478
1479 bus = GET_BUS_VAL(aep->at_iid);
1480 tptr = get_lun_statep(isp, bus, aep->at_lun);
1481 if (tptr == NULL) {
1482 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1483 if (tptr == NULL) {
1484 isp_endcmd(isp, aep,
1485 SCSI_STATUS_CHECK_COND | ECMD_SVALID |
1486 (0x5 << 12) | (0x25 << 16), 0);
1487 return (0);
1488 }
1489 iswildcard = 1;
1490 } else {
1491 iswildcard = 0;
1492 }
1493
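	/*
	 * Note that tptr cannot actually be NULL at this point: both paths
	 * above either return or leave us holding a (possibly wildcard)
	 * lun state.  The check below is purely defensive.
	 */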
1494 if (tptr == NULL) {
1495 /*
1496 * Because we can't autofeed sense data back with
1497 * a command for parallel SCSI, we can't give back
1498 * a CHECK CONDITION. We'll give back a BUSY status
1499 * instead. This works out okay because the only
1500 * time we should, in fact, get this, is in the
1501 * case that somebody configured us without the
1502 * blackhole driver, so they get what they deserve.
1503 */
1504 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1505 return (0);
1506 }
1507
1508 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1509 if (atiop == NULL) {
1510 /*
1511 * Because we can't autofeed sense data back with
1512 * a command for parallel SCSI, we can't give back
1513 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1514 * instead. This works out okay because the only time we
1515 * should, in fact, get this, is in the case that we've
1516 * run out of ATIOS.
1517 */
1518 xpt_print_path(tptr->owner);
1519 isp_prt(isp, ISP_LOGWARN,
1520 "no ATIOS for lun %d from initiator %d on channel %d",
1521 aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1522 if (aep->at_flags & AT_TQAE)
1523 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1524 else
1525 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1526 rls_lun_statep(isp, tptr);
1527 return (0);
1528 }
1529 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1530 tptr->atio_count--;
1531 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
1532 aep->at_lun, tptr->atio_count);
1533 if (iswildcard) {
1534 atiop->ccb_h.target_id = aep->at_tgt;
1535 atiop->ccb_h.target_lun = aep->at_lun;
1536 }
1537 if (aep->at_flags & AT_NODISC) {
1538 atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1539 } else {
1540 atiop->ccb_h.flags = 0;
1541 }
1542
1543 if (status & QLTM_SVALID) {
1544 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1545 atiop->sense_len = amt;
1546 MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1547 } else {
1548 atiop->sense_len = 0;
1549 }
1550
1551 atiop->init_id = GET_IID_VAL(aep->at_iid);
1552 atiop->cdb_len = aep->at_cdblen;
1553 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1554 atiop->ccb_h.status = CAM_CDB_RECVD;
1555 /*
1556 * Construct a tag 'id' based upon tag value (which may be 0..255)
1557 * and the handle (which we have to preserve).
1558 */
1559 AT_MAKE_TAGID(atiop->tag_id, device_get_unit(isp->isp_dev), aep);
1560 if (aep->at_flags & AT_TQAE) {
1561 atiop->tag_action = aep->at_tag_type;
1562 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1563 }
1564 xpt_done((union ccb*)atiop);
1565 isp_prt(isp, ISP_LOGTDEBUG0,
1566 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1567 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1568 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1569 aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1570 "nondisc" : "disconnecting");
1571 rls_lun_statep(isp, tptr);
1572 return (0);
1573}
1574
1575static int
1576isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1577{
1578 lun_id_t lun;
1579 tstate_t *tptr;
1580 struct ccb_accept_tio *atiop;
1581 atio_private_data_t *atp;
1582
1583 /*
1584 * The firmware status (except for the QLTM_SVALID bit)
1585 * indicates why this ATIO was sent to us.
1586 *
1587	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1588 */
1589 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1590 isp_prt(isp, ISP_LOGWARN,
1591 "bogus atio (0x%x) leaked to platform", aep->at_status);
1592 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1593 return (0);
1594 }
1595
1596 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1597 lun = aep->at_scclun;
1598 } else {
1599 lun = aep->at_lun;
1600 }
1601 tptr = get_lun_statep(isp, 0, lun);
1602 if (tptr == NULL) {
1603 isp_prt(isp, ISP_LOGTDEBUG0,
1604 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
1605 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1606 if (tptr == NULL) {
1607 isp_endcmd(isp, aep,
1608 SCSI_STATUS_CHECK_COND | ECMD_SVALID |
1609 (0x5 << 12) | (0x25 << 16), 0);
1610 return (0);
1611 }
1612 }
1613
1614 atp = isp_get_atpd(isp, 0);
1615 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1616 if (atiop == NULL || atp == NULL) {
1617
1618 /*
1619 * Because we can't autofeed sense data back with
1620 * a command for parallel SCSI, we can't give back
1621 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1622 * instead. This works out okay because the only time we
1623 * should, in fact, get this, is in the case that we've
1624 * run out of ATIOS.
1625 */
1626 xpt_print_path(tptr->owner);
1627 isp_prt(isp, ISP_LOGWARN,
1628 "no %s for lun %d from initiator %d",
1629 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1630 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1631 rls_lun_statep(isp, tptr);
1632 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1633 return (0);
1634 }
1635 atp->state = ATPD_STATE_ATIO;
1636 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1637 tptr->atio_count--;
1638 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
1639 lun, tptr->atio_count);
1640
1641 if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1642 atiop->ccb_h.target_id =
1643 ((fcparam *)isp->isp_param)->isp_loopid;
1644 atiop->ccb_h.target_lun = lun;
1645 }
1646 /*
1647 * We don't get 'suggested' sense data as we do with SCSI cards.
1648 */
1649 atiop->sense_len = 0;
1650
1651 atiop->init_id = aep->at_iid;
1652 atiop->cdb_len = ATIO2_CDBLEN;
1653 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1654 atiop->ccb_h.status = CAM_CDB_RECVD;
1655 atiop->tag_id = aep->at_rxid;
1656 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1657 case ATIO2_TC_ATTR_SIMPLEQ:
1658 atiop->tag_action = MSG_SIMPLE_Q_TAG;
1659 break;
1660 case ATIO2_TC_ATTR_HEADOFQ:
1661 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1662 break;
1663 case ATIO2_TC_ATTR_ORDERED:
1664 atiop->tag_action = MSG_ORDERED_Q_TAG;
1665 break;
1666 case ATIO2_TC_ATTR_ACAQ: /* ?? */
1667 case ATIO2_TC_ATTR_UNTAGGED:
1668 default:
1669 atiop->tag_action = 0;
1670 break;
1671 }
1672 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1673
1674 atp->tag = atiop->tag_id;
1675 atp->lun = lun;
1676 atp->orig_datalen = aep->at_datalen;
1677 atp->last_xframt = 0;
1678 atp->bytes_xfered = 0;
1679 atp->state = ATPD_STATE_CAM;
1680	ISPLOCK_2_CAMLOCK(isp);
1681 xpt_done((union ccb*)atiop);
1682
1683 isp_prt(isp, ISP_LOGTDEBUG0,
1684 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1685 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1686 lun, aep->at_taskflags, aep->at_datalen);
1687 rls_lun_statep(isp, tptr);
1688 return (0);
1689}
1690
1691static int
1692isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1693{
1694 union ccb *ccb;
1695 int sentstatus, ok, notify_cam, resid = 0;
1696 u_int16_t tval;
1697
1698 /*
1699 * CTIO and CTIO2 are close enough....
1700 */
1701
1702 ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
1703 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1704 isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1705
1706 if (IS_FC(isp)) {
1707 ct2_entry_t *ct = arg;
1708 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1709 if (atp == NULL) {
1710 isp_prt(isp, ISP_LOGERR,
1711 "cannot find adjunct for %x after I/O",
1712 ct->ct_rxid);
1713 return (0);
1714 }
1715 sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1716 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1717 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1718 ccb->ccb_h.status |= CAM_SENT_SENSE;
1719 }
1720 notify_cam = ct->ct_header.rqs_seqno & 0x1;
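		/*
		 * last_xframt was the length we asked the previous data
		 * CTIO2 to move and the residual is what the firmware did
		 * not move, so the difference is what actually transferred.
		 */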
1721 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1722 resid = ct->ct_resid;
1723 atp->bytes_xfered += (atp->last_xframt - resid);
1724 atp->last_xframt = 0;
1725 }
1726 if (sentstatus || !ok) {
1727 atp->tag = 0;
1728 }
1729 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1730 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1731 ct->ct_rxid, ct->ct_status, ct->ct_flags,
1732 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1733 resid, sentstatus? "FIN" : "MID");
1734 tval = ct->ct_rxid;
1735
1736 /* XXX: should really come after isp_complete_ctio */
1737 atp->state = ATPD_STATE_PDON;
1738 } else {
1739 ct_entry_t *ct = arg;
1740 sentstatus = ct->ct_flags & CT_SENDSTATUS;
1741 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1742 /*
1743 * We *ought* to be able to get back to the original ATIO
1744 * here, but for some reason this gets lost. It's just as
1745 * well because it's squirrelled away as part of periph
1746 * private data.
1747 *
1748 * We can live without it as long as we continue to use
1749 * the auto-replenish feature for CTIOs.
1750 */
1751 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1752 if (ct->ct_status & QLTM_SVALID) {
1753 char *sp = (char *)ct;
1754 sp += CTIO_SENSE_OFFSET;
1755 ccb->csio.sense_len =
1756 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1757 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1758 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1759 }
1760 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1761 resid = ct->ct_resid;
1762 }
1763 isp_prt(isp, ISP_LOGTDEBUG0,
1764 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1765 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1766 ct->ct_status, ct->ct_flags, resid,
1767 sentstatus? "FIN" : "MID");
1768 tval = ct->ct_fwhandle;
1769 }
1770 ccb->csio.resid += resid;
1771
1772 /*
1773 * We're here either because intermediate data transfers are done
1774 * and/or the final status CTIO (which may have joined with a
1775 * Data Transfer) is done.
1776 *
1777 * In any case, for this platform, the upper layers figure out
1778 * what to do next, so all we do here is collect status and
1779 * pass information along. Any DMA handles have already been
1780 * freed.
1781 */
1782 if (notify_cam == 0) {
1783 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
1784 return (0);
1785 }
1786
1787 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1788 (sentstatus)? " FINAL " : "MIDTERM ", tval);
1789
1790 if (!ok) {
1791 isp_target_putback_atio(ccb);
1792 } else {
1793 isp_complete_ctio(ccb);
1794
1795 }
1796 return (0);
1797}
1798
614static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
615static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
616
617static INLINE int
618is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
619{
620 tstate_t *tptr;
621 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
622 if (tptr == NULL) {
623 return (0);
624 }
625 do {
626 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
627 return (1);
628 }
629 } while ((tptr = tptr->next) != NULL);
630 return (0);
631}
632
633static INLINE int
634are_any_luns_enabled(struct ispsoftc *isp, int port)
635{
636 int lo, hi;
637 if (IS_DUALBUS(isp)) {
638 lo = (port * (LUN_HASH_SIZE >> 1));
639 hi = lo + (LUN_HASH_SIZE >> 1);
640 } else {
641 lo = 0;
642 hi = LUN_HASH_SIZE;
643 }
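	/*
	 * On dual bus adapters each channel owns half of the lun hash
	 * table, which is what the lo/hi bounds computed above select.
	 */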
644	for (; lo < hi; lo++) {
645 if (isp->isp_osinfo.lun_hash[lo]) {
646 return (1);
647 }
648 }
649 return (0);
650}
651
652static INLINE tstate_t *
653get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
654{
655 tstate_t *tptr = NULL;
656
657 if (lun == CAM_LUN_WILDCARD) {
658 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
659 tptr = &isp->isp_osinfo.tsdflt[bus];
660 tptr->hold++;
661 return (tptr);
662 }
663 return (NULL);
664 } else {
665 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
666 if (tptr == NULL) {
667 return (NULL);
668 }
669 }
670
671 do {
672 if (tptr->lun == lun && tptr->bus == bus) {
673 tptr->hold++;
674 return (tptr);
675 }
676 } while ((tptr = tptr->next) != NULL);
677 return (tptr);
678}
679
680static INLINE void
681rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
682{
683 if (tptr->hold)
684 tptr->hold--;
685}
686
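/*
 * Find the ATIO private data slot whose tag matches the given tag.
 * Entries are recycled by clearing their tag, so callers pass a tag
 * of zero to claim a free slot.
 */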
687static INLINE atio_private_data_t *
688isp_get_atpd(struct ispsoftc *isp, int tag)
689{
690 atio_private_data_t *atp;
691 for (atp = isp->isp_osinfo.atpdp;
692 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
693 if (atp->tag == tag)
694 return (atp);
695 }
696 return (NULL);
697}
698
699static cam_status
700create_lun_state(struct ispsoftc *isp, int bus,
701 struct cam_path *path, tstate_t **rslt)
702{
703 cam_status status;
704 lun_id_t lun;
705 int hfx;
706 tstate_t *tptr, *new;
707
708 lun = xpt_path_lun_id(path);
709 if (lun < 0) {
710 return (CAM_LUN_INVALID);
711 }
712 if (is_lun_enabled(isp, bus, lun)) {
713 return (CAM_LUN_ALRDY_ENA);
714 }
715 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
716 if (new == NULL) {
717 return (CAM_RESRC_UNAVAIL);
718 }
719
720 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
721 xpt_path_target_id(path), xpt_path_lun_id(path));
722 if (status != CAM_REQ_CMP) {
723 free(new, M_DEVBUF);
724 return (status);
725 }
726 new->bus = bus;
727 new->lun = lun;
728 SLIST_INIT(&new->atios);
729 SLIST_INIT(&new->inots);
730 new->hold = 1;
731
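	/*
	 * Link the new lun state into its hash bucket; buckets are simple
	 * singly linked lists searched by (bus, lun).
	 */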
732 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
733 tptr = isp->isp_osinfo.lun_hash[hfx];
734 if (tptr == NULL) {
735 isp->isp_osinfo.lun_hash[hfx] = new;
736 } else {
737 while (tptr->next)
738 tptr = tptr->next;
739 tptr->next = new;
740 }
741 *rslt = new;
742 return (CAM_REQ_CMP);
743}
744
745static INLINE void
746destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
747{
748 int hfx;
749 tstate_t *lw, *pw;
750
751 if (tptr->hold) {
752 return;
753 }
754 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
755 pw = isp->isp_osinfo.lun_hash[hfx];
756 if (pw == NULL) {
757 return;
758 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
759 isp->isp_osinfo.lun_hash[hfx] = pw->next;
760 } else {
761 lw = pw;
762 pw = lw->next;
763 while (pw) {
764 if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
765 lw->next = pw->next;
766 break;
767 }
768 lw = pw;
769 pw = pw->next;
770 }
771 if (pw == NULL) {
772 return;
773 }
774 }
775 free(tptr, M_DEVBUF);
776}
777
778/*
779 * Enable luns.
780 */
781static int
782isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
783{
784 struct ccb_en_lun *cel = &ccb->cel;
785 tstate_t *tptr;
786 u_int32_t seq;
787 int bus, cmd, av, wildcard, tm_on;
788 lun_id_t lun;
789 target_id_t tgt;
790
791 bus = XS_CHANNEL(ccb);
792 if (bus > 1) {
793 xpt_print_path(ccb->ccb_h.path);
794 printf("illegal bus %d\n", bus);
795 ccb->ccb_h.status = CAM_PATH_INVALID;
796 return (-1);
797 }
798 tgt = ccb->ccb_h.target_id;
799 lun = ccb->ccb_h.target_lun;
800
801 isp_prt(isp, ISP_LOGTDEBUG0,
802 "isp_en_lun: %sabling lun 0x%x on channel %d",
803 cel->enable? "en" : "dis", lun, bus);
804
805
806 if ((lun != CAM_LUN_WILDCARD) &&
807 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
808 ccb->ccb_h.status = CAM_LUN_INVALID;
809 return (-1);
810 }
811
812 if (IS_SCSI(isp)) {
813 sdparam *sdp = isp->isp_param;
814 sdp += bus;
815 if (tgt != CAM_TARGET_WILDCARD &&
816 tgt != sdp->isp_initiator_id) {
817 ccb->ccb_h.status = CAM_TID_INVALID;
818 return (-1);
819 }
820 } else {
821 /*
822 * There's really no point in doing this yet w/o multi-tid
823 * capability. Even then, it's problematic.
824 */
825#if 0
826 if (tgt != CAM_TARGET_WILDCARD &&
827 tgt != FCPARAM(isp)->isp_iid) {
828 ccb->ccb_h.status = CAM_TID_INVALID;
829 return (-1);
830 }
831#endif
832 /*
833 * This is as a good a place as any to check f/w capabilities.
834 */
835 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
836 isp_prt(isp, ISP_LOGERR,
837 "firmware does not support target mode");
838 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
839 return (-1);
840 }
841 /*
842 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
843	 * XXX: dork with our already fragile enable/disable code.
844 */
845 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
846 isp_prt(isp, ISP_LOGERR,
847 "firmware not SCCLUN capable");
848 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
849 return (-1);
850 }
851 }
852
853 if (tgt == CAM_TARGET_WILDCARD) {
854 if (lun == CAM_LUN_WILDCARD) {
855 wildcard = 1;
856 } else {
857 ccb->ccb_h.status = CAM_LUN_INVALID;
858 return (-1);
859 }
860 } else {
861 wildcard = 0;
862 }
863
864 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0;
865
866 /*
867 * Next check to see whether this is a target/lun wildcard action.
868 *
869 * If so, we know that we can accept commands for luns that haven't
870 * been enabled yet and send them upstream. Otherwise, we have to
871 * handle them locally (if we see them at all).
872 */
873
874 if (wildcard) {
875 tptr = &isp->isp_osinfo.tsdflt[bus];
876 if (cel->enable) {
877 if (tm_on) {
878 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
879 return (-1);
880 }
881 ccb->ccb_h.status =
882 xpt_create_path(&tptr->owner, NULL,
883 xpt_path_path_id(ccb->ccb_h.path),
884 xpt_path_target_id(ccb->ccb_h.path),
885 xpt_path_lun_id(ccb->ccb_h.path));
886 if (ccb->ccb_h.status != CAM_REQ_CMP) {
887 return (-1);
888 }
889 SLIST_INIT(&tptr->atios);
890 SLIST_INIT(&tptr->inots);
891 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
892 } else {
893 if (tm_on == 0) {
894 ccb->ccb_h.status = CAM_REQ_CMP;
895 return (-1);
896 }
897 if (tptr->hold) {
898 ccb->ccb_h.status = CAM_SCSI_BUSY;
899 return (-1);
900 }
901 xpt_free_path(tptr->owner);
902 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
903 }
904 }
905
906 /*
907 * Now check to see whether this bus needs to be
908 * enabled/disabled with respect to target mode.
909 */
910 av = bus << 31;
911 if (cel->enable && tm_on == 0) {
912 av |= ENABLE_TARGET_FLAG;
913 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
914 if (av) {
915 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
916 if (wildcard) {
917 isp->isp_osinfo.tmflags[bus] &=
918 ~TM_WILDCARD_ENABLED;
919 xpt_free_path(tptr->owner);
920 }
921 return (-1);
922 }
923 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
924 isp_prt(isp, ISP_LOGINFO,
925 "Target Mode enabled on channel %d", bus);
926 } else if (cel->enable == 0 && tm_on && wildcard) {
927 if (are_any_luns_enabled(isp, bus)) {
928 ccb->ccb_h.status = CAM_SCSI_BUSY;
929 return (-1);
930 }
931 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
932 if (av) {
933 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
934 return (-1);
935 }
936 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
937 isp_prt(isp, ISP_LOGINFO,
938 "Target Mode disabled on channel %d", bus);
939 }
940
941 if (wildcard) {
942 ccb->ccb_h.status = CAM_REQ_CMP;
943 return (-1);
944 }
945
946 /*
947 * Find an empty slot
948 */
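	/*
	 * The slot index (plus one) is handed to isp_lun_cmd() and shows
	 * up again in le_reserved, which is how isp_ledone() finds this
	 * CCB when the enable/modify command completes.
	 */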
949 for (seq = 0; seq < NLEACT; seq++) {
950 if (isp->isp_osinfo.leact[seq] == 0) {
951 break;
952 }
953 }
954 if (seq >= NLEACT) {
955 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
956 return (-1);
957
958 }
959 isp->isp_osinfo.leact[seq] = ccb;
960
961 if (cel->enable) {
962 ccb->ccb_h.status =
963 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
964 if (ccb->ccb_h.status != CAM_REQ_CMP) {
965 isp->isp_osinfo.leact[seq] = 0;
966 return (-1);
967 }
968 } else {
969 tptr = get_lun_statep(isp, bus, lun);
970 if (tptr == NULL) {
971 ccb->ccb_h.status = CAM_LUN_INVALID;
972 return (-1);
973 }
974 }
975
976 if (cel->enable) {
977 int c, n, ulun = lun;
978
979 cmd = RQSTYPE_ENABLE_LUN;
980 c = DFLT_CMND_CNT;
981 n = DFLT_INOT_CNT;
982 if (IS_FC(isp) && lun != 0) {
983 cmd = RQSTYPE_MODIFY_LUN;
984 n = 0;
985 /*
986 * For SCC firmware, we only deal with setting
987 * (enabling or modifying) lun 0.
988 */
989 ulun = 0;
990 }
991 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
992 rls_lun_statep(isp, tptr);
993 ccb->ccb_h.status = CAM_REQ_INPROG;
994 return (seq);
995 }
996 } else {
997 int c, n, ulun = lun;
998
999 cmd = -RQSTYPE_MODIFY_LUN;
1000 c = DFLT_CMND_CNT;
1001 n = DFLT_INOT_CNT;
1002 if (IS_FC(isp) && lun != 0) {
1003 n = 0;
1004 /*
1005 * For SCC firmware, we only deal with setting
1006 * (enabling or modifying) lun 0.
1007 */
1008 ulun = 0;
1009 }
1010 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
1011 rls_lun_statep(isp, tptr);
1012 ccb->ccb_h.status = CAM_REQ_INPROG;
1013 return (seq);
1014 }
1015 }
1016 rls_lun_statep(isp, tptr);
1017 xpt_print_path(ccb->ccb_h.path);
1018 printf("isp_lun_cmd failed\n");
1019 isp->isp_osinfo.leact[seq] = 0;
1020 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1021 return (-1);
1022}
1023
1024static void
1025isp_ledone(struct ispsoftc *isp, lun_entry_t *lep)
1026{
1027 const char lfmt[] = "lun %d now %sabled for target mode on channel %d";
1028 union ccb *ccb;
1029 u_int32_t seq;
1030 tstate_t *tptr;
1031 int av;
1032 struct ccb_en_lun *cel;
1033
1034 seq = lep->le_reserved - 1;
1035 if (seq >= NLEACT) {
1036 isp_prt(isp, ISP_LOGERR,
1037 "seq out of range (%u) in isp_ledone", seq);
1038 return;
1039 }
1040 ccb = isp->isp_osinfo.leact[seq];
1041 if (ccb == 0) {
1042 isp_prt(isp, ISP_LOGERR,
1043 "no ccb for seq %u in isp_ledone", seq);
1044 return;
1045 }
1046 cel = &ccb->cel;
1047 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
1048 if (tptr == NULL) {
1049 xpt_print_path(ccb->ccb_h.path);
1050 printf("null tptr in isp_ledone\n");
1051 isp->isp_osinfo.leact[seq] = 0;
1052 return;
1053 }
1054
1055 if (lep->le_status != LUN_OK) {
1056 xpt_print_path(ccb->ccb_h.path);
1057 printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status);
1058err:
1059 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1060 xpt_print_path(ccb->ccb_h.path);
1061 rls_lun_statep(isp, tptr);
1062 isp->isp_osinfo.leact[seq] = 0;
1063 ISPLOCK_2_CAMLOCK(isp);
1064 xpt_done(ccb);
1065 CAMLOCK_2_ISPLOCK(isp);
1066 return;
1067 } else {
1068 isp_prt(isp, ISP_LOGTDEBUG0,
1069 "isp_ledone: ENABLE/MODIFY done okay");
1070 }
1071
1072
1073 if (cel->enable) {
1074 ccb->ccb_h.status = CAM_REQ_CMP;
1075 isp_prt(isp, /* ISP_LOGINFO */ ISP_LOGALL, lfmt,
1076 XS_LUN(ccb), "en", XS_CHANNEL(ccb));
1077 rls_lun_statep(isp, tptr);
1078 isp->isp_osinfo.leact[seq] = 0;
1079 ISPLOCK_2_CAMLOCK(isp);
1080 xpt_done(ccb);
1081 CAMLOCK_2_ISPLOCK(isp);
1082 return;
1083 }
1084
1085 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) {
1086 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb),
1087 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) {
1088 xpt_print_path(ccb->ccb_h.path);
1089 printf("isp_ledone: isp_lun_cmd failed\n");
1090 goto err;
1091 }
1092 rls_lun_statep(isp, tptr);
1093 return;
1094 }
1095
1096 isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb));
1097 rls_lun_statep(isp, tptr);
1098 destroy_lun_state(isp, tptr);
1099 ccb->ccb_h.status = CAM_REQ_CMP;
1100 isp->isp_osinfo.leact[seq] = 0;
1101 ISPLOCK_2_CAMLOCK(isp);
1102 xpt_done(ccb);
1103 CAMLOCK_2_ISPLOCK(isp);
1104 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) {
1105 int bus = XS_CHANNEL(ccb);
1106 av = bus << 31;
1107 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1108 if (av) {
1109 isp_prt(isp, ISP_LOGWARN,
1110 "disable target mode on channel %d failed", bus);
1111 } else {
1112 isp_prt(isp, ISP_LOGINFO,
1113 "Target Mode disabled on channel %d", bus);
1114 }
1115 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1116 }
1117}
1118
1119
1120static cam_status
1121isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1122{
1123 tstate_t *tptr;
1124 struct ccb_hdr_slist *lp;
1125 struct ccb_hdr *curelm;
1126 int found, *ctr;
1127 union ccb *accb = ccb->cab.abort_ccb;
1128
1129 isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb);
1130 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1131 int badpath = 0;
1132 if (IS_FC(isp) && (accb->ccb_h.target_id !=
1133 ((fcparam *) isp->isp_param)->isp_loopid)) {
1134 badpath = 1;
1135 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1136 ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1137 badpath = 1;
1138 }
1139 if (badpath) {
1140 /*
1141 * Being restrictive about target ids is really about
1142 * making sure we're aborting for the right multi-tid
1143 * path. This doesn't really make much sense at present.
1144 */
1145#if 0
1146 return (CAM_PATH_INVALID);
1147#endif
1148 }
1149 }
1150 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1151 if (tptr == NULL) {
1152 isp_prt(isp, ISP_LOGTDEBUG0,
1153 "isp_abort_tgt_ccb: can't get statep");
1154 return (CAM_PATH_INVALID);
1155 }
1156 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1157 lp = &tptr->atios;
1158 ctr = &tptr->atio_count;
1159 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1160 lp = &tptr->inots;
1161 ctr = &tptr->inot_count;
1162 } else {
1163 rls_lun_statep(isp, tptr);
1164 isp_prt(isp, ISP_LOGTDEBUG0,
1165 "isp_abort_tgt_ccb: bad func %d\n", accb->ccb_h.func_code);
1166 return (CAM_UA_ABORT);
1167 }
1168 curelm = SLIST_FIRST(lp);
1169 found = 0;
1170 if (curelm == &accb->ccb_h) {
1171 found = 1;
1172 SLIST_REMOVE_HEAD(lp, sim_links.sle);
1173 } else {
1174 while(curelm != NULL) {
1175 struct ccb_hdr *nextelm;
1176
1177 nextelm = SLIST_NEXT(curelm, sim_links.sle);
1178 if (nextelm == &accb->ccb_h) {
1179 found = 1;
1180 SLIST_NEXT(curelm, sim_links.sle) =
1181 SLIST_NEXT(nextelm, sim_links.sle);
1182 break;
1183 }
1184 curelm = nextelm;
1185 }
1186 }
1187 rls_lun_statep(isp, tptr);
1188 if (found) {
1189		(*ctr)--;
1190 accb->ccb_h.status = CAM_REQ_ABORTED;
1191 xpt_done(accb);
1192 return (CAM_REQ_CMP);
1193 }
1194 isp_prt(isp, ISP_LOGTDEBUG0,
1195	    "isp_abort_tgt_ccb: CCB %p not found\n", accb);
1196 return (CAM_PATH_INVALID);
1197}
1198
1199static cam_status
1200isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1201{
1202 void *qe;
1203 struct ccb_scsiio *cso = &ccb->csio;
1204 u_int16_t *hp, save_handle;
1205 u_int16_t nxti, optr;
1206 u_int8_t local[QENTRY_LEN];
1207
1208
1209 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1210 xpt_print_path(ccb->ccb_h.path);
1211 printf("Request Queue Overflow in isp_target_start_ctio\n");
1212 return (CAM_RESRC_UNAVAIL);
1213 }
1214 bzero(local, QENTRY_LEN);
1215
1216 /*
1217 * We're either moving data or completing a command here.
1218 */
1219
1220 if (IS_FC(isp)) {
1221 atio_private_data_t *atp;
1222 ct2_entry_t *cto = (ct2_entry_t *) local;
1223
1224 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1225 cto->ct_header.rqs_entry_count = 1;
1226 cto->ct_iid = cso->init_id;
1227 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1228 cto->ct_lun = ccb->ccb_h.target_lun;
1229 }
1230
1231 atp = isp_get_atpd(isp, cso->tag_id);
1232 if (atp == NULL) {
1233 isp_prt(isp, ISP_LOGERR,
1234 "cannot find private data adjunct for tag %x",
1235 cso->tag_id);
1236 return (-1);
1237 }
1238
1239 cto->ct_rxid = cso->tag_id;
1240 if (cso->dxfer_len == 0) {
1241 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1242 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1243 cto->ct_flags |= CT2_SENDSTATUS;
1244 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1245 cto->ct_resid =
1246 atp->orig_datalen - atp->bytes_xfered;
1247 if (cto->ct_resid < 0) {
1248 cto->rsp.m1.ct_scsi_status |=
1249 CT2_DATA_OVER;
1250 } else if (cto->ct_resid > 0) {
1251 cto->rsp.m1.ct_scsi_status |=
1252 CT2_DATA_UNDER;
1253 }
1254 }
1255 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1256 int m = min(cso->sense_len, MAXRESPLEN);
1257 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1258 cto->rsp.m1.ct_senselen = m;
1259 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1260 }
1261 } else {
1262 cto->ct_flags |= CT2_FLAG_MODE0;
1263 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1264 cto->ct_flags |= CT2_DATA_IN;
1265 } else {
1266 cto->ct_flags |= CT2_DATA_OUT;
1267 }
1268 cto->ct_reloff = atp->bytes_xfered;
1269 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1270 cto->ct_flags |= CT2_SENDSTATUS;
1271 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1272 cto->ct_resid =
1273 atp->orig_datalen -
1274 (atp->bytes_xfered + cso->dxfer_len);
1275 if (cto->ct_resid < 0) {
1276 cto->rsp.m0.ct_scsi_status |=
1277 CT2_DATA_OVER;
1278 } else if (cto->ct_resid > 0) {
1279 cto->rsp.m0.ct_scsi_status |=
1280 CT2_DATA_UNDER;
1281 }
1282 } else {
1283 atp->last_xframt = cso->dxfer_len;
1284 }
1285 /*
1286 * If we're sending data and status back together,
1287 * we can't also send back sense data as well.
1288 */
1289 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1290 }
1291
1292 if (cto->ct_flags & CT2_SENDSTATUS) {
1293 isp_prt(isp, ISP_LOGTDEBUG0,
1294 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1295 cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1296 cso->dxfer_len, cto->ct_resid);
1297 cto->ct_flags |= CT2_CCINCR;
1298 atp->state = ATPD_STATE_LAST_CTIO;
1299 } else
1300 atp->state = ATPD_STATE_CTIO;
1301 cto->ct_timeout = 10;
1302 hp = &cto->ct_syshandle;
1303 } else {
1304 ct_entry_t *cto = (ct_entry_t *) local;
1305
1306 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1307 cto->ct_header.rqs_entry_count = 1;
1308 cto->ct_iid = cso->init_id;
1309 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1310 cto->ct_tgt = ccb->ccb_h.target_id;
1311 cto->ct_lun = ccb->ccb_h.target_lun;
1312 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1313 if (AT_HAS_TAG(cso->tag_id)) {
1314 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1315 cto->ct_flags |= CT_TQAE;
1316 }
1317 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1318 cto->ct_flags |= CT_NODISC;
1319 }
1320 if (cso->dxfer_len == 0) {
1321 cto->ct_flags |= CT_NO_DATA;
1322 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1323 cto->ct_flags |= CT_DATA_IN;
1324 } else {
1325 cto->ct_flags |= CT_DATA_OUT;
1326 }
1327 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1328 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1329 cto->ct_scsi_status = cso->scsi_status;
1330 cto->ct_resid = cso->resid;
1331 isp_prt(isp, ISP_LOGTDEBUG0,
1332 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1333 cto->ct_fwhandle, cso->scsi_status, cso->resid,
1334 cso->tag_id);
1335 }
1336 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1337 cto->ct_timeout = 10;
1338 hp = &cto->ct_syshandle;
1339 }
1340
1341 if (isp_save_xs_tgt(isp, ccb, hp)) {
1342 xpt_print_path(ccb->ccb_h.path);
1343 printf("No XFLIST pointers for isp_target_start_ctio\n");
1344 return (CAM_RESRC_UNAVAIL);
1345 }
1346
1347
1348 /*
1349 * Call the dma setup routines for this entry (and any subsequent
1350 * CTIOs) if there's data to move, and then tell the f/w it's got
1351 * new things to play with. As with isp_start's usage of DMA setup,
1352 * any swizzling is done in the machine dependent layer. Because
1353 * of this, we put the request onto the queue area first in native
1354 * format.
1355 */
1356
1357 save_handle = *hp;
1358
1359 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1360 case CMD_QUEUED:
1361 ISP_ADD_REQUEST(isp, nxti);
1362 return (CAM_REQ_INPROG);
1363
1364 case CMD_EAGAIN:
1365 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1366 isp_destroy_tgt_handle(isp, save_handle);
1367 return (CAM_RESRC_UNAVAIL);
1368
1369 default:
1370 isp_destroy_tgt_handle(isp, save_handle);
1371 return (XS_ERR(ccb));
1372 }
1373}
1374
1799static void
1800isp_handle_platform_ctio_fastpost(struct ispsoftc *isp, u_int32_t token)
1801{
1802 union ccb *ccb;
1803 ccb = isp_find_xs_tgt(isp, token & 0xffff);
1804 KASSERT((ccb != NULL),
1805 ("null ccb in isp_handle_platform_ctio_fastpost"));
1806 isp_destroy_tgt_handle(isp, token & 0xffff);
1807 isp_prt(isp, ISP_LOGTDEBUG1, "CTIOx[%x] fastpost complete",
1808 token & 0xffff);
1809 isp_complete_ctio(ccb);
1810}
1811
1812static int
1813isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1814{
1815 return (0); /* XXXX */
1816}
1817
1818static int
1819isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1820{
1821
1822 switch (inp->in_status) {
1823 case IN_PORT_LOGOUT:
1824 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1825 inp->in_iid);
1826 break;
1827 case IN_PORT_CHANGED:
1828 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1829 inp->in_iid);
1830 break;
1831 case IN_GLOBAL_LOGO:
1832 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1833 break;
1834 case IN_ABORT_TASK:
1835 {
1836 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1837 struct ccb_immed_notify *inot = NULL;
1838
1839 if (atp) {
1840 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1841 if (tptr) {
1842 inot = (struct ccb_immed_notify *)
1843 SLIST_FIRST(&tptr->inots);
1844 if (inot) {
1845 tptr->inot_count--;
1846 SLIST_REMOVE_HEAD(&tptr->inots,
1847 sim_links.sle);
1848 isp_prt(isp, ISP_LOGTDEBUG0,
1849 "Take FREE INOT count now %d",
1850 tptr->inot_count);
1851 }
1852 }
1853 isp_prt(isp, ISP_LOGWARN,
1854 "abort task RX_ID %x IID %d state %d",
1855 inp->in_seqid, inp->in_iid, atp->state);
1856 } else {
1857 isp_prt(isp, ISP_LOGWARN,
1858 "abort task RX_ID %x from iid %d, state unknown",
1859 inp->in_seqid, inp->in_iid);
1860 }
1861 if (inot) {
1862 inot->initiator_id = inp->in_iid;
1863 inot->sense_len = 0;
1864 inot->message_args[0] = MSG_ABORT_TAG;
1865 inot->message_args[1] = inp->in_seqid & 0xff;
1866 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1867 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1868 xpt_done((union ccb *)inot);
1869 }
1870 break;
1871 }
1872 default:
1873 break;
1874 }
1875 return (0);
1876}
1877#endif
1878
1879static void
1880isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1881{
1882 struct cam_sim *sim;
1883 struct ispsoftc *isp;
1884
1885 sim = (struct cam_sim *)cbarg;
1886 isp = (struct ispsoftc *) cam_sim_softc(sim);
1887 switch (code) {
1888 case AC_LOST_DEVICE:
1889 if (IS_SCSI(isp)) {
1890 u_int16_t oflags, nflags;
1891 sdparam *sdp = isp->isp_param;
1892 int tgt;
1893
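			/*
			 * A device went away: temporarily set this target's
			 * goal flags to safe/NVRAM-derived values and force a
			 * parameter update, then put the original goals back
			 * so a later renegotiation can restore full speed.
			 */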
1894 tgt = xpt_path_target_id(path);
1895 if (tgt >= 0) {
1896 sdp += cam_sim_bus(sim);
1897 ISP_LOCK(isp);
1898 nflags = sdp->isp_devparam[tgt].nvrm_flags;
1899#ifndef ISP_TARGET_MODE
1900 nflags &= DPARM_SAFE_DFLT;
1901 if (isp->isp_loaded_fw) {
1902 nflags |= DPARM_NARROW | DPARM_ASYNC;
1903 }
1904#else
1905 nflags = DPARM_DEFAULT;
1906#endif
1907 oflags = sdp->isp_devparam[tgt].goal_flags;
1908 sdp->isp_devparam[tgt].goal_flags = nflags;
1909 sdp->isp_devparam[tgt].dev_update = 1;
1910 isp->isp_update |= (1 << cam_sim_bus(sim));
1911 (void) isp_control(isp,
1912 ISPCTL_UPDATE_PARAMS, NULL);
1913 sdp->isp_devparam[tgt].goal_flags = oflags;
1914 ISP_UNLOCK(isp);
1915 }
1916 }
1917 break;
1918 default:
1919 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1920 break;
1921 }
1922}
1923
1924static void
1925isp_poll(struct cam_sim *sim)
1926{
1927 struct ispsoftc *isp = cam_sim_softc(sim);
1928 u_int16_t isr, sema, mbox;
1929
1930 ISP_LOCK(isp);
1931 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1932 isp_intr(isp, isr, sema, mbox);
1933 }
1934 ISP_UNLOCK(isp);
1935}
1936
1937
1938static void
1939isp_watchdog(void *arg)
1940{
1941 XS_T *xs = arg;
1942 struct ispsoftc *isp = XS_ISP(xs);
1943 u_int32_t handle;
1944 int iok;
1945
1946 /*
1947 * We've decided this command is dead. Make sure we're not trying
1948	 * to kill a command that's already dead by getting its handle and
1949	 * seeing whether it's still alive.
1950 */
1951 ISP_LOCK(isp);
1952 iok = isp->isp_osinfo.intsok;
1953 isp->isp_osinfo.intsok = 0;
1954 handle = isp_find_handle(isp, xs);
1955 if (handle) {
1956 u_int16_t isr, sema, mbox;
1957
1958 if (XS_CMD_DONE_P(xs)) {
1959 isp_prt(isp, ISP_LOGDEBUG1,
1960 "watchdog found done cmd (handle 0x%x)", handle);
1961 ISP_UNLOCK(isp);
1962 return;
1963 }
1964
1965 if (XS_CMD_WDOG_P(xs)) {
1966 isp_prt(isp, ISP_LOGDEBUG2,
1967 "recursive watchdog (handle 0x%x)", handle);
1968 ISP_UNLOCK(isp);
1969 return;
1970 }
1971
1972 XS_CMD_S_WDOG(xs);
1973 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1974 isp_intr(isp, isr, sema, mbox);
1975 }
1976 if (XS_CMD_DONE_P(xs)) {
1977 isp_prt(isp, ISP_LOGDEBUG2,
1978 "watchdog cleanup for handle 0x%x", handle);
1979 xpt_done((union ccb *) xs);
1980 } else if (XS_CMD_GRACE_P(xs)) {
1981 /*
1982 * Make sure the command is *really* dead before we
1983 * release the handle (and DMA resources) for reuse.
1984 */
1985 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1986
1987 /*
1988			 * After this point, the command is really dead.
1989 */
1990 if (XS_XFRLEN(xs)) {
1991 ISP_DMAFREE(isp, xs, handle);
1992 }
1993 isp_destroy_handle(isp, handle);
1994 xpt_print_path(xs->ccb_h.path);
1995 isp_prt(isp, ISP_LOGWARN,
1996 "watchdog timeout for handle 0x%x", handle);
1997 XS_SETERR(xs, CAM_CMD_TIMEOUT);
1998 XS_CMD_C_WDOG(xs);
1999 isp_done(xs);
2000 } else {
2001 u_int16_t nxti, optr;
2002 ispreq_t local, *mp= &local, *qe;
2003
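			/*
			 * Not done and not yet in its grace period: re-arm
			 * the watchdog, mark the command as being in grace,
			 * and push a SYNC_ALL marker at the firmware to flush
			 * any pending completions.  If the command still has
			 * not completed by the next timeout, the grace branch
			 * above aborts it for real.
			 */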
2004 XS_CMD_C_WDOG(xs);
2005 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
2006 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
2007 ISP_UNLOCK(isp);
2008 return;
2009 }
2010 XS_CMD_S_GRACE(xs);
2011 MEMZERO((void *) mp, sizeof (*mp));
2012 mp->req_header.rqs_entry_count = 1;
2013 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
2014 mp->req_modifier = SYNC_ALL;
2015 mp->req_target = XS_CHANNEL(xs) << 7;
2016 isp_put_request(isp, mp, qe);
2017 ISP_ADD_REQUEST(isp, nxti);
2018 }
2019 } else {
2020 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
2021 }
2022 isp->isp_osinfo.intsok = iok;
2023 ISP_UNLOCK(isp);
2024}
2025
2026static void
2027isp_kthread(void *arg)
2028{
2029 struct ispsoftc *isp = arg;
2030
2031#ifdef ISP_SMPLOCK
2032 mtx_lock(&isp->isp_lock);
2033#else
2034 mtx_lock(&Giant);
2035#endif
2036 /*
2037	 * The first loop is for the case where we have not yet gotten
2038	 * good fibre channel state.
2039 */
2040 for (;;) {
2041 int wasfrozen;
2042
2043 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
2044 while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
2045 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
2046 if (FCPARAM(isp)->isp_fwstate != FW_READY ||
2047 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
2048 if (FCPARAM(isp)->loop_seen_once == 0 ||
2049 isp->isp_osinfo.ktmature == 0) {
2050 break;
2051 }
2052 }
2053#ifdef ISP_SMPLOCK
2054 msleep(isp_kthread, &isp->isp_lock,
2055 PRIBIO, "isp_fcthrd", hz);
2056#else
2057 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
2058#endif
2059 }
2060
2061 /*
2062 * Even if we didn't get good loop state we may be
2063 * unfreezing the SIMQ so that we can kill off
2064 * commands (if we've never seen loop before, for example).
2065 */
2066 isp->isp_osinfo.ktmature = 1;
2067 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2068 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2069 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2070 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2071 ISPLOCK_2_CAMLOCK(isp);
2072 xpt_release_simq(isp->isp_sim, 1);
2073 CAMLOCK_2_ISPLOCK(isp);
2074 }
2075 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
2076#ifdef ISP_SMPLOCK
2077 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2078#else
2079 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
2080#endif
2081 }
2082}
2083
2084static void
2085isp_action(struct cam_sim *sim, union ccb *ccb)
2086{
2087 int bus, tgt, error;
2088 struct ispsoftc *isp;
2089 struct ccb_trans_settings *cts;
2090
2091 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2092
2093 isp = (struct ispsoftc *)cam_sim_softc(sim);
2094 ccb->ccb_h.sim_priv.entries[0].field = 0;
2095 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2096 if (isp->isp_state != ISP_RUNSTATE &&
2097 ccb->ccb_h.func_code == XPT_SCSI_IO) {
2098 CAMLOCK_2_ISPLOCK(isp);
2099 isp_init(isp);
2100 if (isp->isp_state != ISP_INITSTATE) {
2101 ISP_UNLOCK(isp);
2102 /*
2103 * Lie. Say it was a selection timeout.
2104 */
2105 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2106 xpt_freeze_devq(ccb->ccb_h.path, 1);
2107 xpt_done(ccb);
2108 return;
2109 }
2110 isp->isp_state = ISP_RUNSTATE;
2111 ISPLOCK_2_CAMLOCK(isp);
2112 }
2113 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2114
2115
2116 switch (ccb->ccb_h.func_code) {
2117 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2118 /*
2119 * Do a couple of preliminary checks...
2120 */
2121 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2122 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2123 ccb->ccb_h.status = CAM_REQ_INVALID;
2124 xpt_done(ccb);
2125 break;
2126 }
2127 }
2128#ifdef DIAGNOSTIC
2129 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2130 ccb->ccb_h.status = CAM_PATH_INVALID;
2131 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2132 ccb->ccb_h.status = CAM_PATH_INVALID;
2133 }
2134 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2135 isp_prt(isp, ISP_LOGERR,
2136 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2137 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2138 xpt_done(ccb);
2139 break;
2140 }
2141#endif
2142 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2143 CAMLOCK_2_ISPLOCK(isp);
2144 error = isp_start((XS_T *) ccb);
2145 switch (error) {
2146 case CMD_QUEUED:
2147 ccb->ccb_h.status |= CAM_SIM_QUEUED;
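			/*
			 * The CAM timeout is in milliseconds (CAM_TIME_DEFAULT
			 * means 60 seconds here).  Convert it to ticks and pad
			 * it by a couple of seconds before arming the watchdog.
			 */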
2148 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2149 u_int64_t ticks = (u_int64_t) hz;
2150 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2151 ticks = 60 * 1000 * ticks;
2152 else
2153 ticks = ccb->ccb_h.timeout * hz;
2154 ticks = ((ticks + 999) / 1000) + hz + hz;
2155 if (ticks >= 0x80000000) {
2156 isp_prt(isp, ISP_LOGERR,
2157 "timeout overflow");
2158 ticks = 0x7fffffff;
2159 }
2160 ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2161 (caddr_t)ccb, (int)ticks);
2162 } else {
2163 callout_handle_init(&ccb->ccb_h.timeout_ch);
2164 }
2165 ISPLOCK_2_CAMLOCK(isp);
2166 break;
2167 case CMD_RQLATER:
2168 /*
2169 * This can only happen for Fibre Channel
2170 */
2171 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2172 if (FCPARAM(isp)->loop_seen_once == 0 &&
2173 isp->isp_osinfo.ktmature) {
2174 ISPLOCK_2_CAMLOCK(isp);
2175 XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2176 xpt_done(ccb);
2177 break;
2178 }
2179#ifdef ISP_SMPLOCK
2180 cv_signal(&isp->isp_osinfo.kthread_cv);
2181#else
2182 wakeup(&isp->isp_osinfo.kthread_cv);
2183#endif
2184 isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2185 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2186 ISPLOCK_2_CAMLOCK(isp);
2187 xpt_done(ccb);
2188 break;
2189 case CMD_EAGAIN:
2190 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2191 ISPLOCK_2_CAMLOCK(isp);
2192 xpt_done(ccb);
2193 break;
2194 case CMD_COMPLETE:
2195 isp_done((struct ccb_scsiio *) ccb);
2196 ISPLOCK_2_CAMLOCK(isp);
2197 break;
2198 default:
2199 isp_prt(isp, ISP_LOGERR,
2200 "What's this? 0x%x at %d in file %s",
2201 error, __LINE__, __FILE__);
2202 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2203 xpt_done(ccb);
2204 ISPLOCK_2_CAMLOCK(isp);
2205 }
2206 break;
2207
2208#ifdef ISP_TARGET_MODE
2209 case XPT_EN_LUN: /* Enable LUN as a target */
2210 {
2211 int seq, iok, i;
2212 CAMLOCK_2_ISPLOCK(isp);
2213 iok = isp->isp_osinfo.intsok;
2214 isp->isp_osinfo.intsok = 0;
2215 seq = isp_en_lun(isp, ccb);
2216 if (seq < 0) {
2217 isp->isp_osinfo.intsok = iok;
2218 ISPLOCK_2_CAMLOCK(isp);
2219 xpt_done(ccb);
2220 break;
2221 }
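		/*
		 * Poll the chip by hand (interrupts are not serviced the
		 * normal way while intsok is clear) for up to 30 seconds,
		 * waiting for the lun enable to complete.
		 */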
2222 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) {
2223 u_int16_t isr, sema, mbox;
2224 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2225 isp_intr(isp, isr, sema, mbox);
2226 }
2227 DELAY(1000);
2228 }
2229 isp->isp_osinfo.intsok = iok;
2230 ISPLOCK_2_CAMLOCK(isp);
2231 break;
2232 }
2233 case XPT_NOTIFY_ACK: /* recycle notify ack */
2234 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
2235 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2236 {
2237 tstate_t *tptr =
2238 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2239 if (tptr == NULL) {
2240 ccb->ccb_h.status = CAM_LUN_INVALID;
2241 xpt_done(ccb);
2242 break;
2243 }
2244 ccb->ccb_h.sim_priv.entries[0].field = 0;
2245 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2246 ccb->ccb_h.flags = 0;
2247
2248 CAMLOCK_2_ISPLOCK(isp);
2249 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2250 /*
2251 * Note that the command itself may not be done-
2252 * it may not even have had the first CTIO sent.
2253 */
2254 tptr->atio_count++;
2255 isp_prt(isp, ISP_LOGTDEBUG0,
2256 "Put FREE ATIO, lun %d, count now %d",
2257 ccb->ccb_h.target_lun, tptr->atio_count);
2258 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2259 sim_links.sle);
2260 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2261 tptr->inot_count++;
2262 isp_prt(isp, ISP_LOGTDEBUG0,
2263 "Put FREE INOT, lun %d, count now %d",
2264 ccb->ccb_h.target_lun, tptr->inot_count);
2265 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2266 sim_links.sle);
2267 } else {
2268			isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");
2269 }
2270 rls_lun_statep(isp, tptr);
2271 ccb->ccb_h.status = CAM_REQ_INPROG;
2272 ISPLOCK_2_CAMLOCK(isp);
2273 break;
2274 }
2275 case XPT_CONT_TARGET_IO:
2276 {
2277 CAMLOCK_2_ISPLOCK(isp);
2278 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2279 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2280 isp_prt(isp, ISP_LOGWARN,
2281 "XPT_CONT_TARGET_IO: status 0x%x",
2282 ccb->ccb_h.status);
2283 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2284 ISPLOCK_2_CAMLOCK(isp);
2285 xpt_done(ccb);
2286 } else {
2287 ISPLOCK_2_CAMLOCK(isp);
2288 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2289 }
2290 break;
2291 }
2292#endif
2293 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2294
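		/*
		 * Encode the bus number in the upper 16 bits of the target
		 * argument, which is how ISPCTL_RESET_DEV expects it.
		 */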
2295 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2296 tgt = ccb->ccb_h.target_id;
2297 tgt |= (bus << 16);
2298
2299 CAMLOCK_2_ISPLOCK(isp);
2300 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2301 ISPLOCK_2_CAMLOCK(isp);
2302 if (error) {
2303 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2304 } else {
2305 ccb->ccb_h.status = CAM_REQ_CMP;
2306 }
2307 xpt_done(ccb);
2308 break;
2309 case XPT_ABORT: /* Abort the specified CCB */
2310 {
2311 union ccb *accb = ccb->cab.abort_ccb;
2312 CAMLOCK_2_ISPLOCK(isp);
2313 switch (accb->ccb_h.func_code) {
2314#ifdef ISP_TARGET_MODE
2315 case XPT_ACCEPT_TARGET_IO:
2316 case XPT_IMMED_NOTIFY:
2317 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2318 break;
2319 case XPT_CONT_TARGET_IO:
2320 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2321 ccb->ccb_h.status = CAM_UA_ABORT;
2322 break;
2323#endif
2324 case XPT_SCSI_IO:
2325 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2326 if (error) {
2327 ccb->ccb_h.status = CAM_UA_ABORT;
2328 } else {
2329 ccb->ccb_h.status = CAM_REQ_CMP;
2330 }
2331 break;
2332 default:
2333 ccb->ccb_h.status = CAM_REQ_INVALID;
2334 break;
2335 }
2336 ISPLOCK_2_CAMLOCK(isp);
2337 xpt_done(ccb);
2338 break;
2339 }
2340#ifdef CAM_NEW_TRAN_CODE
2341#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2342#else
2343#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
2344#endif
2345 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2346 cts = &ccb->cts;
2347 if (!IS_CURRENT_SETTINGS(cts)) {
2348 ccb->ccb_h.status = CAM_REQ_INVALID;
2349 xpt_done(ccb);
2350 break;
2351 }
2352 tgt = cts->ccb_h.target_id;
2353 CAMLOCK_2_ISPLOCK(isp);
2354 if (IS_SCSI(isp)) {
2355#ifndef CAM_NEW_TRAN_CODE
2356 sdparam *sdp = isp->isp_param;
2357 u_int16_t *dptr;
2358
2359 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2360
2361 sdp += bus;
2362 /*
2363 * We always update (internally) from goal_flags
2364 * so any request to change settings just gets
2365 * vectored to that location.
2366 */
2367 dptr = &sdp->isp_devparam[tgt].goal_flags;
2368
2369 /*
2370 * Note that these operations affect the
2371			 * goal flags (goal_flags)- not
2372 * the current state flags. Then we mark
2373 * things so that the next operation to
2374 * this HBA will cause the update to occur.
2375 */
2376 if (cts->valid & CCB_TRANS_DISC_VALID) {
2377 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2378 *dptr |= DPARM_DISC;
2379 } else {
2380 *dptr &= ~DPARM_DISC;
2381 }
2382 }
2383 if (cts->valid & CCB_TRANS_TQ_VALID) {
2384 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2385 *dptr |= DPARM_TQING;
2386 } else {
2387 *dptr &= ~DPARM_TQING;
2388 }
2389 }
2390 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2391 switch (cts->bus_width) {
2392 case MSG_EXT_WDTR_BUS_16_BIT:
2393 *dptr |= DPARM_WIDE;
2394 break;
2395 default:
2396 *dptr &= ~DPARM_WIDE;
2397 }
2398 }
2399 /*
2400 * Any SYNC RATE of nonzero and SYNC_OFFSET
2401 * of nonzero will cause us to go to the
2402 * selected (from NVRAM) maximum value for
2403 * this device. At a later point, we'll
2404 * allow finer control.
2405 */
2406 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2407 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2408 (cts->sync_offset > 0)) {
2409 *dptr |= DPARM_SYNC;
2410 } else {
2411 *dptr &= ~DPARM_SYNC;
2412 }
2413 *dptr |= DPARM_SAFE_DFLT;
2414#else
2415 struct ccb_trans_settings_scsi *scsi =
2416 &cts->proto_specific.scsi;
2417 struct ccb_trans_settings_spi *spi =
2418 &cts->xport_specific.spi;
2419 sdparam *sdp = isp->isp_param;
2420 u_int16_t *dptr;
2421
2422 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2423 sdp += bus;
2424 /*
2425 * We always update (internally) from goal_flags
2426 * so any request to change settings just gets
2427 * vectored to that location.
2428 */
2429 dptr = &sdp->isp_devparam[tgt].goal_flags;
2430
2431 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2432 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2433 *dptr |= DPARM_DISC;
2434 else
2435 *dptr &= ~DPARM_DISC;
2436 }
2437
2438 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2439 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2440 *dptr |= DPARM_TQING;
2441 else
2442 *dptr &= ~DPARM_TQING;
2443 }
2444
2445 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2446 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2447 *dptr |= DPARM_WIDE;
2448 else
2449 *dptr &= ~DPARM_WIDE;
2450 }
2451
2452 /*
2453 * XXX: FIX ME
2454 */
2455 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2456 (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2457 (spi->sync_period && spi->sync_offset)) {
2458 *dptr |= DPARM_SYNC;
2459 /*
2460 * XXX: CHECK FOR LEGALITY
2461 */
2462 sdp->isp_devparam[tgt].goal_period =
2463 spi->sync_period;
2464 sdp->isp_devparam[tgt].goal_offset =
2465 spi->sync_offset;
2466 } else {
2467 *dptr &= ~DPARM_SYNC;
2468 }
2469#endif
2470 isp_prt(isp, ISP_LOGDEBUG0,
2471 "SET bus %d targ %d to flags %x off %x per %x",
2472 bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2473 sdp->isp_devparam[tgt].goal_offset,
2474 sdp->isp_devparam[tgt].goal_period);
2475 sdp->isp_devparam[tgt].dev_update = 1;
2476 isp->isp_update |= (1 << bus);
2477 }
2478 ISPLOCK_2_CAMLOCK(isp);
2479 ccb->ccb_h.status = CAM_REQ_CMP;
2480 xpt_done(ccb);
2481 break;
2482 case XPT_GET_TRAN_SETTINGS:
2483 cts = &ccb->cts;
2484 tgt = cts->ccb_h.target_id;
2485 CAMLOCK_2_ISPLOCK(isp);
2486 if (IS_FC(isp)) {
2487#ifndef CAM_NEW_TRAN_CODE
2488 /*
2489 * a lot of normal SCSI things don't make sense.
2490 */
2491 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2492 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2493 /*
2494 * How do you measure the width of a high
2495 * speed serial bus? Well, in bytes.
2496 *
2497 * Offset and period make no sense, though, so we set
2498 * (above) a 'base' transfer speed to be gigabit.
2499 */
2500 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2501#else
2502 fcparam *fcp = isp->isp_param;
2503 struct ccb_trans_settings_fc *fc =
2504 &cts->xport_specific.fc;
2505
2506 cts->protocol = PROTO_SCSI;
2507 cts->protocol_version = SCSI_REV_2;
2508 cts->transport = XPORT_FC;
2509 cts->transport_version = 0;
2510
2511 fc->valid = CTS_FC_VALID_SPEED;
2512 if (fcp->isp_gbspeed == 2)
2513 fc->bitrate = 200000;
2514 else
2515 fc->bitrate = 100000;
2516 if (tgt > 0 && tgt < MAX_FC_TARG) {
2517 struct lportdb *lp = &fcp->portdb[tgt];
2518 fc->wwnn = lp->node_wwn;
2519 fc->wwpn = lp->port_wwn;
2520 fc->port = lp->portid;
2521 fc->valid |= CTS_FC_VALID_WWNN |
2522 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2523 }
2524#endif
2525 } else {
2526#ifdef CAM_NEW_TRAN_CODE
2527 struct ccb_trans_settings_scsi *scsi =
2528 &cts->proto_specific.scsi;
2529 struct ccb_trans_settings_spi *spi =
2530 &cts->xport_specific.spi;
2531#endif
2532 sdparam *sdp = isp->isp_param;
2533 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2534 u_int16_t dval, pval, oval;
2535
2536 sdp += bus;
2537
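			/*
			 * For current settings, force a refresh of this
			 * target's active parameters from the chip first;
			 * otherwise report what NVRAM says.
			 */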
2538 if (IS_CURRENT_SETTINGS(cts)) {
2539 sdp->isp_devparam[tgt].dev_refresh = 1;
2540 isp->isp_update |= (1 << bus);
2541 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2542 NULL);
2543 dval = sdp->isp_devparam[tgt].actv_flags;
2544 oval = sdp->isp_devparam[tgt].actv_offset;
2545 pval = sdp->isp_devparam[tgt].actv_period;
2546 } else {
2547 dval = sdp->isp_devparam[tgt].nvrm_flags;
2548 oval = sdp->isp_devparam[tgt].nvrm_offset;
2549 pval = sdp->isp_devparam[tgt].nvrm_period;
2550 }
2551
2552#ifndef CAM_NEW_TRAN_CODE
2553 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2554
2555 if (dval & DPARM_DISC) {
2556 cts->flags |= CCB_TRANS_DISC_ENB;
2557 }
2558 if (dval & DPARM_TQING) {
2559 cts->flags |= CCB_TRANS_TAG_ENB;
2560 }
2561 if (dval & DPARM_WIDE) {
2562 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2563 } else {
2564 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2565 }
2566 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2567 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2568
2569 if ((dval & DPARM_SYNC) && oval != 0) {
2570 cts->sync_period = pval;
2571 cts->sync_offset = oval;
2572 cts->valid |=
2573 CCB_TRANS_SYNC_RATE_VALID |
2574 CCB_TRANS_SYNC_OFFSET_VALID;
2575 }
2576#else
2577 cts->protocol = PROTO_SCSI;
2578 cts->protocol_version = SCSI_REV_2;
2579 cts->transport = XPORT_SPI;
2580 cts->transport_version = 2;
2581
2582 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2583 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2584 if (dval & DPARM_DISC) {
2585 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2586 }
2587 if (dval & DPARM_TQING) {
2588 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2589 }
2590 if ((dval & DPARM_SYNC) && oval && pval) {
2591 spi->sync_offset = oval;
2592 spi->sync_period = pval;
2593 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2594 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2595 }
2596 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2597 if (dval & DPARM_WIDE) {
2598 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2599 } else {
2600 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2601 }
2602 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2603 scsi->valid = CTS_SCSI_VALID_TQ;
2604 spi->valid |= CTS_SPI_VALID_DISC;
2605 } else {
2606 scsi->valid = 0;
2607 }
2608#endif
2609 isp_prt(isp, ISP_LOGDEBUG0,
2610 "GET %s bus %d targ %d to flags %x off %x per %x",
2611 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2612 bus, tgt, dval, oval, pval);
2613 }
2614 ISPLOCK_2_CAMLOCK(isp);
2615 ccb->ccb_h.status = CAM_REQ_CMP;
2616 xpt_done(ccb);
2617 break;
2618
2619 case XPT_CALC_GEOMETRY:
2620 {
2621 struct ccb_calc_geometry *ccg;
2622
2623 ccg = &ccb->ccg;
2624 if (ccg->block_size == 0) {
2625 isp_prt(isp, ISP_LOGERR,
2626 "%d.%d XPT_CALC_GEOMETRY block size 0?",
2627 ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2628 ccb->ccb_h.status = CAM_REQ_INVALID;
2629 xpt_done(ccb);
2630 break;
2631 }
2632 cam_calc_geometry(ccg, /*extended*/1);
2633 xpt_done(ccb);
2634 break;
2635 }
2636 case XPT_RESET_BUS: /* Reset the specified bus */
2637 bus = cam_sim_bus(sim);
2638 CAMLOCK_2_ISPLOCK(isp);
2639 error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2640 ISPLOCK_2_CAMLOCK(isp);
2641 if (error)
2642 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2643 else {
2644 if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2645 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2646 else if (isp->isp_path != NULL)
2647 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2648 ccb->ccb_h.status = CAM_REQ_CMP;
2649 }
2650 xpt_done(ccb);
2651 break;
2652
2653 case XPT_TERM_IO: /* Terminate the I/O process */
2654 ccb->ccb_h.status = CAM_REQ_INVALID;
2655 xpt_done(ccb);
2656 break;
2657
2658 case XPT_PATH_INQ: /* Path routing inquiry */
2659 {
2660 struct ccb_pathinq *cpi = &ccb->cpi;
2661
2662 cpi->version_num = 1;
2663#ifdef ISP_TARGET_MODE
2664 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2665#else
2666 cpi->target_sprt = 0;
2667#endif
2668 cpi->hba_eng_cnt = 0;
2669 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2670 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2671 cpi->bus_id = cam_sim_bus(sim);
2672 if (IS_FC(isp)) {
2673 cpi->hba_misc = PIM_NOBUSRESET;
2674 /*
2675 * Because our loop ID can shift from time to time,
2676 * make our initiator ID out of range of our bus.
2677 */
2678 cpi->initiator_id = cpi->max_target + 1;
2679
2680 /*
2681 * Set base transfer capabilities for Fibre Channel.
2682 * Technically not correct because we don't know
2683 * what media we're running on top of- but we'll
2684 * look good if we always say 100MB/s.
2685 */
2686 if (FCPARAM(isp)->isp_gbspeed == 2)
2687 cpi->base_transfer_speed = 200000;
2688 else
2689 cpi->base_transfer_speed = 100000;
2690 cpi->hba_inquiry = PI_TAG_ABLE;
2691#ifdef CAM_NEW_TRAN_CODE
2692 cpi->transport = XPORT_FC;
2693 cpi->transport_version = 0; /* WHAT'S THIS FOR? */
2694#endif
2695 } else {
2696 sdparam *sdp = isp->isp_param;
2697 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2698 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2699 cpi->hba_misc = 0;
2700 cpi->initiator_id = sdp->isp_initiator_id;
2701 cpi->base_transfer_speed = 3300;
2702#ifdef CAM_NEW_TRAN_CODE
2703 cpi->transport = XPORT_SPI;
2704 cpi->transport_version = 2; /* WHAT'S THIS FOR? */
2705#endif
2706 }
2707#ifdef CAM_NEW_TRAN_CODE
2708 cpi->protocol = PROTO_SCSI;
2709 cpi->protocol_version = SCSI_REV_2;
2710#endif
2711 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2712 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2713 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2714 cpi->unit_number = cam_sim_unit(sim);
2715 cpi->ccb_h.status = CAM_REQ_CMP;
2716 xpt_done(ccb);
2717 break;
2718 }
2719 default:
2720 ccb->ccb_h.status = CAM_REQ_INVALID;
2721 xpt_done(ccb);
2722 break;
2723 }
2724}
2725
2726#define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2727void
2728isp_done(struct ccb_scsiio *sccb)
2729{
2730 struct ispsoftc *isp = XS_ISP(sccb);
2731
2732 if (XS_NOERR(sccb))
2733 XS_SETERR(sccb, CAM_REQ_CMP);
2734
2735 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2736 (sccb->scsi_status != SCSI_STATUS_OK)) {
2737 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2738 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2739 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2740 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2741 } else {
2742 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2743 }
2744 }
2745
2746 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
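	/*
	 * For any completion other than CAM_REQ_CMP, freeze the device
	 * queue (if it isn't frozen already) and mark CAM_DEV_QFRZN so
	 * the requester knows it has to release it.
	 */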
2747 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2748 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2749 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2750 xpt_freeze_devq(sccb->ccb_h.path, 1);
2751 isp_prt(isp, ISP_LOGDEBUG0,
2752 "freeze devq %d.%d cam sts %x scsi sts %x",
2753 sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2754 sccb->ccb_h.status, sccb->scsi_status);
2755 }
2756 }
2757
2758 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2759 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2760 xpt_print_path(sccb->ccb_h.path);
2761 isp_prt(isp, ISP_LOGINFO,
2762 "cam completion status 0x%x", sccb->ccb_h.status);
2763 }
2764
2765 XS_CMD_S_DONE(sccb);
2766 if (XS_CMD_WDOG_P(sccb) == 0) {
2767 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2768 if (XS_CMD_GRACE_P(sccb)) {
2769 isp_prt(isp, ISP_LOGDEBUG2,
2770 "finished command on borrowed time");
2771 }
2772 XS_CMD_S_CLEAR(sccb);
2773 ISPLOCK_2_CAMLOCK(isp);
2774 xpt_done((union ccb *) sccb);
2775 CAMLOCK_2_ISPLOCK(isp);
2776 }
2777}
2778
2779int
2780isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2781{
2782 int bus, rv = 0;
2783 switch (cmd) {
2784 case ISPASYNC_NEW_TGT_PARAMS:
2785 {
2786#ifdef CAM_NEW_TRAN_CODE
2787 struct ccb_trans_settings_scsi *scsi;
2788 struct ccb_trans_settings_spi *spi;
2789#endif
2790 int flags, tgt;
2791 sdparam *sdp = isp->isp_param;
2792 struct ccb_trans_settings cts;
2793 struct cam_path *tmppath;
2794
2795 bzero(&cts, sizeof (struct ccb_trans_settings));
2796
2797 tgt = *((int *)arg);
2798 bus = (tgt >> 16) & 0xffff;
2799 tgt &= 0xffff;
2800 sdp += bus;
2801 ISPLOCK_2_CAMLOCK(isp);
2802 if (xpt_create_path(&tmppath, NULL,
2803 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2804 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2805 CAMLOCK_2_ISPLOCK(isp);
2806 isp_prt(isp, ISP_LOGWARN,
2807 "isp_async cannot make temp path for %d.%d",
2808 tgt, bus);
2809 rv = -1;
2810 break;
2811 }
2812 CAMLOCK_2_ISPLOCK(isp);
2813 flags = sdp->isp_devparam[tgt].actv_flags;
2814#ifdef CAM_NEW_TRAN_CODE
2815 cts.type = CTS_TYPE_CURRENT_SETTINGS;
2816 cts.protocol = PROTO_SCSI;
2817 cts.transport = XPORT_SPI;
2818
2819 scsi = &cts.proto_specific.scsi;
2820 spi = &cts.xport_specific.spi;
2821
2822 if (flags & DPARM_TQING) {
2823 scsi->valid |= CTS_SCSI_VALID_TQ;
2824 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2825 spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2826 }
2827
2828 if (flags & DPARM_DISC) {
2829 spi->valid |= CTS_SPI_VALID_DISC;
2830 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2831 }
2832		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2833 if (flags & DPARM_WIDE) {
2834 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2835 } else {
2836 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2837 }
2838 if (flags & DPARM_SYNC) {
2839 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2840 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2841 spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2842 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2843 }
2844#else
2845 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2846 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2847 if (flags & DPARM_DISC) {
2848 cts.flags |= CCB_TRANS_DISC_ENB;
2849 }
2850 if (flags & DPARM_TQING) {
2851 cts.flags |= CCB_TRANS_TAG_ENB;
2852 }
2853 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2854		cts.bus_width = (flags & DPARM_WIDE)?
2855		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2856 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2857 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2858 if (flags & DPARM_SYNC) {
2859 cts.valid |=
2860 CCB_TRANS_SYNC_RATE_VALID |
2861 CCB_TRANS_SYNC_OFFSET_VALID;
2862 }
2863#endif
2864 isp_prt(isp, ISP_LOGDEBUG2,
2865 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2866 bus, tgt, sdp->isp_devparam[tgt].actv_period,
2867 sdp->isp_devparam[tgt].actv_offset, flags);
2868 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2869 ISPLOCK_2_CAMLOCK(isp);
2870 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2871 xpt_free_path(tmppath);
2872 CAMLOCK_2_ISPLOCK(isp);
2873 break;
2874 }
2875 case ISPASYNC_BUS_RESET:
2876 bus = *((int *)arg);
2877 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2878 bus);
2879 if (bus > 0 && isp->isp_path2) {
2880 ISPLOCK_2_CAMLOCK(isp);
2881 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2882 CAMLOCK_2_ISPLOCK(isp);
2883 } else if (isp->isp_path) {
2884 ISPLOCK_2_CAMLOCK(isp);
2885 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2886 CAMLOCK_2_ISPLOCK(isp);
2887 }
2888 break;
2889 case ISPASYNC_LIP:
2890 if (isp->isp_path) {
2891 isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2892 }
2893 isp_prt(isp, ISP_LOGINFO, "LIP Received");
2894 break;
2895 case ISPASYNC_LOOP_RESET:
2896 if (isp->isp_path) {
2897 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2898 }
2899 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2900 break;
2901 case ISPASYNC_LOOP_DOWN:
2902 if (isp->isp_path) {
2903 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2904 }
2905 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2906 break;
2907 case ISPASYNC_LOOP_UP:
2908 /*
2909 * Now we just note that Loop has come up. We don't
2910 * actually do anything because we're waiting for a
2911 * Change Notify before activating the FC cleanup
2912 * thread to look at the state of the loop again.
2913 */
2914 isp_prt(isp, ISP_LOGINFO, "Loop UP");
2915 break;
2916 case ISPASYNC_PROMENADE:
2917 {
2918 struct cam_path *tmppath;
2919 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2920 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2921 static const char *roles[4] = {
2922 "(none)", "Target", "Initiator", "Target/Initiator"
2923 };
2924 fcparam *fcp = isp->isp_param;
2925 int tgt = *((int *) arg);
2926 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2927 struct lportdb *lp = &fcp->portdb[tgt];
2928
2929 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2930 roles[lp->roles & 0x3],
2931 (lp->valid)? "Arrived" : "Departed",
2932 (u_int32_t) (lp->port_wwn >> 32),
2933 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2934 (u_int32_t) (lp->node_wwn >> 32),
2935 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2936
2937 ISPLOCK_2_CAMLOCK(isp);
2938 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2939 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2940 CAMLOCK_2_ISPLOCK(isp);
2941 break;
2942 }
2943 /*
2944 * Policy: only announce targets.
2945 */
2946 if (lp->roles & is_tgt_mask) {
2947 if (lp->valid) {
2948 xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2949 } else {
2950 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2951 }
2952 }
2953 xpt_free_path(tmppath);
2954 CAMLOCK_2_ISPLOCK(isp);
2955 break;
2956 }
2957 case ISPASYNC_CHANGE_NOTIFY:
2958 if (arg == ISPASYNC_CHANGE_PDB) {
2959 isp_prt(isp, ISP_LOGINFO,
2960 "Port Database Changed");
2961 } else if (arg == ISPASYNC_CHANGE_SNS) {
2962 isp_prt(isp, ISP_LOGINFO,
2963 "Name Server Database Changed");
2964 }
2965#ifdef ISP_SMPLOCK
2966 cv_signal(&isp->isp_osinfo.kthread_cv);
2967#else
2968 wakeup(&isp->isp_osinfo.kthread_cv);
2969#endif
2970 break;
2971 case ISPASYNC_FABRIC_DEV:
2972 {
2973 int target, base, lim;
2974 fcparam *fcp = isp->isp_param;
2975 struct lportdb *lp = NULL;
2976 struct lportdb *clp = (struct lportdb *) arg;
2977 char *pt;
2978
2979 switch (clp->port_type) {
2980 case 1:
2981 pt = " N_Port";
2982 break;
2983 case 2:
2984 pt = " NL_Port";
2985 break;
2986 case 3:
2987 pt = "F/NL_Port";
2988 break;
2989 case 0x7f:
2990 pt = " Nx_Port";
2991 break;
2992 case 0x81:
2993			pt = " F_Port";
2994 break;
2995 case 0x82:
2996 pt = " FL_Port";
2997 break;
2998 case 0x84:
2999			pt = " E_Port";
3000 break;
3001 default:
3002 pt = " ";
3003 break;
3004 }
3005
3006 isp_prt(isp, ISP_LOGINFO,
3007 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
3008
3009 /*
3010 * If we don't have an initiator role we bail.
3011 *
3012 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
3013 */
3014
3015 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
3016 break;
3017 }
3018
3019 /*
3020 * Is this entry for us? If so, we bail.
3021 */
3022
3023 if (fcp->isp_portid == clp->portid) {
3024 break;
3025 }
3026
3027 /*
3028 * Else, the default policy is to find room for it in
3029 * our local port database. Later, when we execute
3030 * the call to isp_pdb_sync either this newly arrived
3031 * or already logged in device will be (re)announced.
3032 */
3033
3034 if (fcp->isp_topo == TOPO_FL_PORT)
3035 base = FC_SNS_ID+1;
3036 else
3037 base = 0;
3038
3039 if (fcp->isp_topo == TOPO_N_PORT)
3040 lim = 1;
3041 else
3042 lim = MAX_FC_TARG;
3043
3044 /*
3045 * Is it already in our list?
3046 */
3047 for (target = base; target < lim; target++) {
3048 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
3049 continue;
3050 }
3051 lp = &fcp->portdb[target];
3052 if (lp->port_wwn == clp->port_wwn &&
3053 lp->node_wwn == clp->node_wwn) {
3054 lp->fabric_dev = 1;
3055 break;
3056 }
3057 }
3058 if (target < lim) {
3059 break;
3060 }
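		/*
		 * Not already known- find a free slot (an empty port WWN)
		 * in the local port database for this fabric device.
		 */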
3061 for (target = base; target < lim; target++) {
3062 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
3063 continue;
3064 }
3065 lp = &fcp->portdb[target];
3066 if (lp->port_wwn == 0) {
3067 break;
3068 }
3069 }
3070 if (target == lim) {
3071 isp_prt(isp, ISP_LOGWARN,
3072 "out of space for fabric devices");
3073 break;
3074 }
3075 lp->port_type = clp->port_type;
3076 lp->fc4_type = clp->fc4_type;
3077 lp->node_wwn = clp->node_wwn;
3078 lp->port_wwn = clp->port_wwn;
3079 lp->portid = clp->portid;
3080 lp->fabric_dev = 1;
3081 break;
3082 }
3083#ifdef ISP_TARGET_MODE
1803static int
1804isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1805{
1806 return (0); /* XXXX */
1807}
1808
1809static int
1810isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1811{
1812
1813 switch (inp->in_status) {
1814 case IN_PORT_LOGOUT:
1815 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1816 inp->in_iid);
1817 break;
1818 case IN_PORT_CHANGED:
1819 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1820 inp->in_iid);
1821 break;
1822 case IN_GLOBAL_LOGO:
1823 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1824 break;
1825 case IN_ABORT_TASK:
1826 {
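		/*
		 * Look up the command being aborted; if a free immediate
		 * notify CCB is queued for its lun, pass the abort up to
		 * the peripheral driver as a MSG_ABORT_TAG message.
		 */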
1827 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1828 struct ccb_immed_notify *inot = NULL;
1829
1830 if (atp) {
1831 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1832 if (tptr) {
1833 inot = (struct ccb_immed_notify *)
1834 SLIST_FIRST(&tptr->inots);
1835 if (inot) {
1836 tptr->inot_count--;
1837 SLIST_REMOVE_HEAD(&tptr->inots,
1838 sim_links.sle);
1839 isp_prt(isp, ISP_LOGTDEBUG0,
1840 "Take FREE INOT count now %d",
1841 tptr->inot_count);
1842 }
1843 }
1844 isp_prt(isp, ISP_LOGWARN,
1845 "abort task RX_ID %x IID %d state %d",
1846 inp->in_seqid, inp->in_iid, atp->state);
1847 } else {
1848 isp_prt(isp, ISP_LOGWARN,
1849 "abort task RX_ID %x from iid %d, state unknown",
1850 inp->in_seqid, inp->in_iid);
1851 }
1852 if (inot) {
1853 inot->initiator_id = inp->in_iid;
1854 inot->sense_len = 0;
1855 inot->message_args[0] = MSG_ABORT_TAG;
1856 inot->message_args[1] = inp->in_seqid & 0xff;
1857 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1858 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1859 xpt_done((union ccb *)inot);
1860 }
1861 break;
1862 }
1863 default:
1864 break;
1865 }
1866 return (0);
1867}
1868#endif
1869
1870static void
1871isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1872{
1873 struct cam_sim *sim;
1874 struct ispsoftc *isp;
1875
1876 sim = (struct cam_sim *)cbarg;
1877 isp = (struct ispsoftc *) cam_sim_softc(sim);
1878 switch (code) {
1879 case AC_LOST_DEVICE:
1880 if (IS_SCSI(isp)) {
1881 u_int16_t oflags, nflags;
1882 sdparam *sdp = isp->isp_param;
1883 int tgt;
1884
1885 tgt = xpt_path_target_id(path);
1886 if (tgt >= 0) {
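				/*
				 * A device went away: temporarily drop this
				 * target's negotiation goals back to safe
				 * defaults, push the update to the chip, and
				 * then restore the previous goal flags for
				 * the next arrival.
				 */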
1887 sdp += cam_sim_bus(sim);
1888 ISP_LOCK(isp);
1889 nflags = sdp->isp_devparam[tgt].nvrm_flags;
1890#ifndef ISP_TARGET_MODE
1891 nflags &= DPARM_SAFE_DFLT;
1892 if (isp->isp_loaded_fw) {
1893 nflags |= DPARM_NARROW | DPARM_ASYNC;
1894 }
1895#else
1896 nflags = DPARM_DEFAULT;
1897#endif
1898 oflags = sdp->isp_devparam[tgt].goal_flags;
1899 sdp->isp_devparam[tgt].goal_flags = nflags;
1900 sdp->isp_devparam[tgt].dev_update = 1;
1901 isp->isp_update |= (1 << cam_sim_bus(sim));
1902 (void) isp_control(isp,
1903 ISPCTL_UPDATE_PARAMS, NULL);
1904 sdp->isp_devparam[tgt].goal_flags = oflags;
1905 ISP_UNLOCK(isp);
1906 }
1907 }
1908 break;
1909 default:
1910 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1911 break;
1912 }
1913}
1914
1915static void
1916isp_poll(struct cam_sim *sim)
1917{
1918 struct ispsoftc *isp = cam_sim_softc(sim);
1919 u_int16_t isr, sema, mbox;
1920
1921 ISP_LOCK(isp);
1922 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1923 isp_intr(isp, isr, sema, mbox);
1924 }
1925 ISP_UNLOCK(isp);
1926}
1927
1928
1929static void
1930isp_watchdog(void *arg)
1931{
1932 XS_T *xs = arg;
1933 struct ispsoftc *isp = XS_ISP(xs);
1934 u_int32_t handle;
1935 int iok;
1936
1937 /*
1938 * We've decided this command is dead. Make sure we're not trying
1939	 * to kill a command that's already dead by getting its handle
1940	 * and seeing whether it's still alive.
1941 */
1942 ISP_LOCK(isp);
1943 iok = isp->isp_osinfo.intsok;
1944 isp->isp_osinfo.intsok = 0;
1945 handle = isp_find_handle(isp, xs);
1946 if (handle) {
1947 u_int16_t isr, sema, mbox;
1948
1949 if (XS_CMD_DONE_P(xs)) {
1950 isp_prt(isp, ISP_LOGDEBUG1,
1951 "watchdog found done cmd (handle 0x%x)", handle);
1952 ISP_UNLOCK(isp);
1953 return;
1954 }
1955
1956 if (XS_CMD_WDOG_P(xs)) {
1957 isp_prt(isp, ISP_LOGDEBUG2,
1958 "recursive watchdog (handle 0x%x)", handle);
1959 ISP_UNLOCK(isp);
1960 return;
1961 }
1962
1963 XS_CMD_S_WDOG(xs);
1964 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1965 isp_intr(isp, isr, sema, mbox);
1966 }
1967 if (XS_CMD_DONE_P(xs)) {
1968 isp_prt(isp, ISP_LOGDEBUG2,
1969 "watchdog cleanup for handle 0x%x", handle);
1970 xpt_done((union ccb *) xs);
1971 } else if (XS_CMD_GRACE_P(xs)) {
1972 /*
1973 * Make sure the command is *really* dead before we
1974 * release the handle (and DMA resources) for reuse.
1975 */
1976 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1977
1978 /*
1979			 * After this point, the command is really dead.
1980 */
1981 if (XS_XFRLEN(xs)) {
1982 ISP_DMAFREE(isp, xs, handle);
1983 }
1984 isp_destroy_handle(isp, handle);
1985 xpt_print_path(xs->ccb_h.path);
1986 isp_prt(isp, ISP_LOGWARN,
1987 "watchdog timeout for handle 0x%x", handle);
1988 XS_SETERR(xs, CAM_CMD_TIMEOUT);
1989 XS_CMD_C_WDOG(xs);
1990 isp_done(xs);
1991 } else {
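			/*
			 * First expiry: let the command run on borrowed time.
			 * Reschedule the watchdog, mark the command as being
			 * in its grace period, and push a SYNC_ALL marker
			 * through the request queue to flush out any
			 * completions the chip may be holding.
			 */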
1992 u_int16_t nxti, optr;
1993			ispreq_t local, *mp = &local, *qe;
1994
1995 XS_CMD_C_WDOG(xs);
1996 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1997 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1998 ISP_UNLOCK(isp);
1999 return;
2000 }
2001 XS_CMD_S_GRACE(xs);
2002 MEMZERO((void *) mp, sizeof (*mp));
2003 mp->req_header.rqs_entry_count = 1;
2004 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
2005 mp->req_modifier = SYNC_ALL;
2006 mp->req_target = XS_CHANNEL(xs) << 7;
2007 isp_put_request(isp, mp, qe);
2008 ISP_ADD_REQUEST(isp, nxti);
2009 }
2010 } else {
2011 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
2012 }
2013 isp->isp_osinfo.intsok = iok;
2014 ISP_UNLOCK(isp);
2015}
2016
2017static void
2018isp_kthread(void *arg)
2019{
2020 struct ispsoftc *isp = arg;
2021
2022#ifdef ISP_SMPLOCK
2023 mtx_lock(&isp->isp_lock);
2024#else
2025 mtx_lock(&Giant);
2026#endif
2027 /*
2028	 * The first loop is for the case where we have yet to have
2029	 * gotten good fibre channel state.
2030 */
2031 for (;;) {
2032 int wasfrozen;
2033
2034 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
2035 while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
2036 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
2037 if (FCPARAM(isp)->isp_fwstate != FW_READY ||
2038 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
2039 if (FCPARAM(isp)->loop_seen_once == 0 ||
2040 isp->isp_osinfo.ktmature == 0) {
2041 break;
2042 }
2043 }
2044#ifdef ISP_SMPLOCK
2045 msleep(isp_kthread, &isp->isp_lock,
2046 PRIBIO, "isp_fcthrd", hz);
2047#else
2048 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
2049#endif
2050 }
2051
2052 /*
2053 * Even if we didn't get good loop state we may be
2054 * unfreezing the SIMQ so that we can kill off
2055 * commands (if we've never seen loop before, for example).
2056 */
2057 isp->isp_osinfo.ktmature = 1;
2058 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2059 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2060 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2061 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2062 ISPLOCK_2_CAMLOCK(isp);
2063 xpt_release_simq(isp->isp_sim, 1);
2064 CAMLOCK_2_ISPLOCK(isp);
2065 }
2066 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
2067#ifdef ISP_SMPLOCK
2068 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2069#else
2070 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
2071#endif
2072 }
2073}
2074
2075static void
2076isp_action(struct cam_sim *sim, union ccb *ccb)
2077{
2078 int bus, tgt, error;
2079 struct ispsoftc *isp;
2080 struct ccb_trans_settings *cts;
2081
2082 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2083
2084 isp = (struct ispsoftc *)cam_sim_softc(sim);
2085 ccb->ccb_h.sim_priv.entries[0].field = 0;
2086 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2087 if (isp->isp_state != ISP_RUNSTATE &&
2088 ccb->ccb_h.func_code == XPT_SCSI_IO) {
2089 CAMLOCK_2_ISPLOCK(isp);
2090 isp_init(isp);
2091 if (isp->isp_state != ISP_INITSTATE) {
2092 ISP_UNLOCK(isp);
2093 /*
2094 * Lie. Say it was a selection timeout.
2095 */
2096 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2097 xpt_freeze_devq(ccb->ccb_h.path, 1);
2098 xpt_done(ccb);
2099 return;
2100 }
2101 isp->isp_state = ISP_RUNSTATE;
2102 ISPLOCK_2_CAMLOCK(isp);
2103 }
2104 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2105
2106
2107 switch (ccb->ccb_h.func_code) {
2108 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2109 /*
2110 * Do a couple of preliminary checks...
2111 */
2112 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2113 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2114 ccb->ccb_h.status = CAM_REQ_INVALID;
2115 xpt_done(ccb);
2116 break;
2117 }
2118 }
2119#ifdef DIAGNOSTIC
2120 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2121 ccb->ccb_h.status = CAM_PATH_INVALID;
2122 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2123 ccb->ccb_h.status = CAM_PATH_INVALID;
2124 }
2125 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2126 isp_prt(isp, ISP_LOGERR,
2127 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2128 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2129 xpt_done(ccb);
2130 break;
2131 }
2132#endif
2133 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2134 CAMLOCK_2_ISPLOCK(isp);
2135 error = isp_start((XS_T *) ccb);
2136 switch (error) {
2137 case CMD_QUEUED:
2138 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2139 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2140 u_int64_t ticks = (u_int64_t) hz;
2141 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2142 ticks = 60 * 1000 * ticks;
2143 else
2144 ticks = ccb->ccb_h.timeout * hz;
2145 ticks = ((ticks + 999) / 1000) + hz + hz;
2146 if (ticks >= 0x80000000) {
2147 isp_prt(isp, ISP_LOGERR,
2148 "timeout overflow");
2149 ticks = 0x7fffffff;
2150 }
2151 ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2152 (caddr_t)ccb, (int)ticks);
2153 } else {
2154 callout_handle_init(&ccb->ccb_h.timeout_ch);
2155 }
2156 ISPLOCK_2_CAMLOCK(isp);
2157 break;
2158 case CMD_RQLATER:
2159 /*
2160 * This can only happen for Fibre Channel
2161 */
2162 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2163 if (FCPARAM(isp)->loop_seen_once == 0 &&
2164 isp->isp_osinfo.ktmature) {
2165 ISPLOCK_2_CAMLOCK(isp);
2166 XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2167 xpt_done(ccb);
2168 break;
2169 }
2170#ifdef ISP_SMPLOCK
2171 cv_signal(&isp->isp_osinfo.kthread_cv);
2172#else
2173 wakeup(&isp->isp_osinfo.kthread_cv);
2174#endif
2175 isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2176 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2177 ISPLOCK_2_CAMLOCK(isp);
2178 xpt_done(ccb);
2179 break;
2180 case CMD_EAGAIN:
2181 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2182 ISPLOCK_2_CAMLOCK(isp);
2183 xpt_done(ccb);
2184 break;
2185 case CMD_COMPLETE:
2186 isp_done((struct ccb_scsiio *) ccb);
2187 ISPLOCK_2_CAMLOCK(isp);
2188 break;
2189 default:
2190 isp_prt(isp, ISP_LOGERR,
2191 "What's this? 0x%x at %d in file %s",
2192 error, __LINE__, __FILE__);
2193 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2194 xpt_done(ccb);
2195 ISPLOCK_2_CAMLOCK(isp);
2196 }
2197 break;
2198
2199#ifdef ISP_TARGET_MODE
2200 case XPT_EN_LUN: /* Enable LUN as a target */
2201 {
2202 int seq, iok, i;
2203 CAMLOCK_2_ISPLOCK(isp);
2204 iok = isp->isp_osinfo.intsok;
2205 isp->isp_osinfo.intsok = 0;
2206 seq = isp_en_lun(isp, ccb);
2207 if (seq < 0) {
2208 isp->isp_osinfo.intsok = iok;
2209 ISPLOCK_2_CAMLOCK(isp);
2210 xpt_done(ccb);
2211 break;
2212 }
2213 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) {
2214 u_int16_t isr, sema, mbox;
2215 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2216 isp_intr(isp, isr, sema, mbox);
2217 }
2218 DELAY(1000);
2219 }
2220 isp->isp_osinfo.intsok = iok;
2221 ISPLOCK_2_CAMLOCK(isp);
2222 break;
2223 }
2224 case XPT_NOTIFY_ACK: /* recycle notify ack */
2225 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
2226 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2227 {
2228 tstate_t *tptr =
2229 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2230 if (tptr == NULL) {
2231 ccb->ccb_h.status = CAM_LUN_INVALID;
2232 xpt_done(ccb);
2233 break;
2234 }
2235 ccb->ccb_h.sim_priv.entries[0].field = 0;
2236 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2237 ccb->ccb_h.flags = 0;
2238
2239 CAMLOCK_2_ISPLOCK(isp);
2240 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2241 /*
2242 * Note that the command itself may not be done-
2243 * it may not even have had the first CTIO sent.
2244 */
2245 tptr->atio_count++;
2246 isp_prt(isp, ISP_LOGTDEBUG0,
2247 "Put FREE ATIO, lun %d, count now %d",
2248 ccb->ccb_h.target_lun, tptr->atio_count);
2249 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2250 sim_links.sle);
2251 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2252 tptr->inot_count++;
2253 isp_prt(isp, ISP_LOGTDEBUG0,
2254 "Put FREE INOT, lun %d, count now %d",
2255 ccb->ccb_h.target_lun, tptr->inot_count);
2256 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2257 sim_links.sle);
2258 } else {
2259 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");;
2260 }
2261 rls_lun_statep(isp, tptr);
2262 ccb->ccb_h.status = CAM_REQ_INPROG;
2263 ISPLOCK_2_CAMLOCK(isp);
2264 break;
2265 }
2266 case XPT_CONT_TARGET_IO:
2267 {
2268 CAMLOCK_2_ISPLOCK(isp);
2269 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2270 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2271 isp_prt(isp, ISP_LOGWARN,
2272 "XPT_CONT_TARGET_IO: status 0x%x",
2273 ccb->ccb_h.status);
2274 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2275 ISPLOCK_2_CAMLOCK(isp);
2276 xpt_done(ccb);
2277 } else {
2278 ISPLOCK_2_CAMLOCK(isp);
2279 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2280 }
2281 break;
2282 }
2283#endif
2284 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2285
2286 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2287 tgt = ccb->ccb_h.target_id;
2288 tgt |= (bus << 16);
2289
2290 CAMLOCK_2_ISPLOCK(isp);
2291 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2292 ISPLOCK_2_CAMLOCK(isp);
2293 if (error) {
2294 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2295 } else {
2296 ccb->ccb_h.status = CAM_REQ_CMP;
2297 }
2298 xpt_done(ccb);
2299 break;
2300 case XPT_ABORT: /* Abort the specified CCB */
2301 {
2302 union ccb *accb = ccb->cab.abort_ccb;
2303 CAMLOCK_2_ISPLOCK(isp);
2304 switch (accb->ccb_h.func_code) {
2305#ifdef ISP_TARGET_MODE
2306 case XPT_ACCEPT_TARGET_IO:
2307 case XPT_IMMED_NOTIFY:
2308 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2309 break;
2310 case XPT_CONT_TARGET_IO:
2311 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2312 ccb->ccb_h.status = CAM_UA_ABORT;
2313 break;
2314#endif
2315 case XPT_SCSI_IO:
2316 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2317 if (error) {
2318 ccb->ccb_h.status = CAM_UA_ABORT;
2319 } else {
2320 ccb->ccb_h.status = CAM_REQ_CMP;
2321 }
2322 break;
2323 default:
2324 ccb->ccb_h.status = CAM_REQ_INVALID;
2325 break;
2326 }
2327 ISPLOCK_2_CAMLOCK(isp);
2328 xpt_done(ccb);
2329 break;
2330 }
2331#ifdef CAM_NEW_TRAN_CODE
2332#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2333#else
2334#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
2335#endif
2336 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2337 cts = &ccb->cts;
2338 if (!IS_CURRENT_SETTINGS(cts)) {
2339 ccb->ccb_h.status = CAM_REQ_INVALID;
2340 xpt_done(ccb);
2341 break;
2342 }
2343 tgt = cts->ccb_h.target_id;
2344 CAMLOCK_2_ISPLOCK(isp);
2345 if (IS_SCSI(isp)) {
2346#ifndef CAM_NEW_TRAN_CODE
2347 sdparam *sdp = isp->isp_param;
2348 u_int16_t *dptr;
2349
2350 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2351
2352 sdp += bus;
2353 /*
2354 * We always update (internally) from goal_flags
2355 * so any request to change settings just gets
2356 * vectored to that location.
2357 */
2358 dptr = &sdp->isp_devparam[tgt].goal_flags;
2359
2360 /*
2361 * Note that these operations affect the
2362 * the goal flags (goal_flags)- not
2363 * the current state flags. Then we mark
2364 * things so that the next operation to
2365 * this HBA will cause the update to occur.
2366 */
2367 if (cts->valid & CCB_TRANS_DISC_VALID) {
2368 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2369 *dptr |= DPARM_DISC;
2370 } else {
2371 *dptr &= ~DPARM_DISC;
2372 }
2373 }
2374 if (cts->valid & CCB_TRANS_TQ_VALID) {
2375 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2376 *dptr |= DPARM_TQING;
2377 } else {
2378 *dptr &= ~DPARM_TQING;
2379 }
2380 }
2381 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2382 switch (cts->bus_width) {
2383 case MSG_EXT_WDTR_BUS_16_BIT:
2384 *dptr |= DPARM_WIDE;
2385 break;
2386 default:
2387 *dptr &= ~DPARM_WIDE;
2388 }
2389 }
2390 /*
2391 * Any SYNC RATE of nonzero and SYNC_OFFSET
2392 * of nonzero will cause us to go to the
2393 * selected (from NVRAM) maximum value for
2394 * this device. At a later point, we'll
2395 * allow finer control.
2396 */
2397 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2398 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2399 (cts->sync_offset > 0)) {
2400 *dptr |= DPARM_SYNC;
2401 } else {
2402 *dptr &= ~DPARM_SYNC;
2403 }
2404 *dptr |= DPARM_SAFE_DFLT;
2405#else
2406 struct ccb_trans_settings_scsi *scsi =
2407 &cts->proto_specific.scsi;
2408 struct ccb_trans_settings_spi *spi =
2409 &cts->xport_specific.spi;
2410 sdparam *sdp = isp->isp_param;
2411 u_int16_t *dptr;
2412
2413 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2414 sdp += bus;
2415 /*
2416 * We always update (internally) from goal_flags
2417 * so any request to change settings just gets
2418 * vectored to that location.
2419 */
2420 dptr = &sdp->isp_devparam[tgt].goal_flags;
2421
2422 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2423 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2424 *dptr |= DPARM_DISC;
2425 else
2426 *dptr &= ~DPARM_DISC;
2427 }
2428
2429 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2430 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2431 *dptr |= DPARM_TQING;
2432 else
2433 *dptr &= ~DPARM_TQING;
2434 }
2435
2436 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2437 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2438 *dptr |= DPARM_WIDE;
2439 else
2440 *dptr &= ~DPARM_WIDE;
2441 }
2442
2443 /*
2444 * XXX: FIX ME
2445 */
2446 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2447 (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2448 (spi->sync_period && spi->sync_offset)) {
2449 *dptr |= DPARM_SYNC;
2450 /*
2451 * XXX: CHECK FOR LEGALITY
2452 */
2453 sdp->isp_devparam[tgt].goal_period =
2454 spi->sync_period;
2455 sdp->isp_devparam[tgt].goal_offset =
2456 spi->sync_offset;
2457 } else {
2458 *dptr &= ~DPARM_SYNC;
2459 }
2460#endif
2461 isp_prt(isp, ISP_LOGDEBUG0,
2462 "SET bus %d targ %d to flags %x off %x per %x",
2463 bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2464 sdp->isp_devparam[tgt].goal_offset,
2465 sdp->isp_devparam[tgt].goal_period);
2466 sdp->isp_devparam[tgt].dev_update = 1;
2467 isp->isp_update |= (1 << bus);
2468 }
2469 ISPLOCK_2_CAMLOCK(isp);
2470 ccb->ccb_h.status = CAM_REQ_CMP;
2471 xpt_done(ccb);
2472 break;
2473 case XPT_GET_TRAN_SETTINGS:
2474 cts = &ccb->cts;
2475 tgt = cts->ccb_h.target_id;
2476 CAMLOCK_2_ISPLOCK(isp);
2477 if (IS_FC(isp)) {
2478#ifndef CAM_NEW_TRAN_CODE
2479 /*
2480 * a lot of normal SCSI things don't make sense.
2481 */
2482 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2483 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2484 /*
2485 * How do you measure the width of a high
2486 * speed serial bus? Well, in bytes.
2487 *
2488 * Offset and period make no sense, though, so we set
2489 * (above) a 'base' transfer speed to be gigabit.
2490 */
2491 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2492#else
2493 fcparam *fcp = isp->isp_param;
2494 struct ccb_trans_settings_fc *fc =
2495 &cts->xport_specific.fc;
2496
2497 cts->protocol = PROTO_SCSI;
2498 cts->protocol_version = SCSI_REV_2;
2499 cts->transport = XPORT_FC;
2500 cts->transport_version = 0;
2501
2502 fc->valid = CTS_FC_VALID_SPEED;
2503 if (fcp->isp_gbspeed == 2)
2504 fc->bitrate = 200000;
2505 else
2506 fc->bitrate = 100000;
2507 if (tgt > 0 && tgt < MAX_FC_TARG) {
2508 struct lportdb *lp = &fcp->portdb[tgt];
2509 fc->wwnn = lp->node_wwn;
2510 fc->wwpn = lp->port_wwn;
2511 fc->port = lp->portid;
2512 fc->valid |= CTS_FC_VALID_WWNN |
2513 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2514 }
2515#endif
2516 } else {
2517#ifdef CAM_NEW_TRAN_CODE
2518 struct ccb_trans_settings_scsi *scsi =
2519 &cts->proto_specific.scsi;
2520 struct ccb_trans_settings_spi *spi =
2521 &cts->xport_specific.spi;
2522#endif
2523 sdparam *sdp = isp->isp_param;
2524 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2525 u_int16_t dval, pval, oval;
2526
2527 sdp += bus;
2528
2529 if (IS_CURRENT_SETTINGS(cts)) {
2530 sdp->isp_devparam[tgt].dev_refresh = 1;
2531 isp->isp_update |= (1 << bus);
2532 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2533 NULL);
2534 dval = sdp->isp_devparam[tgt].actv_flags;
2535 oval = sdp->isp_devparam[tgt].actv_offset;
2536 pval = sdp->isp_devparam[tgt].actv_period;
2537 } else {
2538 dval = sdp->isp_devparam[tgt].nvrm_flags;
2539 oval = sdp->isp_devparam[tgt].nvrm_offset;
2540 pval = sdp->isp_devparam[tgt].nvrm_period;
2541 }
2542
2543#ifndef CAM_NEW_TRAN_CODE
2544 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2545
2546 if (dval & DPARM_DISC) {
2547 cts->flags |= CCB_TRANS_DISC_ENB;
2548 }
2549 if (dval & DPARM_TQING) {
2550 cts->flags |= CCB_TRANS_TAG_ENB;
2551 }
2552 if (dval & DPARM_WIDE) {
2553 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2554 } else {
2555 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2556 }
2557 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2558 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2559
2560 if ((dval & DPARM_SYNC) && oval != 0) {
2561 cts->sync_period = pval;
2562 cts->sync_offset = oval;
2563 cts->valid |=
2564 CCB_TRANS_SYNC_RATE_VALID |
2565 CCB_TRANS_SYNC_OFFSET_VALID;
2566 }
2567#else
2568 cts->protocol = PROTO_SCSI;
2569 cts->protocol_version = SCSI_REV_2;
2570 cts->transport = XPORT_SPI;
2571 cts->transport_version = 2;
2572
2573 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2574 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2575 if (dval & DPARM_DISC) {
2576 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2577 }
2578 if (dval & DPARM_TQING) {
2579 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2580 }
2581 if ((dval & DPARM_SYNC) && oval && pval) {
2582 spi->sync_offset = oval;
2583 spi->sync_period = pval;
2584 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2585 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2586 }
2587 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2588 if (dval & DPARM_WIDE) {
2589 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2590 } else {
2591 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2592 }
2593 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2594 scsi->valid = CTS_SCSI_VALID_TQ;
2595 spi->valid |= CTS_SPI_VALID_DISC;
2596 } else {
2597 scsi->valid = 0;
2598 }
2599#endif
2600 isp_prt(isp, ISP_LOGDEBUG0,
2601 "GET %s bus %d targ %d to flags %x off %x per %x",
2602 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2603 bus, tgt, dval, oval, pval);
2604 }
2605 ISPLOCK_2_CAMLOCK(isp);
2606 ccb->ccb_h.status = CAM_REQ_CMP;
2607 xpt_done(ccb);
2608 break;
2609
2610 case XPT_CALC_GEOMETRY:
2611 {
2612 struct ccb_calc_geometry *ccg;
2613
2614 ccg = &ccb->ccg;
2615 if (ccg->block_size == 0) {
2616 isp_prt(isp, ISP_LOGERR,
2617 "%d.%d XPT_CALC_GEOMETRY block size 0?",
2618 ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2619 ccb->ccb_h.status = CAM_REQ_INVALID;
2620 xpt_done(ccb);
2621 break;
2622 }
2623 cam_calc_geometry(ccg, /*extended*/1);
2624 xpt_done(ccb);
2625 break;
2626 }
2627 case XPT_RESET_BUS: /* Reset the specified bus */
2628 bus = cam_sim_bus(sim);
2629 CAMLOCK_2_ISPLOCK(isp);
2630 error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2631 ISPLOCK_2_CAMLOCK(isp);
2632 if (error)
2633 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2634 else {
2635 if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2636 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2637 else if (isp->isp_path != NULL)
2638 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2639 ccb->ccb_h.status = CAM_REQ_CMP;
2640 }
2641 xpt_done(ccb);
2642 break;
2643
2644 case XPT_TERM_IO: /* Terminate the I/O process */
2645 ccb->ccb_h.status = CAM_REQ_INVALID;
2646 xpt_done(ccb);
2647 break;
2648
2649 case XPT_PATH_INQ: /* Path routing inquiry */
2650 {
2651 struct ccb_pathinq *cpi = &ccb->cpi;
2652
2653 cpi->version_num = 1;
2654#ifdef ISP_TARGET_MODE
2655 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2656#else
2657 cpi->target_sprt = 0;
2658#endif
2659 cpi->hba_eng_cnt = 0;
2660 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2661 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2662 cpi->bus_id = cam_sim_bus(sim);
2663 if (IS_FC(isp)) {
2664 cpi->hba_misc = PIM_NOBUSRESET;
2665 /*
2666 * Because our loop ID can shift from time to time,
2667 * make our initiator ID out of range of our bus.
2668 */
2669 cpi->initiator_id = cpi->max_target + 1;
2670
2671 /*
2672 * Set base transfer capabilities for Fibre Channel.
2673 * Technically not correct because we don't know
2674 * what media we're running on top of- but we'll
2675 * look good if we always say 100MB/s.
2676 */
2677 if (FCPARAM(isp)->isp_gbspeed == 2)
2678 cpi->base_transfer_speed = 200000;
2679 else
2680 cpi->base_transfer_speed = 100000;
2681 cpi->hba_inquiry = PI_TAG_ABLE;
2682#ifdef CAM_NEW_TRAN_CODE
2683 cpi->transport = XPORT_FC;
2684 cpi->transport_version = 0; /* WHAT'S THIS FOR? */
2685#endif
2686 } else {
2687 sdparam *sdp = isp->isp_param;
2688 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2689 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2690 cpi->hba_misc = 0;
2691 cpi->initiator_id = sdp->isp_initiator_id;
2692 cpi->base_transfer_speed = 3300;
2693#ifdef CAM_NEW_TRAN_CODE
2694 cpi->transport = XPORT_SPI;
2695 cpi->transport_version = 2; /* WHAT'S THIS FOR? */
2696#endif
2697 }
2698#ifdef CAM_NEW_TRAN_CODE
2699 cpi->protocol = PROTO_SCSI;
2700 cpi->protocol_version = SCSI_REV_2;
2701#endif
2702 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2703 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2704 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2705 cpi->unit_number = cam_sim_unit(sim);
2706 cpi->ccb_h.status = CAM_REQ_CMP;
2707 xpt_done(ccb);
2708 break;
2709 }
2710 default:
2711 ccb->ccb_h.status = CAM_REQ_INVALID;
2712 xpt_done(ccb);
2713 break;
2714 }
2715}
2716
2717#define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2718void
2719isp_done(struct ccb_scsiio *sccb)
2720{
2721 struct ispsoftc *isp = XS_ISP(sccb);
2722
2723 if (XS_NOERR(sccb))
2724 XS_SETERR(sccb, CAM_REQ_CMP);
2725
2726 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2727 (sccb->scsi_status != SCSI_STATUS_OK)) {
2728 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2729 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2730 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2731 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2732 } else {
2733 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2734 }
2735 }
2736
2737 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2738 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2739 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2740 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2741 xpt_freeze_devq(sccb->ccb_h.path, 1);
2742 isp_prt(isp, ISP_LOGDEBUG0,
2743 "freeze devq %d.%d cam sts %x scsi sts %x",
2744 sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2745 sccb->ccb_h.status, sccb->scsi_status);
2746 }
2747 }
2748
2749 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2750 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2751 xpt_print_path(sccb->ccb_h.path);
2752 isp_prt(isp, ISP_LOGINFO,
2753 "cam completion status 0x%x", sccb->ccb_h.status);
2754 }
2755
2756 XS_CMD_S_DONE(sccb);
2757 if (XS_CMD_WDOG_P(sccb) == 0) {
2758 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2759 if (XS_CMD_GRACE_P(sccb)) {
2760 isp_prt(isp, ISP_LOGDEBUG2,
2761 "finished command on borrowed time");
2762 }
2763 XS_CMD_S_CLEAR(sccb);
2764 ISPLOCK_2_CAMLOCK(isp);
2765 xpt_done((union ccb *) sccb);
2766 CAMLOCK_2_ISPLOCK(isp);
2767 }
2768}
2769
2770int
2771isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2772{
2773 int bus, rv = 0;
2774 switch (cmd) {
2775 case ISPASYNC_NEW_TGT_PARAMS:
2776 {
2777#ifdef CAM_NEW_TRAN_CODE
2778 struct ccb_trans_settings_scsi *scsi;
2779 struct ccb_trans_settings_spi *spi;
2780#endif
2781 int flags, tgt;
2782 sdparam *sdp = isp->isp_param;
2783 struct ccb_trans_settings cts;
2784 struct cam_path *tmppath;
2785
2786 bzero(&cts, sizeof (struct ccb_trans_settings));
2787
2788 tgt = *((int *)arg);
2789 bus = (tgt >> 16) & 0xffff;
2790 tgt &= 0xffff;
2791 sdp += bus;
2792 ISPLOCK_2_CAMLOCK(isp);
2793 if (xpt_create_path(&tmppath, NULL,
2794 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2795 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2796 CAMLOCK_2_ISPLOCK(isp);
2797 isp_prt(isp, ISP_LOGWARN,
2798 "isp_async cannot make temp path for %d.%d",
2799 tgt, bus);
2800 rv = -1;
2801 break;
2802 }
2803 CAMLOCK_2_ISPLOCK(isp);
2804 flags = sdp->isp_devparam[tgt].actv_flags;
2805#ifdef CAM_NEW_TRAN_CODE
2806 cts.type = CTS_TYPE_CURRENT_SETTINGS;
2807 cts.protocol = PROTO_SCSI;
2808 cts.transport = XPORT_SPI;
2809
2810 scsi = &cts.proto_specific.scsi;
2811 spi = &cts.xport_specific.spi;
2812
2813 if (flags & DPARM_TQING) {
2814 scsi->valid |= CTS_SCSI_VALID_TQ;
2815 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2816 spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2817 }
2818
2819 if (flags & DPARM_DISC) {
2820 spi->valid |= CTS_SPI_VALID_DISC;
2821 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2822 }
2823 spi->flags |= CTS_SPI_VALID_BUS_WIDTH;
2824 if (flags & DPARM_WIDE) {
2825 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2826 } else {
2827 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2828 }
2829 if (flags & DPARM_SYNC) {
2830 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2831 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2832 spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2833 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2834 }
2835#else
2836 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2837 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2838 if (flags & DPARM_DISC) {
2839 cts.flags |= CCB_TRANS_DISC_ENB;
2840 }
2841 if (flags & DPARM_TQING) {
2842 cts.flags |= CCB_TRANS_TAG_ENB;
2843 }
2844 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2845 cts.bus_width = (flags & DPARM_WIDE)?
2846 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT;
2847 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2848 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2849 if (flags & DPARM_SYNC) {
2850 cts.valid |=
2851 CCB_TRANS_SYNC_RATE_VALID |
2852 CCB_TRANS_SYNC_OFFSET_VALID;
2853 }
2854#endif
2855 isp_prt(isp, ISP_LOGDEBUG2,
2856 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2857 bus, tgt, sdp->isp_devparam[tgt].actv_period,
2858 sdp->isp_devparam[tgt].actv_offset, flags);
2859 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2860 ISPLOCK_2_CAMLOCK(isp);
2861 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2862 xpt_free_path(tmppath);
2863 CAMLOCK_2_ISPLOCK(isp);
2864 break;
2865 }
2866 case ISPASYNC_BUS_RESET:
2867 bus = *((int *)arg);
2868 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2869 bus);
2870 if (bus > 0 && isp->isp_path2) {
2871 ISPLOCK_2_CAMLOCK(isp);
2872 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2873 CAMLOCK_2_ISPLOCK(isp);
2874 } else if (isp->isp_path) {
2875 ISPLOCK_2_CAMLOCK(isp);
2876 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2877 CAMLOCK_2_ISPLOCK(isp);
2878 }
2879 break;
2880 case ISPASYNC_LIP:
2881 if (isp->isp_path) {
2882 isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2883 }
2884 isp_prt(isp, ISP_LOGINFO, "LIP Received");
2885 break;
2886 case ISPASYNC_LOOP_RESET:
2887 if (isp->isp_path) {
2888 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2889 }
2890 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2891 break;
2892 case ISPASYNC_LOOP_DOWN:
2893 if (isp->isp_path) {
2894 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2895 }
2896 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2897 break;
2898 case ISPASYNC_LOOP_UP:
2899 /*
2900 * Now we just note that Loop has come up. We don't
2901 * actually do anything because we're waiting for a
2902 * Change Notify before activating the FC cleanup
2903 * thread to look at the state of the loop again.
2904 */
2905 isp_prt(isp, ISP_LOGINFO, "Loop UP");
2906 break;
2907 case ISPASYNC_PROMENADE:
2908 {
2909 struct cam_path *tmppath;
2910 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2911 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2912 static const char *roles[4] = {
2913 "(none)", "Target", "Initiator", "Target/Initiator"
2914 };
2915 fcparam *fcp = isp->isp_param;
2916 int tgt = *((int *) arg);
2917 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
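		/*
		 * is_tgt_mask isolates the target-role bit of the
		 * normalized role word; it is used below so that only
		 * devices acting as targets are announced to CAM.
		 */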
2918 struct lportdb *lp = &fcp->portdb[tgt];
2919
2920 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2921 roles[lp->roles & 0x3],
2922 (lp->valid)? "Arrived" : "Departed",
2923 (u_int32_t) (lp->port_wwn >> 32),
2924 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2925 (u_int32_t) (lp->node_wwn >> 32),
2926 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2927
2928 ISPLOCK_2_CAMLOCK(isp);
2929 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2930 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2931 CAMLOCK_2_ISPLOCK(isp);
2932 break;
2933 }
2934 /*
2935 * Policy: only announce targets.
2936 */
2937 if (lp->roles & is_tgt_mask) {
2938 if (lp->valid) {
2939 xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2940 } else {
2941 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2942 }
2943 }
2944 xpt_free_path(tmppath);
2945 CAMLOCK_2_ISPLOCK(isp);
2946 break;
2947 }
2948 case ISPASYNC_CHANGE_NOTIFY:
2949 if (arg == ISPASYNC_CHANGE_PDB) {
2950 isp_prt(isp, ISP_LOGINFO,
2951 "Port Database Changed");
2952 } else if (arg == ISPASYNC_CHANGE_SNS) {
2953 isp_prt(isp, ISP_LOGINFO,
2954 "Name Server Database Changed");
2955 }
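		/*
		 * Wake the FC support thread sleeping on kthread_cv so it
		 * re-evaluates loop and fabric state.
		 */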
2956#ifdef ISP_SMPLOCK
2957 cv_signal(&isp->isp_osinfo.kthread_cv);
2958#else
2959 wakeup(&isp->isp_osinfo.kthread_cv);
2960#endif
2961 break;
2962 case ISPASYNC_FABRIC_DEV:
2963 {
2964 int target, base, lim;
2965 fcparam *fcp = isp->isp_param;
2966 struct lportdb *lp = NULL;
2967 struct lportdb *clp = (struct lportdb *) arg;
2968 char *pt;
2969
2970 switch (clp->port_type) {
2971 case 1:
2972 pt = " N_Port";
2973 break;
2974 case 2:
2975 pt = " NL_Port";
2976 break;
2977 case 3:
2978 pt = "F/NL_Port";
2979 break;
2980 case 0x7f:
2981 pt = " Nx_Port";
2982 break;
2983 case 0x81:
2984 pt = " F_port";
2985 break;
2986 case 0x82:
2987 pt = " FL_Port";
2988 break;
2989 case 0x84:
2990 pt = " E_port";
2991 break;
2992 default:
2993 pt = " ";
2994 break;
2995 }
2996
2997 isp_prt(isp, ISP_LOGINFO,
2998 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2999
3000 /*
3001 * If we don't have an initiator role we bail.
3002 *
3003 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
3004 */
3005
3006 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
3007 break;
3008 }
3009
3010 /*
3011 * Is this entry for us? If so, we bail.
3012 */
3013
3014 if (fcp->isp_portid == clp->portid) {
3015 break;
3016 }
3017
3018 /*
3019 * Else, the default policy is to find room for it in
3020 * our local port database. Later, when we execute
3021 * the call to isp_pdb_sync either this newly arrived
3022 * or already logged in device will be (re)announced.
3023 */
3024
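		/*
		 * Choose the range of local port database slots to search:
		 * on a public loop (FL_PORT topology) start above the
		 * reserved FL/SNS handles, and on a point-to-point link
		 * (N_PORT topology) only a single remote port slot applies.
		 */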
3025 if (fcp->isp_topo == TOPO_FL_PORT)
3026 base = FC_SNS_ID+1;
3027 else
3028 base = 0;
3029
3030 if (fcp->isp_topo == TOPO_N_PORT)
3031 lim = 1;
3032 else
3033 lim = MAX_FC_TARG;
3034
3035 /*
3036 * Is it already in our list?
3037 */
3038 for (target = base; target < lim; target++) {
3039 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
3040 continue;
3041 }
3042 lp = &fcp->portdb[target];
3043 if (lp->port_wwn == clp->port_wwn &&
3044 lp->node_wwn == clp->node_wwn) {
3045 lp->fabric_dev = 1;
3046 break;
3047 }
3048 }
3049 if (target < lim) {
3050 break;
3051 }
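		/*
		 * Not already known: look for a free slot (an entry whose
		 * Port WWN is still zero) in which to record this fabric
		 * device.
		 */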
3052 for (target = base; target < lim; target++) {
3053 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
3054 continue;
3055 }
3056 lp = &fcp->portdb[target];
3057 if (lp->port_wwn == 0) {
3058 break;
3059 }
3060 }
3061 if (target == lim) {
3062 isp_prt(isp, ISP_LOGWARN,
3063 "out of space for fabric devices");
3064 break;
3065 }
3066 lp->port_type = clp->port_type;
3067 lp->fc4_type = clp->fc4_type;
3068 lp->node_wwn = clp->node_wwn;
3069 lp->port_wwn = clp->port_wwn;
3070 lp->portid = clp->portid;
3071 lp->fabric_dev = 1;
3072 break;
3073 }
3074#ifdef ISP_TARGET_MODE
3084 case ISPASYNC_TARGET_MESSAGE:
3085 {
3086  tmd_msg_t *mp = arg;
3087  isp_prt(isp, ISP_LOGALL,
3088   "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3089   mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3090   (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3091   mp->nt_msg[0]);
3092  break;
3093 }
3075 case ISPASYNC_TARGET_NOTIFY:
3076 {
3077  tmd_notify_t *nt = arg;
3078  isp_prt(isp, ISP_LOGALL,
3079   "target notify code 0x%x", nt->nt_ncode);
3080  break;
3081 }
3094 case ISPASYNC_TARGET_EVENT:
3095 {
3096 tmd_event_t *ep = arg;
3097 if (ep->ev_event == ASYNC_CTIO_DONE) {
3098 /*
3099 * ACK the interrupt first
3100 */
3101 ISP_WRITE(isp, BIU_SEMA, 0);
3102 ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
3103 isp_handle_platform_ctio_fastpost(isp, ep->ev_bus);
3104 break;
3105 }
3106 isp_prt(isp, ISP_LOGALL,
3107 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
3108 break;
3109 }
3110 case ISPASYNC_TARGET_ACTION:
3111 switch (((isphdr_t *)arg)->rqs_entry_type) {
3112 default:
3113 isp_prt(isp, ISP_LOGWARN,
3114 "event 0x%x for unhandled target action",
3115 ((isphdr_t *)arg)->rqs_entry_type);
3116 break;
3117 case RQSTYPE_NOTIFY:
3118 if (IS_SCSI(isp)) {
3119 rv = isp_handle_platform_notify_scsi(isp,
3120 (in_entry_t *) arg);
3121 } else {
3122 rv = isp_handle_platform_notify_fc(isp,
3123 (in_fcentry_t *) arg);
3124 }
3125 break;
3126 case RQSTYPE_ATIO:
3127 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3128 break;
3129 case RQSTYPE_ATIO2:
3130 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3131 break;
3132 case RQSTYPE_CTIO2:
3133 case RQSTYPE_CTIO:
3134 rv = isp_handle_platform_ctio(isp, arg);
3135 break;
3136 case RQSTYPE_ENABLE_LUN:
3137 case RQSTYPE_MODIFY_LUN:
3138 isp_ledone(isp, (lun_entry_t *) arg);
3139 break;
3140 }
3141 break;
3142#endif
3143 case ISPASYNC_FW_CRASH:
3144 {
3145 u_int16_t mbox1, mbox6;
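		/*
		 * OUTMAILBOX1 carries the RISC address reported with the
		 * crash; OUTMAILBOX6 identifies the bus on dual-bus
		 * adapters (zero otherwise).
		 */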
3146 mbox1 = ISP_READ(isp, OUTMAILBOX1);
3147 if (IS_DUALBUS(isp)) {
3148 mbox6 = ISP_READ(isp, OUTMAILBOX6);
3149 } else {
3150 mbox6 = 0;
3151 }
3152 isp_prt(isp, ISP_LOGERR,
3153 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3154 mbox6, mbox1);
3155#ifdef ISP_FW_CRASH_DUMP
3156 /*
3157 * XXX: really need a thread to do this right.
3158 */
3159 if (IS_FC(isp)) {
3160 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3161 FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3162 isp_freeze_loopdown(isp, "f/w crash");
3163 isp_fw_dump(isp);
3164 }
3165 isp_reinit(isp);
3166 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3167#endif
3168 break;
3169 }
3170 case ISPASYNC_UNHANDLED_RESPONSE:
3171 break;
3172 default:
3173 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3174 break;
3175 }
3176 return (rv);
3177}
3178
3179
3180/*
3181 * Locks are held before coming here.
3182 */
3183void
3184isp_uninit(struct ispsoftc *isp)
3185{
3186 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3187 DISABLE_INTS(isp);
3188}
3189
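/*
 * Print a driver message prefixed with the device name. Messages logged
 * at ISP_LOGALL always print; all other levels print only when one of
 * their bits is set in the isp_dblev mask.
 */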
3190void
3191isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3192{
3193 va_list ap;
3194 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3195 return;
3196 }
3197 printf("%s: ", device_get_nameunit(isp->isp_dev));
3198 va_start(ap, fmt);
3199 vprintf(fmt, ap);
3200 va_end(ap);
3201 printf("\n");
3202}