scsi_target.c (revision 130585 → revision 130640)
1/*
2 * Generic SCSI Target Kernel Mode Driver
3 *
4 * Copyright (c) 2002 Nate Lawson.
5 * Copyright (c) 1998, 1999, 2001, 2002 Justin T. Gibbs.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/cam/scsi/scsi_target.c 130585 2004-06-16 09:47:26Z phk $");
31__FBSDID("$FreeBSD: head/sys/cam/scsi/scsi_target.c 130640 2004-06-17 17:16:53Z phk $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/conf.h>
37#include <sys/malloc.h>
38#include <sys/poll.h>
39#include <sys/vnode.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/devicestat.h>
43
44#include <cam/cam.h>
45#include <cam/cam_ccb.h>
46#include <cam/cam_periph.h>
47#include <cam/cam_xpt_periph.h>
48#include <cam/scsi/scsi_targetio.h>
49
50/* Transaction information attached to each CCB sent by the user */
51struct targ_cmd_descr {
52 struct cam_periph_map_info mapinfo;
53 TAILQ_ENTRY(targ_cmd_descr) tqe;
54 union ccb *user_ccb;
55 int priority;
56 int func_code;
57};
58
59/* Offset into the private CCB area for storing our descriptor */
60#define targ_descr periph_priv.entries[1].ptr
61
62TAILQ_HEAD(descr_queue, targ_cmd_descr);
63
64typedef enum {
65 TARG_STATE_RESV = 0x00, /* Invalid state */
66 TARG_STATE_OPENED = 0x01, /* Device opened, softc initialized */
67 TARG_STATE_LUN_ENABLED = 0x02 /* Device enabled for a path */
68} targ_state;
69
70/* Per-instance device software context */
71struct targ_softc {
72 /* CCBs (CTIOs, ATIOs, INOTs) pending on the controller */
73 struct ccb_queue pending_ccb_queue;
74
75 /* Command descriptors awaiting CTIO resources from the XPT */
76 struct descr_queue work_queue;
77
78 /* Command descriptors that have been aborted back to the user. */
79 struct descr_queue abort_queue;
80
81 /*
82 * Queue of CCBs that have been copied out to userland, but our
83 * userland daemon has not yet seen.
84 */
85 struct ccb_queue user_ccb_queue;
86
87 struct cam_periph *periph;
88 struct cam_path *path;
89 targ_state state;
90 struct selinfo read_select;
91 struct devstat device_stats;
92 struct mtx mtx;
93};
94
95static d_open_t targopen;
96static d_close_t targclose;
97static d_read_t targread;
98static d_write_t targwrite;
99static d_ioctl_t targioctl;
100static d_poll_t targpoll;
101static d_kqfilter_t targkqfilter;
102static void targreadfiltdetach(struct knote *kn);
103static int targreadfilt(struct knote *kn, long hint);
104static struct filterops targread_filtops =
105 { 1, NULL, targreadfiltdetach, targreadfilt };
106
107static struct cdevsw targ_cdevsw = {
108 .d_version = D_VERSION,
109 .d_flags = D_NEEDGIANT,
110 .d_open = targopen,
111 .d_close = targclose,
112 .d_read = targread,
113 .d_write = targwrite,
114 .d_ioctl = targioctl,
115 .d_poll = targpoll,
116 .d_name = "targ",
117 .d_kqfilter = targkqfilter
118};
119
120static cam_status targendislun(struct cam_path *path, int enable,
121 int grp6_len, int grp7_len);
122static cam_status targenable(struct targ_softc *softc,
123 struct cam_path *path,
124 int grp6_len, int grp7_len);
125static cam_status targdisable(struct targ_softc *softc);
126static periph_ctor_t targctor;
127static periph_dtor_t targdtor;
128static periph_start_t targstart;
129static int targusermerge(struct targ_softc *softc,
130 struct targ_cmd_descr *descr,
131 union ccb *ccb);
132static int targsendccb(struct targ_softc *softc, union ccb *ccb,
133 struct targ_cmd_descr *descr);
134static void targdone(struct cam_periph *periph,
135 union ccb *done_ccb);
136static int targreturnccb(struct targ_softc *softc,
137 union ccb *ccb);
138static union ccb * targgetccb(struct targ_softc *softc, xpt_opcode type,
139 int priority);
140static void targfreeccb(struct targ_softc *softc, union ccb *ccb);
141static struct targ_cmd_descr *
142 targgetdescr(struct targ_softc *softc);
143static periph_init_t targinit;
144static void targclone(void *arg, char *name, int namelen,
145 struct cdev **dev);
146static void targasync(void *callback_arg, u_int32_t code,
147 struct cam_path *path, void *arg);
148static void abort_all_pending(struct targ_softc *softc);
149static void notify_user(struct targ_softc *softc);
150static int targcamstatus(cam_status status);
151static size_t targccblen(xpt_opcode func_code);
152
153static struct periph_driver targdriver =
154{
155 targinit, "targ",
156 TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
157};
158PERIPHDRIVER_DECLARE(targ, targdriver);
159
160static struct mtx targ_mtx;
161#define TARG_LOCK(softc) mtx_lock(&(softc)->mtx)
162#define TARG_UNLOCK(softc) mtx_unlock(&(softc)->mtx)
163
164static MALLOC_DEFINE(M_TARG, "TARG", "TARG data");
165
166/* Create softc and initialize it. Only one proc can open each targ device. */
167static int
168targopen(struct cdev *dev, int flags, int fmt, struct thread *td)
169{
170 struct targ_softc *softc;
171
172 mtx_lock(&targ_mtx);
173 if (dev->si_drv1 != 0) {
174 mtx_unlock(&targ_mtx);
175 return (EBUSY);
176 }
177
178 /* Mark device busy before any potentially blocking operations */
179 dev->si_drv1 = (void *)~0;
180 mtx_unlock(&targ_mtx);
181
182 /* Create the targ device, allocate its softc, initialize it */
183 if ((dev->si_flags & SI_NAMED) == 0) {
184 make_dev(&targ_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
185 "targ%d", dev2unit(dev));
186 }
187 MALLOC(softc, struct targ_softc *, sizeof(*softc), M_TARG,
188 M_WAITOK | M_ZERO);
189 dev->si_drv1 = softc;
190 softc->state = TARG_STATE_OPENED;
191 softc->periph = NULL;
192 softc->path = NULL;
193 mtx_init(&softc->mtx, devtoname(dev), "targ cdev", MTX_DEF);
194
195 TAILQ_INIT(&softc->pending_ccb_queue);
196 TAILQ_INIT(&softc->work_queue);
197 TAILQ_INIT(&softc->abort_queue);
198 TAILQ_INIT(&softc->user_ccb_queue);
199
200 return (0);
201}
202
 203/* Disable LUN if enabled and tear down softc */
204static int
205targclose(struct cdev *dev, int flag, int fmt, struct thread *td)
206{
207 struct targ_softc *softc;
208 int error;
209
210 softc = (struct targ_softc *)dev->si_drv1;
211 TARG_LOCK(softc);
212 error = targdisable(softc);
213 if (error == CAM_REQ_CMP) {
214 dev->si_drv1 = 0;
215 mtx_lock(&targ_mtx);
216 if (softc->periph != NULL) {
217 cam_periph_invalidate(softc->periph);
218 softc->periph = NULL;
219 }
220 mtx_unlock(&targ_mtx);
221 TARG_UNLOCK(softc);
222 mtx_destroy(&softc->mtx);
223 destroy_dev(dev);
224 FREE(softc, M_TARG);
225 } else {
226 TARG_UNLOCK(softc);
227 }
228 return (error);
229}
230
231/* Enable/disable LUNs, set debugging level */
232static int
233targioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
234{
235 struct targ_softc *softc;
236 cam_status status;
237
238 softc = (struct targ_softc *)dev->si_drv1;
239
240 switch (cmd) {
241 case TARGIOCENABLE:
242 {
243 struct ioc_enable_lun *new_lun;
244 struct cam_path *path;
245
246 new_lun = (struct ioc_enable_lun *)addr;
247 status = xpt_create_path(&path, /*periph*/NULL,
248 new_lun->path_id,
249 new_lun->target_id,
250 new_lun->lun_id);
251 if (status != CAM_REQ_CMP) {
252 printf("Couldn't create path, status %#x\n", status);
253 break;
254 }
255 TARG_LOCK(softc);
256 status = targenable(softc, path, new_lun->grp6_len,
257 new_lun->grp7_len);
258 TARG_UNLOCK(softc);
259 xpt_free_path(path);
260 break;
261 }
262 case TARGIOCDISABLE:
263 TARG_LOCK(softc);
264 status = targdisable(softc);
265 TARG_UNLOCK(softc);
266 break;
267 case TARGIOCDEBUG:
268 {
269#ifdef CAMDEBUG
270 struct ccb_debug cdbg;
271
272 bzero(&cdbg, sizeof cdbg);
273 if (*((int *)addr) != 0)
274 cdbg.flags = CAM_DEBUG_PERIPH;
275 else
276 cdbg.flags = CAM_DEBUG_NONE;
277 xpt_setup_ccb(&cdbg.ccb_h, softc->path, /*priority*/0);
278 cdbg.ccb_h.func_code = XPT_DEBUG;
279 cdbg.ccb_h.cbfcnp = targdone;
280
281 /* If no periph available, disallow debugging changes */
282 TARG_LOCK(softc);
283 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) {
284 status = CAM_DEV_NOT_THERE;
285 TARG_UNLOCK(softc);
286 break;
287 }
288 xpt_action((union ccb *)&cdbg);
289 TARG_UNLOCK(softc);
290 status = cdbg.ccb_h.status & CAM_STATUS_MASK;
291#else
292 status = CAM_FUNC_NOTAVAIL;
293#endif
294 break;
295 }
296 default:
297 status = CAM_PROVIDE_FAIL;
298 break;
299 }
300
301 return (targcamstatus(status));
302}
303
304/* Writes are always ready, reads wait for user_ccb_queue or abort_queue */
305static int
306targpoll(struct cdev *dev, int poll_events, struct thread *td)
307{
308 struct targ_softc *softc;
309 int revents;
310
311 softc = (struct targ_softc *)dev->si_drv1;
312
313 /* Poll for write() is always ok. */
314 revents = poll_events & (POLLOUT | POLLWRNORM);
315 if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
316 /* Poll for read() depends on user and abort queues. */
317 TARG_LOCK(softc);
318 if (!TAILQ_EMPTY(&softc->user_ccb_queue) ||
319 !TAILQ_EMPTY(&softc->abort_queue)) {
320 revents |= poll_events & (POLLIN | POLLRDNORM);
321 }
322 /* Only sleep if the user didn't poll for write. */
323 if (revents == 0)
324 selrecord(td, &softc->read_select);
325 TARG_UNLOCK(softc);
326 }
327
328 return (revents);
329}
330
331static int
332targkqfilter(struct cdev *dev, struct knote *kn)
333{
334 struct targ_softc *softc;
335
336 softc = (struct targ_softc *)dev->si_drv1;
337 kn->kn_hook = (caddr_t)softc;
338 kn->kn_fop = &targread_filtops;
339 TARG_LOCK(softc);
340 SLIST_INSERT_HEAD(&softc->read_select.si_note, kn, kn_selnext);
341 TARG_UNLOCK(softc);
342 return (0);
343}
344
345static void
346targreadfiltdetach(struct knote *kn)
347{
348 struct targ_softc *softc;
349
350 softc = (struct targ_softc *)kn->kn_hook;
351 TARG_LOCK(softc);
352 SLIST_REMOVE(&softc->read_select.si_note, kn, knote, kn_selnext);
353 TARG_UNLOCK(softc);
354}
355
356/* Notify the user's kqueue when the user queue or abort queue gets a CCB */
357static int
358targreadfilt(struct knote *kn, long hint)
359{
360 struct targ_softc *softc;
361 int retval;
362
363 softc = (struct targ_softc *)kn->kn_hook;
364 TARG_LOCK(softc);
365 retval = !TAILQ_EMPTY(&softc->user_ccb_queue) ||
366 !TAILQ_EMPTY(&softc->abort_queue);
367 TARG_UNLOCK(softc);
368 return (retval);
369}
370
371/* Send the HBA the enable/disable message */
372static cam_status
373targendislun(struct cam_path *path, int enable, int grp6_len, int grp7_len)
374{
375 struct ccb_en_lun en_ccb;
376 cam_status status;
377
378 /* Tell the lun to begin answering selects */
379 xpt_setup_ccb(&en_ccb.ccb_h, path, /*priority*/1);
380 en_ccb.ccb_h.func_code = XPT_EN_LUN;
381 /* Don't need support for any vendor specific commands */
382 en_ccb.grp6_len = grp6_len;
383 en_ccb.grp7_len = grp7_len;
384 en_ccb.enable = enable ? 1 : 0;
385 xpt_action((union ccb *)&en_ccb);
386 status = en_ccb.ccb_h.status & CAM_STATUS_MASK;
387 if (status != CAM_REQ_CMP) {
388 xpt_print_path(path);
389 printf("%sable lun CCB rejected, status %#x\n",
390 enable ? "en" : "dis", status);
391 }
392 return (status);
393}
394
395/* Enable target mode on a LUN, given its path */
396static cam_status
397targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len,
398 int grp7_len)
399{
400 struct cam_periph *periph;
401 struct ccb_pathinq cpi;
402 cam_status status;
403
404 if ((softc->state & TARG_STATE_LUN_ENABLED) != 0)
405 return (CAM_LUN_ALRDY_ENA);
406
407 /* Make sure SIM supports target mode */
408 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
409 cpi.ccb_h.func_code = XPT_PATH_INQ;
410 xpt_action((union ccb *)&cpi);
411 status = cpi.ccb_h.status & CAM_STATUS_MASK;
412 if (status != CAM_REQ_CMP) {
413 printf("pathinq failed, status %#x\n", status);
414 goto enable_fail;
415 }
416 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
417 printf("controller does not support target mode\n");
418 status = CAM_FUNC_NOTAVAIL;
419 goto enable_fail;
420 }
421
422 /* Destroy any periph on our path if it is disabled */
423 mtx_lock(&targ_mtx);
424 periph = cam_periph_find(path, "targ");
425 if (periph != NULL) {
426 struct targ_softc *del_softc;
427
428 del_softc = (struct targ_softc *)periph->softc;
429 if ((del_softc->state & TARG_STATE_LUN_ENABLED) == 0) {
430 cam_periph_invalidate(del_softc->periph);
431 del_softc->periph = NULL;
432 } else {
433 printf("Requested path still in use by targ%d\n",
434 periph->unit_number);
435 mtx_unlock(&targ_mtx);
436 status = CAM_LUN_ALRDY_ENA;
437 goto enable_fail;
438 }
439 }
440
441 /* Create a periph instance attached to this path */
442 status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
443 "targ", CAM_PERIPH_BIO, path, targasync, 0, softc);
444 mtx_unlock(&targ_mtx);
445 if (status != CAM_REQ_CMP) {
446 printf("cam_periph_alloc failed, status %#x\n", status);
447 goto enable_fail;
448 }
449
450 /* Ensure that the periph now exists. */
451 if (cam_periph_find(path, "targ") == NULL) {
452 panic("targenable: succeeded but no periph?");
453 /* NOTREACHED */
454 }
455
456 /* Send the enable lun message */
457 status = targendislun(path, /*enable*/1, grp6_len, grp7_len);
458 if (status != CAM_REQ_CMP) {
459 printf("enable lun failed, status %#x\n", status);
460 goto enable_fail;
461 }
462 softc->state |= TARG_STATE_LUN_ENABLED;
463
464enable_fail:
465 return (status);
466}
467
468/* Disable this softc's target instance if enabled */
469static cam_status
470targdisable(struct targ_softc *softc)
471{
472 cam_status status;
473
474 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0)
475 return (CAM_REQ_CMP);
476
477 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targdisable\n"));
478
479 /* Abort any ccbs pending on the controller */
480 abort_all_pending(softc);
481
482 /* Disable this lun */
483 status = targendislun(softc->path, /*enable*/0,
484 /*grp6_len*/0, /*grp7_len*/0);
485 if (status == CAM_REQ_CMP)
486 softc->state &= ~TARG_STATE_LUN_ENABLED;
487 else
488 printf("Disable lun failed, status %#x\n", status);
489
490 return (status);
491}
492
493/* Initialize a periph (called from cam_periph_alloc) */
494static cam_status
495targctor(struct cam_periph *periph, void *arg)
496{
497 struct targ_softc *softc;
498
499 /* Store pointer to softc for periph-driven routines */
500 softc = (struct targ_softc *)arg;
501 periph->softc = softc;
502 softc->periph = periph;
503 softc->path = periph->path;
504 return (CAM_REQ_CMP);
505}
506
507static void
508targdtor(struct cam_periph *periph)
509{
510 struct targ_softc *softc;
511 struct ccb_hdr *ccb_h;
512 struct targ_cmd_descr *descr;
513
514 softc = (struct targ_softc *)periph->softc;
515
516 /*
517 * targdisable() aborts CCBs back to the user and leaves them
518 * on user_ccb_queue and abort_queue in case the user is still
519 * interested in them. We free them now.
520 */
521 while ((ccb_h = TAILQ_FIRST(&softc->user_ccb_queue)) != NULL) {
522 TAILQ_REMOVE(&softc->user_ccb_queue, ccb_h, periph_links.tqe);
523 targfreeccb(softc, (union ccb *)ccb_h);
524 }
525 while ((descr = TAILQ_FIRST(&softc->abort_queue)) != NULL) {
526 TAILQ_REMOVE(&softc->abort_queue, descr, tqe);
527 FREE(descr, M_TARG);
528 }
529
530 softc->periph = NULL;
531 softc->path = NULL;
532 periph->softc = NULL;
533}
534
535/* Receive CCBs from user mode proc and send them to the HBA */
536static int
537targwrite(struct cdev *dev, struct uio *uio, int ioflag)
538{
539 union ccb *user_ccb;
540 struct targ_softc *softc;
541 struct targ_cmd_descr *descr;
542 int write_len, error;
543 int func_code, priority;
544
545 softc = (struct targ_softc *)dev->si_drv1;
546 write_len = error = 0;
547 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
548 ("write - uio_resid %d\n", uio->uio_resid));
549 while (uio->uio_resid >= sizeof(user_ccb) && error == 0) {
550 union ccb *ccb;
 551
552
553 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
554 if (error != 0) {
555 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
556 ("write - uiomove failed (%d)\n", error));
557 break;
558 }
559 priority = fuword32(&user_ccb->ccb_h.pinfo.priority);
560 if (priority == -1) {
561 error = EINVAL;
562 break;
563 }
564 func_code = fuword32(&user_ccb->ccb_h.func_code);
565 switch (func_code) {
566 case XPT_ACCEPT_TARGET_IO:
567 case XPT_IMMED_NOTIFY:
568 ccb = targgetccb(softc, func_code, priority);
569 descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
570 descr->user_ccb = user_ccb;
571 descr->func_code = func_code;
572 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
573 ("Sent ATIO/INOT (%p)\n", user_ccb));
574 xpt_action(ccb);
575 TARG_LOCK(softc);
576 TAILQ_INSERT_TAIL(&softc->pending_ccb_queue,
577 &ccb->ccb_h,
578 periph_links.tqe);
579 TARG_UNLOCK(softc);
580 break;
581 default:
582 if ((func_code & XPT_FC_QUEUED) != 0) {
583 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
584 ("Sending queued ccb %#x (%p)\n",
585 func_code, user_ccb));
586 descr = targgetdescr(softc);
587 descr->user_ccb = user_ccb;
588 descr->priority = priority;
589 descr->func_code = func_code;
590 TARG_LOCK(softc);
591 TAILQ_INSERT_TAIL(&softc->work_queue,
592 descr, tqe);
593 TARG_UNLOCK(softc);
594 xpt_schedule(softc->periph, priority);
595 } else {
596 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
597 ("Sending inline ccb %#x (%p)\n",
598 func_code, user_ccb));
599 ccb = targgetccb(softc, func_code, priority);
600 descr = (struct targ_cmd_descr *)
601 ccb->ccb_h.targ_descr;
602 descr->user_ccb = user_ccb;
603 descr->priority = priority;
604 descr->func_code = func_code;
605 if (targusermerge(softc, descr, ccb) != EFAULT)
606 targsendccb(softc, ccb, descr);
607 targreturnccb(softc, ccb);
608 }
609 break;
610 }
611 write_len += sizeof(user_ccb);
612 }
613
614 /*
615 * If we've successfully taken in some amount of
616 * data, return success for that data first. If
617 * an error is persistent, it will be reported
618 * on the next write.
619 */
620 if (error != 0 && write_len == 0)
621 return (error);
622 if (write_len == 0 && uio->uio_resid != 0)
623 return (ENOSPC);
624 return (0);
625}
626
627/* Process requests (descrs) via the periph-supplied CCBs */
628static void
629targstart(struct cam_periph *periph, union ccb *start_ccb)
630{
631 struct targ_softc *softc;
632 struct targ_cmd_descr *descr, *next_descr;
633 int error;
634
635 softc = (struct targ_softc *)periph->softc;
636 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targstart %p\n", start_ccb));
637
638 TARG_LOCK(softc);
639 descr = TAILQ_FIRST(&softc->work_queue);
640 if (descr == NULL) {
641 TARG_UNLOCK(softc);
642 xpt_release_ccb(start_ccb);
643 } else {
644 TAILQ_REMOVE(&softc->work_queue, descr, tqe);
645 next_descr = TAILQ_FIRST(&softc->work_queue);
646 TARG_UNLOCK(softc);
647
648 /* Initiate a transaction using the descr and supplied CCB */
649 error = targusermerge(softc, descr, start_ccb);
650 if (error == 0)
651 error = targsendccb(softc, start_ccb, descr);
652 if (error != 0) {
653 xpt_print_path(periph->path);
654 printf("targsendccb failed, err %d\n", error);
655 xpt_release_ccb(start_ccb);
656 suword(&descr->user_ccb->ccb_h.status,
657 CAM_REQ_CMP_ERR);
658 TARG_LOCK(softc);
659 TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe);
660 TARG_UNLOCK(softc);
661 notify_user(softc);
662 }
663
664 /* If we have more work to do, stay scheduled */
665 if (next_descr != NULL)
666 xpt_schedule(periph, next_descr->priority);
667 }
668}
669
670static int
671targusermerge(struct targ_softc *softc, struct targ_cmd_descr *descr,
672 union ccb *ccb)
673{
674 struct ccb_hdr *u_ccbh, *k_ccbh;
675 size_t ccb_len;
676 int error;
677
678 u_ccbh = &descr->user_ccb->ccb_h;
679 k_ccbh = &ccb->ccb_h;
680
681 /*
682 * There are some fields in the CCB header that need to be
683 * preserved, the rest we get from the user ccb. (See xpt_merge_ccb)
684 */
685 xpt_setup_ccb(k_ccbh, softc->path, descr->priority);
686 k_ccbh->retry_count = fuword32(&u_ccbh->retry_count);
687 k_ccbh->func_code = descr->func_code;
688 k_ccbh->flags = fuword32(&u_ccbh->flags);
689 k_ccbh->timeout = fuword32(&u_ccbh->timeout);
690 ccb_len = targccblen(k_ccbh->func_code) - sizeof(struct ccb_hdr);
691 error = copyin(u_ccbh + 1, k_ccbh + 1, ccb_len);
692 if (error != 0) {
693 k_ccbh->status = CAM_REQ_CMP_ERR;
694 return (error);
695 }
696
697 /* Translate usermode abort_ccb pointer to its kernel counterpart */
698 if (k_ccbh->func_code == XPT_ABORT) {
699 struct ccb_abort *cab;
700 struct ccb_hdr *ccb_h;
701
702 cab = (struct ccb_abort *)ccb;
703 TARG_LOCK(softc);
704 TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue,
705 periph_links.tqe) {
706 struct targ_cmd_descr *ab_descr;
707
708 ab_descr = (struct targ_cmd_descr *)ccb_h->targ_descr;
709 if (ab_descr->user_ccb == cab->abort_ccb) {
710 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
711 ("Changing abort for %p to %p\n",
712 cab->abort_ccb, ccb_h));
713 cab->abort_ccb = (union ccb *)ccb_h;
714 break;
715 }
716 }
717 TARG_UNLOCK(softc);
718 /* CCB not found, set appropriate status */
719 if (ccb_h == NULL) {
720 k_ccbh->status = CAM_PATH_INVALID;
721 error = ESRCH;
722 }
723 }
724
725 return (error);
726}
727
728/* Build and send a kernel CCB formed from descr->user_ccb */
729static int
730targsendccb(struct targ_softc *softc, union ccb *ccb,
731 struct targ_cmd_descr *descr)
732{
733 struct cam_periph_map_info *mapinfo;
734 struct ccb_hdr *ccb_h;
735 int error;
736
737 ccb_h = &ccb->ccb_h;
738 mapinfo = &descr->mapinfo;
739 mapinfo->num_bufs_used = 0;
740
741 /*
742 * There's no way for the user to have a completion
743 * function, so we put our own completion function in here.
744 * We also stash in a reference to our descriptor so targreturnccb()
745 * can find our mapping info.
746 */
747 ccb_h->cbfcnp = targdone;
748 ccb_h->targ_descr = descr;
749
750 /*
751 * We only attempt to map the user memory into kernel space
752 * if they haven't passed in a physical memory pointer,
753 * and if there is actually an I/O operation to perform.
754 * Right now cam_periph_mapmem() only supports SCSI and device
755 * match CCBs. For the SCSI CCBs, we only pass the CCB in if
756 * there's actually data to map. cam_periph_mapmem() will do the
757 * right thing, even if there isn't data to map, but since CCBs
 758	 * without data are a reasonably common occurrence (e.g. test unit
759 * ready), it will save a few cycles if we check for it here.
760 */
761 if (((ccb_h->flags & CAM_DATA_PHYS) == 0)
762 && (((ccb_h->func_code == XPT_CONT_TARGET_IO)
763 && ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE))
764 || (ccb_h->func_code == XPT_DEV_MATCH))) {
765
766 error = cam_periph_mapmem(ccb, mapinfo);
767
768 /*
769 * cam_periph_mapmem returned an error, we can't continue.
770 * Return the error to the user.
771 */
772 if (error) {
773 ccb_h->status = CAM_REQ_CMP_ERR;
774 mapinfo->num_bufs_used = 0;
775 return (error);
776 }
777 }
778
779 /*
780 * Once queued on the pending CCB list, this CCB will be protected
781 * by our error recovery handler.
782 */
783 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("sendccb %p\n", ccb));
784 if (XPT_FC_IS_QUEUED(ccb)) {
785 TARG_LOCK(softc);
786 TAILQ_INSERT_TAIL(&softc->pending_ccb_queue, ccb_h,
787 periph_links.tqe);
788 TARG_UNLOCK(softc);
789 }
790 xpt_action(ccb);
791
792 return (0);
793}
794
795/* Completion routine for CCBs (called at splsoftcam) */
796static void
797targdone(struct cam_periph *periph, union ccb *done_ccb)
798{
799 struct targ_softc *softc;
800 cam_status status;
801
802 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("targdone %p\n", done_ccb));
803 softc = (struct targ_softc *)periph->softc;
804 TARG_LOCK(softc);
805 TAILQ_REMOVE(&softc->pending_ccb_queue, &done_ccb->ccb_h,
806 periph_links.tqe);
807 status = done_ccb->ccb_h.status & CAM_STATUS_MASK;
808
809 /* If we're no longer enabled, throw away CCB */
810 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) {
811 targfreeccb(softc, done_ccb);
812 TARG_UNLOCK(softc);
813 return;
814 }
815 /* abort_all_pending() waits for pending queue to be empty */
816 if (TAILQ_EMPTY(&softc->pending_ccb_queue))
817 wakeup(&softc->pending_ccb_queue);
818
819 switch (done_ccb->ccb_h.func_code) {
820 /* All FC_*_QUEUED CCBs go back to userland */
821 case XPT_IMMED_NOTIFY:
822 case XPT_ACCEPT_TARGET_IO:
823 case XPT_CONT_TARGET_IO:
824 TAILQ_INSERT_TAIL(&softc->user_ccb_queue, &done_ccb->ccb_h,
825 periph_links.tqe);
826 TARG_UNLOCK(softc);
827 notify_user(softc);
828 break;
829 default:
830 panic("targdone: impossible xpt opcode %#x",
831 done_ccb->ccb_h.func_code);
832 /* NOTREACHED */
833 }
834}
835
836/* Return CCBs to the user from the user queue and abort queue */
837static int
838targread(struct cdev *dev, struct uio *uio, int ioflag)
839{
840 struct descr_queue *abort_queue;
841 struct targ_cmd_descr *user_descr;
842 struct targ_softc *softc;
843 struct ccb_queue *user_queue;
844 struct ccb_hdr *ccb_h;
845 union ccb *user_ccb;
846 int read_len, error;
847
848 error = 0;
849 read_len = 0;
850 softc = (struct targ_softc *)dev->si_drv1;
851 user_queue = &softc->user_ccb_queue;
852 abort_queue = &softc->abort_queue;
853 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread\n"));
854
855 /* If no data is available, wait or return immediately */
856 TARG_LOCK(softc);
857 ccb_h = TAILQ_FIRST(user_queue);
858 user_descr = TAILQ_FIRST(abort_queue);
859 while (ccb_h == NULL && user_descr == NULL) {
860 if ((ioflag & IO_NDELAY) == 0) {
861 error = msleep(user_queue, &softc->mtx,
862 PRIBIO | PCATCH, "targrd", 0);
863 ccb_h = TAILQ_FIRST(user_queue);
864 user_descr = TAILQ_FIRST(abort_queue);
865 if (error != 0) {
866 if (error == ERESTART) {
867 continue;
868 } else {
869 TARG_UNLOCK(softc);
870 goto read_fail;
871 }
872 }
873 } else {
874 TARG_UNLOCK(softc);
875 return (EAGAIN);
876 }
877 }
878
879 /* Data is available so fill the user's buffer */
880 while (ccb_h != NULL) {
881 struct targ_cmd_descr *descr;
882
883 if (uio->uio_resid < sizeof(user_ccb))
884 break;
885 TAILQ_REMOVE(user_queue, ccb_h, periph_links.tqe);
886 TARG_UNLOCK(softc);
887 descr = (struct targ_cmd_descr *)ccb_h->targ_descr;
888 user_ccb = descr->user_ccb;
889 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
890 ("targread ccb %p (%p)\n", ccb_h, user_ccb));
891 error = targreturnccb(softc, (union ccb *)ccb_h);
892 if (error != 0)
893 goto read_fail;
894 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
895 if (error != 0)
896 goto read_fail;
897 read_len += sizeof(user_ccb);
898
899 TARG_LOCK(softc);
900 ccb_h = TAILQ_FIRST(user_queue);
901 }
902
903 /* Flush out any aborted descriptors */
904 while (user_descr != NULL) {
905 if (uio->uio_resid < sizeof(user_ccb))
906 break;
907 TAILQ_REMOVE(abort_queue, user_descr, tqe);
908 TARG_UNLOCK(softc);
909 user_ccb = user_descr->user_ccb;
910 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
911 ("targread aborted descr %p (%p)\n",
912 user_descr, user_ccb));
913 suword(&user_ccb->ccb_h.status, CAM_REQ_ABORTED);
914 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
915 if (error != 0)
916 goto read_fail;
917 read_len += sizeof(user_ccb);
918
919 TARG_LOCK(softc);
920 user_descr = TAILQ_FIRST(abort_queue);
921 }
922 TARG_UNLOCK(softc);
923
924 /*
925 * If we've successfully read some amount of data, don't report an
926 * error. If the error is persistent, it will be reported on the
927 * next read().
928 */
929 if (read_len == 0 && uio->uio_resid != 0)
930 error = ENOSPC;
931
932read_fail:
933 return (error);
934}
935
936/* Copy completed ccb back to the user */
937static int
938targreturnccb(struct targ_softc *softc, union ccb *ccb)
939{
940 struct targ_cmd_descr *descr;
941 struct ccb_hdr *u_ccbh;
942 size_t ccb_len;
943 int error;
944
945 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targreturnccb %p\n", ccb));
946 descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
947 u_ccbh = &descr->user_ccb->ccb_h;
948
949 /* Copy out the central portion of the ccb_hdr */
950 copyout(&ccb->ccb_h.retry_count, &u_ccbh->retry_count,
951 offsetof(struct ccb_hdr, periph_priv) -
952 offsetof(struct ccb_hdr, retry_count));
953
954 /* Copy out the rest of the ccb (after the ccb_hdr) */
955 ccb_len = targccblen(ccb->ccb_h.func_code) - sizeof(struct ccb_hdr);
956 if (descr->mapinfo.num_bufs_used != 0)
957 cam_periph_unmapmem(ccb, &descr->mapinfo);
958 error = copyout(&ccb->ccb_h + 1, u_ccbh + 1, ccb_len);
959 if (error != 0) {
960 xpt_print_path(softc->path);
961 printf("targreturnccb - CCB copyout failed (%d)\n",
962 error);
963 }
964 /* Free CCB or send back to devq. */
965 targfreeccb(softc, ccb);
966
967 return (error);
968}
969
970static union ccb *
971targgetccb(struct targ_softc *softc, xpt_opcode type, int priority)
972{
973 union ccb *ccb;
974 int ccb_len;
975
976 ccb_len = targccblen(type);
977 MALLOC(ccb, union ccb *, ccb_len, M_TARG, M_WAITOK);
978 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("getccb %p\n", ccb));
979
980 xpt_setup_ccb(&ccb->ccb_h, softc->path, priority);
981 ccb->ccb_h.func_code = type;
982 ccb->ccb_h.cbfcnp = targdone;
983 ccb->ccb_h.targ_descr = targgetdescr(softc);
984 return (ccb);
985}
986
987static void
988targfreeccb(struct targ_softc *softc, union ccb *ccb)
989{
990 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("targfreeccb descr %p and\n",
991 ccb->ccb_h.targ_descr));
992 FREE(ccb->ccb_h.targ_descr, M_TARG);
993
994 switch (ccb->ccb_h.func_code) {
995 case XPT_ACCEPT_TARGET_IO:
996 case XPT_IMMED_NOTIFY:
997 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb));
998 FREE(ccb, M_TARG);
999 break;
1000 default:
1001 /* Send back CCB if we got it from the periph */
1002 if (XPT_FC_IS_QUEUED(ccb)) {
1003 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH,
1004 ("returning queued ccb %p\n", ccb));
1005 xpt_release_ccb(ccb);
1006 } else {
1007 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH,
1008 ("freeing ccb %p\n", ccb));
1009 FREE(ccb, M_TARG);
1010 }
1011 break;
1012 }
1013}
1014
1015static struct targ_cmd_descr *
1016targgetdescr(struct targ_softc *softc)
1017{
1018 struct targ_cmd_descr *descr;
1019
1020 MALLOC(descr, struct targ_cmd_descr *, sizeof(*descr), M_TARG,
1021 M_WAITOK);
1022 descr->mapinfo.num_bufs_used = 0;
1023 return (descr);
1024}
1025
1026static void
1027targinit(void)
1028{
1029 mtx_init(&targ_mtx, "targ global", NULL, MTX_DEF);
1030 EVENTHANDLER_REGISTER(dev_clone, targclone, 0, 1000);
1031}
1032
1033static void
1034targclone(void *arg, char *name, int namelen, struct cdev **dev)
1035{
1036 int u;
1037
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/conf.h>
37#include <sys/malloc.h>
38#include <sys/poll.h>
39#include <sys/vnode.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/devicestat.h>
43
44#include <cam/cam.h>
45#include <cam/cam_ccb.h>
46#include <cam/cam_periph.h>
47#include <cam/cam_xpt_periph.h>
48#include <cam/scsi/scsi_targetio.h>
49
50/* Transaction information attached to each CCB sent by the user */
51struct targ_cmd_descr {
52 struct cam_periph_map_info mapinfo;
53 TAILQ_ENTRY(targ_cmd_descr) tqe;
54 union ccb *user_ccb;
55 int priority;
56 int func_code;
57};
58
59/* Offset into the private CCB area for storing our descriptor */
60#define targ_descr periph_priv.entries[1].ptr
61
62TAILQ_HEAD(descr_queue, targ_cmd_descr);
63
64typedef enum {
65 TARG_STATE_RESV = 0x00, /* Invalid state */
66 TARG_STATE_OPENED = 0x01, /* Device opened, softc initialized */
67 TARG_STATE_LUN_ENABLED = 0x02 /* Device enabled for a path */
68} targ_state;
69
70/* Per-instance device software context */
71struct targ_softc {
72 /* CCBs (CTIOs, ATIOs, INOTs) pending on the controller */
73 struct ccb_queue pending_ccb_queue;
74
75 /* Command descriptors awaiting CTIO resources from the XPT */
76 struct descr_queue work_queue;
77
78 /* Command descriptors that have been aborted back to the user. */
79 struct descr_queue abort_queue;
80
81 /*
82 * Queue of CCBs that have been copied out to userland, but our
83 * userland daemon has not yet seen.
84 */
85 struct ccb_queue user_ccb_queue;
86
87 struct cam_periph *periph;
88 struct cam_path *path;
89 targ_state state;
90 struct selinfo read_select;
91 struct devstat device_stats;
92 struct mtx mtx;
93};
94
95static d_open_t targopen;
96static d_close_t targclose;
97static d_read_t targread;
98static d_write_t targwrite;
99static d_ioctl_t targioctl;
100static d_poll_t targpoll;
101static d_kqfilter_t targkqfilter;
102static void targreadfiltdetach(struct knote *kn);
103static int targreadfilt(struct knote *kn, long hint);
104static struct filterops targread_filtops =
105 { 1, NULL, targreadfiltdetach, targreadfilt };
106
107static struct cdevsw targ_cdevsw = {
108 .d_version = D_VERSION,
109 .d_flags = D_NEEDGIANT,
110 .d_open = targopen,
111 .d_close = targclose,
112 .d_read = targread,
113 .d_write = targwrite,
114 .d_ioctl = targioctl,
115 .d_poll = targpoll,
116 .d_name = "targ",
117 .d_kqfilter = targkqfilter
118};
119
120static cam_status targendislun(struct cam_path *path, int enable,
121 int grp6_len, int grp7_len);
122static cam_status targenable(struct targ_softc *softc,
123 struct cam_path *path,
124 int grp6_len, int grp7_len);
125static cam_status targdisable(struct targ_softc *softc);
126static periph_ctor_t targctor;
127static periph_dtor_t targdtor;
128static periph_start_t targstart;
129static int targusermerge(struct targ_softc *softc,
130 struct targ_cmd_descr *descr,
131 union ccb *ccb);
132static int targsendccb(struct targ_softc *softc, union ccb *ccb,
133 struct targ_cmd_descr *descr);
134static void targdone(struct cam_periph *periph,
135 union ccb *done_ccb);
136static int targreturnccb(struct targ_softc *softc,
137 union ccb *ccb);
138static union ccb * targgetccb(struct targ_softc *softc, xpt_opcode type,
139 int priority);
140static void targfreeccb(struct targ_softc *softc, union ccb *ccb);
141static struct targ_cmd_descr *
142 targgetdescr(struct targ_softc *softc);
143static periph_init_t targinit;
144static void targclone(void *arg, char *name, int namelen,
145 struct cdev **dev);
146static void targasync(void *callback_arg, u_int32_t code,
147 struct cam_path *path, void *arg);
148static void abort_all_pending(struct targ_softc *softc);
149static void notify_user(struct targ_softc *softc);
150static int targcamstatus(cam_status status);
151static size_t targccblen(xpt_opcode func_code);
152
153static struct periph_driver targdriver =
154{
155 targinit, "targ",
156 TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
157};
158PERIPHDRIVER_DECLARE(targ, targdriver);
159
160static struct mtx targ_mtx;
161#define TARG_LOCK(softc) mtx_lock(&(softc)->mtx)
162#define TARG_UNLOCK(softc) mtx_unlock(&(softc)->mtx)
163
164static MALLOC_DEFINE(M_TARG, "TARG", "TARG data");
165
166/* Create softc and initialize it. Only one proc can open each targ device. */
167static int
168targopen(struct cdev *dev, int flags, int fmt, struct thread *td)
169{
170 struct targ_softc *softc;
171
172 mtx_lock(&targ_mtx);
173 if (dev->si_drv1 != 0) {
174 mtx_unlock(&targ_mtx);
175 return (EBUSY);
176 }
177
178 /* Mark device busy before any potentially blocking operations */
179 dev->si_drv1 = (void *)~0;
180 mtx_unlock(&targ_mtx);
181
182 /* Create the targ device, allocate its softc, initialize it */
183 if ((dev->si_flags & SI_NAMED) == 0) {
184 make_dev(&targ_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
185 "targ%d", dev2unit(dev));
186 }
187 MALLOC(softc, struct targ_softc *, sizeof(*softc), M_TARG,
188 M_WAITOK | M_ZERO);
189 dev->si_drv1 = softc;
190 softc->state = TARG_STATE_OPENED;
191 softc->periph = NULL;
192 softc->path = NULL;
193 mtx_init(&softc->mtx, devtoname(dev), "targ cdev", MTX_DEF);
194
195 TAILQ_INIT(&softc->pending_ccb_queue);
196 TAILQ_INIT(&softc->work_queue);
197 TAILQ_INIT(&softc->abort_queue);
198 TAILQ_INIT(&softc->user_ccb_queue);
199
200 return (0);
201}
202
203/* Disable LUN if enabled and teardown softc */
204static int
205targclose(struct cdev *dev, int flag, int fmt, struct thread *td)
206{
207 struct targ_softc *softc;
208 int error;
209
210 softc = (struct targ_softc *)dev->si_drv1;
211 TARG_LOCK(softc);
212 error = targdisable(softc);
213 if (error == CAM_REQ_CMP) {
214 dev->si_drv1 = 0;
215 mtx_lock(&targ_mtx);
216 if (softc->periph != NULL) {
217 cam_periph_invalidate(softc->periph);
218 softc->periph = NULL;
219 }
220 mtx_unlock(&targ_mtx);
221 TARG_UNLOCK(softc);
222 mtx_destroy(&softc->mtx);
223 destroy_dev(dev);
224 FREE(softc, M_TARG);
225 } else {
226 TARG_UNLOCK(softc);
227 }
228 return (error);
229}
230
231/* Enable/disable LUNs, set debugging level */
232static int
233targioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
234{
235 struct targ_softc *softc;
236 cam_status status;
237
238 softc = (struct targ_softc *)dev->si_drv1;
239
240 switch (cmd) {
241 case TARGIOCENABLE:
242 {
243 struct ioc_enable_lun *new_lun;
244 struct cam_path *path;
245
246 new_lun = (struct ioc_enable_lun *)addr;
247 status = xpt_create_path(&path, /*periph*/NULL,
248 new_lun->path_id,
249 new_lun->target_id,
250 new_lun->lun_id);
251 if (status != CAM_REQ_CMP) {
252 printf("Couldn't create path, status %#x\n", status);
253 break;
254 }
255 TARG_LOCK(softc);
256 status = targenable(softc, path, new_lun->grp6_len,
257 new_lun->grp7_len);
258 TARG_UNLOCK(softc);
259 xpt_free_path(path);
260 break;
261 }
262 case TARGIOCDISABLE:
263 TARG_LOCK(softc);
264 status = targdisable(softc);
265 TARG_UNLOCK(softc);
266 break;
267 case TARGIOCDEBUG:
268 {
269#ifdef CAMDEBUG
270 struct ccb_debug cdbg;
271
272 bzero(&cdbg, sizeof cdbg);
273 if (*((int *)addr) != 0)
274 cdbg.flags = CAM_DEBUG_PERIPH;
275 else
276 cdbg.flags = CAM_DEBUG_NONE;
277 xpt_setup_ccb(&cdbg.ccb_h, softc->path, /*priority*/0);
278 cdbg.ccb_h.func_code = XPT_DEBUG;
279 cdbg.ccb_h.cbfcnp = targdone;
280
281 /* If no periph available, disallow debugging changes */
282 TARG_LOCK(softc);
283 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) {
284 status = CAM_DEV_NOT_THERE;
285 TARG_UNLOCK(softc);
286 break;
287 }
288 xpt_action((union ccb *)&cdbg);
289 TARG_UNLOCK(softc);
290 status = cdbg.ccb_h.status & CAM_STATUS_MASK;
291#else
292 status = CAM_FUNC_NOTAVAIL;
293#endif
294 break;
295 }
296 default:
297 status = CAM_PROVIDE_FAIL;
298 break;
299 }
300
301 return (targcamstatus(status));
302}
303
304/* Writes are always ready, reads wait for user_ccb_queue or abort_queue */
305static int
306targpoll(struct cdev *dev, int poll_events, struct thread *td)
307{
308 struct targ_softc *softc;
309 int revents;
310
311 softc = (struct targ_softc *)dev->si_drv1;
312
313 /* Poll for write() is always ok. */
314 revents = poll_events & (POLLOUT | POLLWRNORM);
315 if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
316 /* Poll for read() depends on user and abort queues. */
317 TARG_LOCK(softc);
318 if (!TAILQ_EMPTY(&softc->user_ccb_queue) ||
319 !TAILQ_EMPTY(&softc->abort_queue)) {
320 revents |= poll_events & (POLLIN | POLLRDNORM);
321 }
322 /* Only sleep if the user didn't poll for write. */
323 if (revents == 0)
324 selrecord(td, &softc->read_select);
325 TARG_UNLOCK(softc);
326 }
327
328 return (revents);
329}
330
331static int
332targkqfilter(struct cdev *dev, struct knote *kn)
333{
334 struct targ_softc *softc;
335
336 softc = (struct targ_softc *)dev->si_drv1;
337 kn->kn_hook = (caddr_t)softc;
338 kn->kn_fop = &targread_filtops;
339 TARG_LOCK(softc);
340 SLIST_INSERT_HEAD(&softc->read_select.si_note, kn, kn_selnext);
341 TARG_UNLOCK(softc);
342 return (0);
343}
344
345static void
346targreadfiltdetach(struct knote *kn)
347{
348 struct targ_softc *softc;
349
350 softc = (struct targ_softc *)kn->kn_hook;
351 TARG_LOCK(softc);
352 SLIST_REMOVE(&softc->read_select.si_note, kn, knote, kn_selnext);
353 TARG_UNLOCK(softc);
354}
355
356/* Notify the user's kqueue when the user queue or abort queue gets a CCB */
357static int
358targreadfilt(struct knote *kn, long hint)
359{
360 struct targ_softc *softc;
361 int retval;
362
363 softc = (struct targ_softc *)kn->kn_hook;
364 TARG_LOCK(softc);
365 retval = !TAILQ_EMPTY(&softc->user_ccb_queue) ||
366 !TAILQ_EMPTY(&softc->abort_queue);
367 TARG_UNLOCK(softc);
368 return (retval);
369}
370
371/* Send the HBA the enable/disable message */
372static cam_status
373targendislun(struct cam_path *path, int enable, int grp6_len, int grp7_len)
374{
375 struct ccb_en_lun en_ccb;
376 cam_status status;
377
378 /* Tell the lun to begin answering selects */
379 xpt_setup_ccb(&en_ccb.ccb_h, path, /*priority*/1);
380 en_ccb.ccb_h.func_code = XPT_EN_LUN;
381 /* Don't need support for any vendor specific commands */
382 en_ccb.grp6_len = grp6_len;
383 en_ccb.grp7_len = grp7_len;
384 en_ccb.enable = enable ? 1 : 0;
385 xpt_action((union ccb *)&en_ccb);
386 status = en_ccb.ccb_h.status & CAM_STATUS_MASK;
387 if (status != CAM_REQ_CMP) {
388 xpt_print_path(path);
389 printf("%sable lun CCB rejected, status %#x\n",
390 enable ? "en" : "dis", status);
391 }
392 return (status);
393}
394
395/* Enable target mode on a LUN, given its path */
396static cam_status
397targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len,
398 int grp7_len)
399{
400 struct cam_periph *periph;
401 struct ccb_pathinq cpi;
402 cam_status status;
403
404 if ((softc->state & TARG_STATE_LUN_ENABLED) != 0)
405 return (CAM_LUN_ALRDY_ENA);
406
407 /* Make sure SIM supports target mode */
408 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
409 cpi.ccb_h.func_code = XPT_PATH_INQ;
410 xpt_action((union ccb *)&cpi);
411 status = cpi.ccb_h.status & CAM_STATUS_MASK;
412 if (status != CAM_REQ_CMP) {
413 printf("pathinq failed, status %#x\n", status);
414 goto enable_fail;
415 }
416 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
417 printf("controller does not support target mode\n");
418 status = CAM_FUNC_NOTAVAIL;
419 goto enable_fail;
420 }
421
422 /* Destroy any periph on our path if it is disabled */
423 mtx_lock(&targ_mtx);
424 periph = cam_periph_find(path, "targ");
425 if (periph != NULL) {
426 struct targ_softc *del_softc;
427
428 del_softc = (struct targ_softc *)periph->softc;
429 if ((del_softc->state & TARG_STATE_LUN_ENABLED) == 0) {
430 cam_periph_invalidate(del_softc->periph);
431 del_softc->periph = NULL;
432 } else {
433 printf("Requested path still in use by targ%d\n",
434 periph->unit_number);
435 mtx_unlock(&targ_mtx);
436 status = CAM_LUN_ALRDY_ENA;
437 goto enable_fail;
438 }
439 }
440
441 /* Create a periph instance attached to this path */
442 status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
443 "targ", CAM_PERIPH_BIO, path, targasync, 0, softc);
444 mtx_unlock(&targ_mtx);
445 if (status != CAM_REQ_CMP) {
446 printf("cam_periph_alloc failed, status %#x\n", status);
447 goto enable_fail;
448 }
449
450 /* Ensure that the periph now exists. */
451 if (cam_periph_find(path, "targ") == NULL) {
452 panic("targenable: succeeded but no periph?");
453 /* NOTREACHED */
454 }
455
456 /* Send the enable lun message */
457 status = targendislun(path, /*enable*/1, grp6_len, grp7_len);
458 if (status != CAM_REQ_CMP) {
459 printf("enable lun failed, status %#x\n", status);
460 goto enable_fail;
461 }
462 softc->state |= TARG_STATE_LUN_ENABLED;
463
464enable_fail:
465 return (status);
466}
467
468/* Disable this softc's target instance if enabled */
469static cam_status
470targdisable(struct targ_softc *softc)
471{
472 cam_status status;
473
474 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0)
475 return (CAM_REQ_CMP);
476
477 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targdisable\n"));
478
479 /* Abort any ccbs pending on the controller */
480 abort_all_pending(softc);
481
482 /* Disable this lun */
483 status = targendislun(softc->path, /*enable*/0,
484 /*grp6_len*/0, /*grp7_len*/0);
485 if (status == CAM_REQ_CMP)
486 softc->state &= ~TARG_STATE_LUN_ENABLED;
487 else
488 printf("Disable lun failed, status %#x\n", status);
489
490 return (status);
491}
492
493/* Initialize a periph (called from cam_periph_alloc) */
494static cam_status
495targctor(struct cam_periph *periph, void *arg)
496{
497 struct targ_softc *softc;
498
499 /* Store pointer to softc for periph-driven routines */
500 softc = (struct targ_softc *)arg;
501 periph->softc = softc;
502 softc->periph = periph;
503 softc->path = periph->path;
504 return (CAM_REQ_CMP);
505}
506
507static void
508targdtor(struct cam_periph *periph)
509{
510 struct targ_softc *softc;
511 struct ccb_hdr *ccb_h;
512 struct targ_cmd_descr *descr;
513
514 softc = (struct targ_softc *)periph->softc;
515
516 /*
517 * targdisable() aborts CCBs back to the user and leaves them
518 * on user_ccb_queue and abort_queue in case the user is still
519 * interested in them. We free them now.
520 */
521 while ((ccb_h = TAILQ_FIRST(&softc->user_ccb_queue)) != NULL) {
522 TAILQ_REMOVE(&softc->user_ccb_queue, ccb_h, periph_links.tqe);
523 targfreeccb(softc, (union ccb *)ccb_h);
524 }
525 while ((descr = TAILQ_FIRST(&softc->abort_queue)) != NULL) {
526 TAILQ_REMOVE(&softc->abort_queue, descr, tqe);
527 FREE(descr, M_TARG);
528 }
529
530 softc->periph = NULL;
531 softc->path = NULL;
532 periph->softc = NULL;
533}
534
535/* Receive CCBs from user mode proc and send them to the HBA */
536static int
537targwrite(struct cdev *dev, struct uio *uio, int ioflag)
538{
539 union ccb *user_ccb;
540 struct targ_softc *softc;
541 struct targ_cmd_descr *descr;
542 int write_len, error;
543 int func_code, priority;
544
545 softc = (struct targ_softc *)dev->si_drv1;
546 write_len = error = 0;
547 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
548 ("write - uio_resid %d\n", uio->uio_resid));
549 while (uio->uio_resid >= sizeof(user_ccb) && error == 0) {
550 union ccb *ccb;
551 int error;
552
553 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
554 if (error != 0) {
555 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
556 ("write - uiomove failed (%d)\n", error));
557 break;
558 }
559 priority = fuword32(&user_ccb->ccb_h.pinfo.priority);
560 if (priority == -1) {
561 error = EINVAL;
562 break;
563 }
564 func_code = fuword32(&user_ccb->ccb_h.func_code);
565 switch (func_code) {
566 case XPT_ACCEPT_TARGET_IO:
567 case XPT_IMMED_NOTIFY:
568 ccb = targgetccb(softc, func_code, priority);
569 descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
570 descr->user_ccb = user_ccb;
571 descr->func_code = func_code;
572 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
573 ("Sent ATIO/INOT (%p)\n", user_ccb));
574 xpt_action(ccb);
575 TARG_LOCK(softc);
576 TAILQ_INSERT_TAIL(&softc->pending_ccb_queue,
577 &ccb->ccb_h,
578 periph_links.tqe);
579 TARG_UNLOCK(softc);
580 break;
581 default:
582 if ((func_code & XPT_FC_QUEUED) != 0) {
583 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
584 ("Sending queued ccb %#x (%p)\n",
585 func_code, user_ccb));
586 descr = targgetdescr(softc);
587 descr->user_ccb = user_ccb;
588 descr->priority = priority;
589 descr->func_code = func_code;
590 TARG_LOCK(softc);
591 TAILQ_INSERT_TAIL(&softc->work_queue,
592 descr, tqe);
593 TARG_UNLOCK(softc);
594 xpt_schedule(softc->periph, priority);
595 } else {
596 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
597 ("Sending inline ccb %#x (%p)\n",
598 func_code, user_ccb));
599 ccb = targgetccb(softc, func_code, priority);
600 descr = (struct targ_cmd_descr *)
601 ccb->ccb_h.targ_descr;
602 descr->user_ccb = user_ccb;
603 descr->priority = priority;
604 descr->func_code = func_code;
605 if (targusermerge(softc, descr, ccb) != EFAULT)
606 targsendccb(softc, ccb, descr);
607 targreturnccb(softc, ccb);
608 }
609 break;
610 }
611 write_len += sizeof(user_ccb);
612 }
613
614 /*
615 * If we've successfully taken in some amount of
616 * data, return success for that data first. If
617 * an error is persistent, it will be reported
618 * on the next write.
619 */
620 if (error != 0 && write_len == 0)
621 return (error);
622 if (write_len == 0 && uio->uio_resid != 0)
623 return (ENOSPC);
624 return (0);
625}
626
627/* Process requests (descrs) via the periph-supplied CCBs */
628static void
629targstart(struct cam_periph *periph, union ccb *start_ccb)
630{
631 struct targ_softc *softc;
632 struct targ_cmd_descr *descr, *next_descr;
633 int error;
634
635 softc = (struct targ_softc *)periph->softc;
636 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targstart %p\n", start_ccb));
637
638 TARG_LOCK(softc);
639 descr = TAILQ_FIRST(&softc->work_queue);
640 if (descr == NULL) {
641 TARG_UNLOCK(softc);
642 xpt_release_ccb(start_ccb);
643 } else {
644 TAILQ_REMOVE(&softc->work_queue, descr, tqe);
645 next_descr = TAILQ_FIRST(&softc->work_queue);
646 TARG_UNLOCK(softc);
647
648 /* Initiate a transaction using the descr and supplied CCB */
649 error = targusermerge(softc, descr, start_ccb);
650 if (error == 0)
651 error = targsendccb(softc, start_ccb, descr);
652 if (error != 0) {
653 xpt_print_path(periph->path);
654 printf("targsendccb failed, err %d\n", error);
655 xpt_release_ccb(start_ccb);
656 suword(&descr->user_ccb->ccb_h.status,
657 CAM_REQ_CMP_ERR);
658 TARG_LOCK(softc);
659 TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe);
660 TARG_UNLOCK(softc);
661 notify_user(softc);
662 }
663
664 /* If we have more work to do, stay scheduled */
665 if (next_descr != NULL)
666 xpt_schedule(periph, next_descr->priority);
667 }
668}
669
670static int
671targusermerge(struct targ_softc *softc, struct targ_cmd_descr *descr,
672 union ccb *ccb)
673{
674 struct ccb_hdr *u_ccbh, *k_ccbh;
675 size_t ccb_len;
676 int error;
677
678 u_ccbh = &descr->user_ccb->ccb_h;
679 k_ccbh = &ccb->ccb_h;
680
681 /*
682 * There are some fields in the CCB header that need to be
683 * preserved, the rest we get from the user ccb. (See xpt_merge_ccb)
684 */
685 xpt_setup_ccb(k_ccbh, softc->path, descr->priority);
686 k_ccbh->retry_count = fuword32(&u_ccbh->retry_count);
687 k_ccbh->func_code = descr->func_code;
688 k_ccbh->flags = fuword32(&u_ccbh->flags);
689 k_ccbh->timeout = fuword32(&u_ccbh->timeout);
690 ccb_len = targccblen(k_ccbh->func_code) - sizeof(struct ccb_hdr);
691 error = copyin(u_ccbh + 1, k_ccbh + 1, ccb_len);
692 if (error != 0) {
693 k_ccbh->status = CAM_REQ_CMP_ERR;
694 return (error);
695 }
696
697 /* Translate usermode abort_ccb pointer to its kernel counterpart */
698 if (k_ccbh->func_code == XPT_ABORT) {
699 struct ccb_abort *cab;
700 struct ccb_hdr *ccb_h;
701
702 cab = (struct ccb_abort *)ccb;
703 TARG_LOCK(softc);
704 TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue,
705 periph_links.tqe) {
706 struct targ_cmd_descr *ab_descr;
707
708 ab_descr = (struct targ_cmd_descr *)ccb_h->targ_descr;
709 if (ab_descr->user_ccb == cab->abort_ccb) {
710 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
711 ("Changing abort for %p to %p\n",
712 cab->abort_ccb, ccb_h));
713 cab->abort_ccb = (union ccb *)ccb_h;
714 break;
715 }
716 }
717 TARG_UNLOCK(softc);
718 /* CCB not found, set appropriate status */
719 if (ccb_h == NULL) {
720 k_ccbh->status = CAM_PATH_INVALID;
721 error = ESRCH;
722 }
723 }
724
725 return (error);
726}
727
728/* Build and send a kernel CCB formed from descr->user_ccb */
729static int
730targsendccb(struct targ_softc *softc, union ccb *ccb,
731 struct targ_cmd_descr *descr)
732{
733 struct cam_periph_map_info *mapinfo;
734 struct ccb_hdr *ccb_h;
735 int error;
736
737 ccb_h = &ccb->ccb_h;
738 mapinfo = &descr->mapinfo;
739 mapinfo->num_bufs_used = 0;
740
741 /*
742 * There's no way for the user to have a completion
743 * function, so we put our own completion function in here.
744 * We also stash in a reference to our descriptor so targreturnccb()
745 * can find our mapping info.
746 */
747 ccb_h->cbfcnp = targdone;
748 ccb_h->targ_descr = descr;
749
750 /*
751 * We only attempt to map the user memory into kernel space
752 * if they haven't passed in a physical memory pointer,
753 * and if there is actually an I/O operation to perform.
754 * Right now cam_periph_mapmem() only supports SCSI and device
755 * match CCBs. For the SCSI CCBs, we only pass the CCB in if
756 * there's actually data to map. cam_periph_mapmem() will do the
757 * right thing, even if there isn't data to map, but since CCBs
758 * without data are a reasonably common occurance (e.g. test unit
759 * ready), it will save a few cycles if we check for it here.
760 */
761 if (((ccb_h->flags & CAM_DATA_PHYS) == 0)
762 && (((ccb_h->func_code == XPT_CONT_TARGET_IO)
763 && ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE))
764 || (ccb_h->func_code == XPT_DEV_MATCH))) {
765
766 error = cam_periph_mapmem(ccb, mapinfo);
767
768 /*
769 * cam_periph_mapmem returned an error, we can't continue.
770 * Return the error to the user.
771 */
772 if (error) {
773 ccb_h->status = CAM_REQ_CMP_ERR;
774 mapinfo->num_bufs_used = 0;
775 return (error);
776 }
777 }
778
779 /*
780 * Once queued on the pending CCB list, this CCB will be protected
781 * by our error recovery handler.
782 */
783 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("sendccb %p\n", ccb));
784 if (XPT_FC_IS_QUEUED(ccb)) {
785 TARG_LOCK(softc);
786 TAILQ_INSERT_TAIL(&softc->pending_ccb_queue, ccb_h,
787 periph_links.tqe);
788 TARG_UNLOCK(softc);
789 }
790 xpt_action(ccb);
791
792 return (0);
793}
794
795/* Completion routine for CCBs (called at splsoftcam) */
796static void
797targdone(struct cam_periph *periph, union ccb *done_ccb)
798{
799 struct targ_softc *softc;
800 cam_status status;
801
802 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("targdone %p\n", done_ccb));
803 softc = (struct targ_softc *)periph->softc;
804 TARG_LOCK(softc);
805 TAILQ_REMOVE(&softc->pending_ccb_queue, &done_ccb->ccb_h,
806 periph_links.tqe);
807 status = done_ccb->ccb_h.status & CAM_STATUS_MASK;
808
809 /* If we're no longer enabled, throw away CCB */
810 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) {
811 targfreeccb(softc, done_ccb);
812 TARG_UNLOCK(softc);
813 return;
814 }
815 /* abort_all_pending() waits for pending queue to be empty */
816 if (TAILQ_EMPTY(&softc->pending_ccb_queue))
817 wakeup(&softc->pending_ccb_queue);
818
819 switch (done_ccb->ccb_h.func_code) {
820 /* All FC_*_QUEUED CCBs go back to userland */
821 case XPT_IMMED_NOTIFY:
822 case XPT_ACCEPT_TARGET_IO:
823 case XPT_CONT_TARGET_IO:
824 TAILQ_INSERT_TAIL(&softc->user_ccb_queue, &done_ccb->ccb_h,
825 periph_links.tqe);
826 TARG_UNLOCK(softc);
827 notify_user(softc);
828 break;
829 default:
830 panic("targdone: impossible xpt opcode %#x",
831 done_ccb->ccb_h.func_code);
832 /* NOTREACHED */
833 }
834}
835
836/* Return CCBs to the user from the user queue and abort queue */
837static int
838targread(struct cdev *dev, struct uio *uio, int ioflag)
839{
840 struct descr_queue *abort_queue;
841 struct targ_cmd_descr *user_descr;
842 struct targ_softc *softc;
843 struct ccb_queue *user_queue;
844 struct ccb_hdr *ccb_h;
845 union ccb *user_ccb;
846 int read_len, error;
847
848 error = 0;
849 read_len = 0;
850 softc = (struct targ_softc *)dev->si_drv1;
851 user_queue = &softc->user_ccb_queue;
852 abort_queue = &softc->abort_queue;
853 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread\n"));
854
855 /* If no data is available, wait or return immediately */
856 TARG_LOCK(softc);
857 ccb_h = TAILQ_FIRST(user_queue);
858 user_descr = TAILQ_FIRST(abort_queue);
859 while (ccb_h == NULL && user_descr == NULL) {
860 if ((ioflag & IO_NDELAY) == 0) {
861 error = msleep(user_queue, &softc->mtx,
862 PRIBIO | PCATCH, "targrd", 0);
863 ccb_h = TAILQ_FIRST(user_queue);
864 user_descr = TAILQ_FIRST(abort_queue);
865 if (error != 0) {
866 if (error == ERESTART) {
867 continue;
868 } else {
869 TARG_UNLOCK(softc);
870 goto read_fail;
871 }
872 }
873 } else {
874 TARG_UNLOCK(softc);
875 return (EAGAIN);
876 }
877 }
878
879 /* Data is available so fill the user's buffer */
880 while (ccb_h != NULL) {
881 struct targ_cmd_descr *descr;
882
883 if (uio->uio_resid < sizeof(user_ccb))
884 break;
885 TAILQ_REMOVE(user_queue, ccb_h, periph_links.tqe);
886 TARG_UNLOCK(softc);
887 descr = (struct targ_cmd_descr *)ccb_h->targ_descr;
888 user_ccb = descr->user_ccb;
889 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
890 ("targread ccb %p (%p)\n", ccb_h, user_ccb));
891 error = targreturnccb(softc, (union ccb *)ccb_h);
892 if (error != 0)
893 goto read_fail;
894 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
895 if (error != 0)
896 goto read_fail;
897 read_len += sizeof(user_ccb);
898
899 TARG_LOCK(softc);
900 ccb_h = TAILQ_FIRST(user_queue);
901 }
902
903 /* Flush out any aborted descriptors */
904 while (user_descr != NULL) {
905 if (uio->uio_resid < sizeof(user_ccb))
906 break;
907 TAILQ_REMOVE(abort_queue, user_descr, tqe);
908 TARG_UNLOCK(softc);
909 user_ccb = user_descr->user_ccb;
910 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
911 ("targread aborted descr %p (%p)\n",
912 user_descr, user_ccb));
913 suword(&user_ccb->ccb_h.status, CAM_REQ_ABORTED);
914 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
915 if (error != 0)
916 goto read_fail;
917 read_len += sizeof(user_ccb);
918
919 TARG_LOCK(softc);
920 user_descr = TAILQ_FIRST(abort_queue);
921 }
922 TARG_UNLOCK(softc);
923
924	/*
925	 * If we've successfully read some amount of data, don't report an
926	 * error; a persistent error will be reported on the next read().
927	 * ENOSPC means the buffer couldn't hold even one CCB pointer.
928	 */
929 if (read_len == 0 && uio->uio_resid != 0)
930 error = ENOSPC;
931
932read_fail:
933 return (error);
934}
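/*
 * Userland usage sketch (illustrative only; targ_fd and
 * process_completed_ccb() are hypothetical names):
 *
 *	union ccb *done[32];
 *	ssize_t n, i;
 *
 *	n = read(targ_fd, done, sizeof(done));
 *	for (i = 0; i < n / (ssize_t)sizeof(done[0]); i++)
 *		process_completed_ccb(done[i]);
 *
 * Each pointer returned refers to a CCB the user submitted earlier;
 * its contents have already been updated by targreturnccb() above.
 */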
935
936/* Copy completed ccb back to the user */
937static int
938targreturnccb(struct targ_softc *softc, union ccb *ccb)
939{
940 struct targ_cmd_descr *descr;
941 struct ccb_hdr *u_ccbh;
942 size_t ccb_len;
943 int error;
944
945 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targreturnccb %p\n", ccb));
946 descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
947 u_ccbh = &descr->user_ccb->ccb_h;
948
949 /* Copy out the central portion of the ccb_hdr */
950 copyout(&ccb->ccb_h.retry_count, &u_ccbh->retry_count,
951 offsetof(struct ccb_hdr, periph_priv) -
952 offsetof(struct ccb_hdr, retry_count));
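	/*
	 * The range copied above covers retry_count up to (but not
	 * including) periph_priv; the queue linkage at the front of the
	 * header and the fields from periph_priv onward are not written
	 * back to the user's CCB.
	 */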
953
954 /* Copy out the rest of the ccb (after the ccb_hdr) */
955 ccb_len = targccblen(ccb->ccb_h.func_code) - sizeof(struct ccb_hdr);
956 if (descr->mapinfo.num_bufs_used != 0)
957 cam_periph_unmapmem(ccb, &descr->mapinfo);
958 error = copyout(&ccb->ccb_h + 1, u_ccbh + 1, ccb_len);
959 if (error != 0) {
960 xpt_print_path(softc->path);
961 printf("targreturnccb - CCB copyout failed (%d)\n",
962 error);
963 }
964 /* Free CCB or send back to devq. */
965 targfreeccb(softc, ccb);
966
967 return (error);
968}
969
970static union ccb *
971targgetccb(struct targ_softc *softc, xpt_opcode type, int priority)
972{
973 union ccb *ccb;
974 int ccb_len;
975
976 ccb_len = targccblen(type);
977 MALLOC(ccb, union ccb *, ccb_len, M_TARG, M_WAITOK);
978 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("getccb %p\n", ccb));
979
980 xpt_setup_ccb(&ccb->ccb_h, softc->path, priority);
981 ccb->ccb_h.func_code = type;
982 ccb->ccb_h.cbfcnp = targdone;
983 ccb->ccb_h.targ_descr = targgetdescr(softc);
984 return (ccb);
985}
986
987static void
988targfreeccb(struct targ_softc *softc, union ccb *ccb)
989{
990 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("targfreeccb descr %p and\n",
991 ccb->ccb_h.targ_descr));
992 FREE(ccb->ccb_h.targ_descr, M_TARG);
993
994 switch (ccb->ccb_h.func_code) {
995 case XPT_ACCEPT_TARGET_IO:
996 case XPT_IMMED_NOTIFY:
997 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb));
998 FREE(ccb, M_TARG);
999 break;
1000 default:
1001 /* Send back CCB if we got it from the periph */
1002 if (XPT_FC_IS_QUEUED(ccb)) {
1003 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH,
1004 ("returning queued ccb %p\n", ccb));
1005 xpt_release_ccb(ccb);
1006 } else {
1007 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH,
1008 ("freeing ccb %p\n", ccb));
1009 FREE(ccb, M_TARG);
1010 }
1011 break;
1012 }
1013}
1014
1015static struct targ_cmd_descr *
1016targgetdescr(struct targ_softc *softc)
1017{
1018 struct targ_cmd_descr *descr;
1019
1020 MALLOC(descr, struct targ_cmd_descr *, sizeof(*descr), M_TARG,
1021 M_WAITOK);
1022 descr->mapinfo.num_bufs_used = 0;
1023 return (descr);
1024}
1025
1026static void
1027targinit(void)
1028{
1029 mtx_init(&targ_mtx, "targ global", NULL, MTX_DEF);
1030 EVENTHANDLER_REGISTER(dev_clone, targclone, 0, 1000);
1031}
1032
1033static void
1034targclone(void *arg, char *name, int namelen, struct cdev **dev)
1035{
1036 int u;
1037
1038 if (*dev != NODEV)
1038 if (*dev != NULL)
1039 return;
1040 if (dev_stdclone(name, NULL, "targ", &u) != 1)
1041 return;
1042 *dev = make_dev(&targ_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL,
1043 0600, "targ%d", u);
1044 (*dev)->si_flags |= SI_CHEAPCLONE;
1045}
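/*
 * Example (informational): the first lookup of /dev/targ2 runs this
 * dev_clone handler with name "targ2"; dev_stdclone() parses out the
 * unit (u == 2) and make_dev() then creates the node on demand.
 */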
1046
1047static void
1048targasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
1049{
1050 /* All events are handled in usermode by INOTs */
1051 panic("targasync() called, should be an INOT instead");
1052}
1053
1054/* Cancel all pending requests and CCBs awaiting work. */
1055static void
1056abort_all_pending(struct targ_softc *softc)
1057{
1058 struct targ_cmd_descr *descr;
1059 struct ccb_abort cab;
1060 struct ccb_hdr *ccb_h;
1061
1062 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("abort_all_pending\n"));
1063
1064 /* First abort the descriptors awaiting resources */
1065 while ((descr = TAILQ_FIRST(&softc->work_queue)) != NULL) {
1066 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
1067 ("Aborting descr from workq %p\n", descr));
1068 TAILQ_REMOVE(&softc->work_queue, descr, tqe);
1069 TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe);
1070 }
1071
1072 /*
1073 * Then abort all pending CCBs.
1074 * targdone() will return the aborted CCB via user_ccb_queue
1075 */
1076 xpt_setup_ccb(&cab.ccb_h, softc->path, /*priority*/0);
1077 cab.ccb_h.func_code = XPT_ABORT;
1078 cab.ccb_h.status = CAM_REQ_CMP_ERR;
1079 TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue, periph_links.tqe) {
1080 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
1081 ("Aborting pending CCB %p\n", ccb_h));
1082 cab.abort_ccb = (union ccb *)ccb_h;
1083 xpt_action((union ccb *)&cab);
1084 if (cab.ccb_h.status != CAM_REQ_CMP) {
1085 xpt_print_path(cab.ccb_h.path);
1086 printf("Unable to abort CCB, status %#x\n",
1087 cab.ccb_h.status);
1088 }
1089 }
1090
1091	/* If the last abort completed (CAM_REQ_CMP), wait for the queue to drain. */
1092 if (cab.ccb_h.status == CAM_REQ_CMP) {
1093 msleep(&softc->pending_ccb_queue, &softc->mtx,
1094 PRIBIO | PCATCH, "tgabrt", 0);
1095 }
1096
1097	/* If anything is waiting on the user or abort queues, wake up the user. */
1098 if (!TAILQ_EMPTY(&softc->user_ccb_queue)
1099 || !TAILQ_EMPTY(&softc->abort_queue)) {
1100 /*
1101	 * XXX KNOTE calls back into targreadfilt, causing a lock
1102	 * recursion, so unlock around the call.  This may open a race
1103	 * that lets a user submit another CCB after we have aborted
1104	 * all pending ones.  A better approach would be to mark the
1105	 * softc as dying under the lock and check for that in
1106	 * targstart().
1107 */
1108 TARG_UNLOCK(softc);
1109 notify_user(softc);
1110 TARG_LOCK(softc);
1111 }
1112}
1113
1114/* Notify the user that data is ready */
1115static void
1116notify_user(struct targ_softc *softc)
1117{
1118 /*
1119 * Notify users sleeping via poll(), kqueue(), and
1120 * blocking read().
1121 */
1122 selwakeuppri(&softc->read_select, PRIBIO);
1123 KNOTE(&softc->read_select.si_note, 0);
1124 wakeup(&softc->user_ccb_queue);
1125}
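/*
 * Userland sketch (illustrative; targ_fd is a hypothetical
 * descriptor): any of the mechanisms woken above can be used to wait
 * for completions, e.g.
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = targ_fd;
 *	pfd.events = POLLIN;
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN) != 0)
 *		... read(2) back the completed CCB pointers ...
 */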
1126
1127/* Convert CAM status to errno values */
1128static int
1129targcamstatus(cam_status status)
1130{
1131 switch (status & CAM_STATUS_MASK) {
1132 case CAM_REQ_CMP: /* CCB request completed without error */
1133 return (0);
1134 case CAM_REQ_INPROG: /* CCB request is in progress */
1135 return (EINPROGRESS);
1136 case CAM_REQ_CMP_ERR: /* CCB request completed with an error */
1137 return (EIO);
1138 case CAM_PROVIDE_FAIL: /* Unable to provide requested capability */
1139 return (ENOTTY);
1140 case CAM_FUNC_NOTAVAIL: /* The requested function is not available */
1141 return (ENOTSUP);
1142 case CAM_LUN_ALRDY_ENA: /* LUN is already enabled for target mode */
1143 return (EADDRINUSE);
1144 case CAM_PATH_INVALID: /* Supplied Path ID is invalid */
1145 case CAM_DEV_NOT_THERE: /* SCSI Device Not Installed/there */
1146 return (ENOENT);
1147 case CAM_REQ_ABORTED: /* CCB request aborted by the host */
1148 return (ECANCELED);
1149 case CAM_CMD_TIMEOUT: /* Command timeout */
1150 return (ETIMEDOUT);
1151 case CAM_REQUEUE_REQ: /* Requeue to preserve transaction ordering */
1152 return (EAGAIN);
1153 case CAM_REQ_INVALID: /* CCB request was invalid */
1154 return (EINVAL);
1155 case CAM_RESRC_UNAVAIL: /* Resource Unavailable */
1156 return (ENOMEM);
1157	case CAM_BUSY:		/* CAM subsystem is busy */
1158 case CAM_UA_ABORT: /* Unable to abort CCB request */
1159 return (EBUSY);
1160 default:
1161 return (ENXIO);
1162 }
1163}
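/*
 * Usage sketch: a caller turning a completed CCB into an errno for
 * userland can pass the raw status field directly, e.g.
 *
 *	error = targcamstatus(ccb->ccb_h.status);
 *
 * since the switch above masks with CAM_STATUS_MASK itself.
 */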
1164
1165static size_t
1166targccblen(xpt_opcode func_code)
1167{
1168 int len;
1169
1170 /* Codes we expect to see as a target */
1171 switch (func_code) {
1172 case XPT_CONT_TARGET_IO:
1173 case XPT_SCSI_IO:
1174 len = sizeof(struct ccb_scsiio);
1175 break;
1176 case XPT_ACCEPT_TARGET_IO:
1177 len = sizeof(struct ccb_accept_tio);
1178 break;
1179 case XPT_IMMED_NOTIFY:
1180 len = sizeof(struct ccb_immed_notify);
1181 break;
1182 case XPT_REL_SIMQ:
1183 len = sizeof(struct ccb_relsim);
1184 break;
1185 case XPT_PATH_INQ:
1186 len = sizeof(struct ccb_pathinq);
1187 break;
1188 case XPT_DEBUG:
1189 len = sizeof(struct ccb_debug);
1190 break;
1191 case XPT_ABORT:
1192 len = sizeof(struct ccb_abort);
1193 break;
1194 case XPT_EN_LUN:
1195 len = sizeof(struct ccb_en_lun);
1196 break;
1197 default:
1198 len = sizeof(union ccb);
1199 break;
1200 }
1201
1202 return (len);
1203}