/*
 * (cvsweb diff-viewer navigation residue removed: "Deleted Added",
 * "sdiff udiff text old (47413) new (47625)", "full compact" --
 * not part of the original source file.)
 */
1/*
2 * Implementation of a simple Target Mode SCSI Proccessor Target driver for CAM.
3 *
4 * Copyright (c) 1998, 1999 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $Id: scsi_target.c,v 1.12 1999/05/22 22:00:24 gibbs Exp $
29 */
30#include <stddef.h> /* For offsetof */
31
32#include <sys/param.h>
33#include <sys/queue.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/types.h>
37#include <sys/buf.h>
38#include <sys/conf.h>
39#include <sys/devicestat.h>
40#include <sys/malloc.h>
41#include <sys/poll.h>
42#include <sys/select.h> /* For struct selinfo. */
43#include <sys/uio.h>
44
45#include <cam/cam.h>
46#include <cam/cam_ccb.h>
47#include <cam/cam_extend.h>
48#include <cam/cam_periph.h>
49#include <cam/cam_queue.h>
50#include <cam/cam_xpt_periph.h>
51#include <cam/cam_debug.h>
52
53#include <cam/scsi/scsi_all.h>
54#include <cam/scsi/scsi_pt.h>
55#include <cam/scsi/scsi_targetio.h>
56#include <cam/scsi/scsi_message.h>
57
/*
 * Overall state of a driver instance:
 *   NORMAL    - accepting and processing commands.
 *   EXCEPTION - an exception is latched in softc->exceptions; queue
 *               processing stops until the user clears it via
 *               TARGIOCCLEAREXCEPTION.
 *   TEARDOWN  - instance is being destroyed (set by targdtor).
 */
typedef enum {
	TARG_STATE_NORMAL,
	TARG_STATE_EXCEPTION,
	TARG_STATE_TEARDOWN
} targ_state;
63
/*
 * Per-instance flag bits:
 *   SEND_EOF    - a zero-length read() queued an EOF for the next SEND.
 *   RECEIVE_EOF - a zero-length write() queued an EOF for the next RECEIVE.
 *   LUN_ENABLED - targenlun() successfully enabled target mode.
 */
typedef enum {
	TARG_FLAG_NONE = 0x00,
	TARG_FLAG_SEND_EOF = 0x01,
	TARG_FLAG_RECEIVE_EOF = 0x02,
	TARG_FLAG_LUN_ENABLED = 0x04
} targ_flags;
70
/*
 * Tags stored in a CCB's ccb_type private field so targdone() can tell
 * work-queue CCBs from "waiting" (immediate-priority) CCBs.
 */
typedef enum {
	TARG_CCB_WORKQ,
	TARG_CCB_WAITING
} targ_ccb_types;
75
/* Pool sizes and limits for this driver instance. */
#define MAX_ACCEPT	16	/* Accept-target-I/O CCBs queued to the SIM */
#define MAX_IMMEDIATE	16	/* Immediate-notify CCBs queued to the SIM */
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
#define MAX_INITIATORS	16	/* XXX More for Fibre-Channel */
80
/*
 * Local minimum macro.  Arguments are fully parenthesized so that
 * expressions with lower-precedence operators (e.g. MIN(x | y, z))
 * expand correctly; the old form compared "a > b" unparenthesized.
 * Arguments are still evaluated twice -- no side effects, please.
 */
#define MIN(a, b) (((a) > (b)) ? (b) : (a))
82
/* Reserved minor number for the control device (alloc/free units only). */
#define TARG_CONTROL_UNIT 0xffff00ff
#define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)

/* Offsets into our private CCB area for storing accept information */
#define ccb_type	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1
92
/* Generic queue of CCB headers, linked through periph_links.tqe. */
TAILQ_HEAD(ccb_queue, ccb_hdr);

/* Per-instance driver state, hung off periph->softc. */
struct targ_softc {
	struct ccb_queue pending_queue;		/* in-flight CCBs (see targrunqueue) */
	struct ccb_queue work_queue;		/* CCBs ready for targstart() */
	struct ccb_queue snd_ccb_queue;		/* SEND ATIOs awaiting read() data */
	struct ccb_queue rcv_ccb_queue;		/* RECEIVE ATIOs awaiting write() data */
	struct ccb_queue unknown_atio_queue;	/* commands we did not recognize */
	struct buf_queue_head snd_buf_queue;	/* user read() buffers */
	struct buf_queue_head rcv_buf_queue;	/* user write() buffers */
	struct devstat device_stats;
	struct selinfo snd_select;		/* poll() waiters for POLLIN */
	struct selinfo rcv_select;		/* poll() waiters for POLLOUT */
	targ_state state;
	targ_flags flags;
	targ_exception exceptions;		/* latched exception bits for user */
	u_int init_level;			/* how far targctor() got (for teardown) */
	u_int inq_data_len;
	struct scsi_inquiry_data *inq_data;	/* canned INQUIRY response */
	struct ccb_accept_tio *accept_tio_list;	/* all ATIOs queued to the SIM */
	struct ccb_hdr_slist immed_notify_slist; /* all INOTs queued to the SIM */
	struct initiator_state istate[MAX_INITIATORS];
};
116
/*
 * Per-ATIO command descriptor, stored in the CCB's ccb_descr private
 * field.  Tracks the data transfer state for one target command.
 */
struct targ_cmd_desc {
	struct ccb_accept_tio* atio_link; /* next ATIO on accept_tio_list */
	u_int	data_resid;	/* How much left to transfer */
	u_int	data_increment;	/* Amount to send before next disconnect */
	void*	data;		/* The data. Can be from backing_store or not */
	void*	backing_store;	/* Backing store allocated for this descriptor*/
	struct	buf *bp;	/* Buffer for this transfer */
	u_int	max_size;	/* Size of backing_store */
	u_int32_t timeout;
	u_int8_t status;	/* Status to return to initiator */
};
128
129static d_open_t targopen;
130static d_close_t targclose;
131static d_read_t targread;
132static d_write_t targwrite;
133static d_ioctl_t targioctl;
134static d_poll_t targpoll;
135static d_strategy_t targstrategy;
136
/* Character device switch; registered with makedev() in targinit(). */
#define TARG_CDEV_MAJOR	65
static struct cdevsw targ_cdevsw = {
	/* open */	targopen,
	/* close */	targclose,
	/* read */	targread,
	/* write */	targwrite,
	/* ioctl */	targioctl,
	/* stop */	nostop,
	/* reset */	noreset,
	/* devtotty */	nodevtotty,
	/* poll */	targpoll,
	/* mmap */	nommap,
	/* strategy */	targstrategy,
	/* name */	"targ",
	/* parms */	noparms,
	/* maj */	TARG_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* maxio */	0,
	/* bmaj */	-1
};
159
160static int targsendccb(struct cam_periph *periph, union ccb *ccb,
161 union ccb *inccb);
162static periph_init_t targinit;
163static void targasync(void *callback_arg, u_int32_t code,
164 struct cam_path *path, void *arg);
165static int targallocinstance(struct ioc_alloc_unit *alloc_unit);
166static int targfreeinstance(struct ioc_alloc_unit *alloc_unit);
167static cam_status targenlun(struct cam_periph *periph);
168static cam_status targdislun(struct cam_periph *periph);
169static periph_ctor_t targctor;
170static periph_dtor_t targdtor;
171static void targrunqueue(struct cam_periph *periph,
172 struct targ_softc *softc);
173static periph_start_t targstart;
174static void targdone(struct cam_periph *periph,
175 union ccb *done_ccb);
176static void targfireexception(struct cam_periph *periph,
177 struct targ_softc *softc);
178static int targerror(union ccb *ccb, u_int32_t cam_flags,
179 u_int32_t sense_flags);
180static struct targ_cmd_desc* allocdescr(void);
181static void freedescr(struct targ_cmd_desc *buf);
182static void fill_sense(struct scsi_sense_data *sense,
183 u_int error_code, u_int sense_key,
184 u_int asc, u_int ascq);
185
/* CAM peripheral driver registration; picked up via the periphdriver_set. */
static struct periph_driver targdriver =
{
	targinit, "targ",
	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, targdriver);

/* Maps unit numbers (minor devices) to cam_periph pointers. */
static struct extend_array *targperiphs;
195
/*
 * Driver-wide initialization, run once at boot via the periph driver
 * table: allocate the unit-number extend array and register the cdevsw.
 */
static void
targinit(void)
{
	dev_t dev;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	targperiphs = cam_extend_new();
	if (targperiphs == NULL) {
		printf("targ: Failed to alloc extend array!\n");
		return;
	}

	/* If we were successful, register our devsw */
	dev = makedev(TARG_CDEV_MAJOR, 0);
	cdevsw_add(&dev, &targ_cdevsw, NULL);
}
214
215static void
216targasync(void *callback_arg, u_int32_t code,
217 struct cam_path *path, void *arg)
218{
219 struct cam_periph *periph;
220
221 periph = (struct cam_periph *)callback_arg;
222 switch (code) {
223 case AC_PATH_DEREGISTERED:
224 {
225 /* XXX Implement */
226 break;
227 }
228 case AC_BUS_RESET:
229 {
230 /* Flush transaction queue */
231 }
232 default:
233 break;
234 }
235}
236
/*
 * Attempt to enable target mode on our lun.
 *
 * Issues an XPT_EN_LUN CCB, then primes the SIM with up to MAX_ACCEPT
 * accept-target-I/O CCBs (for incoming selections) and up to MAX_ACCEPT
 * immediate-notify CCBs (for asynchronous target-mode events).  The
 * operation is considered successful if at least one CCB of each kind
 * was queued; a total failure disables the lun again via targdislun().
 */
static cam_status
targenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targ_softc *softc;
	cam_status status;
	int i;

	softc = (struct targ_softc *)periph->softc;

	/* Nothing to do if the lun is already enabled. */
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print_path(periph->path);
		printf("targenlun - Enable Lun Rejected for status 0x%x\n",
		       status);
		return (status);
	}

	softc->flags |= TARG_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
						      M_NOWAIT);
		if (atio == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		/* Each ATIO carries a command descriptor in its private area */
		atio->ccb_h.ccb_descr = allocdescr();

		if (atio->ccb_h.ccb_descr == NULL) {
			free(atio, M_DEVBUF);
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targdone;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			xpt_print_path(periph->path);
			printf("Queue of atio failed\n");
			freedescr(atio->ccb_h.ccb_descr);
			free(atio, M_DEVBUF);
			break;
		}
		/* Thread onto the list targdislun() uses for cleanup. */
		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
	}

	/* Fail outright only if not a single ATIO could be queued. */
	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate accept tio CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_immed_notify *inot;

		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
							M_NOWAIT);

		if (inot == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		inot->ccb_h.cbfcnp = targdone;
		xpt_action((union ccb *)inot);
		status = inot->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			printf("Queue of inot failed\n");
			free(inot, M_DEVBUF);
			break;
		}
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
	}

	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate immediate notify CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}
355
/*
 * Disable target mode on our lun: abort every outstanding accept and
 * immediate-notify CCB we queued in targenlun(), then issue XPT_EN_LUN
 * with enable = 0.  Returns the status of the disable CCB.
 */
static cam_status
targdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targ_softc *softc;
	struct ccb_accept_tio* atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targ_softc *)periph->softc;
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {

		softc->accept_tio_list =
		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		printf("targdislun - Disabling lun on controller failed "
		       "with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}
404
405static cam_status
406targctor(struct cam_periph *periph, void *arg)
407{
408 struct ccb_pathinq *cpi;
409 struct targ_softc *softc;
410 int i;
411
412 cpi = (struct ccb_pathinq *)arg;
413
414 /* Allocate our per-instance private storage */
415 softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
416 if (softc == NULL) {
417 printf("targctor: unable to malloc softc\n");
418 return (CAM_REQ_CMP_ERR);
419 }
420
421 bzero(softc, sizeof(softc));
422 TAILQ_INIT(&softc->pending_queue);
423 TAILQ_INIT(&softc->work_queue);
424 TAILQ_INIT(&softc->snd_ccb_queue);
425 TAILQ_INIT(&softc->rcv_ccb_queue);
426 TAILQ_INIT(&softc->unknown_atio_queue);
427 bufq_init(&softc->snd_buf_queue);
428 bufq_init(&softc->rcv_buf_queue);
429 softc->accept_tio_list = NULL;
430 SLIST_INIT(&softc->immed_notify_slist);
431 softc->state = TARG_STATE_NORMAL;
432 periph->softc = softc;
433 softc->init_level++;
434
435 cam_extend_set(targperiphs, periph->unit_number, periph);
436
437 /*
438 * We start out life with a UA to indicate power-on/reset.
439 */
440 for (i = 0; i < MAX_INITIATORS; i++)
441 softc->istate[i].pending_ua = UA_POWER_ON;
442
443 /*
444 * Allocate an initial inquiry data buffer. We might allow the
445 * user to override this later via an ioctl.
446 */
447 softc->inq_data_len = sizeof(*softc->inq_data);
448 softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
449 if (softc->inq_data == NULL) {
450 printf("targctor - Unable to malloc inquiry data\n");
451 targdtor(periph);
452 return (CAM_RESRC_UNAVAIL);
453 }
454 bzero(softc->inq_data, softc->inq_data_len);
455 softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
456 softc->inq_data->version = 2;
457 softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
458 softc->inq_data->flags =
459 cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32);
460 softc->inq_data->additional_length = softc->inq_data_len - 4;
461 strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
462 strncpy(softc->inq_data->product, "TM-PT ", SID_PRODUCT_SIZE);
463 strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
464 softc->init_level++;
465
466 return (CAM_REQ_CMP);
467}
468
469static void
470targdtor(struct cam_periph *periph)
471{
472 struct targ_softc *softc;
473
474 softc = (struct targ_softc *)periph->softc;
475
476 softc->state = TARG_STATE_TEARDOWN;
477
478 targdislun(periph);
479
480 cam_extend_release(targperiphs, periph->unit_number);
481
482 switch (softc->init_level) {
483 default:
484 /* FALLTHROUGH */
485 case 2:
486 free(softc->inq_data, M_DEVBUF);
487 /* FALLTHROUGH */
488 case 1:
489 free(softc, M_DEVBUF);
490 break;
491 case 0:
492 panic("targdtor - impossible init level");;
493 }
494}
495
496static int
497targopen(dev_t dev, int flags, int fmt, struct proc *p)
498{
499 struct cam_periph *periph;
500 struct targ_softc *softc;
501 u_int unit;
502 cam_status status;
503 int error;
504 int s;
505
506 unit = minor(dev);
507
508 /* An open of the control device always succeeds */
509 if (TARG_IS_CONTROL_DEV(unit))
510 return 0;
511
512 s = splsoftcam();
513 periph = cam_extend_get(targperiphs, unit);
514 if (periph == NULL) {
515 return (ENXIO);
516 splx(s);
517 }
518 if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
519 splx(s);
520 return (error);
521 }
522
523 softc = (struct targ_softc *)periph->softc;
524 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
525 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
526 splx(s);
527 cam_periph_unlock(periph);
528 return(ENXIO);
529 }
530 }
531 splx(s);
532
533 status = targenlun(periph);
534 switch (status) {
535 case CAM_REQ_CMP:
536 error = 0;
537 break;
538 case CAM_RESRC_UNAVAIL:
539 error = ENOMEM;
540 break;
541 case CAM_LUN_ALRDY_ENA:
542 error = EADDRINUSE;
543 break;
544 default:
545 error = ENXIO;
546 break;
547 }
548 cam_periph_unlock(periph);
549 return (error);
550}
551
552static int
553targclose(dev_t dev, int flag, int fmt, struct proc *p)
554{
555 struct cam_periph *periph;
556 struct targ_softc *softc;
557 u_int unit;
558 int s;
559 int error;
560
561 unit = minor(dev);
562
563 /* A close of the control device always succeeds */
564 if (TARG_IS_CONTROL_DEV(unit))
565 return 0;
566
567 s = splsoftcam();
568 periph = cam_extend_get(targperiphs, unit);
569 if (periph == NULL) {
570 splx(s);
571 return (ENXIO);
572 }
573 if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
574 return (error);
575 softc = (struct targ_softc *)periph->softc;
576 splx(s);
577
578 targdislun(periph);
579
580 cam_periph_unlock(periph);
581 cam_periph_release(periph);
582
583 return (0);
584}
585
/*
 * Allocate a new driver instance for the path described in alloc_unit
 * (control-device TARGCTLIOALLOCUNIT ioctl).  Verifies the controller
 * supports processor-target mode and that no instance already exists,
 * then creates the peripheral.  On success the assigned unit number is
 * written back into alloc_unit->unit.  Returns a errno-style value.
 */
static int
targallocinstance(struct ioc_alloc_unit *alloc_unit)
{
	struct ccb_pathinq cpi;
	struct cam_path *path;
	struct cam_periph *periph;
	cam_status status;
	int free_path_on_return;
	int error;

	free_path_on_return = 0;
	status = xpt_create_path(&path, /*periph*/NULL,
				 alloc_unit->path_id,
				 alloc_unit->target_id,
				 alloc_unit->lun_id);
	/*
	 * NOTE(review): set unconditionally, so xpt_free_path() is
	 * attempted even when path creation failed -- confirm intended.
	 */
	free_path_on_return++;

	if (status != CAM_REQ_CMP) {
		printf("Couldn't Allocate Path %x\n", status);
		goto fail;
	}

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	status = cpi.ccb_h.status;

	if (status != CAM_REQ_CMP) {
		printf("Couldn't CPI %x\n", status);
		goto fail;
	}

	/* Can only alloc units on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		/* NOTE(review): 'status' printed here is still CAM_REQ_CMP. */
		printf("Controller does not support target mode%x\n", status);
		status = CAM_PATH_INVALID;
		goto fail;
	}

	/* Ensure that we don't already have an instance for this unit. */
	if ((periph = cam_periph_find(path, "targ")) != NULL) {
		printf("Lun already enabled%x\n", status);
		status = CAM_LUN_ALRDY_ENA;
		goto fail;
	}

	/*
	 * Allocate a peripheral instance for
	 * this target instance.
	 */
	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
				  "targ", CAM_PERIPH_BIO, path, targasync,
				  0, &cpi);

fail:
	switch (status) {
	case CAM_REQ_CMP:
	{
		/* NOTE(review): shadows the outer 'periph' declaration. */
		struct cam_periph *periph;

		if ((periph = cam_periph_find(path, "targ")) == NULL)
			panic("targallocinstance: Succeeded but no periph?");
		error = 0;
		alloc_unit->unit = periph->unit_number;
		break;
	}
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		printf("targallocinstance: Unexpected CAM status %x\n", status);
		/* FALLTHROUGH */
	case CAM_PATH_INVALID:
		error = ENXIO;
		break;
	case CAM_PROVIDE_FAIL:
		error = ENODEV;
		break;
	}

	if (free_path_on_return != 0)
		xpt_free_path(path);

	return (error);
}
674
/*
 * Free a driver instance (control-device TARGCTLIOFREEUNIT ioctl).
 * Refuses while the lun is still enabled (CAM_BUSY); otherwise
 * invalidates the peripheral, which triggers targdtor().  Note the
 * 'fail:' label is also reached on the success path.  Returns an
 * errno-style value.
 */
static int
targfreeinstance(struct ioc_alloc_unit *alloc_unit)
{
	struct cam_path *path;
	struct cam_periph *periph;
	struct targ_softc *softc;
	cam_status status;
	int free_path_on_return;
	int error;

	periph = NULL;
	free_path_on_return = 0;
	status = xpt_create_path(&path, /*periph*/NULL,
				 alloc_unit->path_id,
				 alloc_unit->target_id,
				 alloc_unit->lun_id);
	free_path_on_return++;

	if (status != CAM_REQ_CMP)
		goto fail;

	/* Find our instance. */
	if ((periph = cam_periph_find(path, "targ")) == NULL) {
		xpt_print_path(path);
		status = CAM_PATH_INVALID;
		goto fail;
	}

	softc = (struct targ_softc *)periph->softc;

	/* Cannot free a unit whose lun is still enabled. */
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
		status = CAM_BUSY;
		goto fail;
	}

fail:
	if (free_path_on_return != 0)
		xpt_free_path(path);

	switch (status) {
	case CAM_REQ_CMP:
		if (periph != NULL)
			cam_periph_invalidate(periph);
		error = 0;
		break;
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		printf("targfreeinstance: Unexpected CAM status %x\n", status);
		/* FALLTHROUGH */
	case CAM_PATH_INVALID:
		error = ENODEV;
		break;
	}
	return (error);
}
735
736static int
737targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
738{
739 struct cam_periph *periph;
740 struct targ_softc *softc;
741 u_int unit;
742 int error;
743
744 unit = minor(dev);
745 error = 0;
746 if (TARG_IS_CONTROL_DEV(unit)) {
747 switch (cmd) {
748 case TARGCTLIOALLOCUNIT:
749 error = targallocinstance((struct ioc_alloc_unit*)addr);
750 break;
751 case TARGCTLIOFREEUNIT:
752 error = targfreeinstance((struct ioc_alloc_unit*)addr);
753 break;
754 default:
755 error = EINVAL;
756 break;
757 }
758 return (error);
759 }
760
761 periph = cam_extend_get(targperiphs, unit);
762 if (periph == NULL)
763 return (ENXIO);
764 softc = (struct targ_softc *)periph->softc;
765 switch (cmd) {
766 case TARGIOCFETCHEXCEPTION:
767 *((targ_exception *)addr) = softc->exceptions;
768 break;
769 case TARGIOCCLEAREXCEPTION:
770 {
771 targ_exception clear_mask;
772
773 clear_mask = *((targ_exception *)addr);
774 if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
775 struct ccb_hdr *ccbh;
776
777 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
778 if (ccbh != NULL) {
779 TAILQ_REMOVE(&softc->unknown_atio_queue,
780 ccbh, periph_links.tqe);
781 /* Requeue the ATIO back to the controller */
782 xpt_action((union ccb *)ccbh);
783 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
784 }
785 if (ccbh != NULL)
786 clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
787 }
788 softc->exceptions &= ~clear_mask;
789 if (softc->exceptions == TARG_EXCEPT_NONE
790 && softc->state == TARG_STATE_EXCEPTION) {
791 softc->state = TARG_STATE_NORMAL;
792 targrunqueue(periph, softc);
793 }
794 break;
795 }
796 case TARGIOCFETCHATIO:
797 {
798 struct ccb_hdr *ccbh;
799
800 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
801 if (ccbh != NULL) {
802 bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
803 } else {
804 error = ENOENT;
805 }
806 break;
807 }
808 case TARGIOCCOMMAND:
809 {
810 union ccb *inccb;
811 union ccb *ccb;
812
813 /*
814 * XXX JGibbs
815 * This code is lifted directly from the pass-thru driver.
816 * Perhaps this should be moved to a library????
817 */
818 inccb = (union ccb *)addr;
819 ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
820
821 error = targsendccb(periph, ccb, inccb);
822
823 xpt_release_ccb(ccb);
824
825 break;
826 }
827 case TARGIOCGETISTATE:
828 case TARGIOCSETISTATE:
829 {
830 struct ioc_initiator_state *ioc_istate;
831
832 ioc_istate = (struct ioc_initiator_state *)addr;
833 if (ioc_istate->initiator_id > MAX_INITIATORS) {
834 error = EINVAL;
835 break;
836 }
837 CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
838 ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
839 if (cmd == TARGIOCGETISTATE) {
840 bcopy(&softc->istate[ioc_istate->initiator_id],
841 &ioc_istate->istate, sizeof(ioc_istate->istate));
842 } else {
843 bcopy(&ioc_istate->istate,
844 &softc->istate[ioc_istate->initiator_id],
845 sizeof(ioc_istate->istate));
846 CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
847 ("pending_ca now %x\n",
848 softc->istate[ioc_istate->initiator_id].pending_ca));
849 }
850 break;
851 }
852 default:
853 error = ENOTTY;
854 break;
855 }
856 return (error);
857}
858
/*
 * XXX JGibbs lifted from pass-thru driver.
 * Generally, "ccb" should be the CCB supplied by the kernel. "inccb"
 * should be the CCB that is copied in from the user.
 *
 * Runs the user's CCB synchronously: merges the user CCB into the
 * kernel one, maps user data buffers if needed, executes the CCB via
 * cam_periph_runccb(), then unmaps and copies results back to the user
 * CCB.  Returns an errno-style value.
 */
static int
targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
{
	struct targ_softc *softc;
	struct cam_periph_map_info mapinfo;
	int error, need_unmap;

	softc = (struct targ_softc *)periph->softc;

	need_unmap = 0;

	/*
	 * There are some fields in the CCB header that need to be
	 * preserved, the rest we get from the user.
	 */
	xpt_merge_ccb(ccb, inccb);

	/*
	 * There's no way for the user to have a completion
	 * function, so we put our own completion function in here.
	 */
	ccb->ccb_h.cbfcnp = targdone;

	/*
	 * We only attempt to map the user memory into kernel space
	 * if they haven't passed in a physical memory pointer,
	 * and if there is actually an I/O operation to perform.
	 * Right now cam_periph_mapmem() only supports SCSI and device
	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
	 * there's actually data to map.  cam_periph_mapmem() will do the
	 * right thing, even if there isn't data to map, but since CCBs
	 * without data are a reasonably common occurrence (e.g. test unit
	 * ready), it will save a few cycles if we check for it here.
	 */
	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {

		bzero(&mapinfo, sizeof(mapinfo));

		error = cam_periph_mapmem(ccb, &mapinfo);

		/*
		 * cam_periph_mapmem returned an error, we can't continue.
		 * Return the error to the user.
		 */
		if (error)
			return(error);

		/*
		 * We successfully mapped the memory in, so we need to
		 * unmap it when the transaction is done.
		 */
		need_unmap = 1;
	}

	/*
	 * If the user wants us to perform any error recovery, then honor
	 * that request.  Otherwise, it's up to the user to perform any
	 * error recovery.
	 */
	error = cam_periph_runccb(ccb,
				  (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
				  targerror : NULL,
				  /* cam_flags */ 0,
				  /* sense_flags */SF_RETRY_UA,
				  &softc->device_stats);

	if (need_unmap != 0)
		cam_periph_unmapmem(ccb, &mapinfo);

	/* Hand results back to the caller's CCB, preserving its privates. */
	ccb->ccb_h.cbfcnp = NULL;
	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
	bcopy(ccb, inccb, sizeof(union ccb));

	return(error);
}
942
943
/*
 * poll() entry point.  A direction is ready when a target command CCB
 * is waiting on the corresponding ccb queue and no user buffer is
 * already queued for it.  Any non-NORMAL state reports POLLERR.
 */
static int
targpoll(dev_t dev, int poll_events, struct proc *p)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int unit;
	int revents;
	int s;

	unit = minor(dev);

	/* ioctl is the only supported operation of the control device */
	if (TARG_IS_CONTROL_DEV(unit))
		return EINVAL;

	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL)
		return (ENXIO);
	softc = (struct targ_softc *)periph->softc;

	revents = 0;
	s = splcam();
	/* Writable: a RECEIVE command is pending with no buffer queued. */
	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
		 && bufq_first(&softc->rcv_buf_queue) == NULL)
			revents |= poll_events & (POLLOUT | POLLWRNORM);
	}
	/* Readable: a SEND command is pending with no buffer queued. */
	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
		 && bufq_first(&softc->snd_buf_queue) == NULL)
			revents |= poll_events & (POLLIN | POLLRDNORM);
	}

	if (softc->state != TARG_STATE_NORMAL)
		revents |= POLLERR;

	if (revents == 0) {
		if (poll_events & (POLLOUT | POLLWRNORM))
			selrecord(p, &softc->rcv_select);
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(p, &softc->snd_select);
	}
	splx(s);
	return (revents);
}
989
990static int
991targread(dev_t dev, struct uio *uio, int ioflag)
992{
993 u_int unit;
994
995 unit = minor(dev);
996 /* ioctl is the only supported operation of the control device */
997 if (TARG_IS_CONTROL_DEV(unit))
998 return EINVAL;
999
1000 if (uio->uio_iovcnt == 0
1001 || uio->uio_iov->iov_len == 0) {
1002 /* EOF */
1003 struct cam_periph *periph;
1004 struct targ_softc *softc;
1005 int s;
1006
1007 s = splcam();
1008 periph = cam_extend_get(targperiphs, unit);
1009 if (periph == NULL)
1010 return (ENXIO);
1011 softc = (struct targ_softc *)periph->softc;
1012 softc->flags |= TARG_FLAG_SEND_EOF;
1013 splx(s);
1014 targrunqueue(periph, softc);
1015 return (0);
1016 }
1017 return(physread(dev, uio, ioflag));
1018}
1019
1020static int
1021targwrite(dev_t dev, struct uio *uio, int ioflag)
1022{
1023 u_int unit;
1024
1025 unit = minor(dev);
1026 /* ioctl is the only supported operation of the control device */
1027 if (TARG_IS_CONTROL_DEV(unit))
1028 return EINVAL;
1029
1030 if (uio->uio_iovcnt == 0
1031 || uio->uio_iov->iov_len == 0) {
1032 /* EOF */
1033 struct cam_periph *periph;
1034 struct targ_softc *softc;
1035 int s;
1036
1037 s = splcam();
1038 periph = cam_extend_get(targperiphs, unit);
1039 if (periph == NULL)
1040 return (ENXIO);
1041 softc = (struct targ_softc *)periph->softc;
1042 softc->flags |= TARG_FLAG_RECEIVE_EOF;
1043 splx(s);
1044 targrunqueue(periph, softc);
1045 return (0);
1046 }
1047 return(physwrite(dev, uio, ioflag));
1048}
1049
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 *
 * Queues the buffer on the SEND (B_READ) or RECEIVE buffer queue and
 * kicks targrunqueue() to match it against pending target commands.
 * On error the buf is completed immediately with B_ERROR set.
 */
static void
targstrategy(struct buf *bp)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int unit;
	int s;

	unit = minor(bp->b_dev);

	/* ioctl is the only supported operation of the control device */
	if (TARG_IS_CONTROL_DEV(unit)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	softc = (struct targ_softc *)periph->softc;

	/*
	 * Mask interrupts so that the device cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If there is an exception pending, error out
	 */
	if (softc->state != TARG_STATE_NORMAL) {
		/* NOTE(review): state/exceptions are re-read after splx(). */
		splx(s);
		if (softc->state == TARG_STATE_EXCEPTION
		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
			bp->b_error = EBUSY;
		else
			bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Place it in the queue of buffers available for either
	 * SEND or RECEIVE commands.
	 *
	 */
	bp->b_resid = bp->b_bcount;
	if ((bp->b_flags & B_READ) != 0) {
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Queued a SEND buffer\n"));
		bufq_insert_tail(&softc->snd_buf_queue, bp);
	} else {
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Queued a RECEIVE buffer\n"));
		bufq_insert_tail(&softc->rcv_buf_queue, bp);
	}

	splx(s);

	/*
	 * Attempt to use the new buffer to service any pending
	 * target commands.
	 */
	targrunqueue(periph, softc);

	return;
bad:
	bp->b_flags |= B_ERROR;

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
1132
/*
 * Match a pending target command (SEND or RECEIVE ATIO) with a queued
 * user buffer, or with a latched EOF flag, and move the resulting work
 * onto the work queue for targstart().  Only one request is dispatched
 * at a time to maintain data ordering.
 */
static void
targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
{
	struct ccb_queue *pending_queue;
	struct ccb_accept_tio *atio;
	struct buf_queue_head *bufq;
	struct buf *bp;
	struct targ_cmd_desc *desc;
	struct ccb_hdr *ccbh;
	int s;

	s = splbio();
	pending_queue = NULL;
	bufq = NULL;
	ccbh = NULL;
	/* Only run one request at a time to maintain data ordering. */
	if (softc->state != TARG_STATE_NORMAL
	 || TAILQ_FIRST(&softc->work_queue) != NULL
	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
		splx(s);
		return;
	}

	/* SEND: pair a read() buffer (or SEND_EOF) with a waiting ATIO. */
	if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_SEND_EOF;
		else {
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("De-Queued a SEND buffer %ld\n",
				   bp->b_bcount));
		}
		bufq = &softc->snd_buf_queue;
		pending_queue = &softc->snd_ccb_queue;
	/* RECEIVE: pair a write() buffer (or RECEIVE_EOF) with an ATIO. */
	} else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
	         || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
	        && (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
		else {
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("De-Queued a RECEIVE buffer %ld\n",
				   bp->b_bcount));
		}
		bufq = &softc->rcv_buf_queue;
		pending_queue = &softc->rcv_ccb_queue;
	}

	if (pending_queue != NULL) {
		/* Process a request */
		atio = (struct ccb_accept_tio *)ccbh;
		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
		desc->bp = bp;
		if (bp == NULL) {
			/* EOF: zero-length transfer with no data phase. */
			desc->data = NULL;
			desc->data_increment = 0;
			desc->data_resid = 0;
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
		} else {
			bufq_remove(bufq, bp);
			desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
			desc->data_increment =
			    MIN(desc->data_resid, bp->b_resid);
		}
		/* NOTE(review): %x with an intptr_t arg -- verify format. */
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Buffer command: data %x: datacnt %d\n",
			   (intptr_t)desc->data, desc->data_increment));
		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
				  periph_links.tqe);
	}
	if (TAILQ_FIRST(&softc->work_queue) != NULL) {
		splx(s);
		xpt_schedule(periph, /*XXX priority*/1);
	} else
		splx(s);
}
1215
/*
 * CAM "start" callback: convert the ATIO at the head of the work queue
 * into a Continue Target I/O (CTIO) and hand it to the transport.
 *
 * Three cases, decided under splbio():
 *  - an immediate-priority CCB request is outstanding: park the CCB on
 *    the periph's ccb_list and wake the waiter;
 *  - no work queued: give the CCB back;
 *  - otherwise: move the ATIO to the pending queue, build the CTIO from
 *    its command descriptor, and xpt_action() it.
 */
static void
targstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targ_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targ_cmd_desc *desc;
	struct ccb_scsiio *csio;
	ccb_flags flags;
	int s;

	softc = (struct targ_softc *)periph->softc;

	s = splbio();
	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		/* Someone is sleeping waiting for a CCB; give them this one. */
		start_ccb->ccb_h.ccb_type = TARG_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (ccbh == NULL) {
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		/* Track the ATIO as in-flight until its CTIO completes. */
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
				  periph_links.tqe);
		splx(s);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 */
		if (desc->data_resid == desc->data_increment)
			flags |= CAM_SEND_STATUS;

		csio = &start_ccb->csio;
		cam_fill_ctio(csio,
			      /*retries*/2,
			      targdone,
			      flags,
			      /*tag_action*/MSG_SIMPLE_Q_TAG,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		/* Link CTIO back to its ATIO so targdone() can find it. */
		start_ccb->ccb_h.ccb_type = TARG_CCB_WORKQ;
		start_ccb->ccb_h.ccb_atio = atio;
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Sending a CTIO\n"));
		xpt_action(start_ccb);
		/* Re-check under splbio(): targdone may have queued more. */
		s = splbio();
		ccbh = TAILQ_FIRST(&softc->work_queue);
		splx(s);
	}
	if (ccbh != NULL)
		targrunqueue(periph, softc);
}
1285
/*
 * CAM completion callback for all CCBs this periph issues.
 *
 * Dispatches on func_code:
 *  - XPT_ACCEPT_TARGET_IO: a new command arrived from an initiator.
 *    Handle pending Contingent Allegiance / Unit Attention state, then
 *    decode the CDB.  INQUIRY, TEST UNIT READY and REQUEST SENSE are
 *    answered from internal buffers; SEND/RECEIVE are queued for pairing
 *    with user buffers; anything else is queued for the userland daemon
 *    and raises an exception.
 *  - XPT_CONT_TARGET_IO: a data/status phase we started has finished;
 *    account for the transferred bytes, complete or requeue the user
 *    buffer, and either return the ATIO to the SIM or queue for more data.
 *  - XPT_IMMED_NOTIFY: only freed here on teardown/abort.
 */
static void
targdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct targ_softc *softc;

	softc = (struct targ_softc *)periph->softc;

	if (done_ccb->ccb_h.ccb_type == TARG_CCB_WAITING) {
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targ_cmd_desc *descr;
		struct initiator_state *istate;
		u_int8_t *cdb;

		atio = &done_ccb->atio;
		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
		istate = &softc->istate[atio->init_id];
		cdb = atio->cdb_io.cdb_bytes;
		/* On teardown/abort the ATIO and its descriptor die here. */
		if (softc->state == TARG_STATE_TEARDOWN
		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
			freedescr(descr);
			free(done_ccb, M_DEVBUF);
			return;
		}

		if (istate->pending_ca == 0
		 && istate->pending_ua != 0
		 && cdb[0] != INQUIRY) {
			/* Pending UA, tell initiator */
			/* Direction is always relative to the initator */
			istate->pending_ca = CA_UNIT_ATTN;
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
			descr->data_resid = 0;
			descr->data_increment = 0;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_CHECK_COND;
		} else {
			/*
			 * Save the current CA and UA status so
			 * they can be used by this command.
			 */
			ua_types pending_ua;
			ca_types pending_ca;

			pending_ua = istate->pending_ua;
			pending_ca = istate->pending_ca;

			/*
			 * As per the SCSI2 spec, any command that occurs
			 * after a CA is reported, clears the CA.  If the
			 * command is not an inquiry, we are also supposed
			 * to clear the UA condition, if any, that caused
			 * the CA to occur assuming the UA is not a
			 * persistant state.
			 */
			istate->pending_ca = CA_NONE;
			if ((pending_ca
			   & (CA_CMD_SENSE|CA_UNIT_ATTN)) == CA_UNIT_ATTN
			 && cdb[0] != INQUIRY)
				istate->pending_ua = UA_NONE;

			/*
			 * Determine the type of incoming command and
			 * setup our buffer for a response.
			 */
			switch (cdb[0]) {
			case INQUIRY:
			{
				struct scsi_inquiry *inq;
				struct scsi_sense_data *sense;

				inq = (struct scsi_inquiry *)cdb;
				sense = &istate->sense_data;
				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
					  ("Saw an inquiry!\n"));
				/*
				 * Validate the command.  We don't
				 * support any VPD pages, so complain
				 * if EVPD is set.
				 */
				if ((inq->byte2 & SI_EVPD) != 0
				 || inq->page_code != 0) {
					istate->pending_ca = CA_CMD_SENSE;
					atio->ccb_h.flags &= ~CAM_DIR_MASK;
					atio->ccb_h.flags |= CAM_DIR_NONE;
					descr->data_resid = 0;
					descr->data_increment = 0;
					descr->status = SCSI_STATUS_CHECK_COND;
					fill_sense(sense,
						   SSD_CURRENT_ERROR,
						   SSD_KEY_ILLEGAL_REQUEST,
						   /*asc*/0x24, /*ascq*/0x00);
					/*
					 * Extend the sense length to cover
					 * the sense-key-specific bytes set
					 * below.
					 */
					sense->extra_len =
						offsetof(struct scsi_sense_data,
							 extra_bytes)
					      - offsetof(struct scsi_sense_data,
							 extra_len);
				}

				/*
				 * Point the field-pointer bytes at the
				 * offending CDB byte (EVPD bit wins if both
				 * are bad); breaks skip the normal INQUIRY
				 * data setup so CHECK CONDITION goes out.
				 */
				if ((inq->byte2 & SI_EVPD) != 0) {
					sense->sense_key_spec[0] =
					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
					   |SSD_BITPTR_VALID| /*bit value*/1;
					sense->sense_key_spec[1] = 0;
					sense->sense_key_spec[2] =
					    offsetof(struct scsi_inquiry,
						     byte2);
					break;
				} else if (inq->page_code != 0) {
					sense->sense_key_spec[0] =
					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
					sense->sense_key_spec[1] = 0;
					sense->sense_key_spec[2] =
					    offsetof(struct scsi_inquiry,
						     page_code);
					break;
				}
				/*
				 * Direction is always relative
				 * to the initator.
				 */
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_IN;
				descr->data = softc->inq_data;
				descr->data_resid = MIN(softc->inq_data_len,
							inq->length);
				descr->data_increment = descr->data_resid;
				descr->timeout = 5 * 1000;
				descr->status = SCSI_STATUS_OK;
				break;
			}
			case TEST_UNIT_READY:
				/* Always ready: GOOD status, no data. */
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_NONE;
				descr->data_resid = 0;
				descr->data_increment = 0;
				descr->timeout = 5 * 1000;
				descr->status = SCSI_STATUS_OK;
				break;
			case REQUEST_SENSE:
			{
				struct scsi_request_sense *rsense;
				struct scsi_sense_data *sense;

				rsense = (struct scsi_request_sense *)cdb;
				sense = &istate->sense_data;
				if (pending_ca == 0) {
					/* Nothing outstanding: NO SENSE. */
					fill_sense(sense, SSD_CURRENT_ERROR,
						   SSD_KEY_NO_SENSE, 0x00,
						   0x00);
					CAM_DEBUG(periph->path,
						  CAM_DEBUG_SUBTRACE,
						  ("No pending CA!\n"));
				} else if (pending_ca == CA_UNIT_ATTN) {
					u_int ascq;

					/* ASC 0x29: POR (0x1) or reset (0x2) */
					if (pending_ua == UA_POWER_ON)
						ascq = 0x1;
					else
						ascq = 0x2;
					fill_sense(sense, SSD_CURRENT_ERROR,
						   SSD_KEY_UNIT_ATTENTION,
						   0x29, ascq);
					CAM_DEBUG(periph->path,
						  CAM_DEBUG_SUBTRACE,
						  ("Pending UA!\n"));
				}
				/*
				 * Direction is always relative
				 * to the initator.
				 */
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_IN;
				descr->data = sense;
				/* Valid sense bytes: header + extra_len. */
				descr->data_resid =
					offsetof(struct scsi_sense_data,
						 extra_len)
				      + sense->extra_len;
				descr->data_resid = MIN(descr->data_resid,
							rsense->length);
				descr->data_increment = descr->data_resid;
				descr->timeout = 5 * 1000;
				descr->status = SCSI_STATUS_OK;
				break;
			}
			case RECEIVE:
			case SEND:
			{
				struct scsi_send_receive *sr;

				sr = (struct scsi_send_receive *)cdb;

				/*
				 * Direction is always relative
				 * to the initator.
				 */
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				descr->data_resid = scsi_3btoul(sr->xfer_len);
				descr->timeout = 5 * 1000;
				descr->status = SCSI_STATUS_OK;
				if (cdb[0] == SEND) {
					atio->ccb_h.flags |= CAM_DIR_OUT;
					CAM_DEBUG(periph->path,
						  CAM_DEBUG_SUBTRACE,
						  ("Saw a SEND!\n"));
					/*
					 * NOTE(review): duplicate of the
					 * CAM_DIR_OUT set above; harmless
					 * (OR is idempotent) but redundant.
					 */
					atio->ccb_h.flags |= CAM_DIR_OUT;
					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
							  &atio->ccb_h,
							  periph_links.tqe);
					selwakeup(&softc->snd_select);
				} else {
					atio->ccb_h.flags |= CAM_DIR_IN;
					CAM_DEBUG(periph->path,
						  CAM_DEBUG_SUBTRACE,
						  ("Saw a RECEIVE!\n"));
					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
							  &atio->ccb_h,
							  periph_links.tqe);
					selwakeup(&softc->rcv_select);
				}
				/*
				 * Attempt to satisfy this request with
				 * a user buffer.
				 */
				targrunqueue(periph, softc);
				return;
			}
			default:
				/*
				 * Queue for consumption by our userland
				 * counterpart and transition to the exception
				 * state.
				 */
				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
						  &atio->ccb_h,
						  periph_links.tqe);
				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
				targfireexception(periph, softc);
				return;
			}
		}

		/* Queue us up to receive a Continue Target I/O ccb. */
		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
				  periph_links.tqe);
		xpt_schedule(periph, /*priority*/1);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targ_cmd_desc *desc;
		struct buf *bp;

		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Received completed CTIO\n"));
		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;

		TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
			     periph_links.tqe);

		/* XXX Check for errors */
		/* NOTE(review): CTIO error status is silently ignored here. */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

		}
		desc->data_resid -= desc->data_increment;
		if ((bp = desc->bp) != NULL) {

			bp->b_resid -= desc->data_increment;
			bp->b_error = 0;

			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Buffer I/O Completed - Resid %ld:%d\n",
				   bp->b_resid, desc->data_resid));
			/*
			 * Send the buffer back to the client if
			 * either the command has completed or all
			 * buffer space has been consumed.
			 */
			if (desc->data_resid == 0
			 || bp->b_resid == 0) {
				if (bp->b_resid != 0)
					/* Short transfer */
					bp->b_flags |= B_ERROR;

				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
					  ("Completing a buffer\n"));
				biodone(bp);
				desc->bp = NULL;
			}
		}

		xpt_release_ccb(done_ccb);
		if (softc->state != TARG_STATE_TEARDOWN) {

			if (desc->data_resid == 0) {
				/*
				 * Send the original accept TIO back to the
				 * controller to handle more work.
				 */
				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
					  ("Returning ATIO to target\n"));
				xpt_action((union ccb *)atio);
				break;
			}

			/*
			 * Queue us up for another buffer.  A partially
			 * consumed buffer (desc->bp still set) goes back on
			 * the head of its buffer queue so ordering holds.
			 */
			if (atio->cdb_io.cdb_bytes[0] == SEND) {
				if (desc->bp != NULL)
					TAILQ_INSERT_HEAD(
						&softc->snd_buf_queue.queue,
						bp, b_act);
				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
						  &atio->ccb_h,
						  periph_links.tqe);
			} else {
				if (desc->bp != NULL)
					TAILQ_INSERT_HEAD(
						&softc->rcv_buf_queue.queue,
						bp, b_act);
				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
						  &atio->ccb_h,
						  periph_links.tqe);
			}
			desc->bp = NULL;
			targrunqueue(periph, softc);
		} else {
			/* Tearing down: fail the buffer and free everything. */
			if (desc->bp != NULL) {
				bp->b_flags |= B_ERROR;
				bp->b_error = ENXIO;
				biodone(bp);
			}
			freedescr(desc);
			free(atio, M_DEVBUF);
		}
		break;
	}
	case XPT_IMMED_NOTIFY:
	{
		if (softc->state == TARG_STATE_TEARDOWN
		 || done_ccb->ccb_h.status == CAM_REQ_ABORTED)
			free(done_ccb, M_DEVBUF);
		break;
	}
	default:
		panic("targdone: Impossible xpt opcode %x encountered.",
		      done_ccb->ccb_h.func_code);
		/* NOTREACHED */
		break;
	}
}
1646
1647/*
1648 * Transition to the exception state and notify our symbiotic
1649 * userland process of the change.
1650 */
1651static void
1652targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1653{
1654 /*
1655 * return all pending buffers with short read/write status so our
1656 * process unblocks, and do a selwakeup on any process queued
1657 * waiting for reads or writes. When the selwakeup is performed,
1658 * the waking process will wakeup, call our poll routine again,
1659 * and pick up the exception.
1660 */
1661 struct buf *bp;
1662
1663 if (softc->state != TARG_STATE_NORMAL)
1664 /* Already either tearing down or in exception state */
1665 return;
1666
1667 softc->state = TARG_STATE_EXCEPTION;
1668
1669 while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
1670 bufq_remove(&softc->snd_buf_queue, bp);
1671 bp->b_flags |= B_ERROR;
1672 biodone(bp);
1673 }
1674
1675 while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
1676 bufq_remove(&softc->snd_buf_queue, bp);
1677 bp->b_flags |= B_ERROR;
1678 biodone(bp);
1679 }
1680
1681 selwakeup(&softc->snd_select);
1682 selwakeup(&softc->rcv_select);
1683}
1684
1685static int
1686targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1687{
1688 return 0;
1689}
1690
1691static struct targ_cmd_desc*
1692allocdescr()
1693{
1694 struct targ_cmd_desc* descr;
1695
1696 /* Allocate the targ_descr structure */
1697 descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
1698 M_DEVBUF, M_NOWAIT);
1699 if (descr == NULL)
1700 return (NULL);
1701
1702 bzero(descr, sizeof(*descr));
1703
1704 /* Allocate buffer backing store */
1705 descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
1706 if (descr->backing_store == NULL) {
1707 free(descr, M_DEVBUF);
1708 return (NULL);
1709 }
1710 descr->max_size = MAX_BUF_SIZE;
1711 return (descr);
1712}
1713
/*
 * Release a command descriptor allocated by allocdescr().  The backing
 * store must be freed before the descriptor that points to it.
 */
static void
freedescr(struct targ_cmd_desc *descr)
{
	free(descr->backing_store, M_DEVBUF);
	free(descr, M_DEVBUF);
}
1720
1721static void
1722fill_sense(struct scsi_sense_data *sense, u_int error_code, u_int sense_key,
1723 u_int asc, u_int ascq)
1724{
1725 bzero(sense, sizeof(*sense));
1726 sense->error_code = error_code;
1727 sense->flags = sense_key;
1728 sense->add_sense_code = asc;
1729 sense->add_sense_code_qual = ascq;
1730
1731 sense->extra_len = offsetof(struct scsi_sense_data, fru)
1732 - offsetof(struct scsi_sense_data, extra_len);
1733}