scsi_target.c: revision 58934 (Deleted) vs. revision 59249 (Added)
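Within the portion shown here, the only change besides the $FreeBSD version line is the conversion of the target driver's userland transfer queues from the buf-queue API to the bio-queue API: struct buf_queue_head becomes struct bio_queue_head, the snd_buf_queue/rcv_buf_queue fields become snd_bio_queue/rcv_bio_queue, the per-descriptor struct buf pointer becomes a struct bio pointer, and bufq_init()/bufq_first() become bioq_init()/bioq_first(). The sketch below only condenses that rename pattern; it is not part of scsi_target.c, "struct targ_queues" and targ_queues_init() are hypothetical stand-ins for the corresponding fields in struct targ_softc and the initialization done in targctor(), and the <sys/bio.h> include is where the bioq_*() routines live in later FreeBSD trees (the 2000-era file still includes <sys/buf.h>).

	/*
	 * Minimal sketch of the bufq -> bioq conversion applied by this diff.
	 * Not part of scsi_target.c; names here are illustrative only.
	 */
	#include <sys/param.h>
	#include <sys/bio.h>	/* struct bio, struct bio_queue_head, bioq_*() */

	struct targ_queues {
		struct bio_queue_head snd_bio_queue; /* was: struct buf_queue_head snd_buf_queue */
		struct bio_queue_head rcv_bio_queue; /* was: struct buf_queue_head rcv_buf_queue */
	};

	static void
	targ_queues_init(struct targ_queues *q)
	{
		bioq_init(&q->snd_bio_queue);	/* was: bufq_init(&softc->snd_buf_queue) */
		bioq_init(&q->rcv_bio_queue);	/* was: bufq_init(&softc->rcv_buf_queue) */
	}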
1/*
2 * Implementation of a simple Target Mode SCSI Proccessor Target driver for CAM.
3 *
4 * Copyright (c) 1998, 1999 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: head/sys/cam/scsi/scsi_target.c 58934 2000-04-02 15:24:56Z phk $
28 * $FreeBSD: head/sys/cam/scsi/scsi_target.c 59249 2000-04-15 05:54:02Z phk $
29 */
30#include <stddef.h> /* For offsetof */
31
32#include <sys/param.h>
33#include <sys/queue.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/types.h>
37#include <sys/buf.h>
38#include <sys/conf.h>
39#include <sys/devicestat.h>
40#include <sys/malloc.h>
41#include <sys/poll.h>
42#include <sys/select.h> /* For struct selinfo. */
43#include <sys/uio.h>
44
45#include <cam/cam.h>
46#include <cam/cam_ccb.h>
47#include <cam/cam_extend.h>
48#include <cam/cam_periph.h>
49#include <cam/cam_queue.h>
50#include <cam/cam_xpt_periph.h>
51#include <cam/cam_debug.h>
52
53#include <cam/scsi/scsi_all.h>
54#include <cam/scsi/scsi_pt.h>
55#include <cam/scsi/scsi_targetio.h>
56#include <cam/scsi/scsi_message.h>
57
58typedef enum {
59 TARG_STATE_NORMAL,
60 TARG_STATE_EXCEPTION,
61 TARG_STATE_TEARDOWN
62} targ_state;
63
64typedef enum {
65 TARG_FLAG_NONE = 0x00,
66 TARG_FLAG_SEND_EOF = 0x01,
67 TARG_FLAG_RECEIVE_EOF = 0x02,
68 TARG_FLAG_LUN_ENABLED = 0x04
69} targ_flags;
70
71typedef enum {
72 TARG_CCB_NONE = 0x00,
73 TARG_CCB_WAITING = 0x01,
74 TARG_CCB_HELDQ = 0x02,
75 TARG_CCB_ABORT_TO_HELDQ = 0x04
76} targ_ccb_flags;
77
78#define MAX_ACCEPT 16
79#define MAX_IMMEDIATE 16
80#define MAX_BUF_SIZE 256 /* Max inquiry/sense/mode page transfer */
81#define MAX_INITIATORS 256 /* includes widest fibre channel for now */
82
83#define MIN(a, b) ((a > b) ? b : a)
84
85#define TARG_CONTROL_UNIT 0xffff00ff
86#define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)
87
88#define TARG_TAG_WILDCARD ((u_int)~0)
89
90/* Offsets into our private CCB area for storing accept information */
91#define ccb_flags ppriv_field0
92#define ccb_descr ppriv_ptr1
93
94/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
95#define ccb_atio ppriv_ptr1
96
97struct targ_softc {
98 /* CTIOs pending on the controller */
99 struct ccb_queue pending_queue;
100
101 /* ATIOs awaiting CTIO resources from the XPT */
102 struct ccb_queue work_queue;
103
104 /*
105 * ATIOs for SEND operations waiting for 'write'
106 * buffer resources from our userland daemon.
107 */
108 struct ccb_queue snd_ccb_queue;
109
110 /*
111 * ATIOs for RCV operations waiting for 'read'
112 * buffer resources from our userland daemon.
113 */
114 struct ccb_queue rcv_ccb_queue;
115
116 /*
117 * ATIOs for commands unknown to the kernel driver.
118 * These are queued for the userland daemon to
119 * consume.
120 */
121 struct ccb_queue unknown_atio_queue;
122
123 /*
124 * Userland buffers for SEND commands waiting for
125 * SEND ATIOs to be queued by an initiator.
126 */
127 struct buf_queue_head snd_buf_queue;
127 struct bio_queue_head snd_bio_queue;
128
129 /*
130 * Userland buffers for RCV commands waiting for
131 * RCV ATIOs to be queued by an initiator.
132 */
133 struct buf_queue_head rcv_buf_queue;
133 struct bio_queue_head rcv_bio_queue;
134 struct devstat device_stats;
135 dev_t targ_dev;
136 struct selinfo snd_select;
137 struct selinfo rcv_select;
138 targ_state state;
139 targ_flags flags;
140 targ_exception exceptions;
141 u_int init_level;
142 u_int inq_data_len;
143 struct scsi_inquiry_data *inq_data;
144 struct ccb_accept_tio *accept_tio_list;
145 struct ccb_hdr_slist immed_notify_slist;
146 struct initiator_state istate[MAX_INITIATORS];
147};
148
149struct targ_cmd_desc {
150 struct ccb_accept_tio* atio_link;
151 u_int data_resid; /* How much left to transfer */
152 u_int data_increment;/* Amount to send before next disconnect */
153 void* data; /* The data. Can be from backing_store or not */
154 void* backing_store;/* Backing store allocated for this descriptor*/
155 struct buf *bp; /* Buffer for this transfer */
155 struct bio *bp; /* Buffer for this transfer */
156 u_int max_size; /* Size of backing_store */
157 u_int32_t timeout;
158 u_int8_t status; /* Status to return to initiator */
159};
160
161static d_open_t targopen;
162static d_close_t targclose;
163static d_read_t targread;
164static d_write_t targwrite;
165static d_ioctl_t targioctl;
166static d_poll_t targpoll;
167static d_strategy_t targstrategy;
168
169#define TARG_CDEV_MAJOR 65
170static struct cdevsw targ_cdevsw = {
171 /* open */ targopen,
172 /* close */ targclose,
173 /* read */ targread,
174 /* write */ targwrite,
175 /* ioctl */ targioctl,
176 /* poll */ targpoll,
177 /* mmap */ nommap,
178 /* strategy */ targstrategy,
179 /* name */ "targ",
180 /* maj */ TARG_CDEV_MAJOR,
181 /* dump */ nodump,
182 /* psize */ nopsize,
183 /* flags */ 0,
184 /* bmaj */ -1
185};
186
187static int targsendccb(struct cam_periph *periph, union ccb *ccb,
188 union ccb *inccb);
189static periph_init_t targinit;
190static void targasync(void *callback_arg, u_int32_t code,
191 struct cam_path *path, void *arg);
192static int targallocinstance(struct ioc_alloc_unit *alloc_unit);
193static int targfreeinstance(struct ioc_alloc_unit *alloc_unit);
194static cam_status targenlun(struct cam_periph *periph);
195static cam_status targdislun(struct cam_periph *periph);
196static periph_ctor_t targctor;
197static periph_dtor_t targdtor;
198static void targrunqueue(struct cam_periph *periph,
199 struct targ_softc *softc);
200static periph_start_t targstart;
201static void targdone(struct cam_periph *periph,
202 union ccb *done_ccb);
203static void targfireexception(struct cam_periph *periph,
204 struct targ_softc *softc);
205static void targinoterror(struct cam_periph *periph,
206 struct targ_softc *softc,
207 struct ccb_immed_notify *inot);
208static int targerror(union ccb *ccb, u_int32_t cam_flags,
209 u_int32_t sense_flags);
210static struct targ_cmd_desc* allocdescr(void);
211static void freedescr(struct targ_cmd_desc *buf);
212static void fill_sense(struct targ_softc *softc,
213 u_int initiator_id, u_int error_code,
214 u_int sense_key, u_int asc, u_int ascq);
215static void copy_sense(struct targ_softc *softc,
216 struct initiator_state *istate,
217 u_int8_t *sense_buffer, size_t sense_len);
218static void set_unit_attention_cond(struct cam_periph *periph,
219 u_int initiator_id, ua_types ua);
220static void set_ca_condition(struct cam_periph *periph,
221 u_int initiator_id, ca_types ca);
222static void abort_pending_transactions(struct cam_periph *periph,
223 u_int initiator_id, u_int tag_id,
224 int errno, int to_held_queue);
225
226static struct periph_driver targdriver =
227{
228 targinit, "targ",
229 TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
230};
231
232DATA_SET(periphdriver_set, targdriver);
233
234static struct extend_array *targperiphs;
235static dev_t targ_ctl_dev;
236
237static void
238targinit(void)
239{
240 /*
241 * Create our extend array for storing the devices we attach to.
242 */
243 targperiphs = cam_extend_new();
244 if (targperiphs == NULL) {
245 printf("targ: Failed to alloc extend array!\n");
246 return;
247 }
248 targ_ctl_dev = make_dev(&targ_cdevsw, TARG_CONTROL_UNIT, UID_ROOT,
249 GID_OPERATOR, 0600, "%s.ctl", "targ");
250 if (targ_ctl_dev == (dev_t) 0) {
251 printf("targ: failed to create control dev\n");
252 }
253}
254
255static void
256targasync(void *callback_arg, u_int32_t code,
257 struct cam_path *path, void *arg)
258{
259 struct cam_periph *periph;
260 struct targ_softc *softc;
261
262 periph = (struct cam_periph *)callback_arg;
263 softc = (struct targ_softc *)periph->softc;
264 switch (code) {
265 case AC_PATH_DEREGISTERED:
266 {
267 /* XXX Implement */
268 break;
269 }
270 default:
271 break;
272 }
273}
274
275/* Attempt to enable our lun */
276static cam_status
277targenlun(struct cam_periph *periph)
278{
279 union ccb immed_ccb;
280 struct targ_softc *softc;
281 cam_status status;
282 int i;
283
284 softc = (struct targ_softc *)periph->softc;
285
286 if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
287 return (CAM_REQ_CMP);
288
289 xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
290 immed_ccb.ccb_h.func_code = XPT_EN_LUN;
291
292 /* Don't need support for any vendor specific commands */
293 immed_ccb.cel.grp6_len = 0;
294 immed_ccb.cel.grp7_len = 0;
295 immed_ccb.cel.enable = 1;
296 xpt_action(&immed_ccb);
297 status = immed_ccb.ccb_h.status;
298 if (status != CAM_REQ_CMP) {
299 xpt_print_path(periph->path);
300 printf("targenlun - Enable Lun Rejected with status 0x%x\n",
301 status);
302 return (status);
303 }
304
305 softc->flags |= TARG_FLAG_LUN_ENABLED;
306
307 /*
308 * Build up a buffer of accept target I/O
309 * operations for incoming selections.
310 */
311 for (i = 0; i < MAX_ACCEPT; i++) {
312 struct ccb_accept_tio *atio;
313
314 atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
315 M_NOWAIT);
316 if (atio == NULL) {
317 status = CAM_RESRC_UNAVAIL;
318 break;
319 }
320
321 atio->ccb_h.ccb_descr = allocdescr();
322
323 if (atio->ccb_h.ccb_descr == NULL) {
324 free(atio, M_DEVBUF);
325 status = CAM_RESRC_UNAVAIL;
326 break;
327 }
328
329 xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
330 atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
331 atio->ccb_h.cbfcnp = targdone;
332 atio->ccb_h.ccb_flags = TARG_CCB_NONE;
333 xpt_action((union ccb *)atio);
334 status = atio->ccb_h.status;
335 if (status != CAM_REQ_INPROG) {
336 xpt_print_path(periph->path);
337 printf("Queue of atio failed\n");
338 freedescr(atio->ccb_h.ccb_descr);
339 free(atio, M_DEVBUF);
340 break;
341 }
342 ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
343 softc->accept_tio_list;
344 softc->accept_tio_list = atio;
345 }
346
347 if (i == 0) {
348 xpt_print_path(periph->path);
349 printf("targenlun - Could not allocate accept tio CCBs: "
350 "status = 0x%x\n", status);
351 targdislun(periph);
352 return (CAM_REQ_CMP_ERR);
353 }
354
355 /*
356 * Build up a buffer of immediate notify CCBs
357 * so the SIM can tell us of asynchronous target mode events.
358 */
359 for (i = 0; i < MAX_ACCEPT; i++) {
360 struct ccb_immed_notify *inot;
361
362 inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
363 M_NOWAIT);
364
365 if (inot == NULL) {
366 status = CAM_RESRC_UNAVAIL;
367 break;
368 }
369
370 xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
371 inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
372 inot->ccb_h.cbfcnp = targdone;
373 SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
374 periph_links.sle);
375 xpt_action((union ccb *)inot);
376 }
377
378 if (i == 0) {
379 xpt_print_path(periph->path);
380 printf("targenlun - Could not allocate immediate notify CCBs: "
381 "status = 0x%x\n", status);
382 targdislun(periph);
383 return (CAM_REQ_CMP_ERR);
384 }
385
386 return (CAM_REQ_CMP);
387}
388
389static cam_status
390targdislun(struct cam_periph *periph)
391{
392 union ccb ccb;
393 struct targ_softc *softc;
394 struct ccb_accept_tio* atio;
395 struct ccb_hdr *ccb_h;
396
397 softc = (struct targ_softc *)periph->softc;
398 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
399 return CAM_REQ_CMP;
400
401 /* XXX Block for Continue I/O completion */
402
403 /* Kill off all ACCECPT and IMMEDIATE CCBs */
404 while ((atio = softc->accept_tio_list) != NULL) {
405
406 softc->accept_tio_list =
407 ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
408 xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
409 ccb.cab.ccb_h.func_code = XPT_ABORT;
410 ccb.cab.abort_ccb = (union ccb *)atio;
411 xpt_action(&ccb);
412 }
413
414 while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
415 SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
416 xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
417 ccb.cab.ccb_h.func_code = XPT_ABORT;
418 ccb.cab.abort_ccb = (union ccb *)ccb_h;
419 xpt_action(&ccb);
420 }
421
422 /*
423 * Dissable this lun.
424 */
425 xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
426 ccb.cel.ccb_h.func_code = XPT_EN_LUN;
427 ccb.cel.enable = 0;
428 xpt_action(&ccb);
429
430 if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
431 printf("targdislun - Disabling lun on controller failed "
432 "with status 0x%x\n", ccb.cel.ccb_h.status);
433 else
434 softc->flags &= ~TARG_FLAG_LUN_ENABLED;
435 return (ccb.cel.ccb_h.status);
436}
437
438static cam_status
439targctor(struct cam_periph *periph, void *arg)
440{
441 struct ccb_pathinq *cpi;
442 struct targ_softc *softc;
443 int i;
444
445 cpi = (struct ccb_pathinq *)arg;
446
447 /* Allocate our per-instance private storage */
448 softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
449 if (softc == NULL) {
450 printf("targctor: unable to malloc softc\n");
451 return (CAM_REQ_CMP_ERR);
452 }
453
454 bzero(softc, sizeof(*softc));
455 TAILQ_INIT(&softc->pending_queue);
456 TAILQ_INIT(&softc->work_queue);
457 TAILQ_INIT(&softc->snd_ccb_queue);
458 TAILQ_INIT(&softc->rcv_ccb_queue);
459 TAILQ_INIT(&softc->unknown_atio_queue);
460 bufq_init(&softc->snd_buf_queue);
461 bufq_init(&softc->rcv_buf_queue);
460 bioq_init(&softc->snd_bio_queue);
461 bioq_init(&softc->rcv_bio_queue);
462 softc->accept_tio_list = NULL;
463 SLIST_INIT(&softc->immed_notify_slist);
464 softc->state = TARG_STATE_NORMAL;
465 periph->softc = softc;
466 softc->init_level++;
467
468 cam_extend_set(targperiphs, periph->unit_number, periph);
469
470 /*
471 * We start out life with a UA to indicate power-on/reset.
472 */
473 for (i = 0; i < MAX_INITIATORS; i++)
474 softc->istate[i].pending_ua = UA_POWER_ON;
475
476 /*
477 * Allocate an initial inquiry data buffer. We might allow the
478 * user to override this later via an ioctl.
479 */
480 softc->inq_data_len = sizeof(*softc->inq_data);
481 softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
482 if (softc->inq_data == NULL) {
483 printf("targctor - Unable to malloc inquiry data\n");
484 targdtor(periph);
485 return (CAM_RESRC_UNAVAIL);
486 }
487 bzero(softc->inq_data, softc->inq_data_len);
488 softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
489 softc->inq_data->version = 2;
490 softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
491 softc->inq_data->flags =
492 cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32);
493 softc->inq_data->additional_length = softc->inq_data_len - 4;
494 strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
495 strncpy(softc->inq_data->product, "TM-PT ", SID_PRODUCT_SIZE);
496 strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
497 softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT,
498 GID_OPERATOR, 0600, "%s%d",
499 periph->periph_name, periph->unit_number);
500 softc->init_level++;
501 return (CAM_REQ_CMP);
502}
503
504static void
505targdtor(struct cam_periph *periph)
506{
507 struct targ_softc *softc;
508
509 softc = (struct targ_softc *)periph->softc;
510
511 softc->state = TARG_STATE_TEARDOWN;
512
513 targdislun(periph);
514
515 cam_extend_release(targperiphs, periph->unit_number);
516
517 switch (softc->init_level) {
518 default:
519 /* FALLTHROUGH */
520 case 2:
521 free(softc->inq_data, M_DEVBUF);
522 destroy_dev(softc->targ_dev);
523 /* FALLTHROUGH */
524 case 1:
525 free(softc, M_DEVBUF);
526 break;
527 case 0:
528 panic("targdtor - impossible init level");;
529 }
530}
531
532static int
533targopen(dev_t dev, int flags, int fmt, struct proc *p)
534{
535 struct cam_periph *periph;
536 struct targ_softc *softc;
537 u_int unit;
538 cam_status status;
539 int error;
540 int s;
541
542 unit = minor(dev);
543
544 /* An open of the control device always succeeds */
545 if (TARG_IS_CONTROL_DEV(unit))
546 return 0;
547
548 s = splsoftcam();
549 periph = cam_extend_get(targperiphs, unit);
550 if (periph == NULL) {
551 return (ENXIO);
552 splx(s);
553 }
554 if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
555 splx(s);
556 return (error);
557 }
558
559 softc = (struct targ_softc *)periph->softc;
560 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
561 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
562 splx(s);
563 cam_periph_unlock(periph);
564 return(ENXIO);
565 }
566 }
567 splx(s);
568
569 status = targenlun(periph);
570 switch (status) {
571 case CAM_REQ_CMP:
572 error = 0;
573 break;
574 case CAM_RESRC_UNAVAIL:
575 error = ENOMEM;
576 break;
577 case CAM_LUN_ALRDY_ENA:
578 error = EADDRINUSE;
579 break;
580 default:
581 error = ENXIO;
582 break;
583 }
584 cam_periph_unlock(periph);
585 if (error) {
586 cam_periph_release(periph);
587 }
588 return (error);
589}
590
591static int
592targclose(dev_t dev, int flag, int fmt, struct proc *p)
593{
594 struct cam_periph *periph;
595 struct targ_softc *softc;
596 u_int unit;
597 int s;
598 int error;
599
600 unit = minor(dev);
601
602 /* A close of the control device always succeeds */
603 if (TARG_IS_CONTROL_DEV(unit))
604 return 0;
605
606 s = splsoftcam();
607 periph = cam_extend_get(targperiphs, unit);
608 if (periph == NULL) {
609 splx(s);
610 return (ENXIO);
611 }
612 if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
613 return (error);
614 softc = (struct targ_softc *)periph->softc;
615 splx(s);
616
617 targdislun(periph);
618
619 cam_periph_unlock(periph);
620 cam_periph_release(periph);
621
622 return (0);
623}
624
625static int
626targallocinstance(struct ioc_alloc_unit *alloc_unit)
627{
628 struct ccb_pathinq cpi;
629 struct cam_path *path;
630 struct cam_periph *periph;
631 cam_status status;
632 int free_path_on_return;
633 int error;
634
635 free_path_on_return = 0;
636 status = xpt_create_path(&path, /*periph*/NULL,
637 alloc_unit->path_id,
638 alloc_unit->target_id,
639 alloc_unit->lun_id);
640 if (status != CAM_REQ_CMP) {
641 printf("Couldn't Allocate Path %x\n", status);
642 goto fail;
643 }
644
645 free_path_on_return++;
646
647
648 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
649 cpi.ccb_h.func_code = XPT_PATH_INQ;
650 xpt_action((union ccb *)&cpi);
651 status = cpi.ccb_h.status;
652
653 if (status != CAM_REQ_CMP) {
654 printf("Couldn't CPI %x\n", status);
655 goto fail;
656 }
657
658 /* Can only alloc units on controllers that support target mode */
659 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
660 printf("Controller does not support target mode%x\n", status);
661 status = CAM_PATH_INVALID;
662 goto fail;
663 }
664
665 /* Ensure that we don't already have an instance for this unit. */
666 if ((periph = cam_periph_find(path, "targ")) != NULL) {
667 status = CAM_LUN_ALRDY_ENA;
668 goto fail;
669 }
670
671 /*
672 * Allocate a peripheral instance for
673 * this target instance.
674 */
675 status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
676 "targ", CAM_PERIPH_BIO, path, targasync,
677 0, &cpi);
678
679fail:
680 switch (status) {
681 case CAM_REQ_CMP:
682 {
683 struct cam_periph *periph;
684
685 if ((periph = cam_periph_find(path, "targ")) == NULL)
686 panic("targallocinstance: Succeeded but no periph?");
687 error = 0;
688 alloc_unit->unit = periph->unit_number;
689 break;
690 }
691 case CAM_RESRC_UNAVAIL:
692 error = ENOMEM;
693 break;
694 case CAM_LUN_ALRDY_ENA:
695 error = EADDRINUSE;
696 break;
697 default:
698 printf("targallocinstance: Unexpected CAM status %x\n", status);
699 /* FALLTHROUGH */
700 case CAM_PATH_INVALID:
701 error = ENXIO;
702 break;
703 case CAM_PROVIDE_FAIL:
704 error = ENODEV;
705 break;
706 }
707
708 if (free_path_on_return != 0)
709 xpt_free_path(path);
710
711 return (error);
712}
713
714static int
715targfreeinstance(struct ioc_alloc_unit *alloc_unit)
716{
717 struct cam_path *path;
718 struct cam_periph *periph;
719 struct targ_softc *softc;
720 cam_status status;
721 int free_path_on_return;
722 int error;
723
724 periph = NULL;
725 free_path_on_return = 0;
726 status = xpt_create_path(&path, /*periph*/NULL,
727 alloc_unit->path_id,
728 alloc_unit->target_id,
729 alloc_unit->lun_id);
730 free_path_on_return++;
731
732 if (status != CAM_REQ_CMP)
733 goto fail;
734
735 /* Find our instance. */
736 if ((periph = cam_periph_find(path, "targ")) == NULL) {
737 xpt_print_path(path);
738 printf("Invalid path specified for freeing target instance\n");
739 status = CAM_PATH_INVALID;
740 goto fail;
741 }
742
743 softc = (struct targ_softc *)periph->softc;
744
745 if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
746 status = CAM_BUSY;
747 goto fail;
748 }
749
750fail:
751 if (free_path_on_return != 0)
752 xpt_free_path(path);
753
754 switch (status) {
755 case CAM_REQ_CMP:
756 if (periph != NULL)
757 cam_periph_invalidate(periph);
758 error = 0;
759 break;
760 case CAM_RESRC_UNAVAIL:
761 error = ENOMEM;
762 break;
763 case CAM_LUN_ALRDY_ENA:
764 error = EADDRINUSE;
765 break;
766 default:
767 printf("targfreeinstance: Unexpected CAM status %x\n", status);
768 /* FALLTHROUGH */
769 case CAM_PATH_INVALID:
770 error = ENODEV;
771 break;
772 }
773 return (error);
774}
775
776static int
777targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
778{
779 struct cam_periph *periph;
780 struct targ_softc *softc;
781 u_int unit;
782 int error;
783
784 unit = minor(dev);
785 error = 0;
786 if (TARG_IS_CONTROL_DEV(unit)) {
787 switch (cmd) {
788 case TARGCTLIOALLOCUNIT:
789 error = targallocinstance((struct ioc_alloc_unit*)addr);
790 break;
791 case TARGCTLIOFREEUNIT:
792 error = targfreeinstance((struct ioc_alloc_unit*)addr);
793 break;
794 default:
795 error = EINVAL;
796 break;
797 }
798 return (error);
799 }
800
801 periph = cam_extend_get(targperiphs, unit);
802 if (periph == NULL)
803 return (ENXIO);
804 softc = (struct targ_softc *)periph->softc;
805 switch (cmd) {
806 case TARGIOCFETCHEXCEPTION:
807 *((targ_exception *)addr) = softc->exceptions;
808 break;
809 case TARGIOCCLEAREXCEPTION:
810 {
811 targ_exception clear_mask;
812
813 clear_mask = *((targ_exception *)addr);
814 if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
815 struct ccb_hdr *ccbh;
816
817 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
818 if (ccbh != NULL) {
819 TAILQ_REMOVE(&softc->unknown_atio_queue,
820 ccbh, periph_links.tqe);
821 /* Requeue the ATIO back to the controller */
822 xpt_action((union ccb *)ccbh);
823 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
824 }
825 if (ccbh != NULL)
826 clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
827 }
828 softc->exceptions &= ~clear_mask;
829 if (softc->exceptions == TARG_EXCEPT_NONE
830 && softc->state == TARG_STATE_EXCEPTION) {
831 softc->state = TARG_STATE_NORMAL;
832 targrunqueue(periph, softc);
833 }
834 break;
835 }
836 case TARGIOCFETCHATIO:
837 {
838 struct ccb_hdr *ccbh;
839
840 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
841 if (ccbh != NULL) {
842 bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
843 } else {
844 error = ENOENT;
845 }
846 break;
847 }
848 case TARGIOCCOMMAND:
849 {
850 union ccb *inccb;
851 union ccb *ccb;
852
853 /*
854 * XXX JGibbs
855 * This code is lifted directly from the pass-thru driver.
856 * Perhaps this should be moved to a library????
857 */
858 inccb = (union ccb *)addr;
859 ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
860
861 error = targsendccb(periph, ccb, inccb);
862
863 xpt_release_ccb(ccb);
864
865 break;
866 }
867 case TARGIOCGETISTATE:
868 case TARGIOCSETISTATE:
869 {
870 struct ioc_initiator_state *ioc_istate;
871
872 ioc_istate = (struct ioc_initiator_state *)addr;
873 if (ioc_istate->initiator_id > MAX_INITIATORS) {
874 error = EINVAL;
875 break;
876 }
877 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
878 ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
879 if (cmd == TARGIOCGETISTATE) {
880 bcopy(&softc->istate[ioc_istate->initiator_id],
881 &ioc_istate->istate, sizeof(ioc_istate->istate));
882 } else {
883 bcopy(&ioc_istate->istate,
884 &softc->istate[ioc_istate->initiator_id],
885 sizeof(ioc_istate->istate));
886 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
887 ("pending_ca now %x\n",
888 softc->istate[ioc_istate->initiator_id].pending_ca));
889 }
890 break;
891 }
892 default:
893 error = ENOTTY;
894 break;
895 }
896 return (error);
897}
898
899/*
900 * XXX JGibbs lifted from pass-thru driver.
901 * Generally, "ccb" should be the CCB supplied by the kernel. "inccb"
902 * should be the CCB that is copied in from the user.
903 */
904static int
905targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
906{
907 struct targ_softc *softc;
908 struct cam_periph_map_info mapinfo;
909 int error, need_unmap;
910 int s;
911
912 softc = (struct targ_softc *)periph->softc;
913
914 need_unmap = 0;
915
916 /*
917 * There are some fields in the CCB header that need to be
918 * preserved, the rest we get from the user.
919 */
920 xpt_merge_ccb(ccb, inccb);
921
922 /*
923 * There's no way for the user to have a completion
924 * function, so we put our own completion function in here.
925 */
926 ccb->ccb_h.cbfcnp = targdone;
927
928 /*
929 * We only attempt to map the user memory into kernel space
930 * if they haven't passed in a physical memory pointer,
931 * and if there is actually an I/O operation to perform.
932 * Right now cam_periph_mapmem() only supports SCSI and device
933 * match CCBs. For the SCSI CCBs, we only pass the CCB in if
934 * there's actually data to map. cam_periph_mapmem() will do the
935 * right thing, even if there isn't data to map, but since CCBs
936 * without data are a reasonably common occurance (e.g. test unit
937 * ready), it will save a few cycles if we check for it here.
938 */
939 if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
940 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
941 && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
942 || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
943
944 bzero(&mapinfo, sizeof(mapinfo));
945
946 error = cam_periph_mapmem(ccb, &mapinfo);
947
948 /*
949 * cam_periph_mapmem returned an error, we can't continue.
950 * Return the error to the user.
951 */
952 if (error)
953 return(error);
954
955 /*
956 * We successfully mapped the memory in, so we need to
957 * unmap it when the transaction is done.
958 */
959 need_unmap = 1;
960 }
961
962 /*
963 * Once queued on the pending CCB list, this CCB will be protected
964 * by the error recovery handling used for 'buffer I/O' ccbs. Since
965 * we are in a process context here, however, the software interrupt
966 * for this driver may deliver an event invalidating this CCB just
967 * before we queue it. Close this race condition by blocking
968 * software interrupt delivery, checking for any pertinent queued
969 * events, and only then queuing this CCB.
970 */
971 s = splsoftcam();
972 if (softc->exceptions == 0) {
973 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
974 TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
975 periph_links.tqe);
976
977 /*
978 * If the user wants us to perform any error recovery,
979 * then honor that request. Otherwise, it's up to the
980 * user to perform any error recovery.
981 */
982 error = cam_periph_runccb(ccb,
983 /* error handler */NULL,
984 /* cam_flags */ 0,
985 /* sense_flags */SF_RETRY_UA,
986 &softc->device_stats);
987
988 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
989 TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h,
990 periph_links.tqe);
991 } else {
992 ccb->ccb_h.status = CAM_UNACKED_EVENT;
993 error = 0;
994 }
995 splx(s);
996
997 if (need_unmap != 0)
998 cam_periph_unmapmem(ccb, &mapinfo);
999
1000 ccb->ccb_h.cbfcnp = NULL;
1001 ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
1002 bcopy(ccb, inccb, sizeof(union ccb));
1003
1004 return(error);
1005}
1006
1007
1008static int
1009targpoll(dev_t dev, int poll_events, struct proc *p)
1010{
1011 struct cam_periph *periph;
1012 struct targ_softc *softc;
1013 u_int unit;
1014 int revents;
1015 int s;
1016
1017 unit = minor(dev);
1018
1019 /* ioctl is the only supported operation of the control device */
1020 if (TARG_IS_CONTROL_DEV(unit))
1021 return EINVAL;
1022
1023 periph = cam_extend_get(targperiphs, unit);
1024 if (periph == NULL)
1025 return (ENXIO);
1026 softc = (struct targ_softc *)periph->softc;
1027
1028 revents = 0;
1029 s = splcam();
1030 if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
1031 if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
462 softc->accept_tio_list = NULL;
463 SLIST_INIT(&softc->immed_notify_slist);
464 softc->state = TARG_STATE_NORMAL;
465 periph->softc = softc;
466 softc->init_level++;
467
468 cam_extend_set(targperiphs, periph->unit_number, periph);
469
470 /*
471 * We start out life with a UA to indicate power-on/reset.
472 */
473 for (i = 0; i < MAX_INITIATORS; i++)
474 softc->istate[i].pending_ua = UA_POWER_ON;
475
476 /*
477 * Allocate an initial inquiry data buffer. We might allow the
478 * user to override this later via an ioctl.
479 */
480 softc->inq_data_len = sizeof(*softc->inq_data);
481 softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
482 if (softc->inq_data == NULL) {
483 printf("targctor - Unable to malloc inquiry data\n");
484 targdtor(periph);
485 return (CAM_RESRC_UNAVAIL);
486 }
487 bzero(softc->inq_data, softc->inq_data_len);
488 softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
489 softc->inq_data->version = 2;
490 softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
491 softc->inq_data->flags =
492 cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32);
493 softc->inq_data->additional_length = softc->inq_data_len - 4;
494 strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
495 strncpy(softc->inq_data->product, "TM-PT ", SID_PRODUCT_SIZE);
496 strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
497 softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT,
498 GID_OPERATOR, 0600, "%s%d",
499 periph->periph_name, periph->unit_number);
500 softc->init_level++;
501 return (CAM_REQ_CMP);
502}
503
504static void
505targdtor(struct cam_periph *periph)
506{
507 struct targ_softc *softc;
508
509 softc = (struct targ_softc *)periph->softc;
510
511 softc->state = TARG_STATE_TEARDOWN;
512
513 targdislun(periph);
514
515 cam_extend_release(targperiphs, periph->unit_number);
516
517 switch (softc->init_level) {
518 default:
519 /* FALLTHROUGH */
520 case 2:
521 free(softc->inq_data, M_DEVBUF);
522 destroy_dev(softc->targ_dev);
523 /* FALLTHROUGH */
524 case 1:
525 free(softc, M_DEVBUF);
526 break;
527 case 0:
528 panic("targdtor - impossible init level");;
529 }
530}
531
532static int
533targopen(dev_t dev, int flags, int fmt, struct proc *p)
534{
535 struct cam_periph *periph;
536 struct targ_softc *softc;
537 u_int unit;
538 cam_status status;
539 int error;
540 int s;
541
542 unit = minor(dev);
543
544 /* An open of the control device always succeeds */
545 if (TARG_IS_CONTROL_DEV(unit))
546 return 0;
547
548 s = splsoftcam();
549 periph = cam_extend_get(targperiphs, unit);
550 if (periph == NULL) {
551 return (ENXIO);
552 splx(s);
553 }
554 if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
555 splx(s);
556 return (error);
557 }
558
559 softc = (struct targ_softc *)periph->softc;
560 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
561 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
562 splx(s);
563 cam_periph_unlock(periph);
564 return(ENXIO);
565 }
566 }
567 splx(s);
568
569 status = targenlun(periph);
570 switch (status) {
571 case CAM_REQ_CMP:
572 error = 0;
573 break;
574 case CAM_RESRC_UNAVAIL:
575 error = ENOMEM;
576 break;
577 case CAM_LUN_ALRDY_ENA:
578 error = EADDRINUSE;
579 break;
580 default:
581 error = ENXIO;
582 break;
583 }
584 cam_periph_unlock(periph);
585 if (error) {
586 cam_periph_release(periph);
587 }
588 return (error);
589}
590
591static int
592targclose(dev_t dev, int flag, int fmt, struct proc *p)
593{
594 struct cam_periph *periph;
595 struct targ_softc *softc;
596 u_int unit;
597 int s;
598 int error;
599
600 unit = minor(dev);
601
602 /* A close of the control device always succeeds */
603 if (TARG_IS_CONTROL_DEV(unit))
604 return 0;
605
606 s = splsoftcam();
607 periph = cam_extend_get(targperiphs, unit);
608 if (periph == NULL) {
609 splx(s);
610 return (ENXIO);
611 }
612 if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
613 return (error);
614 softc = (struct targ_softc *)periph->softc;
615 splx(s);
616
617 targdislun(periph);
618
619 cam_periph_unlock(periph);
620 cam_periph_release(periph);
621
622 return (0);
623}
624
625static int
626targallocinstance(struct ioc_alloc_unit *alloc_unit)
627{
628 struct ccb_pathinq cpi;
629 struct cam_path *path;
630 struct cam_periph *periph;
631 cam_status status;
632 int free_path_on_return;
633 int error;
634
635 free_path_on_return = 0;
636 status = xpt_create_path(&path, /*periph*/NULL,
637 alloc_unit->path_id,
638 alloc_unit->target_id,
639 alloc_unit->lun_id);
640 if (status != CAM_REQ_CMP) {
641 printf("Couldn't Allocate Path %x\n", status);
642 goto fail;
643 }
644
645 free_path_on_return++;
646
647
648 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
649 cpi.ccb_h.func_code = XPT_PATH_INQ;
650 xpt_action((union ccb *)&cpi);
651 status = cpi.ccb_h.status;
652
653 if (status != CAM_REQ_CMP) {
654 printf("Couldn't CPI %x\n", status);
655 goto fail;
656 }
657
658 /* Can only alloc units on controllers that support target mode */
659 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
660 printf("Controller does not support target mode%x\n", status);
661 status = CAM_PATH_INVALID;
662 goto fail;
663 }
664
665 /* Ensure that we don't already have an instance for this unit. */
666 if ((periph = cam_periph_find(path, "targ")) != NULL) {
667 status = CAM_LUN_ALRDY_ENA;
668 goto fail;
669 }
670
671 /*
672 * Allocate a peripheral instance for
673 * this target instance.
674 */
675 status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
676 "targ", CAM_PERIPH_BIO, path, targasync,
677 0, &cpi);
678
679fail:
680 switch (status) {
681 case CAM_REQ_CMP:
682 {
683 struct cam_periph *periph;
684
685 if ((periph = cam_periph_find(path, "targ")) == NULL)
686 panic("targallocinstance: Succeeded but no periph?");
687 error = 0;
688 alloc_unit->unit = periph->unit_number;
689 break;
690 }
691 case CAM_RESRC_UNAVAIL:
692 error = ENOMEM;
693 break;
694 case CAM_LUN_ALRDY_ENA:
695 error = EADDRINUSE;
696 break;
697 default:
698 printf("targallocinstance: Unexpected CAM status %x\n", status);
699 /* FALLTHROUGH */
700 case CAM_PATH_INVALID:
701 error = ENXIO;
702 break;
703 case CAM_PROVIDE_FAIL:
704 error = ENODEV;
705 break;
706 }
707
708 if (free_path_on_return != 0)
709 xpt_free_path(path);
710
711 return (error);
712}
713
714static int
715targfreeinstance(struct ioc_alloc_unit *alloc_unit)
716{
717 struct cam_path *path;
718 struct cam_periph *periph;
719 struct targ_softc *softc;
720 cam_status status;
721 int free_path_on_return;
722 int error;
723
724 periph = NULL;
725 free_path_on_return = 0;
726 status = xpt_create_path(&path, /*periph*/NULL,
727 alloc_unit->path_id,
728 alloc_unit->target_id,
729 alloc_unit->lun_id);
730 free_path_on_return++;
731
732 if (status != CAM_REQ_CMP)
733 goto fail;
734
735 /* Find our instance. */
736 if ((periph = cam_periph_find(path, "targ")) == NULL) {
737 xpt_print_path(path);
738 printf("Invalid path specified for freeing target instance\n");
739 status = CAM_PATH_INVALID;
740 goto fail;
741 }
742
743 softc = (struct targ_softc *)periph->softc;
744
745 if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
746 status = CAM_BUSY;
747 goto fail;
748 }
749
750fail:
751 if (free_path_on_return != 0)
752 xpt_free_path(path);
753
754 switch (status) {
755 case CAM_REQ_CMP:
756 if (periph != NULL)
757 cam_periph_invalidate(periph);
758 error = 0;
759 break;
760 case CAM_RESRC_UNAVAIL:
761 error = ENOMEM;
762 break;
763 case CAM_LUN_ALRDY_ENA:
764 error = EADDRINUSE;
765 break;
766 default:
767 printf("targfreeinstance: Unexpected CAM status %x\n", status);
768 /* FALLTHROUGH */
769 case CAM_PATH_INVALID:
770 error = ENODEV;
771 break;
772 }
773 return (error);
774}
775
776static int
777targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
778{
779 struct cam_periph *periph;
780 struct targ_softc *softc;
781 u_int unit;
782 int error;
783
784 unit = minor(dev);
785 error = 0;
786 if (TARG_IS_CONTROL_DEV(unit)) {
787 switch (cmd) {
788 case TARGCTLIOALLOCUNIT:
789 error = targallocinstance((struct ioc_alloc_unit*)addr);
790 break;
791 case TARGCTLIOFREEUNIT:
792 error = targfreeinstance((struct ioc_alloc_unit*)addr);
793 break;
794 default:
795 error = EINVAL;
796 break;
797 }
798 return (error);
799 }
800
801 periph = cam_extend_get(targperiphs, unit);
802 if (periph == NULL)
803 return (ENXIO);
804 softc = (struct targ_softc *)periph->softc;
805 switch (cmd) {
806 case TARGIOCFETCHEXCEPTION:
807 *((targ_exception *)addr) = softc->exceptions;
808 break;
809 case TARGIOCCLEAREXCEPTION:
810 {
811 targ_exception clear_mask;
812
813 clear_mask = *((targ_exception *)addr);
814 if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
815 struct ccb_hdr *ccbh;
816
817 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
818 if (ccbh != NULL) {
819 TAILQ_REMOVE(&softc->unknown_atio_queue,
820 ccbh, periph_links.tqe);
821 /* Requeue the ATIO back to the controller */
822 xpt_action((union ccb *)ccbh);
823 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
824 }
825 if (ccbh != NULL)
826 clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
827 }
828 softc->exceptions &= ~clear_mask;
829 if (softc->exceptions == TARG_EXCEPT_NONE
830 && softc->state == TARG_STATE_EXCEPTION) {
831 softc->state = TARG_STATE_NORMAL;
832 targrunqueue(periph, softc);
833 }
834 break;
835 }
836 case TARGIOCFETCHATIO:
837 {
838 struct ccb_hdr *ccbh;
839
840 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
841 if (ccbh != NULL) {
842 bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
843 } else {
844 error = ENOENT;
845 }
846 break;
847 }
848 case TARGIOCCOMMAND:
849 {
850 union ccb *inccb;
851 union ccb *ccb;
852
853 /*
854 * XXX JGibbs
855 * This code is lifted directly from the pass-thru driver.
856 * Perhaps this should be moved to a library????
857 */
858 inccb = (union ccb *)addr;
859 ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
860
861 error = targsendccb(periph, ccb, inccb);
862
863 xpt_release_ccb(ccb);
864
865 break;
866 }
867 case TARGIOCGETISTATE:
868 case TARGIOCSETISTATE:
869 {
870 struct ioc_initiator_state *ioc_istate;
871
872 ioc_istate = (struct ioc_initiator_state *)addr;
873 if (ioc_istate->initiator_id > MAX_INITIATORS) {
874 error = EINVAL;
875 break;
876 }
877 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
878 ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
879 if (cmd == TARGIOCGETISTATE) {
880 bcopy(&softc->istate[ioc_istate->initiator_id],
881 &ioc_istate->istate, sizeof(ioc_istate->istate));
882 } else {
883 bcopy(&ioc_istate->istate,
884 &softc->istate[ioc_istate->initiator_id],
885 sizeof(ioc_istate->istate));
886 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
887 ("pending_ca now %x\n",
888 softc->istate[ioc_istate->initiator_id].pending_ca));
889 }
890 break;
891 }
892 default:
893 error = ENOTTY;
894 break;
895 }
896 return (error);
897}
898
899/*
900 * XXX JGibbs lifted from pass-thru driver.
901 * Generally, "ccb" should be the CCB supplied by the kernel. "inccb"
902 * should be the CCB that is copied in from the user.
903 */
904static int
905targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
906{
907 struct targ_softc *softc;
908 struct cam_periph_map_info mapinfo;
909 int error, need_unmap;
910 int s;
911
912 softc = (struct targ_softc *)periph->softc;
913
914 need_unmap = 0;
915
916 /*
917 * There are some fields in the CCB header that need to be
918 * preserved, the rest we get from the user.
919 */
920 xpt_merge_ccb(ccb, inccb);
921
922 /*
923 * There's no way for the user to have a completion
924 * function, so we put our own completion function in here.
925 */
926 ccb->ccb_h.cbfcnp = targdone;
927
928 /*
929 * We only attempt to map the user memory into kernel space
930 * if they haven't passed in a physical memory pointer,
931 * and if there is actually an I/O operation to perform.
932 * Right now cam_periph_mapmem() only supports SCSI and device
933 * match CCBs. For the SCSI CCBs, we only pass the CCB in if
934 * there's actually data to map. cam_periph_mapmem() will do the
935 * right thing, even if there isn't data to map, but since CCBs
936 * without data are a reasonably common occurance (e.g. test unit
937 * ready), it will save a few cycles if we check for it here.
938 */
939 if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
940 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
941 && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
942 || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
943
944 bzero(&mapinfo, sizeof(mapinfo));
945
946 error = cam_periph_mapmem(ccb, &mapinfo);
947
948 /*
949 * cam_periph_mapmem returned an error, we can't continue.
950 * Return the error to the user.
951 */
952 if (error)
953 return(error);
954
955 /*
956 * We successfully mapped the memory in, so we need to
957 * unmap it when the transaction is done.
958 */
959 need_unmap = 1;
960 }
961
962 /*
963 * Once queued on the pending CCB list, this CCB will be protected
964 * by the error recovery handling used for 'buffer I/O' ccbs. Since
965 * we are in a process context here, however, the software interrupt
966 * for this driver may deliver an event invalidating this CCB just
967 * before we queue it. Close this race condition by blocking
968 * software interrupt delivery, checking for any pertinent queued
969 * events, and only then queuing this CCB.
970 */
971 s = splsoftcam();
972 if (softc->exceptions == 0) {
973 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
974 TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
975 periph_links.tqe);
976
977 /*
978 * If the user wants us to perform any error recovery,
979 * then honor that request. Otherwise, it's up to the
980 * user to perform any error recovery.
981 */
982 error = cam_periph_runccb(ccb,
983 /* error handler */NULL,
984 /* cam_flags */ 0,
985 /* sense_flags */SF_RETRY_UA,
986 &softc->device_stats);
987
988 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
989 TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h,
990 periph_links.tqe);
991 } else {
992 ccb->ccb_h.status = CAM_UNACKED_EVENT;
993 error = 0;
994 }
995 splx(s);
996
997 if (need_unmap != 0)
998 cam_periph_unmapmem(ccb, &mapinfo);
999
1000 ccb->ccb_h.cbfcnp = NULL;
1001 ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
1002 bcopy(ccb, inccb, sizeof(union ccb));
1003
1004 return(error);
1005}
1006
1007
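/*
 * Poll support.  POLLOUT/POLLWRNORM is reported when an initiator RECEIVE
 * is queued (rcv_ccb_queue) with no user buffer yet available to satisfy
 * it, and POLLIN/POLLRDNORM when an initiator SEND is queued
 * (snd_ccb_queue) under the same condition.  A softc state other than
 * TARG_STATE_NORMAL is flagged as POLLERR so the user process will notice
 * a pending exception.
 */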
1008static int
1009targpoll(dev_t dev, int poll_events, struct proc *p)
1010{
1011 struct cam_periph *periph;
1012 struct targ_softc *softc;
1013 u_int unit;
1014 int revents;
1015 int s;
1016
1017 unit = minor(dev);
1018
1019 /* ioctl is the only supported operation of the control device */
1020 if (TARG_IS_CONTROL_DEV(unit))
1021 return EINVAL;
1022
1023 periph = cam_extend_get(targperiphs, unit);
1024 if (periph == NULL)
1025 return (ENXIO);
1026 softc = (struct targ_softc *)periph->softc;
1027
1028 revents = 0;
1029 s = splcam();
1030 if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
1031 if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
1032 && bufq_first(&softc->rcv_buf_queue) == NULL)
1032 && bioq_first(&softc->rcv_bio_queue) == NULL)
1033 revents |= poll_events & (POLLOUT | POLLWRNORM);
1034 }
1035 if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
1036 if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
1037 && bufq_first(&softc->snd_buf_queue) == NULL)
1037 && bioq_first(&softc->snd_bio_queue) == NULL)
1038 revents |= poll_events & (POLLIN | POLLRDNORM);
1039 }
1040
1041 if (softc->state != TARG_STATE_NORMAL)
1042 revents |= POLLERR;
1043
1044 if (revents == 0) {
1045 if (poll_events & (POLLOUT | POLLWRNORM))
1046 selrecord(p, &softc->rcv_select);
1047 if (poll_events & (POLLIN | POLLRDNORM))
1048 selrecord(p, &softc->snd_select);
1049 }
1050 splx(s);
1051 return (revents);
1052}
1053
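/*
 * Read entry point.  A zero-length uio is treated as an EOF indication
 * from the user process: it sets TARG_FLAG_SEND_EOF and kicks
 * targrunqueue() so any queued SEND ATIO can be completed without data.
 * Everything else is handed to physread() for normal buffer I/O.
 */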
1054static int
1055targread(dev_t dev, struct uio *uio, int ioflag)
1056{
1057 u_int unit;
1058
1059 unit = minor(dev);
1060 /* ioctl is the only supported operation of the control device */
1061 if (TARG_IS_CONTROL_DEV(unit))
1062 return EINVAL;
1063
1064 if (uio->uio_iovcnt == 0
1065 || uio->uio_iov->iov_len == 0) {
1066 /* EOF */
1067 struct cam_periph *periph;
1068 struct targ_softc *softc;
1069 int s;
1070
1071 s = splcam();
1072 periph = cam_extend_get(targperiphs, unit);
1073 if (periph == NULL)
1074 return (ENXIO);
1075 softc = (struct targ_softc *)periph->softc;
1076 softc->flags |= TARG_FLAG_SEND_EOF;
1077 splx(s);
1078 targrunqueue(periph, softc);
1079 return (0);
1080 }
1081 return(physread(dev, uio, ioflag));
1082}
1083
1084static int
1085targwrite(dev_t dev, struct uio *uio, int ioflag)
1086{
1087 u_int unit;
1088
1089 unit = minor(dev);
1090 /* ioctl is the only supported operation of the control device */
1091 if (TARG_IS_CONTROL_DEV(unit))
1092 return EINVAL;
1093
1094 if (uio->uio_iovcnt == 0
1095 || uio->uio_iov->iov_len == 0) {
1096 /* EOF */
1097 struct cam_periph *periph;
1098 struct targ_softc *softc;
1099 int s;
1100
1101 s = splcam();
1102 periph = cam_extend_get(targperiphs, unit);
1103 if (periph == NULL)
1104 return (ENXIO);
1105 softc = (struct targ_softc *)periph->softc;
1106 softc->flags |= TARG_FLAG_RECEIVE_EOF;
1107 splx(s);
1108 targrunqueue(periph, softc);
1109 return (0);
1110 }
1111 return(physwrite(dev, uio, ioflag));
1112}
1113
1114/*
1115 * Actually translate the requested transfer into one the physical driver
1116 * can understand. The transfer is described by a buf and will include
1117 * only one physical transfer.
1118 */
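/*
 * The read/write sense is relative to the initiator: a user read(2)
 * supplies the buffer that absorbs data from an initiator SEND, while a
 * user write(2) supplies the data returned for an initiator RECEIVE.
 */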
1119static void
1120targstrategy(struct buf *bp)
1120targstrategy(struct bio *bp)
1121{
1122 struct cam_periph *periph;
1123 struct targ_softc *softc;
1124 u_int unit;
1125 int s;
1126
1127 unit = minor(bp->b_dev);
1127 unit = minor(bp->bio_dev);
1128
1129 /* ioctl is the only supported operation of the control device */
1130 if (TARG_IS_CONTROL_DEV(unit)) {
1131 bp->b_error = EINVAL;
1131 bp->bio_error = EINVAL;
1132 goto bad;
1133 }
1134
1135 periph = cam_extend_get(targperiphs, unit);
1136 if (periph == NULL) {
1137 bp->b_error = ENXIO;
1137 bp->bio_error = ENXIO;
1138 goto bad;
1139 }
1140 softc = (struct targ_softc *)periph->softc;
1141
1142 /*
1143 * Mask interrupts so that the device cannot be invalidated until
1144 * after we are in the queue. Otherwise, we might not properly
1145 * clean up one of the buffers.
1146 */
1147 s = splbio();
1148
1149 /*
1150 * If there is an exception pending, error out
1151 */
1152 if (softc->state != TARG_STATE_NORMAL) {
1153 splx(s);
1154 if (softc->state == TARG_STATE_EXCEPTION
1155 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1156 bp->b_error = EBUSY;
1156 bp->bio_error = EBUSY;
1157 else
1158 bp->b_error = ENXIO;
1158 bp->bio_error = ENXIO;
1159 goto bad;
1160 }
1161
1162 /*
1163 * Place it in the queue of buffers available for either
1164 * SEND or RECEIVE commands.
1165 *
1166 */
1167 bp->b_resid = bp->b_bcount;
1168 if (bp->b_iocmd == BIO_READ) {
1167 bp->bio_resid = bp->bio_bcount;
1168 if (bp->bio_cmd == BIO_READ) {
1169 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1170 ("Queued a SEND buffer\n"));
1171 bufq_insert_tail(&softc->snd_buf_queue, bp);
1171 bioq_insert_tail(&softc->snd_bio_queue, bp);
1172 } else {
1173 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1174 ("Queued a RECEIVE buffer\n"));
1175 bufq_insert_tail(&softc->rcv_buf_queue, bp);
1175 bioq_insert_tail(&softc->rcv_bio_queue, bp);
1176 }
1177
1178 splx(s);
1179
1180 /*
1181 * Attempt to use the new buffer to service any pending
1182 * target commands.
1183 */
1184 targrunqueue(periph, softc);
1185
1186 return;
1187bad:
1188 bp->b_ioflags |= BIO_ERROR;
1188 bp->bio_flags |= BIO_ERROR;
1189
1190 /*
1191 * Correctly set the buf to indicate a completed xfer
1192 */
1193 bp->b_resid = bp->b_bcount;
1193 bp->bio_resid = bp->bio_bcount;
1194 biodone(bp);
1195}
1196
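/*
 * Pair a queued ATIO with a user buffer (or a pending EOF flag) and move
 * the result to the work queue so targstart() can issue the data phase.
 * Only one request is started at a time to preserve data ordering.
 */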
1197static void
1198targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
1199{
1200 struct ccb_queue *pending_queue;
1201 struct ccb_accept_tio *atio;
1202 struct buf_queue_head *bufq;
1203 struct buf *bp;
1202 struct bio_queue_head *bioq;
1203 struct bio *bp;
1204 struct targ_cmd_desc *desc;
1205 struct ccb_hdr *ccbh;
1206 int s;
1207
1208 s = splbio();
1209 pending_queue = NULL;
1210 bufq = NULL;
1210 bioq = NULL;
1211 ccbh = NULL;
1212 /* Only run one request at a time to maintain data ordering. */
1213 if (softc->state != TARG_STATE_NORMAL
1214 || TAILQ_FIRST(&softc->work_queue) != NULL
1215 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
1216 splx(s);
1217 return;
1218 }
1219
1220 if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
1220 if (((bp = bioq_first(&softc->snd_bio_queue)) != NULL
1221 || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
1222 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {
1223
1224 if (bp == NULL)
1225 softc->flags &= ~TARG_FLAG_SEND_EOF;
1226 else {
1227 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1228 ("De-Queued a SEND buffer %ld\n",
1229 bp->b_bcount));
1229 bp->bio_bcount));
1230 }
1231 bufq = &softc->snd_buf_queue;
1231 bioq = &softc->snd_bio_queue;
1232 pending_queue = &softc->snd_ccb_queue;
1233 } else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
1233 } else if (((bp = bioq_first(&softc->rcv_bio_queue)) != NULL
1234 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
1235 && (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
1236
1237 if (bp == NULL)
1238 softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
1239 else {
1240 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1241 ("De-Queued a RECEIVE buffer %ld\n",
1242 bp->b_bcount));
1242 bp->bio_bcount));
1243 }
1244 bufq = &softc->rcv_buf_queue;
1244 bioq = &softc->rcv_bio_queue;
1245 pending_queue = &softc->rcv_ccb_queue;
1246 }
1247
1248 if (pending_queue != NULL) {
1249 /* Process a request */
1250 atio = (struct ccb_accept_tio *)ccbh;
1251 TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
1252 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1253 desc->bp = bp;
1254 if (bp == NULL) {
1255 /* EOF */
1256 desc->data = NULL;
1257 desc->data_increment = 0;
1258 desc->data_resid = 0;
1259 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1260 atio->ccb_h.flags |= CAM_DIR_NONE;
1261 } else {
1262 bufq_remove(bufq, bp);
1263 desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
1262 bioq_remove(bioq, bp);
1263 desc->data = &bp->bio_data[bp->bio_bcount - bp->bio_resid];
1264 desc->data_increment =
1265 MIN(desc->data_resid, bp->b_resid);
1265 MIN(desc->data_resid, bp->bio_resid);
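			/*
			 * XXX The transfer is further clamped to 32 bytes per
			 * CTIO below; this looks like a debugging/chunking
			 * limit rather than a hardware requirement.
			 */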
1266 desc->data_increment =
1267 MIN(desc->data_increment, 32);
1268 }
1269 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1270 ("Buffer command: data %x: datacnt %d\n",
1271 (intptr_t)desc->data, desc->data_increment));
1272 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1273 periph_links.tqe);
1274 }
1275 if (TAILQ_FIRST(&softc->work_queue) != NULL) {
1276 splx(s);
1277 xpt_schedule(periph, /*XXX priority*/1);
1278 } else
1279 splx(s);
1280}
1281
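/*
 * xpt_schedule() callback: pull the next ATIO off the work queue, build a
 * CTIO for it with cam_fill_ctio(), attach sense data when a CHECK
 * CONDITION or COMMAND TERMINATED status is being returned, and hand the
 * CTIO to the transport.  The CTIO is tracked on pending_queue until
 * targdone() sees it complete.
 */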
1282static void
1283targstart(struct cam_periph *periph, union ccb *start_ccb)
1284{
1285 struct targ_softc *softc;
1286 struct ccb_hdr *ccbh;
1287 struct ccb_accept_tio *atio;
1288 struct targ_cmd_desc *desc;
1289 struct ccb_scsiio *csio;
1290 targ_ccb_flags flags;
1291 int s;
1292
1293 softc = (struct targ_softc *)periph->softc;
1294
1295 s = splbio();
1296 ccbh = TAILQ_FIRST(&softc->work_queue);
1297 if (periph->immediate_priority <= periph->pinfo.priority) {
1298 start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING;
1299 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1300 periph_links.sle);
1301 periph->immediate_priority = CAM_PRIORITY_NONE;
1302 splx(s);
1303 wakeup(&periph->ccb_list);
1304 } else if (ccbh == NULL) {
1305 splx(s);
1306 xpt_release_ccb(start_ccb);
1307 } else {
1308 TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
1309 splx(s);
1310 atio = (struct ccb_accept_tio*)ccbh;
1311 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1312
1313 /* Is this a tagged request? */
1314 flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
1315
1316 /*
1317 * If we are done with the transaction, tell the
1318 * controller to send status and perform a CMD_CMPLT.
1319 */
1320 if (desc->data_resid == desc->data_increment)
1321 flags |= CAM_SEND_STATUS;
1322
1323 csio = &start_ccb->csio;
1324 cam_fill_ctio(csio,
1325 /*retries*/2,
1326 targdone,
1327 flags,
1328 /*tag_action*/MSG_SIMPLE_Q_TAG,
1329 atio->tag_id,
1330 atio->init_id,
1331 desc->status,
1332 /*data_ptr*/desc->data_increment == 0
1333 ? NULL : desc->data,
1334 /*dxfer_len*/desc->data_increment,
1335 /*timeout*/desc->timeout);
1336
1337 if ((flags & CAM_SEND_STATUS) != 0
1338 && (desc->status == SCSI_STATUS_CHECK_COND
1339 || desc->status == SCSI_STATUS_CMD_TERMINATED)) {
1340 struct initiator_state *istate;
1341
1342 istate = &softc->istate[atio->init_id];
1343 csio->sense_len = istate->sense_data.extra_len
1344 + offsetof(struct scsi_sense_data,
1345 extra_len);
1346 bcopy(&istate->sense_data, &csio->sense_data,
1347 csio->sense_len);
1348 csio->ccb_h.flags |= CAM_SEND_SENSE;
1349 } else {
1350 csio->sense_len = 0;
1351 }
1352
1353 start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1354 start_ccb->ccb_h.ccb_atio = atio;
1355 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1356 ("Sending a CTIO\n"));
1357 TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h,
1358 periph_links.tqe);
1359 xpt_action(start_ccb);
1360 s = splbio();
1361 ccbh = TAILQ_FIRST(&softc->work_queue);
1362 splx(s);
1363 }
1364 if (ccbh != NULL)
1365 targrunqueue(periph, softc);
1366}
1367
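/*
 * Completion handler for the CCB types this driver uses: accepted target
 * I/O (new commands from an initiator), continue target I/O (data/status
 * phases we issued), and immediate notify events.
 */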
1368static void
1369targdone(struct cam_periph *periph, union ccb *done_ccb)
1370{
1371 struct targ_softc *softc;
1372
1373 softc = (struct targ_softc *)periph->softc;
1374
1375 if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) {
1376 /* Caller will release the CCB */
1377 wakeup(&done_ccb->ccb_h.cbfcnp);
1378 return;
1379 }
1380
1381 switch (done_ccb->ccb_h.func_code) {
1382 case XPT_ACCEPT_TARGET_IO:
1383 {
1384 struct ccb_accept_tio *atio;
1385 struct targ_cmd_desc *descr;
1386 struct initiator_state *istate;
1387 u_int8_t *cdb;
1388
1389 atio = &done_ccb->atio;
1390 descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1391 istate = &softc->istate[atio->init_id];
1392 cdb = atio->cdb_io.cdb_bytes;
1393 if (softc->state == TARG_STATE_TEARDOWN
1394 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1395 freedescr(descr);
1396 free(done_ccb, M_DEVBUF);
1397 return;
1398 }
1399
1400 if (atio->sense_len != 0) {
1401
1402 /*
1403 * We had an error in the reception of
1404 * this command. Immediately issue a CA.
1405 */
1406 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1407 atio->ccb_h.flags |= CAM_DIR_NONE;
1408 descr->data_resid = 0;
1409 descr->data_increment = 0;
1410 descr->timeout = 5 * 1000;
1411 descr->status = SCSI_STATUS_CHECK_COND;
1412 copy_sense(softc, istate, (u_int8_t *)&atio->sense_data,
1413 atio->sense_len);
1414 set_ca_condition(periph, atio->init_id, CA_CMD_SENSE);
1415 } else if (istate->pending_ca == 0
1416 && istate->pending_ua != 0
1417 && cdb[0] != INQUIRY) {
1418
1419 /* Pending UA, tell initiator */
1420			/* Direction is always relative to the initiator */
1421 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1422 atio->ccb_h.flags |= CAM_DIR_NONE;
1423 descr->data_resid = 0;
1424 descr->data_increment = 0;
1425 descr->timeout = 5 * 1000;
1426 descr->status = SCSI_STATUS_CHECK_COND;
1427 fill_sense(softc, atio->init_id,
1428 SSD_CURRENT_ERROR, SSD_KEY_UNIT_ATTENTION,
1429 0x29,
1430 istate->pending_ua == UA_POWER_ON ? 1 : 2);
1431 set_ca_condition(periph, atio->init_id, CA_UNIT_ATTN);
1432 } else {
1433 /*
1434 * Save the current CA and UA status so
1435 * they can be used by this command.
1436 */
1437 ua_types pending_ua;
1438 ca_types pending_ca;
1439
1440 pending_ua = istate->pending_ua;
1441 pending_ca = istate->pending_ca;
1442
1443 /*
1444			 * As per the SCSI2 spec, any command that occurs
1445			 * after a CA is reported clears the CA.  We must
1446			 * also clear the UA condition, if any, that caused
1447			 * the CA to occur, assuming the UA is not for a
1448			 * persistent condition.
1449 */
1450 istate->pending_ca = CA_NONE;
1451 if (pending_ca == CA_UNIT_ATTN)
1452 istate->pending_ua = UA_NONE;
1453
1454 /*
1455 * Determine the type of incoming command and
1456			 * set up our buffer for a response.
1457 */
1458 switch (cdb[0]) {
1459 case INQUIRY:
1460 {
1461 struct scsi_inquiry *inq;
1462 struct scsi_sense_data *sense;
1463
1464 inq = (struct scsi_inquiry *)cdb;
1465 sense = &istate->sense_data;
1466 descr->status = SCSI_STATUS_OK;
1467 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1468 ("Saw an inquiry!\n"));
1469 /*
1470 * Validate the command. We don't
1471 * support any VPD pages, so complain
1472 * if EVPD is set.
1473 */
1474 if ((inq->byte2 & SI_EVPD) != 0
1475 || inq->page_code != 0) {
1476 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1477 atio->ccb_h.flags |= CAM_DIR_NONE;
1478 descr->data_resid = 0;
1479 descr->data_increment = 0;
1480 descr->timeout = 5 * 1000;
1481 descr->status = SCSI_STATUS_CHECK_COND;
1482 fill_sense(softc, atio->init_id,
1483 SSD_CURRENT_ERROR,
1484 SSD_KEY_ILLEGAL_REQUEST,
1485 /*asc*/0x24, /*ascq*/0x00);
1486 sense->extra_len =
1487 offsetof(struct scsi_sense_data,
1488 extra_bytes)
1489 - offsetof(struct scsi_sense_data,
1490 extra_len);
1491 set_ca_condition(periph, atio->init_id,
1492 CA_CMD_SENSE);
1493 }
1494
1495 if ((inq->byte2 & SI_EVPD) != 0) {
1496 sense->sense_key_spec[0] =
1497 SSD_SCS_VALID|SSD_FIELDPTR_CMD
1498 |SSD_BITPTR_VALID| /*bit value*/1;
1499 sense->sense_key_spec[1] = 0;
1500 sense->sense_key_spec[2] =
1501 offsetof(struct scsi_inquiry,
1502 byte2);
1503 } else if (inq->page_code != 0) {
1504 sense->sense_key_spec[0] =
1505 SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1506 sense->sense_key_spec[1] = 0;
1507 sense->sense_key_spec[2] =
1508 offsetof(struct scsi_inquiry,
1509 page_code);
1510 }
1511 if (descr->status == SCSI_STATUS_CHECK_COND)
1512 break;
1513
1514 /*
1515 * Direction is always relative
1516				 * to the initiator.
1517 */
1518 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1519 atio->ccb_h.flags |= CAM_DIR_IN;
1520 descr->data = softc->inq_data;
1521 descr->data_resid = MIN(softc->inq_data_len,
1522 inq->length);
1523 descr->data_increment = descr->data_resid;
1524 descr->timeout = 5 * 1000;
1525 break;
1526 }
1527 case TEST_UNIT_READY:
1528 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1529 atio->ccb_h.flags |= CAM_DIR_NONE;
1530 descr->data_resid = 0;
1531 descr->data_increment = 0;
1532 descr->timeout = 5 * 1000;
1533 descr->status = SCSI_STATUS_OK;
1534 break;
1535 case REQUEST_SENSE:
1536 {
1537 struct scsi_request_sense *rsense;
1538 struct scsi_sense_data *sense;
1539
1540 rsense = (struct scsi_request_sense *)cdb;
1541 sense = &istate->sense_data;
1542 if (pending_ca == 0) {
1543 fill_sense(softc, atio->init_id,
1544 SSD_CURRENT_ERROR,
1545 SSD_KEY_NO_SENSE, 0x00,
1546 0x00);
1547 CAM_DEBUG(periph->path,
1548 CAM_DEBUG_PERIPH,
1549 ("No pending CA!\n"));
1550 }
1551 /*
1552 * Direction is always relative
1553				 * to the initiator.
1554 */
1555 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1556 atio->ccb_h.flags |= CAM_DIR_IN;
1557 descr->data = sense;
1558 descr->data_resid =
1559 offsetof(struct scsi_sense_data,
1560 extra_len)
1561 + sense->extra_len;
1562 descr->data_resid = MIN(descr->data_resid,
1563 rsense->length);
1564 descr->data_increment = descr->data_resid;
1565 descr->timeout = 5 * 1000;
1566 descr->status = SCSI_STATUS_OK;
1567 break;
1568 }
1569 case RECEIVE:
1570 case SEND:
1571 {
1572 struct scsi_send_receive *sr;
1573
1574 sr = (struct scsi_send_receive *)cdb;
1575
1576 /*
1577 * Direction is always relative
1578				 * to the initiator.
1579 */
1580 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1581 descr->data_resid = scsi_3btoul(sr->xfer_len);
1582 descr->timeout = 5 * 1000;
1583 descr->status = SCSI_STATUS_OK;
1584 if (cdb[0] == SEND) {
1585 atio->ccb_h.flags |= CAM_DIR_OUT;
1586 CAM_DEBUG(periph->path,
1587 CAM_DEBUG_PERIPH,
1588 ("Saw a SEND!\n"));
1589 atio->ccb_h.flags |= CAM_DIR_OUT;
1590 TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1591 &atio->ccb_h,
1592 periph_links.tqe);
1593 selwakeup(&softc->snd_select);
1594 } else {
1595 atio->ccb_h.flags |= CAM_DIR_IN;
1596 CAM_DEBUG(periph->path,
1597 CAM_DEBUG_PERIPH,
1598 ("Saw a RECEIVE!\n"));
1599 TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1600 &atio->ccb_h,
1601 periph_links.tqe);
1602 selwakeup(&softc->rcv_select);
1603 }
1604 /*
1605 * Attempt to satisfy this request with
1606 * a user buffer.
1607 */
1608 targrunqueue(periph, softc);
1609 return;
1610 }
1611 default:
1612 /*
1613 * Queue for consumption by our userland
1614 * counterpart and transition to the exception
1615 * state.
1616 */
1617 TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1618 &atio->ccb_h,
1619 periph_links.tqe);
1620 softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1621 targfireexception(periph, softc);
1622 return;
1623 }
1624 }
1625
1626 /* Queue us up to receive a Continue Target I/O ccb. */
1627 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1628 periph_links.tqe);
1629 xpt_schedule(periph, /*priority*/1);
1630 break;
1631 }
1632 case XPT_CONT_TARGET_IO:
1633 {
1634 struct ccb_scsiio *csio;
1635 struct ccb_accept_tio *atio;
1636 struct targ_cmd_desc *desc;
1637 struct buf *bp;
1637 struct bio *bp;
1638 int error;
1639
1640 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1641 ("Received completed CTIO\n"));
1642 csio = &done_ccb->csio;
1643 atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1644 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1645
1646 TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h,
1647 periph_links.tqe);
1648
1649 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1650 printf("CCB with error %x\n", done_ccb->ccb_h.status);
1651 error = targerror(done_ccb, 0, 0);
1652 if (error == ERESTART)
1653 break;
1654 /*
1655 * Right now we don't need to do anything
1656 * prior to unfreezing the queue...
1657 */
1658 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1659 printf("Releasing Queue\n");
1660 cam_release_devq(done_ccb->ccb_h.path,
1661 /*relsim_flags*/0,
1662 /*reduction*/0,
1663 /*timeout*/0,
1664 /*getcount_only*/0);
1665 }
1666 } else
1667 error = 0;
1668
1669 /*
1670 * If we shipped back sense data when completing
1671 * this command, clear the pending CA for it.
1672 */
1673 if (done_ccb->ccb_h.status & CAM_SENT_SENSE) {
1674 struct initiator_state *istate;
1675
1676 istate = &softc->istate[csio->init_id];
1677 if (istate->pending_ca == CA_UNIT_ATTN)
1678 istate->pending_ua = UA_NONE;
1679 istate->pending_ca = CA_NONE;
1680 softc->istate[csio->init_id].pending_ca = CA_NONE;
1681 done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;
1682 }
1683 done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1684
1685 desc->data_increment -= csio->resid;
1686 desc->data_resid -= desc->data_increment;
1687 if ((bp = desc->bp) != NULL) {
1688
1689 bp->b_resid -= desc->data_increment;
1690 bp->b_error = error;
1689 bp->bio_resid -= desc->data_increment;
1690 bp->bio_error = error;
1691
1692 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1693 ("Buffer I/O Completed - Resid %ld:%d\n",
1694 bp->b_resid, desc->data_resid));
1694 bp->bio_resid, desc->data_resid));
1695 /*
1696 * Send the buffer back to the client if
1697 * either the command has completed or all
1698 * buffer space has been consumed.
1699 */
1700 if (desc->data_resid == 0
1701 || bp->b_resid == 0
1701 || bp->bio_resid == 0
1702 || error != 0) {
1703 if (bp->b_resid != 0)
1703 if (bp->bio_resid != 0)
1704 /* Short transfer */
1705 bp->b_ioflags |= BIO_ERROR;
1705 bp->bio_flags |= BIO_ERROR;
1706
1707 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1708 ("Completing a buffer\n"));
1709 biodone(bp);
1710 desc->bp = NULL;
1711 }
1712 }
1713
1714 xpt_release_ccb(done_ccb);
1715 if (softc->state != TARG_STATE_TEARDOWN) {
1716
1717 if (desc->data_resid == 0) {
1718 /*
1719 * Send the original accept TIO back to the
1720 * controller to handle more work.
1721 */
1722 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1723 ("Returning ATIO to target\n"));
1724 xpt_action((union ccb *)atio);
1725 break;
1726 }
1727
1728 /* Queue us up for another buffer */
1729 if (atio->cdb_io.cdb_bytes[0] == SEND) {
1730 if (desc->bp != NULL)
1731 TAILQ_INSERT_HEAD(
1732 &softc->snd_buf_queue.queue,
1732 &softc->snd_bio_queue.queue,
1733 bp, b_act);
1734 TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1735 &atio->ccb_h,
1736 periph_links.tqe);
1737 } else {
1738 if (desc->bp != NULL)
1739 TAILQ_INSERT_HEAD(
1740 &softc->rcv_buf_queue.queue,
1740 &softc->rcv_bio_queue.queue,
1741 bp, b_act);
1742 TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1743 &atio->ccb_h,
1744 periph_links.tqe);
1745 }
1746 desc->bp = NULL;
1747 targrunqueue(periph, softc);
1748 } else {
1749 if (desc->bp != NULL) {
1750 bp->b_ioflags |= BIO_ERROR;
1751 bp->b_error = ENXIO;
1750 bp->bio_flags |= BIO_ERROR;
1751 bp->bio_error = ENXIO;
1752 biodone(bp);
1753 }
1754 freedescr(desc);
1755 free(atio, M_DEVBUF);
1756 }
1757 break;
1758 }
1759 case XPT_IMMED_NOTIFY:
1760 {
1761 int frozen;
1762
1763 frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1764 if (softc->state == TARG_STATE_TEARDOWN) {
1765 SLIST_REMOVE(&softc->immed_notify_slist,
1766 &done_ccb->ccb_h, ccb_hdr,
1767 periph_links.sle);
1768 free(done_ccb, M_DEVBUF);
1769 } else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
1770 free(done_ccb, M_DEVBUF);
1771 } else {
1772 printf("Saw event %x:%x\n", done_ccb->ccb_h.status,
1773 done_ccb->cin.message_args[0]);
1774 /* Process error condition. */
1775 targinoterror(periph, softc, &done_ccb->cin);
1776
1777 /* Requeue for another immediate event */
1778 xpt_action(done_ccb);
1779 }
1780 if (frozen != 0)
1781 cam_release_devq(periph->path,
1782 /*relsim_flags*/0,
1783 /*opening reduction*/0,
1784 /*timeout*/0,
1785 /*getcount_only*/0);
1786 break;
1787 }
1788 default:
1789 panic("targdone: Impossible xpt opcode %x encountered.",
1790 done_ccb->ccb_h.func_code);
1791 /* NOTREACHED */
1792 break;
1793 }
1794}
1795
1796/*
1797 * Transition to the exception state and notify our symbiotic
1798 * userland process of the change.
1799 */
1800static void
1801targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1802{
1803 /*
1804	 * Return all pending buffers with short read/write status so our
1805	 * process unblocks, and do a selwakeup on any process queued
1806	 * waiting for reads or writes.  When the selwakeup is performed,
1807	 * the waking process will wake up, call our poll routine again,
1808 * and pick up the exception.
1809 */
1810 struct buf *bp;
1810 struct bio *bp;
1811
1812 if (softc->state != TARG_STATE_NORMAL)
1813 /* Already either tearing down or in exception state */
1814 return;
1815
1816 softc->state = TARG_STATE_EXCEPTION;
1817
1818 while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
1819 bufq_remove(&softc->snd_buf_queue, bp);
1820 bp->b_ioflags |= BIO_ERROR;
1818 while ((bp = bioq_first(&softc->snd_bio_queue)) != NULL) {
1819 bioq_remove(&softc->snd_bio_queue, bp);
1820 bp->bio_flags |= BIO_ERROR;
1821 biodone(bp);
1822 }
1823
1824 while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
1825		bufq_remove(&softc->rcv_buf_queue, bp);
1826 bp->b_ioflags |= BIO_ERROR;
1824 while ((bp = bioq_first(&softc->rcv_bio_queue)) != NULL) {
1825		bioq_remove(&softc->rcv_bio_queue, bp);
1826 bp->bio_flags |= BIO_ERROR;
1827 biodone(bp);
1828 }
1829
1830 selwakeup(&softc->snd_select);
1831 selwakeup(&softc->rcv_select);
1832}
1833
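/*
 * Handle an immediate notify event: bus resets and BDRs post a unit
 * attention for all initiators, abort anything in flight, and raise an
 * exception for the user process.  Incoming messages are currently
 * decoded but not acted on.
 */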
1834static void
1835targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1836 struct ccb_immed_notify *inot)
1837{
1838 cam_status status;
1839 int sense;
1840
1841 status = inot->ccb_h.status;
1842 sense = (status & CAM_AUTOSNS_VALID) != 0;
1843 status &= CAM_STATUS_MASK;
1844 switch (status) {
1845 case CAM_SCSI_BUS_RESET:
1846 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1847 UA_BUS_RESET);
1848 abort_pending_transactions(periph,
1849 /*init_id*/CAM_TARGET_WILDCARD,
1850 TARG_TAG_WILDCARD, EINTR,
1851 /*to_held_queue*/FALSE);
1852 softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1853 targfireexception(periph, softc);
1854 break;
1855 case CAM_BDR_SENT:
1856 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1857 UA_BDR);
1858 abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1859 TARG_TAG_WILDCARD, EINTR,
1860 /*to_held_queue*/FALSE);
1861 softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
1862 targfireexception(periph, softc);
1863 break;
1864 case CAM_MESSAGE_RECV:
1865 switch (inot->message_args[0]) {
1866 case MSG_INITIATOR_DET_ERR:
1867 break;
1868 case MSG_ABORT:
1869 break;
1870 case MSG_BUS_DEV_RESET:
1871 break;
1872 case MSG_ABORT_TAG:
1873 break;
1874 case MSG_CLEAR_QUEUE:
1875 break;
1876 case MSG_TERM_IO_PROC:
1877 break;
1878 default:
1879 break;
1880 }
1881 break;
1882 default:
1883 break;
1884 }
1885}
1886
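/*
 * Error handler for completed CTIOs.  CAM status is translated into an
 * errno and, where it makes sense, into sense data and a contingent
 * allegiance condition posted for the initiator.  ERESTART causes the CCB
 * to be reissued (or parked on the held queue while a CA is pending), and
 * any device queue freeze is released.
 */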
1887static int
1888targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1889{
1890 struct cam_periph *periph;
1891 struct targ_softc *softc;
1892 struct ccb_scsiio *csio;
1893 struct initiator_state *istate;
1894 cam_status status;
1895 int frozen;
1896 int sense;
1897 int error;
1898 int on_held_queue;
1899
1900 periph = xpt_path_periph(ccb->ccb_h.path);
1901 softc = (struct targ_softc *)periph->softc;
1902 status = ccb->ccb_h.status;
1903 sense = (status & CAM_AUTOSNS_VALID) != 0;
1904 frozen = (status & CAM_DEV_QFRZN) != 0;
1905 status &= CAM_STATUS_MASK;
1906 on_held_queue = FALSE;
1907 csio = &ccb->csio;
1908 istate = &softc->istate[csio->init_id];
1909 switch (status) {
1910 case CAM_REQ_ABORTED:
1911 if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
1912
1913 /*
1914 * Place this CCB into the initiators
1915 * 'held' queue until the pending CA is cleared.
1916 * If there is no CA pending, reissue immediately.
1917 */
1918 if (istate->pending_ca == 0) {
1919 ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1920 xpt_action(ccb);
1921 } else {
1922 ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
1923 TAILQ_INSERT_TAIL(&softc->pending_queue,
1924 &ccb->ccb_h,
1925 periph_links.tqe);
1926 }
1927 /* The command will be retried at a later time. */
1928 on_held_queue = TRUE;
1929 error = ERESTART;
1930 break;
1931 }
1932 /* FALLTHROUGH */
1933 case CAM_SCSI_BUS_RESET:
1934 case CAM_BDR_SENT:
1935 case CAM_REQ_TERMIO:
1936 case CAM_CMD_TIMEOUT:
1937 /* Assume we did not send any data */
1938 csio->resid = csio->dxfer_len;
1939 error = EIO;
1940 break;
1941 case CAM_SEL_TIMEOUT:
1942 if (ccb->ccb_h.retry_count > 0) {
1943 ccb->ccb_h.retry_count--;
1944 error = ERESTART;
1945 } else {
1946 /* "Select or reselect failure" */
1947 csio->resid = csio->dxfer_len;
1948 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1949 SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
1950 set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
1951 error = EIO;
1952 }
1953 break;
1954 case CAM_UNCOR_PARITY:
1955 /* "SCSI parity error" */
1956 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1957 SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
1958 set_ca_condition(periph, csio->init_id,
1959 CA_CMD_SENSE);
1960 csio->resid = csio->dxfer_len;
1961 error = EIO;
1962 break;
1963 case CAM_NO_HBA:
1964 csio->resid = csio->dxfer_len;
1965 error = ENXIO;
1966 break;
1967 case CAM_SEQUENCE_FAIL:
1968 if (sense != 0) {
1969 copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
1970 csio->sense_len);
1971 set_ca_condition(periph,
1972 csio->init_id,
1973 CA_CMD_SENSE);
1974 }
1975 csio->resid = csio->dxfer_len;
1976 error = EIO;
1977 break;
1978 case CAM_IDE:
1979 /* "Initiator detected error message received" */
1980 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1981 SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
1982 set_ca_condition(periph, csio->init_id,
1983 CA_CMD_SENSE);
1984 csio->resid = csio->dxfer_len;
1985 error = EIO;
1986 break;
1987 case CAM_REQUEUE_REQ:
1988 printf("Requeue Request!\n");
1989 error = ERESTART;
1990 break;
1991 default:
1992 csio->resid = csio->dxfer_len;
1993 error = EIO;
1994		panic("targerror: Unexpected status %x encountered", status);
1995 /* NOTREACHED */
1996 }
1997
1998 if (error == ERESTART || error == 0) {
1999 /* Clear the QFRZN flag as we will release the queue */
2000 if (frozen != 0)
2001 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2002
2003 if (error == ERESTART && !on_held_queue)
2004 xpt_action(ccb);
2005
2006 if (frozen != 0)
2007 cam_release_devq(ccb->ccb_h.path,
2008 /*relsim_flags*/0,
2009 /*opening reduction*/0,
2010 /*timeout*/0,
2011 /*getcount_only*/0);
2012 }
2013 return (error);
2014}
2015
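/*
 * Allocate a per-command descriptor and its backing store.  Both
 * allocations use M_NOWAIT, so this can fail and callers must tolerate a
 * NULL return.
 */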
2016static struct targ_cmd_desc*
2017allocdescr()
2018{
2019 struct targ_cmd_desc* descr;
2020
2021 /* Allocate the targ_descr structure */
2022 descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
2023 M_DEVBUF, M_NOWAIT);
2024 if (descr == NULL)
2025 return (NULL);
2026
2027 bzero(descr, sizeof(*descr));
2028
2029 /* Allocate buffer backing store */
2030 descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
2031 if (descr->backing_store == NULL) {
2032 free(descr, M_DEVBUF);
2033 return (NULL);
2034 }
2035 descr->max_size = MAX_BUF_SIZE;
2036 return (descr);
2037}
2038
2039static void
2040freedescr(struct targ_cmd_desc *descr)
2041{
2042 free(descr->backing_store, M_DEVBUF);
2043 free(descr, M_DEVBUF);
2044}
2045
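/*
 * Build fixed-format "current error" sense data for the given initiator
 * from a sense key, ASC and ASCQ, and record it in that initiator's state
 * for a later REQUEST SENSE or autosense transfer.
 */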
2046static void
2047fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
2048 u_int sense_key, u_int asc, u_int ascq)
2049{
2050 struct initiator_state *istate;
2051 struct scsi_sense_data *sense;
2052
2053 istate = &softc->istate[initiator_id];
2054 sense = &istate->sense_data;
2055 bzero(sense, sizeof(*sense));
2056 sense->error_code = error_code;
2057 sense->flags = sense_key;
2058 sense->add_sense_code = asc;
2059 sense->add_sense_code_qual = ascq;
2060
2061 sense->extra_len = offsetof(struct scsi_sense_data, fru)
2062 - offsetof(struct scsi_sense_data, extra_len);
2063}
2064
2065static void
2066copy_sense(struct targ_softc *softc, struct initiator_state *istate,
2067 u_int8_t *sense_buffer, size_t sense_len)
2068{
2069 struct scsi_sense_data *sense;
2070 size_t copylen;
2071
2072 sense = &istate->sense_data;
2073 copylen = sizeof(*sense);
2074 if (copylen > sense_len)
2075 copylen = sense_len;
2076 bcopy(sense_buffer, sense, copylen);
2077}
2078
2079static void
2080set_unit_attention_cond(struct cam_periph *periph,
2081 u_int initiator_id, ua_types ua)
2082{
2083 int start;
2084 int end;
2085 struct targ_softc *softc;
2086
2087 softc = (struct targ_softc *)periph->softc;
2088 if (initiator_id == CAM_TARGET_WILDCARD) {
2089 start = 0;
2090 end = MAX_INITIATORS - 1;
2091 } else
2092 start = end = initiator_id;
2093
2094 while (start <= end) {
2095 softc->istate[start].pending_ua = ua;
2096 start++;
2097 }
2098}
2099
2100static void
2101set_ca_condition(struct cam_periph *periph, u_int initiator_id, ca_types ca)
2102{
2103 struct targ_softc *softc;
2104
2105 softc = (struct targ_softc *)periph->softc;
2106 softc->istate[initiator_id].pending_ca = ca;
2107 abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2108 /*errno*/0, /*to_held_queue*/TRUE);
2109}
2110
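/*
 * Weed out queued ATIOs that match the given initiator and tag (wildcards
 * allowed).  When to_held_queue is set the device queue is already frozen,
 * so the queues of ATIOs awaiting resources are left alone.
 */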
2111static void
2112abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
2113 u_int tag_id, int errno, int to_held_queue)
2114{
2115 struct ccb_abort cab;
2116 struct ccb_queue *atio_queues[3];
2117 struct targ_softc *softc;
2118 struct ccb_hdr *ccbh;
2119 u_int i;
2120
2121 softc = (struct targ_softc *)periph->softc;
2122
2123 atio_queues[0] = &softc->work_queue;
2124 atio_queues[1] = &softc->snd_ccb_queue;
2125 atio_queues[2] = &softc->rcv_ccb_queue;
2126
2127 /* First address the ATIOs awaiting resources */
2128 for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
2129 struct ccb_queue *atio_queue;
2130
2131 if (to_held_queue) {
2132 /*
2133 * The device queue is frozen anyway, so there
2134 * is nothing for us to do.
2135 */
2136 continue;
2137 }
2138 atio_queue = atio_queues[i];
2139 ccbh = TAILQ_FIRST(atio_queue);
2140 while (ccbh != NULL) {
2141 struct ccb_accept_tio *atio;
2142 struct targ_cmd_desc *desc;
2143
2144 atio = (struct ccb_accept_tio *)ccbh;
2145 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
2146 ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2147
2148 /* Only abort the CCBs that match */
2149 if ((atio->init_id != initiator_id
2150 && initiator_id != CAM_TARGET_WILDCARD)
2151 || (tag_id != TARG_TAG_WILDCARD
2152 && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2153 || atio->tag_id != tag_id)))
2154 continue;
2155
2156 TAILQ_REMOVE(atio_queue, &atio->ccb_h,
2157 periph_links.tqe);
2158
2159 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2160 ("Aborting ATIO\n"));
2161 if (desc->bp != NULL) {
1827 biodone(bp);
1828 }
1829
1830 selwakeup(&softc->snd_select);
1831 selwakeup(&softc->rcv_select);
1832}
1833
1834static void
1835targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1836 struct ccb_immed_notify *inot)
1837{
1838 cam_status status;
1839 int sense;
1840
1841 status = inot->ccb_h.status;
1842 sense = (status & CAM_AUTOSNS_VALID) != 0;
1843 status &= CAM_STATUS_MASK;
1844 switch (status) {
1845 case CAM_SCSI_BUS_RESET:
1846 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1847 UA_BUS_RESET);
1848 abort_pending_transactions(periph,
1849 /*init_id*/CAM_TARGET_WILDCARD,
1850 TARG_TAG_WILDCARD, EINTR,
1851 /*to_held_queue*/FALSE);
1852 softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1853 targfireexception(periph, softc);
1854 break;
1855 case CAM_BDR_SENT:
1856 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1857 UA_BDR);
1858 abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1859 TARG_TAG_WILDCARD, EINTR,
1860 /*to_held_queue*/FALSE);
1861 softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
1862 targfireexception(periph, softc);
1863 break;
1864 case CAM_MESSAGE_RECV:
1865 switch (inot->message_args[0]) {
1866 case MSG_INITIATOR_DET_ERR:
1867 break;
1868 case MSG_ABORT:
1869 break;
1870 case MSG_BUS_DEV_RESET:
1871 break;
1872 case MSG_ABORT_TAG:
1873 break;
1874 case MSG_CLEAR_QUEUE:
1875 break;
1876 case MSG_TERM_IO_PROC:
1877 break;
1878 default:
1879 break;
1880 }
1881 break;
1882 default:
1883 break;
1884 }
1885}
1886
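/*
 * Error recovery for completed target-mode CCBs.  Depending on the CAM
 * status this either retries the request (or parks it on the held queue
 * while the initiator has a contingent allegiance pending), builds sense
 * data describing the failure and establishes a contingent allegiance
 * condition for the offending initiator, or fails the request outright.
 * Returns ERESTART when the CCB will be reissued and an errno otherwise;
 * a device queue freeze is released here when the CCB is retried.
 */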
1887static int
1888targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1889{
1890 struct cam_periph *periph;
1891 struct targ_softc *softc;
1892 struct ccb_scsiio *csio;
1893 struct initiator_state *istate;
1894 cam_status status;
1895 int frozen;
1896 int sense;
1897 int error;
1898 int on_held_queue;
1899
1900 periph = xpt_path_periph(ccb->ccb_h.path);
1901 softc = (struct targ_softc *)periph->softc;
1902 status = ccb->ccb_h.status;
1903 sense = (status & CAM_AUTOSNS_VALID) != 0;
1904 frozen = (status & CAM_DEV_QFRZN) != 0;
1905 status &= CAM_STATUS_MASK;
1906 on_held_queue = FALSE;
1907 csio = &ccb->csio;
1908 istate = &softc->istate[csio->init_id];
1909 switch (status) {
1910 case CAM_REQ_ABORTED:
1911 if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
1912
1913 /*
1914			 * Place this CCB into the initiator's
1915 * 'held' queue until the pending CA is cleared.
1916 * If there is no CA pending, reissue immediately.
1917 */
1918 if (istate->pending_ca == 0) {
1919 ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1920 xpt_action(ccb);
1921 } else {
1922 ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
1923 TAILQ_INSERT_TAIL(&softc->pending_queue,
1924 &ccb->ccb_h,
1925 periph_links.tqe);
1926 }
1927 /* The command will be retried at a later time. */
1928 on_held_queue = TRUE;
1929 error = ERESTART;
1930 break;
1931 }
1932 /* FALLTHROUGH */
1933 case CAM_SCSI_BUS_RESET:
1934 case CAM_BDR_SENT:
1935 case CAM_REQ_TERMIO:
1936 case CAM_CMD_TIMEOUT:
1937 /* Assume we did not send any data */
1938 csio->resid = csio->dxfer_len;
1939 error = EIO;
1940 break;
1941 case CAM_SEL_TIMEOUT:
1942 if (ccb->ccb_h.retry_count > 0) {
1943 ccb->ccb_h.retry_count--;
1944 error = ERESTART;
1945 } else {
1946 /* "Select or reselect failure" */
1947 csio->resid = csio->dxfer_len;
1948 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1949 SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
1950 set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
1951 error = EIO;
1952 }
1953 break;
1954 case CAM_UNCOR_PARITY:
1955 /* "SCSI parity error" */
1956 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1957 SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
1958 set_ca_condition(periph, csio->init_id,
1959 CA_CMD_SENSE);
1960 csio->resid = csio->dxfer_len;
1961 error = EIO;
1962 break;
1963 case CAM_NO_HBA:
1964 csio->resid = csio->dxfer_len;
1965 error = ENXIO;
1966 break;
1967 case CAM_SEQUENCE_FAIL:
1968 if (sense != 0) {
1969 copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
1970 csio->sense_len);
1971 set_ca_condition(periph,
1972 csio->init_id,
1973 CA_CMD_SENSE);
1974 }
1975 csio->resid = csio->dxfer_len;
1976 error = EIO;
1977 break;
1978 case CAM_IDE:
1979 /* "Initiator detected error message received" */
1980 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1981 SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
1982 set_ca_condition(periph, csio->init_id,
1983 CA_CMD_SENSE);
1984 csio->resid = csio->dxfer_len;
1985 error = EIO;
1986 break;
1987 case CAM_REQUEUE_REQ:
1988 printf("Requeue Request!\n");
1989 error = ERESTART;
1990 break;
1991 default:
1992 csio->resid = csio->dxfer_len;
1993 error = EIO;
1994		panic("targerror: Unexpected status %x encountered", status);
1995 /* NOTREACHED */
1996 }
1997
1998 if (error == ERESTART || error == 0) {
1999 /* Clear the QFRZN flag as we will release the queue */
2000 if (frozen != 0)
2001 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2002
2003 if (error == ERESTART && !on_held_queue)
2004 xpt_action(ccb);
2005
2006 if (frozen != 0)
2007 cam_release_devq(ccb->ccb_h.path,
2008 /*relsim_flags*/0,
2009 /*opening reduction*/0,
2010 /*timeout*/0,
2011 /*getcount_only*/0);
2012 }
2013 return (error);
2014}
2015
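/*
 * Command descriptor management.  allocdescr() allocates a descriptor
 * and a MAX_BUF_SIZE backing store with M_NOWAIT, so it can fail and
 * return NULL under memory pressure; freedescr() releases both pieces.
 * A caller is expected to check for failure, roughly as in the
 * following sketch (hypothetical, not taken from this file):
 *
 *	if ((descr = allocdescr()) == NULL)
 *		return (ENOMEM);
 */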
2016static struct targ_cmd_desc*
2017allocdescr(void)
2018{
2019 struct targ_cmd_desc* descr;
2020
2021 /* Allocate the targ_descr structure */
2022 descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
2023 M_DEVBUF, M_NOWAIT);
2024 if (descr == NULL)
2025 return (NULL);
2026
2027 bzero(descr, sizeof(*descr));
2028
2029 /* Allocate buffer backing store */
2030 descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
2031 if (descr->backing_store == NULL) {
2032 free(descr, M_DEVBUF);
2033 return (NULL);
2034 }
2035 descr->max_size = MAX_BUF_SIZE;
2036 return (descr);
2037}
2038
2039static void
2040freedescr(struct targ_cmd_desc *descr)
2041{
2042 free(descr->backing_store, M_DEVBUF);
2043 free(descr, M_DEVBUF);
2044}
2045
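/*
 * Build fixed-format sense data for the given initiator from the error
 * code, sense key, ASC, and ASCQ supplied by the caller.  targerror()
 * above, for instance, reports a selection timeout with
 *
 *	fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
 *		   SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
 *
 * i.e. a current, hardware-error sense with ASC/ASCQ 0x45/0x00
 * ("select or reselect failure").  The extra_len field is derived from
 * the structure layout rather than hard-coded.
 */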
2046static void
2047fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
2048 u_int sense_key, u_int asc, u_int ascq)
2049{
2050 struct initiator_state *istate;
2051 struct scsi_sense_data *sense;
2052
2053 istate = &softc->istate[initiator_id];
2054 sense = &istate->sense_data;
2055 bzero(sense, sizeof(*sense));
2056 sense->error_code = error_code;
2057 sense->flags = sense_key;
2058 sense->add_sense_code = asc;
2059 sense->add_sense_code_qual = ascq;
2060
2061 sense->extra_len = offsetof(struct scsi_sense_data, fru)
2062 - offsetof(struct scsi_sense_data, extra_len);
2063}
2064
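/*
 * Latch autosense data returned by the controller into the initiator's
 * state, copying no more than either buffer can hold.
 */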
2065static void
2066copy_sense(struct targ_softc *softc, struct initiator_state *istate,
2067 u_int8_t *sense_buffer, size_t sense_len)
2068{
2069 struct scsi_sense_data *sense;
2070 size_t copylen;
2071
2072 sense = &istate->sense_data;
2073 copylen = sizeof(*sense);
2074 if (copylen > sense_len)
2075 copylen = sense_len;
2076 bcopy(sense_buffer, sense, copylen);
2077}
2078
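/*
 * Record a pending unit attention for a single initiator, or for every
 * initiator when CAM_TARGET_WILDCARD is passed (as after a bus reset).
 */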
2079static void
2080set_unit_attention_cond(struct cam_periph *periph,
2081 u_int initiator_id, ua_types ua)
2082{
2083 int start;
2084 int end;
2085 struct targ_softc *softc;
2086
2087 softc = (struct targ_softc *)periph->softc;
2088 if (initiator_id == CAM_TARGET_WILDCARD) {
2089 start = 0;
2090 end = MAX_INITIATORS - 1;
2091 } else
2092 start = end = initiator_id;
2093
2094 while (start <= end) {
2095 softc->istate[start].pending_ua = ua;
2096 start++;
2097 }
2098}
2099
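/*
 * Establish a contingent allegiance condition for an initiator.  The
 * initiator's in-flight transactions are aborted to the held queue so
 * they are not reissued until the condition has been cleared.
 */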
2100static void
2101set_ca_condition(struct cam_periph *periph, u_int initiator_id, ca_types ca)
2102{
2103 struct targ_softc *softc;
2104
2105 softc = (struct targ_softc *)periph->softc;
2106 softc->istate[initiator_id].pending_ca = ca;
2107 abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2108 /*errno*/0, /*to_held_queue*/TRUE);
2109}
2110
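/*
 * Abort the CCBs that match the given initiator and tag; wildcards
 * match everything.  When deferring to the held queue the ATIO queues
 * are left alone; otherwise ATIOs still awaiting resources have any
 * attached I/O failed and are returned to the controller or, at
 * teardown, freed along with their descriptors.  CTIOs already queued
 * are aborted through the transport and, when to_held_queue is set,
 * flagged so that targerror() parks them on the held queue.
 */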
2111static void
2112abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
2113 u_int tag_id, int errno, int to_held_queue)
2114{
2115 struct ccb_abort cab;
2116 struct ccb_queue *atio_queues[3];
2117 struct targ_softc *softc;
2118 struct ccb_hdr *ccbh;
2119 u_int i;
2120
2121 softc = (struct targ_softc *)periph->softc;
2122
2123 atio_queues[0] = &softc->work_queue;
2124 atio_queues[1] = &softc->snd_ccb_queue;
2125 atio_queues[2] = &softc->rcv_ccb_queue;
2126
2127 /* First address the ATIOs awaiting resources */
2128 for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
2129 struct ccb_queue *atio_queue;
2130
2131 if (to_held_queue) {
2132 /*
2133 * The device queue is frozen anyway, so there
2134 * is nothing for us to do.
2135 */
2136 continue;
2137 }
2138 atio_queue = atio_queues[i];
2139 ccbh = TAILQ_FIRST(atio_queue);
2140 while (ccbh != NULL) {
2141 struct ccb_accept_tio *atio;
2142 struct targ_cmd_desc *desc;
2143
2144 atio = (struct ccb_accept_tio *)ccbh;
2145 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
2146 ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2147
2148 /* Only abort the CCBs that match */
2149 if ((atio->init_id != initiator_id
2150 && initiator_id != CAM_TARGET_WILDCARD)
2151 || (tag_id != TARG_TAG_WILDCARD
2152 && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2153 || atio->tag_id != tag_id)))
2154 continue;
2155
2156 TAILQ_REMOVE(atio_queue, &atio->ccb_h,
2157 periph_links.tqe);
2158
2159 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2160 ("Aborting ATIO\n"));
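			/* Fail any attached I/O back to its originator. */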
2161 if (desc->bp != NULL) {
2162 desc->bp->b_ioflags |= BIO_ERROR;
2162 desc->bp->bio_flags |= BIO_ERROR;
2163 if (softc->state != TARG_STATE_TEARDOWN)
2163 if (softc->state != TARG_STATE_TEARDOWN)
2164 desc->bp->b_error = errno;
2164 desc->bp->bio_error = errno;
2165 else
2165 else
2166 desc->bp->b_error = ENXIO;
2166 desc->bp->bio_error = ENXIO;
2167 biodone(desc->bp);
2168 desc->bp = NULL;
2169 }
2170 if (softc->state == TARG_STATE_TEARDOWN) {
2171 freedescr(desc);
2172 free(atio, M_DEVBUF);
2173 } else {
2174				/* Return the ATIO to the controller */
2175 xpt_action((union ccb *)atio);
2176 }
2177 }
2178 }
2179
2180 ccbh = TAILQ_FIRST(&softc->pending_queue);
2181 while (ccbh != NULL) {
2182 struct ccb_scsiio *csio;
2183
2184 csio = (struct ccb_scsiio *)ccbh;
2185 ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2186
2187 /* Only abort the CCBs that match */
2188 if ((csio->init_id != initiator_id
2189 && initiator_id != CAM_TARGET_WILDCARD)
2190 || (tag_id != TARG_TAG_WILDCARD
2191 && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2192 || csio->tag_id != tag_id)))
2193 continue;
2194
2195 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2196 ("Aborting CTIO\n"));
2197
2198 TAILQ_REMOVE(&softc->pending_queue, &csio->ccb_h,
2199 periph_links.tqe);
2200
2201 if (to_held_queue != 0)
2202 csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ;
2203 xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1);
2204 cab.abort_ccb = (union ccb *)csio;
2205 xpt_action((union ccb *)&cab);
2206 if (cab.ccb_h.status != CAM_REQ_CMP) {
2207 xpt_print_path(cab.ccb_h.path);
2208 printf("Unable to abort CCB. Status %x\n",
2209 cab.ccb_h.status);
2210 }
2211 }
2212}