Deleted Added
full compact
scsi_target.c (101940) scsi_target.c (107178)
1/*
1/*
2 * Implementation of a simple Target Mode SCSI Proccessor Target driver for CAM.
2 * Generic SCSI Target Kernel Mode Driver
3 *
3 *
4 * Copyright (c) 1998, 1999, 2001 Justin T. Gibbs.
4 * Copyright (c) 2002 Nate Lawson.
5 * Copyright (c) 1998, 1999, 2001, 2002 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.

--- 7 unchanged lines hidden (view full) ---

20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.

--- 7 unchanged lines hidden (view full) ---

21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
28 * $FreeBSD: head/sys/cam/scsi/scsi_target.c 101940 2002-08-15 20:54:03Z njl $
29 * $FreeBSD: head/sys/cam/scsi/scsi_target.c 107178 2002-11-22 22:55:51Z njl $
29 */
30
31#include <sys/param.h>
30 */
31
32#include <sys/param.h>
32#include <sys/queue.h>
33#include <sys/systm.h>
34#include <sys/kernel.h>
33#include <sys/systm.h>
34#include <sys/kernel.h>
35#include <sys/types.h>
36#include <sys/bio.h>
37#include <sys/conf.h>
35#include <sys/conf.h>
38#include <sys/devicestat.h>
36#include <sys/event.h>
39#include <sys/malloc.h>
40#include <sys/poll.h>
41#include <sys/selinfo.h>
42#include <sys/uio.h>
37#include <sys/malloc.h>
38#include <sys/poll.h>
39#include <sys/selinfo.h>
40#include <sys/uio.h>
41#include <sys/vnode.h>
42#include <sys/queue.h>
43#include <sys/devicestat.h>
43
44#include <cam/cam.h>
45#include <cam/cam_ccb.h>
46#include <cam/cam_periph.h>
47#include <cam/cam_queue.h>
48#include <cam/cam_xpt_periph.h>
49#include <cam/cam_debug.h>
44
45#include <cam/cam.h>
46#include <cam/cam_ccb.h>
47#include <cam/cam_periph.h>
48#include <cam/cam_queue.h>
49#include <cam/cam_xpt_periph.h>
50#include <cam/cam_debug.h>
50
51#include <cam/scsi/scsi_all.h>
52#include <cam/scsi/scsi_pt.h>
53#include <cam/scsi/scsi_targetio.h>
51#include <cam/scsi/scsi_targetio.h>
54#include <cam/scsi/scsi_message.h>
55
52
56typedef enum {
57 TARG_STATE_NORMAL,
58 TARG_STATE_EXCEPTION,
59 TARG_STATE_TEARDOWN
60} targ_state;
53/* Transaction information attached to each CCB sent by the user */
54struct targ_cmd_descr {
55 struct cam_periph_map_info mapinfo;
56 TAILQ_ENTRY(targ_cmd_descr) tqe;
57 union ccb *user_ccb;
58 int priority;
59 int func_code;
60};
61
61
62typedef enum {
63 TARG_FLAG_NONE = 0x00,
64 TARG_FLAG_SEND_EOF = 0x01,
65 TARG_FLAG_RECEIVE_EOF = 0x02,
66 TARG_FLAG_LUN_ENABLED = 0x04
67} targ_flags;
62/* Offset into the private CCB area for storing our descriptor */
63#define targ_descr periph_priv.entries[1].ptr
68
64
65TAILQ_HEAD(descr_queue, targ_cmd_descr);
66
69typedef enum {
67typedef enum {
70 TARG_CCB_NONE = 0x00,
71 TARG_CCB_WAITING = 0x01,
72 TARG_CCB_HELDQ = 0x02,
73 TARG_CCB_ABORT_TO_HELDQ = 0x04
74} targ_ccb_flags;
68 TARG_STATE_RESV = 0x00, /* Invalid state */
69 TARG_STATE_OPENED = 0x01, /* Device opened, softc initialized */
70 TARG_STATE_LUN_ENABLED = 0x02 /* Device enabled for a path */
71} targ_state;
75
72
76#define MAX_ACCEPT 16
77#define MAX_IMMEDIATE 16
78#define MAX_BUF_SIZE 256 /* Max inquiry/sense/mode page transfer */
79#define MAX_INITIATORS 256 /* includes widest fibre channel for now */
80
81#define MIN(a, b) ((a > b) ? b : a)
82
83#define TARG_CONTROL_UNIT 0xffff00ff
84#define TARG_IS_CONTROL_DEV(d) (minor((d)) == TARG_CONTROL_UNIT)
85
86#define TARG_TAG_WILDCARD ((u_int)~0)
87
88/* Offsets into our private CCB area for storing accept information */
89#define ccb_flags ppriv_field0
90#define ccb_descr ppriv_ptr1
91
92/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
93#define ccb_atio ppriv_ptr1
94
95/*
96 * When we're constructing a unit, we point to passed in user inquiry data here.
97 */
98#define ccb_inq ppriv_ptr1
99
73/* Per-instance device software context */
100struct targ_softc {
74struct targ_softc {
101 /* CTIOs pending on the controller */
102 struct ccb_queue pending_queue;
75 /* CCBs (CTIOs, ATIOs, INOTs) pending on the controller */
76 struct ccb_queue pending_ccb_queue;
103
77
104 /* ATIOs awaiting CTIO resources from the XPT */
105 struct ccb_queue work_queue;
78 /* Command descriptors awaiting CTIO resources from the XPT */
79 struct descr_queue work_queue;
106
80
107 /*
108 * ATIOs for SEND operations waiting for 'write'
109 * buffer resources from our userland daemon.
110 */
111 struct ccb_queue snd_ccb_queue;
81 /* Command descriptors that have been aborted back to the user. */
82 struct descr_queue abort_queue;
112
113 /*
83
84 /*
114 * ATIOs for RCV operations waiting for 'read'
115 * buffer resources from our userland daemon.
85 * Queue of CCBs that have been copied out to userland, but our
86 * userland daemon has not yet seen.
116 */
87 */
117 struct ccb_queue rcv_ccb_queue;
88 struct ccb_queue user_ccb_queue;
118
89
119 /*
120 * ATIOs for commands unknown to the kernel driver.
121 * These are queued for the userland daemon to
122 * consume.
123 */
124 struct ccb_queue unknown_atio_queue;
125
126 /*
127 * Userland buffers for SEND commands waiting for
128 * SEND ATIOs to be queued by an initiator.
129 */
130 struct bio_queue_head snd_bio_queue;
131
132 /*
133 * Userland buffers for RCV commands waiting for
134 * RCV ATIOs to be queued by an initiator.
135 */
136 struct bio_queue_head rcv_bio_queue;
137 struct devstat device_stats;
138 dev_t targ_dev;
139 struct selinfo snd_select;
140 struct selinfo rcv_select;
141 targ_state state;
142 targ_flags flags;
143 targ_exception exceptions;
144 u_int init_level;
145 u_int inq_data_len;
146 struct scsi_inquiry_data *inq_data;
147 struct ccb_accept_tio *accept_tio_list;
148 struct ccb_hdr_slist immed_notify_slist;
149 struct initiator_state istate[MAX_INITIATORS];
90 struct cam_periph *periph;
91 struct cam_path *path;
92 targ_state state;
93 struct selinfo read_select;
94 struct devstat device_stats;
95 struct mtx mtx;
150};
151
96};
97
152struct targ_cmd_desc {
153 struct ccb_accept_tio* atio_link;
154 u_int data_resid; /* How much left to transfer */
155 u_int data_increment;/* Amount to send before next disconnect */
156 void* data; /* The data. Can be from backing_store or not */
157 void* backing_store;/* Backing store allocated for this descriptor*/
158 struct bio *bp; /* Buffer for this transfer */
159 u_int max_size; /* Size of backing_store */
160 u_int32_t timeout;
161 u_int32_t
162 user_atio : 1, /* user ATIO (will define last CTIO) */
163 status : 8; /* Status to return to initiator */
164};
98static d_open_t targopen;
99static d_close_t targclose;
100static d_read_t targread;
101static d_write_t targwrite;
102static d_ioctl_t targioctl;
103static d_poll_t targpoll;
104static d_kqfilter_t targkqfilter;
105static void targreadfiltdetach(struct knote *kn);
106static int targreadfilt(struct knote *kn, long hint);
107static struct filterops targread_filtops =
108 { 1, NULL, targreadfiltdetach, targreadfilt };
165
109
166static d_open_t targopen;
167static d_close_t targclose;
168static d_read_t targread;
169static d_write_t targwrite;
170static d_ioctl_t targioctl;
171static d_poll_t targpoll;
172static d_strategy_t targstrategy;
173
174#define TARG_CDEV_MAJOR 65
110#define TARG_CDEV_MAJOR 65
175static struct cdevsw targ_cdevsw = {
176 /* open */ targopen,
177 /* close */ targclose,
178 /* read */ targread,
179 /* write */ targwrite,
180 /* ioctl */ targioctl,
181 /* poll */ targpoll,
182 /* mmap */ nommap,
111static struct cdevsw targ_cdevsw = {
112 /* open */ targopen,
113 /* close */ targclose,
114 /* read */ targread,
115 /* write */ targwrite,
116 /* ioctl */ targioctl,
117 /* poll */ targpoll,
118 /* mmap */ nommap,
183 /* strategy */ targstrategy,
119 /* strategy */ nostrategy,
184 /* name */ "targ",
185 /* maj */ TARG_CDEV_MAJOR,
186 /* dump */ nodump,
187 /* psize */ nopsize,
120 /* name */ "targ",
121 /* maj */ TARG_CDEV_MAJOR,
122 /* dump */ nodump,
123 /* psize */ nopsize,
188 /* flags */ 0,
124 /* flags */ D_KQFILTER,
125 /* kqfilter */ targkqfilter
189};
190
126};
127
191static int targsendccb(struct cam_periph *periph, union ccb *ccb,
192 union ccb *inccb);
128static cam_status targendislun(struct cam_path *path, int enable,
129 int grp6_len, int grp7_len);
130static cam_status targenable(struct targ_softc *softc,
131 struct cam_path *path,
132 int grp6_len, int grp7_len);
133static cam_status targdisable(struct targ_softc *softc);
134static periph_ctor_t targctor;
135static periph_dtor_t targdtor;
136static periph_start_t targstart;
137static int targusermerge(struct targ_softc *softc,
138 struct targ_cmd_descr *descr,
139 union ccb *ccb);
140static int targsendccb(struct targ_softc *softc, union ccb *ccb,
141 struct targ_cmd_descr *descr);
142static void targdone(struct cam_periph *periph,
143 union ccb *done_ccb);
144static int targreturnccb(struct targ_softc *softc,
145 union ccb *ccb);
146static union ccb * targgetccb(struct targ_softc *softc, xpt_opcode type,
147 int priority);
148static void targfreeccb(struct targ_softc *softc, union ccb *ccb);
149static struct targ_cmd_descr *
150 targgetdescr(struct targ_softc *softc);
193static periph_init_t targinit;
151static periph_init_t targinit;
152static void targclone(void *arg, char *name, int namelen,
153 dev_t *dev);
194static void targasync(void *callback_arg, u_int32_t code,
154static void targasync(void *callback_arg, u_int32_t code,
195 struct cam_path *path, void *arg);
196static int targallocinstance(void *, u_long);
197static int targfreeinstance(struct ioc_alloc_unit *);
198static cam_status targenlun(struct cam_periph *periph);
199static cam_status targdislun(struct cam_periph *periph);
200static periph_ctor_t targctor;
201static periph_dtor_t targdtor;
202static void targrunqueue(struct cam_periph *periph,
203 struct targ_softc *softc);
204static periph_start_t targstart;
205static void targdone(struct cam_periph *periph,
206 union ccb *done_ccb);
207static void targfireexception(struct cam_periph *periph,
208 struct targ_softc *softc);
209static void targinoterror(struct cam_periph *periph,
210 struct targ_softc *softc,
211 struct ccb_immed_notify *inot);
212static int targerror(union ccb *ccb, u_int32_t cam_flags,
213 u_int32_t sense_flags);
214static struct targ_cmd_desc* allocdescr(void);
215static void freedescr(struct targ_cmd_desc *buf);
216static void fill_sense(struct targ_softc *softc,
217 u_int initiator_id, u_int error_code,
218 u_int sense_key, u_int asc, u_int ascq);
219static void copy_sense(struct targ_softc *softc,
220 struct initiator_state *istate,
221 u_int8_t *sense_buffer, size_t sense_len);
222static void set_unit_attention_cond(struct cam_periph *periph,
223 u_int initiator_id, ua_types ua);
224static void set_ca_condition(struct cam_periph *periph,
225 u_int initiator_id, ca_types ca);
226static void abort_pending_transactions(struct cam_periph *periph,
227 u_int initiator_id, u_int tag_id,
228 int errno, int to_held_queue);
229
155 struct cam_path *path, void *arg);
156static void abort_all_pending(struct targ_softc *softc);
157static void notify_user(struct targ_softc *softc);
158static int targcamstatus(cam_status status);
159static size_t targccblen(xpt_opcode func_code);
160
230static struct periph_driver targdriver =
231{
232 targinit, "targ",
233 TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
234};
161static struct periph_driver targdriver =
162{
163 targinit, "targ",
164 TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
165};
235
236PERIPHDRIVER_DECLARE(targ, targdriver);
237
166PERIPHDRIVER_DECLARE(targ, targdriver);
167
238static dev_t targ_ctl_dev;
168static struct mtx targ_mtx;
169#define TARG_LOCK(softc) mtx_lock(&(softc)->mtx)
170#define TARG_UNLOCK(softc) mtx_unlock(&(softc)->mtx)
239
171
240static void
241targinit(void)
242{
243 targ_ctl_dev = make_dev(&targ_cdevsw, TARG_CONTROL_UNIT, UID_ROOT,
244 GID_OPERATOR, 0600, "%s.ctl", "targ");
245 if (targ_ctl_dev == (dev_t) 0) {
246 printf("targ: failed to create control dev\n");
247 }
248}
172static MALLOC_DEFINE(M_TARG, "TARG", "TARG data");
249
173
250static void
251targasync(void *callback_arg, u_int32_t code,
252 struct cam_path *path, void *arg)
174/* Create softc and initialize it. Only one proc can open each targ device. */
175static int
176targopen(dev_t dev, int flags, int fmt, struct thread *td)
253{
177{
254 struct cam_periph *periph;
255 struct targ_softc *softc;
256
178 struct targ_softc *softc;
179
257 periph = (struct cam_periph *)callback_arg;
258 softc = (struct targ_softc *)periph->softc;
259 switch (code) {
260 case AC_PATH_DEREGISTERED:
261 {
262 /* XXX Implement */
263 break;
180 mtx_lock(&targ_mtx);
181 if (dev->si_drv1 != 0) {
182 mtx_unlock(&targ_mtx);
183 return (EBUSY);
264 }
184 }
265 default:
266 break;
185
186 /* Mark device busy before any potentially blocking operations */
187 dev->si_drv1 = (void *)~0;
188 mtx_unlock(&targ_mtx);
189
190 /* Create the targ device, allocate its softc, initialize it */
191 if ((dev->si_flags & SI_NAMED) == 0) {
192 make_dev(&targ_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
193 "targ%d", dev2unit(dev));
267 }
194 }
195 MALLOC(softc, struct targ_softc *, sizeof(*softc), M_TARG,
196 M_WAITOK | M_ZERO);
197 dev->si_drv1 = softc;
198 softc->state = TARG_STATE_OPENED;
199 softc->periph = NULL;
200 softc->path = NULL;
201 mtx_init(&softc->mtx, devtoname(dev), "targ cdev", MTX_DEF);
202
203 TAILQ_INIT(&softc->pending_ccb_queue);
204 TAILQ_INIT(&softc->work_queue);
205 TAILQ_INIT(&softc->abort_queue);
206 TAILQ_INIT(&softc->user_ccb_queue);
207
208 return (0);
268}
269
209}
210
270/* Attempt to enable our lun */
271static cam_status
272targenlun(struct cam_periph *periph)
211/* Disable LUN if enabled and teardown softc */
212static int
213targclose(dev_t dev, int flag, int fmt, struct thread *td)
273{
214{
274 union ccb immed_ccb;
275 struct targ_softc *softc;
276 cam_status status;
277 int i;
215 struct targ_softc *softc;
216 int error;
278
217
279 softc = (struct targ_softc *)periph->softc;
280
281 if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
282 return (CAM_REQ_CMP);
283
284 xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
285 immed_ccb.ccb_h.func_code = XPT_EN_LUN;
286
287 /* Don't need support for any vendor specific commands */
288 immed_ccb.cel.grp6_len = 0;
289 immed_ccb.cel.grp7_len = 0;
290 immed_ccb.cel.enable = 1;
291 xpt_action(&immed_ccb);
292 status = immed_ccb.ccb_h.status;
293 if (status != CAM_REQ_CMP) {
294 xpt_print_path(periph->path);
295 printf("targenlun - Enable Lun Rejected with status 0x%x\n",
296 status);
297 return (status);
218 softc = (struct targ_softc *)dev->si_drv1;
219 TARG_LOCK(softc);
220 error = targdisable(softc);
221 if (error == 0) {
222 dev->si_drv1 = 0;
223 mtx_lock(&targ_mtx);
224 if (softc->periph != NULL) {
225 cam_periph_invalidate(softc->periph);
226 softc->periph = NULL;
227 }
228 mtx_unlock(&targ_mtx);
229 TARG_UNLOCK(softc);
230 mtx_destroy(&softc->mtx);
231 destroy_dev(dev);
232 FREE(softc, M_TARG);
233 } else {
234 TARG_UNLOCK(softc);
298 }
235 }
299
300 softc->flags |= TARG_FLAG_LUN_ENABLED;
236 return (error);
237}
301
238
302 /*
303 * Build up a buffer of accept target I/O
304 * operations for incoming selections.
305 */
306 for (i = 0; i < MAX_ACCEPT; i++) {
307 struct ccb_accept_tio *atio;
239/* Enable/disable LUNs, set debugging level */
240static int
241targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
242{
243 struct targ_softc *softc;
244 cam_status status;
308
245
309 atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
310 M_NOWAIT);
311 if (atio == NULL) {
312 status = CAM_RESRC_UNAVAIL;
313 break;
314 }
246 softc = (struct targ_softc *)dev->si_drv1;
315
247
316 atio->ccb_h.ccb_descr = allocdescr();
248 switch (cmd) {
249 case TARGIOCENABLE:
250 {
251 struct ioc_enable_lun *new_lun;
252 struct cam_path *path;
317
253
318 if (atio->ccb_h.ccb_descr == NULL) {
319 free(atio, M_DEVBUF);
320 status = CAM_RESRC_UNAVAIL;
254 new_lun = (struct ioc_enable_lun *)addr;
255 status = xpt_create_path(&path, /*periph*/NULL,
256 new_lun->path_id,
257 new_lun->target_id,
258 new_lun->lun_id);
259 if (status != CAM_REQ_CMP) {
260 printf("Couldn't create path, status %#x\n", status);
321 break;
322 }
261 break;
262 }
323
324 xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
325 atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
326 atio->ccb_h.cbfcnp = targdone;
327 atio->ccb_h.ccb_flags = TARG_CCB_NONE;
328 xpt_action((union ccb *)atio);
329 status = atio->ccb_h.status;
330 if (status != CAM_REQ_INPROG) {
331 xpt_print_path(periph->path);
332 printf("Queue of atio failed\n");
333 freedescr(atio->ccb_h.ccb_descr);
334 free(atio, M_DEVBUF);
335 break;
336 }
337 ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
338 softc->accept_tio_list;
339 softc->accept_tio_list = atio;
263 TARG_LOCK(softc);
264 status = targenable(softc, path, new_lun->grp6_len,
265 new_lun->grp7_len);
266 TARG_UNLOCK(softc);
267 xpt_free_path(path);
268 break;
340 }
269 }
270 case TARGIOCDISABLE:
271 TARG_LOCK(softc);
272 status = targdisable(softc);
273 TARG_UNLOCK(softc);
274 break;
275 case TARGIOCDEBUG:
276 {
277#ifdef CAMDEBUG
278 struct ccb_debug cdbg;
341
279
342 if (i == 0) {
343 xpt_print_path(periph->path);
344 printf("targenlun - Could not allocate accept tio CCBs: "
345 "status = 0x%x\n", status);
346 targdislun(periph);
347 return (CAM_REQ_CMP_ERR);
348 }
280 bzero(&cdbg, sizeof cdbg);
281 if (*((int *)addr) != 0)
282 cdbg.flags = CAM_DEBUG_PERIPH;
283 else
284 cdbg.flags = CAM_DEBUG_NONE;
285 xpt_setup_ccb(&cdbg.ccb_h, softc->path, /*priority*/0);
286 cdbg.ccb_h.func_code = XPT_DEBUG;
287 cdbg.ccb_h.cbfcnp = targdone;
349
288
350 /*
351 * Build up a buffer of immediate notify CCBs
352 * so the SIM can tell us of asynchronous target mode events.
353 */
354 for (i = 0; i < MAX_ACCEPT; i++) {
355 struct ccb_immed_notify *inot;
356
357 inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
358 M_NOWAIT);
359
360 if (inot == NULL) {
361 status = CAM_RESRC_UNAVAIL;
289 /* If no periph available, disallow debugging changes */
290 TARG_LOCK(softc);
291 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) {
292 status = CAM_DEV_NOT_THERE;
293 TARG_UNLOCK(softc);
362 break;
363 }
294 break;
295 }
364
365 xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
366 inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
367 inot->ccb_h.cbfcnp = targdone;
368 SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
369 periph_links.sle);
370 xpt_action((union ccb *)inot);
296 xpt_action((union ccb *)&cdbg);
297 TARG_UNLOCK(softc);
298 status = cdbg.ccb_h.status & CAM_STATUS_MASK;
299#else
300 status = CAM_FUNC_NOTAVAIL;
301#endif
302 break;
371 }
303 }
372
373 if (i == 0) {
374 xpt_print_path(periph->path);
375 printf("targenlun - Could not allocate immediate notify CCBs: "
376 "status = 0x%x\n", status);
377 targdislun(periph);
378 return (CAM_REQ_CMP_ERR);
304 default:
305 status = CAM_PROVIDE_FAIL;
306 break;
379 }
380
307 }
308
381 return (CAM_REQ_CMP);
309 return (targcamstatus(status));
382}
383
310}
311
384static cam_status
385targdislun(struct cam_periph *periph)
312/* Writes are always ready, reads wait for user_ccb_queue or abort_queue */
313static int
314targpoll(dev_t dev, int poll_events, struct thread *td)
386{
315{
387 union ccb ccb;
388 struct targ_softc *softc;
316 struct targ_softc *softc;
389 struct ccb_accept_tio* atio;
390 struct ccb_hdr *ccb_h;
317 int revents;
391
318
392 softc = (struct targ_softc *)periph->softc;
393 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
394 return CAM_REQ_CMP;
319 softc = (struct targ_softc *)dev->si_drv1;
395
320
396 /* XXX Block for Continue I/O completion */
397
398 /* Kill off all ACCECPT and IMMEDIATE CCBs */
399 while ((atio = softc->accept_tio_list) != NULL) {
400
401 softc->accept_tio_list =
402 ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
403 xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
404 ccb.cab.ccb_h.func_code = XPT_ABORT;
405 ccb.cab.abort_ccb = (union ccb *)atio;
406 xpt_action(&ccb);
321 /* Poll for write() is always ok. */
322 revents = poll_events & (POLLOUT | POLLWRNORM);
323 if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
324 /* Poll for read() depends on user and abort queues. */
325 TARG_LOCK(softc);
326 if (!TAILQ_EMPTY(&softc->user_ccb_queue) ||
327 !TAILQ_EMPTY(&softc->abort_queue)) {
328 revents |= poll_events & (POLLIN | POLLRDNORM);
329 }
330 /* Only sleep if the user didn't poll for write. */
331 if (revents == 0)
332 selrecord(td, &softc->read_select);
333 TARG_UNLOCK(softc);
407 }
408
334 }
335
409 while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
410 SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
411 xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
412 ccb.cab.ccb_h.func_code = XPT_ABORT;
413 ccb.cab.abort_ccb = (union ccb *)ccb_h;
414 xpt_action(&ccb);
415 }
416
417 /*
418 * Dissable this lun.
419 */
420 xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
421 ccb.cel.ccb_h.func_code = XPT_EN_LUN;
422 ccb.cel.enable = 0;
423 xpt_action(&ccb);
424
425 if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
426 printf("targdislun - Disabling lun on controller failed "
427 "with status 0x%x\n", ccb.cel.ccb_h.status);
428 else
429 softc->flags &= ~TARG_FLAG_LUN_ENABLED;
430 return (ccb.cel.ccb_h.status);
336 return (revents);
431}
432
337}
338
433static cam_status
434targctor(struct cam_periph *periph, void *arg)
339static int
340targkqfilter(dev_t dev, struct knote *kn)
435{
341{
436 struct ccb_pathinq *cpi;
437 struct targ_softc *softc;
438 int i;
342 struct targ_softc *softc;
439
343
440 cpi = (struct ccb_pathinq *)arg;
441
442 /* Allocate our per-instance private storage */
443 softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
444 if (softc == NULL) {
445 printf("targctor: unable to malloc softc\n");
446 return (CAM_REQ_CMP_ERR);
447 }
448
449 bzero(softc, sizeof(*softc));
450 TAILQ_INIT(&softc->pending_queue);
451 TAILQ_INIT(&softc->work_queue);
452 TAILQ_INIT(&softc->snd_ccb_queue);
453 TAILQ_INIT(&softc->rcv_ccb_queue);
454 TAILQ_INIT(&softc->unknown_atio_queue);
455 bioq_init(&softc->snd_bio_queue);
456 bioq_init(&softc->rcv_bio_queue);
457 softc->accept_tio_list = NULL;
458 SLIST_INIT(&softc->immed_notify_slist);
459 softc->state = TARG_STATE_NORMAL;
460 periph->softc = softc;
461 softc->init_level++;
462
463 /*
464 * We start out life with a UA to indicate power-on/reset.
465 */
466 for (i = 0; i < MAX_INITIATORS; i++)
467 softc->istate[i].pending_ua = UA_POWER_ON;
468
469 /*
470 * Allocate an inquiry data buffer.
471 * We let the user to override this if desired.
472 */
473 softc->inq_data_len = sizeof(*softc->inq_data);
474 softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
475 if (softc->inq_data == NULL) {
476 printf("targctor - Unable to malloc inquiry data\n");
477 targdtor(periph);
478 return (CAM_RESRC_UNAVAIL);
479 }
480 if (cpi->ccb_h.ccb_inq) {
481 bcopy(cpi->ccb_h.ccb_inq, softc->inq_data, softc->inq_data_len);
482 } else {
483 bzero(softc->inq_data, softc->inq_data_len);
484 softc->inq_data->device =
485 T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
486 softc->inq_data->version = 2;
487 softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
488 softc->inq_data->additional_length = softc->inq_data_len - 4;
489 strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
490 strncpy(softc->inq_data->product,
491 "TM-PT ", SID_PRODUCT_SIZE);
492 strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
493 }
494
495 /*
496 * Preserve the SIM's capabilities here. Don't let user applications
497 * do something dumb.
498 */
499 if (softc->inq_data->version >= 2) {
500 softc->inq_data->flags &=
501 ~(PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32|PI_TAG_ABLE);
502 softc->inq_data->flags |= (cpi->hba_inquiry &
503 (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32|PI_TAG_ABLE));
504 }
505 softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT,
506 GID_OPERATOR, 0600, "%s%d",
507 periph->periph_name, periph->unit_number);
508 softc->targ_dev->si_drv1 = periph;
509
510 softc->init_level++;
511 return (CAM_REQ_CMP);
344 softc = (struct targ_softc *)dev->si_drv1;
345 kn->kn_hook = (caddr_t)softc;
346 kn->kn_fop = &targread_filtops;
347 TARG_LOCK(softc);
348 SLIST_INSERT_HEAD(&softc->read_select.si_note, kn, kn_selnext);
349 TARG_UNLOCK(softc);
350 return (0);
512}
513
514static void
351}
352
353static void
515targdtor(struct cam_periph *periph)
354targreadfiltdetach(struct knote *kn)
516{
355{
517 struct targ_softc *softc;
356 struct targ_softc *softc;
518
357
519 softc = (struct targ_softc *)periph->softc;
520
521 softc->state = TARG_STATE_TEARDOWN;
522
523 targdislun(periph);
524
525 switch (softc->init_level) {
526 default:
527 /* FALLTHROUGH */
528 case 2:
529 free(softc->inq_data, M_DEVBUF);
530 destroy_dev(softc->targ_dev);
531 /* FALLTHROUGH */
532 case 1:
533 free(softc, M_DEVBUF);
534 break;
535 case 0:
536 panic("targdtor - impossible init level");;
537 }
358 softc = (struct targ_softc *)kn->kn_hook;
359 TARG_LOCK(softc);
360 SLIST_REMOVE(&softc->read_select.si_note, kn, knote, kn_selnext);
361 TARG_UNLOCK(softc);
538}
539
362}
363
364/* Notify the user's kqueue when the user queue or abort queue gets a CCB */
540static int
365static int
541targopen(dev_t dev, int flags, int fmt, struct thread *td)
366targreadfilt(struct knote *kn, long hint)
542{
367{
543 struct cam_periph *periph;
544 struct targ_softc *softc;
545 cam_status status;
546 int error;
547 int s;
368 struct targ_softc *softc;
369 int retval;
548
370
549 /* An open of the control device always succeeds */
550 if (TARG_IS_CONTROL_DEV(dev))
551 return 0;
552
553 s = splsoftcam();
554 periph = (struct cam_periph *)dev->si_drv1;
555 if (periph == NULL) {
556 splx(s);
557 return (ENXIO);
558 }
559 if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
560 splx(s);
561 return (error);
562 }
563
564 softc = (struct targ_softc *)periph->softc;
565 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
566 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
567 splx(s);
568 cam_periph_unlock(periph);
569 return(ENXIO);
570 }
571 }
572 splx(s);
573
574 status = targenlun(periph);
575 switch (status) {
576 case CAM_REQ_CMP:
577 error = 0;
578 break;
579 case CAM_RESRC_UNAVAIL:
580 error = ENOMEM;
581 break;
582 case CAM_LUN_ALRDY_ENA:
583 error = EADDRINUSE;
584 break;
585 default:
586 error = ENXIO;
587 break;
588 }
589 cam_periph_unlock(periph);
590 if (error) {
591 cam_periph_release(periph);
592 }
593 return (error);
371 softc = (struct targ_softc *)kn->kn_hook;
372 TARG_LOCK(softc);
373 retval = !TAILQ_EMPTY(&softc->user_ccb_queue) ||
374 !TAILQ_EMPTY(&softc->abort_queue);
375 TARG_UNLOCK(softc);
376 return (retval);
594}
595
377}
378
596static int
597targclose(dev_t dev, int flag, int fmt, struct thread *td)
379/* Send the HBA the enable/disable message */
380static cam_status
381targendislun(struct cam_path *path, int enable, int grp6_len, int grp7_len)
598{
382{
599 struct cam_periph *periph;
600 struct targ_softc *softc;
601 int s;
602 int error;
383 struct ccb_en_lun en_ccb;
384 cam_status status;
603
385
604 /* A close of the control device always succeeds */
605 if (TARG_IS_CONTROL_DEV(dev))
606 return 0;
607
608 s = splsoftcam();
609 periph = (struct cam_periph *)dev->si_drv1;
610 if (periph == NULL) {
611 splx(s);
612 return (ENXIO);
386 /* Tell the lun to begin answering selects */
387 xpt_setup_ccb(&en_ccb.ccb_h, path, /*priority*/1);
388 en_ccb.ccb_h.func_code = XPT_EN_LUN;
389 /* Don't need support for any vendor specific commands */
390 en_ccb.grp6_len = grp6_len;
391 en_ccb.grp7_len = grp7_len;
392 en_ccb.enable = enable ? 1 : 0;
393 xpt_action((union ccb *)&en_ccb);
394 status = en_ccb.ccb_h.status & CAM_STATUS_MASK;
395 if (status != CAM_REQ_CMP) {
396 xpt_print_path(path);
397 printf("%sable lun CCB rejected, status %#x\n",
398 enable ? "en" : "dis", status);
613 }
399 }
614 if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
615 return (error);
616 softc = (struct targ_softc *)periph->softc;
617 splx(s);
618
619 targdislun(periph);
620
621 cam_periph_unlock(periph);
622 cam_periph_release(periph);
623
624 return (0);
400 return (status);
625}
626
401}
402
627static int
628targallocinstance(void *arg, u_long cmd)
403/* Enable target mode on a LUN, given its path */
404static cam_status
405targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len,
406 int grp7_len)
629{
407{
630 struct ioc_alloc_unit *alloc_unit = arg;
631 struct scsi_inquiry_data local;
632 struct ccb_pathinq cpi;
633 struct cam_path *path;
634 struct cam_periph *periph;
408 struct cam_periph *periph;
635 cam_status status;
636 int free_path_on_return;
637 int error;
638
639 free_path_on_return = 0;
640 status = xpt_create_path(&path, /*periph*/NULL,
641 alloc_unit->path_id,
642 alloc_unit->target_id,
643 alloc_unit->lun_id);
644 if (status != CAM_REQ_CMP) {
645 printf("Couldn't Allocate Path %x\n", status);
646 goto fail;
647 }
409 struct ccb_pathinq cpi;
410 cam_status status;
648
411
649 free_path_on_return++;
412 if ((softc->state & TARG_STATE_LUN_ENABLED) != 0)
413 return (CAM_LUN_ALRDY_ENA);
650
414
415 /* Make sure SIM supports target mode */
651 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
652 cpi.ccb_h.func_code = XPT_PATH_INQ;
653 xpt_action((union ccb *)&cpi);
416 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
417 cpi.ccb_h.func_code = XPT_PATH_INQ;
418 xpt_action((union ccb *)&cpi);
654 status = cpi.ccb_h.status;
655
419 status = cpi.ccb_h.status & CAM_STATUS_MASK;
656 if (status != CAM_REQ_CMP) {
420 if (status != CAM_REQ_CMP) {
657 printf("Couldn't CPI %x\n", status);
658 goto fail;
421 printf("pathinq failed, status %#x\n", status);
422 goto enable_fail;
659 }
423 }
660
661 /* Can only alloc units on controllers that support target mode */
662 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
424 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
663 printf("Controller does not support target mode - status %x\n",
664 status);
665 status = CAM_PATH_INVALID;
666 goto fail;
425 printf("controller does not support target mode\n");
426 status = CAM_FUNC_NOTAVAIL;
427 goto enable_fail;
667 }
668
428 }
429
669 /* Ensure that we don't already have an instance for this unit. */
670 if ((periph = cam_periph_find(path, "targ")) != NULL) {
671 status = CAM_LUN_ALRDY_ENA;
672 goto fail;
673 }
430 /* Destroy any periph on our path if it is disabled */
431 mtx_lock(&targ_mtx);
432 periph = cam_periph_find(path, "targ");
433 if (periph != NULL) {
434 struct targ_softc *del_softc;
674
435
675 if (cmd == TARGCTLIOALLOCUNIT) {
676 status = copyin(alloc_unit->inquiry_data, &local, sizeof local);
677 if (status)
678 goto fail;
679 cpi.ccb_h.ccb_inq = &local;
680 } else {
681 cpi.ccb_h.ccb_inq = NULL;
436 del_softc = (struct targ_softc *)periph->softc;
437 if ((del_softc->state & TARG_STATE_LUN_ENABLED) == 0) {
438 cam_periph_invalidate(del_softc->periph);
439 del_softc->periph = NULL;
440 } else {
441 printf("Requested path still in use by targ%d\n",
442 periph->unit_number);
443 mtx_unlock(&targ_mtx);
444 status = CAM_LUN_ALRDY_ENA;
445 goto enable_fail;
446 }
682 }
447 }
683
684
448
685 /*
686 * Allocate a peripheral instance for
687 * this target instance.
688 */
449 /* Create a periph instance attached to this path */
689 status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
450 status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
690 "targ", CAM_PERIPH_BIO, path, targasync,
691 0, &cpi);
451 "targ", CAM_PERIPH_BIO, path, targasync, 0, softc);
452 mtx_unlock(&targ_mtx);
453 if (status != CAM_REQ_CMP) {
454 printf("cam_periph_alloc failed, status %#x\n", status);
455 goto enable_fail;
456 }
692
457
693fail:
694 switch (status) {
695 case CAM_REQ_CMP:
696 {
697 struct cam_periph *periph;
458 /* Ensure that the periph now exists. */
459 if (cam_periph_find(path, "targ") == NULL) {
460 panic("targenable: succeeded but no periph?");
461 /* NOTREACHED */
462 }
698
463
699 if ((periph = cam_periph_find(path, "targ")) == NULL)
700 panic("targallocinstance: Succeeded but no periph?");
701 error = 0;
702 alloc_unit->unit = periph->unit_number;
703 break;
464 /* Send the enable lun message */
465 status = targendislun(path, /*enable*/1, grp6_len, grp7_len);
466 if (status != CAM_REQ_CMP) {
467 printf("enable lun failed, status %#x\n", status);
468 goto enable_fail;
704 }
469 }
705 case CAM_RESRC_UNAVAIL:
706 error = ENOMEM;
707 break;
708 case CAM_LUN_ALRDY_ENA:
709 error = EADDRINUSE;
710 break;
711 default:
712 printf("targallocinstance: Unexpected CAM status %x\n", status);
713 /* FALLTHROUGH */
714 case CAM_PATH_INVALID:
715 error = ENXIO;
716 break;
717 case CAM_PROVIDE_FAIL:
718 error = ENODEV;
719 break;
720 }
470 softc->state |= TARG_STATE_LUN_ENABLED;
721
471
722 if (free_path_on_return != 0)
723 xpt_free_path(path);
724
725 return (error);
472enable_fail:
473 return (status);
726}
727
474}
475
728static int
729targfreeinstance(struct ioc_alloc_unit *alloc_unit)
476/* Disable this softc's target instance if enabled */
477static cam_status
478targdisable(struct targ_softc *softc)
730{
479{
731 struct cam_path *path;
732 struct cam_periph *periph;
733 struct targ_softc *softc;
734 cam_status status;
480 cam_status status;
735 int free_path_on_return;
736 int error;
737
738 periph = NULL;
739 free_path_on_return = 0;
740 status = xpt_create_path(&path, /*periph*/NULL,
741 alloc_unit->path_id,
742 alloc_unit->target_id,
743 alloc_unit->lun_id);
744 free_path_on_return++;
745
481
746 if (status != CAM_REQ_CMP)
747 goto fail;
482 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0)
483 return (CAM_REQ_CMP);
748
484
749 /* Find our instance. */
750 if ((periph = cam_periph_find(path, "targ")) == NULL) {
751 xpt_print_path(path);
752 printf("Invalid path specified for freeing target instance\n");
753 status = CAM_PATH_INVALID;
754 goto fail;
755 }
485 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targdisable\n"));
756
486
757 softc = (struct targ_softc *)periph->softc;
758
759 if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
760 status = CAM_BUSY;
761 goto fail;
762 }
487 /* Abort any ccbs pending on the controller */
488 abort_all_pending(softc);
763
489
764fail:
765 if (free_path_on_return != 0)
766 xpt_free_path(path);
490 /* Disable this lun */
491 status = targendislun(softc->path, /*enable*/0,
492 /*grp6_len*/0, /*grp7_len*/0);
493 if (status == CAM_REQ_CMP)
494 softc->state &= ~TARG_STATE_LUN_ENABLED;
495 else
496 printf("Disable lun failed, status %#x\n", status);
767
497
768 switch (status) {
769 case CAM_REQ_CMP:
770 if (periph != NULL)
771 cam_periph_invalidate(periph);
772 error = 0;
773 break;
774 case CAM_RESRC_UNAVAIL:
775 error = ENOMEM;
776 break;
777 case CAM_LUN_ALRDY_ENA:
778 error = EADDRINUSE;
779 break;
780 default:
781 printf("targfreeinstance: Unexpected CAM status %x\n", status);
782 /* FALLTHROUGH */
783 case CAM_PATH_INVALID:
784 error = ENODEV;
785 break;
498 return (status);
499}
500
501/* Initialize a periph (called from cam_periph_alloc) */
502static cam_status
503targctor(struct cam_periph *periph, void *arg)
504{
505 struct targ_softc *softc;
506
507 /* Store pointer to softc for periph-driven routines */
508 softc = (struct targ_softc *)arg;
509 periph->softc = softc;
510 softc->periph = periph;
511 softc->path = periph->path;
512 return (CAM_REQ_CMP);
513}
514
515static void
516targdtor(struct cam_periph *periph)
517{
518 struct targ_softc *softc;
519 struct ccb_hdr *ccb_h;
520 struct targ_cmd_descr *descr;
521
522 softc = (struct targ_softc *)periph->softc;
523
524 /*
525 * targdisable() aborts CCBs back to the user and leaves them
526 * on user_ccb_queue and abort_queue in case the user is still
527 * interested in them. We free them now.
528 */
529 while ((ccb_h = TAILQ_FIRST(&softc->user_ccb_queue)) != NULL) {
530 TAILQ_REMOVE(&softc->user_ccb_queue, ccb_h, periph_links.tqe);
531 targfreeccb(softc, (union ccb *)ccb_h);
786 }
532 }
787 return (error);
533 while ((descr = TAILQ_FIRST(&softc->abort_queue)) != NULL) {
534 TAILQ_REMOVE(&softc->abort_queue, descr, tqe);
535 FREE(descr, M_TARG);
536 }
537
538 softc->periph = NULL;
539 softc->path = NULL;
540 periph->softc = NULL;
788}
789
541}
542
543/* Receive CCBs from user mode proc and send them to the HBA */
790static int
544static int
791targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
545targwrite(dev_t dev, struct uio *uio, int ioflag)
792{
546{
793 struct cam_periph *periph;
547 union ccb *user_ccb;
794 struct targ_softc *softc;
548 struct targ_softc *softc;
795 int error;
549 struct targ_cmd_descr *descr;
550 int write_len, error;
551 int func_code, priority;
796
552
797 error = 0;
798 if (TARG_IS_CONTROL_DEV(dev)) {
799 switch (cmd) {
800 case OTARGCTLIOALLOCUNIT:
801 case TARGCTLIOALLOCUNIT:
802 error = targallocinstance(addr, cmd);
553 softc = (struct targ_softc *)dev->si_drv1;
554 write_len = error = 0;
555 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
556 ("write - uio_resid %d\n", uio->uio_resid));
557 while (uio->uio_resid >= sizeof(user_ccb) && error == 0) {
558 union ccb *ccb;
559 int error;
560
561 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
562 if (error != 0) {
563 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
564 ("write - uiomove failed (%d)\n", error));
803 break;
565 break;
804 case OTARGCTLIOFREEUNIT:
805 case TARGCTLIOFREEUNIT:
806 /*
807 * Old_ioc_alloc_unit and ioc_alloc_unit are the
808 * same with respect to what we need from the structure
809 * for this function.
810 */
811 error = targfreeinstance((struct ioc_alloc_unit*)addr);
566 }
567 priority = fuword(&user_ccb->ccb_h.pinfo.priority);
568 if (priority == -1) {
569 error = EINVAL;
812 break;
570 break;
571 }
572 func_code = fuword(&user_ccb->ccb_h.func_code);
573 switch (func_code) {
574 case XPT_ACCEPT_TARGET_IO:
575 case XPT_IMMED_NOTIFY:
576 ccb = targgetccb(softc, func_code, priority);
577 descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
578 descr->user_ccb = user_ccb;
579 descr->func_code = func_code;
580 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
581 ("Sent ATIO/INOT (%p)\n", user_ccb));
582 xpt_action(ccb);
583 TARG_LOCK(softc);
584 TAILQ_INSERT_TAIL(&softc->pending_ccb_queue,
585 &ccb->ccb_h,
586 periph_links.tqe);
587 TARG_UNLOCK(softc);
588 break;
813 default:
589 default:
814 error = EINVAL;
590 if ((func_code & XPT_FC_QUEUED) != 0) {
591 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
592 ("Sending queued ccb %#x (%p)\n",
593 func_code, user_ccb));
594 descr = targgetdescr(softc);
595 descr->user_ccb = user_ccb;
596 descr->priority = priority;
597 descr->func_code = func_code;
598 TARG_LOCK(softc);
599 TAILQ_INSERT_TAIL(&softc->work_queue,
600 descr, tqe);
601 TARG_UNLOCK(softc);
602 xpt_schedule(softc->periph, priority);
603 } else {
604 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
605 ("Sending inline ccb %#x (%p)\n",
606 func_code, user_ccb));
607 ccb = targgetccb(softc, func_code, priority);
608 descr = (struct targ_cmd_descr *)
609 ccb->ccb_h.targ_descr;
610 descr->user_ccb = user_ccb;
611 descr->priority = priority;
612 descr->func_code = func_code;
613 if (targusermerge(softc, descr, ccb) != EFAULT)
614 targsendccb(softc, ccb, descr);
615 targreturnccb(softc, ccb);
616 }
815 break;
816 }
617 break;
618 }
817 return (error);
619 write_len += sizeof(user_ccb);
818 }
620 }
621
622 /*
623 * If we've successfully taken in some amount of
624 * data, return success for that data first. If
625 * an error is persistent, it will be reported
626 * on the next write.
627 */
628 if (error != 0 && write_len == 0)
629 return (error);
630 if (write_len == 0 && uio->uio_resid != 0)
631 return (ENOSPC);
632 return (0);
633}
819
634
820 periph = (struct cam_periph *)dev->si_drv1;
821 if (periph == NULL)
822 return (ENXIO);
635/* Process requests (descrs) via the periph-supplied CCBs */
636static void
637targstart(struct cam_periph *periph, union ccb *start_ccb)
638{
639 struct targ_softc *softc;
640 struct targ_cmd_descr *descr, *next_descr;
641 int error;
642
823 softc = (struct targ_softc *)periph->softc;
643 softc = (struct targ_softc *)periph->softc;
824 switch (cmd) {
825 case TARGIOCFETCHEXCEPTION:
826 *((targ_exception *)addr) = softc->exceptions;
827 break;
828 case TARGIOCCLEAREXCEPTION:
829 {
830 targ_exception clear_mask;
644 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targstart %p\n", start_ccb));
831
645
832 clear_mask = *((targ_exception *)addr);
833 if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
834 struct ccb_hdr *ccbh;
646 TARG_LOCK(softc);
647 descr = TAILQ_FIRST(&softc->work_queue);
648 if (descr == NULL) {
649 TARG_UNLOCK(softc);
650 xpt_release_ccb(start_ccb);
651 } else {
652 TAILQ_REMOVE(&softc->work_queue, descr, tqe);
653 next_descr = TAILQ_FIRST(&softc->work_queue);
654 TARG_UNLOCK(softc);
835
655
836 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
837 if (ccbh != NULL) {
838 TAILQ_REMOVE(&softc->unknown_atio_queue,
839 ccbh, periph_links.tqe);
840 /* Requeue the ATIO back to the controller */
841 ccbh->ccb_flags = TARG_CCB_NONE;
842 xpt_action((union ccb *)ccbh);
843 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
844 }
845 if (ccbh != NULL)
846 clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
656 /* Initiate a transaction using the descr and supplied CCB */
657 error = targusermerge(softc, descr, start_ccb);
658 if (error == 0)
659 error = targsendccb(softc, start_ccb, descr);
660 if (error != 0) {
661 xpt_print_path(periph->path);
662 printf("targsendccb failed, err %d\n", error);
663 xpt_release_ccb(start_ccb);
664 suword(&descr->user_ccb->ccb_h.status,
665 CAM_REQ_CMP_ERR);
666 TARG_LOCK(softc);
667 TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe);
668 TARG_UNLOCK(softc);
669 notify_user(softc);
847 }
670 }
848 softc->exceptions &= ~clear_mask;
849 if (softc->exceptions == TARG_EXCEPT_NONE
850 && softc->state == TARG_STATE_EXCEPTION) {
851 softc->state = TARG_STATE_NORMAL;
852 targrunqueue(periph, softc);
853 }
854 break;
855 }
856 case TARGIOCFETCHATIO:
857 {
858 struct ccb_hdr *ccbh;
859
671
860 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
861 if (ccbh != NULL) {
862 bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
863 } else {
864 error = ENOENT;
865 }
866 break;
672 /* If we have more work to do, stay scheduled */
673 if (next_descr != NULL)
674 xpt_schedule(periph, next_descr->priority);
867 }
675 }
868 case TARGIOCCOMMAND:
869 {
870 union ccb *inccb;
871 union ccb *ccb;
676}
872
677
873 /*
874 * XXX JGibbs
875 * This code is lifted directly from the pass-thru driver.
876 * Perhaps this should be moved to a library????
877 */
878 inccb = (union ccb *)addr;
879 ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
678static int
679targusermerge(struct targ_softc *softc, struct targ_cmd_descr *descr,
680 union ccb *ccb)
681{
682 struct ccb_hdr *u_ccbh, *k_ccbh;
683 size_t ccb_len;
684 int error;
880
685
881 error = targsendccb(periph, ccb, inccb);
686 u_ccbh = &descr->user_ccb->ccb_h;
687 k_ccbh = &ccb->ccb_h;
882
688
883 xpt_release_ccb(ccb);
884
885 break;
689 /*
690 * There are some fields in the CCB header that need to be
691 * preserved, the rest we get from the user ccb. (See xpt_merge_ccb)
692 */
693 xpt_setup_ccb(k_ccbh, softc->path, descr->priority);
694 k_ccbh->retry_count = fuword(&u_ccbh->retry_count);
695 k_ccbh->func_code = descr->func_code;
696 k_ccbh->flags = fuword(&u_ccbh->flags);
697 k_ccbh->timeout = fuword(&u_ccbh->timeout);
698 ccb_len = targccblen(k_ccbh->func_code) - sizeof(struct ccb_hdr);
699 error = copyin(u_ccbh + 1, k_ccbh + 1, ccb_len);
700 if (error != 0) {
701 k_ccbh->status = CAM_REQ_CMP_ERR;
702 return (error);
886 }
703 }
887 case TARGIOCGETISTATE:
888 case TARGIOCSETISTATE:
889 {
890 struct ioc_initiator_state *ioc_istate;
891
704
892 ioc_istate = (struct ioc_initiator_state *)addr;
893 if (ioc_istate->initiator_id > MAX_INITIATORS) {
894 error = EINVAL;
895 break;
705 /* Translate usermode abort_ccb pointer to its kernel counterpart */
706 if (k_ccbh->func_code == XPT_ABORT) {
707 struct ccb_abort *cab;
708 struct ccb_hdr *ccb_h;
709
710 cab = (struct ccb_abort *)ccb;
711 TARG_LOCK(softc);
712 TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue,
713 periph_links.tqe) {
714 struct targ_cmd_descr *ab_descr;
715
716 ab_descr = (struct targ_cmd_descr *)ccb_h->targ_descr;
717 if (ab_descr->user_ccb == cab->abort_ccb) {
718 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
719 ("Changing abort for %p to %p\n",
720 cab->abort_ccb, ccb_h));
721 cab->abort_ccb = (union ccb *)ccb_h;
722 break;
723 }
896 }
724 }
897 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
898 ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
899 if (cmd == TARGIOCGETISTATE) {
900 bcopy(&softc->istate[ioc_istate->initiator_id],
901 &ioc_istate->istate, sizeof(ioc_istate->istate));
902 } else {
903 bcopy(&ioc_istate->istate,
904 &softc->istate[ioc_istate->initiator_id],
905 sizeof(ioc_istate->istate));
906 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
907 ("pending_ca now %x\n",
908 softc->istate[ioc_istate->initiator_id].pending_ca));
725 TARG_UNLOCK(softc);
726 /* CCB not found, set appropriate status */
727 if (ccb_h == NULL) {
728 k_ccbh->status = CAM_PATH_INVALID;
729 error = ESRCH;
909 }
730 }
910 break;
911 }
731 }
912 case TARGIODEBUG:
913 {
914#ifdef CAMDEBUG
915 union ccb ccb;
916 bzero (&ccb, sizeof ccb);
917 if (xpt_create_path(&ccb.ccb_h.path, periph,
918 xpt_path_path_id(periph->path),
919 xpt_path_target_id(periph->path),
920 xpt_path_lun_id(periph->path)) != CAM_REQ_CMP) {
921 error = EINVAL;
922 break;
923 }
924 if (*((int *)addr)) {
925 ccb.cdbg.flags = CAM_DEBUG_PERIPH;
926 } else {
927 ccb.cdbg.flags = CAM_DEBUG_NONE;
928 }
929 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 0);
930 ccb.ccb_h.func_code = XPT_DEBUG;
931 ccb.ccb_h.path_id = xpt_path_path_id(ccb.ccb_h.path);
932 ccb.ccb_h.target_id = xpt_path_target_id(ccb.ccb_h.path);
933 ccb.ccb_h.target_lun = xpt_path_lun_id(ccb.ccb_h.path);
934 ccb.ccb_h.cbfcnp = targdone;
935 xpt_action(&ccb);
936 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
937 error = EIO;
938 } else {
939 error = 0;
940 }
941 xpt_free_path(ccb.ccb_h.path);
942#else
943 error = 0;
944#endif
945 break;
946 }
947 default:
948 error = ENOTTY;
949 break;
950 }
732
951 return (error);
952}
953
733 return (error);
734}
735
954/*
955 * XXX JGibbs lifted from pass-thru driver.
956 * Generally, "ccb" should be the CCB supplied by the kernel. "inccb"
957 * should be the CCB that is copied in from the user.
958 */
736/* Build and send a kernel CCB formed from descr->user_ccb */
959static int
737static int
960targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
738targsendccb(struct targ_softc *softc, union ccb *ccb,
739 struct targ_cmd_descr *descr)
961{
740{
962 struct targ_softc *softc;
963 struct cam_periph_map_info mapinfo;
964 int error, need_unmap;
965 int s;
741 struct cam_periph_map_info *mapinfo;
742 struct ccb_hdr *ccb_h;
743 int error;
966
744
967 softc = (struct targ_softc *)periph->softc;
745 ccb_h = &ccb->ccb_h;
746 mapinfo = &descr->mapinfo;
747 mapinfo->num_bufs_used = 0;
968
748
969 need_unmap = 0;
970
971 /*
749 /*
972 * There are some fields in the CCB header that need to be
973 * preserved, the rest we get from the user.
974 */
975 xpt_merge_ccb(ccb, inccb);
976
977 /*
978 * There's no way for the user to have a completion
979 * function, so we put our own completion function in here.
750 * There's no way for the user to have a completion
751 * function, so we put our own completion function in here.
752 * We also stash in a reference to our descriptor so targreturnccb()
753 * can find our mapping info.
980 */
754 */
981 ccb->ccb_h.cbfcnp = targdone;
755 ccb_h->cbfcnp = targdone;
756 ccb_h->targ_descr = descr;
982
983 /*
984 * We only attempt to map the user memory into kernel space
985 * if they haven't passed in a physical memory pointer,
986 * and if there is actually an I/O operation to perform.
987 * Right now cam_periph_mapmem() only supports SCSI and device
988 * match CCBs. For the SCSI CCBs, we only pass the CCB in if
989 * there's actually data to map. cam_periph_mapmem() will do the
990 * right thing, even if there isn't data to map, but since CCBs
991 * without data are a reasonably common occurance (e.g. test unit
992 * ready), it will save a few cycles if we check for it here.
993 */
757
758 /*
759 * We only attempt to map the user memory into kernel space
760 * if they haven't passed in a physical memory pointer,
761 * and if there is actually an I/O operation to perform.
762 * Right now cam_periph_mapmem() only supports SCSI and device
763 * match CCBs. For the SCSI CCBs, we only pass the CCB in if
764 * there's actually data to map. cam_periph_mapmem() will do the
765 * right thing, even if there isn't data to map, but since CCBs
766 * without data are a reasonably common occurance (e.g. test unit
767 * ready), it will save a few cycles if we check for it here.
768 */
994 if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
995 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
996 && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
997 || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
769 if (((ccb_h->flags & CAM_DATA_PHYS) == 0)
770 && (((ccb_h->func_code == XPT_CONT_TARGET_IO)
771 && ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE))
772 || (ccb_h->func_code == XPT_DEV_MATCH))) {
998
773
999 bzero(&mapinfo, sizeof(mapinfo));
774 error = cam_periph_mapmem(ccb, mapinfo);
1000
775
1001 error = cam_periph_mapmem(ccb, &mapinfo);
1002
1003 /*
1004 * cam_periph_mapmem returned an error, we can't continue.
1005 * Return the error to the user.
1006 */
776 /*
777 * cam_periph_mapmem returned an error, we can't continue.
778 * Return the error to the user.
779 */
1007 if (error)
1008 return(error);
1009
1010 /*
1011 * We successfully mapped the memory in, so we need to
1012 * unmap it when the transaction is done.
1013 */
1014 need_unmap = 1;
780 if (error) {
781 ccb_h->status = CAM_REQ_CMP_ERR;
782 mapinfo->num_bufs_used = 0;
783 return (error);
784 }
1015 }
1016
1017 /*
1018 * Once queued on the pending CCB list, this CCB will be protected
785 }
786
787 /*
788 * Once queued on the pending CCB list, this CCB will be protected
1019 * by the error recovery handling used for 'buffer I/O' ccbs. Since
1020 * we are in a process context here, however, the software interrupt
1021 * for this driver may deliver an event invalidating this CCB just
1022 * before we queue it. Close this race condition by blocking
1023 * software interrupt delivery, checking for any pertinent queued
1024 * events, and only then queuing this CCB.
789 * by our error recovery handler.
1025 */
790 */
1026 s = splsoftcam();
1027 if (softc->exceptions == 0) {
1028 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
1029 TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
1030 periph_links.tqe);
1031
1032 /*
1033 * If the user wants us to perform any error recovery,
1034 * then honor that request. Otherwise, it's up to the
1035 * user to perform any error recovery.
1036 */
1037 error = cam_periph_runccb(ccb, /* error handler */NULL,
1038 CAM_RETRY_SELTO, SF_RETRY_UA,
1039 &softc->device_stats);
1040
1041 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
1042 TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h,
1043 periph_links.tqe);
1044 } else {
1045 ccb->ccb_h.status = CAM_UNACKED_EVENT;
1046 error = 0;
791 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("sendccb %p\n", ccb));
792 if (XPT_FC_IS_QUEUED(ccb)) {
793 TARG_LOCK(softc);
794 TAILQ_INSERT_TAIL(&softc->pending_ccb_queue, ccb_h,
795 periph_links.tqe);
796 TARG_UNLOCK(softc);
1047 }
797 }
1048 splx(s);
798 xpt_action(ccb);
1049
799
1050 if (need_unmap != 0)
1051 cam_periph_unmapmem(ccb, &mapinfo);
1052
1053 ccb->ccb_h.cbfcnp = NULL;
1054 ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
1055 bcopy(ccb, inccb, sizeof(union ccb));
1056
1057 return(error);
800 return (0);
1058}
1059
801}
802
1060
1061static int
1062targpoll(dev_t dev, int poll_events, struct thread *td)
803/* Completion routine for CCBs (called at splsoftcam) */
804static void
805targdone(struct cam_periph *periph, union ccb *done_ccb)
1063{
806{
1064 struct cam_periph *periph;
1065 struct targ_softc *softc;
807 struct targ_softc *softc;
1066 int revents;
1067 int s;
808 cam_status status;
1068
809
1069 /* ioctl is the only supported operation of the control device */
1070 if (TARG_IS_CONTROL_DEV(dev))
1071 return EINVAL;
1072
1073 periph = (struct cam_periph *)dev->si_drv1;
1074 if (periph == NULL)
1075 return (ENXIO);
810 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("targdone %p\n", done_ccb));
1076 softc = (struct targ_softc *)periph->softc;
811 softc = (struct targ_softc *)periph->softc;
812 TARG_LOCK(softc);
813 TAILQ_REMOVE(&softc->pending_ccb_queue, &done_ccb->ccb_h,
814 periph_links.tqe);
815 status = done_ccb->ccb_h.status & CAM_STATUS_MASK;
1077
816
1078 revents = 0;
1079 s = splcam();
1080 if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
1081 if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
1082 && bioq_first(&softc->rcv_bio_queue) == NULL)
1083 revents |= poll_events & (POLLOUT | POLLWRNORM);
817 /* If we're no longer enabled, throw away CCB */
818 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) {
819 targfreeccb(softc, done_ccb);
820 return;
1084 }
821 }
1085 if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
1086 if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
1087 && bioq_first(&softc->snd_bio_queue) == NULL)
1088 revents |= poll_events & (POLLIN | POLLRDNORM);
1089 }
822 /* abort_all_pending() waits for pending queue to be empty */
823 if (TAILQ_EMPTY(&softc->pending_ccb_queue))
824 wakeup(&softc->pending_ccb_queue);
1090
825
1091 if (softc->state != TARG_STATE_NORMAL)
1092 revents |= POLLERR;
1093
1094 if (revents == 0) {
1095 if (poll_events & (POLLOUT | POLLWRNORM))
1096 selrecord(td, &softc->rcv_select);
1097 if (poll_events & (POLLIN | POLLRDNORM))
1098 selrecord(td, &softc->snd_select);
826 switch (done_ccb->ccb_h.func_code) {
827 /* All FC_*_QUEUED CCBs go back to userland */
828 case XPT_IMMED_NOTIFY:
829 case XPT_ACCEPT_TARGET_IO:
830 case XPT_CONT_TARGET_IO:
831 TAILQ_INSERT_TAIL(&softc->user_ccb_queue, &done_ccb->ccb_h,
832 periph_links.tqe);
833 notify_user(softc);
834 break;
835 default:
836 panic("targdone: impossible xpt opcode %#x",
837 done_ccb->ccb_h.func_code);
838 /* NOTREACHED */
1099 }
839 }
1100 splx(s);
1101 return (revents);
840 TARG_UNLOCK(softc);
1102}
1103
841}
842
843/* Return CCBs to the user from the user queue and abort queue */
1104static int
1105targread(dev_t dev, struct uio *uio, int ioflag)
1106{
844static int
845targread(dev_t dev, struct uio *uio, int ioflag)
846{
1107 /* ioctl is the only supported operation of the control device */
1108 if (TARG_IS_CONTROL_DEV(dev))
1109 return EINVAL;
847 struct descr_queue *abort_queue;
848 struct targ_cmd_descr *user_descr;
849 struct targ_softc *softc;
850 struct ccb_queue *user_queue;
851 struct ccb_hdr *ccb_h;
852 union ccb *user_ccb;
853 int read_len, error;
1110
854
1111 if (uio->uio_iovcnt == 0
1112 || uio->uio_iov->iov_len == 0) {
1113 /* EOF */
1114 struct cam_periph *periph;
1115 struct targ_softc *softc;
1116 int s;
1117
1118 s = splcam();
1119 periph = (struct cam_periph *)dev->si_drv1;
1120 if (periph == NULL)
1121 return (ENXIO);
1122 softc = (struct targ_softc *)periph->softc;
1123 softc->flags |= TARG_FLAG_SEND_EOF;
1124 splx(s);
1125 targrunqueue(periph, softc);
1126 return (0);
1127 }
1128 return(physread(dev, uio, ioflag));
1129}
855 error = 0;
856 read_len = 0;
857 softc = (struct targ_softc *)dev->si_drv1;
858 user_queue = &softc->user_ccb_queue;
859 abort_queue = &softc->abort_queue;
860 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread\n"));
1130
861
1131static int
1132targwrite(dev_t dev, struct uio *uio, int ioflag)
1133{
1134 /* ioctl is the only supported operation of the control device */
1135 if (TARG_IS_CONTROL_DEV(dev))
1136 return EINVAL;
1137
1138 if (uio->uio_iovcnt == 0
1139 || uio->uio_iov->iov_len == 0) {
1140 /* EOF */
1141 struct cam_periph *periph;
1142 struct targ_softc *softc;
1143 int s;
1144
1145 s = splcam();
1146 periph = (struct cam_periph *)dev->si_drv1;
1147 if (periph == NULL)
1148 return (ENXIO);
1149 softc = (struct targ_softc *)periph->softc;
1150 softc->flags |= TARG_FLAG_RECEIVE_EOF;
1151 splx(s);
1152 targrunqueue(periph, softc);
1153 return (0);
862 /* If no data is available, wait or return immediately */
863 TARG_LOCK(softc);
864 ccb_h = TAILQ_FIRST(user_queue);
865 user_descr = TAILQ_FIRST(abort_queue);
866 while (ccb_h == NULL && user_descr == NULL) {
867 if ((ioflag & IO_NDELAY) == 0) {
868 error = msleep(user_queue, &softc->mtx,
869 PRIBIO | PCATCH, "targrd", 0);
870 ccb_h = TAILQ_FIRST(user_queue);
871 user_descr = TAILQ_FIRST(abort_queue);
872 if (error != 0) {
873 if (error == ERESTART) {
874 continue;
875 } else {
876 TARG_UNLOCK(softc);
877 goto read_fail;
878 }
879 }
880 } else {
881 TARG_UNLOCK(softc);
882 return (EAGAIN);
883 }
1154 }
884 }
1155 return(physwrite(dev, uio, ioflag));
1156}
1157
885
1158/*
1159 * Actually translate the requested transfer into one the physical driver
1160 * can understand. The transfer is described by a buf and will include
1161 * only one physical transfer.
1162 */
1163static void
1164targstrategy(struct bio *bp)
1165{
1166 struct cam_periph *periph;
1167 struct targ_softc *softc;
1168 int s;
1169
1170 bp->bio_resid = bp->bio_bcount;
886 /* Data is available so fill the user's buffer */
887 while (ccb_h != NULL) {
888 struct targ_cmd_descr *descr;
1171
889
1172 /* ioctl is the only supported operation of the control device */
1173 if (TARG_IS_CONTROL_DEV(bp->bio_dev)) {
1174 biofinish(bp, NULL, EINVAL);
1175 return;
1176 }
890 if (uio->uio_resid < sizeof(user_ccb))
891 break;
892 TAILQ_REMOVE(user_queue, ccb_h, periph_links.tqe);
893 TARG_UNLOCK(softc);
894 descr = (struct targ_cmd_descr *)ccb_h->targ_descr;
895 user_ccb = descr->user_ccb;
896 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
897 ("targread ccb %p (%p)\n", ccb_h, user_ccb));
898 error = targreturnccb(softc, (union ccb *)ccb_h);
899 if (error != 0)
900 goto read_fail;
901 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
902 if (error != 0)
903 goto read_fail;
904 read_len += sizeof(user_ccb);
1177
905
1178 periph = (struct cam_periph *)bp->bio_dev->si_drv1;
1179 if (periph == NULL) {
1180 biofinish(bp, NULL, ENXIO);
1181 return;
906 TARG_LOCK(softc);
907 ccb_h = TAILQ_FIRST(user_queue);
1182 }
908 }
1183 softc = (struct targ_softc *)periph->softc;
1184
909
1185 /*
1186 * Mask interrupts so that the device cannot be invalidated until
1187 * after we are in the queue. Otherwise, we might not properly
1188 * clean up one of the buffers.
1189 */
1190 s = splbio();
1191
1192 /*
1193 * If there is an exception pending, error out
1194 */
1195 if (softc->state != TARG_STATE_NORMAL) {
1196 splx(s);
1197 if (softc->state == TARG_STATE_EXCEPTION
1198 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1199 s = EBUSY;
1200 else
1201 s = ENXIO;
1202 biofinish(bp, NULL, s);
1203 return;
910 /* Flush out any aborted descriptors */
911 while (user_descr != NULL) {
912 if (uio->uio_resid < sizeof(user_ccb))
913 break;
914 TAILQ_REMOVE(abort_queue, user_descr, tqe);
915 TARG_UNLOCK(softc);
916 user_ccb = user_descr->user_ccb;
917 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
918 ("targread aborted descr %p (%p)\n",
919 user_descr, user_ccb));
920 suword(&user_ccb->ccb_h.status, CAM_REQ_ABORTED);
921 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
922 if (error != 0)
923 goto read_fail;
924 read_len += sizeof(user_ccb);
925
926 TARG_LOCK(softc);
927 user_descr = TAILQ_FIRST(abort_queue);
1204 }
928 }
1205
1206 /*
1207 * Place it in the queue of buffers available for either
1208 * SEND or RECEIVE commands.
1209 *
1210 */
1211 bp->bio_resid = bp->bio_bcount;
1212 if (bp->bio_cmd == BIO_READ) {
1213 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1214 ("Queued a SEND buffer\n"));
1215 bioq_insert_tail(&softc->snd_bio_queue, bp);
1216 } else {
1217 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1218 ("Queued a RECEIVE buffer\n"));
1219 bioq_insert_tail(&softc->rcv_bio_queue, bp);
1220 }
929 TARG_UNLOCK(softc);
1221
930
1222 splx(s);
1223
1224 /*
931 /*
1225 * Attempt to use the new buffer to service any pending
1226 * target commands.
932 * If we've successfully read some amount of data, don't report an
933 * error. If the error is persistent, it will be reported on the
934 * next read().
1227 */
935 */
1228 targrunqueue(periph, softc);
936 if (read_len == 0 && uio->uio_resid != 0)
937 error = ENOSPC;
1229
938
1230 return;
939read_fail:
940 return (error);
1231}
1232
941}
942
1233static void
1234targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
943/* Copy completed ccb back to the user */
944static int
945targreturnccb(struct targ_softc *softc, union ccb *ccb)
1235{
946{
1236 struct ccb_queue *pending_queue;
1237 struct ccb_accept_tio *atio;
1238 struct bio_queue_head *bioq;
1239 struct bio *bp;
1240 struct targ_cmd_desc *desc;
1241 struct ccb_hdr *ccbh;
1242 int s;
947 struct targ_cmd_descr *descr;
948 struct ccb_hdr *u_ccbh;
949 size_t ccb_len;
950 int error;
1243
951
1244 s = splbio();
1245 pending_queue = NULL;
1246 bioq = NULL;
1247 ccbh = NULL;
1248 /* Only run one request at a time to maintain data ordering. */
1249 if (softc->state != TARG_STATE_NORMAL
1250 || TAILQ_FIRST(&softc->work_queue) != NULL
1251 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
1252 splx(s);
1253 return;
1254 }
952 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targreturnccb %p\n", ccb));
953 descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
954 u_ccbh = &descr->user_ccb->ccb_h;
1255
955
1256 if (((bp = bioq_first(&softc->snd_bio_queue)) != NULL
1257 || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
1258 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {
956 /* Copy out the central portion of the ccb_hdr */
957 copyout(&ccb->ccb_h.retry_count, &u_ccbh->retry_count,
958 offsetof(struct ccb_hdr, periph_priv) -
959 offsetof(struct ccb_hdr, retry_count));
1259
960
1260 if (bp == NULL)
1261 softc->flags &= ~TARG_FLAG_SEND_EOF;
1262 else {
1263 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1264 ("De-Queued a SEND buffer %ld\n",
1265 bp->bio_bcount));
1266 }
1267 bioq = &softc->snd_bio_queue;
1268 pending_queue = &softc->snd_ccb_queue;
1269 } else if (((bp = bioq_first(&softc->rcv_bio_queue)) != NULL
1270 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
1271 && (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
1272
1273 if (bp == NULL)
1274 softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
1275 else {
1276 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1277 ("De-Queued a RECEIVE buffer %ld\n",
1278 bp->bio_bcount));
1279 }
1280 bioq = &softc->rcv_bio_queue;
1281 pending_queue = &softc->rcv_ccb_queue;
961 /* Copy out the rest of the ccb (after the ccb_hdr) */
962 ccb_len = targccblen(ccb->ccb_h.func_code) - sizeof(struct ccb_hdr);
963 if (descr->mapinfo.num_bufs_used != 0)
964 cam_periph_unmapmem(ccb, &descr->mapinfo);
965 error = copyout(&ccb->ccb_h + 1, u_ccbh + 1, ccb_len);
966 if (error != 0) {
967 xpt_print_path(softc->path);
968 printf("targreturnccb - CCB copyout failed (%d)\n",
969 error);
1282 }
970 }
971 /* Free CCB or send back to devq. */
972 targfreeccb(softc, ccb);
1283
973
1284 if (pending_queue != NULL) {
1285 /* Process a request */
1286 atio = (struct ccb_accept_tio *)ccbh;
1287 TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
1288 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1289 desc->bp = bp;
1290 if (bp == NULL) {
1291 /* EOF */
1292 desc->data = NULL;
1293 desc->data_increment = 0;
1294 desc->data_resid = 0;
1295 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1296 atio->ccb_h.flags |= CAM_DIR_NONE;
1297 } else {
1298 bioq_remove(bioq, bp);
1299 desc->data = &bp->bio_data[bp->bio_bcount - bp->bio_resid];
1300 desc->data_increment =
1301 MIN(desc->data_resid, bp->bio_resid);
1302 }
1303 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1304 ("Buffer command: data %p: datacnt %d\n",
1305 desc->data, desc->data_increment));
1306 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1307 periph_links.tqe);
1308 }
1309 atio = (struct ccb_accept_tio *)TAILQ_FIRST(&softc->work_queue);
1310 if (atio != NULL) {
1311 int priority;
1312
1313 priority = (atio->ccb_h.flags & CAM_DIS_DISCONNECT) ? 0 : 1;
1314 splx(s);
1315 xpt_schedule(periph, priority);
1316 } else
1317 splx(s);
974 return (error);
1318}
1319
975}
976
1320static void
1321targstart(struct cam_periph *periph, union ccb *start_ccb)
977static union ccb *
978targgetccb(struct targ_softc *softc, xpt_opcode type, int priority)
1322{
979{
1323 struct targ_softc *softc;
1324 struct ccb_hdr *ccbh;
1325 struct ccb_accept_tio *atio;
1326 struct targ_cmd_desc *desc;
1327 struct ccb_scsiio *csio;
1328 targ_ccb_flags flags;
1329 int s;
980 union ccb *ccb;
981 int ccb_len;
1330
982
1331 softc = (struct targ_softc *)periph->softc;
1332
1333 s = splbio();
1334 ccbh = TAILQ_FIRST(&softc->work_queue);
1335 if (periph->immediate_priority <= periph->pinfo.priority) {
1336 start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING;
1337 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1338 periph_links.sle);
1339 periph->immediate_priority = CAM_PRIORITY_NONE;
1340 splx(s);
1341 wakeup(&periph->ccb_list);
1342 } else if (ccbh == NULL) {
1343 splx(s);
1344 xpt_release_ccb(start_ccb);
1345 } else {
1346 TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
1347 splx(s);
1348 atio = (struct ccb_accept_tio*)ccbh;
1349 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
983 ccb_len = targccblen(type);
984 MALLOC(ccb, union ccb *, ccb_len, M_TARG, M_WAITOK);
985 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("getccb %p\n", ccb));
1350
986
1351 /* Is this a tagged request? */
1352 flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT |
1353 CAM_TAG_ACTION_VALID | CAM_DIR_MASK | CAM_SEND_STATUS);
1354
1355 /*
1356 * If we are done with the transaction, tell the
1357 * controller to send status and perform a CMD_CMPLT.
1358 */
1359 if (desc->user_atio == 0 &&
1360 desc->data_resid == desc->data_increment) {
1361 flags |= CAM_SEND_STATUS;
1362 }
1363
1364 csio = &start_ccb->csio;
1365 cam_fill_ctio(csio,
1366 /*retries*/2,
1367 targdone,
1368 flags,
1369 (flags & CAM_TAG_ACTION_VALID) ?
1370 MSG_SIMPLE_Q_TAG : 0,
1371 atio->tag_id,
1372 atio->init_id,
1373 desc->status,
1374 /*data_ptr*/desc->data_increment == 0
1375 ? NULL : desc->data,
1376 /*dxfer_len*/desc->data_increment,
1377 /*timeout*/desc->timeout);
1378
1379 if ((flags & CAM_SEND_STATUS) != 0
1380 && (desc->status == SCSI_STATUS_CHECK_COND
1381 || desc->status == SCSI_STATUS_CMD_TERMINATED)) {
1382 struct initiator_state *istate;
1383
1384 istate = &softc->istate[atio->init_id];
1385 csio->sense_len = istate->sense_data.extra_len
1386 + offsetof(struct scsi_sense_data,
1387 extra_len);
1388 bcopy(&istate->sense_data, &csio->sense_data,
1389 csio->sense_len);
1390 csio->ccb_h.flags |= CAM_SEND_SENSE;
1391 } else {
1392 csio->sense_len = 0;
1393 }
1394
1395 start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1396 start_ccb->ccb_h.ccb_atio = atio;
1397 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1398 ("Sending a CTIO (flags 0x%x)\n", csio->ccb_h.flags));
1399 TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h,
1400 periph_links.tqe);
1401 xpt_action(start_ccb);
1402 /*
1403 * If the queue was frozen waiting for the response
1404 * to this ATIO (for instance disconnection was disallowed),
1405 * then release it now that our response has been queued.
1406 */
1407 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1408 cam_release_devq(periph->path,
1409 /*relsim_flags*/0,
1410 /*reduction*/0,
1411 /*timeout*/0,
1412 /*getcount_only*/0);
1413 atio->ccb_h.status &= ~CAM_DEV_QFRZN;
1414 }
1415 s = splbio();
1416 ccbh = TAILQ_FIRST(&softc->work_queue);
1417 splx(s);
1418 }
1419 if (ccbh != NULL)
1420 targrunqueue(periph, softc);
987 xpt_setup_ccb(&ccb->ccb_h, softc->path, priority);
988 ccb->ccb_h.func_code = type;
989 ccb->ccb_h.cbfcnp = targdone;
990 ccb->ccb_h.targ_descr = targgetdescr(softc);
991 return (ccb);
1421}
1422
1423static void
992}
993
994static void
1424targdone(struct cam_periph *periph, union ccb *done_ccb)
995targfreeccb(struct targ_softc *softc, union ccb *ccb)
1425{
996{
1426 struct targ_softc *softc;
997 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("targfreeccb descr %p and\n",
998 ccb->ccb_h.targ_descr));
999 FREE(ccb->ccb_h.targ_descr, M_TARG);
1427
1000
1428 softc = (struct targ_softc *)periph->softc;
1429
1430 if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) {
1431 /* Caller will release the CCB */
1432 wakeup(&done_ccb->ccb_h.cbfcnp);
1433 return;
1434 }
1435
1436 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1437 ("targdone %x\n", done_ccb->ccb_h.func_code));
1438
1439 switch (done_ccb->ccb_h.func_code) {
1001 switch (ccb->ccb_h.func_code) {
1440 case XPT_ACCEPT_TARGET_IO:
1002 case XPT_ACCEPT_TARGET_IO:
1441 {
1442 struct ccb_accept_tio *atio;
1443 struct targ_cmd_desc *descr;
1444 struct initiator_state *istate;
1445 u_int8_t *cdb;
1446 int priority;
1447
1448 atio = &done_ccb->atio;
1449 descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1450 istate = &softc->istate[atio->init_id];
1451 cdb = atio->cdb_io.cdb_bytes;
1452 if (softc->state == TARG_STATE_TEARDOWN
1453 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1454 freedescr(descr);
1455 free(done_ccb, M_DEVBUF);
1456 return;
1457 }
1458 descr->data_resid = 0;
1459 descr->data_increment = 0;
1460 descr->user_atio = 0;
1461
1462#ifdef CAMDEBUG
1463 {
1464 int i;
1465 char dcb[128];
1466 for (dcb[0] = 0, i = 0; i < atio->cdb_len; i++) {
1467 snprintf(dcb, sizeof dcb,
1468 "%s %02x", dcb, cdb[i] & 0xff);
1469 }
1470 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1471 ("flags %x cdb:%s\n", atio->ccb_h.flags, dcb));
1472 }
1473#endif
1474 if (atio->sense_len != 0) {
1475 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1476 ("ATIO with sense_len\n"));
1477
1478 /*
1479 * We had an error in the reception of
1480 * this command. Immediately issue a CA.
1481 */
1482 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1483 atio->ccb_h.flags |= CAM_DIR_NONE;
1484 descr->timeout = 5 * 1000;
1485 descr->status = SCSI_STATUS_CHECK_COND;
1486 copy_sense(softc, istate, (u_int8_t *)&atio->sense_data,
1487 atio->sense_len);
1488 set_ca_condition(periph, atio->init_id, CA_CMD_SENSE);
1489 } else if (istate->pending_ca == 0
1490 && istate->pending_ua != 0
1491 && cdb[0] != INQUIRY) {
1492
1493 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1494 ("pending_ca %d pending_ua %d\n",
1495 istate->pending_ca, istate->pending_ua));
1496
1497 /* Pending UA, tell initiator */
1498 /* Direction is always relative to the initator */
1499 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1500 atio->ccb_h.flags |= CAM_DIR_NONE;
1501 descr->timeout = 5 * 1000;
1502 descr->status = SCSI_STATUS_CHECK_COND;
1503 fill_sense(softc, atio->init_id,
1504 SSD_CURRENT_ERROR, SSD_KEY_UNIT_ATTENTION,
1505 0x29,
1506 istate->pending_ua == UA_POWER_ON ? 1 : 2);
1507 set_ca_condition(periph, atio->init_id, CA_UNIT_ATTN);
1003 case XPT_IMMED_NOTIFY:
1004 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb));
1005 FREE(ccb, M_TARG);
1006 break;
1007 default:
1008 /* Send back CCB if we got it from the periph */
1009 if (XPT_FC_IS_QUEUED(ccb)) {
1010 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH,
1011 ("returning queued ccb %p\n", ccb));
1012 xpt_release_ccb(ccb);
1508 } else {
1013 } else {
1509 /*
1510 * Save the current CA and UA status so
1511 * they can be used by this command.
1512 */
1513 ua_types pending_ua;
1514 ca_types pending_ca;
1515
1516 pending_ua = istate->pending_ua;
1517 pending_ca = istate->pending_ca;
1518
1519 /*
1520 * As per the SCSI2 spec, any command that occurs
1521 * after a CA is reported, clears the CA. We must
1522 * also clear the UA condition, if any, that caused
1523 * the CA to occur assuming the UA is not for a
1524 * persistant condition.
1525 */
1526 istate->pending_ca = CA_NONE;
1527 if (pending_ca == CA_UNIT_ATTN)
1528 istate->pending_ua = UA_NONE;
1529
1530 /*
1531 * Determine the type of incoming command and
1532 * setup our buffer for a response.
1533 */
1534 switch (cdb[0]) {
1535 case INQUIRY:
1536 {
1537 struct scsi_inquiry *inq;
1538 struct scsi_sense_data *sense;
1539
1540 inq = (struct scsi_inquiry *)cdb;
1541 sense = &istate->sense_data;
1542 descr->status = SCSI_STATUS_OK;
1543 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1544 ("Saw an inquiry!\n"));
1545 /*
1546 * Validate the command. We don't
1547 * support any VPD pages, so complain
1548 * if EVPD is set.
1549 */
1550 if ((inq->byte2 & SI_EVPD) != 0
1551 || inq->page_code != 0) {
1552 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1553 atio->ccb_h.flags |= CAM_DIR_NONE;
1554 descr->timeout = 5 * 1000;
1555 descr->status = SCSI_STATUS_CHECK_COND;
1556 fill_sense(softc, atio->init_id,
1557 SSD_CURRENT_ERROR,
1558 SSD_KEY_ILLEGAL_REQUEST,
1559 /*asc*/0x24, /*ascq*/0x00);
1560 sense->extra_len =
1561 offsetof(struct scsi_sense_data,
1562 extra_bytes)
1563 - offsetof(struct scsi_sense_data,
1564 extra_len);
1565 set_ca_condition(periph, atio->init_id,
1566 CA_CMD_SENSE);
1567 }
1568
1569 if ((inq->byte2 & SI_EVPD) != 0) {
1570 sense->sense_key_spec[0] =
1571 SSD_SCS_VALID|SSD_FIELDPTR_CMD
1572 |SSD_BITPTR_VALID| /*bit value*/1;
1573 sense->sense_key_spec[1] = 0;
1574 sense->sense_key_spec[2] =
1575 offsetof(struct scsi_inquiry,
1576 byte2);
1577 } else if (inq->page_code != 0) {
1578 sense->sense_key_spec[0] =
1579 SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1580 sense->sense_key_spec[1] = 0;
1581 sense->sense_key_spec[2] =
1582 offsetof(struct scsi_inquiry,
1583 page_code);
1584 }
1585 if (descr->status == SCSI_STATUS_CHECK_COND)
1586 break;
1587
1588 /*
1589 * Direction is always relative
1590 * to the initator.
1591 */
1592 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1593 atio->ccb_h.flags |= CAM_DIR_IN;
1594 descr->data = softc->inq_data;
1595 descr->data_resid =
1596 MIN(softc->inq_data_len,
1597 SCSI_CDB6_LEN(inq->length));
1598 descr->data_increment = descr->data_resid;
1599 descr->timeout = 5 * 1000;
1600 break;
1601 }
1602 case TEST_UNIT_READY:
1603 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1604 atio->ccb_h.flags |= CAM_DIR_NONE;
1605 descr->timeout = 5 * 1000;
1606 descr->status = SCSI_STATUS_OK;
1607 break;
1608 case REQUEST_SENSE:
1609 {
1610 struct scsi_request_sense *rsense;
1611 struct scsi_sense_data *sense;
1612
1613 rsense = (struct scsi_request_sense *)cdb;
1614 sense = &istate->sense_data;
1615 if (pending_ca == 0) {
1616 fill_sense(softc, atio->init_id,
1617 SSD_CURRENT_ERROR,
1618 SSD_KEY_NO_SENSE, 0x00,
1619 0x00);
1620 CAM_DEBUG(periph->path,
1621 CAM_DEBUG_PERIPH,
1622 ("No pending CA!\n"));
1623 }
1624 /*
1625 * Direction is always relative
1626 * to the initator.
1627 */
1628 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1629 atio->ccb_h.flags |= CAM_DIR_IN;
1630 descr->data = sense;
1631 descr->data_resid =
1632 offsetof(struct scsi_sense_data,
1633 extra_len)
1634 + sense->extra_len;
1635 descr->data_resid =
1636 MIN(descr->data_resid,
1637 SCSI_CDB6_LEN(rsense->length));
1638 descr->data_increment = descr->data_resid;
1639 descr->timeout = 5 * 1000;
1640 descr->status = SCSI_STATUS_OK;
1641 break;
1642 }
1643 case RECEIVE:
1644 case SEND:
1645 if (SID_TYPE(softc->inq_data) == T_PROCESSOR) {
1646 struct scsi_send_receive *sr;
1647
1648 sr = (struct scsi_send_receive *)cdb;
1649
1650 /*
1651 * Direction is always relative
1652 * to the initator.
1653 */
1654 atio->ccb_h.flags &= ~CAM_DIR_MASK;
1655 descr->data_resid = scsi_3btoul(sr->xfer_len);
1656 descr->timeout = 5 * 1000;
1657 descr->status = SCSI_STATUS_OK;
1658 if (cdb[0] == SEND) {
1659 atio->ccb_h.flags |= CAM_DIR_OUT;
1660 CAM_DEBUG(periph->path,
1661 CAM_DEBUG_PERIPH,
1662 ("Saw a SEND!\n"));
1663 atio->ccb_h.flags |= CAM_DIR_OUT;
1664 TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1665 &atio->ccb_h,
1666 periph_links.tqe);
1667 selwakeup(&softc->snd_select);
1668 } else {
1669 atio->ccb_h.flags |= CAM_DIR_IN;
1670 CAM_DEBUG(periph->path,
1671 CAM_DEBUG_PERIPH,
1672 ("Saw a RECEIVE!\n"));
1673 TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1674 &atio->ccb_h,
1675 periph_links.tqe);
1676 selwakeup(&softc->rcv_select);
1677 }
1678 /*
1679 * Attempt to satisfy this request with
1680 * a user buffer.
1681 */
1682 targrunqueue(periph, softc);
1683 return;
1684 }
1685 default:
1686 /*
1687 * Queue for consumption by our userland
1688 * counterpart and transition to the exception
1689 * state.
1690 */
1691 descr->data_resid = 0;
1692 descr->data_increment = 0;
1693 descr->user_atio = 1;
1694 TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1695 &atio->ccb_h,
1696 periph_links.tqe);
1697 softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1698 targfireexception(periph, softc);
1699 return;
1700 }
1014 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH,
1015 ("freeing ccb %p\n", ccb));
1016 FREE(ccb, M_TARG);
1701 }
1017 }
1702
1703 /* Queue us up to receive a Continue Target I/O ccb. */
1704 if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
1705 TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1706 periph_links.tqe);
1707 priority = 0;
1708 } else {
1709 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1710 periph_links.tqe);
1711 priority = 1;
1712 }
1713 xpt_schedule(periph, priority);
1714 break;
1715 }
1018 break;
1019 }
1716 case XPT_CONT_TARGET_IO:
1717 {
1718 struct ccb_scsiio *csio;
1719 struct ccb_accept_tio *atio;
1720 struct targ_cmd_desc *desc;
1721 struct bio *bp;
1722 int error, lastctio;
1020}
1723
1021
1724 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1725 ("Received completed CTIO\n"));
1726 csio = &done_ccb->csio;
1727 atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1728 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1022static struct targ_cmd_descr *
1023targgetdescr(struct targ_softc *softc)
1024{
1025 struct targ_cmd_descr *descr;
1729
1026
1730 TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h,
1731 periph_links.tqe);
1027 MALLOC(descr, struct targ_cmd_descr *, sizeof(*descr), M_TARG,
1028 M_WAITOK);
1029 descr->mapinfo.num_bufs_used = 0;
1030 return (descr);
1031}
1732
1032
1733 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1734 printf("CCB with error %x\n", done_ccb->ccb_h.status);
1735 error = targerror(done_ccb, 0, 0);
1736 if (error == ERESTART)
1737 break;
1738 /*
1739 * Right now we don't need to do anything
1740 * prior to unfreezing the queue. This may
1741 * change if certain errors are reported while
1742 * we are in a connected state.
1743 */
1744 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1745 printf("Releasing Queue\n");
1746 cam_release_devq(done_ccb->ccb_h.path,
1747 /*relsim_flags*/0,
1748 /*reduction*/0,
1749 /*timeout*/0,
1750 /*getcount_only*/0);
1751 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1752 }
1753 } else
1754 error = 0;
1033static void
1034targinit(void)
1035{
1036 mtx_init(&targ_mtx, "targ global", NULL, MTX_DEF);
1037 EVENTHANDLER_REGISTER(dev_clone, targclone, 0, 1000);
1038 cdevsw_add(&targ_cdevsw);
1039}
1755
1040
1756 /*
1757 * If we shipped back sense data when completing
1758 * this command, clear the pending CA for it.
1759 */
1760 if (done_ccb->ccb_h.status & CAM_SENT_SENSE) {
1761 struct initiator_state *istate;
1041static void
1042targclone(void *arg, char *name, int namelen, dev_t *dev)
1043{
1044 int u;
1762
1045
1763 istate = &softc->istate[csio->init_id];
1764 if (istate->pending_ca == CA_UNIT_ATTN)
1765 istate->pending_ua = UA_NONE;
1766 istate->pending_ca = CA_NONE;
1767 softc->istate[csio->init_id].pending_ca = CA_NONE;
1768 done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;
1769 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1770 ("Sent Sense\n"));
1771 done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1772 }
1046 if (*dev != NODEV)
1047 return;
1048 if (dev_stdclone(name, NULL, "targ", &u) != 1)
1049 return;
1050 *dev = make_dev(&targ_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL,
1051 0600, "targ%d", u);
1052 (*dev)->si_flags |= SI_CHEAPCLONE;
1053}
1773
1054
1774 if (done_ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
1775 struct initiator_state *istate;
1055static void
1056targasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
1057{
1058 /* All events are handled in usermode by INOTs */
1059 panic("targasync() called, should be an INOT instead");
1060}
1776
1061
1777 istate = &softc->istate[csio->init_id];
1778 copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
1779 csio->sense_len);
1780 set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
1781 done_ccb->ccb_h.status &= ~CAM_AUTOSNS_VALID;
1782 }
1783 /*
1784 * Was this the last CTIO?
1785 */
1786 lastctio = done_ccb->ccb_h.status & CAM_SEND_STATUS;
1062/* Cancel all pending requests and CCBs awaiting work. */
1063static void
1064abort_all_pending(struct targ_softc *softc)
1065{
1066 struct targ_cmd_descr *descr;
1067 struct ccb_abort cab;
1068 struct ccb_hdr *ccb_h;
1787
1069
1788 desc->data_increment -= csio->resid;
1789 desc->data_resid -= desc->data_increment;
1790 if ((bp = desc->bp) != NULL) {
1070 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("abort_all_pending\n"));
1791
1071
1792 bp->bio_resid -= desc->data_increment;
1793 bp->bio_error = error;
1072 /* First abort the descriptors awaiting resources */
1073 while ((descr = TAILQ_FIRST(&softc->work_queue)) != NULL) {
1074 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
1075 ("Aborting descr from workq %p\n", descr));
1076 TAILQ_REMOVE(&softc->work_queue, descr, tqe);
1077 TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe);
1078 }
1794
1079
1795 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1796 ("Buffer I/O Completed - Resid %ld:%d\n",
1797 bp->bio_resid, desc->data_resid));
1798 /*
1799 * Send the buffer back to the client if
1800 * either the command has completed or all
1801 * buffer space has been consumed.
1802 */
1803 if (desc->data_resid == 0
1804 || bp->bio_resid == 0
1805 || error != 0) {
1806 if (bp->bio_resid != 0)
1807 /* Short transfer */
1808 bp->bio_flags |= BIO_ERROR;
1809
1810 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1811 ("Completing a buffer\n"));
1812 biodone(bp);
1813 desc->bp = NULL;
1814 }
1080 /*
1081 * Then abort all pending CCBs.
1082 * targdone() will return the aborted CCB via user_ccb_queue
1083 */
1084 xpt_setup_ccb(&cab.ccb_h, softc->path, /*priority*/0);
1085 cab.ccb_h.func_code = XPT_ABORT;
1086 cab.ccb_h.status = CAM_REQ_CMP_ERR;
1087 TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue, periph_links.tqe) {
1088 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
1089 ("Aborting pending CCB %p\n", ccb_h));
1090 cab.abort_ccb = (union ccb *)ccb_h;
1091 xpt_action((union ccb *)&cab);
1092 if (cab.ccb_h.status != CAM_REQ_CMP) {
1093 xpt_print_path(cab.ccb_h.path);
1094 printf("Unable to abort CCB, status %#x\n",
1095 cab.ccb_h.status);
1815 }
1096 }
1816
1817 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1818 atio->ccb_h.status |= CAM_DEV_QFRZN;
1819 xpt_release_ccb(done_ccb);
1820 if (softc->state != TARG_STATE_TEARDOWN) {
1821 if (lastctio) {
1822 /*
1823 * Send the original accept TIO back to the
1824 * controller to handle more work.
1825 */
1826 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1827 ("Returning ATIO to target SIM\n"));
1828 atio->ccb_h.ccb_flags = TARG_CCB_NONE;
1829 xpt_action((union ccb *)atio);
1830 break;
1831 }
1832
1833 if (SID_TYPE(softc->inq_data) == T_PROCESSOR) {
1834 /* Queue us up for another buffer */
1835 if (atio->cdb_io.cdb_bytes[0] == SEND) {
1836 if (desc->bp != NULL)
1837 TAILQ_INSERT_HEAD(&softc->snd_bio_queue.queue,
1838 bp, bio_queue);
1839 TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1840 &atio->ccb_h,
1841 periph_links.tqe);
1842 } else {
1843 if (desc->bp != NULL)
1844 TAILQ_INSERT_HEAD(&softc->rcv_bio_queue.queue,
1845 bp, bio_queue);
1846 TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1847 &atio->ccb_h,
1848 periph_links.tqe);
1849 }
1850 desc->bp = NULL;
1851 }
1852 targrunqueue(periph, softc);
1853 } else {
1854 if (desc->bp != NULL) {
1855 bp->bio_flags |= BIO_ERROR;
1856 bp->bio_error = ENXIO;
1857 biodone(bp);
1858 }
1859 freedescr(desc);
1860 free(atio, M_DEVBUF);
1861 }
1862 break;
1863 }
1097 }
1864 case XPT_IMMED_NOTIFY:
1865 {
1866 int frozen;
1867
1098
1868 frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1869 if (softc->state == TARG_STATE_TEARDOWN) {
1870 SLIST_REMOVE(&softc->immed_notify_slist,
1871 &done_ccb->ccb_h, ccb_hdr,
1872 periph_links.sle);
1873 free(done_ccb, M_DEVBUF);
1874 } else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
1875 free(done_ccb, M_DEVBUF);
1876 } else {
1877 printf("Saw event %x:%x\n", done_ccb->ccb_h.status,
1878 done_ccb->cin.message_args[0]);
1879 /* Process error condition. */
1880 targinoterror(periph, softc, &done_ccb->cin);
1881
1882 /* Requeue for another immediate event */
1883 xpt_action(done_ccb);
1884 }
1885 if (frozen != 0)
1886 cam_release_devq(periph->path,
1887 /*relsim_flags*/0,
1888 /*opening reduction*/0,
1889 /*timeout*/0,
1890 /*getcount_only*/0);
1891 break;
1099 /* If we aborted at least one pending CCB ok, wait for it. */
1100 if (cab.ccb_h.status == CAM_REQ_CMP) {
1101 msleep(&softc->pending_ccb_queue, &softc->mtx,
1102 PRIBIO | PCATCH, "tgabrt", 0);
1892 }
1103 }
1893 case XPT_DEBUG:
1894 wakeup(&done_ccb->ccb_h.cbfcnp);
1895 break;
1896 default:
1897 panic("targdone: Impossible xpt opcode %x encountered.",
1898 done_ccb->ccb_h.func_code);
1899 /* NOTREACHED */
1900 break;
1901 }
1104
1105 /* If we aborted anything from the work queue, wakeup user. */
1106 if (!TAILQ_EMPTY(&softc->user_ccb_queue)
1107 || !TAILQ_EMPTY(&softc->abort_queue))
1108 notify_user(softc);
1902}
1903
1109}
1110
1904/*
1905 * Transition to the exception state and notify our symbiotic
1906 * userland process of the change.
1907 */
1111/* Notify the user that data is ready */
1908static void
1112static void
1909targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1113notify_user(struct targ_softc *softc)
1910{
1911 /*
1114{
1115 /*
1912 * return all pending buffers with short read/write status so our
1913 * process unblocks, and do a selwakeup on any process queued
1914 * waiting for reads or writes. When the selwakeup is performed,
1915 * the waking process will wakeup, call our poll routine again,
1916 * and pick up the exception.
1116 * Notify users sleeping via poll(), kqueue(), and
1117 * blocking read().
1917 */
1118 */
1918 struct bio *bp;
1919
1920 if (softc->state != TARG_STATE_NORMAL)
1921 /* Already either tearing down or in exception state */
1922 return;
1923
1924 softc->state = TARG_STATE_EXCEPTION;
1925
1926 while ((bp = bioq_first(&softc->snd_bio_queue)) != NULL) {
1927 bioq_remove(&softc->snd_bio_queue, bp);
1928 bp->bio_flags |= BIO_ERROR;
1929 biodone(bp);
1930 }
1931
1932 while ((bp = bioq_first(&softc->rcv_bio_queue)) != NULL) {
1933 bioq_remove(&softc->snd_bio_queue, bp);
1934 bp->bio_flags |= BIO_ERROR;
1935 biodone(bp);
1936 }
1937
1938 selwakeup(&softc->snd_select);
1939 selwakeup(&softc->rcv_select);
1119 selwakeup(&softc->read_select);
1120 KNOTE(&softc->read_select.si_note, 0);
1121 wakeup(&softc->user_ccb_queue);
1940}
1941
1122}
1123
1942static void
1943targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1944 struct ccb_immed_notify *inot)
1124/* Convert CAM status to errno values */
1125static int
1126targcamstatus(cam_status status)
1945{
1127{
1946 cam_status status;
1947 int sense;
1948
1949 status = inot->ccb_h.status;
1950 sense = (status & CAM_AUTOSNS_VALID) != 0;
1951 status &= CAM_STATUS_MASK;
1952 switch (status) {
1953 case CAM_SCSI_BUS_RESET:
1954 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1955 UA_BUS_RESET);
1956 abort_pending_transactions(periph,
1957 /*init_id*/CAM_TARGET_WILDCARD,
1958 TARG_TAG_WILDCARD, EINTR,
1959 /*to_held_queue*/FALSE);
1960 softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1961 targfireexception(periph, softc);
1962 break;
1963 case CAM_BDR_SENT:
1964 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1965 UA_BDR);
1966 abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1967 TARG_TAG_WILDCARD, EINTR,
1968 /*to_held_queue*/FALSE);
1969 softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
1970 targfireexception(periph, softc);
1971 break;
1972 case CAM_MESSAGE_RECV:
1973 switch (inot->message_args[0]) {
1974 case MSG_INITIATOR_DET_ERR:
1975 break;
1976 case MSG_ABORT:
1977 break;
1978 case MSG_BUS_DEV_RESET:
1979 break;
1980 case MSG_ABORT_TAG:
1981 break;
1982 case MSG_CLEAR_QUEUE:
1983 break;
1984 case MSG_TERM_IO_PROC:
1985 break;
1986 default:
1987 break;
1988 }
1989 break;
1128 switch (status & CAM_STATUS_MASK) {
1129 case CAM_REQ_CMP: /* CCB request completed without error */
1130 return (0);
1131 case CAM_REQ_INPROG: /* CCB request is in progress */
1132 return (EINPROGRESS);
1133 case CAM_REQ_CMP_ERR: /* CCB request completed with an error */
1134 return (EIO);
1135 case CAM_PROVIDE_FAIL: /* Unable to provide requested capability */
1136 return (ENOTTY);
1137 case CAM_FUNC_NOTAVAIL: /* The requested function is not available */
1138 return (ENOTSUP);
1139 case CAM_LUN_ALRDY_ENA: /* LUN is already enabled for target mode */
1140 return (EADDRINUSE);
1141 case CAM_PATH_INVALID: /* Supplied Path ID is invalid */
1142 case CAM_DEV_NOT_THERE: /* SCSI Device Not Installed/there */
1143 return (ENOENT);
1144 case CAM_REQ_ABORTED: /* CCB request aborted by the host */
1145 return (ECANCELED);
1146 case CAM_CMD_TIMEOUT: /* Command timeout */
1147 return (ETIMEDOUT);
1148 case CAM_REQUEUE_REQ: /* Requeue to preserve transaction ordering */
1149 return (EAGAIN);
1150 case CAM_REQ_INVALID: /* CCB request was invalid */
1151 return (EINVAL);
1152 case CAM_RESRC_UNAVAIL: /* Resource Unavailable */
1153 return (ENOMEM);
1154 case CAM_BUSY: /* CAM subsytem is busy */
1155 case CAM_UA_ABORT: /* Unable to abort CCB request */
1156 return (EBUSY);
1990 default:
1157 default:
1991 break;
1158 return (ENXIO);
1992 }
1993}
1994
1159 }
1160}
1161
1995static int
1996targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1162static size_t
1163targccblen(xpt_opcode func_code)
1997{
1164{
1998 struct cam_periph *periph;
1999 struct targ_softc *softc;
2000 struct ccb_scsiio *csio;
2001 struct initiator_state *istate;
2002 cam_status status;
2003 int frozen;
2004 int sense;
2005 int error;
2006 int on_held_queue;
1165 int len;
2007
1166
2008 periph = xpt_path_periph(ccb->ccb_h.path);
2009 softc = (struct targ_softc *)periph->softc;
2010 status = ccb->ccb_h.status;
2011 sense = (status & CAM_AUTOSNS_VALID) != 0;
2012 frozen = (status & CAM_DEV_QFRZN) != 0;
2013 status &= CAM_STATUS_MASK;
2014 on_held_queue = FALSE;
2015 csio = &ccb->csio;
2016 istate = &softc->istate[csio->init_id];
2017 switch (status) {
2018 case CAM_REQ_ABORTED:
2019 if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
2020
2021 /*
2022 * Place this CCB into the initiators
2023 * 'held' queue until the pending CA is cleared.
2024 * If there is no CA pending, reissue immediately.
2025 */
2026 if (istate->pending_ca == 0) {
2027 ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
2028 xpt_action(ccb);
2029 } else {
2030 ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
2031 TAILQ_INSERT_TAIL(&softc->pending_queue,
2032 &ccb->ccb_h,
2033 periph_links.tqe);
2034 }
2035 /* The command will be retried at a later time. */
2036 on_held_queue = TRUE;
2037 error = ERESTART;
2038 break;
2039 }
2040 /* FALLTHROUGH */
2041 case CAM_SCSI_BUS_RESET:
2042 case CAM_BDR_SENT:
2043 case CAM_REQ_TERMIO:
2044 case CAM_CMD_TIMEOUT:
2045 /* Assume we did not send any data */
2046 csio->resid = csio->dxfer_len;
2047 error = EIO;
1167 /* Codes we expect to see as a target */
1168 switch (func_code) {
1169 case XPT_CONT_TARGET_IO:
1170 case XPT_SCSI_IO:
1171 len = sizeof(struct ccb_scsiio);
2048 break;
1172 break;
2049 case CAM_SEL_TIMEOUT:
2050 if (ccb->ccb_h.retry_count > 0) {
2051 ccb->ccb_h.retry_count--;
2052 error = ERESTART;
2053 } else {
2054 /* "Select or reselect failure" */
2055 csio->resid = csio->dxfer_len;
2056 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2057 SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
2058 set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2059 error = EIO;
2060 }
1173 case XPT_ACCEPT_TARGET_IO:
1174 len = sizeof(struct ccb_accept_tio);
2061 break;
1175 break;
2062 case CAM_UNCOR_PARITY:
2063 /* "SCSI parity error" */
2064 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2065 SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
2066 set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2067 csio->resid = csio->dxfer_len;
2068 error = EIO;
1176 case XPT_IMMED_NOTIFY:
1177 len = sizeof(struct ccb_immed_notify);
2069 break;
1178 break;
2070 case CAM_NO_HBA:
2071 csio->resid = csio->dxfer_len;
2072 error = ENXIO;
1179 case XPT_REL_SIMQ:
1180 len = sizeof(struct ccb_relsim);
2073 break;
1181 break;
2074 case CAM_SEQUENCE_FAIL:
2075 if (sense != 0) {
2076 copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
2077 csio->sense_len);
2078 set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2079 }
2080 csio->resid = csio->dxfer_len;
2081 error = EIO;
1182 case XPT_PATH_INQ:
1183 len = sizeof(struct ccb_pathinq);
2082 break;
1184 break;
2083 case CAM_IDE:
2084 /* "Initiator detected error message received" */
2085 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2086 SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
2087 set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2088 csio->resid = csio->dxfer_len;
2089 error = EIO;
1185 case XPT_DEBUG:
1186 len = sizeof(struct ccb_debug);
2090 break;
1187 break;
2091 case CAM_REQUEUE_REQ:
2092 printf("Requeue Request!\n");
2093 error = ERESTART;
1188 case XPT_ABORT:
1189 len = sizeof(struct ccb_abort);
2094 break;
1190 break;
1191 case XPT_EN_LUN:
1192 len = sizeof(struct ccb_en_lun);
1193 break;
2095 default:
1194 default:
2096 csio->resid = csio->dxfer_len;
2097 error = EIO;
2098 panic("targerror: Unexpected status %x encounterd", status);
2099 /* NOTREACHED */
1195 len = sizeof(union ccb);
1196 break;
2100 }
2101
1197 }
1198
2102 if (error == ERESTART || error == 0) {
2103 /* Clear the QFRZN flag as we will release the queue */
2104 if (frozen != 0)
2105 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2106
2107 if (error == ERESTART && !on_held_queue)
2108 xpt_action(ccb);
2109
2110 if (frozen != 0)
2111 cam_release_devq(ccb->ccb_h.path,
2112 /*relsim_flags*/0,
2113 /*opening reduction*/0,
2114 /*timeout*/0,
2115 /*getcount_only*/0);
2116 }
2117 return (error);
1199 return (len);
2118}
1200}
2119
2120static struct targ_cmd_desc*
2121allocdescr()
2122{
2123 struct targ_cmd_desc* descr;
2124
2125 /* Allocate the targ_descr structure */
2126 descr = (struct targ_cmd_desc *)
2127 malloc(sizeof(*descr), M_DEVBUF, M_NOWAIT);
2128 if (descr == NULL)
2129 return (NULL);
2130
2131 bzero(descr, sizeof(*descr));
2132
2133 /* Allocate buffer backing store */
2134 descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
2135 if (descr->backing_store == NULL) {
2136 free(descr, M_DEVBUF);
2137 return (NULL);
2138 }
2139 descr->max_size = MAX_BUF_SIZE;
2140 return (descr);
2141}
2142
2143static void
2144freedescr(struct targ_cmd_desc *descr)
2145{
2146 free(descr->backing_store, M_DEVBUF);
2147 free(descr, M_DEVBUF);
2148}
2149
2150static void
2151fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
2152 u_int sense_key, u_int asc, u_int ascq)
2153{
2154 struct initiator_state *istate;
2155 struct scsi_sense_data *sense;
2156
2157 istate = &softc->istate[initiator_id];
2158 sense = &istate->sense_data;
2159 bzero(sense, sizeof(*sense));
2160 sense->error_code = error_code;
2161 sense->flags = sense_key;
2162 sense->add_sense_code = asc;
2163 sense->add_sense_code_qual = ascq;
2164
2165 sense->extra_len = offsetof(struct scsi_sense_data, fru)
2166 - offsetof(struct scsi_sense_data, extra_len);
2167}
2168
2169static void
2170copy_sense(struct targ_softc *softc, struct initiator_state *istate,
2171 u_int8_t *sense_buffer, size_t sense_len)
2172{
2173 struct scsi_sense_data *sense;
2174 size_t copylen;
2175
2176 sense = &istate->sense_data;
2177 copylen = sizeof(*sense);
2178 if (copylen > sense_len)
2179 copylen = sense_len;
2180 bcopy(sense_buffer, sense, copylen);
2181}
2182
2183static void
2184set_unit_attention_cond(struct cam_periph *periph,
2185 u_int initiator_id, ua_types ua)
2186{
2187 int start;
2188 int end;
2189 struct targ_softc *softc;
2190
2191 softc = (struct targ_softc *)periph->softc;
2192 if (initiator_id == CAM_TARGET_WILDCARD) {
2193 start = 0;
2194 end = MAX_INITIATORS - 1;
2195 } else
2196 start = end = initiator_id;
2197
2198 while (start <= end) {
2199 softc->istate[start].pending_ua = ua;
2200 start++;
2201 }
2202}
2203
2204static void
2205set_ca_condition(struct cam_periph *periph, u_int initiator_id, ca_types ca)
2206{
2207 struct targ_softc *softc;
2208
2209 softc = (struct targ_softc *)periph->softc;
2210 softc->istate[initiator_id].pending_ca = ca;
2211 abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2212 /*errno*/0, /*to_held_queue*/TRUE);
2213}
2214
/*
 * Abort queued transactions that match the given initiator id (or
 * CAM_TARGET_WILDCARD for all initiators) and tag (or TARG_TAG_WILDCARD
 * for all tags).  Two classes of CCB are handled:
 *
 *   1. ATIOs sitting on the driver's internal queues (work, send,
 *      receive) awaiting resources: any associated bio is failed with
 *      'errno' (or ENXIO during teardown) and the ATIO is either freed
 *      (teardown) or handed back to the controller.
 *   2. CTIOs on the pending queue already submitted to the controller:
 *      each is aborted via an XPT_ABORT CCB.
 *
 * When 'to_held_queue' is nonzero, the internal queues are left alone
 * (the device queue is frozen anyway) and aborted CTIOs are flagged with
 * TARG_CCB_ABORT_TO_HELDQ so completion routing parks them on the held
 * queue rather than completing them.
 */
static void
abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
			   u_int tag_id, int errno, int to_held_queue)
{
	struct ccb_abort cab;
	struct ccb_queue *atio_queues[3];
	struct targ_softc *softc;
	struct ccb_hdr *ccbh;
	u_int i;

	softc = (struct targ_softc *)periph->softc;

	atio_queues[0] = &softc->work_queue;
	atio_queues[1] = &softc->snd_ccb_queue;
	atio_queues[2] = &softc->rcv_ccb_queue;

	/* First address the ATIOs awaiting resources */
	for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
		struct ccb_queue *atio_queue;

		if (to_held_queue) {
			/*
			 * The device queue is frozen anyway, so there
			 * is nothing for us to do.
			 */
			continue;
		}
		atio_queue = atio_queues[i];
		ccbh = TAILQ_FIRST(atio_queue);
		while (ccbh != NULL) {
			struct ccb_accept_tio *atio;
			struct targ_cmd_desc *desc;

			atio = (struct ccb_accept_tio *)ccbh;
			desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
			/* Advance before a possible removal invalidates links */
			ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);

			/* Only abort the CCBs that match */
			if ((atio->init_id != initiator_id
			  && initiator_id != CAM_TARGET_WILDCARD)
			 || (tag_id != TARG_TAG_WILDCARD
			  && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
			   || atio->tag_id != tag_id)))
				continue;

			TAILQ_REMOVE(atio_queue, &atio->ccb_h,
				     periph_links.tqe);

			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
				  ("Aborting ATIO\n"));
			if (desc->bp != NULL) {
				/* Fail the user I/O tied to this ATIO */
				desc->bp->bio_flags |= BIO_ERROR;
				if (softc->state != TARG_STATE_TEARDOWN)
					desc->bp->bio_error = errno;
				else
					desc->bp->bio_error = ENXIO;
				biodone(desc->bp);
				desc->bp = NULL;
			}
			if (softc->state == TARG_STATE_TEARDOWN) {
				freedescr(desc);
				free(atio, M_DEVBUF);
			} else {
				/* Return the ATIO back to the controller */
				atio->ccb_h.ccb_flags = TARG_CCB_NONE;
				xpt_action((union ccb *)atio);
			}
		}
	}

	/* Now abort the CTIOs already handed to the controller */
	ccbh = TAILQ_FIRST(&softc->pending_queue);
	while (ccbh != NULL) {
		struct ccb_scsiio *csio;

		csio = (struct ccb_scsiio *)ccbh;
		/* Advance before a possible removal invalidates links */
		ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);

		/* Only abort the CCBs that match */
		if ((csio->init_id != initiator_id
		  && initiator_id != CAM_TARGET_WILDCARD)
		 || (tag_id != TARG_TAG_WILDCARD
		  && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
		   || csio->tag_id != tag_id)))
			continue;

		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
			  ("Aborting CTIO\n"));

		TAILQ_REMOVE(&softc->pending_queue, &csio->ccb_h,
			     periph_links.tqe);

		if (to_held_queue != 0)
			csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ;
		/* Issue an XPT_ABORT against the in-flight CTIO */
		xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1);
		cab.abort_ccb = (union ccb *)csio;
		xpt_action((union ccb *)&cab);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			xpt_print_path(cab.ccb_h.path);
			printf("Unable to abort CCB.  Status %x\n",
			       cab.ccb_h.status);
		}
	}
}