1/*-
2 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
3 * Copyright (c) 2012 The FreeBSD Foundation
4 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Portions of this software were developed by Edward Tomasz Napierala
8 * under sponsorship from the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17 * substantially similar to the "NO WARRANTY" disclaimer below
18 * ("Disclaimer") and any redistribution must be conditioned upon
19 * including a substantially similar Disclaimer requirement for further
20 * binary redistribution.
21 *
22 * NO WARRANTY
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGES.
34 *
35 * $Id$
36 */
37/*
38 * CAM Target Layer, a SCSI device emulation subsystem.
39 *
40 * Author: Ken Merry <ken@FreeBSD.org>
41 */
42
43#define _CTL_C
44
45#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 288731 2015-10-05 08:55:59Z mav $");
46__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 288732 2015-10-05 08:57:16Z mav $");
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/ctype.h>
51#include <sys/kernel.h>
52#include <sys/types.h>
53#include <sys/kthread.h>
54#include <sys/bio.h>
55#include <sys/fcntl.h>
56#include <sys/lock.h>
57#include <sys/module.h>
58#include <sys/mutex.h>
59#include <sys/condvar.h>
60#include <sys/malloc.h>
61#include <sys/conf.h>
62#include <sys/ioccom.h>
63#include <sys/queue.h>
64#include <sys/sbuf.h>
65#include <sys/smp.h>
66#include <sys/endian.h>
67#include <sys/sysctl.h>
68#include <vm/uma.h>
69
70#include <cam/cam.h>
71#include <cam/scsi/scsi_all.h>
72#include <cam/scsi/scsi_da.h>
73#include <cam/ctl/ctl_io.h>
74#include <cam/ctl/ctl.h>
75#include <cam/ctl/ctl_frontend.h>
76#include <cam/ctl/ctl_util.h>
77#include <cam/ctl/ctl_backend.h>
78#include <cam/ctl/ctl_ioctl.h>
79#include <cam/ctl/ctl_ha.h>
80#include <cam/ctl/ctl_private.h>
81#include <cam/ctl/ctl_debug.h>
82#include <cam/ctl/ctl_scsi_all.h>
83#include <cam/ctl/ctl_error.h>
84
85struct ctl_softc *control_softc = NULL;
86
87/*
87 * Size and alignment macros needed for Copan-specific HA hardware. These
88 * can go away when the HA code is re-written, and uses busdma for any
89 * hardware.
90 */
91#define CTL_ALIGN_8B(target, source, type) \
92 if (((uint32_t)source & 0x7) != 0) \
93 target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
94 else \
95 target = (type)source;
96
97#define CTL_SIZE_8B(target, size) \
98 if ((size & 0x7) != 0) \
99 target = size + (0x8 - (size & 0x7)); \
100 else \
101 target = size;
102
103#define CTL_ALIGN_8B_MARGIN 16
104
105/*
88 * Template mode pages.
89 */
90
91/*
92 * Note that these are default values only. The actual values will be
93 * filled in when the user does a mode sense.
94 */
95const static struct copan_debugconf_subpage debugconf_page_default = {
96 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */
97 DBGCNF_SUBPAGE_CODE, /* subpage */
98 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
99 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
100 DBGCNF_VERSION, /* page_version */
101 {CTL_TIME_IO_DEFAULT_SECS>>8,
102 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */
103};
104
105const static struct copan_debugconf_subpage debugconf_page_changeable = {
106 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */
107 DBGCNF_SUBPAGE_CODE, /* subpage */
108 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
109 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
110 0, /* page_version */
111 {0xff,0xff}, /* ctl_time_io_secs */
112};
113
114const static struct scsi_da_rw_recovery_page rw_er_page_default = {
115 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
116 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
117 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
118 /*read_retry_count*/0,
119 /*correction_span*/0,
120 /*head_offset_count*/0,
121 /*data_strobe_offset_cnt*/0,
122 /*byte8*/SMS_RWER_LBPERE,
123 /*write_retry_count*/0,
124 /*reserved2*/0,
125 /*recovery_time_limit*/{0, 0},
126};
127
128const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
129 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
130 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
131 /*byte3*/0,
132 /*read_retry_count*/0,
133 /*correction_span*/0,
134 /*head_offset_count*/0,
135 /*data_strobe_offset_cnt*/0,
136 /*byte8*/0,
137 /*write_retry_count*/0,
138 /*reserved2*/0,
139 /*recovery_time_limit*/{0, 0},
140};
141
142const static struct scsi_format_page format_page_default = {
143 /*page_code*/SMS_FORMAT_DEVICE_PAGE,
144 /*page_length*/sizeof(struct scsi_format_page) - 2,
145 /*tracks_per_zone*/ {0, 0},
146 /*alt_sectors_per_zone*/ {0, 0},
147 /*alt_tracks_per_zone*/ {0, 0},
148 /*alt_tracks_per_lun*/ {0, 0},
149 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
150 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
151 /*bytes_per_sector*/ {0, 0},
152 /*interleave*/ {0, 0},
153 /*track_skew*/ {0, 0},
154 /*cylinder_skew*/ {0, 0},
155 /*flags*/ SFP_HSEC,
156 /*reserved*/ {0, 0, 0}
157};
158
159const static struct scsi_format_page format_page_changeable = {
160 /*page_code*/SMS_FORMAT_DEVICE_PAGE,
161 /*page_length*/sizeof(struct scsi_format_page) - 2,
162 /*tracks_per_zone*/ {0, 0},
163 /*alt_sectors_per_zone*/ {0, 0},
164 /*alt_tracks_per_zone*/ {0, 0},
165 /*alt_tracks_per_lun*/ {0, 0},
166 /*sectors_per_track*/ {0, 0},
167 /*bytes_per_sector*/ {0, 0},
168 /*interleave*/ {0, 0},
169 /*track_skew*/ {0, 0},
170 /*cylinder_skew*/ {0, 0},
171 /*flags*/ 0,
172 /*reserved*/ {0, 0, 0}
173};
174
175const static struct scsi_rigid_disk_page rigid_disk_page_default = {
176 /*page_code*/SMS_RIGID_DISK_PAGE,
177 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
178 /*cylinders*/ {0, 0, 0},
179 /*heads*/ CTL_DEFAULT_HEADS,
180 /*start_write_precomp*/ {0, 0, 0},
181 /*start_reduced_current*/ {0, 0, 0},
182 /*step_rate*/ {0, 0},
183 /*landing_zone_cylinder*/ {0, 0, 0},
184 /*rpl*/ SRDP_RPL_DISABLED,
185 /*rotational_offset*/ 0,
186 /*reserved1*/ 0,
187 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
188 CTL_DEFAULT_ROTATION_RATE & 0xff},
189 /*reserved2*/ {0, 0}
190};
191
192const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
193 /*page_code*/SMS_RIGID_DISK_PAGE,
194 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
195 /*cylinders*/ {0, 0, 0},
196 /*heads*/ 0,
197 /*start_write_precomp*/ {0, 0, 0},
198 /*start_reduced_current*/ {0, 0, 0},
199 /*step_rate*/ {0, 0},
200 /*landing_zone_cylinder*/ {0, 0, 0},
201 /*rpl*/ 0,
202 /*rotational_offset*/ 0,
203 /*reserved1*/ 0,
204 /*rotation_rate*/ {0, 0},
205 /*reserved2*/ {0, 0}
206};
207
208const static struct scsi_caching_page caching_page_default = {
209 /*page_code*/SMS_CACHING_PAGE,
210 /*page_length*/sizeof(struct scsi_caching_page) - 2,
211 /*flags1*/ SCP_DISC | SCP_WCE,
212 /*ret_priority*/ 0,
213 /*disable_pf_transfer_len*/ {0xff, 0xff},
214 /*min_prefetch*/ {0, 0},
215 /*max_prefetch*/ {0xff, 0xff},
216 /*max_pf_ceiling*/ {0xff, 0xff},
217 /*flags2*/ 0,
218 /*cache_segments*/ 0,
219 /*cache_seg_size*/ {0, 0},
220 /*reserved*/ 0,
221 /*non_cache_seg_size*/ {0, 0, 0}
222};
223
224const static struct scsi_caching_page caching_page_changeable = {
225 /*page_code*/SMS_CACHING_PAGE,
226 /*page_length*/sizeof(struct scsi_caching_page) - 2,
227 /*flags1*/ SCP_WCE | SCP_RCD,
228 /*ret_priority*/ 0,
229 /*disable_pf_transfer_len*/ {0, 0},
230 /*min_prefetch*/ {0, 0},
231 /*max_prefetch*/ {0, 0},
232 /*max_pf_ceiling*/ {0, 0},
233 /*flags2*/ 0,
234 /*cache_segments*/ 0,
235 /*cache_seg_size*/ {0, 0},
236 /*reserved*/ 0,
237 /*non_cache_seg_size*/ {0, 0, 0}
238};
239
240const static struct scsi_control_page control_page_default = {
241 /*page_code*/SMS_CONTROL_MODE_PAGE,
242 /*page_length*/sizeof(struct scsi_control_page) - 2,
243 /*rlec*/0,
244 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
245 /*eca_and_aen*/0,
246 /*flags4*/SCP_TAS,
247 /*aen_holdoff_period*/{0, 0},
248 /*busy_timeout_period*/{0, 0},
249 /*extended_selftest_completion_time*/{0, 0}
250};
251
252const static struct scsi_control_page control_page_changeable = {
253 /*page_code*/SMS_CONTROL_MODE_PAGE,
254 /*page_length*/sizeof(struct scsi_control_page) - 2,
255 /*rlec*/SCP_DSENSE,
256 /*queue_flags*/SCP_QUEUE_ALG_MASK,
257 /*eca_and_aen*/SCP_SWP,
258 /*flags4*/0,
259 /*aen_holdoff_period*/{0, 0},
260 /*busy_timeout_period*/{0, 0},
261 /*extended_selftest_completion_time*/{0, 0}
262};
263
264const static struct scsi_info_exceptions_page ie_page_default = {
265 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
266 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
267 /*info_flags*/SIEP_FLAGS_DEXCPT,
268 /*mrie*/0,
269 /*interval_timer*/{0, 0, 0, 0},
270 /*report_count*/{0, 0, 0, 0}
271};
272
273const static struct scsi_info_exceptions_page ie_page_changeable = {
274 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
275 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
276 /*info_flags*/0,
277 /*mrie*/0,
278 /*interval_timer*/{0, 0, 0, 0},
279 /*report_count*/{0, 0, 0, 0}
280};
281
282#define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4)
283
284const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
285 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
286 /*subpage_code*/0x02,
287 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
288 /*flags*/0,
289 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
290 /*descr*/{}},
291 {{/*flags*/0,
292 /*resource*/0x01,
293 /*reserved*/{0, 0},
294 /*count*/{0, 0, 0, 0}},
295 {/*flags*/0,
296 /*resource*/0x02,
297 /*reserved*/{0, 0},
298 /*count*/{0, 0, 0, 0}},
299 {/*flags*/0,
300 /*resource*/0xf1,
301 /*reserved*/{0, 0},
302 /*count*/{0, 0, 0, 0}},
303 {/*flags*/0,
304 /*resource*/0xf2,
305 /*reserved*/{0, 0},
306 /*count*/{0, 0, 0, 0}}
307 }
308};
309
310const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
311 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
312 /*subpage_code*/0x02,
313 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
314 /*flags*/0,
315 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
316 /*descr*/{}},
317 {{/*flags*/0,
318 /*resource*/0,
319 /*reserved*/{0, 0},
320 /*count*/{0, 0, 0, 0}},
321 {/*flags*/0,
322 /*resource*/0,
323 /*reserved*/{0, 0},
324 /*count*/{0, 0, 0, 0}},
325 {/*flags*/0,
326 /*resource*/0,
327 /*reserved*/{0, 0},
328 /*count*/{0, 0, 0, 0}},
329 {/*flags*/0,
330 /*resource*/0,
331 /*reserved*/{0, 0},
332 /*count*/{0, 0, 0, 0}}
333 }
334};
335
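/*
 * A minimal sketch, assuming the usual SCSI mode-page convention (this is an
 * editorial illustration, not the actual ctl_do_mode_select() logic): each
 * mode page above comes in a "default" image and a "changeable" mask image,
 * and an initiator's MODE SELECT may only alter bits that are set in the
 * changeable image.  With hypothetical names:
 */
static inline uint8_t
ctl_mode_merge_byte_sketch(uint8_t cur, uint8_t req, uint8_t chg)
{
	/* Non-changeable bits keep their current value. */
	return ((cur & ~chg) | (req & chg));
}
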
354/*
355 * XXX KDM move these into the softc.
356 */
357static int rcv_sync_msg;
358static uint8_t ctl_pause_rtr;
359
336SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
337static int worker_threads = -1;
338TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
339SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
340 &worker_threads, 1, "Number of worker threads");
341static int ctl_debug = CTL_DEBUG_NONE;
342TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
343SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
344 &ctl_debug, 0, "Enabled debug flags");
345
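/*
 * Usage sketch (assumed, not taken from this file): worker_threads is a
 * boot-time tunable (CTLFLAG_RDTUN), e.g. kern.cam.ctl.worker_threads="8"
 * in /boot/loader.conf, while debug is also writable at run time
 * (CTLFLAG_RWTUN), e.g. sysctl kern.cam.ctl.debug=1.
 */
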
346/*
347 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
348 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
349 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
350 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
351 */
352#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10
353
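/*
 * Illustrative only (hypothetical array, not part of this change): the ten
 * EVPD page codes enumerated in the comment above, which is what
 * SCSI_EVPD_NUM_SUPPORTED_PAGES counts.
 */
static const uint8_t ctl_evpd_pages_sketch[SCSI_EVPD_NUM_SUPPORTED_PAGES] = {
	0x00, 0x80, 0x83, 0x86, 0x87, 0x88, 0x8f, 0xb0, 0xb1, 0xb2
};
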
378#ifdef notyet
 354static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
355 int param);
356static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
382#endif
357static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
358static int ctl_init(void);
359void ctl_shutdown(void);
360static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
361static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
362static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
363static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
364 struct ctl_ooa *ooa_hdr,
365 struct ctl_ooa_entry *kern_entries);
366static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
367 struct thread *td);
368static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
369 struct ctl_be_lun *be_lun);
370static int ctl_free_lun(struct ctl_lun *lun);
371static void ctl_create_lun(struct ctl_be_lun *be_lun);
372static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
398/**
399static void ctl_failover_change_pages(struct ctl_softc *softc,
400 struct ctl_scsiio *ctsio, int master);
401**/
373
374static int ctl_do_mode_select(union ctl_io *io);
375static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
376 uint64_t res_key, uint64_t sa_res_key,
377 uint8_t type, uint32_t residx,
378 struct ctl_scsiio *ctsio,
379 struct scsi_per_res_out *cdb,
380 struct scsi_per_res_out_parms* param);
381static void ctl_pro_preempt_other(struct ctl_lun *lun,
382 union ctl_ha_msg *msg);
383static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
384static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
385static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
386static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
387static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
388static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
389static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
390 int alloc_len);
391static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
392 int alloc_len);
393static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
394static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
395static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
396static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
397static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
398static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
399 bool seq);
400static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
401static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
402 union ctl_io *pending_io, union ctl_io *ooa_io);
403static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
404 union ctl_io *starting_io);
405static int ctl_check_blocked(struct ctl_lun *lun);
406static int ctl_scsiio_lun_check(struct ctl_lun *lun,
407 const struct ctl_cmd_entry *entry,
408 struct ctl_scsiio *ctsio);
438//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
439#ifdef notyet
440static void ctl_failover(void);
441#endif
409static void ctl_failover_lun(struct ctl_lun *lun);
410static void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
411static void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
412static void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
413static void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
414static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
415 ctl_ua_type ua_type);
416static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
417 struct ctl_scsiio *ctsio);
418static int ctl_scsiio(struct ctl_scsiio *ctsio);
419
420static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
421static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
422 ctl_ua_type ua_type);
423static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
424 ctl_ua_type ua_type);
425static int ctl_abort_task(union ctl_io *io);
426static int ctl_abort_task_set(union ctl_io *io);
427static int ctl_i_t_nexus_reset(union ctl_io *io);
428static void ctl_run_task(union ctl_io *io);
429#ifdef CTL_IO_DELAY
430static void ctl_datamove_timer_wakeup(void *arg);
431static void ctl_done_timer_wakeup(void *arg);
432#endif /* CTL_IO_DELAY */
433
434static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
435static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
436static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
437static void ctl_datamove_remote_write(union ctl_io *io);
438static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
439static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
440static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
441static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
442 ctl_ha_dt_cb callback);
443static void ctl_datamove_remote_read(union ctl_io *io);
444static void ctl_datamove_remote(union ctl_io *io);
445static int ctl_process_done(union ctl_io *io);
446static void ctl_lun_thread(void *arg);
447static void ctl_thresh_thread(void *arg);
448static void ctl_work_thread(void *arg);
449static void ctl_enqueue_incoming(union ctl_io *io);
450static void ctl_enqueue_rtr(union ctl_io *io);
451static void ctl_enqueue_done(union ctl_io *io);
480#ifdef notyet
452static void ctl_enqueue_isc(union ctl_io *io);
482#endif
453static const struct ctl_cmd_entry *
454 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
455static const struct ctl_cmd_entry *
456 ctl_validate_command(struct ctl_scsiio *ctsio);
457static int ctl_cmd_applicable(uint8_t lun_type,
458 const struct ctl_cmd_entry *entry);
459
460static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
461static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
462static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
463static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);
464
465/*
466 * Load the serialization table. This isn't very pretty, but is probably
467 * the easiest way to do it.
468 */
469#include "ctl_ser_table.c"
470
471/*
472 * We only need to define open, close and ioctl routines for this driver.
473 */
474static struct cdevsw ctl_cdevsw = {
475 .d_version = D_VERSION,
476 .d_flags = 0,
477 .d_open = ctl_open,
478 .d_close = ctl_close,
479 .d_ioctl = ctl_ioctl,
480 .d_name = "ctl",
481};
482
483
484MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
485
486static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);
487
488static moduledata_t ctl_moduledata = {
489 "ctl",
490 ctl_module_event_handler,
491 NULL
492};
493
494DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
495MODULE_VERSION(ctl, 1);
496
522#ifdef notyet
497static struct ctl_frontend ha_frontend =
498{
499 .name = "ha",
500};
501
502static void
503ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
504 union ctl_ha_msg *msg_info)
505{
506 struct ctl_scsiio *ctsio;
507
508 if (msg_info->hdr.original_sc == NULL) {
509 printf("%s: original_sc == NULL!\n", __func__);
510 /* XXX KDM now what? */
511 return;
512 }
513
514 ctsio = &msg_info->hdr.original_sc->scsiio;
515 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
516 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
517 ctsio->io_hdr.status = msg_info->hdr.status;
518 ctsio->scsi_status = msg_info->scsi.scsi_status;
519 ctsio->sense_len = msg_info->scsi.sense_len;
520 ctsio->sense_residual = msg_info->scsi.sense_residual;
521 ctsio->residual = msg_info->scsi.residual;
522 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
544 sizeof(ctsio->sense_data));
523 msg_info->scsi.sense_len);
524 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
525 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
526 ctl_enqueue_isc((union ctl_io *)ctsio);
527}
528
529static void
530ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
531 union ctl_ha_msg *msg_info)
532{
533 struct ctl_scsiio *ctsio;
534
535 if (msg_info->hdr.serializing_sc == NULL) {
536 printf("%s: serializing_sc == NULL!\n", __func__);
537 /* XXX KDM now what? */
538 return;
539 }
540
541 ctsio = &msg_info->hdr.serializing_sc->scsiio;
563#if 0
564 /*
565 * Attempt to catch the situation where an I/O has
566 * been freed, and we're using it again.
567 */
568 if (ctsio->io_hdr.io_type == 0xff) {
569 union ctl_io *tmp_io;
570 tmp_io = (union ctl_io *)ctsio;
571 printf("%s: %p use after free!\n", __func__,
572 ctsio);
573 printf("%s: type %d msg %d cdb %x iptl: "
574 "%u:%u:%u tag 0x%04x "
575 "flag %#x status %x\n",
576 __func__,
577 tmp_io->io_hdr.io_type,
578 tmp_io->io_hdr.msg_type,
579 tmp_io->scsiio.cdb[0],
580 tmp_io->io_hdr.nexus.initid,
581 tmp_io->io_hdr.nexus.targ_port,
582 tmp_io->io_hdr.nexus.targ_lun,
583 (tmp_io->io_hdr.io_type ==
584 CTL_IO_TASK) ?
585 tmp_io->taskio.tag_num :
586 tmp_io->scsiio.tag_num,
587 tmp_io->io_hdr.flags,
588 tmp_io->io_hdr.status);
589 }
590#endif
542 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
543 ctl_enqueue_isc((union ctl_io *)ctsio);
544}
545
546void
547ctl_isc_announce_lun(struct ctl_lun *lun)
548{
549 struct ctl_softc *softc = lun->ctl_softc;
550 union ctl_ha_msg *msg;
551 struct ctl_ha_msg_lun_pr_key pr_key;
552 int i, k;
553
554 if (softc->ha_link != CTL_HA_LINK_ONLINE)
555 return;
556 mtx_lock(&lun->lun_lock);
557 i = sizeof(msg->lun);
558 if (lun->lun_devid)
559 i += lun->lun_devid->len;
560 i += sizeof(pr_key) * lun->pr_key_count;
561alloc:
562 mtx_unlock(&lun->lun_lock);
563 msg = malloc(i, M_CTL, M_WAITOK);
564 mtx_lock(&lun->lun_lock);
565 k = sizeof(msg->lun);
566 if (lun->lun_devid)
567 k += lun->lun_devid->len;
568 k += sizeof(pr_key) * lun->pr_key_count;
569 if (i < k) {
570 free(msg, M_CTL);
571 i = k;
572 goto alloc;
573 }
574 bzero(&msg->lun, sizeof(msg->lun));
575 msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
576 msg->hdr.nexus.targ_lun = lun->lun;
577 msg->hdr.nexus.targ_mapped_lun = lun->lun;
578 msg->lun.flags = lun->flags;
579 msg->lun.pr_generation = lun->PRGeneration;
580 msg->lun.pr_res_idx = lun->pr_res_idx;
581 msg->lun.pr_res_type = lun->res_type;
582 msg->lun.pr_key_count = lun->pr_key_count;
583 i = 0;
584 if (lun->lun_devid) {
585 msg->lun.lun_devid_len = lun->lun_devid->len;
586 memcpy(&msg->lun.data[i], lun->lun_devid->data,
587 msg->lun.lun_devid_len);
588 i += msg->lun.lun_devid_len;
589 }
590 for (k = 0; k < CTL_MAX_INITIATORS; k++) {
591 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
592 continue;
593 pr_key.pr_iid = k;
594 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
595 i += sizeof(pr_key);
596 }
597 mtx_unlock(&lun->lun_lock);
598 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
599 M_WAITOK);
600 free(msg, M_CTL);
601}
602
603void
604ctl_isc_announce_port(struct ctl_port *port)
605{
606 struct ctl_softc *softc = control_softc;
607 union ctl_ha_msg *msg;
608 int i;
609
610 if (port->targ_port < softc->port_min ||
611 port->targ_port >= softc->port_max ||
612 softc->ha_link != CTL_HA_LINK_ONLINE)
613 return;
614 i = sizeof(msg->port) + strlen(port->port_name) + 1;
615 if (port->lun_map)
616 i += sizeof(uint32_t) * CTL_MAX_LUNS;
617 if (port->port_devid)
618 i += port->port_devid->len;
619 if (port->target_devid)
620 i += port->target_devid->len;
621 msg = malloc(i, M_CTL, M_WAITOK);
622 bzero(&msg->port, sizeof(msg->port));
623 msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
624 msg->hdr.nexus.targ_port = port->targ_port;
625 msg->port.port_type = port->port_type;
626 msg->port.physical_port = port->physical_port;
627 msg->port.virtual_port = port->virtual_port;
628 msg->port.status = port->status;
629 i = 0;
630 msg->port.name_len = sprintf(&msg->port.data[i],
631 "%d:%s", softc->ha_id, port->port_name) + 1;
632 i += msg->port.name_len;
633 if (port->lun_map) {
634 msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS;
635 memcpy(&msg->port.data[i], port->lun_map,
636 msg->port.lun_map_len);
637 i += msg->port.lun_map_len;
638 }
639 if (port->port_devid) {
640 msg->port.port_devid_len = port->port_devid->len;
641 memcpy(&msg->port.data[i], port->port_devid->data,
642 msg->port.port_devid_len);
643 i += msg->port.port_devid_len;
644 }
645 if (port->target_devid) {
646 msg->port.target_devid_len = port->target_devid->len;
647 memcpy(&msg->port.data[i], port->target_devid->data,
648 msg->port.target_devid_len);
649 i += msg->port.target_devid_len;
650 }
651 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
652 M_WAITOK);
653 free(msg, M_CTL);
654}
655
656static void
657ctl_isc_ha_link_up(struct ctl_softc *softc)
658{
659 struct ctl_port *port;
660 struct ctl_lun *lun;
661
662 STAILQ_FOREACH(port, &softc->port_list, links)
663 ctl_isc_announce_port(port);
664 STAILQ_FOREACH(lun, &softc->lun_list, links)
665 ctl_isc_announce_lun(lun);
666}
667
668static void
669ctl_isc_ha_link_down(struct ctl_softc *softc)
670{
671 struct ctl_port *port;
672 struct ctl_lun *lun;
673 union ctl_io *io;
674
675 mtx_lock(&softc->ctl_lock);
676 STAILQ_FOREACH(lun, &softc->lun_list, links) {
677 mtx_lock(&lun->lun_lock);
678 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
679 mtx_unlock(&lun->lun_lock);
680
681 mtx_unlock(&softc->ctl_lock);
682 io = ctl_alloc_io(softc->othersc_pool);
683 mtx_lock(&softc->ctl_lock);
684 ctl_zero_io(io);
685 io->io_hdr.msg_type = CTL_MSG_FAILOVER;
686 io->io_hdr.nexus.targ_mapped_lun = lun->lun;
687 ctl_enqueue_isc(io);
688 }
689
690 STAILQ_FOREACH(port, &softc->port_list, links) {
691 if (port->targ_port >= softc->port_min &&
692 port->targ_port < softc->port_max)
693 continue;
694 port->status &= ~CTL_PORT_STATUS_ONLINE;
695 }
696 mtx_unlock(&softc->ctl_lock);
697}
698
699static void
700ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
701{
702 struct ctl_lun *lun;
703 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
704
705 if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS &&
706 (lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]) != NULL) {
707 if (msg->ua.ua_all) {
708 if (msg->ua.ua_set)
709 ctl_est_ua_all(lun, iid, msg->ua.ua_type);
710 else
711 ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
712 } else {
713 if (msg->ua.ua_set)
714 ctl_est_ua(lun, iid, msg->ua.ua_type);
715 else
716 ctl_clr_ua(lun, iid, msg->ua.ua_type);
717 }
718 }
719}
720
721static void
722ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
723{
724 struct ctl_lun *lun;
725 struct ctl_ha_msg_lun_pr_key pr_key;
726 int i, k;
727
728 lun = softc->ctl_luns[msg->hdr.nexus.targ_lun];
729 if (lun == NULL) {
730 CTL_DEBUG_PRINT(("%s: Unknown LUN %d\n", __func__,
731 msg->hdr.nexus.targ_lun));
732 } else {
733 mtx_lock(&lun->lun_lock);
734 i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
735 if (msg->lun.lun_devid_len != i || (i > 0 &&
736 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
737 mtx_unlock(&lun->lun_lock);
738 printf("%s: Received conflicting HA LUN %d\n",
739 __func__, msg->hdr.nexus.targ_lun);
740 return;
741 } else {
742 /* Record whether peer is primary. */
743 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
744 (msg->lun.flags & CTL_LUN_DISABLED) == 0)
745 lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
746 else
747 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
748
749 /* If peer is primary and we are not -- use data */
750 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
751 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
752 lun->PRGeneration = msg->lun.pr_generation;
753 lun->pr_res_idx = msg->lun.pr_res_idx;
754 lun->res_type = msg->lun.pr_res_type;
755 lun->pr_key_count = msg->lun.pr_key_count;
756 for (k = 0; k < CTL_MAX_INITIATORS; k++)
757 ctl_clr_prkey(lun, k);
758 for (k = 0; k < msg->lun.pr_key_count; k++) {
759 memcpy(&pr_key, &msg->lun.data[i],
760 sizeof(pr_key));
761 ctl_alloc_prkey(lun, pr_key.pr_iid);
762 ctl_set_prkey(lun, pr_key.pr_iid,
763 pr_key.pr_key);
764 i += sizeof(pr_key);
765 }
766 }
767
768 mtx_unlock(&lun->lun_lock);
769 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
770 __func__, msg->hdr.nexus.targ_lun,
771 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
772 "primary" : "secondary"));
773
774 /* If we are primary but peer doesn't know -- notify */
775 if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
776 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
777 ctl_isc_announce_lun(lun);
778 }
779 }
780}
781
782static void
783ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
784{
785 struct ctl_port *port;
786 int i, new;
787
788 port = softc->ctl_ports[msg->hdr.nexus.targ_port];
789 if (port == NULL) {
790 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
791 msg->hdr.nexus.targ_port));
792 new = 1;
793 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
794 port->frontend = &ha_frontend;
795 port->targ_port = msg->hdr.nexus.targ_port;
796 } else if (port->frontend == &ha_frontend) {
797 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
798 msg->hdr.nexus.targ_port));
799 new = 0;
800 } else {
801 printf("%s: Received conflicting HA port %d\n",
802 __func__, msg->hdr.nexus.targ_port);
803 return;
804 }
805 port->port_type = msg->port.port_type;
806 port->physical_port = msg->port.physical_port;
807 port->virtual_port = msg->port.virtual_port;
808 port->status = msg->port.status;
809 i = 0;
810 free(port->port_name, M_CTL);
811 port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
812 M_CTL);
813 i += msg->port.name_len;
814 if (msg->port.lun_map_len != 0) {
815 if (port->lun_map == NULL)
816 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
817 M_CTL, M_WAITOK);
818 memcpy(port->lun_map, &msg->port.data[i],
819 sizeof(uint32_t) * CTL_MAX_LUNS);
820 i += msg->port.lun_map_len;
821 } else {
822 free(port->lun_map, M_CTL);
823 port->lun_map = NULL;
824 }
825 if (msg->port.port_devid_len != 0) {
826 if (port->port_devid == NULL ||
827 port->port_devid->len != msg->port.port_devid_len) {
828 free(port->port_devid, M_CTL);
829 port->port_devid = malloc(sizeof(struct ctl_devid) +
830 msg->port.port_devid_len, M_CTL, M_WAITOK);
831 }
832 memcpy(port->port_devid->data, &msg->port.data[i],
833 msg->port.port_devid_len);
834 port->port_devid->len = msg->port.port_devid_len;
835 i += msg->port.port_devid_len;
836 } else {
837 free(port->port_devid, M_CTL);
838 port->port_devid = NULL;
839 }
840 if (msg->port.target_devid_len != 0) {
841 if (port->target_devid == NULL ||
842 port->target_devid->len != msg->port.target_devid_len) {
843 free(port->target_devid, M_CTL);
844 port->target_devid = malloc(sizeof(struct ctl_devid) +
845 msg->port.target_devid_len, M_CTL, M_WAITOK);
846 }
847 memcpy(port->target_devid->data, &msg->port.data[i],
848 msg->port.target_devid_len);
849 port->target_devid->len = msg->port.target_devid_len;
850 i += msg->port.target_devid_len;
851 } else {
 852 free(port->target_devid, M_CTL);
 853 port->target_devid = NULL;
854 }
855 if (new) {
856 if (ctl_port_register(port) != 0) {
857 printf("%s: ctl_port_register() failed with error\n",
858 __func__);
859 }
860 }
861}
862
863/*
864 * ISC (Inter Shelf Communication) event handler. Events from the HA
865 * subsystem come in here.
866 */
867static void
868ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
869{
870 struct ctl_softc *softc;
871 union ctl_io *io;
872 struct ctl_prio *presio;
873 ctl_ha_status isc_status;
874
875 softc = control_softc;
608 io = NULL;
609
610
611#if 0
612 printf("CTL: Isc Msg event %d\n", event);
613#endif
876 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
877 if (event == CTL_HA_EVT_MSG_RECV) {
615 union ctl_ha_msg msg_info;
878 union ctl_ha_msg *msg, msgbuf;
879
617 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
618 sizeof(msg_info), /*wait*/ 0);
619#if 0
620 printf("CTL: msg_type %d\n", msg_info.msg_type);
621#endif
622 if (isc_status != 0) {
623 printf("Error receiving message, status = %d\n",
624 isc_status);
880 if (param > sizeof(msgbuf))
881 msg = malloc(param, M_CTL, M_WAITOK);
882 else
883 msg = &msgbuf;
884 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
885 M_WAITOK);
886 if (isc_status != CTL_HA_STATUS_SUCCESS) {
887 printf("%s: Error receiving message: %d\n",
888 __func__, isc_status);
889 if (msg != &msgbuf)
890 free(msg, M_CTL);
891 return;
892 }
893
628 switch (msg_info.hdr.msg_type) {
894 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
895 switch (msg->hdr.msg_type) {
896 case CTL_MSG_SERIALIZE:
630#if 0
631 printf("Serialize\n");
632#endif
633 io = ctl_alloc_io_nowait(softc->othersc_pool);
634 if (io == NULL) {
635 printf("ctl_isc_event_handler: can't allocate "
636 "ctl_io!\n");
637 /* Bad Juju */
638 /* Need to set busy and send msg back */
639 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
640 msg_info.hdr.status = CTL_SCSI_ERROR;
641 msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
642 msg_info.scsi.sense_len = 0;
643 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
644 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
645 }
646 goto bailout;
647 }
897 io = ctl_alloc_io(softc->othersc_pool);
898 ctl_zero_io(io);
649 // populate ctsio from msg_info
899 // populate ctsio from msg
900 io->io_hdr.io_type = CTL_IO_SCSI;
901 io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
652 io->io_hdr.original_sc = msg_info.hdr.original_sc;
653#if 0
654 printf("pOrig %x\n", (int)msg_info.original_sc);
655#endif
902 io->io_hdr.original_sc = msg->hdr.original_sc;
903 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
904 CTL_FLAG_IO_ACTIVE;
905 /*
906 * If we're in serialization-only mode, we don't
907 * want to go through full done processing. Thus
908 * the COPY flag.
909 *
910 * XXX KDM add another flag that is more specific.
911 */
665 if (softc->ha_mode == CTL_HA_MODE_SER_ONLY)
912 if (softc->ha_mode != CTL_HA_MODE_XFER)
913 io->io_hdr.flags |= CTL_FLAG_INT_COPY;
667 io->io_hdr.nexus = msg_info.hdr.nexus;
914 io->io_hdr.nexus = msg->hdr.nexus;
915#if 0
916 printf("port %u, iid %u, lun %u\n",
917 io->io_hdr.nexus.targ_port,
918 io->io_hdr.nexus.initid,
919 io->io_hdr.nexus.targ_lun);
920#endif
674 io->scsiio.tag_num = msg_info.scsi.tag_num;
675 io->scsiio.tag_type = msg_info.scsi.tag_type;
676 memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
921 io->scsiio.tag_num = msg->scsi.tag_num;
922 io->scsiio.tag_type = msg->scsi.tag_type;
923#ifdef CTL_TIME_IO
924 io->io_hdr.start_time = time_uptime;
925 getbintime(&io->io_hdr.start_bt);
926#endif /* CTL_TIME_IO */
927 io->scsiio.cdb_len = msg->scsi.cdb_len;
928 memcpy(io->scsiio.cdb, msg->scsi.cdb,
929 CTL_MAX_CDBLEN);
930 if (softc->ha_mode == CTL_HA_MODE_XFER) {
931 const struct ctl_cmd_entry *entry;
932
933 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
934 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
935 io->io_hdr.flags |=
936 entry->flags & CTL_FLAG_DATA_MASK;
937 }
938 ctl_enqueue_isc(io);
939 break;
940
941 /* Performed on the Originating SC, XFER mode only */
942 case CTL_MSG_DATAMOVE: {
943 struct ctl_sg_entry *sgl;
944 int i, j;
945
694 io = msg_info.hdr.original_sc;
946 io = msg->hdr.original_sc;
947 if (io == NULL) {
948 printf("%s: original_sc == NULL!\n", __func__);
949 /* XXX KDM do something here */
950 break;
951 }
952 io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
953 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
954 /*
955 * Keep track of this, we need to send it back over
956 * when the datamove is complete.
957 */
706 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
958 io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
959
708 if (msg_info.dt.sg_sequence == 0) {
709 /*
710 * XXX KDM we use the preallocated S/G list
711 * here, but we'll need to change this to
712 * dynamic allocation if we need larger S/G
713 * lists.
714 */
715 if (msg_info.dt.kern_sg_entries >
716 sizeof(io->io_hdr.remote_sglist) /
717 sizeof(io->io_hdr.remote_sglist[0])) {
718 printf("%s: number of S/G entries "
719 "needed %u > allocated num %zd\n",
720 __func__,
721 msg_info.dt.kern_sg_entries,
722 sizeof(io->io_hdr.remote_sglist)/
723 sizeof(io->io_hdr.remote_sglist[0]));
724
725 /*
726 * XXX KDM send a message back to
727 * the other side to shut down the
728 * DMA. The error will come back
729 * through via the normal channel.
730 */
731 break;
732 }
733 sgl = io->io_hdr.remote_sglist;
734 memset(sgl, 0,
735 sizeof(io->io_hdr.remote_sglist));
960 if (msg->dt.sg_sequence == 0) {
961 i = msg->dt.kern_sg_entries +
962 io->scsiio.kern_data_len /
963 CTL_HA_DATAMOVE_SEGMENT + 1;
964 sgl = malloc(sizeof(*sgl) * i, M_CTL,
965 M_WAITOK | M_ZERO);
966 io->io_hdr.remote_sglist = sgl;
967 io->io_hdr.local_sglist =
968 &sgl[msg->dt.kern_sg_entries];
969
970 io->scsiio.kern_data_ptr = (uint8_t *)sgl;
971
972 io->scsiio.kern_sg_entries =
740 msg_info.dt.kern_sg_entries;
973 msg->dt.kern_sg_entries;
974 io->scsiio.rem_sg_entries =
742 msg_info.dt.kern_sg_entries;
975 msg->dt.kern_sg_entries;
976 io->scsiio.kern_data_len =
744 msg_info.dt.kern_data_len;
977 msg->dt.kern_data_len;
978 io->scsiio.kern_total_len =
746 msg_info.dt.kern_total_len;
979 msg->dt.kern_total_len;
980 io->scsiio.kern_data_resid =
748 msg_info.dt.kern_data_resid;
981 msg->dt.kern_data_resid;
982 io->scsiio.kern_rel_offset =
750 msg_info.dt.kern_rel_offset;
751 /*
752 * Clear out per-DMA flags.
753 */
754 io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
755 /*
756 * Add per-DMA flags that are set for this
757 * particular DMA request.
758 */
759 io->io_hdr.flags |= msg_info.dt.flags &
760 CTL_FLAG_RDMA_MASK;
983 msg->dt.kern_rel_offset;
984 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
985 io->io_hdr.flags |= msg->dt.flags &
986 CTL_FLAG_BUS_ADDR;
987 } else
988 sgl = (struct ctl_sg_entry *)
989 io->scsiio.kern_data_ptr;
990
765 for (i = msg_info.dt.sent_sg_entries, j = 0;
766 i < (msg_info.dt.sent_sg_entries +
767 msg_info.dt.cur_sg_entries); i++, j++) {
768 sgl[i].addr = msg_info.dt.sg_list[j].addr;
769 sgl[i].len = msg_info.dt.sg_list[j].len;
991 for (i = msg->dt.sent_sg_entries, j = 0;
992 i < (msg->dt.sent_sg_entries +
993 msg->dt.cur_sg_entries); i++, j++) {
994 sgl[i].addr = msg->dt.sg_list[j].addr;
995 sgl[i].len = msg->dt.sg_list[j].len;
996
997#if 0
998 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
999 __func__,
774 msg_info.dt.sg_list[j].addr,
775 msg_info.dt.sg_list[j].len,
1000 msg->dt.sg_list[j].addr,
1001 msg->dt.sg_list[j].len,
1002 sgl[i].addr, sgl[i].len, j, i);
1003#endif
1004 }
779#if 0
780 memcpy(&sgl[msg_info.dt.sent_sg_entries],
781 msg_info.dt.sg_list,
782 sizeof(*sgl) * msg_info.dt.cur_sg_entries);
783#endif
1005
1006 /*
1007 * If this is the last piece of the I/O, we've got
1008 * the full S/G list. Queue processing in the thread.
1009 * Otherwise wait for the next piece.
1010 */
790 if (msg_info.dt.sg_last != 0)
1011 if (msg->dt.sg_last != 0)
1012 ctl_enqueue_isc(io);
1013 break;
1014 }
1015 /* Performed on the Serializing (primary) SC, XFER mode only */
1016 case CTL_MSG_DATAMOVE_DONE: {
796 if (msg_info.hdr.serializing_sc == NULL) {
1017 if (msg->hdr.serializing_sc == NULL) {
1018 printf("%s: serializing_sc == NULL!\n",
1019 __func__);
1020 /* XXX KDM now what? */
1021 break;
1022 }
1023 /*
1024 * We grab the sense information here in case
1025 * there was a failure, so we can return status
1026 * back to the initiator.
1027 */
807 io = msg_info.hdr.serializing_sc;
1028 io = msg->hdr.serializing_sc;
1029 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
809 io->io_hdr.status = msg_info.hdr.status;
810 io->scsiio.scsi_status = msg_info.scsi.scsi_status;
811 io->scsiio.sense_len = msg_info.scsi.sense_len;
812 io->scsiio.sense_residual =msg_info.scsi.sense_residual;
813 io->io_hdr.port_status = msg_info.scsi.fetd_status;
814 io->scsiio.residual = msg_info.scsi.residual;
815 memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
816 sizeof(io->scsiio.sense_data));
1030 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1031 io->io_hdr.port_status = msg->scsi.fetd_status;
1032 io->scsiio.residual = msg->scsi.residual;
1033 if (msg->hdr.status != CTL_STATUS_NONE) {
1034 io->io_hdr.status = msg->hdr.status;
1035 io->scsiio.scsi_status = msg->scsi.scsi_status;
1036 io->scsiio.sense_len = msg->scsi.sense_len;
1037 io->scsiio.sense_residual =msg->scsi.sense_residual;
1038 memcpy(&io->scsiio.sense_data,
1039 &msg->scsi.sense_data,
1040 msg->scsi.sense_len);
1041 }
1042 ctl_enqueue_isc(io);
1043 break;
1044 }
1045
 1046 /* Performed on Originating SC, SER_ONLY mode */
1047 case CTL_MSG_R2R:
823 io = msg_info.hdr.original_sc;
1048 io = msg->hdr.original_sc;
1049 if (io == NULL) {
825 printf("%s: Major Bummer\n", __func__);
826 return;
827 } else {
828#if 0
829 printf("pOrig %x\n",(int) ctsio);
830#endif
1050 printf("%s: original_sc == NULL!\n",
1051 __func__);
1052 break;
1053 }
1054 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1055 io->io_hdr.msg_type = CTL_MSG_R2R;
833 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
1056 io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
1057 ctl_enqueue_isc(io);
1058 break;
1059
1060 /*
1061 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
1062 * mode.
1063 * Performed on the Originating (i.e. secondary) SC in XFER
1064 * mode
1065 */
1066 case CTL_MSG_FINISH_IO:
1067 if (softc->ha_mode == CTL_HA_MODE_XFER)
845 ctl_isc_handler_finish_xfer(softc,
846 &msg_info);
1068 ctl_isc_handler_finish_xfer(softc, msg);
1069 else
848 ctl_isc_handler_finish_ser_only(softc,
849 &msg_info);
1070 ctl_isc_handler_finish_ser_only(softc, msg);
1071 break;
1072
 1073 /* Performed on Originating SC */
1074 case CTL_MSG_BAD_JUJU:
854 io = msg_info.hdr.original_sc;
1075 io = msg->hdr.original_sc;
1076 if (io == NULL) {
1077 printf("%s: Bad JUJU!, original_sc is NULL!\n",
1078 __func__);
1079 break;
1080 }
860 ctl_copy_sense_data(&msg_info, io);
1081 ctl_copy_sense_data(msg, io);
1082 /*
1083 * IO should have already been cleaned up on other
1084 * SC so clear this flag so we won't send a message
1085 * back to finish the IO there.
1086 */
1087 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
1088 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1089
869 /* io = msg_info.hdr.serializing_sc; */
1090 /* io = msg->hdr.serializing_sc; */
1091 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
1092 ctl_enqueue_isc(io);
1093 break;
1094
1095 /* Handle resets sent from the other side */
1096 case CTL_MSG_MANAGE_TASKS: {
1097 struct ctl_taskio *taskio;
877 taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
1098 taskio = (struct ctl_taskio *)ctl_alloc_io(
1099 softc->othersc_pool);
879 if (taskio == NULL) {
880 printf("ctl_isc_event_handler: can't allocate "
881 "ctl_io!\n");
882 /* Bad Juju */
883 /* should I just call the proper reset func
884 here??? */
885 goto bailout;
886 }
1100 ctl_zero_io((union ctl_io *)taskio);
1101 taskio->io_hdr.io_type = CTL_IO_TASK;
1102 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
890 taskio->io_hdr.nexus = msg_info.hdr.nexus;
891 taskio->task_action = msg_info.task.task_action;
892 taskio->tag_num = msg_info.task.tag_num;
893 taskio->tag_type = msg_info.task.tag_type;
1103 taskio->io_hdr.nexus = msg->hdr.nexus;
1104 taskio->task_action = msg->task.task_action;
1105 taskio->tag_num = msg->task.tag_num;
1106 taskio->tag_type = msg->task.tag_type;
1107#ifdef CTL_TIME_IO
1108 taskio->io_hdr.start_time = time_uptime;
1109 getbintime(&taskio->io_hdr.start_bt);
897#if 0
898 cs_prof_gettime(&taskio->io_hdr.start_ticks);
899#endif
1110#endif /* CTL_TIME_IO */
1111 ctl_run_task((union ctl_io *)taskio);
1112 break;
1113 }
1114 /* Persistent Reserve action which needs attention */
1115 case CTL_MSG_PERS_ACTION:
906 presio = (struct ctl_prio *)ctl_alloc_io_nowait(
1116 presio = (struct ctl_prio *)ctl_alloc_io(
1117 softc->othersc_pool);
908 if (presio == NULL) {
909 printf("ctl_isc_event_handler: can't allocate "
910 "ctl_io!\n");
911 /* Bad Juju */
912 /* Need to set busy and send msg back */
913 goto bailout;
914 }
1118 ctl_zero_io((union ctl_io *)presio);
1119 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
917 presio->pr_msg = msg_info.pr;
1120 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
1121 presio->io_hdr.nexus = msg->hdr.nexus;
1122 presio->pr_msg = msg->pr;
1123 ctl_enqueue_isc((union ctl_io *)presio);
1124 break;
920 case CTL_MSG_SYNC_FE:
921 rcv_sync_msg = 1;
1125 case CTL_MSG_UA:
1126 ctl_isc_ua(softc, msg, param);
1127 break;
1128 case CTL_MSG_PORT_SYNC:
1129 ctl_isc_port_sync(softc, msg, param);
1130 break;
1131 case CTL_MSG_LUN_SYNC:
1132 ctl_isc_lun_sync(softc, msg, param);
1133 break;
1134 default:
924 printf("How did I get here?\n");
1135 printf("Received HA message of unknown type %d\n",
1136 msg->hdr.msg_type);
1137 break;
1138 }
926 } else if (event == CTL_HA_EVT_MSG_SENT) {
927 if (param != CTL_HA_STATUS_SUCCESS) {
928 printf("Bad status from ctl_ha_msg_send status %d\n",
929 param);
1139 if (msg != &msgbuf)
1140 free(msg, M_CTL);
1141 } else if (event == CTL_HA_EVT_LINK_CHANGE) {
1142 printf("CTL: HA link status changed from %d to %d\n",
1143 softc->ha_link, param);
1144 if (param == softc->ha_link)
1145 return;
1146 if (softc->ha_link == CTL_HA_LINK_ONLINE) {
1147 softc->ha_link = param;
1148 ctl_isc_ha_link_down(softc);
1149 } else {
1150 softc->ha_link = param;
1151 if (softc->ha_link == CTL_HA_LINK_ONLINE)
1152 ctl_isc_ha_link_up(softc);
1153 }
1154 return;
932 } else if (event == CTL_HA_EVT_DISCONNECT) {
933 printf("CTL: Got a disconnect from Isc\n");
934 return;
1155 } else {
1156 printf("ctl_isc_event_handler: Unknown event %d\n", event);
1157 return;
1158 }
939
940bailout:
941 return;
1159}
1160
1161static void
1162ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
1163{
947 struct scsi_sense_data *sense;
1164
949 sense = &dest->scsiio.sense_data;
950 bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
1165 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
1166 src->scsi.sense_len);
1167 dest->scsiio.scsi_status = src->scsi.scsi_status;
1168 dest->scsiio.sense_len = src->scsi.sense_len;
1169 dest->io_hdr.status = src->hdr.status;
1170}
955#endif
1171
1172static void
1173ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
1174{
1175
1176 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
1177 src->scsiio.sense_len);
1178 dest->scsi.scsi_status = src->scsiio.scsi_status;
1179 dest->scsi.sense_len = src->scsiio.sense_len;
1180 dest->hdr.status = src->io_hdr.status;
1181}
1182
1183static void
1184ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
1185{
1186 struct ctl_softc *softc = lun->ctl_softc;
1187 ctl_ua_type *pu;
1188
1189 if (initidx < softc->init_min || initidx >= softc->init_max)
1190 return;
1191 mtx_assert(&lun->lun_lock, MA_OWNED);
1192 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
1193 if (pu == NULL)
1194 return;
1195 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
1196}
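/*
 * A worked example, assuming nothing beyond the code above: pending_ua is
 * indexed first by port and then by initiator within that port, so an
 * initiator index decomposes as
 *
 *	port = initidx / CTL_MAX_INIT_PER_PORT;
 *	init = initidx % CTL_MAX_INIT_PER_PORT;
 *
 * and ctl_est_ua_all()/ctl_clr_ua_all() below reconstruct the flat index as
 * i * CTL_MAX_INIT_PER_PORT + j when honoring the "except" argument.
 */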
1197
1198static void
1199ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
1200{
1201 struct ctl_softc *softc = lun->ctl_softc;
1202 int i, j;
1203
1204 mtx_assert(&lun->lun_lock, MA_OWNED);
975 for (i = 0; i < CTL_MAX_PORTS; i++) {
1205 for (i = softc->port_min; i < softc->port_max; i++) {
1206 if (lun->pending_ua[i] == NULL)
1207 continue;
1208 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
1209 if (i * CTL_MAX_INIT_PER_PORT + j == except)
1210 continue;
1211 lun->pending_ua[i][j] |= ua;
1212 }
1213 }
1214}
1215
1216static void
1217ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
1218{
1219 struct ctl_softc *softc = lun->ctl_softc;
1220 ctl_ua_type *pu;
1221
1222 if (initidx < softc->init_min || initidx >= softc->init_max)
1223 return;
1224 mtx_assert(&lun->lun_lock, MA_OWNED);
1225 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
1226 if (pu == NULL)
1227 return;
1228 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
1229}
1230
1231static void
1232ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
1233{
1234 struct ctl_softc *softc = lun->ctl_softc;
1235 int i, j;
1236
1237 mtx_assert(&lun->lun_lock, MA_OWNED);
1004 for (i = 0; i < CTL_MAX_PORTS; i++) {
1238 for (i = softc->port_min; i < softc->port_max; i++) {
1239 if (lun->pending_ua[i] == NULL)
1240 continue;
1241 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
1242 if (i * CTL_MAX_INIT_PER_PORT + j == except)
1243 continue;
1244 lun->pending_ua[i][j] &= ~ua;
1245 }
1246 }
1247}
1248
1249static void
1250ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
1251 ctl_ua_type ua_type)
1252{
1253 struct ctl_lun *lun;
1254
1255 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
1256 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
1257 mtx_lock(&lun->lun_lock);
1258 ctl_clr_ua(lun, initidx, ua_type);
1259 mtx_unlock(&lun->lun_lock);
1260 }
1261}
1262
1263static int
1030ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS)
1264ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
1265{
1266 struct ctl_softc *softc = (struct ctl_softc *)arg1;
1267 struct ctl_lun *lun;
1268 struct ctl_lun_req ireq;
1269 int error, value;
1270
1036 if (softc->flags & CTL_FLAG_ACTIVE_SHELF)
1037 value = 0;
1038 else
1039 value = 1;
1040
1271 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
1272 error = sysctl_handle_int(oidp, &value, 0, req);
1273 if ((error != 0) || (req->newptr == NULL))
1274 return (error);
1275
1276 mtx_lock(&softc->ctl_lock);
1277 if (value == 0)
1278 softc->flags |= CTL_FLAG_ACTIVE_SHELF;
1279 else
1280 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
1281 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1051 mtx_lock(&lun->lun_lock);
1052 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
1053 mtx_unlock(&lun->lun_lock);
1282 mtx_unlock(&softc->ctl_lock);
1283 bzero(&ireq, sizeof(ireq));
1284 ireq.reqtype = CTL_LUNREQ_MODIFY;
1285 ireq.reqdata.modify.lun_id = lun->lun;
1286 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
1287 curthread);
1288 if (ireq.status != CTL_LUN_OK) {
1289 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
1290 __func__, ireq.status, ireq.error_str);
1291 }
1292 mtx_lock(&softc->ctl_lock);
1293 }
1294 mtx_unlock(&softc->ctl_lock);
1295 return (0);
1296}
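/*
 * Usage sketch (assumption, not from this change): this handler backs the
 * kern.cam.ctl.ha_role sysctl registered from ctl_init().  Writing 0 marks
 * this head active/primary (sets CTL_FLAG_ACTIVE_SHELF) and writing 1 marks
 * it secondary; each LUN is then pushed through a CTL_LUNREQ_MODIFY so its
 * backend picks up the new role, e.g.:
 *
 *	sysctl kern.cam.ctl.ha_role=0
 */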
1297
1298static int
1299ctl_init(void)
1300{
1301 struct ctl_softc *softc;
1302 void *other_pool;
1303 int i, error, retval;
1065 //int isc_retval;
1304
1305 retval = 0;
1068 ctl_pause_rtr = 0;
1069 rcv_sync_msg = 0;
1070
1306 control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
1307 M_WAITOK | M_ZERO);
1308 softc = control_softc;
1309
1310 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
1311 "cam/ctl");
1312
1313 softc->dev->si_drv1 = softc;
1314
1080 /*
1081 * By default, return a "bad LUN" peripheral qualifier for unknown
1082 * LUNs. The user can override this default using the tunable or
1083 * sysctl. See the comment in ctl_inquiry_std() for more details.
1084 */
1085 softc->inquiry_pq_no_lun = 1;
1086 TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
1087 &softc->inquiry_pq_no_lun);
1315 sysctl_ctx_init(&softc->sysctl_ctx);
1316 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1317 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
1318 CTLFLAG_RD, 0, "CAM Target Layer");
1319
1320 if (softc->sysctl_tree == NULL) {
1321 printf("%s: unable to allocate sysctl tree\n", __func__);
1322 destroy_dev(softc->dev);
1323 free(control_softc, M_DEVBUF);
1324 control_softc = NULL;
1325 return (ENOMEM);
1326 }
1327
1101 SYSCTL_ADD_INT(&softc->sysctl_ctx,
1102 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1103 "inquiry_pq_no_lun", CTLFLAG_RW,
1104 &softc->inquiry_pq_no_lun, 0,
1105 "Report no lun possible for invalid LUNs");
1106
1328 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
1329 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
1330 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1331 softc->open_count = 0;
1332
1333 /*
1334 * Default to actually sending a SYNCHRONIZE CACHE command down to
1335 * the drive.
1336 */
1337 softc->flags = CTL_FLAG_REAL_SYNC;
1338
1339 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1340 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
1341 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");
1342
1343 /*
1344 * In Copan's HA scheme, the "master" and "slave" roles are
1345 * figured out through the slot the controller is in. Although it
1346 * is an active/active system, someone has to be in charge.
1347 */
1348 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1349 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
1350 "HA head ID (0 - no HA)");
1126 if (softc->ha_id == 0) {
1351 if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) {
1352 softc->flags |= CTL_FLAG_ACTIVE_SHELF;
1353 softc->is_single = 1;
1129 softc->port_offset = 0;
1130 } else
1131 softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS;
1132 softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT;
1354 softc->port_cnt = CTL_MAX_PORTS;
1355 softc->port_min = 0;
1356 } else {
1357 softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS;
1358 softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
1359 }
1360 softc->port_max = softc->port_min + softc->port_cnt;
1361 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
1362 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;
1363
1364 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1365 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
1366 "HA link state (0 - offline, 1 - unknown, 2 - online)");
1367
1368 STAILQ_INIT(&softc->lun_list);
1369 STAILQ_INIT(&softc->pending_lun_queue);
1370 STAILQ_INIT(&softc->fe_list);
1371 STAILQ_INIT(&softc->port_list);
1372 STAILQ_INIT(&softc->be_list);
1373 ctl_tpc_init(softc);
1374
1375 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
1376 &other_pool) != 0)
1377 {
1378 printf("ctl: can't allocate %d entry other SC pool, "
1379 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
1380 return (ENOMEM);
1381 }
1382 softc->othersc_pool = other_pool;
1383
1384 if (worker_threads <= 0)
1385 worker_threads = max(1, mp_ncpus / 4);
1386 if (worker_threads > CTL_MAX_THREADS)
1387 worker_threads = CTL_MAX_THREADS;
1388
1389 for (i = 0; i < worker_threads; i++) {
1390 struct ctl_thread *thr = &softc->threads[i];
1391
1392 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
1393 thr->ctl_softc = softc;
1394 STAILQ_INIT(&thr->incoming_queue);
1395 STAILQ_INIT(&thr->rtr_queue);
1396 STAILQ_INIT(&thr->done_queue);
1397 STAILQ_INIT(&thr->isc_queue);
1398
1399 error = kproc_kthread_add(ctl_work_thread, thr,
1400 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
1401 if (error != 0) {
1402 printf("error creating CTL work thread!\n");
1403 ctl_pool_free(other_pool);
1404 return (error);
1405 }
1406 }
1407 error = kproc_kthread_add(ctl_lun_thread, softc,
1408 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
1409 if (error != 0) {
1410 printf("error creating CTL lun thread!\n");
1411 ctl_pool_free(other_pool);
1412 return (error);
1413 }
1414 error = kproc_kthread_add(ctl_thresh_thread, softc,
1415 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
1416 if (error != 0) {
1417 printf("error creating CTL threshold thread!\n");
1418 ctl_pool_free(other_pool);
1419 return (error);
1420 }
1421
1422 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1189 OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN,
1190 softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head");
1423 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
1424 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
1425
1426 if (softc->is_single == 0) {
1427 ctl_frontend_register(&ha_frontend);
1428 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
1429 printf("ctl_init: ctl_ha_msg_init failed.\n");
1430 softc->is_single = 1;
1431 } else
1432 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
1433 != CTL_HA_STATUS_SUCCESS) {
1434 printf("ctl_init: ctl_ha_msg_register failed.\n");
1435 softc->is_single = 1;
1436 }
1437 }
1438 return (0);
1439}
1440
1441void
1442ctl_shutdown(void)
1443{
1444 struct ctl_softc *softc;
1445 struct ctl_lun *lun, *next_lun;
1446
1447 softc = (struct ctl_softc *)control_softc;
1448
1449 if (softc->is_single == 0) {
1450 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
1451 != CTL_HA_STATUS_SUCCESS) {
1452 printf("ctl_shutdown: ctl_ha_msg_deregister failed.\n");
1453 }
1454 if (ctl_ha_msg_shutdown(softc) != CTL_HA_STATUS_SUCCESS) {
1455 printf("ctl_shutdown: ctl_ha_msg_shutdown failed.\n");
1456 }
1457 ctl_frontend_deregister(&ha_frontend);
1458 }
1459
1460 mtx_lock(&softc->ctl_lock);
1461
1462 /*
1463 * Free up each LUN.
1464 */
1465 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
1466 next_lun = STAILQ_NEXT(lun, links);
1467 ctl_free_lun(lun);
1468 }
1469
1470 mtx_unlock(&softc->ctl_lock);
1471
1472#if 0
1473 ctl_shutdown_thread(softc->work_thread);
1474 mtx_destroy(&softc->queue_lock);
1475#endif
1476
1477 ctl_tpc_shutdown(softc);
1478 uma_zdestroy(softc->io_zone);
1479 mtx_destroy(&softc->ctl_lock);
1480
1481 destroy_dev(softc->dev);
1482
1483 sysctl_ctx_free(&softc->sysctl_ctx);
1484
1485 free(control_softc, M_DEVBUF);
1486 control_softc = NULL;
1487}
1488
1489static int
1490ctl_module_event_handler(module_t mod, int what, void *arg)
1491{
1492
1493 switch (what) {
1494 case MOD_LOAD:
1495 return (ctl_init());
1496 case MOD_UNLOAD:
1497 return (EBUSY);
1498 default:
1499 return (EOPNOTSUPP);
1500 }
1501}
1502
1503/*
1504 * XXX KDM should we do some access checks here? Bump a reference count to
1505 * prevent a CTL module from being unloaded while someone has it open?
1506 */
1507static int
1508ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
1509{
1510 return (0);
1511}
1512
1513static int
1514ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
1515{
1516 return (0);
1517}
1518
1261int
1262ctl_port_enable(ctl_port_type port_type)
1263{
1264 struct ctl_softc *softc = control_softc;
1265 struct ctl_port *port;
1266
1267 if (softc->is_single == 0) {
1268 union ctl_ha_msg msg_info;
1269 int isc_retval;
1270
1271#if 0
1272 printf("%s: HA mode, synchronizing frontend enable\n",
1273 __func__);
1274#endif
1275 msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
1276 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1277 sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
1278 printf("Sync msg send error retval %d\n", isc_retval);
1279 }
1280 if (!rcv_sync_msg) {
1281 isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
1282 sizeof(msg_info), 1);
1283 }
1284#if 0
1285 printf("CTL:Frontend Enable\n");
1286 } else {
1287 printf("%s: single mode, skipping frontend synchronization\n",
1288 __func__);
1289#endif
1290 }
1291
1292 STAILQ_FOREACH(port, &softc->port_list, links) {
1293 if (port_type & port->port_type)
1294 {
1295#if 0
1296 printf("port %d\n", port->targ_port);
1297#endif
1298 ctl_port_online(port);
1299 }
1300 }
1301
1302 return (0);
1303}
1304
1305int
1306ctl_port_disable(ctl_port_type port_type)
1307{
1308 struct ctl_softc *softc;
1309 struct ctl_port *port;
1310
1311 softc = control_softc;
1312
1313 STAILQ_FOREACH(port, &softc->port_list, links) {
1314 if (port_type & port->port_type)
1315 ctl_port_offline(port);
1316 }
1317
1318 return (0);
1319}
1320
1519/*
1322 * Returns 0 for success, 1 for failure.
1323 * Currently the only failure mode is if there aren't enough entries
1324 * allocated. So, in case of a failure, look at num_entries_dropped,
1325 * reallocate and try again.
1326 */
1327int
1328ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
1329 int *num_entries_filled, int *num_entries_dropped,
1330 ctl_port_type port_type, int no_virtual)
1331{
1332 struct ctl_softc *softc;
1333 struct ctl_port *port;
1334 int entries_dropped, entries_filled;
1335 int retval;
1336 int i;
1337
1338 softc = control_softc;
1339
1340 retval = 0;
1341 entries_filled = 0;
1342 entries_dropped = 0;
1343
1344 i = 0;
1345 mtx_lock(&softc->ctl_lock);
1346 STAILQ_FOREACH(port, &softc->port_list, links) {
1347 struct ctl_port_entry *entry;
1348
1349 if ((port->port_type & port_type) == 0)
1350 continue;
1351
1352 if ((no_virtual != 0)
1353 && (port->virtual_port != 0))
1354 continue;
1355
1356 if (entries_filled >= num_entries_alloced) {
1357 entries_dropped++;
1358 continue;
1359 }
1360 entry = &entries[i];
1361
1362 entry->port_type = port->port_type;
1363 strlcpy(entry->port_name, port->port_name,
1364 sizeof(entry->port_name));
1365 entry->physical_port = port->physical_port;
1366 entry->virtual_port = port->virtual_port;
1367 entry->wwnn = port->wwnn;
1368 entry->wwpn = port->wwpn;
1369
1370 i++;
1371 entries_filled++;
1372 }
1373
1374 mtx_unlock(&softc->ctl_lock);
1375
1376 if (entries_dropped > 0)
1377 retval = 1;
1378
1379 *num_entries_dropped = entries_dropped;
1380 *num_entries_filled = entries_filled;
1381
1382 return (retval);
1383}
1384
1385/*
1520 * Remove an initiator by port number and initiator ID.
1521 * Returns 0 for success, -1 for failure.
1522 */
1523int
1524ctl_remove_initiator(struct ctl_port *port, int iid)
1525{
1526 struct ctl_softc *softc = control_softc;
1527
1528 mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
1529
1530	if (iid >= CTL_MAX_INIT_PER_PORT) {
1531		printf("%s: initiator ID %u >= maximum %u!\n",
1532 __func__, iid, CTL_MAX_INIT_PER_PORT);
1533 return (-1);
1534 }
1535
1536 mtx_lock(&softc->ctl_lock);
1537 port->wwpn_iid[iid].in_use--;
1538 port->wwpn_iid[iid].last_use = time_uptime;
1539 mtx_unlock(&softc->ctl_lock);
1540
1541 return (0);
1542}
1543
1544/*
1545 * Add an initiator to the initiator map.
1546 * Returns iid for success, < 0 for failure.
1547 */
1548int
1549ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
1550{
1551 struct ctl_softc *softc = control_softc;
1552 time_t best_time;
1553 int i, best;
1554
1555 mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
1556
1557 if (iid >= CTL_MAX_INIT_PER_PORT) {
1558 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
1559 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
1560 free(name, M_CTL);
1561 return (-1);
1562 }
1563
1564 mtx_lock(&softc->ctl_lock);
1565
1566 if (iid < 0 && (wwpn != 0 || name != NULL)) {
1567 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1568 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
1569 iid = i;
1570 break;
1571 }
1572 if (name != NULL && port->wwpn_iid[i].name != NULL &&
1573 strcmp(name, port->wwpn_iid[i].name) == 0) {
1574 iid = i;
1575 break;
1576 }
1577 }
1578 }
1579
1580 if (iid < 0) {
1581 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1582 if (port->wwpn_iid[i].in_use == 0 &&
1583 port->wwpn_iid[i].wwpn == 0 &&
1584 port->wwpn_iid[i].name == NULL) {
1585 iid = i;
1586 break;
1587 }
1588 }
1589 }
1590
1591 if (iid < 0) {
1592 best = -1;
1593 best_time = INT32_MAX;
1594 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1595 if (port->wwpn_iid[i].in_use == 0) {
1596 if (port->wwpn_iid[i].last_use < best_time) {
1597 best = i;
1598 best_time = port->wwpn_iid[i].last_use;
1599 }
1600 }
1601 }
1602 iid = best;
1603 }
1604
1605 if (iid < 0) {
1606 mtx_unlock(&softc->ctl_lock);
1607 free(name, M_CTL);
1608 return (-2);
1609 }
1610
1611 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
1612 /*
1613 * This is not an error yet.
1614 */
1615 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
1616#if 0
1617 printf("%s: port %d iid %u WWPN %#jx arrived"
1618 " again\n", __func__, port->targ_port,
1619 iid, (uintmax_t)wwpn);
1620#endif
1621 goto take;
1622 }
1623 if (name != NULL && port->wwpn_iid[iid].name != NULL &&
1624 strcmp(name, port->wwpn_iid[iid].name) == 0) {
1625#if 0
1626 printf("%s: port %d iid %u name '%s' arrived"
1627 " again\n", __func__, port->targ_port,
1628 iid, name);
1629#endif
1630 goto take;
1631 }
1632
1633 /*
1634 * This is an error, but what do we do about it? The
1635 * driver is telling us we have a new WWPN for this
1636 * initiator ID, so we pretty much need to use it.
1637 */
1638 printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
1639 " but WWPN %#jx '%s' is still at that address\n",
1640 __func__, port->targ_port, iid, wwpn, name,
1641 (uintmax_t)port->wwpn_iid[iid].wwpn,
1642 port->wwpn_iid[iid].name);
1643
1644 /*
1645 * XXX KDM clear have_ca and ua_pending on each LUN for
1646 * this initiator.
1647 */
1648 }
1649take:
1650 free(port->wwpn_iid[iid].name, M_CTL);
1651 port->wwpn_iid[iid].name = name;
1652 port->wwpn_iid[iid].wwpn = wwpn;
1653 port->wwpn_iid[iid].in_use++;
1654 mtx_unlock(&softc->ctl_lock);
1655
1656 return (iid);
1657}
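
/*
 * Illustrative sketch (an assumption, not an existing frontend): how a
 * frontend might pair ctl_add_initiator() and ctl_remove_initiator()
 * around a login/logout cycle.  Note that ctl_add_initiator() takes
 * ownership of the name buffer and frees it on failure, hence the strdup.
 */
#if 0
static int
example_initiator_login(struct ctl_port *port, uint64_t wwpn,
    const char *name)
{
	int iid;

	iid = ctl_add_initiator(port, /*iid*/ -1, wwpn,
	    name != NULL ? strdup(name, M_CTL) : NULL);
	if (iid < 0)
		return (ENOSPC);	/* no free slot on this port */
	/* ... remember iid for the life of the session ... */
	return (0);
}

static void
example_initiator_logout(struct ctl_port *port, int iid)
{

	ctl_remove_initiator(port, iid);
}
#endif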
1658
1659static int
1660ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
1661{
1662 int len;
1663
1664 switch (port->port_type) {
1665 case CTL_PORT_FC:
1666 {
1667 struct scsi_transportid_fcp *id =
1668 (struct scsi_transportid_fcp *)buf;
1669 if (port->wwpn_iid[iid].wwpn == 0)
1670 return (0);
1671 memset(id, 0, sizeof(*id));
1672 id->format_protocol = SCSI_PROTO_FC;
1673 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
1674 return (sizeof(*id));
1675 }
1676 case CTL_PORT_ISCSI:
1677 {
1678 struct scsi_transportid_iscsi_port *id =
1679 (struct scsi_transportid_iscsi_port *)buf;
1680 if (port->wwpn_iid[iid].name == NULL)
1681 return (0);
1682 memset(id, 0, 256);
1683 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
1684 SCSI_PROTO_ISCSI;
1685 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
1686 len = roundup2(min(len, 252), 4);
1687 scsi_ulto2b(len, id->additional_length);
1688 return (sizeof(*id) + len);
1689 }
1690 case CTL_PORT_SAS:
1691 {
1692 struct scsi_transportid_sas *id =
1693 (struct scsi_transportid_sas *)buf;
1694 if (port->wwpn_iid[iid].wwpn == 0)
1695 return (0);
1696 memset(id, 0, sizeof(*id));
1697 id->format_protocol = SCSI_PROTO_SAS;
1698 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
1699 return (sizeof(*id));
1700 }
1701 default:
1702 {
1703 struct scsi_transportid_spi *id =
1704 (struct scsi_transportid_spi *)buf;
1705 memset(id, 0, sizeof(*id));
1706 id->format_protocol = SCSI_PROTO_SPI;
1707 scsi_ulto2b(iid, id->scsi_addr);
1708 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
1709 return (sizeof(*id));
1710 }
1711 }
1712}
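
/*
 * Illustrative sketch (assumption): formatting the TransportID for one
 * initiator slot.  The 256-byte buffer is an assumption chosen to cover
 * the largest (iSCSI name) format built above.
 */
#if 0
static void
example_print_transport_id(struct ctl_port *port, int iid)
{
	uint8_t buf[256];
	int len;

	len = ctl_create_iid(port, iid, buf);
	if (len == 0)
		return;		/* nothing logged in at this iid */
	printf("port %d iid %d: %d byte TransportID, protocol %#x\n",
	    port->targ_port, iid, len, buf[0] & 0x0f);
}
#endif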
1713
1714/*
1715 * Serialize a command that went down the "wrong" side, and so was sent to
1716 * this controller for execution. The logic is a little different than the
1717 * standard case in ctl_scsiio_precheck(). Errors in this case need to get
1718 * sent back to the other side, but in the success case, we execute the
1719 * command on this side (XFER mode) or tell the other side to execute it
1720 * (SER_ONLY mode).
1721 */
1722static int
1723ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
1724{
1725 struct ctl_softc *softc;
1726 union ctl_ha_msg msg_info;
1727 struct ctl_lun *lun;
1728 const struct ctl_cmd_entry *entry;
1729 int retval = 0;
1730 uint32_t targ_lun;
1731
1732 softc = control_softc;
1733
1734 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
1600 lun = softc->ctl_luns[targ_lun];
1601 if (lun==NULL)
1602 {
1735 if ((targ_lun < CTL_MAX_LUNS) &&
1736 ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
1737 /*
1738 * If the LUN is invalid, pretend that it doesn't exist.
1739 * It will go away as soon as all pending I/O has been
1740 * completed.
1741 */
1742 mtx_lock(&lun->lun_lock);
1743 if (lun->flags & CTL_LUN_DISABLED) {
1744 mtx_unlock(&lun->lun_lock);
1745 lun = NULL;
1746 }
1747 } else
1748 lun = NULL;
1749 if (lun == NULL) {
1750 /*
1751 * Why isn't LUN defined? The other side wouldn't
1752 * send a cmd if the LUN is undefined.
1753 */
1754 		printf("%s: Bad JUJU, LUN is NULL!\n", __func__);
1755
1609 /* "Logical unit not supported" */
1610 ctl_set_sense_data(&msg_info.scsi.sense_data,
1611 lun,
1612 /*sense_format*/SSD_TYPE_NONE,
1613 /*current_error*/ 1,
1614 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1615 /*asc*/ 0x25,
1616 /*ascq*/ 0x00,
1617 SSD_ELEM_NONE);
1618
1619 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1620 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1621 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1756 ctl_set_unsupported_lun(ctsio);
1757 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
1758 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1759 msg_info.hdr.serializing_sc = NULL;
1760 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1625 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1626 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1627 }
1761 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1762 sizeof(msg_info.scsi), M_WAITOK);
1763 return(1);
1764 }
1765
1766 entry = ctl_get_cmd_entry(ctsio, NULL);
1767 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
1768 mtx_unlock(&lun->lun_lock);
1769 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
1770 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1771 msg_info.hdr.serializing_sc = NULL;
1772 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1773 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1774 sizeof(msg_info.scsi), M_WAITOK);
1775 return(1);
1776 }
1777
1632 mtx_lock(&lun->lun_lock);
1633 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1778 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
1779 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;
1780
1781 /*
1782 * Every I/O goes into the OOA queue for a
1783 * particular LUN, and stays there until completion.
1784 */
1785#ifdef CTL_TIME_IO
1786 if (TAILQ_EMPTY(&lun->ooa_queue))
1787 lun->idle_time += getsbinuptime() - lun->last_busy;
1788#endif
1789 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1790
1791 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
1792 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
1793 ooa_links))) {
1794 case CTL_ACTION_BLOCK:
1795 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
1796 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
1797 blocked_links);
1798 mtx_unlock(&lun->lun_lock);
1799 break;
1800 case CTL_ACTION_PASS:
1801 case CTL_ACTION_SKIP:
1802 if (softc->ha_mode == CTL_HA_MODE_XFER) {
1803 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
1804 ctl_enqueue_rtr((union ctl_io *)ctsio);
1805 mtx_unlock(&lun->lun_lock);
1806 } else {
1807 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
1808 mtx_unlock(&lun->lun_lock);
1809
1810 /* send msg back to other side */
1811 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1812 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
1813 msg_info.hdr.msg_type = CTL_MSG_R2R;
1654#if 0
1655 printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
1656#endif
1657 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1658 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1659 }
1814 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1815 sizeof(msg_info.hdr), M_WAITOK);
1816 }
1817 break;
1818 case CTL_ACTION_OVERLAP:
1663 /* OVERLAPPED COMMANDS ATTEMPTED */
1664 ctl_set_sense_data(&msg_info.scsi.sense_data,
1665 lun,
1666 /*sense_format*/SSD_TYPE_NONE,
1667 /*current_error*/ 1,
1668 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1669 /*asc*/ 0x4E,
1670 /*ascq*/ 0x00,
1671 SSD_ELEM_NONE);
1819 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1820 mtx_unlock(&lun->lun_lock);
1821 retval = 1;
1822
1673 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1674 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1675 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1823 ctl_set_overlapped_cmd(ctsio);
1824 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
1825 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1826 msg_info.hdr.serializing_sc = NULL;
1827 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1679#if 0
1680 printf("BAD JUJU:Major Bummer Overlap\n");
1681#endif
1682 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1683 retval = 1;
1684 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1685 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1686 }
1828 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1829 sizeof(msg_info.scsi), M_WAITOK);
1830 break;
1831 case CTL_ACTION_OVERLAP_TAG:
1689 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
1690 ctl_set_sense_data(&msg_info.scsi.sense_data,
1691 lun,
1692 /*sense_format*/SSD_TYPE_NONE,
1693 /*current_error*/ 1,
1694 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1695 /*asc*/ 0x4D,
1696 /*ascq*/ ctsio->tag_num & 0xff,
1697 SSD_ELEM_NONE);
1698
1699 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1700 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1701 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1832 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1833 mtx_unlock(&lun->lun_lock);
1834 retval = 1;
1835 ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
1836 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
1837 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1838 msg_info.hdr.serializing_sc = NULL;
1839 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1705#if 0
1706 printf("BAD JUJU:Major Bummer Overlap Tag\n");
1707#endif
1708 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1709 retval = 1;
1710 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1711 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1712 }
1840 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1841 sizeof(msg_info.scsi), M_WAITOK);
1842 break;
1843 case CTL_ACTION_ERROR:
1844 default:
1716 /* "Internal target failure" */
1717 ctl_set_sense_data(&msg_info.scsi.sense_data,
1718 lun,
1719 /*sense_format*/SSD_TYPE_NONE,
1720 /*current_error*/ 1,
1721 /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
1722 /*asc*/ 0x44,
1723 /*ascq*/ 0x00,
1724 SSD_ELEM_NONE);
1845 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1846 mtx_unlock(&lun->lun_lock);
1847 retval = 1;
1848
1726 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1727 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1728 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1849 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
1850 /*retry_count*/ 0);
1851 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
1852 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1853 msg_info.hdr.serializing_sc = NULL;
1854 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1732#if 0
1733 printf("BAD JUJU:Major Bummer HW Error\n");
1734#endif
1735 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1736 retval = 1;
1737 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1738 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1739 }
1855 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1856 sizeof(msg_info.scsi), M_WAITOK);
1857 break;
1858 }
1742 mtx_unlock(&lun->lun_lock);
1859 return (retval);
1860}
1861
1862/*
1863 * Returns 0 for success, errno for failure.
1864 */
1865static int
1866ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
1867 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
1868{
1869 union ctl_io *io;
1870 int retval;
1871
1872 retval = 0;
1873
1874 mtx_lock(&lun->lun_lock);
1875 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
1876 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
1877 ooa_links)) {
1878 struct ctl_ooa_entry *entry;
1879
1880 /*
1881 * If we've got more than we can fit, just count the
1882 * remaining entries.
1883 */
1884 if (*cur_fill_num >= ooa_hdr->alloc_num)
1885 continue;
1886
1887 entry = &kern_entries[*cur_fill_num];
1888
1889 entry->tag_num = io->scsiio.tag_num;
1890 entry->lun_num = lun->lun;
1891#ifdef CTL_TIME_IO
1892 entry->start_bt = io->io_hdr.start_bt;
1893#endif
1894 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
1895 entry->cdb_len = io->scsiio.cdb_len;
1896 if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
1897 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
1898
1899 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
1900 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;
1901
1902 if (io->io_hdr.flags & CTL_FLAG_ABORT)
1903 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;
1904
1905 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
1906 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;
1907
1908 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
1909 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
1910 }
1911 mtx_unlock(&lun->lun_lock);
1912
1913 return (retval);
1914}
1915
1916static void *
1917ctl_copyin_alloc(void *user_addr, int len, char *error_str,
1918 size_t error_str_len)
1919{
1920 void *kptr;
1921
1922 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);
1923
1924 if (copyin(user_addr, kptr, len) != 0) {
1925 snprintf(error_str, error_str_len, "Error copying %d bytes "
1926 "from user address %p to kernel address %p", len,
1927 user_addr, kptr);
1928 free(kptr, M_CTL);
1929 return (NULL);
1930 }
1931
1932 return (kptr);
1933}
1934
1935static void
1936ctl_free_args(int num_args, struct ctl_be_arg *args)
1937{
1938 int i;
1939
1940 if (args == NULL)
1941 return;
1942
1943 for (i = 0; i < num_args; i++) {
1944 free(args[i].kname, M_CTL);
1945 free(args[i].kvalue, M_CTL);
1946 }
1947
1948 free(args, M_CTL);
1949}
1950
1951static struct ctl_be_arg *
1952ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
1953 char *error_str, size_t error_str_len)
1954{
1955 struct ctl_be_arg *args;
1956 int i;
1957
1958 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
1959 error_str, error_str_len);
1960
1961 if (args == NULL)
1962 goto bailout;
1963
1964 for (i = 0; i < num_args; i++) {
1965 args[i].kname = NULL;
1966 args[i].kvalue = NULL;
1967 }
1968
1969 for (i = 0; i < num_args; i++) {
1970 uint8_t *tmpptr;
1971
1972 args[i].kname = ctl_copyin_alloc(args[i].name,
1973 args[i].namelen, error_str, error_str_len);
1974 if (args[i].kname == NULL)
1975 goto bailout;
1976
1977 if (args[i].kname[args[i].namelen - 1] != '\0') {
1978 snprintf(error_str, error_str_len, "Argument %d "
1979 "name is not NUL-terminated", i);
1980 goto bailout;
1981 }
1982
1983 if (args[i].flags & CTL_BEARG_RD) {
1984 tmpptr = ctl_copyin_alloc(args[i].value,
1985 args[i].vallen, error_str, error_str_len);
1986 if (tmpptr == NULL)
1987 goto bailout;
1988 if ((args[i].flags & CTL_BEARG_ASCII)
1989 && (tmpptr[args[i].vallen - 1] != '\0')) {
1990 snprintf(error_str, error_str_len, "Argument "
1991 "%d value is not NUL-terminated", i);
1992 goto bailout;
1993 }
1994 args[i].kvalue = tmpptr;
1995 } else {
1996 args[i].kvalue = malloc(args[i].vallen,
1997 M_CTL, M_WAITOK | M_ZERO);
1998 }
1999 }
2000
2001 return (args);
2002bailout:
2003
2004 ctl_free_args(num_args, args);
2005
2006 return (NULL);
2007}
2008
2009static void
2010ctl_copyout_args(int num_args, struct ctl_be_arg *args)
2011{
2012 int i;
2013
2014 for (i = 0; i < num_args; i++) {
2015 if (args[i].flags & CTL_BEARG_WR)
2016 copyout(args[i].kvalue, args[i].value, args[i].vallen);
2017 }
2018}
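
/*
 * Illustrative sketch (assumption): the full copyin/copyout/free cycle for
 * backend arguments, the same pattern the CTL_LUN_REQ and CTL_PORT_REQ
 * handlers below follow.
 */
#if 0
static int
example_handle_be_args(struct ctl_lun_req *req)
{

	req->kern_be_args = ctl_copyin_args(req->num_be_args, req->be_args,
	    req->error_str, sizeof(req->error_str));
	if (req->kern_be_args == NULL)
		return (EINVAL);
	/* ... pass req->kern_be_args to the backend ioctl ... */
	ctl_copyout_args(req->num_be_args, req->kern_be_args);
	ctl_free_args(req->num_be_args, req->kern_be_args);
	return (0);
}
#endif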
2019
2020/*
2021 * Escape characters that are illegal or not recommended in XML.
2022 */
2023int
2024ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
2025{
2026 char *end = str + size;
2027 int retval;
2028
2029 retval = 0;
2030
2031 	for (; str < end && *str; str++) {
2032 switch (*str) {
2033 case '&':
2034 retval = sbuf_printf(sb, "&amp;");
2035 break;
2036 case '>':
2037 retval = sbuf_printf(sb, "&gt;");
2038 break;
2039 case '<':
2040 retval = sbuf_printf(sb, "&lt;");
2041 break;
2042 default:
2043 retval = sbuf_putc(sb, *str);
2044 break;
2045 }
2046
2047 if (retval != 0)
2048 break;
2049
2050 }
2051
2052 return (retval);
2053}
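
/*
 * Illustrative sketch (assumption): escaping a string that contains XML
 * metacharacters before embedding it in one of the XML lists built below.
 * The sbuf and the sample serial number exist only for this example.
 */
#if 0
static void
example_escape_serial(struct sbuf *sb)
{
	char serial[] = "ACME<&>0001";

	sbuf_printf(sb, "\t<serial_number>");
	/* Emits "ACME&lt;&amp;&gt;0001". */
	ctl_sbuf_printf_esc(sb, serial, sizeof(serial));
	sbuf_printf(sb, "</serial_number>\n");
}
#endif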
2054
2055static void
2056ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
2057{
2058 struct scsi_vpd_id_descriptor *desc;
2059 int i;
2060
2061 if (id == NULL || id->len < 4)
2062 return;
2063 desc = (struct scsi_vpd_id_descriptor *)id->data;
2064 switch (desc->id_type & SVPD_ID_TYPE_MASK) {
2065 case SVPD_ID_TYPE_T10:
2066 sbuf_printf(sb, "t10.");
2067 break;
2068 case SVPD_ID_TYPE_EUI64:
2069 sbuf_printf(sb, "eui.");
2070 break;
2071 case SVPD_ID_TYPE_NAA:
2072 sbuf_printf(sb, "naa.");
2073 break;
2074 case SVPD_ID_TYPE_SCSI_NAME:
2075 break;
2076 }
2077 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
2078 case SVPD_ID_CODESET_BINARY:
2079 for (i = 0; i < desc->length; i++)
2080 sbuf_printf(sb, "%02x", desc->identifier[i]);
2081 break;
2082 case SVPD_ID_CODESET_ASCII:
2083 sbuf_printf(sb, "%.*s", (int)desc->length,
2084 (char *)desc->identifier);
2085 break;
2086 case SVPD_ID_CODESET_UTF8:
2087 sbuf_printf(sb, "%s", (char *)desc->identifier);
2088 break;
2089 }
2090}
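
/*
 * Worked example (values assumed for illustration): a binary NAA
 * designator holding 0x5000c50012345678 is rendered by ctl_id_sbuf() as
 * "naa.5000c50012345678", while a SCSI name string designator (e.g. an
 * iSCSI IQN in UTF-8) is emitted verbatim with no prefix.
 */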
2091
2092static int
2093ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2094 struct thread *td)
2095{
2096 struct ctl_softc *softc;
2097 int retval;
2098
2099 softc = control_softc;
2100
2101 retval = 0;
2102
2103 switch (cmd) {
2104 case CTL_IO:
2105 retval = ctl_ioctl_io(dev, cmd, addr, flag, td);
2106 break;
2107 case CTL_ENABLE_PORT:
2108 case CTL_DISABLE_PORT:
2109 case CTL_SET_PORT_WWNS: {
2110 struct ctl_port *port;
2111 struct ctl_port_entry *entry;
2112
2113 entry = (struct ctl_port_entry *)addr;
2114
2115 mtx_lock(&softc->ctl_lock);
2116 STAILQ_FOREACH(port, &softc->port_list, links) {
2117 int action, done;
2118
2119 if (port->targ_port < softc->port_min ||
2120 port->targ_port >= softc->port_max)
2121 continue;
2122
2123 action = 0;
2124 done = 0;
2005
2125 if ((entry->port_type == CTL_PORT_NONE)
2126 && (entry->targ_port == port->targ_port)) {
2127 /*
2128 * If the user only wants to enable or
2129 * disable or set WWNs on a specific port,
2130 * do the operation and we're done.
2131 */
2132 action = 1;
2133 done = 1;
2134 } else if (entry->port_type & port->port_type) {
2135 /*
2136 * Compare the user's type mask with the
2137 * particular frontend type to see if we
2138 * have a match.
2139 */
2140 action = 1;
2141 done = 0;
2142
2143 /*
2144 * Make sure the user isn't trying to set
2145 * WWNs on multiple ports at the same time.
2146 */
2147 if (cmd == CTL_SET_PORT_WWNS) {
2148 printf("%s: Can't set WWNs on "
2149 "multiple ports\n", __func__);
2150 retval = EINVAL;
2151 break;
2152 }
2153 }
2035 if (action != 0) {
2036 /*
2037 * XXX KDM we have to drop the lock here,
2038 * because the online/offline operations
2039 * can potentially block. We need to
2040 * reference count the frontends so they
2041 * can't go away,
2042 */
2043 mtx_unlock(&softc->ctl_lock);
2154 if (action == 0)
2155 continue;
2156
2045 if (cmd == CTL_ENABLE_PORT) {
2046 ctl_port_online(port);
2047 } else if (cmd == CTL_DISABLE_PORT) {
2048 ctl_port_offline(port);
2049 }
2050
2157 /*
2158 * XXX KDM we have to drop the lock here, because
2159 * the online/offline operations can potentially
2160 * block. We need to reference count the frontends
2161 			 * so they can't go away.
2162 */
2163 if (cmd == CTL_ENABLE_PORT) {
2164 mtx_unlock(&softc->ctl_lock);
2165 ctl_port_online(port);
2166 mtx_lock(&softc->ctl_lock);
2052
2053 if (cmd == CTL_SET_PORT_WWNS)
2054 ctl_port_set_wwns(port,
2055 (entry->flags & CTL_PORT_WWNN_VALID) ?
2056 1 : 0, entry->wwnn,
2057 (entry->flags & CTL_PORT_WWPN_VALID) ?
2058 1 : 0, entry->wwpn);
2167 } else if (cmd == CTL_DISABLE_PORT) {
2168 mtx_unlock(&softc->ctl_lock);
2169 ctl_port_offline(port);
2170 mtx_lock(&softc->ctl_lock);
2171 } else if (cmd == CTL_SET_PORT_WWNS) {
2172 ctl_port_set_wwns(port,
2173 (entry->flags & CTL_PORT_WWNN_VALID) ?
2174 1 : 0, entry->wwnn,
2175 (entry->flags & CTL_PORT_WWPN_VALID) ?
2176 1 : 0, entry->wwpn);
2177 }
2178 if (done != 0)
2179 break;
2180 }
2181 mtx_unlock(&softc->ctl_lock);
2182 break;
2183 }
2184 case CTL_GET_PORT_LIST: {
2185 struct ctl_port *port;
2186 struct ctl_port_list *list;
2187 int i;
2188
2189 list = (struct ctl_port_list *)addr;
2190
2191 if (list->alloc_len != (list->alloc_num *
2192 sizeof(struct ctl_port_entry))) {
2193 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
2194 "alloc_num %u * sizeof(struct ctl_port_entry) "
2195 "%zu\n", __func__, list->alloc_len,
2196 list->alloc_num, sizeof(struct ctl_port_entry));
2197 retval = EINVAL;
2198 break;
2199 }
2200 list->fill_len = 0;
2201 list->fill_num = 0;
2202 list->dropped_num = 0;
2203 i = 0;
2204 mtx_lock(&softc->ctl_lock);
2205 STAILQ_FOREACH(port, &softc->port_list, links) {
2206 struct ctl_port_entry entry, *list_entry;
2207
2208 if (list->fill_num >= list->alloc_num) {
2209 list->dropped_num++;
2210 continue;
2211 }
2212
2213 entry.port_type = port->port_type;
2214 strlcpy(entry.port_name, port->port_name,
2215 sizeof(entry.port_name));
2216 entry.targ_port = port->targ_port;
2217 entry.physical_port = port->physical_port;
2218 entry.virtual_port = port->virtual_port;
2219 entry.wwnn = port->wwnn;
2220 entry.wwpn = port->wwpn;
2221 if (port->status & CTL_PORT_STATUS_ONLINE)
2222 entry.online = 1;
2223 else
2224 entry.online = 0;
2225
2226 list_entry = &list->entries[i];
2227
2228 retval = copyout(&entry, list_entry, sizeof(entry));
2229 if (retval != 0) {
2230 printf("%s: CTL_GET_PORT_LIST: copyout "
2231 "returned %d\n", __func__, retval);
2232 break;
2233 }
2234 i++;
2235 list->fill_num++;
2236 list->fill_len += sizeof(entry);
2237 }
2238 mtx_unlock(&softc->ctl_lock);
2239
2240 /*
2241 * If this is non-zero, we had a copyout fault, so there's
2242 * probably no point in attempting to set the status inside
2243 * the structure.
2244 */
2245 if (retval != 0)
2246 break;
2247
2248 if (list->dropped_num > 0)
2249 list->status = CTL_PORT_LIST_NEED_MORE_SPACE;
2250 else
2251 list->status = CTL_PORT_LIST_OK;
2252 break;
2253 }
2254 case CTL_DUMP_OOA: {
2255 struct ctl_lun *lun;
2256 union ctl_io *io;
2257 char printbuf[128];
2258 struct sbuf sb;
2259
2260 mtx_lock(&softc->ctl_lock);
2261 printf("Dumping OOA queues:\n");
2262 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2263 mtx_lock(&lun->lun_lock);
2264 for (io = (union ctl_io *)TAILQ_FIRST(
2265 &lun->ooa_queue); io != NULL;
2266 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2267 ooa_links)) {
2268 sbuf_new(&sb, printbuf, sizeof(printbuf),
2269 SBUF_FIXEDLEN);
2270 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ",
2271 (intmax_t)lun->lun,
2272 io->scsiio.tag_num,
2273 (io->io_hdr.flags &
2274 				    CTL_FLAG_BLOCKED) ? " BLOCKED" : "",
2275 (io->io_hdr.flags &
2276 CTL_FLAG_DMA_INPROG) ? " DMA" : "",
2277 (io->io_hdr.flags &
2278 CTL_FLAG_ABORT) ? " ABORT" : "",
2279 (io->io_hdr.flags &
2280 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
2281 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
2282 sbuf_finish(&sb);
2283 printf("%s\n", sbuf_data(&sb));
2284 }
2285 mtx_unlock(&lun->lun_lock);
2286 }
2287 printf("OOA queues dump done\n");
2288 mtx_unlock(&softc->ctl_lock);
2289 break;
2290 }
2291 case CTL_GET_OOA: {
2292 struct ctl_lun *lun;
2293 struct ctl_ooa *ooa_hdr;
2294 struct ctl_ooa_entry *entries;
2295 uint32_t cur_fill_num;
2296
2297 ooa_hdr = (struct ctl_ooa *)addr;
2298
2299 if ((ooa_hdr->alloc_len == 0)
2300 || (ooa_hdr->alloc_num == 0)) {
2301 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
2302 "must be non-zero\n", __func__,
2303 ooa_hdr->alloc_len, ooa_hdr->alloc_num);
2304 retval = EINVAL;
2305 break;
2306 }
2307
2308 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
2309 sizeof(struct ctl_ooa_entry))) {
2310 printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
2311 "num %d * sizeof(struct ctl_ooa_entry) %zd\n",
2312 __func__, ooa_hdr->alloc_len,
2313 			    ooa_hdr->alloc_num, sizeof(struct ctl_ooa_entry));
2314 retval = EINVAL;
2315 break;
2316 }
2317
2318 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
2319 if (entries == NULL) {
2320 printf("%s: could not allocate %d bytes for OOA "
2321 "dump\n", __func__, ooa_hdr->alloc_len);
2322 retval = ENOMEM;
2323 break;
2324 }
2325
2326 mtx_lock(&softc->ctl_lock);
2327 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
2328 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
2329 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
2330 mtx_unlock(&softc->ctl_lock);
2331 free(entries, M_CTL);
2332 printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
2333 __func__, (uintmax_t)ooa_hdr->lun_num);
2334 retval = EINVAL;
2335 break;
2336 }
2337
2338 cur_fill_num = 0;
2339
2340 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
2341 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2342 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,
2343 ooa_hdr, entries);
2344 if (retval != 0)
2345 break;
2346 }
2347 if (retval != 0) {
2348 mtx_unlock(&softc->ctl_lock);
2349 free(entries, M_CTL);
2350 break;
2351 }
2352 } else {
2353 lun = softc->ctl_luns[ooa_hdr->lun_num];
2354
2355 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr,
2356 entries);
2357 }
2358 mtx_unlock(&softc->ctl_lock);
2359
2360 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
2361 ooa_hdr->fill_len = ooa_hdr->fill_num *
2362 sizeof(struct ctl_ooa_entry);
2363 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
2364 if (retval != 0) {
2365 printf("%s: error copying out %d bytes for OOA dump\n",
2366 __func__, ooa_hdr->fill_len);
2367 }
2368
2369 getbintime(&ooa_hdr->cur_bt);
2370
2371 if (cur_fill_num > ooa_hdr->alloc_num) {
2372 			ooa_hdr->dropped_num = cur_fill_num - ooa_hdr->alloc_num;
2373 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
2374 } else {
2375 ooa_hdr->dropped_num = 0;
2376 ooa_hdr->status = CTL_OOA_OK;
2377 }
2378
2379 free(entries, M_CTL);
2380 break;
2381 }
2382 case CTL_CHECK_OOA: {
2383 union ctl_io *io;
2384 struct ctl_lun *lun;
2385 struct ctl_ooa_info *ooa_info;
2386
2387
2388 ooa_info = (struct ctl_ooa_info *)addr;
2389
2390 if (ooa_info->lun_id >= CTL_MAX_LUNS) {
2391 ooa_info->status = CTL_OOA_INVALID_LUN;
2392 break;
2393 }
2394 mtx_lock(&softc->ctl_lock);
2395 lun = softc->ctl_luns[ooa_info->lun_id];
2396 if (lun == NULL) {
2397 mtx_unlock(&softc->ctl_lock);
2398 ooa_info->status = CTL_OOA_INVALID_LUN;
2399 break;
2400 }
2401 mtx_lock(&lun->lun_lock);
2402 mtx_unlock(&softc->ctl_lock);
2403 ooa_info->num_entries = 0;
2404 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
2405 io != NULL; io = (union ctl_io *)TAILQ_NEXT(
2406 &io->io_hdr, ooa_links)) {
2407 ooa_info->num_entries++;
2408 }
2409 mtx_unlock(&lun->lun_lock);
2410
2411 ooa_info->status = CTL_OOA_SUCCESS;
2412
2413 break;
2414 }
2415 case CTL_DELAY_IO: {
2416 struct ctl_io_delay_info *delay_info;
2417#ifdef CTL_IO_DELAY
2418 struct ctl_lun *lun;
2419#endif /* CTL_IO_DELAY */
2420
2421 delay_info = (struct ctl_io_delay_info *)addr;
2422
2423#ifdef CTL_IO_DELAY
2424 mtx_lock(&softc->ctl_lock);
2425
2426 if ((delay_info->lun_id >= CTL_MAX_LUNS)
2427 || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
2428 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2429 } else {
2430 lun = softc->ctl_luns[delay_info->lun_id];
2431 mtx_lock(&lun->lun_lock);
2432
2433 delay_info->status = CTL_DELAY_STATUS_OK;
2434
2435 switch (delay_info->delay_type) {
2436 case CTL_DELAY_TYPE_CONT:
2437 break;
2438 case CTL_DELAY_TYPE_ONESHOT:
2439 break;
2440 default:
2441 delay_info->status =
2442 CTL_DELAY_STATUS_INVALID_TYPE;
2443 break;
2444 }
2445
2446 switch (delay_info->delay_loc) {
2447 case CTL_DELAY_LOC_DATAMOVE:
2448 lun->delay_info.datamove_type =
2449 delay_info->delay_type;
2450 lun->delay_info.datamove_delay =
2451 delay_info->delay_secs;
2452 break;
2453 case CTL_DELAY_LOC_DONE:
2454 lun->delay_info.done_type =
2455 delay_info->delay_type;
2456 lun->delay_info.done_delay =
2457 delay_info->delay_secs;
2458 break;
2459 default:
2460 delay_info->status =
2461 CTL_DELAY_STATUS_INVALID_LOC;
2462 break;
2463 }
2464 mtx_unlock(&lun->lun_lock);
2465 }
2466
2467 mtx_unlock(&softc->ctl_lock);
2468#else
2469 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2470#endif /* CTL_IO_DELAY */
2471 break;
2472 }
2473 case CTL_REALSYNC_SET: {
2474 int *syncstate;
2475
2476 syncstate = (int *)addr;
2477
2478 mtx_lock(&softc->ctl_lock);
2479 switch (*syncstate) {
2480 case 0:
2481 softc->flags &= ~CTL_FLAG_REAL_SYNC;
2482 break;
2483 case 1:
2484 softc->flags |= CTL_FLAG_REAL_SYNC;
2485 break;
2486 default:
2487 retval = EINVAL;
2488 break;
2489 }
2490 mtx_unlock(&softc->ctl_lock);
2491 break;
2492 }
2493 case CTL_REALSYNC_GET: {
2494 int *syncstate;
2495
2496 syncstate = (int*)addr;
2497
2498 mtx_lock(&softc->ctl_lock);
2499 if (softc->flags & CTL_FLAG_REAL_SYNC)
2500 *syncstate = 1;
2501 else
2502 *syncstate = 0;
2503 mtx_unlock(&softc->ctl_lock);
2504
2505 break;
2506 }
2507 case CTL_SETSYNC:
2508 case CTL_GETSYNC: {
2509 struct ctl_sync_info *sync_info;
2510 struct ctl_lun *lun;
2511
2512 sync_info = (struct ctl_sync_info *)addr;
2513
2514 mtx_lock(&softc->ctl_lock);
2515 lun = softc->ctl_luns[sync_info->lun_id];
2516 if (lun == NULL) {
2517 mtx_unlock(&softc->ctl_lock);
2518 			sync_info->status = CTL_GS_SYNC_NO_LUN;
			break;
2519 		}
2520 /*
2521 * Get or set the sync interval. We're not bounds checking
2522 		 * in the set case; hopefully the user won't do something
2523 * silly.
2524 */
2525 mtx_lock(&lun->lun_lock);
2526 mtx_unlock(&softc->ctl_lock);
2527 if (cmd == CTL_GETSYNC)
2528 sync_info->sync_interval = lun->sync_interval;
2529 else
2530 lun->sync_interval = sync_info->sync_interval;
2531 mtx_unlock(&lun->lun_lock);
2532
2533 sync_info->status = CTL_GS_SYNC_OK;
2534
2535 break;
2536 }
2537 case CTL_GETSTATS: {
2538 struct ctl_stats *stats;
2539 struct ctl_lun *lun;
2540 int i;
2541
2542 stats = (struct ctl_stats *)addr;
2543
2544 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
2545 stats->alloc_len) {
2546 stats->status = CTL_SS_NEED_MORE_SPACE;
2547 stats->num_luns = softc->num_luns;
2548 break;
2549 }
2550 /*
2551 * XXX KDM no locking here. If the LUN list changes,
2552 * things can blow up.
2553 */
2554 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL;
2555 i++, lun = STAILQ_NEXT(lun, links)) {
2556 retval = copyout(&lun->stats, &stats->lun_stats[i],
2557 sizeof(lun->stats));
2558 if (retval != 0)
2559 break;
2560 }
2561 stats->num_luns = softc->num_luns;
2562 stats->fill_len = sizeof(struct ctl_lun_io_stats) *
2563 softc->num_luns;
2564 stats->status = CTL_SS_OK;
2565#ifdef CTL_TIME_IO
2566 stats->flags = CTL_STATS_FLAG_TIME_VALID;
2567#else
2568 stats->flags = CTL_STATS_FLAG_NONE;
2569#endif
2570 getnanouptime(&stats->timestamp);
2571 break;
2572 }
2573 case CTL_ERROR_INJECT: {
2574 struct ctl_error_desc *err_desc, *new_err_desc;
2575 struct ctl_lun *lun;
2576
2577 err_desc = (struct ctl_error_desc *)addr;
2578
2579 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
2580 M_WAITOK | M_ZERO);
2581 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
2582
2583 mtx_lock(&softc->ctl_lock);
2584 lun = softc->ctl_luns[err_desc->lun_id];
2585 if (lun == NULL) {
2586 mtx_unlock(&softc->ctl_lock);
2587 free(new_err_desc, M_CTL);
2588 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
2589 __func__, (uintmax_t)err_desc->lun_id);
2590 retval = EINVAL;
2591 break;
2592 }
2593 mtx_lock(&lun->lun_lock);
2594 mtx_unlock(&softc->ctl_lock);
2595
2596 /*
2597 * We could do some checking here to verify the validity
2598 * of the request, but given the complexity of error
2599 * injection requests, the checking logic would be fairly
2600 * complex.
2601 *
2602 * For now, if the request is invalid, it just won't get
2603 * executed and might get deleted.
2604 */
2605 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
2606
2607 /*
2608 * XXX KDM check to make sure the serial number is unique,
2609 * in case we somehow manage to wrap. That shouldn't
2610 * happen for a very long time, but it's the right thing to
2611 * do.
2612 */
2613 new_err_desc->serial = lun->error_serial;
2614 err_desc->serial = lun->error_serial;
2615 lun->error_serial++;
2616
2617 mtx_unlock(&lun->lun_lock);
2618 break;
2619 }
2620 case CTL_ERROR_INJECT_DELETE: {
2621 struct ctl_error_desc *delete_desc, *desc, *desc2;
2622 struct ctl_lun *lun;
2623 int delete_done;
2624
2625 delete_desc = (struct ctl_error_desc *)addr;
2626 delete_done = 0;
2627
2628 mtx_lock(&softc->ctl_lock);
2629 lun = softc->ctl_luns[delete_desc->lun_id];
2630 if (lun == NULL) {
2631 mtx_unlock(&softc->ctl_lock);
2632 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
2633 __func__, (uintmax_t)delete_desc->lun_id);
2634 retval = EINVAL;
2635 break;
2636 }
2637 mtx_lock(&lun->lun_lock);
2638 mtx_unlock(&softc->ctl_lock);
2639 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
2640 if (desc->serial != delete_desc->serial)
2641 continue;
2642
2643 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
2644 links);
2645 free(desc, M_CTL);
2646 delete_done = 1;
2647 }
2648 mtx_unlock(&lun->lun_lock);
2649 if (delete_done == 0) {
2650 printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
2651 "error serial %ju on LUN %u\n", __func__,
2652 delete_desc->serial, delete_desc->lun_id);
2653 retval = EINVAL;
2654 break;
2655 }
2656 break;
2657 }
2658 case CTL_DUMP_STRUCTS: {
2659 int i, j, k;
2660 struct ctl_port *port;
2661 struct ctl_frontend *fe;
2662
2663 mtx_lock(&softc->ctl_lock);
2664 printf("CTL Persistent Reservation information start:\n");
2665 for (i = 0; i < CTL_MAX_LUNS; i++) {
2666 struct ctl_lun *lun;
2667
2668 lun = softc->ctl_luns[i];
2669
2670 if ((lun == NULL)
2671 || ((lun->flags & CTL_LUN_DISABLED) != 0))
2672 continue;
2673
2556 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
2674 for (j = 0; j < CTL_MAX_PORTS; j++) {
2675 if (lun->pr_keys[j] == NULL)
2676 continue;
2677 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
2678 if (lun->pr_keys[j][k] == 0)
2679 continue;
2680 printf(" LUN %d port %d iid %d key "
2681 "%#jx\n", i, j, k,
2682 (uintmax_t)lun->pr_keys[j][k]);
2683 }
2684 }
2685 }
2686 printf("CTL Persistent Reservation information end\n");
2687 printf("CTL Ports:\n");
2688 STAILQ_FOREACH(port, &softc->port_list, links) {
2689 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
2690 "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
2691 port->frontend->name, port->port_type,
2692 port->physical_port, port->virtual_port,
2693 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
2694 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
2695 if (port->wwpn_iid[j].in_use == 0 &&
2696 port->wwpn_iid[j].wwpn == 0 &&
2697 port->wwpn_iid[j].name == NULL)
2698 continue;
2699
2700 printf(" iid %u use %d WWPN %#jx '%s'\n",
2701 j, port->wwpn_iid[j].in_use,
2702 (uintmax_t)port->wwpn_iid[j].wwpn,
2703 port->wwpn_iid[j].name);
2704 }
2705 }
2706 printf("CTL Port information end\n");
2707 mtx_unlock(&softc->ctl_lock);
2708 /*
2709 * XXX KDM calling this without a lock. We'd likely want
2710 * to drop the lock before calling the frontend's dump
2711 * routine anyway.
2712 */
2713 printf("CTL Frontends:\n");
2714 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2715 printf(" Frontend '%s'\n", fe->name);
2716 if (fe->fe_dump != NULL)
2717 fe->fe_dump();
2718 }
2719 printf("CTL Frontend information end\n");
2720 break;
2721 }
2722 case CTL_LUN_REQ: {
2723 struct ctl_lun_req *lun_req;
2724 struct ctl_backend_driver *backend;
2725
2726 lun_req = (struct ctl_lun_req *)addr;
2727
2728 backend = ctl_backend_find(lun_req->backend);
2729 if (backend == NULL) {
2730 lun_req->status = CTL_LUN_ERROR;
2731 snprintf(lun_req->error_str,
2732 sizeof(lun_req->error_str),
2733 "Backend \"%s\" not found.",
2734 lun_req->backend);
2735 break;
2736 }
2737 if (lun_req->num_be_args > 0) {
2738 lun_req->kern_be_args = ctl_copyin_args(
2739 lun_req->num_be_args,
2740 lun_req->be_args,
2741 lun_req->error_str,
2742 sizeof(lun_req->error_str));
2743 if (lun_req->kern_be_args == NULL) {
2744 lun_req->status = CTL_LUN_ERROR;
2745 break;
2746 }
2747 }
2748
2749 retval = backend->ioctl(dev, cmd, addr, flag, td);
2750
2751 if (lun_req->num_be_args > 0) {
2752 ctl_copyout_args(lun_req->num_be_args,
2753 lun_req->kern_be_args);
2754 ctl_free_args(lun_req->num_be_args,
2755 lun_req->kern_be_args);
2756 }
2757 break;
2758 }
2759 case CTL_LUN_LIST: {
2760 struct sbuf *sb;
2761 struct ctl_lun *lun;
2762 struct ctl_lun_list *list;
2763 struct ctl_option *opt;
2764
2765 list = (struct ctl_lun_list *)addr;
2766
2767 /*
2768 * Allocate a fixed length sbuf here, based on the length
2769 * of the user's buffer. We could allocate an auto-extending
2770 * buffer, and then tell the user how much larger our
2771 * amount of data is than his buffer, but that presents
2772 * some problems:
2773 *
2774 * 1. The sbuf(9) routines use a blocking malloc, and so
2775 * we can't hold a lock while calling them with an
2776 * auto-extending buffer.
2777 *
2778 * 2. There is not currently a LUN reference counting
2779 * mechanism, outside of outstanding transactions on
2780 * the LUN's OOA queue. So a LUN could go away on us
2781 * while we're getting the LUN number, backend-specific
2782 * information, etc. Thus, given the way things
2783 * currently work, we need to hold the CTL lock while
2784 * grabbing LUN information.
2785 *
2786 * So, from the user's standpoint, the best thing to do is
2787 * allocate what he thinks is a reasonable buffer length,
2788 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
2789 * double the buffer length and try again. (And repeat
2790 * that until he succeeds.)
2791 */
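		/*
		 * Illustrative userland sketch (an assumption, not part of
		 * this file): the doubling retry loop described above, run
		 * against an open /dev/cam/ctl descriptor "fd":
		 *
		 *	struct ctl_lun_list list;
		 *	int len = 4096;
		 *
		 *	do {
		 *		memset(&list, 0, sizeof(list));
		 *		list.alloc_len = len;
		 *		list.lun_xml = malloc(len);
		 *		if (ioctl(fd, CTL_LUN_LIST, &list) != 0)
		 *			err(1, "CTL_LUN_LIST");
		 *		if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE) {
		 *			free(list.lun_xml);
		 *			len *= 2;
		 *		}
		 *	} while (list.status == CTL_LUN_LIST_NEED_MORE_SPACE);
		 */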
2792 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
2793 if (sb == NULL) {
2794 list->status = CTL_LUN_LIST_ERROR;
2795 snprintf(list->error_str, sizeof(list->error_str),
2796 "Unable to allocate %d bytes for LUN list",
2797 list->alloc_len);
2798 break;
2799 }
2800
2801 sbuf_printf(sb, "<ctllunlist>\n");
2802
2803 mtx_lock(&softc->ctl_lock);
2804 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2805 mtx_lock(&lun->lun_lock);
2806 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
2807 (uintmax_t)lun->lun);
2808
2809 /*
2810 * Bail out as soon as we see that we've overfilled
2811 * the buffer.
2812 */
2813 if (retval != 0)
2814 break;
2815
2816 retval = sbuf_printf(sb, "\t<backend_type>%s"
2817 "</backend_type>\n",
2818 (lun->backend == NULL) ? "none" :
2819 lun->backend->name);
2820
2821 if (retval != 0)
2822 break;
2823
2824 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
2825 lun->be_lun->lun_type);
2826
2827 if (retval != 0)
2828 break;
2829
2830 if (lun->backend == NULL) {
2831 retval = sbuf_printf(sb, "</lun>\n");
2832 if (retval != 0)
2833 break;
2834 continue;
2835 }
2836
2837 retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
2838 (lun->be_lun->maxlba > 0) ?
2839 lun->be_lun->maxlba + 1 : 0);
2840
2841 if (retval != 0)
2842 break;
2843
2844 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
2845 lun->be_lun->blocksize);
2846
2847 if (retval != 0)
2848 break;
2849
2850 retval = sbuf_printf(sb, "\t<serial_number>");
2851
2852 if (retval != 0)
2853 break;
2854
2855 retval = ctl_sbuf_printf_esc(sb,
2856 lun->be_lun->serial_num,
2857 sizeof(lun->be_lun->serial_num));
2858
2859 if (retval != 0)
2860 break;
2861
2862 retval = sbuf_printf(sb, "</serial_number>\n");
2863
2864 if (retval != 0)
2865 break;
2866
2867 retval = sbuf_printf(sb, "\t<device_id>");
2868
2869 if (retval != 0)
2870 break;
2871
2872 retval = ctl_sbuf_printf_esc(sb,
2873 lun->be_lun->device_id,
2874 sizeof(lun->be_lun->device_id));
2875
2876 if (retval != 0)
2877 break;
2878
2879 retval = sbuf_printf(sb, "</device_id>\n");
2880
2881 if (retval != 0)
2882 break;
2883
2884 if (lun->backend->lun_info != NULL) {
2885 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb);
2886 if (retval != 0)
2887 break;
2888 }
2889 STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
2890 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
2891 opt->name, opt->value, opt->name);
2892 if (retval != 0)
2893 break;
2894 }
2895
2896 retval = sbuf_printf(sb, "</lun>\n");
2897
2898 if (retval != 0)
2899 break;
2900 mtx_unlock(&lun->lun_lock);
2901 }
2902 if (lun != NULL)
2903 mtx_unlock(&lun->lun_lock);
2904 mtx_unlock(&softc->ctl_lock);
2905
2906 if ((retval != 0)
2907 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
2908 retval = 0;
2909 sbuf_delete(sb);
2910 list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
2911 snprintf(list->error_str, sizeof(list->error_str),
2912 "Out of space, %d bytes is too small",
2913 list->alloc_len);
2914 break;
2915 }
2916
2917 sbuf_finish(sb);
2918
2919 retval = copyout(sbuf_data(sb), list->lun_xml,
2920 sbuf_len(sb) + 1);
2921
2922 list->fill_len = sbuf_len(sb) + 1;
2923 list->status = CTL_LUN_LIST_OK;
2924 sbuf_delete(sb);
2925 break;
2926 }
2927 case CTL_ISCSI: {
2928 struct ctl_iscsi *ci;
2929 struct ctl_frontend *fe;
2930
2931 ci = (struct ctl_iscsi *)addr;
2932
2933 fe = ctl_frontend_find("iscsi");
2934 if (fe == NULL) {
2935 ci->status = CTL_ISCSI_ERROR;
2936 snprintf(ci->error_str, sizeof(ci->error_str),
2937 "Frontend \"iscsi\" not found.");
2938 break;
2939 }
2940
2941 retval = fe->ioctl(dev, cmd, addr, flag, td);
2942 break;
2943 }
2944 case CTL_PORT_REQ: {
2945 struct ctl_req *req;
2946 struct ctl_frontend *fe;
2947
2948 req = (struct ctl_req *)addr;
2949
2950 fe = ctl_frontend_find(req->driver);
2951 if (fe == NULL) {
2952 req->status = CTL_LUN_ERROR;
2953 snprintf(req->error_str, sizeof(req->error_str),
2954 "Frontend \"%s\" not found.", req->driver);
2955 break;
2956 }
2957 if (req->num_args > 0) {
2958 req->kern_args = ctl_copyin_args(req->num_args,
2959 req->args, req->error_str, sizeof(req->error_str));
2960 if (req->kern_args == NULL) {
2961 req->status = CTL_LUN_ERROR;
2962 break;
2963 }
2964 }
2965
2848 retval = fe->ioctl(dev, cmd, addr, flag, td);
2966 if (fe->ioctl)
2967 retval = fe->ioctl(dev, cmd, addr, flag, td);
2968 else
2969 retval = ENODEV;
2970
2971 if (req->num_args > 0) {
2972 ctl_copyout_args(req->num_args, req->kern_args);
2973 ctl_free_args(req->num_args, req->kern_args);
2974 }
2975 break;
2976 }
2977 case CTL_PORT_LIST: {
2978 struct sbuf *sb;
2979 struct ctl_port *port;
2980 struct ctl_lun_list *list;
2981 struct ctl_option *opt;
2982 int j;
2983 uint32_t plun;
2984
2985 list = (struct ctl_lun_list *)addr;
2986
2987 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
2988 if (sb == NULL) {
2989 list->status = CTL_LUN_LIST_ERROR;
2990 snprintf(list->error_str, sizeof(list->error_str),
2991 "Unable to allocate %d bytes for LUN list",
2992 list->alloc_len);
2993 break;
2994 }
2995
2996 sbuf_printf(sb, "<ctlportlist>\n");
2997
2998 mtx_lock(&softc->ctl_lock);
2999 STAILQ_FOREACH(port, &softc->port_list, links) {
3000 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
3001 (uintmax_t)port->targ_port);
3002
3003 /*
3004 * Bail out as soon as we see that we've overfilled
3005 * the buffer.
3006 */
3007 if (retval != 0)
3008 break;
3009
3010 retval = sbuf_printf(sb, "\t<frontend_type>%s"
3011 "</frontend_type>\n", port->frontend->name);
3012 if (retval != 0)
3013 break;
3014
3015 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
3016 port->port_type);
3017 if (retval != 0)
3018 break;
3019
3020 retval = sbuf_printf(sb, "\t<online>%s</online>\n",
3021 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
3022 if (retval != 0)
3023 break;
3024
3025 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
3026 port->port_name);
3027 if (retval != 0)
3028 break;
3029
3030 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
3031 port->physical_port);
3032 if (retval != 0)
3033 break;
3034
3035 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
3036 port->virtual_port);
3037 if (retval != 0)
3038 break;
3039
3040 if (port->target_devid != NULL) {
3041 sbuf_printf(sb, "\t<target>");
3042 ctl_id_sbuf(port->target_devid, sb);
3043 sbuf_printf(sb, "</target>\n");
3044 }
3045
3046 if (port->port_devid != NULL) {
3047 sbuf_printf(sb, "\t<port>");
3048 ctl_id_sbuf(port->port_devid, sb);
3049 sbuf_printf(sb, "</port>\n");
3050 }
3051
3052 if (port->port_info != NULL) {
3053 retval = port->port_info(port->onoff_arg, sb);
3054 if (retval != 0)
3055 break;
3056 }
3057 STAILQ_FOREACH(opt, &port->options, links) {
3058 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3059 opt->name, opt->value, opt->name);
3060 if (retval != 0)
3061 break;
3062 }
3063
3064 if (port->lun_map != NULL) {
3065 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
3066 for (j = 0; j < CTL_MAX_LUNS; j++) {
3067 plun = ctl_lun_map_from_port(port, j);
3068 if (plun >= CTL_MAX_LUNS)
3069 continue;
3070 sbuf_printf(sb,
3071 "\t<lun id=\"%u\">%u</lun>\n",
3072 j, plun);
3073 }
3074 }
3075
3076 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
3077 if (port->wwpn_iid[j].in_use == 0 ||
3078 (port->wwpn_iid[j].wwpn == 0 &&
3079 port->wwpn_iid[j].name == NULL))
3080 continue;
3081
3082 if (port->wwpn_iid[j].name != NULL)
3083 retval = sbuf_printf(sb,
3084 "\t<initiator id=\"%u\">%s</initiator>\n",
3085 j, port->wwpn_iid[j].name);
3086 else
3087 retval = sbuf_printf(sb,
3088 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n",
3089 j, port->wwpn_iid[j].wwpn);
3090 if (retval != 0)
3091 break;
3092 }
3093 if (retval != 0)
3094 break;
3095
3096 retval = sbuf_printf(sb, "</targ_port>\n");
3097 if (retval != 0)
3098 break;
3099 }
3100 mtx_unlock(&softc->ctl_lock);
3101
3102 if ((retval != 0)
3103 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
3104 retval = 0;
3105 sbuf_delete(sb);
3106 list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3107 snprintf(list->error_str, sizeof(list->error_str),
3108 "Out of space, %d bytes is too small",
3109 list->alloc_len);
3110 break;
3111 }
3112
3113 sbuf_finish(sb);
3114
3115 retval = copyout(sbuf_data(sb), list->lun_xml,
3116 sbuf_len(sb) + 1);
3117
3118 list->fill_len = sbuf_len(sb) + 1;
3119 list->status = CTL_LUN_LIST_OK;
3120 sbuf_delete(sb);
3121 break;
3122 }
3123 case CTL_LUN_MAP: {
3124 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr;
3125 struct ctl_port *port;
3126
3127 mtx_lock(&softc->ctl_lock);
3007 if (lm->port >= CTL_MAX_PORTS ||
3128 if (lm->port < softc->port_min ||
3129 lm->port >= softc->port_max ||
3130 (port = softc->ctl_ports[lm->port]) == NULL) {
3131 mtx_unlock(&softc->ctl_lock);
3132 return (ENXIO);
3133 }
3134 	mtx_unlock(&softc->ctl_lock); /* XXX: port_enable sleeps */
3135 if (lm->plun < CTL_MAX_LUNS) {
3136 if (lm->lun == UINT32_MAX)
3137 retval = ctl_lun_map_unset(port, lm->plun);
3138 else if (lm->lun < CTL_MAX_LUNS &&
3139 softc->ctl_luns[lm->lun] != NULL)
3140 retval = ctl_lun_map_set(port, lm->plun, lm->lun);
3141 else
3142 return (ENXIO);
3143 } else if (lm->plun == UINT32_MAX) {
3144 if (lm->lun == UINT32_MAX)
3145 retval = ctl_lun_map_deinit(port);
3146 else
3147 retval = ctl_lun_map_init(port);
3148 } else
3149 return (ENXIO);
3150 break;
3151 }
3152 default: {
3153 /* XXX KDM should we fix this? */
3154#if 0
3155 struct ctl_backend_driver *backend;
3156 unsigned int type;
3157 int found;
3158
3159 found = 0;
3160
3161 /*
3162 * We encode the backend type as the ioctl type for backend
3163 * ioctls. So parse it out here, and then search for a
3164 * backend of this type.
3165 */
3166 type = _IOC_TYPE(cmd);
3167
3168 STAILQ_FOREACH(backend, &softc->be_list, links) {
3169 if (backend->type == type) {
3170 found = 1;
3171 break;
3172 }
3173 }
3174 if (found == 0) {
3175 printf("ctl: unknown ioctl command %#lx or backend "
3176 "%d\n", cmd, type);
3177 retval = EINVAL;
3178 break;
3179 }
3180 retval = backend->ioctl(dev, cmd, addr, flag, td);
3181#endif
3182 retval = ENOTTY;
3183 break;
3184 }
3185 }
3186 return (retval);
3187}
3188
3189uint32_t
3190ctl_get_initindex(struct ctl_nexus *nexus)
3191{
3070 if (nexus->targ_port < CTL_MAX_PORTS)
3071 return (nexus->initid +
3072 (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3073 else
3074 return (nexus->initid +
3075 ((nexus->targ_port - CTL_MAX_PORTS) *
3076 CTL_MAX_INIT_PER_PORT));
3077}
3078
3079uint32_t
3080ctl_get_resindex(struct ctl_nexus *nexus)
3081{
3192 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3193}
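
/*
 * Worked example (illustrative, not compiled): an initiator with initid 5
 * logged in on targ_port 3 gets initiator index
 * 5 + 3 * CTL_MAX_INIT_PER_PORT, which is unique across all ports and is
 * what the persistent reservation code uses as its "residx".
 */
#if 0
static uint32_t
example_initindex(void)
{
	struct ctl_nexus nexus;

	nexus.targ_port = 3;
	nexus.initid = 5;
	return (ctl_get_initindex(&nexus));	/* 5 + 3 * CTL_MAX_INIT_PER_PORT */
}
#endif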
3194
3085uint32_t
3086ctl_port_idx(int port_num)
3087{
3088 if (port_num < CTL_MAX_PORTS)
3089 return(port_num);
3090 else
3091 return(port_num - CTL_MAX_PORTS);
3092}
3093
3195int
3196ctl_lun_map_init(struct ctl_port *port)
3197{
3198 struct ctl_softc *softc = control_softc;
3199 struct ctl_lun *lun;
3200 uint32_t i;
3201
3202 if (port->lun_map == NULL)
3203 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
3204 M_CTL, M_NOWAIT);
3205 if (port->lun_map == NULL)
3206 return (ENOMEM);
3207 for (i = 0; i < CTL_MAX_LUNS; i++)
3208 port->lun_map[i] = UINT32_MAX;
3108 if (port->status & CTL_PORT_STATUS_ONLINE &&
3109 port->lun_disable != NULL) {
3110 STAILQ_FOREACH(lun, &softc->lun_list, links)
3111 port->lun_disable(port->targ_lun_arg, lun->lun);
3209 if (port->status & CTL_PORT_STATUS_ONLINE) {
3210 if (port->lun_disable != NULL) {
3211 STAILQ_FOREACH(lun, &softc->lun_list, links)
3212 port->lun_disable(port->targ_lun_arg, lun->lun);
3213 }
3214 ctl_isc_announce_port(port);
3215 }
3216 return (0);
3217}
3218
3219int
3220ctl_lun_map_deinit(struct ctl_port *port)
3221{
3222 struct ctl_softc *softc = control_softc;
3223 struct ctl_lun *lun;
3224
3225 if (port->lun_map == NULL)
3226 return (0);
3227 free(port->lun_map, M_CTL);
3228 port->lun_map = NULL;
3126 if (port->status & CTL_PORT_STATUS_ONLINE &&
3127 port->lun_enable != NULL) {
3128 STAILQ_FOREACH(lun, &softc->lun_list, links)
3129 port->lun_enable(port->targ_lun_arg, lun->lun);
3229 if (port->status & CTL_PORT_STATUS_ONLINE) {
3230 if (port->lun_enable != NULL) {
3231 STAILQ_FOREACH(lun, &softc->lun_list, links)
3232 port->lun_enable(port->targ_lun_arg, lun->lun);
3233 }
3234 ctl_isc_announce_port(port);
3235 }
3236 return (0);
3237}
3238
3239int
3240ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
3241{
3242 int status;
3243 uint32_t old;
3244
3245 if (port->lun_map == NULL) {
3246 status = ctl_lun_map_init(port);
3247 if (status != 0)
3248 return (status);
3249 }
3250 old = port->lun_map[plun];
3251 port->lun_map[plun] = glun;
3252 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
3253 if (port->lun_enable != NULL)
3254 port->lun_enable(port->targ_lun_arg, plun);
3255 ctl_isc_announce_port(port);
3256 }
3257 return (0);
3258}
3259
3260int
3261ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
3262{
3263 uint32_t old;
3264
3265 if (port->lun_map == NULL)
3266 return (0);
3267 old = port->lun_map[plun];
3268 port->lun_map[plun] = UINT32_MAX;
3269 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
3270 if (port->lun_disable != NULL)
3271 port->lun_disable(port->targ_lun_arg, plun);
3272 ctl_isc_announce_port(port);
3273 }
3274 return (0);
3275}
3276
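/*
 * port->lun_map, when present, translates port-visible LUN ids into global
 * CTL LUN ids, with UINT32_MAX marking an unmapped slot.
 * ctl_lun_map_from_port() goes port -> global; ctl_lun_map_to_port() does
 * the reverse lookup by linear scan.
 */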
3277uint32_t
3278ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
3279{
3280
3281 if (port == NULL)
3282 return (UINT32_MAX);
3283 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS)
3284 return (lun_id);
3285 return (port->lun_map[lun_id]);
3286}
3287
3288uint32_t
3289ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
3290{
3291 uint32_t i;
3292
3293 if (port == NULL)
3294 return (UINT32_MAX);
3295 if (port->lun_map == NULL)
3296 return (lun_id);
3297 for (i = 0; i < CTL_MAX_LUNS; i++) {
3298 if (port->lun_map[i] == lun_id)
3299 return (i);
3300 }
3301 return (UINT32_MAX);
3302}
3303
3304static struct ctl_port *
3305ctl_io_port(struct ctl_io_hdr *io_hdr)
3306{
3307
3308 return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
3309}
3310
3311int
3312ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
3313{
3314 int i;
3315
3316 for (i = first; i < last; i++) {
3317 if ((mask[i / 32] & (1 << (i % 32))) == 0)
3318 return (i);
3319 }
3320 return (-1);
3321}
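/*
 * Usage sketch (illustrative, mirroring ctl_alloc_lun() below): a free bit
 * is located with ctl_ffz() and then claimed with ctl_set_mask():
 *
 *	id = ctl_ffz(softc->ctl_lun_mask, 0, CTL_MAX_LUNS);
 *	if (id == -1)
 *		return (ENOSPC);
 *	ctl_set_mask(softc->ctl_lun_mask, id);
 *
 * Each mask is an array of uint32_t words; bit i lives in word i / 32.
 */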
3322
3323int
3324ctl_set_mask(uint32_t *mask, uint32_t bit)
3325{
3326 uint32_t chunk, piece;
3327
3328 chunk = bit >> 5;
3329 piece = bit % (sizeof(uint32_t) * 8);
3330
3331 if ((mask[chunk] & (1 << piece)) != 0)
3332 return (-1);
3333 else
3334 mask[chunk] |= (1 << piece);
3335
3336 return (0);
3337}
3338
3339int
3340ctl_clear_mask(uint32_t *mask, uint32_t bit)
3341{
3342 uint32_t chunk, piece;
3343
3344 chunk = bit >> 5;
3345 piece = bit % (sizeof(uint32_t) * 8);
3346
3347 if ((mask[chunk] & (1 << piece)) == 0)
3348 return (-1);
3349 else
3350 mask[chunk] &= ~(1 << piece);
3351
3352 return (0);
3353}
3354
3355int
3356ctl_is_set(uint32_t *mask, uint32_t bit)
3357{
3358 uint32_t chunk, piece;
3359
3360 chunk = bit >> 5;
3361 piece = bit % (sizeof(uint32_t) * 8);
3362
3363 if ((mask[chunk] & (1 << piece)) == 0)
3364 return (0);
3365 else
3366 return (1);
3367}
3368
3369static uint64_t
3370ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
3371{
3372 uint64_t *t;
3373
3374 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3375 if (t == NULL)
3376 return (0);
3377 return (t[residx % CTL_MAX_INIT_PER_PORT]);
3378}
3379
3380static void
3381ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
3382{
3383 uint64_t *t;
3384
3385 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3386 if (t == NULL)
3387 return;
3388 t[residx % CTL_MAX_INIT_PER_PORT] = 0;
3389}
3390
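/*
 * Lazily allocate the per-port persistent reservation key array covering
 * residx.  The LUN lock is dropped around the M_WAITOK allocation; if
 * another thread installed the array in the meantime, the fresh buffer is
 * simply freed.
 */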
3391static void
3392ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
3393{
3394 uint64_t *p;
3395 u_int i;
3396
3397 i = residx/CTL_MAX_INIT_PER_PORT;
3398 if (lun->pr_keys[i] != NULL)
3399 return;
3400 mtx_unlock(&lun->lun_lock);
3401 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
3402 M_WAITOK | M_ZERO);
3403 mtx_lock(&lun->lun_lock);
3404 if (lun->pr_keys[i] == NULL)
3405 lun->pr_keys[i] = p;
3406 else
3407 free(p, M_CTL);
3408}
3409
3410static void
3411ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
3412{
3413 uint64_t *t;
3414
3415 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3416 KASSERT(t != NULL, ("prkey %d is not allocated", residx));
3417 t[residx % CTL_MAX_INIT_PER_PORT] = key;
3418}
3419
3420/*
3421 * ctl_softc, pool_name, total_ctl_io are passed in.
3422 * npool is passed out.
3423 */
3424int
3425ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
3426 uint32_t total_ctl_io, void **npool)
3427{
3428#ifdef IO_POOLS
3429 struct ctl_io_pool *pool;
3430
3431 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3432 M_NOWAIT | M_ZERO);
3433 if (pool == NULL)
3434 return (ENOMEM);
3435
3436 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
3437 pool->ctl_softc = ctl_softc;
3438 pool->zone = uma_zsecond_create(pool->name, NULL,
3439 NULL, NULL, NULL, ctl_softc->io_zone);
3440 /* uma_prealloc(pool->zone, total_ctl_io); */
3441
3442 *npool = pool;
3443#else
3444 *npool = ctl_softc->io_zone;
3445#endif
3446 return (0);
3447}
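/*
 * Usage sketch (hypothetical frontend, for illustration only): a caller
 * would typically create one pool up front and then allocate and free
 * I/Os from it as requests arrive, e.g.:
 *
 *	void *pool;
 *	union ctl_io *io;
 *
 *	if (ctl_pool_create(softc, "myfe", 1024, &pool) != 0)
 *		return (ENOMEM);
 *	io = ctl_alloc_io(pool);
 *	...
 *	ctl_free_io(io);
 */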
3448
3449void
3450ctl_pool_free(struct ctl_io_pool *pool)
3451{
3452
3453 if (pool == NULL)
3454 return;
3455
3456#ifdef IO_POOLS
3457 uma_zdestroy(pool->zone);
3458 free(pool, M_CTL);
3459#endif
3460}
3461
3462union ctl_io *
3463ctl_alloc_io(void *pool_ref)
3464{
3465 union ctl_io *io;
3466#ifdef IO_POOLS
3467 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3468
3469 io = uma_zalloc(pool->zone, M_WAITOK);
3470#else
3471 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
3472#endif
3473 if (io != NULL)
3474 io->io_hdr.pool = pool_ref;
3475 return (io);
3476}
3477
3478union ctl_io *
3479ctl_alloc_io_nowait(void *pool_ref)
3480{
3481 union ctl_io *io;
3482#ifdef IO_POOLS
3483 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3484
3485 io = uma_zalloc(pool->zone, M_NOWAIT);
3486#else
3487 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
3488#endif
3489 if (io != NULL)
3490 io->io_hdr.pool = pool_ref;
3491 return (io);
3492}
3493
3494void
3495ctl_free_io(union ctl_io *io)
3496{
3497#ifdef IO_POOLS
3498 struct ctl_io_pool *pool;
3499#endif
3500
3501 if (io == NULL)
3502 return;
3503
3504#ifdef IO_POOLS
3505 pool = (struct ctl_io_pool *)io->io_hdr.pool;
3506 uma_zfree(pool->zone, io);
3507#else
3508 uma_zfree((uma_zone_t)io->io_hdr.pool, io);
3509#endif
3510}
3511
3512void
3513ctl_zero_io(union ctl_io *io)
3514{
3515 void *pool_ref;
3516
3517 if (io == NULL)
3518 return;
3519
3520 /*
3521 * May need to preserve linked list pointers at some point too.
3522 */
3523 pool_ref = io->io_hdr.pool;
3524 memset(io, 0, sizeof(*io));
3525 io->io_hdr.pool = pool_ref;
3526}
3527
3528/*
3529 * This routine is currently used for internal copies of ctl_ios that need
3530 * to persist for some reason after we've already returned status to the
3531 * FETD. (Thus the flag set.)
3532 *
3533 * XXX XXX
3534 * Note that this makes a blind copy of all fields in the ctl_io, except
3535 * for the pool reference. This includes any memory that has been
3536 * allocated! That memory will no longer be valid after done has been
3537 * called, so this would be VERY DANGEROUS for command that actually does
3538 * called, so this would be VERY DANGEROUS for a command that actually does
3539 * start and stop commands, which don't transfer any data, so this is not a
3540 * problem. If it is used for anything else, the caller would also need to
3541 * allocate data buffer space and this routine would need to be modified to
3542 * copy the data buffer(s) as well.
3543 */
3544void
3545ctl_copy_io(union ctl_io *src, union ctl_io *dest)
3546{
3547 void *pool_ref;
3548
3549 if ((src == NULL)
3550 || (dest == NULL))
3551 return;
3552
3553 /*
3554 * May need to preserve linked list pointers at some point too.
3555 */
3556 pool_ref = dest->io_hdr.pool;
3557
3558 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest)));
3559
3560 dest->io_hdr.pool = pool_ref;
3561 /*
3562 * We need to know that this is an internal copy, and doesn't need
3563 * to get passed back to the FETD that allocated it.
3564 */
3565 dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
3566}
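/*
 * For a concrete use of the copy-then-complete pattern described above,
 * see the SSS_IMMED branch of ctl_start_stop() below: it allocates a new
 * ctl_io, duplicates the original with ctl_copy_io(), hands the copy to
 * the backend, and immediately returns good status on the original.
 */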
3567
3568int
3569ctl_expand_number(const char *buf, uint64_t *num)
3570{
3571 char *endptr;
3572 uint64_t number;
3573 unsigned shift;
3574
3575 number = strtoq(buf, &endptr, 0);
3576
3577 switch (tolower((unsigned char)*endptr)) {
3578 case 'e':
3579 shift = 60;
3580 break;
3581 case 'p':
3582 shift = 50;
3583 break;
3584 case 't':
3585 shift = 40;
3586 break;
3587 case 'g':
3588 shift = 30;
3589 break;
3590 case 'm':
3591 shift = 20;
3592 break;
3593 case 'k':
3594 shift = 10;
3595 break;
3596 case 'b':
3597 case '\0': /* No unit. */
3598 *num = number;
3599 return (0);
3600 default:
3601 /* Unrecognized unit. */
3602 return (-1);
3603 }
3604
3605 if ((number << shift) >> shift != number) {
3606 /* Overflow */
3607 return (-1);
3608 }
3609 *num = number << shift;
3610 return (0);
3611}
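/*
 * Example (illustrative): ctl_expand_number("10G", &v) yields
 * v = 10 << 30 = 10737418240, while a bare "512" or "512b" is returned
 * unchanged.  The (number << shift) >> shift test above rejects suffixes
 * that would overflow 64 bits.  This helper is used below to parse the
 * "avail-threshold" and related logical block provisioning options.
 */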
3612
3613
3614/*
3615 * This routine could be used in the future to load default and/or saved
3616 * mode page parameters for a particular lun.
3617 */
3618static int
3619ctl_init_page_index(struct ctl_lun *lun)
3620{
3621 int i;
3622 struct ctl_page_index *page_index;
3623 const char *value;
3624 uint64_t ival;
3625
3626 memcpy(&lun->mode_pages.index, page_index_template,
3627 sizeof(page_index_template));
3628
3629 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
3630
3631 page_index = &lun->mode_pages.index[i];
3632 /*
3633 * If this is a disk-only mode page, there's no point in
3634 * setting it up. For some pages, we have to have some
3635 * basic information about the disk in order to calculate the
3636 * mode page data.
3637 */
3638 if ((lun->be_lun->lun_type != T_DIRECT)
3639 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
3640 continue;
3641
3642 switch (page_index->page_code & SMPH_PC_MASK) {
3643 case SMS_RW_ERROR_RECOVERY_PAGE: {
3644 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3645 panic("subpage is incorrect!");
3646 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
3647 &rw_er_page_default,
3648 sizeof(rw_er_page_default));
3649 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
3650 &rw_er_page_changeable,
3651 sizeof(rw_er_page_changeable));
3652 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
3653 &rw_er_page_default,
3654 sizeof(rw_er_page_default));
3655 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
3656 &rw_er_page_default,
3657 sizeof(rw_er_page_default));
3658 page_index->page_data =
3659 (uint8_t *)lun->mode_pages.rw_er_page;
3660 break;
3661 }
3662 case SMS_FORMAT_DEVICE_PAGE: {
3663 struct scsi_format_page *format_page;
3664
3665 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3666 panic("subpage is incorrect!");
3667
3668 /*
3669 * Sectors per track are set above. Bytes per
3670 * sector need to be set here on a per-LUN basis.
3671 */
3672 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
3673 &format_page_default,
3674 sizeof(format_page_default));
3675 memcpy(&lun->mode_pages.format_page[
3676 CTL_PAGE_CHANGEABLE], &format_page_changeable,
3677 sizeof(format_page_changeable));
3678 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
3679 &format_page_default,
3680 sizeof(format_page_default));
3681 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
3682 &format_page_default,
3683 sizeof(format_page_default));
3684
3685 format_page = &lun->mode_pages.format_page[
3686 CTL_PAGE_CURRENT];
3687 scsi_ulto2b(lun->be_lun->blocksize,
3688 format_page->bytes_per_sector);
3689
3690 format_page = &lun->mode_pages.format_page[
3691 CTL_PAGE_DEFAULT];
3692 scsi_ulto2b(lun->be_lun->blocksize,
3693 format_page->bytes_per_sector);
3694
3695 format_page = &lun->mode_pages.format_page[
3696 CTL_PAGE_SAVED];
3697 scsi_ulto2b(lun->be_lun->blocksize,
3698 format_page->bytes_per_sector);
3699
3700 page_index->page_data =
3701 (uint8_t *)lun->mode_pages.format_page;
3702 break;
3703 }
3704 case SMS_RIGID_DISK_PAGE: {
3705 struct scsi_rigid_disk_page *rigid_disk_page;
3706 uint32_t sectors_per_cylinder;
3707 uint64_t cylinders;
3708#ifndef __XSCALE__
3709 int shift;
3710#endif /* !__XSCALE__ */
3711
3712 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3713 panic("invalid subpage value %d",
3714 page_index->subpage);
3715
3716 /*
3717 * Rotation rate and sectors per track are set
3718 * above. We calculate the cylinders here based on
3719 * capacity. Due to the number of heads and
3720 * sectors per track we're using, smaller arrays
3721 * may turn out to have 0 cylinders. Linux and
3722 * FreeBSD don't pay attention to these mode pages
3723 * to figure out capacity, but Solaris does. It
3724 * seems to deal with 0 cylinders just fine, and
3725 * works out a fake geometry based on the capacity.
3726 */
3727 memcpy(&lun->mode_pages.rigid_disk_page[
3728 CTL_PAGE_DEFAULT], &rigid_disk_page_default,
3729 sizeof(rigid_disk_page_default));
3730 memcpy(&lun->mode_pages.rigid_disk_page[
3731 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
3732 sizeof(rigid_disk_page_changeable));
3733
3734 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
3735 CTL_DEFAULT_HEADS;
3736
3737 /*
3738 * The divide method here will be more accurate,
3739 * probably, but results in floating point being
3740 * used in the kernel on i386 (__udivdi3()). On the
3741 * XScale, though, __udivdi3() is implemented in
3742 * software.
3743 *
3744 * The shift method for cylinder calculation is
3745 * accurate if sectors_per_cylinder is a power of
3746 * 2. Otherwise it might be slightly off -- you
3747 * might have a bit of a truncation problem.
3748 */
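			/*
			 * Worked example (illustrative): if sectors_per_cylinder
			 * happened to be 32768 (2^15), the loop below finds
			 * shift = 15 and computes cylinders = (maxlba + 1) >> 15,
			 * which equals the exact division.  For a value that is
			 * not a power of 2, the shift divides by the largest
			 * power of 2 not exceeding sectors_per_cylinder, so the
			 * resulting count can be slightly high.
			 */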
3749#ifdef __XSCALE__
3750 cylinders = (lun->be_lun->maxlba + 1) /
3751 sectors_per_cylinder;
3752#else
3753 for (shift = 31; shift > 0; shift--) {
3754 if (sectors_per_cylinder & (1 << shift))
3755 break;
3756 }
3757 cylinders = (lun->be_lun->maxlba + 1) >> shift;
3758#endif
3759
3760 /*
3761 * We've basically got 3 bytes, or 24 bits for the
3762 * cylinder size in the mode page. If we're over,
3763 * just round down to 2^24.
3764 */
3765 if (cylinders > 0xffffff)
3766 cylinders = 0xffffff;
3767
3768 rigid_disk_page = &lun->mode_pages.rigid_disk_page[
3769 CTL_PAGE_DEFAULT];
3770 scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
3771
3772 if ((value = ctl_get_opt(&lun->be_lun->options,
3773 "rpm")) != NULL) {
3774 scsi_ulto2b(strtol(value, NULL, 0),
3775 rigid_disk_page->rotation_rate);
3776 }
3777
3778 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
3779 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
3780 sizeof(rigid_disk_page_default));
3781 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
3782 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
3783 sizeof(rigid_disk_page_default));
3784
3785 page_index->page_data =
3786 (uint8_t *)lun->mode_pages.rigid_disk_page;
3787 break;
3788 }
3789 case SMS_CACHING_PAGE: {
3790 struct scsi_caching_page *caching_page;
3791
3792 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3793 panic("invalid subpage value %d",
3794 page_index->subpage);
3795 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
3796 &caching_page_default,
3797 sizeof(caching_page_default));
3798 memcpy(&lun->mode_pages.caching_page[
3799 CTL_PAGE_CHANGEABLE], &caching_page_changeable,
3800 sizeof(caching_page_changeable));
3801 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
3802 &caching_page_default,
3803 sizeof(caching_page_default));
3804 caching_page = &lun->mode_pages.caching_page[
3805 CTL_PAGE_SAVED];
3806 value = ctl_get_opt(&lun->be_lun->options, "writecache");
3807 if (value != NULL && strcmp(value, "off") == 0)
3808 caching_page->flags1 &= ~SCP_WCE;
3809 value = ctl_get_opt(&lun->be_lun->options, "readcache");
3810 if (value != NULL && strcmp(value, "off") == 0)
3811 caching_page->flags1 |= SCP_RCD;
3812 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
3813 &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
3814 sizeof(caching_page_default));
3815 page_index->page_data =
3816 (uint8_t *)lun->mode_pages.caching_page;
3817 break;
3818 }
3819 case SMS_CONTROL_MODE_PAGE: {
3820 struct scsi_control_page *control_page;
3821
3822 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3823 panic("invalid subpage value %d",
3824 page_index->subpage);
3825
3826 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
3827 &control_page_default,
3828 sizeof(control_page_default));
3829 memcpy(&lun->mode_pages.control_page[
3830 CTL_PAGE_CHANGEABLE], &control_page_changeable,
3831 sizeof(control_page_changeable));
3832 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
3833 &control_page_default,
3834 sizeof(control_page_default));
3835 control_page = &lun->mode_pages.control_page[
3836 CTL_PAGE_SAVED];
3837 value = ctl_get_opt(&lun->be_lun->options, "reordering");
3838 if (value != NULL && strcmp(value, "unrestricted") == 0) {
3839 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK;
3840 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED;
3841 }
3842 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
3843 &lun->mode_pages.control_page[CTL_PAGE_SAVED],
3844 sizeof(control_page_default));
3845 page_index->page_data =
3846 (uint8_t *)lun->mode_pages.control_page;
3847 break;
3848
3849 }
3850 case SMS_INFO_EXCEPTIONS_PAGE: {
3851 switch (page_index->subpage) {
3852 case SMS_SUBPAGE_PAGE_0:
3853 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
3854 &ie_page_default,
3855 sizeof(ie_page_default));
3856 memcpy(&lun->mode_pages.ie_page[
3857 CTL_PAGE_CHANGEABLE], &ie_page_changeable,
3858 sizeof(ie_page_changeable));
3859 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
3860 &ie_page_default,
3861 sizeof(ie_page_default));
3862 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
3863 &ie_page_default,
3864 sizeof(ie_page_default));
3865 page_index->page_data =
3866 (uint8_t *)lun->mode_pages.ie_page;
3867 break;
3868 case 0x02: {
3869 struct ctl_logical_block_provisioning_page *page;
3870
3871 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
3872 &lbp_page_default,
3873 sizeof(lbp_page_default));
3874 memcpy(&lun->mode_pages.lbp_page[
3875 CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
3876 sizeof(lbp_page_changeable));
3877 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
3878 &lbp_page_default,
3879 sizeof(lbp_page_default));
3880 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
3881 value = ctl_get_opt(&lun->be_lun->options,
3882 "avail-threshold");
3883 if (value != NULL &&
3884 ctl_expand_number(value, &ival) == 0) {
3885 page->descr[0].flags |= SLBPPD_ENABLED |
3886 SLBPPD_ARMING_DEC;
3887 if (lun->be_lun->blocksize)
3888 ival /= lun->be_lun->blocksize;
3889 else
3890 ival /= 512;
3891 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
3892 page->descr[0].count);
3893 }
3894 value = ctl_get_opt(&lun->be_lun->options,
3895 "used-threshold");
3896 if (value != NULL &&
3897 ctl_expand_number(value, &ival) == 0) {
3898 page->descr[1].flags |= SLBPPD_ENABLED |
3899 SLBPPD_ARMING_INC;
3900 if (lun->be_lun->blocksize)
3901 ival /= lun->be_lun->blocksize;
3902 else
3903 ival /= 512;
3904 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
3905 page->descr[1].count);
3906 }
3907 value = ctl_get_opt(&lun->be_lun->options,
3908 "pool-avail-threshold");
3909 if (value != NULL &&
3910 ctl_expand_number(value, &ival) == 0) {
3911 page->descr[2].flags |= SLBPPD_ENABLED |
3912 SLBPPD_ARMING_DEC;
3913 if (lun->be_lun->blocksize)
3914 ival /= lun->be_lun->blocksize;
3915 else
3916 ival /= 512;
3917 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
3918 page->descr[2].count);
3919 }
3920 value = ctl_get_opt(&lun->be_lun->options,
3921 "pool-used-threshold");
3922 if (value != NULL &&
3923 ctl_expand_number(value, &ival) == 0) {
3924 page->descr[3].flags |= SLBPPD_ENABLED |
3925 SLBPPD_ARMING_INC;
3926 if (lun->be_lun->blocksize)
3927 ival /= lun->be_lun->blocksize;
3928 else
3929 ival /= 512;
3930 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
3931 page->descr[3].count);
3932 }
3933 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
3934 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
3935 sizeof(lbp_page_default));
3936 page_index->page_data =
3937 (uint8_t *)lun->mode_pages.lbp_page;
3938 }}
3939 break;
3940 }
3941 case SMS_VENDOR_SPECIFIC_PAGE:{
3942 switch (page_index->subpage) {
3943 case DBGCNF_SUBPAGE_CODE: {
3944 struct copan_debugconf_subpage *current_page,
3945 *saved_page;
3946
3947 memcpy(&lun->mode_pages.debugconf_subpage[
3948 CTL_PAGE_CURRENT],
3949 &debugconf_page_default,
3950 sizeof(debugconf_page_default));
3951 memcpy(&lun->mode_pages.debugconf_subpage[
3952 CTL_PAGE_CHANGEABLE],
3953 &debugconf_page_changeable,
3954 sizeof(debugconf_page_changeable));
3955 memcpy(&lun->mode_pages.debugconf_subpage[
3956 CTL_PAGE_DEFAULT],
3957 &debugconf_page_default,
3958 sizeof(debugconf_page_default));
3959 memcpy(&lun->mode_pages.debugconf_subpage[
3960 CTL_PAGE_SAVED],
3961 &debugconf_page_default,
3962 sizeof(debugconf_page_default));
3963 page_index->page_data =
3964 (uint8_t *)lun->mode_pages.debugconf_subpage;
3965
3966 current_page = (struct copan_debugconf_subpage *)
3967 (page_index->page_data +
3968 (page_index->page_len *
3969 CTL_PAGE_CURRENT));
3970 saved_page = (struct copan_debugconf_subpage *)
3971 (page_index->page_data +
3972 (page_index->page_len *
3973 CTL_PAGE_SAVED));
3974 break;
3975 }
3976 default:
3977 panic("invalid subpage value %d",
3978 page_index->subpage);
3979 break;
3980 }
3981 break;
3982 }
3983 default:
3984 panic("invalid page value %d",
3985 page_index->page_code & SMPH_PC_MASK);
3986 break;
3987 }
3988 }
3989
3990 return (CTL_RETVAL_COMPLETE);
3991}
3992
3993static int
3994ctl_init_log_page_index(struct ctl_lun *lun)
3995{
3996 struct ctl_page_index *page_index;
3997 int i, j, k, prev;
3998
3999 memcpy(&lun->log_pages.index, log_page_index_template,
4000 sizeof(log_page_index_template));
4001
4002 prev = -1;
4003 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
4004
4005 page_index = &lun->log_pages.index[i];
4006 /*
4007	 * If this is a disk-only log page, there's no point in
4008 * setting it up. For some pages, we have to have some
4009 * basic information about the disk in order to calculate the
4010	 * log page data.
4011 */
4012 if ((lun->be_lun->lun_type != T_DIRECT)
4013 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
4014 continue;
4015
4016 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
4017 lun->backend->lun_attr == NULL)
4018 continue;
4019
4020 if (page_index->page_code != prev) {
4021 lun->log_pages.pages_page[j] = page_index->page_code;
4022 prev = page_index->page_code;
4023 j++;
4024 }
4025 lun->log_pages.subpages_page[k*2] = page_index->page_code;
4026 lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
4027 k++;
4028 }
4029 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
4030 lun->log_pages.index[0].page_len = j;
4031 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
4032 lun->log_pages.index[1].page_len = k * 2;
4033 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
4034 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
4035 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
4036 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
4037
4038 return (CTL_RETVAL_COMPLETE);
4039}
4040
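/*
 * Decode a hex string (optional leading whitespace and "0x"/"0X" prefix)
 * into buf, stopping at the first non-hex character or when buf is full.
 * Returns the number of bytes produced (one per two digits, rounded up).
 * Used below to decode the "eui" and "naa" LUN options.
 */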
4041static int
4042hex2bin(const char *str, uint8_t *buf, int buf_size)
4043{
4044 int i;
4045 u_char c;
4046
4047 memset(buf, 0, buf_size);
4048 while (isspace(str[0]))
4049 str++;
4050 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
4051 str += 2;
4052 buf_size *= 2;
4053 for (i = 0; str[i] != 0 && i < buf_size; i++) {
4054 c = str[i];
4055 if (isdigit(c))
4056 c -= '0';
4057 else if (isalpha(c))
4058 c -= isupper(c) ? 'A' - 10 : 'a' - 10;
4059 else
4060 break;
4061 if (c >= 16)
4062 break;
4063 if ((i & 1) == 0)
4064 buf[i / 2] |= (c << 4);
4065 else
4066 buf[i / 2] |= c;
4067 }
4068 return ((i + 1) / 2);
4069}
4070
4071/*
4072 * LUN allocation.
4073 *
4074 * Requirements:
4075 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
4076 * wants us to allocate the LUN and he can block.
4077 * - ctl_softc is always set
4078 * - be_lun is set if the LUN has a backend (needed for disk LUNs)
4079 *
4080 * Returns 0 for success, non-zero (errno) for failure.
4081 */
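/*
 * In practice ctl_create_lun() below passes a NULL LUN pointer, so this
 * routine allocates the ctl_lun itself and marks it CTL_LUN_MALLOCED so
 * that ctl_free_lun() knows to free it.
 */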
4082static int
4083ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
4084 struct ctl_be_lun *const be_lun)
4085{
4086 struct ctl_lun *nlun, *lun;
4087 struct scsi_vpd_id_descriptor *desc;
4088 struct scsi_vpd_id_t10 *t10id;
4089 const char *eui, *naa, *scsiname, *vendor;
4090 int lun_number, i, lun_malloced;
4091 int devidlen, idlen1, idlen2 = 0, len;
4092
4093 if (be_lun == NULL)
4094 return (EINVAL);
4095
4096 /*
4097 * We currently only support Direct Access or Processor LUN types.
4098 */
4099 switch (be_lun->lun_type) {
4100 case T_DIRECT:
4101 break;
4102 case T_PROCESSOR:
4103 break;
4104 case T_SEQUENTIAL:
4105 case T_CHANGER:
4106 default:
4107 be_lun->lun_config_status(be_lun->be_lun,
4108 CTL_LUN_CONFIG_FAILURE);
4109 break;
4110 }
4111 if (ctl_lun == NULL) {
4112 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
4113 lun_malloced = 1;
4114 } else {
4115 lun_malloced = 0;
4116 lun = ctl_lun;
4117 }
4118
4119 memset(lun, 0, sizeof(*lun));
4120 if (lun_malloced)
4121 lun->flags = CTL_LUN_MALLOCED;
4122
4123 /* Generate LUN ID. */
4124 devidlen = max(CTL_DEVID_MIN_LEN,
4125 strnlen(be_lun->device_id, CTL_DEVID_LEN));
4126 idlen1 = sizeof(*t10id) + devidlen;
4127 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
4128 scsiname = ctl_get_opt(&be_lun->options, "scsiname");
4129 if (scsiname != NULL) {
4130 idlen2 = roundup2(strlen(scsiname) + 1, 4);
4131 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
4132 }
4133 eui = ctl_get_opt(&be_lun->options, "eui");
4134 if (eui != NULL) {
4135 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4136 }
4137 naa = ctl_get_opt(&be_lun->options, "naa");
4138 if (naa != NULL) {
4139 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4140 }
4141 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
4142 M_CTL, M_WAITOK | M_ZERO);
4143 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
4144 desc->proto_codeset = SVPD_ID_CODESET_ASCII;
4145 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
4146 desc->length = idlen1;
4147 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
4148 memset(t10id->vendor, ' ', sizeof(t10id->vendor));
4149 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
4150 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
4151 } else {
4152 strncpy(t10id->vendor, vendor,
4153 min(sizeof(t10id->vendor), strlen(vendor)));
4154 }
4155 strncpy((char *)t10id->vendor_spec_id,
4156 (char *)be_lun->device_id, devidlen);
4157 if (scsiname != NULL) {
4158 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4159 desc->length);
4160 desc->proto_codeset = SVPD_ID_CODESET_UTF8;
4161 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4162 SVPD_ID_TYPE_SCSI_NAME;
4163 desc->length = idlen2;
4164 strlcpy(desc->identifier, scsiname, idlen2);
4165 }
4166 if (eui != NULL) {
4167 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4168 desc->length);
4169 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4170 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4171 SVPD_ID_TYPE_EUI64;
4172 desc->length = hex2bin(eui, desc->identifier, 16);
4173 desc->length = desc->length > 12 ? 16 :
4174 (desc->length > 8 ? 12 : 8);
4175 len -= 16 - desc->length;
4176 }
4177 if (naa != NULL) {
4178 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4179 desc->length);
4180 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4181 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4182 SVPD_ID_TYPE_NAA;
4183 desc->length = hex2bin(naa, desc->identifier, 16);
4184 desc->length = desc->length > 8 ? 16 : 8;
4185 len -= 16 - desc->length;
4186 }
4187 lun->lun_devid->len = len;
4188
4189 mtx_lock(&ctl_softc->ctl_lock);
4190 /*
4191 * See if the caller requested a particular LUN number. If so, see
4192 * if it is available. Otherwise, allocate the first available LUN.
4193 */
4194 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
4195 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
4196 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
4197 mtx_unlock(&ctl_softc->ctl_lock);
4198 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
4199 printf("ctl: requested LUN ID %d is higher "
4200 "than CTL_MAX_LUNS - 1 (%d)\n",
4201 be_lun->req_lun_id, CTL_MAX_LUNS - 1);
4202 } else {
4203 /*
4204 * XXX KDM return an error, or just assign
4205 * another LUN ID in this case??
4206 */
4207 printf("ctl: requested LUN ID %d is already "
4208 "in use\n", be_lun->req_lun_id);
4209 }
4210 if (lun->flags & CTL_LUN_MALLOCED)
4211 free(lun, M_CTL);
4212 be_lun->lun_config_status(be_lun->be_lun,
4213 CTL_LUN_CONFIG_FAILURE);
4214 return (ENOSPC);
4215 }
4216 lun_number = be_lun->req_lun_id;
4217 } else {
4218 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS);
4219 if (lun_number == -1) {
4220 mtx_unlock(&ctl_softc->ctl_lock);
4221 printf("ctl: can't allocate LUN, out of LUNs\n");
4222 if (lun->flags & CTL_LUN_MALLOCED)
4223 free(lun, M_CTL);
4224 be_lun->lun_config_status(be_lun->be_lun,
4225 CTL_LUN_CONFIG_FAILURE);
4226 return (ENOSPC);
4227 }
4228 }
4229 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
4230
4231 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
4232 lun->lun = lun_number;
4233 lun->be_lun = be_lun;
4234 /*
4235 * The processor LUN is always enabled. Disk LUNs come on line
4236 * disabled, and must be enabled by the backend.
4237 */
4238 lun->flags |= CTL_LUN_DISABLED;
4239 lun->backend = be_lun->be;
4240 be_lun->ctl_lun = lun;
4241 be_lun->lun_id = lun_number;
4242 atomic_add_int(&be_lun->be->num_luns, 1);
4243 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE)
4244 lun->flags |= CTL_LUN_OFFLINE;
4245
4246 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF)
4247 lun->flags |= CTL_LUN_STOPPED;
4248
4249 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE)
4250 lun->flags |= CTL_LUN_INOPERABLE;
4251
4252 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
4253 lun->flags |= CTL_LUN_PRIMARY_SC;
4254
4255 lun->ctl_softc = ctl_softc;
4256#ifdef CTL_TIME_IO
4257 lun->last_busy = getsbinuptime();
4258#endif
4259 TAILQ_INIT(&lun->ooa_queue);
4260 TAILQ_INIT(&lun->blocked_queue);
4261 STAILQ_INIT(&lun->error_list);
4262 ctl_tpc_lun_init(lun);
4263
4264 /*
4265 * Initialize the mode and log page index.
4266 */
4267 ctl_init_page_index(lun);
4268 ctl_init_log_page_index(lun);
4269
4270 /*
4271 * Now, before we insert this lun on the lun list, set the lun
4272 * inventory changed UA for all other luns.
4273 */
4274 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
4275 mtx_lock(&nlun->lun_lock);
4276 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4277 mtx_unlock(&nlun->lun_lock);
4278 }
4279
4280 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
4281
4282 ctl_softc->ctl_luns[lun_number] = lun;
4283
4284 ctl_softc->num_luns++;
4285
4286 /* Setup statistics gathering */
4287 lun->stats.device_type = be_lun->lun_type;
4288 lun->stats.lun_number = lun_number;
4289 if (lun->stats.device_type == T_DIRECT)
4290 lun->stats.blocksize = be_lun->blocksize;
4291 else
4292 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
4293	for (i = 0; i < CTL_MAX_PORTS; i++)
4294 lun->stats.ports[i].targ_port = i;
4295
4296 mtx_unlock(&ctl_softc->ctl_lock);
4297
4298 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
4299 return (0);
4300}
4301
4302/*
4303 * Delete a LUN.
4304 * Assumptions:
4305 * - LUN has already been marked invalid and any pending I/O has been taken
4306 * care of.
4307 */
4308static int
4309ctl_free_lun(struct ctl_lun *lun)
4310{
4311 struct ctl_softc *softc;
4312 struct ctl_lun *nlun;
4313 int i;
4314
4315 softc = lun->ctl_softc;
4316
4317 mtx_assert(&softc->ctl_lock, MA_OWNED);
4318
4319 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4320
4321 ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4322
4323 softc->ctl_luns[lun->lun] = NULL;
4324
4325 if (!TAILQ_EMPTY(&lun->ooa_queue))
4326 panic("Freeing a LUN %p with outstanding I/O!!\n", lun);
4327
4328 softc->num_luns--;
4329
4330 /*
4331 * Tell the backend to free resources, if this LUN has a backend.
4332 */
4333 atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
4334 lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
4335
4336 ctl_tpc_lun_shutdown(lun);
4337 mtx_destroy(&lun->lun_lock);
4338 free(lun->lun_devid, M_CTL);
4339 for (i = 0; i < CTL_MAX_PORTS; i++)
4340 free(lun->pending_ua[i], M_CTL);
4341 for (i = 0; i < CTL_MAX_PORTS; i++)
4342 free(lun->pr_keys[i], M_CTL);
4343 free(lun->write_buffer, M_CTL);
4344 if (lun->flags & CTL_LUN_MALLOCED)
4345 free(lun, M_CTL);
4346
4347 STAILQ_FOREACH(nlun, &softc->lun_list, links) {
4348 mtx_lock(&nlun->lun_lock);
4349 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4350 mtx_unlock(&nlun->lun_lock);
4351 }
4352
4353 return (0);
4354}
4355
4356static void
4357ctl_create_lun(struct ctl_be_lun *be_lun)
4358{
4359 struct ctl_softc *softc;
4360
4361 softc = control_softc;
4362
4363 /*
4364 * ctl_alloc_lun() should handle all potential failure cases.
4365 */
4366 ctl_alloc_lun(softc, NULL, be_lun);
4367}
4368
4369int
4370ctl_add_lun(struct ctl_be_lun *be_lun)
4371{
4372 struct ctl_softc *softc = control_softc;
4373
4374 mtx_lock(&softc->ctl_lock);
4375 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
4376 mtx_unlock(&softc->ctl_lock);
4377 wakeup(&softc->pending_lun_queue);
4378
4379 return (0);
4380}
4381
4382int
4383ctl_enable_lun(struct ctl_be_lun *be_lun)
4384{
4385 struct ctl_softc *softc;
4386 struct ctl_port *port, *nport;
4387 struct ctl_lun *lun;
4388 int retval;
4389
4390 lun = (struct ctl_lun *)be_lun->ctl_lun;
4391 softc = lun->ctl_softc;
4392
4393 mtx_lock(&softc->ctl_lock);
4394 mtx_lock(&lun->lun_lock);
4395 if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4396 /*
4397 * eh? Why did we get called if the LUN is already
4398 * enabled?
4399 */
4400 mtx_unlock(&lun->lun_lock);
4401 mtx_unlock(&softc->ctl_lock);
4402 return (0);
4403 }
4404 lun->flags &= ~CTL_LUN_DISABLED;
4405 mtx_unlock(&lun->lun_lock);
4406
4407 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) {
4408 nport = STAILQ_NEXT(port, links);
4409 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4410 port->lun_map != NULL || port->lun_enable == NULL)
4411 continue;
4412
4413 /*
4414 * Drop the lock while we call the FETD's enable routine.
4415 * This can lead to a callback into CTL (at least in the
4416	 * case of the internal initiator frontend).
4417 */
4418 mtx_unlock(&softc->ctl_lock);
4419 retval = port->lun_enable(port->targ_lun_arg, lun->lun);
4420 mtx_lock(&softc->ctl_lock);
4421 if (retval != 0) {
4422 printf("%s: FETD %s port %d returned error "
4423 "%d for lun_enable on lun %jd\n",
4424 __func__, port->port_name, port->targ_port,
4425 retval, (intmax_t)lun->lun);
4426 }
4427 }
4428
4429 mtx_unlock(&softc->ctl_lock);
4430 ctl_isc_announce_lun(lun);
4431
4432 return (0);
4433}
4434
4435int
4436ctl_disable_lun(struct ctl_be_lun *be_lun)
4437{
4438 struct ctl_softc *softc;
4439 struct ctl_port *port;
4440 struct ctl_lun *lun;
4441 int retval;
4442
4443 lun = (struct ctl_lun *)be_lun->ctl_lun;
4444 softc = lun->ctl_softc;
4445
4446 mtx_lock(&softc->ctl_lock);
4447 mtx_lock(&lun->lun_lock);
4448 if (lun->flags & CTL_LUN_DISABLED) {
4449 mtx_unlock(&lun->lun_lock);
4450 mtx_unlock(&softc->ctl_lock);
4451 return (0);
4452 }
4453 lun->flags |= CTL_LUN_DISABLED;
4454 mtx_unlock(&lun->lun_lock);
4455
4456 STAILQ_FOREACH(port, &softc->port_list, links) {
4457 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4458 port->lun_map != NULL || port->lun_disable == NULL)
4459 continue;
4460
4461 /*
4462 * Drop the lock before we call the frontend's disable
4463 * routine, to avoid lock order reversals.
4464 *
4465 * XXX KDM what happens if the frontend list changes while
4466 * we're traversing it? It's unlikely, but should be handled.
4467 */
4468 mtx_unlock(&softc->ctl_lock);
4469 retval = port->lun_disable(port->targ_lun_arg, lun->lun);
4470 mtx_lock(&softc->ctl_lock);
4471 if (retval != 0) {
4472 printf("%s: FETD %s port %d returned error "
4473 "%d for lun_disable on lun %jd\n",
4474 __func__, port->port_name, port->targ_port,
4475 retval, (intmax_t)lun->lun);
4476 }
4477 }
4478
4479 mtx_unlock(&softc->ctl_lock);
4480 ctl_isc_announce_lun(lun);
4481
4482 return (0);
4483}
4484
4485int
4486ctl_start_lun(struct ctl_be_lun *be_lun)
4487{
4488 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4489
4490 mtx_lock(&lun->lun_lock);
4491 lun->flags &= ~CTL_LUN_STOPPED;
4492 mtx_unlock(&lun->lun_lock);
4493 return (0);
4494}
4495
4496int
4497ctl_stop_lun(struct ctl_be_lun *be_lun)
4498{
4499 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4500
4501 mtx_lock(&lun->lun_lock);
4502 lun->flags |= CTL_LUN_STOPPED;
4503 mtx_unlock(&lun->lun_lock);
4504 return (0);
4505}
4506
4507int
4508ctl_lun_offline(struct ctl_be_lun *be_lun)
4509{
4510 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4511
4512 mtx_lock(&lun->lun_lock);
4513 lun->flags |= CTL_LUN_OFFLINE;
4514 mtx_unlock(&lun->lun_lock);
4515 return (0);
4516}
4517
4518int
4519ctl_lun_online(struct ctl_be_lun *be_lun)
4520{
4521 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4522
4523 mtx_lock(&lun->lun_lock);
4524 lun->flags &= ~CTL_LUN_OFFLINE;
4525 mtx_unlock(&lun->lun_lock);
4526 return (0);
4527}
4528
4529int
4530ctl_lun_primary(struct ctl_be_lun *be_lun)
4531{
4532 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4533
4534 mtx_lock(&lun->lun_lock);
4535 lun->flags |= CTL_LUN_PRIMARY_SC;
4536 mtx_unlock(&lun->lun_lock);
4537 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
4538 ctl_isc_announce_lun(lun);
4539 return (0);
4540}
4541
4542int
4543ctl_lun_secondary(struct ctl_be_lun *be_lun)
4544{
4545 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4546
4547 mtx_lock(&lun->lun_lock);
4548 lun->flags &= ~CTL_LUN_PRIMARY_SC;
4549 mtx_unlock(&lun->lun_lock);
4550 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
4551 ctl_isc_announce_lun(lun);
4552 return (0);
4553}
4554
4555int
4556ctl_invalidate_lun(struct ctl_be_lun *be_lun)
4557{
4558 struct ctl_softc *softc;
4559 struct ctl_lun *lun;
4560
4561 lun = (struct ctl_lun *)be_lun->ctl_lun;
4562 softc = lun->ctl_softc;
4563
4564 mtx_lock(&lun->lun_lock);
4565
4566 /*
4567 * The LUN needs to be disabled before it can be marked invalid.
4568 */
4569 if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4570 mtx_unlock(&lun->lun_lock);
4571 return (-1);
4572 }
4573 /*
4574 * Mark the LUN invalid.
4575 */
4576 lun->flags |= CTL_LUN_INVALID;
4577
4578 /*
4579 * If there is nothing in the OOA queue, go ahead and free the LUN.
4580 * If we have something in the OOA queue, we'll free it when the
4581 * last I/O completes.
4582 */
4583 if (TAILQ_EMPTY(&lun->ooa_queue)) {
4584 mtx_unlock(&lun->lun_lock);
4585 mtx_lock(&softc->ctl_lock);
4586 ctl_free_lun(lun);
4587 mtx_unlock(&softc->ctl_lock);
4588 } else
4589 mtx_unlock(&lun->lun_lock);
4590
4591 return (0);
4592}
4593
4594int
4595ctl_lun_inoperable(struct ctl_be_lun *be_lun)
4596{
4597 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4598
4599 mtx_lock(&lun->lun_lock);
4600 lun->flags |= CTL_LUN_INOPERABLE;
4601 mtx_unlock(&lun->lun_lock);
4602 return (0);
4603}
4604
4605int
4606ctl_lun_operable(struct ctl_be_lun *be_lun)
4607{
4608 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4609
4610 mtx_lock(&lun->lun_lock);
4611 lun->flags &= ~CTL_LUN_INOPERABLE;
4612 mtx_unlock(&lun->lun_lock);
4613 return (0);
4614}
4615
4616void
4617ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
4618{
4619 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4620 union ctl_ha_msg msg;
4621
4622 mtx_lock(&lun->lun_lock);
4623 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED);
4624 mtx_unlock(&lun->lun_lock);
4625 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
4626 /* Send msg to other side. */
4627 bzero(&msg.ua, sizeof(msg.ua));
4628 msg.hdr.msg_type = CTL_MSG_UA;
4629 msg.hdr.nexus.initid = -1;
4630 msg.hdr.nexus.targ_port = -1;
4631 msg.hdr.nexus.targ_lun = lun->lun;
4632 msg.hdr.nexus.targ_mapped_lun = lun->lun;
4633 msg.ua.ua_all = 1;
4634 msg.ua.ua_set = 1;
4635 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED;
4636 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
4637 M_WAITOK);
4638 }
4639}
4640
4641/*
4642 * Backend "memory move is complete" callback for requests that never
4643 * make it down to say RAIDCore's configuration code.
4644 */
4645int
4646ctl_config_move_done(union ctl_io *io)
4647{
4648 int retval;
4649
4650 CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
4651 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
4652 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type));
4653
4654 if ((io->io_hdr.port_status != 0) &&
4655 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
4656 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
4657 /*
4658 * For hardware error sense keys, the sense key
4659 * specific value is defined to be a retry count,
4660 * but we use it to pass back an internal FETD
4661 * error code. XXX KDM Hopefully the FETD is only
4662 * using 16 bits for an error code, since that's
4663 * all the space we have in the sks field.
4664 */
4665 ctl_set_internal_failure(&io->scsiio,
4666 /*sks_valid*/ 1,
4667 /*retry_count*/
4668 io->io_hdr.port_status);
4669 }
4670
4671 if (ctl_debug & CTL_DEBUG_CDB_DATA)
4672 ctl_data_print(io);
4673 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
4674 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
4675 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
4676 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
4677 /*
4678 * XXX KDM just assuming a single pointer here, and not a
4679 * S/G list. If we start using S/G lists for config data,
4680 * we'll need to know how to clean them up here as well.
4681 */
4682 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
4683 free(io->scsiio.kern_data_ptr, M_CTL);
4684 ctl_done(io);
4685 retval = CTL_RETVAL_COMPLETE;
4686 } else {
4687 /*
4688 * XXX KDM now we need to continue data movement. Some
4689 * options:
4690 * - call ctl_scsiio() again? We don't do this for data
4691 * writes, because for those at least we know ahead of
4692 * time where the write will go and how long it is. For
4693 * config writes, though, that information is largely
4694 * contained within the write itself, thus we need to
4695 * parse out the data again.
4696 *
4697 * - Call some other function once the data is in?
4698 */
4699
4700 /*
4701 * XXX KDM call ctl_scsiio() again for now, and check flag
4702 * bits to see whether we're allocated or not.
4703 */
4704 retval = ctl_scsiio(&io->scsiio);
4705 }
4706 return (retval);
4707}
4708
4709/*
4710 * This gets called by a backend driver when it is done with a
4711 * data_submit method.
4712 */
4713void
4714ctl_data_submit_done(union ctl_io *io)
4715{
4716 /*
4717 * If the IO_CONT flag is set, we need to call the supplied
4718 * function to continue processing the I/O, instead of completing
4719 * the I/O just yet.
4720 *
4721 * If there is an error, though, we don't want to keep processing.
4722 * Instead, just send status back to the initiator.
4723 */
4724 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
4725 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
4726 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
4727 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
4728 io->scsiio.io_cont(io);
4729 return;
4730 }
4731 ctl_done(io);
4732}
4733
4734/*
4735 * This gets called by a backend driver when it is done with a
4736 * configuration write.
4737 */
4738void
4739ctl_config_write_done(union ctl_io *io)
4740{
4741 uint8_t *buf;
4742
4743 /*
4744 * If the IO_CONT flag is set, we need to call the supplied
4745 * function to continue processing the I/O, instead of completing
4746 * the I/O just yet.
4747 *
4748 * If there is an error, though, we don't want to keep processing.
4749 * Instead, just send status back to the initiator.
4750 */
4751 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
4752 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
4753 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
4754 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
4755 io->scsiio.io_cont(io);
4756 return;
4757 }
4758 /*
4759 * Since a configuration write can be done for commands that actually
4760 * have data allocated, like write buffer, and commands that have
4761 * no data, like start/stop unit, we need to check here.
4762 */
4763 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
4764 buf = io->scsiio.kern_data_ptr;
4765 else
4766 buf = NULL;
4767 ctl_done(io);
4768 if (buf)
4769 free(buf, M_CTL);
4770}
4771
4772void
4773ctl_config_read_done(union ctl_io *io)
4774{
4775 uint8_t *buf;
4776
4777 /*
4778 * If there is some error -- we are done, skip data transfer.
4779 */
4780 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
4781 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
4782 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
4783 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
4784 buf = io->scsiio.kern_data_ptr;
4785 else
4786 buf = NULL;
4787 ctl_done(io);
4788 if (buf)
4789 free(buf, M_CTL);
4790 return;
4791 }
4792
4793 /*
4794 * If the IO_CONT flag is set, we need to call the supplied
4795 * function to continue processing the I/O, instead of completing
4796 * the I/O just yet.
4797 */
4798 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
4799 io->scsiio.io_cont(io);
4800 return;
4801 }
4802
4803 ctl_datamove(io);
4804}
4805
4806/*
4807 * SCSI release command.
4808 */
4809int
4810ctl_scsi_release(struct ctl_scsiio *ctsio)
4811{
4812 int length, longid, thirdparty_id, resv_id;
4813 struct ctl_lun *lun;
4814 uint32_t residx;
4815
4816 length = 0;
4817 resv_id = 0;
4818
4819 CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
4820
4821 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
4822 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
4823
4824 switch (ctsio->cdb[0]) {
4825 case RELEASE_10: {
4826 struct scsi_release_10 *cdb;
4827
4828 cdb = (struct scsi_release_10 *)ctsio->cdb;
4829
4830 if (cdb->byte2 & SR10_LONGID)
4831 longid = 1;
4832 else
4833 thirdparty_id = cdb->thirdparty_id;
4834
4835 resv_id = cdb->resv_id;
4836 length = scsi_2btoul(cdb->length);
4837 break;
4838 }
4839 }
4840
4841
4842 /*
4843 * XXX KDM right now, we only support LUN reservation. We don't
4844 * support 3rd party reservations, or extent reservations, which
4845 * might actually need the parameter list. If we've gotten this
4846 * far, we've got a LUN reservation. Anything else got kicked out
4847 * above. So, according to SPC, ignore the length.
4848 */
4849 length = 0;
4850
4851 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
4852 && (length > 0)) {
4853 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
4854 ctsio->kern_data_len = length;
4855 ctsio->kern_total_len = length;
4856 ctsio->kern_data_resid = 0;
4857 ctsio->kern_rel_offset = 0;
4858 ctsio->kern_sg_entries = 0;
4859 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
4860 ctsio->be_move_done = ctl_config_move_done;
4861 ctl_datamove((union ctl_io *)ctsio);
4862
4863 return (CTL_RETVAL_COMPLETE);
4864 }
4865
4866 if (length > 0)
4867 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
4868
4869 mtx_lock(&lun->lun_lock);
4870
4871 /*
4872	 * According to SPC, it is not an error for an initiator to attempt
4873 * to release a reservation on a LUN that isn't reserved, or that
4874 * is reserved by another initiator. The reservation can only be
4875 * released, though, by the initiator who made it or by one of
4876 * several reset type events.
4877 */
4878 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
4879 lun->flags &= ~CTL_LUN_RESERVED;
4880
4881 mtx_unlock(&lun->lun_lock);
4882
4883 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
4884 free(ctsio->kern_data_ptr, M_CTL);
4885 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
4886 }
4887
4888 ctl_set_success(ctsio);
4889 ctl_done((union ctl_io *)ctsio);
4890 return (CTL_RETVAL_COMPLETE);
4891}
4892
4893int
4894ctl_scsi_reserve(struct ctl_scsiio *ctsio)
4895{
4896 int extent, thirdparty, longid;
4897 int resv_id, length;
4898 uint64_t thirdparty_id;
4899 struct ctl_lun *lun;
4900 uint32_t residx;
4901
4902 extent = 0;
4903 thirdparty = 0;
4904 longid = 0;
4905 resv_id = 0;
4906 length = 0;
4907 thirdparty_id = 0;
4908
4909 CTL_DEBUG_PRINT(("ctl_reserve\n"));
4910
4911 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
4912 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
4913
4914 switch (ctsio->cdb[0]) {
4915 case RESERVE_10: {
4916 struct scsi_reserve_10 *cdb;
4917
4918 cdb = (struct scsi_reserve_10 *)ctsio->cdb;
4919
4920 if (cdb->byte2 & SR10_LONGID)
4921 longid = 1;
4922 else
4923 thirdparty_id = cdb->thirdparty_id;
4924
4925 resv_id = cdb->resv_id;
4926 length = scsi_2btoul(cdb->length);
4927 break;
4928 }
4929 }
4930
4931 /*
4932 * XXX KDM right now, we only support LUN reservation. We don't
4933 * support 3rd party reservations, or extent reservations, which
4934 * might actually need the parameter list. If we've gotten this
4935 * far, we've got a LUN reservation. Anything else got kicked out
4936 * above. So, according to SPC, ignore the length.
4937 */
4938 length = 0;
4939
4940 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
4941 && (length > 0)) {
4942 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
4943 ctsio->kern_data_len = length;
4944 ctsio->kern_total_len = length;
4945 ctsio->kern_data_resid = 0;
4946 ctsio->kern_rel_offset = 0;
4947 ctsio->kern_sg_entries = 0;
4948 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
4949 ctsio->be_move_done = ctl_config_move_done;
4950 ctl_datamove((union ctl_io *)ctsio);
4951
4952 return (CTL_RETVAL_COMPLETE);
4953 }
4954
4955 if (length > 0)
4956 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
4957
4958 mtx_lock(&lun->lun_lock);
4959 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
4960 ctl_set_reservation_conflict(ctsio);
4961 goto bailout;
4962 }
4963
4964 lun->flags |= CTL_LUN_RESERVED;
4965 lun->res_idx = residx;
4966
4967 ctl_set_success(ctsio);
4968
4969bailout:
4970 mtx_unlock(&lun->lun_lock);
4971
4972 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
4973 free(ctsio->kern_data_ptr, M_CTL);
4974 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
4975 }
4976
4977 ctl_done((union ctl_io *)ctsio);
4978 return (CTL_RETVAL_COMPLETE);
4979}
4980
4981int
4982ctl_start_stop(struct ctl_scsiio *ctsio)
4983{
4984 struct scsi_start_stop_unit *cdb;
4985 struct ctl_lun *lun;
4986 int retval;
4987
4988 CTL_DEBUG_PRINT(("ctl_start_stop\n"));
4989
4990 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
4991 retval = 0;
4992
4993 cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
4994
4995 /*
4996 * XXX KDM
4997 * We don't support the immediate bit on a stop unit. In order to
4998 * do that, we would need to code up a way to know that a stop is
4999 * pending, and hold off any new commands until it completes, one
5000 * way or another. Then we could accept or reject those commands
5001 * depending on its status. We would almost need to do the reverse
5002 * of what we do below for an immediate start -- return the copy of
5003 * the ctl_io to the FETD with status to send to the host (and to
5004 * free the copy!) and then free the original I/O once the stop
5005 * actually completes. That way, the OOA queue mechanism can work
5006 * to block commands that shouldn't proceed. Another alternative
5007 * would be to put the copy in the queue in place of the original,
5008 * and return the original back to the caller. That could be
5009	 * slightly safer.
5010 */
5011 if ((cdb->byte2 & SSS_IMMED)
5012 && ((cdb->how & SSS_START) == 0)) {
5013 ctl_set_invalid_field(ctsio,
5014 /*sks_valid*/ 1,
5015 /*command*/ 1,
5016 /*field*/ 1,
5017 /*bit_valid*/ 1,
5018 /*bit*/ 0);
5019 ctl_done((union ctl_io *)ctsio);
5020 return (CTL_RETVAL_COMPLETE);
5021 }
5022
5023 if ((lun->flags & CTL_LUN_PR_RESERVED)
5024 && ((cdb->how & SSS_START)==0)) {
5025 uint32_t residx;
5026
5027 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5028 if (ctl_get_prkey(lun, residx) == 0
5029 || (lun->pr_res_idx!=residx && lun->res_type < 4)) {
5030
5031 ctl_set_reservation_conflict(ctsio);
5032 ctl_done((union ctl_io *)ctsio);
5033 return (CTL_RETVAL_COMPLETE);
5034 }
5035 }
5036
5037 /*
5038 * If there is no backend on this device, we can't start or stop
5039 * it. In theory we shouldn't get any start/stop commands in the
5040 * first place at this level if the LUN doesn't have a backend.
5041 * That should get stopped by the command decode code.
5042 */
5043 if (lun->backend == NULL) {
5044 ctl_set_invalid_opcode(ctsio);
5045 ctl_done((union ctl_io *)ctsio);
5046 return (CTL_RETVAL_COMPLETE);
5047 }
5048
5049 /*
5050 * XXX KDM Copan-specific offline behavior.
5051 * Figure out a reasonable way to port this?
5052 */
5053#ifdef NEEDTOPORT
5054 mtx_lock(&lun->lun_lock);
5055
5056 if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
5057 && (lun->flags & CTL_LUN_OFFLINE)) {
5058 /*
5059 * If the LUN is offline, and the on/offline bit isn't set,
5060 * reject the start or stop. Otherwise, let it through.
5061 */
5062 mtx_unlock(&lun->lun_lock);
5063 ctl_set_lun_not_ready(ctsio);
5064 ctl_done((union ctl_io *)ctsio);
5065 } else {
5066 mtx_unlock(&lun->lun_lock);
5067#endif /* NEEDTOPORT */
5068 /*
5069 * This could be a start or a stop when we're online,
5070 * or a stop/offline or start/online. A start or stop when
5071 * we're offline is covered in the case above.
5072 */
5073 /*
5074 * In the non-immediate case, we send the request to
5075 * the backend and return status to the user when
5076 * it is done.
5077 *
5078 * In the immediate case, we allocate a new ctl_io
5079 * to hold a copy of the request, and send that to
5080 * the backend. We then set good status on the
5081 * user's request and return it immediately.
5082 */
5083 if (cdb->byte2 & SSS_IMMED) {
5084 union ctl_io *new_io;
5085
5086 new_io = ctl_alloc_io(ctsio->io_hdr.pool);
5087 ctl_copy_io((union ctl_io *)ctsio, new_io);
5088 retval = lun->backend->config_write(new_io);
5089 ctl_set_success(ctsio);
5090 ctl_done((union ctl_io *)ctsio);
5091 } else {
5092 retval = lun->backend->config_write(
5093 (union ctl_io *)ctsio);
5094 }
5095#ifdef NEEDTOPORT
5096 }
5097#endif
5098 return (retval);
5099}
5100
5101/*
5102 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5103 * we don't really do anything with the LBA and length fields if the user
5104 * passes them in. Instead we'll just flush out the cache for the entire
5105 * LUN.
5106 */
5107int
5108ctl_sync_cache(struct ctl_scsiio *ctsio)
5109{
5110 struct ctl_lun *lun;
5111 struct ctl_softc *softc;
5112 struct ctl_lba_len_flags *lbalen;
5113 uint64_t starting_lba;
5114 uint32_t block_count;
5115 int retval;
5116 uint8_t byte2;
5117
5118 CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5119
5120 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5121 softc = lun->ctl_softc;
5122 retval = 0;
5123
5124 switch (ctsio->cdb[0]) {
5125 case SYNCHRONIZE_CACHE: {
5126 struct scsi_sync_cache *cdb;
5127 cdb = (struct scsi_sync_cache *)ctsio->cdb;
5128
5129 starting_lba = scsi_4btoul(cdb->begin_lba);
5130 block_count = scsi_2btoul(cdb->lb_count);
5131 byte2 = cdb->byte2;
5132 break;
5133 }
5134 case SYNCHRONIZE_CACHE_16: {
5135 struct scsi_sync_cache_16 *cdb;
5136 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
5137
5138 starting_lba = scsi_8btou64(cdb->begin_lba);
5139 block_count = scsi_4btoul(cdb->lb_count);
5140 byte2 = cdb->byte2;
5141 break;
5142 }
5143 default:
5144 ctl_set_invalid_opcode(ctsio);
5145 ctl_done((union ctl_io *)ctsio);
5146 goto bailout;
5147 break; /* NOTREACHED */
5148 }
5149
5150 /*
5151 * We check the LBA and length, but don't do anything with them.
5152 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
5153 * get flushed. This check will just help satisfy anyone who wants
5154 * to see an error for an out of range LBA.
5155 */
5156 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
5157 ctl_set_lba_out_of_range(ctsio);
5158 ctl_done((union ctl_io *)ctsio);
5159 goto bailout;
5160 }
5161
5162 /*
5163 * If this LUN has no backend, we can't flush the cache anyway.
5164 */
5165 if (lun->backend == NULL) {
5166 ctl_set_invalid_opcode(ctsio);
5167 ctl_done((union ctl_io *)ctsio);
5168 goto bailout;
5169 }
5170
5171 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5172 lbalen->lba = starting_lba;
5173 lbalen->len = block_count;
5174 lbalen->flags = byte2;
5175
5176 /*
5177 * Check to see whether we're configured to send the SYNCHRONIZE
5178 * CACHE command directly to the back end.
5179 */
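/*
 * With CTL_FLAG_REAL_SYNC set, only every lun->sync_interval-th
 * request is actually forwarded to the backend; everything else
 * (including all requests when real sync is off) completes
 * immediately with good status.
 */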
5180 mtx_lock(&lun->lun_lock);
5181 if ((softc->flags & CTL_FLAG_REAL_SYNC)
5182 && (++(lun->sync_count) >= lun->sync_interval)) {
5183 lun->sync_count = 0;
5184 mtx_unlock(&lun->lun_lock);
5185 retval = lun->backend->config_write((union ctl_io *)ctsio);
5186 } else {
5187 mtx_unlock(&lun->lun_lock);
5188 ctl_set_success(ctsio);
5189 ctl_done((union ctl_io *)ctsio);
5190 }
5191
5192bailout:
5193
5194 return (retval);
5195}
5196
5197int
5198ctl_format(struct ctl_scsiio *ctsio)
5199{
5200 struct scsi_format *cdb;
5201 struct ctl_lun *lun;
5202 int length, defect_list_len;
5203
5204 CTL_DEBUG_PRINT(("ctl_format\n"));
5205
5206 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5207
5208 cdb = (struct scsi_format *)ctsio->cdb;
5209
5210 length = 0;
5211 if (cdb->byte2 & SF_FMTDATA) {
5212 if (cdb->byte2 & SF_LONGLIST)
5213 length = sizeof(struct scsi_format_header_long);
5214 else
5215 length = sizeof(struct scsi_format_header_short);
5216 }
5217
5218 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5219 && (length > 0)) {
5220 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5221 ctsio->kern_data_len = length;
5222 ctsio->kern_total_len = length;
5223 ctsio->kern_data_resid = 0;
5224 ctsio->kern_rel_offset = 0;
5225 ctsio->kern_sg_entries = 0;
5226 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5227 ctsio->be_move_done = ctl_config_move_done;
5228 ctl_datamove((union ctl_io *)ctsio);
5229
5230 return (CTL_RETVAL_COMPLETE);
5231 }
5232
5233 defect_list_len = 0;
5234
5235 if (cdb->byte2 & SF_FMTDATA) {
5236 if (cdb->byte2 & SF_LONGLIST) {
5237 struct scsi_format_header_long *header;
5238
5239 header = (struct scsi_format_header_long *)
5240 ctsio->kern_data_ptr;
5241
5242 defect_list_len = scsi_4btoul(header->defect_list_len);
5243 if (defect_list_len != 0) {
5244 ctl_set_invalid_field(ctsio,
5245 /*sks_valid*/ 1,
5246 /*command*/ 0,
5247 /*field*/ 2,
5248 /*bit_valid*/ 0,
5249 /*bit*/ 0);
5250 goto bailout;
5251 }
5252 } else {
5253 struct scsi_format_header_short *header;
5254
5255 header = (struct scsi_format_header_short *)
5256 ctsio->kern_data_ptr;
5257
5258 defect_list_len = scsi_2btoul(header->defect_list_len);
5259 if (defect_list_len != 0) {
5260 ctl_set_invalid_field(ctsio,
5261 /*sks_valid*/ 1,
5262 /*command*/ 0,
5263 /*field*/ 2,
5264 /*bit_valid*/ 0,
5265 /*bit*/ 0);
5266 goto bailout;
5267 }
5268 }
5269 }
5270
5271 /*
5272 * The format command will clear out the "Medium format corrupted"
5273 * status if set by the configuration code. That status is really
5274 * just a way to notify the host that we have lost the media, and
5275 * get them to issue a command that will basically make them think
5276 * they're blowing away the media.
5277 */
5278 mtx_lock(&lun->lun_lock);
5279 lun->flags &= ~CTL_LUN_INOPERABLE;
5280 mtx_unlock(&lun->lun_lock);
5281
5282 ctl_set_success(ctsio);
5283bailout:
5284
5285 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5286 free(ctsio->kern_data_ptr, M_CTL);
5287 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5288 }
5289
5290 ctl_done((union ctl_io *)ctsio);
5291 return (CTL_RETVAL_COMPLETE);
5292}
5293
5294int
5295ctl_read_buffer(struct ctl_scsiio *ctsio)
5296{
5297 struct scsi_read_buffer *cdb;
5298 struct ctl_lun *lun;
5299 int buffer_offset, len;
5300 static uint8_t descr[4];
5301 static uint8_t echo_descr[4] = { 0 };
5302
5303 CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
5304
5305 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5306 cdb = (struct scsi_read_buffer *)ctsio->cdb;
5307
5308 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA &&
5309 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
5310 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) {
5311 ctl_set_invalid_field(ctsio,
5312 /*sks_valid*/ 1,
5313 /*command*/ 1,
5314 /*field*/ 1,
5315 /*bit_valid*/ 1,
5316 /*bit*/ 4);
5317 ctl_done((union ctl_io *)ctsio);
5318 return (CTL_RETVAL_COMPLETE);
5319 }
5320
5321 len = scsi_3btoul(cdb->length);
5322 buffer_offset = scsi_3btoul(cdb->offset);
5323
5324 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5325 ctl_set_invalid_field(ctsio,
5326 /*sks_valid*/ 1,
5327 /*command*/ 1,
5328 /*field*/ 6,
5329 /*bit_valid*/ 0,
5330 /*bit*/ 0);
5331 ctl_done((union ctl_io *)ctsio);
5332 return (CTL_RETVAL_COMPLETE);
5333 }
5334
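/*
 * DESCR mode returns a four byte buffer descriptor (offset boundary
 * of zero plus the buffer capacity), ECHO_DESCR returns an all-zero
 * descriptor, and DATA mode returns the contents of the lazily
 * allocated per-LUN write buffer.
 */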
5335 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) {
5336 descr[0] = 0;
5337 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
5338 ctsio->kern_data_ptr = descr;
5339 len = min(len, sizeof(descr));
5340 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
5341 ctsio->kern_data_ptr = echo_descr;
5342 len = min(len, sizeof(echo_descr));
5343 } else {
5344 if (lun->write_buffer == NULL) {
5345 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5346 M_CTL, M_WAITOK);
5347 }
5348 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5349 }
5350 ctsio->kern_data_len = len;
5351 ctsio->kern_total_len = len;
5352 ctsio->kern_data_resid = 0;
5353 ctsio->kern_rel_offset = 0;
5354 ctsio->kern_sg_entries = 0;
5355 ctl_set_success(ctsio);
5356 ctsio->be_move_done = ctl_config_move_done;
5357 ctl_datamove((union ctl_io *)ctsio);
5358 return (CTL_RETVAL_COMPLETE);
5359}
5360
5361int
5362ctl_write_buffer(struct ctl_scsiio *ctsio)
5363{
5364 struct scsi_write_buffer *cdb;
5365 struct ctl_lun *lun;
5366 int buffer_offset, len;
5367
5368 CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5369
5370 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5371 cdb = (struct scsi_write_buffer *)ctsio->cdb;
5372
5373 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
5374 ctl_set_invalid_field(ctsio,
5375 /*sks_valid*/ 1,
5376 /*command*/ 1,
5377 /*field*/ 1,
5378 /*bit_valid*/ 1,
5379 /*bit*/ 4);
5380 ctl_done((union ctl_io *)ctsio);
5381 return (CTL_RETVAL_COMPLETE);
5382 }
5383
5384 len = scsi_3btoul(cdb->length);
5385 buffer_offset = scsi_3btoul(cdb->offset);
5386
5387 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5388 ctl_set_invalid_field(ctsio,
5389 /*sks_valid*/ 1,
5390 /*command*/ 1,
5391 /*field*/ 6,
5392 /*bit_valid*/ 0,
5393 /*bit*/ 0);
5394 ctl_done((union ctl_io *)ctsio);
5395 return (CTL_RETVAL_COMPLETE);
5396 }
5397
5398 /*
5399 * If we've got a kernel request that hasn't been malloced yet,
5400 * malloc it and tell the caller the data buffer is here.
5401 */
5402 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5403 if (lun->write_buffer == NULL) {
5404 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5405 M_CTL, M_WAITOK);
5406 }
5407 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5408 ctsio->kern_data_len = len;
5409 ctsio->kern_total_len = len;
5410 ctsio->kern_data_resid = 0;
5411 ctsio->kern_rel_offset = 0;
5412 ctsio->kern_sg_entries = 0;
5413 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5414 ctsio->be_move_done = ctl_config_move_done;
5415 ctl_datamove((union ctl_io *)ctsio);
5416
5417 return (CTL_RETVAL_COMPLETE);
5418 }
5419
5420 ctl_set_success(ctsio);
5421 ctl_done((union ctl_io *)ctsio);
5422 return (CTL_RETVAL_COMPLETE);
5423}
5424
5425int
5426ctl_write_same(struct ctl_scsiio *ctsio)
5427{
5428 struct ctl_lun *lun;
5429 struct ctl_lba_len_flags *lbalen;
5430 uint64_t lba;
5431 uint32_t num_blocks;
5432 int len, retval;
5433 uint8_t byte2;
5434
5435 retval = CTL_RETVAL_COMPLETE;
5436
5437 CTL_DEBUG_PRINT(("ctl_write_same\n"));
5438
5439 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5440
5441 switch (ctsio->cdb[0]) {
5442 case WRITE_SAME_10: {
5443 struct scsi_write_same_10 *cdb;
5444
5445 cdb = (struct scsi_write_same_10 *)ctsio->cdb;
5446
5447 lba = scsi_4btoul(cdb->addr);
5448 num_blocks = scsi_2btoul(cdb->length);
5449 byte2 = cdb->byte2;
5450 break;
5451 }
5452 case WRITE_SAME_16: {
5453 struct scsi_write_same_16 *cdb;
5454
5455 cdb = (struct scsi_write_same_16 *)ctsio->cdb;
5456
5457 lba = scsi_8btou64(cdb->addr);
5458 num_blocks = scsi_4btoul(cdb->length);
5459 byte2 = cdb->byte2;
5460 break;
5461 }
5462 default:
5463 /*
5464 * We got a command we don't support. This shouldn't
5465 * happen; commands should be filtered out above us.
5466 */
5467 ctl_set_invalid_opcode(ctsio);
5468 ctl_done((union ctl_io *)ctsio);
5469
5470 return (CTL_RETVAL_COMPLETE);
5471 break; /* NOTREACHED */
5472 }
5473
5474 /* NDOB and ANCHOR flags can be used only together with UNMAP */
5475 if ((byte2 & SWS_UNMAP) == 0 &&
5476 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) {
5477 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
5478 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
5479 ctl_done((union ctl_io *)ctsio);
5480 return (CTL_RETVAL_COMPLETE);
5481 }
5482
5483 /*
5484 * The first check is to make sure we're in bounds, the second
5485 * check is to catch wrap-around problems. If the lba + num blocks
5486 * is less than the lba, then we've wrapped around and the block
5487 * range is invalid anyway.
5488 */
5489 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5490 || ((lba + num_blocks) < lba)) {
5491 ctl_set_lba_out_of_range(ctsio);
5492 ctl_done((union ctl_io *)ctsio);
5493 return (CTL_RETVAL_COMPLETE);
5494 }
5495
5496 /* Zero number of blocks means "to the last logical block" */
5497 if (num_blocks == 0) {
5498 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
5499 ctl_set_invalid_field(ctsio,
5500 /*sks_valid*/ 0,
5501 /*command*/ 1,
5502 /*field*/ 0,
5503 /*bit_valid*/ 0,
5504 /*bit*/ 0);
5505 ctl_done((union ctl_io *)ctsio);
5506 return (CTL_RETVAL_COMPLETE);
5507 }
5508 num_blocks = (lun->be_lun->maxlba + 1) - lba;
5509 }
5510
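/*
 * A WRITE SAME data-out buffer is exactly one logical block; the
 * backend replicates it across the whole range. With NDOB set there
 * is no data-out phase at all.
 */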
5511 len = lun->be_lun->blocksize;
5512
5513 /*
5514 * If we've got a kernel request that hasn't been malloced yet,
5515 * malloc it and tell the caller the data buffer is here.
5516 */
5517 if ((byte2 & SWS_NDOB) == 0 &&
5518 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5519 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5520 ctsio->kern_data_len = len;
5521 ctsio->kern_total_len = len;
5522 ctsio->kern_data_resid = 0;
5523 ctsio->kern_rel_offset = 0;
5524 ctsio->kern_sg_entries = 0;
5525 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5526 ctsio->be_move_done = ctl_config_move_done;
5527 ctl_datamove((union ctl_io *)ctsio);
5528
5529 return (CTL_RETVAL_COMPLETE);
5530 }
5531
5532 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5533 lbalen->lba = lba;
5534 lbalen->len = num_blocks;
5535 lbalen->flags = byte2;
5536 retval = lun->backend->config_write((union ctl_io *)ctsio);
5537
5538 return (retval);
5539}
5540
5541int
5542ctl_unmap(struct ctl_scsiio *ctsio)
5543{
5544 struct ctl_lun *lun;
5545 struct scsi_unmap *cdb;
5546 struct ctl_ptr_len_flags *ptrlen;
5547 struct scsi_unmap_header *hdr;
5548 struct scsi_unmap_desc *buf, *end, *endnz, *range;
5549 uint64_t lba;
5550 uint32_t num_blocks;
5551 int len, retval;
5552 uint8_t byte2;
5553
5554 retval = CTL_RETVAL_COMPLETE;
5555
5556 CTL_DEBUG_PRINT(("ctl_unmap\n"));
5557
5558 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5559 cdb = (struct scsi_unmap *)ctsio->cdb;
5560
5561 len = scsi_2btoul(cdb->length);
5562 byte2 = cdb->byte2;
5563
5564 /*
5565 * If we've got a kernel request that hasn't been malloced yet,
5566 * malloc it and tell the caller the data buffer is here.
5567 */
5568 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5569 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5570 ctsio->kern_data_len = len;
5571 ctsio->kern_total_len = len;
5572 ctsio->kern_data_resid = 0;
5573 ctsio->kern_rel_offset = 0;
5574 ctsio->kern_sg_entries = 0;
5575 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5576 ctsio->be_move_done = ctl_config_move_done;
5577 ctl_datamove((union ctl_io *)ctsio);
5578
5579 return (CTL_RETVAL_COMPLETE);
5580 }
5581
5582 len = ctsio->kern_total_len - ctsio->kern_data_resid;
5583 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
5584 if (len < sizeof (*hdr) ||
5585 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
5586 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
5587 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
5588 ctl_set_invalid_field(ctsio,
5589 /*sks_valid*/ 0,
5590 /*command*/ 0,
5591 /*field*/ 0,
5592 /*bit_valid*/ 0,
5593 /*bit*/ 0);
5594 goto done;
5595 }
5596 len = scsi_2btoul(hdr->desc_length);
5597 buf = (struct scsi_unmap_desc *)(hdr + 1);
5598 end = buf + len / sizeof(*buf);
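/*
 * Walk the UNMAP block descriptors, rejecting any range that runs
 * past the end of the LUN or wraps, and remember the last descriptor
 * with a non-zero length so trailing empty ranges can be trimmed.
 */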
5599
5600 endnz = buf;
5601 for (range = buf; range < end; range++) {
5602 lba = scsi_8btou64(range->lba);
5603 num_blocks = scsi_4btoul(range->length);
5604 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5605 || ((lba + num_blocks) < lba)) {
5606 ctl_set_lba_out_of_range(ctsio);
5607 ctl_done((union ctl_io *)ctsio);
5608 return (CTL_RETVAL_COMPLETE);
5609 }
5610 if (num_blocks != 0)
5611 endnz = range + 1;
5612 }
5613
5614 /*
5615 * The block backend cannot handle a zero-length last range.
5616 * Trim trailing empty ranges and return if nothing is left.
5617 */
5618 len = (uint8_t *)endnz - (uint8_t *)buf;
5619 if (len == 0) {
5620 ctl_set_success(ctsio);
5621 goto done;
5622 }
5623
5624 mtx_lock(&lun->lun_lock);
5625 ptrlen = (struct ctl_ptr_len_flags *)
5626 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5627 ptrlen->ptr = (void *)buf;
5628 ptrlen->len = len;
5629 ptrlen->flags = byte2;
5630 ctl_check_blocked(lun);
5631 mtx_unlock(&lun->lun_lock);
5632
5633 retval = lun->backend->config_write((union ctl_io *)ctsio);
5634 return (retval);
5635
5636done:
5637 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5638 free(ctsio->kern_data_ptr, M_CTL);
5639 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5640 }
5641 ctl_done((union ctl_io *)ctsio);
5642 return (CTL_RETVAL_COMPLETE);
5643}
5644
5645/*
5646 * Note that this function currently doesn't actually do anything inside
5647 * CTL to enforce things if the DQue bit is turned on.
5648 *
5649 * Also note that this function can't be used in the default case, because
5650 * the DQue bit isn't set in the changeable mask for the control mode page
5651 * anyway. This is just here as an example for how to implement a page
5652 * handler, and a placeholder in case we want to allow the user to turn
5653 * tagged queueing on and off.
5654 *
5655 * The D_SENSE bit handling is functional, however, and will turn
5656 * descriptor sense on and off for a given LUN.
5657 */
5658int
5659ctl_control_page_handler(struct ctl_scsiio *ctsio,
5660 struct ctl_page_index *page_index, uint8_t *page_ptr)
5661{
5662 struct scsi_control_page *current_cp, *saved_cp, *user_cp;
5663 struct ctl_lun *lun;
5664 int set_ua;
5665 uint32_t initidx;
5666
5667 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5668 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5669 set_ua = 0;
5670
5671 user_cp = (struct scsi_control_page *)page_ptr;
5672 current_cp = (struct scsi_control_page *)
5673 (page_index->page_data + (page_index->page_len *
5674 CTL_PAGE_CURRENT));
5675 saved_cp = (struct scsi_control_page *)
5676 (page_index->page_data + (page_index->page_len *
5677 CTL_PAGE_SAVED));
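/*
 * page_data holds one copy of the page per page control (current,
 * changeable, default, saved), indexed by the CTL_PAGE_* constants;
 * changes below are applied to both the current and saved copies.
 */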
5678
5679 mtx_lock(&lun->lun_lock);
5680 if (((current_cp->rlec & SCP_DSENSE) == 0)
5681 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
5682 /*
5683 * Descriptor sense is currently turned off and the user
5684 * wants to turn it on.
5685 */
5686 current_cp->rlec |= SCP_DSENSE;
5687 saved_cp->rlec |= SCP_DSENSE;
5688 lun->flags |= CTL_LUN_SENSE_DESC;
5689 set_ua = 1;
5690 } else if (((current_cp->rlec & SCP_DSENSE) != 0)
5691 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
5692 /*
5693 * Descriptor sense is currently turned on, and the user
5694 * wants to turn it off.
5695 */
5696 current_cp->rlec &= ~SCP_DSENSE;
5697 saved_cp->rlec &= ~SCP_DSENSE;
5698 lun->flags &= ~CTL_LUN_SENSE_DESC;
5699 set_ua = 1;
5700 }
5701 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
5702 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
5703 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
5704 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
5705 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
5706 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
5707 set_ua = 1;
5708 }
5709 if ((current_cp->eca_and_aen & SCP_SWP) !=
5710 (user_cp->eca_and_aen & SCP_SWP)) {
5711 current_cp->eca_and_aen &= ~SCP_SWP;
5712 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
5713 saved_cp->eca_and_aen &= ~SCP_SWP;
5714 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
5715 set_ua = 1;
5716 }
5717 if (set_ua != 0)
5718 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
5719 mtx_unlock(&lun->lun_lock);
5720
5721 return (0);
5722}
5723
5724int
5725ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
5726 struct ctl_page_index *page_index, uint8_t *page_ptr)
5727{
5728 struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
5729 struct ctl_lun *lun;
5730 int set_ua;
5731 uint32_t initidx;
5732
5733 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5734 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5735 set_ua = 0;
5736
5737 user_cp = (struct scsi_caching_page *)page_ptr;
5738 current_cp = (struct scsi_caching_page *)
5739 (page_index->page_data + (page_index->page_len *
5740 CTL_PAGE_CURRENT));
5741 saved_cp = (struct scsi_caching_page *)
5742 (page_index->page_data + (page_index->page_len *
5743 CTL_PAGE_SAVED));
5744
5745 mtx_lock(&lun->lun_lock);
5746 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) !=
5747 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) {
5748 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
5749 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
5750 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
5751 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
5752 set_ua = 1;
5753 }
5754 if (set_ua != 0)
5755 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
5756 mtx_unlock(&lun->lun_lock);
5757
5758 return (0);
5759}
5760
5761int
5762ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
5763 struct ctl_page_index *page_index,
5764 uint8_t *page_ptr)
5765{
5766 uint8_t *c;
5767 int i;
5768
5769 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
5770 ctl_time_io_secs =
5771 (c[0] << 8) |
5772 (c[1] << 0) |
5773 0;
5774 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
5775 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
5776 printf("page data:");
5777 for (i=0; i<8; i++)
5778 printf(" %.2x",page_ptr[i]);
5779 printf("\n");
5780 return (0);
5781}
5782
5783int
5784ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
5785 struct ctl_page_index *page_index,
5786 int pc)
5787{
5788 struct copan_debugconf_subpage *page;
5789
5790 page = (struct copan_debugconf_subpage *)page_index->page_data +
5791 (page_index->page_len * pc);
5792
5793 switch (pc) {
5794 case SMS_PAGE_CTRL_CHANGEABLE >> 6:
5795 case SMS_PAGE_CTRL_DEFAULT >> 6:
5796 case SMS_PAGE_CTRL_SAVED >> 6:
5797 /*
5798 * We don't update the changeable, default, or saved bits for this page.
5799 */
5800 break;
5801 case SMS_PAGE_CTRL_CURRENT >> 6:
5802 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
5803 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
5804 break;
5805 default:
5806#ifdef NEEDTOPORT
5807 EPRINT(0, "Invalid PC %d!!", pc);
5808#endif /* NEEDTOPORT */
5809 break;
5810 }
5811 return (0);
5812}
5813
5814
5815static int
5816ctl_do_mode_select(union ctl_io *io)
5817{
5818 struct scsi_mode_page_header *page_header;
5819 struct ctl_page_index *page_index;
5820 struct ctl_scsiio *ctsio;
5821 int control_dev, page_len;
5822 int page_len_offset, page_len_size;
5823 union ctl_modepage_info *modepage_info;
5824 struct ctl_lun *lun;
5825 int *len_left, *len_used;
5826 int retval, i;
5827
5828 ctsio = &io->scsiio;
5829 page_index = NULL;
5830 page_len = 0;
5831 retval = CTL_RETVAL_COMPLETE;
5832
5833 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5834
5835 if (lun->be_lun->lun_type != T_DIRECT)
5836 control_dev = 1;
5837 else
5838 control_dev = 0;
5839
5840 modepage_info = (union ctl_modepage_info *)
5841 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
5842 len_left = &modepage_info->header.len_left;
5843 len_used = &modepage_info->header.len_used;
5844
5845do_next_page:
5846
5847 page_header = (struct scsi_mode_page_header *)
5848 (ctsio->kern_data_ptr + *len_used);
5849
5850 if (*len_left == 0) {
5851 free(ctsio->kern_data_ptr, M_CTL);
5852 ctl_set_success(ctsio);
5853 ctl_done((union ctl_io *)ctsio);
5854 return (CTL_RETVAL_COMPLETE);
5855 } else if (*len_left < sizeof(struct scsi_mode_page_header)) {
5856
5857 free(ctsio->kern_data_ptr, M_CTL);
5858 ctl_set_param_len_error(ctsio);
5859 ctl_done((union ctl_io *)ctsio);
5860 return (CTL_RETVAL_COMPLETE);
5861
5862 } else if ((page_header->page_code & SMPH_SPF)
5863 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
5864
5865 free(ctsio->kern_data_ptr, M_CTL);
5866 ctl_set_param_len_error(ctsio);
5867 ctl_done((union ctl_io *)ctsio);
5868 return (CTL_RETVAL_COMPLETE);
5869 }
5870
5871
5872 /*
5873 * XXX KDM should we do something with the block descriptor?
5874 */
5875 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
5876
5877 if ((control_dev != 0)
5878 && (lun->mode_pages.index[i].page_flags &
5879 CTL_PAGE_FLAG_DISK_ONLY))
5880 continue;
5881
5882 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
5883 (page_header->page_code & SMPH_PC_MASK))
5884 continue;
5885
5886 /*
5887 * If neither page has a subpage code, then we've got a
5888 * match.
5889 */
5890 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0)
5891 && ((page_header->page_code & SMPH_SPF) == 0)) {
5892 page_index = &lun->mode_pages.index[i];
5893 page_len = page_header->page_length;
5894 break;
5895 }
5896
5897 /*
5898 * If both pages have subpages, then the subpage numbers
5899 * have to match.
5900 */
5901 if ((lun->mode_pages.index[i].page_code & SMPH_SPF)
5902 && (page_header->page_code & SMPH_SPF)) {
5903 struct scsi_mode_page_header_sp *sph;
5904
5905 sph = (struct scsi_mode_page_header_sp *)page_header;
5906
5907 if (lun->mode_pages.index[i].subpage ==
5908 sph->subpage) {
5909 page_index = &lun->mode_pages.index[i];
5910 page_len = scsi_2btoul(sph->page_length);
5911 break;
5912 }
5913 }
5914 }
5915
5916 /*
5917 * If we couldn't find the page, or if we don't have a mode select
5918 * handler for it, send back an error to the user.
5919 */
5920 if ((page_index == NULL)
5921 || (page_index->select_handler == NULL)) {
5922 ctl_set_invalid_field(ctsio,
5923 /*sks_valid*/ 1,
5924 /*command*/ 0,
5925 /*field*/ *len_used,
5926 /*bit_valid*/ 0,
5927 /*bit*/ 0);
5928 free(ctsio->kern_data_ptr, M_CTL);
5929 ctl_done((union ctl_io *)ctsio);
5930 return (CTL_RETVAL_COMPLETE);
5931 }
5932
5933 if (page_index->page_code & SMPH_SPF) {
5934 page_len_offset = 2;
5935 page_len_size = 2;
5936 } else {
5937 page_len_size = 1;
5938 page_len_offset = 1;
5939 }
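/*
 * Subpage-format pages carry a two byte page length at offset 2,
 * plain pages a one byte length at offset 1; in both cases the length
 * excludes the bytes up to and including the length field itself.
 */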
5940
5941 /*
5942 * If the length the initiator gives us isn't the one we specify in
5943 * the mode page header, or if they didn't specify enough data in
5944 * the CDB to avoid truncating this page, kick out the request.
5945 */
5946 if ((page_len != (page_index->page_len - page_len_offset -
5947 page_len_size))
5948 || (*len_left < page_index->page_len)) {
5949
5950
5951 ctl_set_invalid_field(ctsio,
5952 /*sks_valid*/ 1,
5953 /*command*/ 0,
5954 /*field*/ *len_used + page_len_offset,
5955 /*bit_valid*/ 0,
5956 /*bit*/ 0);
5957 free(ctsio->kern_data_ptr, M_CTL);
5958 ctl_done((union ctl_io *)ctsio);
5959 return (CTL_RETVAL_COMPLETE);
5960 }
5961
5962 /*
5963 * Run through the mode page, checking to make sure that the bits
5964 * the user changed are actually legal for him to change.
5965 */
5966 for (i = 0; i < page_index->page_len; i++) {
5967 uint8_t *user_byte, *change_mask, *current_byte;
5968 int bad_bit;
5969 int j;
5970
5971 user_byte = (uint8_t *)page_header + i;
5972 change_mask = page_index->page_data +
5973 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
5974 current_byte = page_index->page_data +
5975 (page_index->page_len * CTL_PAGE_CURRENT) + i;
5976
5977 /*
5978 * Check to see whether the user set any bits in this byte
5979 * that he is not allowed to set.
5980 */
5981 if ((*user_byte & ~(*change_mask)) ==
5982 (*current_byte & ~(*change_mask)))
5983 continue;
5984
5985 /*
5986 * Go through bit by bit to determine which one is illegal.
5987 */
5988 bad_bit = 0;
5989 for (j = 7; j >= 0; j--) {
5990 if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
5991 (((1 << j) & ~(*change_mask)) & *current_byte)) {
5992 bad_bit = j;
5993 break;
5994 }
5995 }
5996 ctl_set_invalid_field(ctsio,
5997 /*sks_valid*/ 1,
5998 /*command*/ 0,
5999 /*field*/ *len_used + i,
6000 /*bit_valid*/ 1,
6001 /*bit*/ bad_bit);
6002 free(ctsio->kern_data_ptr, M_CTL);
6003 ctl_done((union ctl_io *)ctsio);
6004 return (CTL_RETVAL_COMPLETE);
6005 }
6006
6007 /*
6008 * Decrement these before we call the page handler, since we may
6009 * end up getting called back one way or another before the handler
6010 * returns to this context.
6011 */
6012 *len_left -= page_index->page_len;
6013 *len_used += page_index->page_len;
6014
6015 retval = page_index->select_handler(ctsio, page_index,
6016 (uint8_t *)page_header);
6017
6018 /*
6019 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6020 * wait until this queued command completes to finish processing
6021 * the mode page. If it returns anything other than
6022 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6023 * already set the sense information, freed the data pointer, and
6024 * completed the io for us.
6025 */
6026 if (retval != CTL_RETVAL_COMPLETE)
6027 goto bailout_no_done;
6028
6029 /*
6030 * If the initiator sent us more than one page, parse the next one.
6031 */
6032 if (*len_left > 0)
6033 goto do_next_page;
6034
6035 ctl_set_success(ctsio);
6036 free(ctsio->kern_data_ptr, M_CTL);
6037 ctl_done((union ctl_io *)ctsio);
6038
6039bailout_no_done:
6040
6041 return (CTL_RETVAL_COMPLETE);
6042
6043}
6044
6045int
6046ctl_mode_select(struct ctl_scsiio *ctsio)
6047{
6048 int param_len, pf, sp;
6049 int header_size, bd_len;
6050 int len_left, len_used;
6051 struct ctl_page_index *page_index;
6052 struct ctl_lun *lun;
6053 int control_dev, page_len;
6054 union ctl_modepage_info *modepage_info;
6055 int retval;
6056
6057 pf = 0;
6058 sp = 0;
6059 page_len = 0;
6060 len_used = 0;
6061 len_left = 0;
6062 retval = 0;
6063 bd_len = 0;
6064 page_index = NULL;
6065
6066 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6067
6068 if (lun->be_lun->lun_type != T_DIRECT)
6069 control_dev = 1;
6070 else
6071 control_dev = 0;
6072
6073 switch (ctsio->cdb[0]) {
6074 case MODE_SELECT_6: {
6075 struct scsi_mode_select_6 *cdb;
6076
6077 cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6078
6079 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6080 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6081
6082 param_len = cdb->length;
6083 header_size = sizeof(struct scsi_mode_header_6);
6084 break;
6085 }
6086 case MODE_SELECT_10: {
6087 struct scsi_mode_select_10 *cdb;
6088
6089 cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6090
6091 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6092 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6093
6094 param_len = scsi_2btoul(cdb->length);
6095 header_size = sizeof(struct scsi_mode_header_10);
6096 break;
6097 }
6098 default:
6099 ctl_set_invalid_opcode(ctsio);
6100 ctl_done((union ctl_io *)ctsio);
6101 return (CTL_RETVAL_COMPLETE);
6102 break; /* NOTREACHED */
6103 }
6104
6105 /*
6106 * From SPC-3:
6107 * "A parameter list length of zero indicates that the Data-Out Buffer
6108 * shall be empty. This condition shall not be considered as an error."
6109 */
6110 if (param_len == 0) {
6111 ctl_set_success(ctsio);
6112 ctl_done((union ctl_io *)ctsio);
6113 return (CTL_RETVAL_COMPLETE);
6114 }
6115
6116 /*
6117 * Since we'll hit this the first time through, prior to
6118 * allocation, we don't need to free a data buffer here.
6119 */
6120 if (param_len < header_size) {
6121 ctl_set_param_len_error(ctsio);
6122 ctl_done((union ctl_io *)ctsio);
6123 return (CTL_RETVAL_COMPLETE);
6124 }
6125
6126 /*
6127 * Allocate the data buffer and grab the user's data. In theory,
6128 * we shouldn't have to sanity check the parameter list length here
6129 * because the maximum size is 64K. We should be able to malloc
6130 * that much without too many problems.
6131 */
6132 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6133 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
6134 ctsio->kern_data_len = param_len;
6135 ctsio->kern_total_len = param_len;
6136 ctsio->kern_data_resid = 0;
6137 ctsio->kern_rel_offset = 0;
6138 ctsio->kern_sg_entries = 0;
6139 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6140 ctsio->be_move_done = ctl_config_move_done;
6141 ctl_datamove((union ctl_io *)ctsio);
6142
6143 return (CTL_RETVAL_COMPLETE);
6144 }
6145
6146 switch (ctsio->cdb[0]) {
6147 case MODE_SELECT_6: {
6148 struct scsi_mode_header_6 *mh6;
6149
6150 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
6151 bd_len = mh6->blk_desc_len;
6152 break;
6153 }
6154 case MODE_SELECT_10: {
6155 struct scsi_mode_header_10 *mh10;
6156
6157 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
6158 bd_len = scsi_2btoul(mh10->blk_desc_len);
6159 break;
6160 }
6161 default:
6162 panic("Invalid CDB type %#x", ctsio->cdb[0]);
6163 break;
6164 }
6165
6166 if (param_len < (header_size + bd_len)) {
6167 free(ctsio->kern_data_ptr, M_CTL);
6168 ctl_set_param_len_error(ctsio);
6169 ctl_done((union ctl_io *)ctsio);
6170 return (CTL_RETVAL_COMPLETE);
6171 }
6172
6173 /*
6174 * Set the IO_CONT flag, so that if this I/O gets passed to
6175 * ctl_config_write_done(), it'll get passed back to
6176 * ctl_do_mode_select() for further processing, or completion if
6177 * we're all done.
6178 */
6179 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
6180 ctsio->io_cont = ctl_do_mode_select;
6181
6182 modepage_info = (union ctl_modepage_info *)
6183 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6184
6185 memset(modepage_info, 0, sizeof(*modepage_info));
6186
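/*
 * Skip over the mode parameter header and any block descriptors;
 * ctl_do_mode_select() then walks the mode pages that follow them.
 */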
6187 len_left = param_len - header_size - bd_len;
6188 len_used = header_size + bd_len;
6189
6190 modepage_info->header.len_left = len_left;
6191 modepage_info->header.len_used = len_used;
6192
6193 return (ctl_do_mode_select((union ctl_io *)ctsio));
6194}
6195
6196int
6197ctl_mode_sense(struct ctl_scsiio *ctsio)
6198{
6199 struct ctl_lun *lun;
6200 int pc, page_code, dbd, llba, subpage;
6201 int alloc_len, page_len, header_len, total_len;
6202 struct scsi_mode_block_descr *block_desc;
6203 struct ctl_page_index *page_index;
6204 int control_dev;
6205
6206 dbd = 0;
6207 llba = 0;
6208 block_desc = NULL;
6209 page_index = NULL;
6210
6211 CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6212
6213 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6214
6215 if (lun->be_lun->lun_type != T_DIRECT)
6216 control_dev = 1;
6217 else
6218 control_dev = 0;
6219
6220 switch (ctsio->cdb[0]) {
6221 case MODE_SENSE_6: {
6222 struct scsi_mode_sense_6 *cdb;
6223
6224 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6225
6226 header_len = sizeof(struct scsi_mode_hdr_6);
6227 if (cdb->byte2 & SMS_DBD)
6228 dbd = 1;
6229 else
6230 header_len += sizeof(struct scsi_mode_block_descr);
6231
6232 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6233 page_code = cdb->page & SMS_PAGE_CODE;
6234 subpage = cdb->subpage;
6235 alloc_len = cdb->length;
6236 break;
6237 }
6238 case MODE_SENSE_10: {
6239 struct scsi_mode_sense_10 *cdb;
6240
6241 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
6242
6243 header_len = sizeof(struct scsi_mode_hdr_10);
6244
6245 if (cdb->byte2 & SMS_DBD)
6246 dbd = 1;
6247 else
6248 header_len += sizeof(struct scsi_mode_block_descr);
6249 if (cdb->byte2 & SMS10_LLBAA)
6250 llba = 1;
6251 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6252 page_code = cdb->page & SMS_PAGE_CODE;
6253 subpage = cdb->subpage;
6254 alloc_len = scsi_2btoul(cdb->length);
6255 break;
6256 }
6257 default:
6258 ctl_set_invalid_opcode(ctsio);
6259 ctl_done((union ctl_io *)ctsio);
6260 return (CTL_RETVAL_COMPLETE);
6261 break; /* NOTREACHED */
6262 }
6263
6264 /*
6265 * We have to make a first pass through to calculate the size of
6266 * the pages that match the user's query. Then we allocate enough
6267 * memory to hold it, and actually copy the data into the buffer.
6268 */
6269 switch (page_code) {
6270 case SMS_ALL_PAGES_PAGE: {
6271 int i;
6272
6273 page_len = 0;
6274
6275 /*
6276 * At the moment, values other than 0 and 0xff here are
6277 * reserved according to SPC-3.
6278 */
6279 if ((subpage != SMS_SUBPAGE_PAGE_0)
6280 && (subpage != SMS_SUBPAGE_ALL)) {
6281 ctl_set_invalid_field(ctsio,
6282 /*sks_valid*/ 1,
6283 /*command*/ 1,
6284 /*field*/ 3,
6285 /*bit_valid*/ 0,
6286 /*bit*/ 0);
6287 ctl_done((union ctl_io *)ctsio);
6288 return (CTL_RETVAL_COMPLETE);
6289 }
6290
6291 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6292 if ((control_dev != 0)
6293 && (lun->mode_pages.index[i].page_flags &
6294 CTL_PAGE_FLAG_DISK_ONLY))
6295 continue;
6296
6297 /*
6298 * We don't use this subpage if the user didn't
6299 * request all subpages.
6300 */
6301 if ((lun->mode_pages.index[i].subpage != 0)
6302 && (subpage == SMS_SUBPAGE_PAGE_0))
6303 continue;
6304
6305#if 0
6306 printf("found page %#x len %d\n",
6307 lun->mode_pages.index[i].page_code &
6308 SMPH_PC_MASK,
6309 lun->mode_pages.index[i].page_len);
6310#endif
6311 page_len += lun->mode_pages.index[i].page_len;
6312 }
6313 break;
6314 }
6315 default: {
6316 int i;
6317
6318 page_len = 0;
6319
6320 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6321 /* Look for the right page code */
6322 if ((lun->mode_pages.index[i].page_code &
6323 SMPH_PC_MASK) != page_code)
6324 continue;
6325
6326 /* Look for the right subpage or the subpage wildcard */
6327 if ((lun->mode_pages.index[i].subpage != subpage)
6328 && (subpage != SMS_SUBPAGE_ALL))
6329 continue;
6330
6331 /* Make sure the page is supported for this dev type */
6332 if ((control_dev != 0)
6333 && (lun->mode_pages.index[i].page_flags &
6334 CTL_PAGE_FLAG_DISK_ONLY))
6335 continue;
6336
6337#if 0
6338 printf("found page %#x len %d\n",
6339 lun->mode_pages.index[i].page_code &
6340 SMPH_PC_MASK,
6341 lun->mode_pages.index[i].page_len);
6342#endif
6343
6344 page_len += lun->mode_pages.index[i].page_len;
6345 }
6346
6347 if (page_len == 0) {
6348 ctl_set_invalid_field(ctsio,
6349 /*sks_valid*/ 1,
6350 /*command*/ 1,
6351 /*field*/ 2,
6352 /*bit_valid*/ 1,
6353 /*bit*/ 5);
6354 ctl_done((union ctl_io *)ctsio);
6355 return (CTL_RETVAL_COMPLETE);
6356 }
6357 break;
6358 }
6359 }
6360
6361 total_len = header_len + page_len;
6362#if 0
6363 printf("header_len = %d, page_len = %d, total_len = %d\n",
6364 header_len, page_len, total_len);
6365#endif
6366
6367 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6368 ctsio->kern_sg_entries = 0;
6369 ctsio->kern_data_resid = 0;
6370 ctsio->kern_rel_offset = 0;
6371 if (total_len < alloc_len) {
6372 ctsio->residual = alloc_len - total_len;
6373 ctsio->kern_data_len = total_len;
6374 ctsio->kern_total_len = total_len;
6375 } else {
6376 ctsio->residual = 0;
6377 ctsio->kern_data_len = alloc_len;
6378 ctsio->kern_total_len = alloc_len;
6379 }
6380
6381 switch (ctsio->cdb[0]) {
6382 case MODE_SENSE_6: {
6383 struct scsi_mode_hdr_6 *header;
6384
6385 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
6386
6387 header->datalen = MIN(total_len - 1, 254);
6388 if (control_dev == 0) {
6389 header->dev_specific = 0x10; /* DPOFUA */
6390 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
6391 (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
6392 .eca_and_aen & SCP_SWP) != 0)
6393 header->dev_specific |= 0x80; /* WP */
6394 }
6395 if (dbd)
6396 header->block_descr_len = 0;
6397 else
6398 header->block_descr_len =
6399 sizeof(struct scsi_mode_block_descr);
6400 block_desc = (struct scsi_mode_block_descr *)&header[1];
6401 break;
6402 }
6403 case MODE_SENSE_10: {
6404 struct scsi_mode_hdr_10 *header;
6405 int datalen;
6406
6407 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
6408
6409 datalen = MIN(total_len - 2, 65533);
6410 scsi_ulto2b(datalen, header->datalen);
6411 if (control_dev == 0) {
6412 header->dev_specific = 0x10; /* DPOFUA */
6413 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
6414 (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
6415 .eca_and_aen & SCP_SWP) != 0)
6416 header->dev_specific |= 0x80; /* WP */
6417 }
6418 if (dbd)
6419 scsi_ulto2b(0, header->block_descr_len);
6420 else
6421 scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
6422 header->block_descr_len);
6423 block_desc = (struct scsi_mode_block_descr *)&header[1];
6424 break;
6425 }
6426 default:
6427 panic("invalid CDB type %#x", ctsio->cdb[0]);
6428 break; /* NOTREACHED */
6429 }
6430
6431 /*
6432 * If we've got a disk, use its blocksize in the block
6433 * descriptor. Otherwise, just set it to 0.
6434 */
6435 if (dbd == 0) {
6436 if (control_dev == 0)
6437 scsi_ulto3b(lun->be_lun->blocksize,
6438 block_desc->block_len);
6439 else
6440 scsi_ulto3b(0, block_desc->block_len);
6441 }
6442
6443 switch (page_code) {
6444 case SMS_ALL_PAGES_PAGE: {
6445 int i, data_used;
6446
6447 data_used = header_len;
6448 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6449 struct ctl_page_index *page_index;
6450
6451 page_index = &lun->mode_pages.index[i];
6452
6453 if ((control_dev != 0)
6454 && (page_index->page_flags &
6455 CTL_PAGE_FLAG_DISK_ONLY))
6456 continue;
6457
6458 /*
6459 * We don't use this subpage if the user didn't
6460 * request all subpages. We already checked (above)
6461 * to make sure the user only specified a subpage
6462 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
6463 */
6464 if ((page_index->subpage != 0)
6465 && (subpage == SMS_SUBPAGE_PAGE_0))
6466 continue;
6467
6468 /*
6469 * Call the handler, if it exists, to update the
6470 * page to the latest values.
6471 */
6472 if (page_index->sense_handler != NULL)
6473 page_index->sense_handler(ctsio, page_index,pc);
6474
6475 memcpy(ctsio->kern_data_ptr + data_used,
6476 page_index->page_data +
6477 (page_index->page_len * pc),
6478 page_index->page_len);
6479 data_used += page_index->page_len;
6480 }
6481 break;
6482 }
6483 default: {
6484 int i, data_used;
6485
6486 data_used = header_len;
6487
6488 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6489 struct ctl_page_index *page_index;
6490
6491 page_index = &lun->mode_pages.index[i];
6492
6493 /* Look for the right page code */
6494 if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6495 continue;
6496
6497 /* Look for the right subpage or the subpage wildcard */
6498 if ((page_index->subpage != subpage)
6499 && (subpage != SMS_SUBPAGE_ALL))
6500 continue;
6501
6502 /* Make sure the page is supported for this dev type */
6503 if ((control_dev != 0)
6504 && (page_index->page_flags &
6505 CTL_PAGE_FLAG_DISK_ONLY))
6506 continue;
6507
6508 /*
6509 * Call the handler, if it exists, to update the
6510 * page to the latest values.
6511 */
6512 if (page_index->sense_handler != NULL)
6513 page_index->sense_handler(ctsio, page_index,pc);
6514
6515 memcpy(ctsio->kern_data_ptr + data_used,
6516 page_index->page_data +
6517 (page_index->page_len * pc),
6518 page_index->page_len);
6519 data_used += page_index->page_len;
6520 }
6521 break;
6522 }
6523 }
6524
6525 ctl_set_success(ctsio);
6526 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6527 ctsio->be_move_done = ctl_config_move_done;
6528 ctl_datamove((union ctl_io *)ctsio);
6529 return (CTL_RETVAL_COMPLETE);
6530}
6531
6532int
6533ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
6534 struct ctl_page_index *page_index,
6535 int pc)
6536{
6537 struct ctl_lun *lun;
6538 struct scsi_log_param_header *phdr;
6539 uint8_t *data;
6540 uint64_t val;
6541
6542 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6543 data = page_index->page_data;
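/*
 * Emit one 8-byte log parameter for each thin provisioning attribute
 * the backend can report: available and used block counts (parameters
 * 0x0001/0x0002) and their pool-wide equivalents (0x00f1/0x00f2),
 * each scaled down by CTL_LBP_EXPONENT.
 */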
6544
6545 if (lun->backend->lun_attr != NULL &&
6546 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail"))
6547 != UINT64_MAX) {
6548 phdr = (struct scsi_log_param_header *)data;
6549 scsi_ulto2b(0x0001, phdr->param_code);
6550 phdr->param_control = SLP_LBIN | SLP_LP;
6551 phdr->param_len = 8;
6552 data = (uint8_t *)(phdr + 1);
6553 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6554 data[4] = 0x02; /* per-pool */
6555 data += phdr->param_len;
6556 }
6557
6558 if (lun->backend->lun_attr != NULL &&
6559 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused"))
6560 != UINT64_MAX) {
6561 phdr = (struct scsi_log_param_header *)data;
6562 scsi_ulto2b(0x0002, phdr->param_code);
6563 phdr->param_control = SLP_LBIN | SLP_LP;
6564 phdr->param_len = 8;
6565 data = (uint8_t *)(phdr + 1);
6566 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6567 data[4] = 0x01; /* per-LUN */
6568 data += phdr->param_len;
6569 }
6570
6571 if (lun->backend->lun_attr != NULL &&
6572 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail"))
6573 != UINT64_MAX) {
6574 phdr = (struct scsi_log_param_header *)data;
6575 scsi_ulto2b(0x00f1, phdr->param_code);
6576 phdr->param_control = SLP_LBIN | SLP_LP;
6577 phdr->param_len = 8;
6578 data = (uint8_t *)(phdr + 1);
6579 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6580 data[4] = 0x02; /* per-pool */
6581 data += phdr->param_len;
6582 }
6583
6584 if (lun->backend->lun_attr != NULL &&
6585 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused"))
6586 != UINT64_MAX) {
6587 phdr = (struct scsi_log_param_header *)data;
6588 scsi_ulto2b(0x00f2, phdr->param_code);
6589 phdr->param_control = SLP_LBIN | SLP_LP;
6590 phdr->param_len = 8;
6591 data = (uint8_t *)(phdr + 1);
6592 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6593 data[4] = 0x02; /* per-pool */
6594 data += phdr->param_len;
6595 }
6596
6597 page_index->page_len = data - page_index->page_data;
6598 return (0);
6599}
6600
6601int
6602ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
6603 struct ctl_page_index *page_index,
6604 int pc)
6605{
6606 struct ctl_lun *lun;
6607 struct stat_page *data;
6608 uint64_t rn, wn, rb, wb;
6609 struct bintime rt, wt;
6610 int i;
6611
6612 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6613 data = (struct stat_page *)page_index->page_data;
6614
6615 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
6616 data->sap.hdr.param_control = SLP_LBIN;
6617 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
6618 sizeof(struct scsi_log_param_header);
6619 rn = wn = rb = wb = 0;
6620 bintime_clear(&rt);
6621 bintime_clear(&wt);
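/*
 * Sum the per-port operation counts, byte counts and busy times into
 * LUN-wide totals for the statistics and performance log parameter.
 */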
6622 for (i = 0; i < CTL_MAX_PORTS; i++) {
6623 rn += lun->stats.ports[i].operations[CTL_STATS_READ];
6624 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE];
6625 rb += lun->stats.ports[i].bytes[CTL_STATS_READ];
6626 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE];
6627 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]);
6628 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]);
6629 }
6630 scsi_u64to8b(rn, data->sap.read_num);
6631 scsi_u64to8b(wn, data->sap.write_num);
6632 if (lun->stats.blocksize > 0) {
6633 scsi_u64to8b(wb / lun->stats.blocksize,
6634 data->sap.recvieved_lba);
6635 scsi_u64to8b(rb / lun->stats.blocksize,
6636 data->sap.transmitted_lba);
6637 }
6638 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000),
6639 data->sap.read_int);
6640 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000),
6641 data->sap.write_int);
6642 scsi_u64to8b(0, data->sap.weighted_num);
6643 scsi_u64to8b(0, data->sap.weighted_int);
6644 scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
6645 data->it.hdr.param_control = SLP_LBIN;
6646 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
6647 sizeof(struct scsi_log_param_header);
6648#ifdef CTL_TIME_IO
6649 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
6650#endif
6651 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
6652 data->ti.hdr.param_control = SLP_LBIN;
6653 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
6654 sizeof(struct scsi_log_param_header);
6655 scsi_ulto4b(3, data->ti.exponent);
6656 scsi_ulto4b(1, data->ti.integer);
6657
6658 page_index->page_len = sizeof(*data);
6659 return (0);
6660}
6661
6662int
6663ctl_log_sense(struct ctl_scsiio *ctsio)
6664{
6665 struct ctl_lun *lun;
6666 int i, pc, page_code, subpage;
6667 int alloc_len, total_len;
6668 struct ctl_page_index *page_index;
6669 struct scsi_log_sense *cdb;
6670 struct scsi_log_header *header;
6671
6672 CTL_DEBUG_PRINT(("ctl_log_sense\n"));
6673
6674 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6675 cdb = (struct scsi_log_sense *)ctsio->cdb;
6676 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
6677 page_code = cdb->page & SLS_PAGE_CODE;
6678 subpage = cdb->subpage;
6679 alloc_len = scsi_2btoul(cdb->length);
6680
6681 page_index = NULL;
6682 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
6683 page_index = &lun->log_pages.index[i];
6684
6685 /* Look for the right page code */
6686 if ((page_index->page_code & SL_PAGE_CODE) != page_code)
6687 continue;
6688
6689 /* Look for the right subpage */
6690 if (page_index->subpage != subpage)
6691 continue;
6692
6693 break;
6694 }
6695 if (i >= CTL_NUM_LOG_PAGES) {
6696 ctl_set_invalid_field(ctsio,
6697 /*sks_valid*/ 1,
6698 /*command*/ 1,
6699 /*field*/ 2,
6700 /*bit_valid*/ 0,
6701 /*bit*/ 0);
6702 ctl_done((union ctl_io *)ctsio);
6703 return (CTL_RETVAL_COMPLETE);
6704 }
6705
6706 total_len = sizeof(struct scsi_log_header) + page_index->page_len;
6707
6708 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6709 ctsio->kern_sg_entries = 0;
6710 ctsio->kern_data_resid = 0;
6711 ctsio->kern_rel_offset = 0;
6712 if (total_len < alloc_len) {
6713 ctsio->residual = alloc_len - total_len;
6714 ctsio->kern_data_len = total_len;
6715 ctsio->kern_total_len = total_len;
6716 } else {
6717 ctsio->residual = 0;
6718 ctsio->kern_data_len = alloc_len;
6719 ctsio->kern_total_len = alloc_len;
6720 }
6721
6722 header = (struct scsi_log_header *)ctsio->kern_data_ptr;
6723 header->page = page_index->page_code;
6724 if (page_index->subpage) {
6725 header->page |= SL_SPF;
6726 header->subpage = page_index->subpage;
6727 }
6728 scsi_ulto2b(page_index->page_len, header->datalen);
6729
6730 /*
6731 * Call the handler, if it exists, to update the
6732 * page to the latest values.
6733 */
6734 if (page_index->sense_handler != NULL)
6735 page_index->sense_handler(ctsio, page_index, pc);
6736
6737 memcpy(header + 1, page_index->page_data, page_index->page_len);
6738
6739 ctl_set_success(ctsio);
6740 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6741 ctsio->be_move_done = ctl_config_move_done;
6742 ctl_datamove((union ctl_io *)ctsio);
6743 return (CTL_RETVAL_COMPLETE);
6744}
6745
6746int
6747ctl_read_capacity(struct ctl_scsiio *ctsio)
6748{
6749 struct scsi_read_capacity *cdb;
6750 struct scsi_read_capacity_data *data;
6751 struct ctl_lun *lun;
6752 uint32_t lba;
6753
6754 CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
6755
6756 cdb = (struct scsi_read_capacity *)ctsio->cdb;
6757
6758 lba = scsi_4btoul(cdb->addr);
6759 if (((cdb->pmi & SRC_PMI) == 0)
6760 && (lba != 0)) {
6761 ctl_set_invalid_field(/*ctsio*/ ctsio,
6762 /*sks_valid*/ 1,
6763 /*command*/ 1,
6764 /*field*/ 2,
6765 /*bit_valid*/ 0,
6766 /*bit*/ 0);
6767 ctl_done((union ctl_io *)ctsio);
6768 return (CTL_RETVAL_COMPLETE);
6769 }
6770
6771 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6772
6773 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6774 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
6775 ctsio->residual = 0;
6776 ctsio->kern_data_len = sizeof(*data);
6777 ctsio->kern_total_len = sizeof(*data);
6778 ctsio->kern_data_resid = 0;
6779 ctsio->kern_rel_offset = 0;
6780 ctsio->kern_sg_entries = 0;
6781
6782 /*
6783 * If the maximum LBA is greater than 0xfffffffe, the user must
6784 * issue a SERVICE ACTION IN (16) command, with the read capacity
6785 * service action set.
6786 */
6787 if (lun->be_lun->maxlba > 0xfffffffe)
6788 scsi_ulto4b(0xffffffff, data->addr);
6789 else
6790 scsi_ulto4b(lun->be_lun->maxlba, data->addr);
6791
6792 /*
6793 * XXX KDM this may not be 512 bytes...
6794 */
6795 scsi_ulto4b(lun->be_lun->blocksize, data->length);
6796
6797 ctl_set_success(ctsio);
6798 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6799 ctsio->be_move_done = ctl_config_move_done;
6800 ctl_datamove((union ctl_io *)ctsio);
6801 return (CTL_RETVAL_COMPLETE);
6802}
6803
6804int
6805ctl_read_capacity_16(struct ctl_scsiio *ctsio)
6806{
6807 struct scsi_read_capacity_16 *cdb;
6808 struct scsi_read_capacity_data_long *data;
6809 struct ctl_lun *lun;
6810 uint64_t lba;
6811 uint32_t alloc_len;
6812
6813 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
6814
6815 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
6816
6817 alloc_len = scsi_4btoul(cdb->alloc_len);
6818 lba = scsi_8btou64(cdb->addr);
6819
6820 if ((cdb->reladr & SRC16_PMI)
6821 && (lba != 0)) {
6822 ctl_set_invalid_field(/*ctsio*/ ctsio,
6823 /*sks_valid*/ 1,
6824 /*command*/ 1,
6825 /*field*/ 2,
6826 /*bit_valid*/ 0,
6827 /*bit*/ 0);
6828 ctl_done((union ctl_io *)ctsio);
6829 return (CTL_RETVAL_COMPLETE);
6830 }
6831
6832 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6833
6834 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6835 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
6836
6837 if (sizeof(*data) < alloc_len) {
6838 ctsio->residual = alloc_len - sizeof(*data);
6839 ctsio->kern_data_len = sizeof(*data);
6840 ctsio->kern_total_len = sizeof(*data);
6841 } else {
6842 ctsio->residual = 0;
6843 ctsio->kern_data_len = alloc_len;
6844 ctsio->kern_total_len = alloc_len;
6845 }
6846 ctsio->kern_data_resid = 0;
6847 ctsio->kern_rel_offset = 0;
6848 ctsio->kern_sg_entries = 0;
6849
6850 scsi_u64to8b(lun->be_lun->maxlba, data->addr);
6851 /* XXX KDM this may not be 512 bytes... */
6852 scsi_ulto4b(lun->be_lun->blocksize, data->length);
6853 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
6854 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
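/*
 * If the LUN supports UNMAP, advertise logical block provisioning
 * management (LBPME) and that unmapped blocks read back as zeroes
 * (LBPRZ).
 */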
6855 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
6856 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
6857
6858 ctl_set_success(ctsio);
6859 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6860 ctsio->be_move_done = ctl_config_move_done;
6861 ctl_datamove((union ctl_io *)ctsio);
6862 return (CTL_RETVAL_COMPLETE);
6863}
6864
6865int
6866ctl_get_lba_status(struct ctl_scsiio *ctsio)
6867{
6868 struct scsi_get_lba_status *cdb;
6869 struct scsi_get_lba_status_data *data;
6870 struct ctl_lun *lun;
6871 struct ctl_lba_len_flags *lbalen;
6872 uint64_t lba;
6873 uint32_t alloc_len, total_len;
6874 int retval;
6875
6876 CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
6877
6878 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6879 cdb = (struct scsi_get_lba_status *)ctsio->cdb;
6880 lba = scsi_8btou64(cdb->addr);
6881 alloc_len = scsi_4btoul(cdb->alloc_len);
6882
6883 if (lba > lun->be_lun->maxlba) {
6884 ctl_set_lba_out_of_range(ctsio);
6885 ctl_done((union ctl_io *)ctsio);
6886 return (CTL_RETVAL_COMPLETE);
6887 }
6888
6889 total_len = sizeof(*data) + sizeof(data->descr[0]);
6890 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6891 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;
6892
6893 if (total_len < alloc_len) {
6894 ctsio->residual = alloc_len - total_len;
6895 ctsio->kern_data_len = total_len;
6896 ctsio->kern_total_len = total_len;
6897 } else {
6898 ctsio->residual = 0;
6899 ctsio->kern_data_len = alloc_len;
6900 ctsio->kern_total_len = alloc_len;
6901 }
6902 ctsio->kern_data_resid = 0;
6903 ctsio->kern_rel_offset = 0;
6904 ctsio->kern_sg_entries = 0;
6905
6906 /* Fill dummy data in case backend can't tell anything. */
6907 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
6908 scsi_u64to8b(lba, data->descr[0].addr);
6909 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba),
6910 data->descr[0].length);
6911 data->descr[0].status = 0; /* Mapped or unknown. */
6912
6913 ctl_set_success(ctsio);
6914 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6915 ctsio->be_move_done = ctl_config_move_done;
6916
6917 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
6918 lbalen->lba = lba;
6919 lbalen->len = total_len;
6920 lbalen->flags = 0;
6921 retval = lun->backend->config_read((union ctl_io *)ctsio);
6922 return (CTL_RETVAL_COMPLETE);
6923}
6924
6925int
6926ctl_read_defect(struct ctl_scsiio *ctsio)
6927{
6928 struct scsi_read_defect_data_10 *ccb10;
6929 struct scsi_read_defect_data_12 *ccb12;
6930 struct scsi_read_defect_data_hdr_10 *data10;
6931 struct scsi_read_defect_data_hdr_12 *data12;
6932 uint32_t alloc_len, data_len;
6933 uint8_t format;
6934
6935 CTL_DEBUG_PRINT(("ctl_read_defect\n"));
6936
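/*
 * CTL keeps no defect list, so both the 10 and 12 byte forms of the
 * command get back only a header describing a zero-length list.
 */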
6937 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
6938 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb;
6939 format = ccb10->format;
6940 alloc_len = scsi_2btoul(ccb10->alloc_length);
6941 data_len = sizeof(*data10);
6942 } else {
6943 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb;
6944 format = ccb12->format;
6945 alloc_len = scsi_4btoul(ccb12->alloc_length);
6946 data_len = sizeof(*data12);
6947 }
6948 if (alloc_len == 0) {
6949 ctl_set_success(ctsio);
6950 ctl_done((union ctl_io *)ctsio);
6951 return (CTL_RETVAL_COMPLETE);
6952 }
6953
6954 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
6955 if (data_len < alloc_len) {
6956 ctsio->residual = alloc_len - data_len;
6957 ctsio->kern_data_len = data_len;
6958 ctsio->kern_total_len = data_len;
6959 } else {
6960 ctsio->residual = 0;
6961 ctsio->kern_data_len = alloc_len;
6962 ctsio->kern_total_len = alloc_len;
6963 }
6964 ctsio->kern_data_resid = 0;
6965 ctsio->kern_rel_offset = 0;
6966 ctsio->kern_sg_entries = 0;
6967
6968 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
6969 data10 = (struct scsi_read_defect_data_hdr_10 *)
6970 ctsio->kern_data_ptr;
6971 data10->format = format;
6972 scsi_ulto2b(0, data10->length);
6973 } else {
6974 data12 = (struct scsi_read_defect_data_hdr_12 *)
6975 ctsio->kern_data_ptr;
6976 data12->format = format;
6977 scsi_ulto2b(0, data12->generation);
6978 scsi_ulto4b(0, data12->length);
6979 }
6980
6981 ctl_set_success(ctsio);
6982 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6983 ctsio->be_move_done = ctl_config_move_done;
6984 ctl_datamove((union ctl_io *)ctsio);
6985 return (CTL_RETVAL_COMPLETE);
6986}
6987
6988int
6989ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
6990{
6991 struct scsi_maintenance_in *cdb;
6992 int retval;
6993 int alloc_len, ext, total_len = 0, g, pc, pg, gs, os;
6994 int num_target_port_groups, num_target_ports;
6995 struct ctl_lun *lun;
6996 struct ctl_softc *softc;
6997 struct ctl_port *port;
6998 struct scsi_target_group_data *rtg_ptr;
6999 struct scsi_target_group_data_extended *rtg_ext_ptr;
7000 struct scsi_target_port_group_descriptor *tpg_desc;
7001
7002 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
7003
7004 cdb = (struct scsi_maintenance_in *)ctsio->cdb;
7005 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7006 softc = lun->ctl_softc;
7007
7008 retval = CTL_RETVAL_COMPLETE;
7009
7010 switch (cdb->byte2 & STG_PDF_MASK) {
7011 case STG_PDF_LENGTH:
7012 ext = 0;
7013 break;
7014 case STG_PDF_EXTENDED:
7015 ext = 1;
7016 break;
7017 default:
7018 ctl_set_invalid_field(/*ctsio*/ ctsio,
7019 /*sks_valid*/ 1,
7020 /*command*/ 1,
7021 /*field*/ 2,
7022 /*bit_valid*/ 1,
7023 /*bit*/ 5);
7024 ctl_done((union ctl_io *)ctsio);
7025 return(retval);
7026 }
7027
7028 if (softc->is_single)
7029 num_target_port_groups = 1;
7030 else
7031 num_target_port_groups = NUM_TARGET_PORT_GROUPS;
7032 num_target_ports = 0;
7033 mtx_lock(&softc->ctl_lock);
7034 STAILQ_FOREACH(port, &softc->port_list, links) {
7035 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7036 continue;
7037 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
7038 continue;
7039 num_target_ports++;
7040 }
7041 mtx_unlock(&softc->ctl_lock);
7042
7043 if (ext)
7044 total_len = sizeof(struct scsi_target_group_data_extended);
7045 else
7046 total_len = sizeof(struct scsi_target_group_data);
7047 total_len += sizeof(struct scsi_target_port_group_descriptor) *
7048 num_target_port_groups +
7049 sizeof(struct scsi_target_port_descriptor) * num_target_ports;
7050
7051 alloc_len = scsi_4btoul(cdb->length);
7052
7053 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7054
7055 ctsio->kern_sg_entries = 0;
7056
7057 if (total_len < alloc_len) {
7058 ctsio->residual = alloc_len - total_len;
7059 ctsio->kern_data_len = total_len;
7060 ctsio->kern_total_len = total_len;
7061 } else {
7062 ctsio->residual = 0;
7063 ctsio->kern_data_len = alloc_len;
7064 ctsio->kern_total_len = alloc_len;
7065 }
7066 ctsio->kern_data_resid = 0;
7067 ctsio->kern_rel_offset = 0;
7068
7069 if (ext) {
7070 rtg_ext_ptr = (struct scsi_target_group_data_extended *)
7071 ctsio->kern_data_ptr;
7072 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
7073 rtg_ext_ptr->format_type = 0x10;
7074 rtg_ext_ptr->implicit_transition_time = 0;
7075 tpg_desc = &rtg_ext_ptr->groups[0];
7076 } else {
7077 rtg_ptr = (struct scsi_target_group_data *)
7078 ctsio->kern_data_ptr;
7079 scsi_ulto4b(total_len - 4, rtg_ptr->length);
7080 tpg_desc = &rtg_ptr->groups[0];
7081 }
7082
7083 mtx_lock(&softc->ctl_lock);
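	/*
	 * Pick the ALUA state to report for this node's own port group (gs)
	 * and for the other group (os), based on HA link state, HA mode and
	 * whether this node is primary for the LUN.
	 */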
7084 pg = softc->port_min / softc->port_cnt;
7085 if (softc->ha_link == CTL_HA_LINK_OFFLINE)
7086 gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
7087 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
7088 gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7089 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
7090 gs = TPG_ASYMMETRIC_ACCESS_STANDBY;
7091 else
7092 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7093 if (lun->flags & CTL_LUN_PRIMARY_SC) {
7094 os = gs;
7095 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7096 } else
7097 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7098 for (g = 0; g < num_target_port_groups; g++) {
7099 tpg_desc->pref_state = (g == pg) ? gs : os;
7100 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
7101 TPG_U_SUP | TPG_T_SUP;
7102 scsi_ulto2b(g + 1, tpg_desc->target_port_group);
7103 tpg_desc->status = TPG_IMPLICIT;
7104 pc = 0;
7105 STAILQ_FOREACH(port, &softc->port_list, links) {
7106 if (port->targ_port < g * softc->port_cnt ||
7107 port->targ_port >= (g + 1) * softc->port_cnt)
7108 continue;
7109 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7110 continue;
7111 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
7112 continue;
7113 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
7114 relative_target_port_identifier);
7115 pc++;
7116 }
7117 tpg_desc->target_port_count = pc;
7118 tpg_desc = (struct scsi_target_port_group_descriptor *)
7119 &tpg_desc->descriptors[pc];
7120 }
7121 mtx_unlock(&softc->ctl_lock);
7122
7123 ctl_set_success(ctsio);
7124 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7125 ctsio->be_move_done = ctl_config_move_done;
7126 ctl_datamove((union ctl_io *)ctsio);
7127 return(retval);
7128}
7129
7130int
7131ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
7132{
7133 struct ctl_lun *lun;
7134 struct scsi_report_supported_opcodes *cdb;
7135 const struct ctl_cmd_entry *entry, *sentry;
7136 struct scsi_report_supported_opcodes_all *all;
7137 struct scsi_report_supported_opcodes_descr *descr;
7138 struct scsi_report_supported_opcodes_one *one;
7139 int retval;
7140 int alloc_len, total_len;
7141 int opcode, service_action, i, j, num;
7142
7143 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
7144
7145 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
7146 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7147
7148 retval = CTL_RETVAL_COMPLETE;
7149
7150 opcode = cdb->requested_opcode;
7151 service_action = scsi_2btoul(cdb->requested_service_action);
7152 switch (cdb->options & RSO_OPTIONS_MASK) {
7153 case RSO_OPTIONS_ALL:
7154 num = 0;
7155 for (i = 0; i < 256; i++) {
7156 entry = &ctl_cmd_table[i];
7157 if (entry->flags & CTL_CMD_FLAG_SA5) {
7158 for (j = 0; j < 32; j++) {
7159 sentry = &((const struct ctl_cmd_entry *)
7160 entry->execute)[j];
7161 if (ctl_cmd_applicable(
7162 lun->be_lun->lun_type, sentry))
7163 num++;
7164 }
7165 } else {
7166 if (ctl_cmd_applicable(lun->be_lun->lun_type,
7167 entry))
7168 num++;
7169 }
7170 }
7171 total_len = sizeof(struct scsi_report_supported_opcodes_all) +
7172 num * sizeof(struct scsi_report_supported_opcodes_descr);
7173 break;
7174 case RSO_OPTIONS_OC:
7175 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
7176 ctl_set_invalid_field(/*ctsio*/ ctsio,
7177 /*sks_valid*/ 1,
7178 /*command*/ 1,
7179 /*field*/ 2,
7180 /*bit_valid*/ 1,
7181 /*bit*/ 2);
7182 ctl_done((union ctl_io *)ctsio);
7183 return (CTL_RETVAL_COMPLETE);
7184 }
7185 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7186 break;
7187 case RSO_OPTIONS_OC_SA:
7188 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
7189 service_action >= 32) {
7190 ctl_set_invalid_field(/*ctsio*/ ctsio,
7191 /*sks_valid*/ 1,
7192 /*command*/ 1,
7193 /*field*/ 2,
7194 /*bit_valid*/ 1,
7195 /*bit*/ 2);
7196 ctl_done((union ctl_io *)ctsio);
7197 return (CTL_RETVAL_COMPLETE);
7198 }
7199 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7200 break;
7201 default:
7202 ctl_set_invalid_field(/*ctsio*/ ctsio,
7203 /*sks_valid*/ 1,
7204 /*command*/ 1,
7205 /*field*/ 2,
7206 /*bit_valid*/ 1,
7207 /*bit*/ 2);
7208 ctl_done((union ctl_io *)ctsio);
7209 return (CTL_RETVAL_COMPLETE);
7210 }
7211
7212 alloc_len = scsi_4btoul(cdb->length);
7213
7214 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7215
7216 ctsio->kern_sg_entries = 0;
7217
7218 if (total_len < alloc_len) {
7219 ctsio->residual = alloc_len - total_len;
7220 ctsio->kern_data_len = total_len;
7221 ctsio->kern_total_len = total_len;
7222 } else {
7223 ctsio->residual = 0;
7224 ctsio->kern_data_len = alloc_len;
7225 ctsio->kern_total_len = alloc_len;
7226 }
7227 ctsio->kern_data_resid = 0;
7228 ctsio->kern_rel_offset = 0;
7229
7230 switch (cdb->options & RSO_OPTIONS_MASK) {
7231 case RSO_OPTIONS_ALL:
7232 all = (struct scsi_report_supported_opcodes_all *)
7233 ctsio->kern_data_ptr;
7234 num = 0;
7235 for (i = 0; i < 256; i++) {
7236 entry = &ctl_cmd_table[i];
7237 if (entry->flags & CTL_CMD_FLAG_SA5) {
7238 for (j = 0; j < 32; j++) {
7239 sentry = &((const struct ctl_cmd_entry *)
7240 entry->execute)[j];
7241 if (!ctl_cmd_applicable(
7242 lun->be_lun->lun_type, sentry))
7243 continue;
7244 descr = &all->descr[num++];
7245 descr->opcode = i;
7246 scsi_ulto2b(j, descr->service_action);
7247 descr->flags = RSO_SERVACTV;
7248 scsi_ulto2b(sentry->length,
7249 descr->cdb_length);
7250 }
7251 } else {
7252 if (!ctl_cmd_applicable(lun->be_lun->lun_type,
7253 entry))
7254 continue;
7255 descr = &all->descr[num++];
7256 descr->opcode = i;
7257 scsi_ulto2b(0, descr->service_action);
7258 descr->flags = 0;
7259 scsi_ulto2b(entry->length, descr->cdb_length);
7260 }
7261 }
7262 scsi_ulto4b(
7263 num * sizeof(struct scsi_report_supported_opcodes_descr),
7264 all->length);
7265 break;
7266 case RSO_OPTIONS_OC:
7267 one = (struct scsi_report_supported_opcodes_one *)
7268 ctsio->kern_data_ptr;
7269 entry = &ctl_cmd_table[opcode];
7270 goto fill_one;
7271 case RSO_OPTIONS_OC_SA:
7272 one = (struct scsi_report_supported_opcodes_one *)
7273 ctsio->kern_data_ptr;
7274 entry = &ctl_cmd_table[opcode];
7275 entry = &((const struct ctl_cmd_entry *)
7276 entry->execute)[service_action];
7277fill_one:
7278 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
7279 one->support = 3;
7280 scsi_ulto2b(entry->length, one->cdb_length);
7281 one->cdb_usage[0] = opcode;
7282 memcpy(&one->cdb_usage[1], entry->usage,
7283 entry->length - 1);
7284 } else
7285 one->support = 1;
7286 break;
7287 }
7288
7289 ctl_set_success(ctsio);
7290 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7291 ctsio->be_move_done = ctl_config_move_done;
7292 ctl_datamove((union ctl_io *)ctsio);
7293 return(retval);
7294}
7295
7296int
7297ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
7298{
7299 struct scsi_report_supported_tmf *cdb;
7300 struct scsi_report_supported_tmf_data *data;
7301 int retval;
7302 int alloc_len, total_len;
7303
7304 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
7305
7306 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
7307
7308 retval = CTL_RETVAL_COMPLETE;
7309
7310 total_len = sizeof(struct scsi_report_supported_tmf_data);
7311 alloc_len = scsi_4btoul(cdb->length);
7312
7313 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7314
7315 ctsio->kern_sg_entries = 0;
7316
7317 if (total_len < alloc_len) {
7318 ctsio->residual = alloc_len - total_len;
7319 ctsio->kern_data_len = total_len;
7320 ctsio->kern_total_len = total_len;
7321 } else {
7322 ctsio->residual = 0;
7323 ctsio->kern_data_len = alloc_len;
7324 ctsio->kern_total_len = alloc_len;
7325 }
7326 ctsio->kern_data_resid = 0;
7327 ctsio->kern_rel_offset = 0;
7328
7329 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
7330 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
7331 data->byte2 |= RST_ITNRS;
7332
7333 ctl_set_success(ctsio);
7334 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7335 ctsio->be_move_done = ctl_config_move_done;
7336 ctl_datamove((union ctl_io *)ctsio);
7337 return (retval);
7338}
7339
7340int
7341ctl_report_timestamp(struct ctl_scsiio *ctsio)
7342{
7343 struct scsi_report_timestamp *cdb;
7344 struct scsi_report_timestamp_data *data;
7345 struct timeval tv;
7346 int64_t timestamp;
7347 int retval;
7348 int alloc_len, total_len;
7349
7350 CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
7351
7352 cdb = (struct scsi_report_timestamp *)ctsio->cdb;
7353
7354 retval = CTL_RETVAL_COMPLETE;
7355
7356 total_len = sizeof(struct scsi_report_timestamp_data);
7357 alloc_len = scsi_4btoul(cdb->length);
7358
7359 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7360
7361 ctsio->kern_sg_entries = 0;
7362
7363 if (total_len < alloc_len) {
7364 ctsio->residual = alloc_len - total_len;
7365 ctsio->kern_data_len = total_len;
7366 ctsio->kern_total_len = total_len;
7367 } else {
7368 ctsio->residual = 0;
7369 ctsio->kern_data_len = alloc_len;
7370 ctsio->kern_total_len = alloc_len;
7371 }
7372 ctsio->kern_data_resid = 0;
7373 ctsio->kern_rel_offset = 0;
7374
7375 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
7376 scsi_ulto2b(sizeof(*data) - 2, data->length);
7377 data->origin = RTS_ORIG_OUTSIDE;
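	/*
	 * The device clock is reported as a 48-bit count of milliseconds,
	 * stored big-endian in the 6-byte timestamp field: the upper
	 * 32 bits first, then the low 16 bits.
	 */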
7378 getmicrotime(&tv);
7379 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
7380 scsi_ulto4b(timestamp >> 16, data->timestamp);
7381 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
7382
7383 ctl_set_success(ctsio);
7384 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7385 ctsio->be_move_done = ctl_config_move_done;
7386 ctl_datamove((union ctl_io *)ctsio);
7387 return (retval);
7388}
7389
7390int
7391ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7392{
7393 struct scsi_per_res_in *cdb;
7394 int alloc_len, total_len = 0;
7395 /* struct scsi_per_res_in_rsrv in_data; */
7396 struct ctl_lun *lun;
7397 struct ctl_softc *softc;
7398 uint64_t key;
7399
7400 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7401
7402 cdb = (struct scsi_per_res_in *)ctsio->cdb;
7403
7404 alloc_len = scsi_2btoul(cdb->length);
7405
7406 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7407 softc = lun->ctl_softc;
7408
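	/*
	 * The response size depends on LUN state sampled under the lock.
	 * If that state changes while the buffer is being allocated, the
	 * per-action checks below jump back here to recompute the size.
	 */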
7409retry:
7410 mtx_lock(&lun->lun_lock);
7411 switch (cdb->action) {
7412 case SPRI_RK: /* read keys */
7413 total_len = sizeof(struct scsi_per_res_in_keys) +
7414 lun->pr_key_count *
7415 sizeof(struct scsi_per_res_key);
7416 break;
7417 case SPRI_RR: /* read reservation */
7418 if (lun->flags & CTL_LUN_PR_RESERVED)
7419 total_len = sizeof(struct scsi_per_res_in_rsrv);
7420 else
7421 total_len = sizeof(struct scsi_per_res_in_header);
7422 break;
7423 case SPRI_RC: /* report capabilities */
7424 total_len = sizeof(struct scsi_per_res_cap);
7425 break;
7426 case SPRI_RS: /* read full status */
7427 total_len = sizeof(struct scsi_per_res_in_header) +
7428 (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7429 lun->pr_key_count;
7430 break;
7431 default:
7432 panic("Invalid PR type %x", cdb->action);
7433 }
7434 mtx_unlock(&lun->lun_lock);
7435
7436 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7437
7438 if (total_len < alloc_len) {
7439 ctsio->residual = alloc_len - total_len;
7440 ctsio->kern_data_len = total_len;
7441 ctsio->kern_total_len = total_len;
7442 } else {
7443 ctsio->residual = 0;
7444 ctsio->kern_data_len = alloc_len;
7445 ctsio->kern_total_len = alloc_len;
7446 }
7447
7448 ctsio->kern_data_resid = 0;
7449 ctsio->kern_rel_offset = 0;
7450 ctsio->kern_sg_entries = 0;
7451
7452 mtx_lock(&lun->lun_lock);
7453 switch (cdb->action) {
7454 case SPRI_RK: { // read keys
7455 struct scsi_per_res_in_keys *res_keys;
7456 int i, key_count;
7457
7458 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;
7459
7460 /*
7461 * We had to drop the lock to allocate our buffer, which
7462 * leaves time for someone to come in with another
7463 * persistent reservation. (That is unlikely, though,
7464 * since this should be the only persistent reservation
7465 * command active right now.)
7466 */
7467 if (total_len != (sizeof(struct scsi_per_res_in_keys) +
7468 (lun->pr_key_count *
7469 sizeof(struct scsi_per_res_key)))){
7470 mtx_unlock(&lun->lun_lock);
7471 free(ctsio->kern_data_ptr, M_CTL);
7472 printf("%s: reservation length changed, retrying\n",
7473 __func__);
7474 goto retry;
7475 }
7476
7477 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);
7478
7479 scsi_ulto4b(sizeof(struct scsi_per_res_key) *
7480 lun->pr_key_count, res_keys->header.length);
7481
7482 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
7483 if ((key = ctl_get_prkey(lun, i)) == 0)
7484 continue;
7485
7486 /*
7487 * We used lun->pr_key_count to calculate the
7488 * size to allocate. If it turns out the number of
7489 * initiators with the registered flag set is
7490 * larger than that (i.e. they haven't been kept in
7491 * sync), we've got a problem.
7492 */
7493 if (key_count >= lun->pr_key_count) {
7494#ifdef NEEDTOPORT
7495 csevent_log(CSC_CTL | CSC_SHELF_SW |
7496 CTL_PR_ERROR,
7497 csevent_LogType_Fault,
7498 csevent_AlertLevel_Yellow,
7499 csevent_FRU_ShelfController,
7500 csevent_FRU_Firmware,
7501 csevent_FRU_Unknown,
7502 "registered keys %d >= key "
7503 "count %d", key_count,
7504 lun->pr_key_count);
7505#endif
7506 key_count++;
7507 continue;
7508 }
7509 scsi_u64to8b(key, res_keys->keys[key_count].key);
7510 key_count++;
7511 }
7512 break;
7513 }
7514 case SPRI_RR: { // read reservation
7515 struct scsi_per_res_in_rsrv *res;
7516 int tmp_len, header_only;
7517
7518 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
7519
7520 scsi_ulto4b(lun->PRGeneration, res->header.generation);
7521
7522 if (lun->flags & CTL_LUN_PR_RESERVED)
7523 {
7524 tmp_len = sizeof(struct scsi_per_res_in_rsrv);
7525 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
7526 res->header.length);
7527 header_only = 0;
7528 } else {
7529 tmp_len = sizeof(struct scsi_per_res_in_header);
7530 scsi_ulto4b(0, res->header.length);
7531 header_only = 1;
7532 }
7533
7534 /*
7535 * We had to drop the lock to allocate our buffer, which
7536 * leaves time for someone to come in with another
7537 * persistent reservation. (That is unlikely, though,
7538 * since this should be the only persistent reservation
7539 * command active right now.)
7540 */
7541 if (tmp_len != total_len) {
7542 mtx_unlock(&lun->lun_lock);
7543 free(ctsio->kern_data_ptr, M_CTL);
7544 printf("%s: reservation status changed, retrying\n",
7545 __func__);
7546 goto retry;
7547 }
7548
7549 /*
7550 * No reservation held, so we're done.
7551 */
7552 if (header_only != 0)
7553 break;
7554
7555 /*
7556 		 * If the reservation is an All Registrants type, the key
7557 * is 0, since it doesn't really matter.
7558 */
7559 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
7560 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx),
7561 res->data.reservation);
7562 }
7563 res->data.scopetype = lun->res_type;
7564 break;
7565 }
7566 case SPRI_RC: //report capabilities
7567 {
7568 struct scsi_per_res_cap *res_cap;
7569 uint16_t type_mask;
7570
7571 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
7572 scsi_ulto2b(sizeof(*res_cap), res_cap->length);
7573 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5;
7574 type_mask = SPRI_TM_WR_EX_AR |
7575 SPRI_TM_EX_AC_RO |
7576 SPRI_TM_WR_EX_RO |
7577 SPRI_TM_EX_AC |
7578 SPRI_TM_WR_EX |
7579 SPRI_TM_EX_AC_AR;
7580 scsi_ulto2b(type_mask, res_cap->type_mask);
7581 break;
7582 }
7583 case SPRI_RS: { // read full status
7584 struct scsi_per_res_in_full *res_status;
7585 struct scsi_per_res_in_full_desc *res_desc;
7586 struct ctl_port *port;
7587 int i, len;
7588
7589 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;
7590
7591 /*
7592 * We had to drop the lock to allocate our buffer, which
7593 * leaves time for someone to come in with another
7594 * persistent reservation. (That is unlikely, though,
7595 * since this should be the only persistent reservation
7596 * command active right now.)
7597 */
7598 if (total_len < (sizeof(struct scsi_per_res_in_header) +
7599 (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7600 lun->pr_key_count)){
7601 mtx_unlock(&lun->lun_lock);
7602 free(ctsio->kern_data_ptr, M_CTL);
7603 printf("%s: reservation length changed, retrying\n",
7604 __func__);
7605 goto retry;
7606 }
7607
7608 scsi_ulto4b(lun->PRGeneration, res_status->header.generation);
7609
7610 res_desc = &res_status->desc[0];
7611 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7612 if ((key = ctl_get_prkey(lun, i)) == 0)
7613 continue;
7614
7615 scsi_u64to8b(key, res_desc->res_key.key);
7616 if ((lun->flags & CTL_LUN_PR_RESERVED) &&
7617 (lun->pr_res_idx == i ||
7618 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
7619 res_desc->flags = SPRI_FULL_R_HOLDER;
7620 res_desc->scopetype = lun->res_type;
7621 }
7622 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
7623 res_desc->rel_trgt_port_id);
7624 len = 0;
7625 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
7626 if (port != NULL)
7627 len = ctl_create_iid(port,
7628 i % CTL_MAX_INIT_PER_PORT,
7629 res_desc->transport_id);
7630 scsi_ulto4b(len, res_desc->additional_length);
7631 res_desc = (struct scsi_per_res_in_full_desc *)
7632 &res_desc->transport_id[len];
7633 }
7634 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
7635 res_status->header.length);
7636 break;
7637 }
7638 default:
7639 /*
7640 * This is a bug, because we just checked for this above,
7641 * and should have returned an error.
7642 */
7643 panic("Invalid PR type %x", cdb->action);
7644 break; /* NOTREACHED */
7645 }
7646 mtx_unlock(&lun->lun_lock);
7647
7648 ctl_set_success(ctsio);
7649 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7650 ctsio->be_move_done = ctl_config_move_done;
7651 ctl_datamove((union ctl_io *)ctsio);
7652 return (CTL_RETVAL_COMPLETE);
7653}
7654
7655/*
7656 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
7657 * it should return.
7658 */
7659static int
7660ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
7661 uint64_t sa_res_key, uint8_t type, uint32_t residx,
7662 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
7663 struct scsi_per_res_out_parms* param)
7664{
7665 union ctl_ha_msg persis_io;
7666 int i;
7667
7668 mtx_lock(&lun->lun_lock);
7669 if (sa_res_key == 0) {
7670 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
7671 /* validate scope and type */
7672 if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7673 SPR_LU_SCOPE) {
7674 mtx_unlock(&lun->lun_lock);
7675 ctl_set_invalid_field(/*ctsio*/ ctsio,
7676 /*sks_valid*/ 1,
7677 /*command*/ 1,
7678 /*field*/ 2,
7679 /*bit_valid*/ 1,
7680 /*bit*/ 4);
7681 ctl_done((union ctl_io *)ctsio);
7682 return (1);
7683 }
7684
7685 if (type>8 || type==2 || type==4 || type==0) {
7686 mtx_unlock(&lun->lun_lock);
7687 ctl_set_invalid_field(/*ctsio*/ ctsio,
7688 /*sks_valid*/ 1,
7689 /*command*/ 1,
7690 /*field*/ 2,
7691 /*bit_valid*/ 1,
7692 /*bit*/ 0);
7693 ctl_done((union ctl_io *)ctsio);
7694 return (1);
7695 }
7696
7697 /*
7698 * Unregister everybody else and build UA for
7699 * them
7700 */
7701 for(i = 0; i < CTL_MAX_INITIATORS; i++) {
7702 if (i == residx || ctl_get_prkey(lun, i) == 0)
7703 continue;
7704
7705 ctl_clr_prkey(lun, i);
7706 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7707 }
7708 lun->pr_key_count = 1;
7709 lun->res_type = type;
7710 if (lun->res_type != SPR_TYPE_WR_EX_AR
7711 && lun->res_type != SPR_TYPE_EX_AC_AR)
7712 lun->pr_res_idx = residx;
7713 lun->PRGeneration++;
7714 mtx_unlock(&lun->lun_lock);
7715
7716 /* send msg to other side */
7717 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7718 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7719 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7720 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7721 persis_io.pr.pr_info.res_type = type;
7722 memcpy(persis_io.pr.pr_info.sa_res_key,
7723 param->serv_act_res_key,
7724 sizeof(param->serv_act_res_key));
7725 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7726 sizeof(persis_io.pr), M_WAITOK);
7727 } else {
7728 /* not all registrants */
7729 mtx_unlock(&lun->lun_lock);
7730 free(ctsio->kern_data_ptr, M_CTL);
7731 ctl_set_invalid_field(ctsio,
7732 /*sks_valid*/ 1,
7733 /*command*/ 0,
7734 /*field*/ 8,
7735 /*bit_valid*/ 0,
7736 /*bit*/ 0);
7737 ctl_done((union ctl_io *)ctsio);
7738 return (1);
7739 }
7740 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7741 || !(lun->flags & CTL_LUN_PR_RESERVED)) {
7742 int found = 0;
7743
7744 if (res_key == sa_res_key) {
7745 /* special case */
7746 /*
7747 			 * The spec implies this is not good but doesn't
7748 			 * say what to do.  There are two choices: either
7749 			 * generate a reservation conflict, or a check
7750 			 * condition with illegal field in parameter data.
7751 			 * The latter is what is done when the sa_res_key
7752 			 * is zero, so take the same approach here, since
7753 			 * this case also concerns the sa_res_key.
7754 */
7755 mtx_unlock(&lun->lun_lock);
7756 free(ctsio->kern_data_ptr, M_CTL);
7757 ctl_set_invalid_field(ctsio,
7758 /*sks_valid*/ 1,
7759 /*command*/ 0,
7760 /*field*/ 8,
7761 /*bit_valid*/ 0,
7762 /*bit*/ 0);
7763 ctl_done((union ctl_io *)ctsio);
7764 return (1);
7765 }
7766
7767 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7768 if (ctl_get_prkey(lun, i) != sa_res_key)
7769 continue;
7770
7771 found = 1;
7772 ctl_clr_prkey(lun, i);
7773 lun->pr_key_count--;
7774 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7775 }
7776 if (!found) {
7777 mtx_unlock(&lun->lun_lock);
7778 free(ctsio->kern_data_ptr, M_CTL);
7779 ctl_set_reservation_conflict(ctsio);
7780 ctl_done((union ctl_io *)ctsio);
7781 return (CTL_RETVAL_COMPLETE);
7782 }
7783 lun->PRGeneration++;
7784 mtx_unlock(&lun->lun_lock);
7785
7786 /* send msg to other side */
7787 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7788 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7789 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7790 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7791 persis_io.pr.pr_info.res_type = type;
7792 memcpy(persis_io.pr.pr_info.sa_res_key,
7793 param->serv_act_res_key,
7794 sizeof(param->serv_act_res_key));
7795 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7796 sizeof(persis_io.pr), M_WAITOK);
7797 } else {
7798 /* Reserved but not all registrants */
7799 /* sa_res_key is res holder */
7800 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) {
7801 /* validate scope and type */
7802 if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7803 SPR_LU_SCOPE) {
7804 mtx_unlock(&lun->lun_lock);
7805 ctl_set_invalid_field(/*ctsio*/ ctsio,
7806 /*sks_valid*/ 1,
7807 /*command*/ 1,
7808 /*field*/ 2,
7809 /*bit_valid*/ 1,
7810 /*bit*/ 4);
7811 ctl_done((union ctl_io *)ctsio);
7812 return (1);
7813 }
7814
7815 if (type>8 || type==2 || type==4 || type==0) {
7816 mtx_unlock(&lun->lun_lock);
7817 ctl_set_invalid_field(/*ctsio*/ ctsio,
7818 /*sks_valid*/ 1,
7819 /*command*/ 1,
7820 /*field*/ 2,
7821 /*bit_valid*/ 1,
7822 /*bit*/ 0);
7823 ctl_done((union ctl_io *)ctsio);
7824 return (1);
7825 }
7826
7827 /*
7828 			 * Do the following:
7829 			 * If sa_res_key != res_key, remove all
7830 			 * registrants with sa_res_key and generate a UA
7831 			 * (Registrations Preempted) for them.  If it
7832 			 * wasn't an exclusive reservation and the type
7833 			 * has changed, generate a UA (Reservations
7834 			 * Preempted) for all other registered nexuses.
7835 			 * Then establish the new reservation and holder.
7836 			 * If res_key and sa_res_key are the same, do the
7837 			 * above except don't unregister the reservation
7838 			 * holder.
7839 */
7840
7841 for(i = 0; i < CTL_MAX_INITIATORS; i++) {
7842 if (i == residx || ctl_get_prkey(lun, i) == 0)
7843 continue;
7844
7845 if (sa_res_key == ctl_get_prkey(lun, i)) {
7846 ctl_clr_prkey(lun, i);
7847 lun->pr_key_count--;
7848 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7849 } else if (type != lun->res_type
7850 && (lun->res_type == SPR_TYPE_WR_EX_RO
7851 				    || lun->res_type == SPR_TYPE_EX_AC_RO)) {
7852 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
7853 }
7854 }
7855 lun->res_type = type;
7856 if (lun->res_type != SPR_TYPE_WR_EX_AR
7857 && lun->res_type != SPR_TYPE_EX_AC_AR)
7858 lun->pr_res_idx = residx;
7859 else
7860 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
7861 lun->PRGeneration++;
7862 mtx_unlock(&lun->lun_lock);
7863
7864 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7865 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7866 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7867 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7868 persis_io.pr.pr_info.res_type = type;
7869 memcpy(persis_io.pr.pr_info.sa_res_key,
7870 param->serv_act_res_key,
7871 sizeof(param->serv_act_res_key));
7872 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7873 sizeof(persis_io.pr), M_WAITOK);
7874 } else {
7875 /*
7876 * sa_res_key is not the res holder just
7877 * remove registrants
7878 */
7879 int found=0;
7880
7881 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7882 if (sa_res_key != ctl_get_prkey(lun, i))
7883 continue;
7884
7885 found = 1;
7886 ctl_clr_prkey(lun, i);
7887 lun->pr_key_count--;
7888 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7889 }
7890
7891 if (!found) {
7892 mtx_unlock(&lun->lun_lock);
7893 free(ctsio->kern_data_ptr, M_CTL);
7894 ctl_set_reservation_conflict(ctsio);
7895 ctl_done((union ctl_io *)ctsio);
7896 return (1);
7897 }
7898 lun->PRGeneration++;
7899 mtx_unlock(&lun->lun_lock);
7900
7901 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7902 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7903 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7904 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7905 persis_io.pr.pr_info.res_type = type;
7906 memcpy(persis_io.pr.pr_info.sa_res_key,
7907 param->serv_act_res_key,
7908 sizeof(param->serv_act_res_key));
7909 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7910 sizeof(persis_io.pr), M_WAITOK);
7911 }
7912 }
7913 return (0);
7914}
7915
7916static void
7917ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
7918{
7919 uint64_t sa_res_key;
7920 int i;
7921
7922 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
7923
7924 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7925 || lun->pr_res_idx == CTL_PR_NO_RESERVATION
7926 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) {
7927 if (sa_res_key == 0) {
7928 /*
7929 * Unregister everybody else and build UA for
7930 * them
7931 */
7932 for(i = 0; i < CTL_MAX_INITIATORS; i++) {
7933 if (i == msg->pr.pr_info.residx ||
7934 ctl_get_prkey(lun, i) == 0)
7935 continue;
7936
7937 ctl_clr_prkey(lun, i);
7938 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7939 }
7940
7941 lun->pr_key_count = 1;
7942 lun->res_type = msg->pr.pr_info.res_type;
7943 if (lun->res_type != SPR_TYPE_WR_EX_AR
7944 && lun->res_type != SPR_TYPE_EX_AC_AR)
7945 lun->pr_res_idx = msg->pr.pr_info.residx;
7946 } else {
7947 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7948 if (sa_res_key == ctl_get_prkey(lun, i))
7949 continue;
7950
7951 ctl_clr_prkey(lun, i);
7952 lun->pr_key_count--;
7953 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7954 }
7955 }
7956 } else {
7957 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7958 if (i == msg->pr.pr_info.residx ||
7959 ctl_get_prkey(lun, i) == 0)
7960 continue;
7961
7962 if (sa_res_key == ctl_get_prkey(lun, i)) {
7963 ctl_clr_prkey(lun, i);
7964 lun->pr_key_count--;
7965 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7966 } else if (msg->pr.pr_info.res_type != lun->res_type
7967 && (lun->res_type == SPR_TYPE_WR_EX_RO
7968 || lun->res_type == SPR_TYPE_EX_AC_RO)) {
7969 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
7970 }
7971 }
7972 lun->res_type = msg->pr.pr_info.res_type;
7973 if (lun->res_type != SPR_TYPE_WR_EX_AR
7974 && lun->res_type != SPR_TYPE_EX_AC_AR)
7975 lun->pr_res_idx = msg->pr.pr_info.residx;
7976 else
7977 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
7978 }
7979 lun->PRGeneration++;
7980
7981}
7982
7983
7984int
7985ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
7986{
7987 int retval;
7988 u_int32_t param_len;
7989 struct scsi_per_res_out *cdb;
7990 struct ctl_lun *lun;
7991 struct scsi_per_res_out_parms* param;
7992 struct ctl_softc *softc;
7993 uint32_t residx;
7994 uint64_t res_key, sa_res_key, key;
7995 uint8_t type;
7996 union ctl_ha_msg persis_io;
7997 int i;
7998
7999 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
8000
8001 retval = CTL_RETVAL_COMPLETE;
8002
8003 cdb = (struct scsi_per_res_out *)ctsio->cdb;
8004 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8005 softc = lun->ctl_softc;
8006
8007 /*
8008 * We only support whole-LUN scope. The scope & type are ignored for
8009 * register, register and ignore existing key and clear.
8010 * We sometimes ignore scope and type on preempts too!!
8011 * Verify reservation type here as well.
8012 */
8013 type = cdb->scope_type & SPR_TYPE_MASK;
8014 if ((cdb->action == SPRO_RESERVE)
8015 || (cdb->action == SPRO_RELEASE)) {
8016 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
8017 ctl_set_invalid_field(/*ctsio*/ ctsio,
8018 /*sks_valid*/ 1,
8019 /*command*/ 1,
8020 /*field*/ 2,
8021 /*bit_valid*/ 1,
8022 /*bit*/ 4);
8023 ctl_done((union ctl_io *)ctsio);
8024 return (CTL_RETVAL_COMPLETE);
8025 }
8026
8027 if (type>8 || type==2 || type==4 || type==0) {
8028 ctl_set_invalid_field(/*ctsio*/ ctsio,
8029 /*sks_valid*/ 1,
8030 /*command*/ 1,
8031 /*field*/ 2,
8032 /*bit_valid*/ 1,
8033 /*bit*/ 0);
8034 ctl_done((union ctl_io *)ctsio);
8035 return (CTL_RETVAL_COMPLETE);
8036 }
8037 }
8038
8039 param_len = scsi_4btoul(cdb->length);
8040
8041 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
8042 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
8043 ctsio->kern_data_len = param_len;
8044 ctsio->kern_total_len = param_len;
8045 ctsio->kern_data_resid = 0;
8046 ctsio->kern_rel_offset = 0;
8047 ctsio->kern_sg_entries = 0;
8048 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
8049 ctsio->be_move_done = ctl_config_move_done;
8050 ctl_datamove((union ctl_io *)ctsio);
8051
8052 return (CTL_RETVAL_COMPLETE);
8053 }
8054
8055 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
8056
8057 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8058 res_key = scsi_8btou64(param->res_key.key);
8059 sa_res_key = scsi_8btou64(param->serv_act_res_key);
8060
8061 /*
8062 * Validate the reservation key here except for SPRO_REG_IGNO
8063 * This must be done for all other service actions
8064 */
8065 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
8066 mtx_lock(&lun->lun_lock);
8067 if ((key = ctl_get_prkey(lun, residx)) != 0) {
8068 if (res_key != key) {
8069 /*
8070 * The current key passed in doesn't match
8071 * the one the initiator previously
8072 * registered.
8073 */
8074 mtx_unlock(&lun->lun_lock);
8075 free(ctsio->kern_data_ptr, M_CTL);
8076 ctl_set_reservation_conflict(ctsio);
8077 ctl_done((union ctl_io *)ctsio);
8078 return (CTL_RETVAL_COMPLETE);
8079 }
8080 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
8081 /*
8082 * We are not registered
8083 */
8084 mtx_unlock(&lun->lun_lock);
8085 free(ctsio->kern_data_ptr, M_CTL);
8086 ctl_set_reservation_conflict(ctsio);
8087 ctl_done((union ctl_io *)ctsio);
8088 return (CTL_RETVAL_COMPLETE);
8089 } else if (res_key != 0) {
8090 /*
8091 * We are not registered and trying to register but
8092 * the register key isn't zero.
8093 */
8094 mtx_unlock(&lun->lun_lock);
8095 free(ctsio->kern_data_ptr, M_CTL);
8096 ctl_set_reservation_conflict(ctsio);
8097 ctl_done((union ctl_io *)ctsio);
8098 return (CTL_RETVAL_COMPLETE);
8099 }
8100 mtx_unlock(&lun->lun_lock);
8101 }
8102
8103 switch (cdb->action & SPRO_ACTION_MASK) {
8104 case SPRO_REGISTER:
8105 case SPRO_REG_IGNO: {
8106
8107#if 0
8108 printf("Registration received\n");
8109#endif
8110
8111 /*
8112 * We don't support any of these options, as we report in
8113 * the read capabilities request (see
8114 * ctl_persistent_reserve_in(), above).
8115 */
8116 if ((param->flags & SPR_SPEC_I_PT)
8117 || (param->flags & SPR_ALL_TG_PT)
8118 || (param->flags & SPR_APTPL)) {
8119 int bit_ptr;
8120
8121 if (param->flags & SPR_APTPL)
8122 bit_ptr = 0;
8123 else if (param->flags & SPR_ALL_TG_PT)
8124 bit_ptr = 2;
8125 else /* SPR_SPEC_I_PT */
8126 bit_ptr = 3;
8127
8128 free(ctsio->kern_data_ptr, M_CTL);
8129 ctl_set_invalid_field(ctsio,
8130 /*sks_valid*/ 1,
8131 /*command*/ 0,
8132 /*field*/ 20,
8133 /*bit_valid*/ 1,
8134 /*bit*/ bit_ptr);
8135 ctl_done((union ctl_io *)ctsio);
8136 return (CTL_RETVAL_COMPLETE);
8137 }
8138
8139 mtx_lock(&lun->lun_lock);
8140
8141 /*
8142 * The initiator wants to clear the
8143 * key/unregister.
8144 */
8145 if (sa_res_key == 0) {
8146 if ((res_key == 0
8147 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
8148 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
8149 && ctl_get_prkey(lun, residx) == 0)) {
8150 mtx_unlock(&lun->lun_lock);
8151 goto done;
8152 }
8153
8154 ctl_clr_prkey(lun, residx);
8155 lun->pr_key_count--;
8156
8157 if (residx == lun->pr_res_idx) {
8158 lun->flags &= ~CTL_LUN_PR_RESERVED;
8159 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8160
8161 if ((lun->res_type == SPR_TYPE_WR_EX_RO
8162 || lun->res_type == SPR_TYPE_EX_AC_RO)
8163 && lun->pr_key_count) {
8164 /*
8165 * If the reservation is a registrants
8166 * only type we need to generate a UA
8167 * for other registered inits. The
8168 * sense code should be RESERVATIONS
8169 * RELEASED
8170 */
8171
8172 for (i = softc->init_min; i < softc->init_max; i++){
8173 if (ctl_get_prkey(lun, i) == 0)
8174 continue;
8175 ctl_est_ua(lun, i,
8176 CTL_UA_RES_RELEASE);
8177 }
8178 }
8179 lun->res_type = 0;
8180 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8181 if (lun->pr_key_count==0) {
8182 lun->flags &= ~CTL_LUN_PR_RESERVED;
8183 lun->res_type = 0;
8184 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8185 }
8186 }
8187 lun->PRGeneration++;
8188 mtx_unlock(&lun->lun_lock);
8189
8190 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8191 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8192 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
8193 persis_io.pr.pr_info.residx = residx;
8194 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8195 sizeof(persis_io.pr), M_WAITOK);
8196 } else /* sa_res_key != 0 */ {
8197
8198 /*
8199 * If we aren't registered currently then increment
8200 * the key count and set the registered flag.
8201 */
8202 ctl_alloc_prkey(lun, residx);
8203 if (ctl_get_prkey(lun, residx) == 0)
8204 lun->pr_key_count++;
8205 ctl_set_prkey(lun, residx, sa_res_key);
8206 lun->PRGeneration++;
8207 mtx_unlock(&lun->lun_lock);
8208
8209 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8210 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8211 persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
8212 persis_io.pr.pr_info.residx = residx;
8213 memcpy(persis_io.pr.pr_info.sa_res_key,
8214 param->serv_act_res_key,
8215 sizeof(param->serv_act_res_key));
8216 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8217 sizeof(persis_io.pr), M_WAITOK);
8218 }
8219
8220 break;
8221 }
8222 case SPRO_RESERVE:
8223#if 0
8224 printf("Reserve executed type %d\n", type);
8225#endif
8226 mtx_lock(&lun->lun_lock);
8227 if (lun->flags & CTL_LUN_PR_RESERVED) {
8228 /*
8229 * if this isn't the reservation holder and it's
8230 * not a "all registrants" type or if the type is
8231 * different then we have a conflict
8232 */
8233 if ((lun->pr_res_idx != residx
8234 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
8235 || lun->res_type != type) {
8236 mtx_unlock(&lun->lun_lock);
8237 free(ctsio->kern_data_ptr, M_CTL);
8238 ctl_set_reservation_conflict(ctsio);
8239 ctl_done((union ctl_io *)ctsio);
8240 return (CTL_RETVAL_COMPLETE);
8241 }
8242 mtx_unlock(&lun->lun_lock);
8243 } else /* create a reservation */ {
8244 /*
8245 			 * If it's not an "all registrants" type, record
8246 			 * the reservation holder.
8247 */
8248 if (type != SPR_TYPE_WR_EX_AR
8249 && type != SPR_TYPE_EX_AC_AR)
8250 lun->pr_res_idx = residx; /* Res holder */
8251 else
8252 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8253
8254 lun->flags |= CTL_LUN_PR_RESERVED;
8255 lun->res_type = type;
8256
8257 mtx_unlock(&lun->lun_lock);
8258
8259 /* send msg to other side */
8260 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8261 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8262 persis_io.pr.pr_info.action = CTL_PR_RESERVE;
8263 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8264 persis_io.pr.pr_info.res_type = type;
8265 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8266 sizeof(persis_io.pr), M_WAITOK);
8267 }
8268 break;
8269
8270 case SPRO_RELEASE:
8271 mtx_lock(&lun->lun_lock);
8272 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
8273 			/* No reservation exists; return good status */
8274 mtx_unlock(&lun->lun_lock);
8275 goto done;
8276 }
8277 /*
8278 * Is this nexus a reservation holder?
8279 */
8280 if (lun->pr_res_idx != residx
8281 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
8282 /*
8283 			 * Not the reservation holder; return good status
8284 			 * but do nothing.
8285 */
8286 mtx_unlock(&lun->lun_lock);
8287 goto done;
8288 }
8289
8290 if (lun->res_type != type) {
8291 mtx_unlock(&lun->lun_lock);
8292 free(ctsio->kern_data_ptr, M_CTL);
8293 ctl_set_illegal_pr_release(ctsio);
8294 ctl_done((union ctl_io *)ctsio);
8295 return (CTL_RETVAL_COMPLETE);
8296 }
8297
8298 /* okay to release */
8299 lun->flags &= ~CTL_LUN_PR_RESERVED;
8300 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8301 lun->res_type = 0;
8302
8303 /*
8304 * if this isn't an exclusive access
8305 * res generate UA for all other
8306 * registrants.
8307 */
8308 if (type != SPR_TYPE_EX_AC
8309 && type != SPR_TYPE_WR_EX) {
8310 for (i = softc->init_min; i < softc->init_max; i++) {
8311 if (i == residx || ctl_get_prkey(lun, i) == 0)
8312 continue;
8313 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8314 }
8315 }
8316 mtx_unlock(&lun->lun_lock);
8317
8318 /* Send msg to other side */
8319 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8320 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8321 persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8322 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8323 sizeof(persis_io.pr), M_WAITOK);
8324 break;
8325
8326 case SPRO_CLEAR:
8327 /* send msg to other side */
8328
8329 mtx_lock(&lun->lun_lock);
8330 lun->flags &= ~CTL_LUN_PR_RESERVED;
8331 lun->res_type = 0;
8332 lun->pr_key_count = 0;
8333 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8334
8335 ctl_clr_prkey(lun, residx);
8336 for (i = 0; i < CTL_MAX_INITIATORS; i++)
8337 if (ctl_get_prkey(lun, i) != 0) {
8338 ctl_clr_prkey(lun, i);
8339 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8340 }
8341 lun->PRGeneration++;
8342 mtx_unlock(&lun->lun_lock);
8343
8344 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8345 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8346 persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8347 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8348 sizeof(persis_io.pr), M_WAITOK);
8349 break;
8350
8351 case SPRO_PREEMPT:
8352 case SPRO_PRE_ABO: {
8353 int nretval;
8354
8355 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8356 residx, ctsio, cdb, param);
8357 if (nretval != 0)
8358 return (CTL_RETVAL_COMPLETE);
8359 break;
8360 }
8361 default:
8362 panic("Invalid PR type %x", cdb->action);
8363 }
8364
8365done:
8366 free(ctsio->kern_data_ptr, M_CTL);
8367 ctl_set_success(ctsio);
8368 ctl_done((union ctl_io *)ctsio);
8369
8370 return (retval);
8371}
8372
8373/*
8374 * This routine is for handling a message from the other SC pertaining to
8375 * persistent reserve out. All the error checking will have been done
8376  * so only performing the action needs to be done here to keep the two
8377 * in sync.
8378 */
8379static void
8380ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
8381{
8382 struct ctl_lun *lun;
8383 struct ctl_softc *softc;
8384 int i;
8385 uint32_t targ_lun;
8386
8387 softc = control_softc;
8388
8389 targ_lun = msg->hdr.nexus.targ_mapped_lun;
8390 lun = softc->ctl_luns[targ_lun];
8391 mtx_lock(&lun->lun_lock);
8392 switch(msg->pr.pr_info.action) {
8393 case CTL_PR_REG_KEY:
8394 ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
8395 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
8396 lun->pr_key_count++;
8397 ctl_set_prkey(lun, msg->pr.pr_info.residx,
8398 scsi_8btou64(msg->pr.pr_info.sa_res_key));
8399 lun->PRGeneration++;
8400 break;
8401
8402 case CTL_PR_UNREG_KEY:
8403 ctl_clr_prkey(lun, msg->pr.pr_info.residx);
8404 lun->pr_key_count--;
8405
8406 /* XXX Need to see if the reservation has been released */
8407 /* if so do we need to generate UA? */
8408 if (msg->pr.pr_info.residx == lun->pr_res_idx) {
8409 lun->flags &= ~CTL_LUN_PR_RESERVED;
8410 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8411
8412 if ((lun->res_type == SPR_TYPE_WR_EX_RO
8413 || lun->res_type == SPR_TYPE_EX_AC_RO)
8414 && lun->pr_key_count) {
8415 /*
8416 * If the reservation is a registrants
8417 * only type we need to generate a UA
8418 * for other registered inits. The
8419 * sense code should be RESERVATIONS
8420 * RELEASED
8421 */
8422
8423 for (i = softc->init_min; i < softc->init_max; i++) {
8424 if (ctl_get_prkey(lun, i) == 0)
8425 continue;
8426
8427 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8428 }
8429 }
8430 lun->res_type = 0;
8431 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8432 if (lun->pr_key_count==0) {
8433 lun->flags &= ~CTL_LUN_PR_RESERVED;
8434 lun->res_type = 0;
8435 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8436 }
8437 }
8438 lun->PRGeneration++;
8439 break;
8440
8441 case CTL_PR_RESERVE:
8442 lun->flags |= CTL_LUN_PR_RESERVED;
8443 lun->res_type = msg->pr.pr_info.res_type;
8444 lun->pr_res_idx = msg->pr.pr_info.residx;
8445
8446 break;
8447
8448 case CTL_PR_RELEASE:
8449 /*
8450 * if this isn't an exclusive access res generate UA for all
8451 * other registrants.
8452 */
8453 if (lun->res_type != SPR_TYPE_EX_AC
8454 && lun->res_type != SPR_TYPE_WR_EX) {
8455 for (i = softc->init_min; i < softc->init_max; i++)
8456 if (ctl_get_prkey(lun, i) != 0)
8457 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8458 }
8459
8460 lun->flags &= ~CTL_LUN_PR_RESERVED;
8461 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8462 lun->res_type = 0;
8463 break;
8464
8465 case CTL_PR_PREEMPT:
8466 ctl_pro_preempt_other(lun, msg);
8467 break;
8468 case CTL_PR_CLEAR:
8469 lun->flags &= ~CTL_LUN_PR_RESERVED;
8470 lun->res_type = 0;
8471 lun->pr_key_count = 0;
8472 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8473
8474 for (i=0; i < CTL_MAX_INITIATORS; i++) {
8475 if (ctl_get_prkey(lun, i) == 0)
8476 continue;
8477 ctl_clr_prkey(lun, i);
8478 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8479 }
8480 lun->PRGeneration++;
8481 break;
8482 }
8483
8484 mtx_unlock(&lun->lun_lock);
8485}
8486
8487int
8488ctl_read_write(struct ctl_scsiio *ctsio)
8489{
8490 struct ctl_lun *lun;
8491 struct ctl_lba_len_flags *lbalen;
8492 uint64_t lba;
8493 uint32_t num_blocks;
8494 int flags, retval;
8495 int isread;
8496
8497 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8498
8499 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
8500
8501 flags = 0;
8502 retval = CTL_RETVAL_COMPLETE;
8503
8504 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
8505 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
8506 switch (ctsio->cdb[0]) {
8507 case READ_6:
8508 case WRITE_6: {
8509 struct scsi_rw_6 *cdb;
8510
8511 cdb = (struct scsi_rw_6 *)ctsio->cdb;
8512
8513 lba = scsi_3btoul(cdb->addr);
8514 /* only 5 bits are valid in the most significant address byte */
8515 lba &= 0x1fffff;
8516 num_blocks = cdb->length;
8517 /*
8518 * This is correct according to SBC-2.
8519 */
8520 if (num_blocks == 0)
8521 num_blocks = 256;
8522 break;
8523 }
8524 case READ_10:
8525 case WRITE_10: {
8526 struct scsi_rw_10 *cdb;
8527
8528 cdb = (struct scsi_rw_10 *)ctsio->cdb;
8529 if (cdb->byte2 & SRW10_FUA)
8530 flags |= CTL_LLF_FUA;
8531 if (cdb->byte2 & SRW10_DPO)
8532 flags |= CTL_LLF_DPO;
8533 lba = scsi_4btoul(cdb->addr);
8534 num_blocks = scsi_2btoul(cdb->length);
8535 break;
8536 }
8537 case WRITE_VERIFY_10: {
8538 struct scsi_write_verify_10 *cdb;
8539
8540 cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
8541 flags |= CTL_LLF_FUA;
8542 if (cdb->byte2 & SWV_DPO)
8543 flags |= CTL_LLF_DPO;
8544 lba = scsi_4btoul(cdb->addr);
8545 num_blocks = scsi_2btoul(cdb->length);
8546 break;
8547 }
8548 case READ_12:
8549 case WRITE_12: {
8550 struct scsi_rw_12 *cdb;
8551
8552 cdb = (struct scsi_rw_12 *)ctsio->cdb;
8553 if (cdb->byte2 & SRW12_FUA)
8554 flags |= CTL_LLF_FUA;
8555 if (cdb->byte2 & SRW12_DPO)
8556 flags |= CTL_LLF_DPO;
8557 lba = scsi_4btoul(cdb->addr);
8558 num_blocks = scsi_4btoul(cdb->length);
8559 break;
8560 }
8561 case WRITE_VERIFY_12: {
8562 struct scsi_write_verify_12 *cdb;
8563
8564 cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
8565 flags |= CTL_LLF_FUA;
8566 if (cdb->byte2 & SWV_DPO)
8567 flags |= CTL_LLF_DPO;
8568 lba = scsi_4btoul(cdb->addr);
8569 num_blocks = scsi_4btoul(cdb->length);
8570 break;
8571 }
8572 case READ_16:
8573 case WRITE_16: {
8574 struct scsi_rw_16 *cdb;
8575
8576 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8577 if (cdb->byte2 & SRW12_FUA)
8578 flags |= CTL_LLF_FUA;
8579 if (cdb->byte2 & SRW12_DPO)
8580 flags |= CTL_LLF_DPO;
8581 lba = scsi_8btou64(cdb->addr);
8582 num_blocks = scsi_4btoul(cdb->length);
8583 break;
8584 }
8585 case WRITE_ATOMIC_16: {
8586 struct scsi_rw_16 *cdb;
8587
8588 if (lun->be_lun->atomicblock == 0) {
8589 ctl_set_invalid_opcode(ctsio);
8590 ctl_done((union ctl_io *)ctsio);
8591 return (CTL_RETVAL_COMPLETE);
8592 }
8593
8594 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8595 if (cdb->byte2 & SRW12_FUA)
8596 flags |= CTL_LLF_FUA;
8597 if (cdb->byte2 & SRW12_DPO)
8598 flags |= CTL_LLF_DPO;
8599 lba = scsi_8btou64(cdb->addr);
8600 num_blocks = scsi_4btoul(cdb->length);
8601 if (num_blocks > lun->be_lun->atomicblock) {
8602 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
8603 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
8604 /*bit*/ 0);
8605 ctl_done((union ctl_io *)ctsio);
8606 return (CTL_RETVAL_COMPLETE);
8607 }
8608 break;
8609 }
8610 case WRITE_VERIFY_16: {
8611 struct scsi_write_verify_16 *cdb;
8612
8613 cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
8614 flags |= CTL_LLF_FUA;
8615 if (cdb->byte2 & SWV_DPO)
8616 flags |= CTL_LLF_DPO;
8617 lba = scsi_8btou64(cdb->addr);
8618 num_blocks = scsi_4btoul(cdb->length);
8619 break;
8620 }
8621 default:
8622 /*
8623 * We got a command we don't support. This shouldn't
8624 	 * happen; commands should be filtered out above us.
8625 */
8626 ctl_set_invalid_opcode(ctsio);
8627 ctl_done((union ctl_io *)ctsio);
8628
8629 return (CTL_RETVAL_COMPLETE);
8630 break; /* NOTREACHED */
8631 }
8632
8633 /*
8634 * The first check is to make sure we're in bounds, the second
8635 * check is to catch wrap-around problems. If the lba + num blocks
8636 * is less than the lba, then we've wrapped around and the block
8637 * range is invalid anyway.
8638 */
8639 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8640 || ((lba + num_blocks) < lba)) {
8641 ctl_set_lba_out_of_range(ctsio);
8642 ctl_done((union ctl_io *)ctsio);
8643 return (CTL_RETVAL_COMPLETE);
8644 }
8645
8646 /*
8647 * According to SBC-3, a transfer length of 0 is not an error.
8648 * Note that this cannot happen with WRITE(6) or READ(6), since 0
8649 * translates to 256 blocks for those commands.
8650 */
8651 if (num_blocks == 0) {
8652 ctl_set_success(ctsio);
8653 ctl_done((union ctl_io *)ctsio);
8654 return (CTL_RETVAL_COMPLETE);
8655 }
8656
8657 /* Set FUA and/or DPO if caches are disabled. */
8658 if (isread) {
8659 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
8660 SCP_RCD) != 0)
8661 flags |= CTL_LLF_FUA | CTL_LLF_DPO;
8662 } else {
8663 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
8664 SCP_WCE) == 0)
8665 flags |= CTL_LLF_FUA;
8666 }
8667
8668 lbalen = (struct ctl_lba_len_flags *)
8669 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8670 lbalen->lba = lba;
8671 lbalen->len = num_blocks;
8672 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;
8673
8674 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
8675 ctsio->kern_rel_offset = 0;
8676
8677 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
8678
8679 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8680
8681 return (retval);
8682}
8683
8684static int
8685ctl_cnw_cont(union ctl_io *io)
8686{
8687 struct ctl_scsiio *ctsio;
8688 struct ctl_lun *lun;
8689 struct ctl_lba_len_flags *lbalen;
8690 int retval;
8691
8692 ctsio = &io->scsiio;
8693 ctsio->io_hdr.status = CTL_STATUS_NONE;
8694 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
8695 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8696 lbalen = (struct ctl_lba_len_flags *)
8697 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8698 lbalen->flags &= ~CTL_LLF_COMPARE;
8699 lbalen->flags |= CTL_LLF_WRITE;
8700
8701 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
8702 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8703 return (retval);
8704}
8705
8706int
8707ctl_cnw(struct ctl_scsiio *ctsio)
8708{
8709 struct ctl_lun *lun;
8710 struct ctl_lba_len_flags *lbalen;
8711 uint64_t lba;
8712 uint32_t num_blocks;
8713 int flags, retval;
8714
8715 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8716
8717 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
8718
8719 flags = 0;
8720 retval = CTL_RETVAL_COMPLETE;
8721
8722 switch (ctsio->cdb[0]) {
8723 case COMPARE_AND_WRITE: {
8724 struct scsi_compare_and_write *cdb;
8725
8726 cdb = (struct scsi_compare_and_write *)ctsio->cdb;
8727 if (cdb->byte2 & SRW10_FUA)
8728 flags |= CTL_LLF_FUA;
8729 if (cdb->byte2 & SRW10_DPO)
8730 flags |= CTL_LLF_DPO;
8731 lba = scsi_8btou64(cdb->addr);
8732 num_blocks = cdb->length;
8733 break;
8734 }
8735 default:
8736 /*
8737 * We got a command we don't support. This shouldn't
8738 	 * happen; commands should be filtered out above us.
8739 */
8740 ctl_set_invalid_opcode(ctsio);
8741 ctl_done((union ctl_io *)ctsio);
8742
8743 return (CTL_RETVAL_COMPLETE);
8744 break; /* NOTREACHED */
8745 }
8746
8747 /*
8748 * The first check is to make sure we're in bounds, the second
8749 * check is to catch wrap-around problems. If the lba + num blocks
8750 * is less than the lba, then we've wrapped around and the block
8751 * range is invalid anyway.
8752 */
8753 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8754 || ((lba + num_blocks) < lba)) {
8755 ctl_set_lba_out_of_range(ctsio);
8756 ctl_done((union ctl_io *)ctsio);
8757 return (CTL_RETVAL_COMPLETE);
8758 }
8759
8760 /*
8761 * According to SBC-3, a transfer length of 0 is not an error.
8762 */
8763 if (num_blocks == 0) {
8764 ctl_set_success(ctsio);
8765 ctl_done((union ctl_io *)ctsio);
8766 return (CTL_RETVAL_COMPLETE);
8767 }
8768
8769 /* Set FUA if write cache is disabled. */
8770 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
8771 SCP_WCE) == 0)
8772 flags |= CTL_LLF_FUA;
8773
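	/*
	 * COMPARE AND WRITE transfers two copies of the block range: the
	 * data to compare against the medium followed by the data to
	 * write, hence the doubled transfer length.
	 */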
8774 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
8775 ctsio->kern_rel_offset = 0;
8776
8777 /*
8778 * Set the IO_CONT flag, so that if this I/O gets passed to
8779 * ctl_data_submit_done(), it'll get passed back to
8780 	 * ctl_cnw_cont() for further processing.
8781 */
8782 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
8783 ctsio->io_cont = ctl_cnw_cont;
8784
8785 lbalen = (struct ctl_lba_len_flags *)
8786 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8787 lbalen->lba = lba;
8788 lbalen->len = num_blocks;
8789 lbalen->flags = CTL_LLF_COMPARE | flags;
8790
8791 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
8792 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8793 return (retval);
8794}
8795
8796int
8797ctl_verify(struct ctl_scsiio *ctsio)
8798{
8799 struct ctl_lun *lun;
8800 struct ctl_lba_len_flags *lbalen;
8801 uint64_t lba;
8802 uint32_t num_blocks;
8803 int bytchk, flags;
8804 int retval;
8805
8806 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8807
8808 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
8809
8810 bytchk = 0;
8811 flags = CTL_LLF_FUA;
8812 retval = CTL_RETVAL_COMPLETE;
8813
8814 switch (ctsio->cdb[0]) {
8815 case VERIFY_10: {
8816 struct scsi_verify_10 *cdb;
8817
8818 cdb = (struct scsi_verify_10 *)ctsio->cdb;
8819 if (cdb->byte2 & SVFY_BYTCHK)
8820 bytchk = 1;
8821 if (cdb->byte2 & SVFY_DPO)
8822 flags |= CTL_LLF_DPO;
8823 lba = scsi_4btoul(cdb->addr);
8824 num_blocks = scsi_2btoul(cdb->length);
8825 break;
8826 }
8827 case VERIFY_12: {
8828 struct scsi_verify_12 *cdb;
8829
8830 cdb = (struct scsi_verify_12 *)ctsio->cdb;
8831 if (cdb->byte2 & SVFY_BYTCHK)
8832 bytchk = 1;
8833 if (cdb->byte2 & SVFY_DPO)
8834 flags |= CTL_LLF_DPO;
8835 lba = scsi_4btoul(cdb->addr);
8836 num_blocks = scsi_4btoul(cdb->length);
8837 break;
8838 }
8839 case VERIFY_16: {
8840 struct scsi_rw_16 *cdb;
8841
8842 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8843 if (cdb->byte2 & SVFY_BYTCHK)
8844 bytchk = 1;
8845 if (cdb->byte2 & SVFY_DPO)
8846 flags |= CTL_LLF_DPO;
8847 lba = scsi_8btou64(cdb->addr);
8848 num_blocks = scsi_4btoul(cdb->length);
8849 break;
8850 }
8851 default:
8852 /*
8853 * We got a command we don't support. This shouldn't
8854 		 * happen; commands should be filtered out above us.
8855 */
8856 ctl_set_invalid_opcode(ctsio);
8857 ctl_done((union ctl_io *)ctsio);
8858 return (CTL_RETVAL_COMPLETE);
8859 }
8860
8861 /*
8862 	 * The first check is to make sure we're in bounds; the second
8863 * check is to catch wrap-around problems. If the lba + num blocks
8864 * is less than the lba, then we've wrapped around and the block
8865 * range is invalid anyway.
8866 */
8867 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8868 || ((lba + num_blocks) < lba)) {
8869 ctl_set_lba_out_of_range(ctsio);
8870 ctl_done((union ctl_io *)ctsio);
8871 return (CTL_RETVAL_COMPLETE);
8872 }
8873
8874 /*
8875 * According to SBC-3, a transfer length of 0 is not an error.
8876 */
8877 if (num_blocks == 0) {
8878 ctl_set_success(ctsio);
8879 ctl_done((union ctl_io *)ctsio);
8880 return (CTL_RETVAL_COMPLETE);
8881 }
8882
8883 lbalen = (struct ctl_lba_len_flags *)
8884 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8885 lbalen->lba = lba;
8886 lbalen->len = num_blocks;
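	/*
	 * With BYTCHK set the initiator supplies data to compare against the
	 * medium, so a data-out transfer is required; without it VERIFY only
	 * checks the medium and no data is transferred.
	 */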
8887 if (bytchk) {
8888 lbalen->flags = CTL_LLF_COMPARE | flags;
8889 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
8890 } else {
8891 lbalen->flags = CTL_LLF_VERIFY | flags;
8892 ctsio->kern_total_len = 0;
8893 }
8894 ctsio->kern_rel_offset = 0;
8895
8896 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
8897 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8898 return (retval);
8899}
8900
8901int
8902ctl_report_luns(struct ctl_scsiio *ctsio)
8903{
8904 struct ctl_softc *softc = control_softc;
8905 struct scsi_report_luns *cdb;
8906 struct scsi_report_luns_data *lun_data;
8907 struct ctl_lun *lun, *request_lun;
8908 struct ctl_port *port;
8909 int num_luns, retval;
8910 uint32_t alloc_len, lun_datalen;
8911 int num_filled, well_known;
8912 uint32_t initidx, targ_lun_id, lun_id;
8913
8914 retval = CTL_RETVAL_COMPLETE;
8915 well_known = 0;
8916
8917 cdb = (struct scsi_report_luns *)ctsio->cdb;
8918 port = ctl_io_port(&ctsio->io_hdr);
8919
8920 CTL_DEBUG_PRINT(("ctl_report_luns\n"));
8921
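	/*
	 * Count the LUNs visible through this port's LUN map so we know how
	 * much response space to allocate.
	 */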
8922 mtx_lock(&softc->ctl_lock);
8923 num_luns = 0;
8924 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) {
8925 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS)
8926 num_luns++;
8927 }
8928 mtx_unlock(&softc->ctl_lock);
8929
8930 switch (cdb->select_report) {
8931 case RPL_REPORT_DEFAULT:
8932 case RPL_REPORT_ALL:
8933 break;
8934 case RPL_REPORT_WELLKNOWN:
8935 well_known = 1;
8936 num_luns = 0;
8937 break;
8938 default:
8939 ctl_set_invalid_field(ctsio,
8940 /*sks_valid*/ 1,
8941 /*command*/ 1,
8942 /*field*/ 2,
8943 /*bit_valid*/ 0,
8944 /*bit*/ 0);
8945 ctl_done((union ctl_io *)ctsio);
8946 return (retval);
8947 break; /* NOTREACHED */
8948 }
8949
8950 alloc_len = scsi_4btoul(cdb->length);
8951 /*
8952 * The initiator has to allocate at least 16 bytes for this request,
8953 * so he can at least get the header and the first LUN. Otherwise
8954 * we reject the request (per SPC-3 rev 14, section 6.21).
8955 */
8956 if (alloc_len < (sizeof(struct scsi_report_luns_data) +
8957 sizeof(struct scsi_report_luns_lundata))) {
8958 ctl_set_invalid_field(ctsio,
8959 /*sks_valid*/ 1,
8960 /*command*/ 1,
8961 /*field*/ 6,
8962 /*bit_valid*/ 0,
8963 /*bit*/ 0);
8964 ctl_done((union ctl_io *)ctsio);
8965 return (retval);
8966 }
8967
8968 request_lun = (struct ctl_lun *)
8969 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8970
8971 lun_datalen = sizeof(*lun_data) +
8972 (num_luns * sizeof(struct scsi_report_luns_lundata));
8973
8974 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
8975 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
8976 ctsio->kern_sg_entries = 0;
8977
8978 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8979
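	/*
	 * Walk the port's LUN map again, encoding each visible LUN with the
	 * smallest addressing format that can represent its number.
	 */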
8980 mtx_lock(&softc->ctl_lock);
8981 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
8982 lun_id = ctl_lun_map_from_port(port, targ_lun_id);
8983 if (lun_id >= CTL_MAX_LUNS)
8984 continue;
8985 lun = softc->ctl_luns[lun_id];
8986 if (lun == NULL)
8987 continue;
8988
8989 if (targ_lun_id <= 0xff) {
8990 /*
8991 * Peripheral addressing method, bus number 0.
8992 */
8993 lun_data->luns[num_filled].lundata[0] =
8994 RPL_LUNDATA_ATYP_PERIPH;
8995 lun_data->luns[num_filled].lundata[1] = targ_lun_id;
8996 num_filled++;
8997 } else if (targ_lun_id <= 0x3fff) {
8998 /*
8999 * Flat addressing method.
9000 */
9001 lun_data->luns[num_filled].lundata[0] =
9002 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8);
9003 lun_data->luns[num_filled].lundata[1] =
9004 (targ_lun_id & 0xff);
9005 num_filled++;
9006 } else if (targ_lun_id <= 0xffffff) {
9007 /*
9008 * Extended flat addressing method.
9009 */
9010 lun_data->luns[num_filled].lundata[0] =
9011 RPL_LUNDATA_ATYP_EXTLUN | 0x12;
9012 scsi_ulto3b(targ_lun_id,
9013 &lun_data->luns[num_filled].lundata[1]);
9014 num_filled++;
9015 } else {
9016 printf("ctl_report_luns: bogus LUN number %jd, "
9017 "skipping\n", (intmax_t)targ_lun_id);
9018 }
9019 /*
9020 * According to SPC-3, rev 14 section 6.21:
9021 *
9022 * "The execution of a REPORT LUNS command to any valid and
9023 * installed logical unit shall clear the REPORTED LUNS DATA
9024 * HAS CHANGED unit attention condition for all logical
9025 * units of that target with respect to the requesting
9026 * initiator. A valid and installed logical unit is one
9027 * having a PERIPHERAL QUALIFIER of 000b in the standard
9028 * INQUIRY data (see 6.4.2)."
9029 *
9030 * If request_lun is NULL, the LUN this report luns command
9031 * was issued to is either disabled or doesn't exist. In that
9032 * case, we shouldn't clear any pending lun change unit
9033 * attention.
9034 */
9035 if (request_lun != NULL) {
9036 mtx_lock(&lun->lun_lock);
9037 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
9038 mtx_unlock(&lun->lun_lock);
9039 }
9040 }
9041 mtx_unlock(&softc->ctl_lock);
9042
9043 /*
9044 * It's quite possible that we've returned fewer LUNs than we allocated
9045 * space for. Trim it.
9046 */
9047 lun_datalen = sizeof(*lun_data) +
9048 (num_filled * sizeof(struct scsi_report_luns_lundata));
9049
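	/*
	 * Return no more than the initiator's allocation length; if our data
	 * is shorter, the unused part of the allocation becomes the residual.
	 */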
9050 if (lun_datalen < alloc_len) {
9051 ctsio->residual = alloc_len - lun_datalen;
9052 ctsio->kern_data_len = lun_datalen;
9053 ctsio->kern_total_len = lun_datalen;
9054 } else {
9055 ctsio->residual = 0;
9056 ctsio->kern_data_len = alloc_len;
9057 ctsio->kern_total_len = alloc_len;
9058 }
9059 ctsio->kern_data_resid = 0;
9060 ctsio->kern_rel_offset = 0;
9061 ctsio->kern_sg_entries = 0;
9062
9063 /*
9064 * We set this to the actual data length, regardless of how much
9065 * space we actually have to return results. If the user looks at
9066 * this value, he'll know whether or not he allocated enough space
9067 	 * and can reissue the command if necessary. We don't support well
9068 * known logical units, so if the user asks for that, return none.
9069 */
9070 scsi_ulto4b(lun_datalen - 8, lun_data->length);
9071
9072 /*
9073 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
9074 * this request.
9075 */
9076 ctl_set_success(ctsio);
9077 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9078 ctsio->be_move_done = ctl_config_move_done;
9079 ctl_datamove((union ctl_io *)ctsio);
9080 return (retval);
9081}
9082
9083int
9084ctl_request_sense(struct ctl_scsiio *ctsio)
9085{
9086 struct scsi_request_sense *cdb;
9087 struct scsi_sense_data *sense_ptr;
9088 struct ctl_softc *ctl_softc;
9089 struct ctl_lun *lun;
9090 uint32_t initidx;
9091 int have_error;
9092 scsi_sense_data_type sense_format;
9093 ctl_ua_type ua_type;
9094
9095 cdb = (struct scsi_request_sense *)ctsio->cdb;
9096
9097 ctl_softc = control_softc;
9098 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9099
9100 CTL_DEBUG_PRINT(("ctl_request_sense\n"));
9101
9102 /*
9103 * Determine which sense format the user wants.
9104 */
9105 if (cdb->byte2 & SRS_DESC)
9106 sense_format = SSD_TYPE_DESC;
9107 else
9108 sense_format = SSD_TYPE_FIXED;
9109
9110 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
9111 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
9112 ctsio->kern_sg_entries = 0;
9113
9114 /*
9115 * struct scsi_sense_data, which is currently set to 256 bytes, is
9116 * larger than the largest allowed value for the length field in the
9117 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
9118 */
9119 ctsio->residual = 0;
9120 ctsio->kern_data_len = cdb->length;
9121 ctsio->kern_total_len = cdb->length;
9122
9123 ctsio->kern_data_resid = 0;
9124 ctsio->kern_rel_offset = 0;
9125 ctsio->kern_sg_entries = 0;
9126
9127 /*
9128 * If we don't have a LUN, we don't have any pending sense.
9129 */
9130 if (lun == NULL)
9131 goto no_sense;
9132
9133 have_error = 0;
9134 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9135 /*
9136 * Check for pending sense, and then for pending unit attentions.
9137 * Pending sense gets returned first, then pending unit attentions.
9138 */
9139 mtx_lock(&lun->lun_lock);
9140#ifdef CTL_WITH_CA
9141 if (ctl_is_set(lun->have_ca, initidx)) {
9142 scsi_sense_data_type stored_format;
9143
9144 /*
9145 * Check to see which sense format was used for the stored
9146 * sense data.
9147 */
9148 stored_format = scsi_sense_type(&lun->pending_sense[initidx]);
9149
9150 /*
9151 * If the user requested a different sense format than the
9152 * one we stored, then we need to convert it to the other
9153 * format. If we're going from descriptor to fixed format
9154 * sense data, we may lose things in translation, depending
9155 * on what options were used.
9156 *
9157 		 * If the stored format is SSD_TYPE_NONE (i.e. invalid)
9158 		 * for some reason, we'll just copy it out as-is.
9159 */
9160 if ((stored_format == SSD_TYPE_FIXED)
9161 && (sense_format == SSD_TYPE_DESC))
9162 ctl_sense_to_desc((struct scsi_sense_data_fixed *)
9163 &lun->pending_sense[initidx],
9164 (struct scsi_sense_data_desc *)sense_ptr);
9165 else if ((stored_format == SSD_TYPE_DESC)
9166 && (sense_format == SSD_TYPE_FIXED))
9167 ctl_sense_to_fixed((struct scsi_sense_data_desc *)
9168 &lun->pending_sense[initidx],
9169 (struct scsi_sense_data_fixed *)sense_ptr);
9170 else
9171 memcpy(sense_ptr, &lun->pending_sense[initidx],
9172 MIN(sizeof(*sense_ptr),
9173 sizeof(lun->pending_sense[initidx])));
9174
9175 ctl_clear_mask(lun->have_ca, initidx);
9176 have_error = 1;
9177 } else
9178#endif
9179 {
9180 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format);
9181 if (ua_type != CTL_UA_NONE)
9182 have_error = 1;
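		/*
		 * Once the LUN inventory change has been reported to this
		 * initiator, clear the corresponding UA on all LUNs; the lock
		 * order requires dropping the LUN lock before taking the
		 * softc-wide lock.
		 */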
9183 if (ua_type == CTL_UA_LUN_CHANGE) {
9184 mtx_unlock(&lun->lun_lock);
9185 mtx_lock(&ctl_softc->ctl_lock);
9186 ctl_clr_ua_allluns(ctl_softc, initidx, ua_type);
9187 mtx_unlock(&ctl_softc->ctl_lock);
9188 mtx_lock(&lun->lun_lock);
9189 }
9190
9191 }
9192 mtx_unlock(&lun->lun_lock);
9193
9194 /*
9195 	 * If we already have a pending error, return it.
9196 */
9197 if (have_error != 0) {
9198 /*
9199 * We report the SCSI status as OK, since the status of the
9200 * request sense command itself is OK.
9201 * We report 0 for the sense length, because we aren't doing
9202 * autosense in this case. We're reporting sense as
9203 * parameter data.
9204 */
9205 ctl_set_success(ctsio);
9206 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9207 ctsio->be_move_done = ctl_config_move_done;
9208 ctl_datamove((union ctl_io *)ctsio);
9209 return (CTL_RETVAL_COMPLETE);
9210 }
9211
9212no_sense:
9213
9214 /*
9215 * No sense information to report, so we report that everything is
9216 * okay.
9217 */
9218 ctl_set_sense_data(sense_ptr,
9219 lun,
9220 sense_format,
9221 /*current_error*/ 1,
9222 /*sense_key*/ SSD_KEY_NO_SENSE,
9223 /*asc*/ 0x00,
9224 /*ascq*/ 0x00,
9225 SSD_ELEM_NONE);
9226
9227 /*
9228 * We report 0 for the sense length, because we aren't doing
9229 * autosense in this case. We're reporting sense as parameter data.
9230 */
9231 ctl_set_success(ctsio);
9232 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9233 ctsio->be_move_done = ctl_config_move_done;
9234 ctl_datamove((union ctl_io *)ctsio);
9235 return (CTL_RETVAL_COMPLETE);
9236}
9237
9238int
9239ctl_tur(struct ctl_scsiio *ctsio)
9240{
9241
9242 CTL_DEBUG_PRINT(("ctl_tur\n"));
9243
9244 ctl_set_success(ctsio);
9245 ctl_done((union ctl_io *)ctsio);
9246
9247 return (CTL_RETVAL_COMPLETE);
9248}
9249
9250/*
9251 * SCSI VPD page 0x00, the Supported VPD Pages page.
9252 */
9253static int
9254ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
9255{
9256 struct scsi_vpd_supported_pages *pages;
9257 int sup_page_size;
9258 struct ctl_lun *lun;
9259 int p;
9260
9261 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9262
9263 sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
9264 SCSI_EVPD_NUM_SUPPORTED_PAGES;
9265 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
9266 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
9267 ctsio->kern_sg_entries = 0;
9268
9269 if (sup_page_size < alloc_len) {
9270 ctsio->residual = alloc_len - sup_page_size;
9271 ctsio->kern_data_len = sup_page_size;
9272 ctsio->kern_total_len = sup_page_size;
9273 } else {
9274 ctsio->residual = 0;
9275 ctsio->kern_data_len = alloc_len;
9276 ctsio->kern_total_len = alloc_len;
9277 }
9278 ctsio->kern_data_resid = 0;
9279 ctsio->kern_rel_offset = 0;
9280 ctsio->kern_sg_entries = 0;
9281
9282 /*
9283 * The control device is always connected. The disk device, on the
9284 * other hand, may not be online all the time. Need to change this
9285 * to figure out whether the disk device is actually online or not.
9286 */
9287 if (lun != NULL)
9288 pages->device = (SID_QUAL_LU_CONNECTED << 5) |
9289 lun->be_lun->lun_type;
9290 else
9291 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9292
9293 p = 0;
9294 /* Supported VPD pages */
9295 pages->page_list[p++] = SVPD_SUPPORTED_PAGES;
9296 /* Serial Number */
9297 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER;
9298 /* Device Identification */
9299 pages->page_list[p++] = SVPD_DEVICE_ID;
9300 /* Extended INQUIRY Data */
9301 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA;
9302 /* Mode Page Policy */
9303 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY;
9304 /* SCSI Ports */
9305 pages->page_list[p++] = SVPD_SCSI_PORTS;
9306 /* Third-party Copy */
9307 pages->page_list[p++] = SVPD_SCSI_TPC;
9308 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
9309 /* Block limits */
9310 pages->page_list[p++] = SVPD_BLOCK_LIMITS;
9311 /* Block Device Characteristics */
9312 pages->page_list[p++] = SVPD_BDC;
9313 /* Logical Block Provisioning */
9314 pages->page_list[p++] = SVPD_LBP;
9315 }
9316 pages->length = p;
9317
9318 ctl_set_success(ctsio);
9319 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9320 ctsio->be_move_done = ctl_config_move_done;
9321 ctl_datamove((union ctl_io *)ctsio);
9322 return (CTL_RETVAL_COMPLETE);
9323}
9324
9325/*
9326 * SCSI VPD page 0x80, the Unit Serial Number page.
9327 */
9328static int
9329ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
9330{
9331 struct scsi_vpd_unit_serial_number *sn_ptr;
9332 struct ctl_lun *lun;
9333 int data_len;
9334
9335 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9336
9337 data_len = 4 + CTL_SN_LEN;
9338 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9339 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
9340 if (data_len < alloc_len) {
9341 ctsio->residual = alloc_len - data_len;
9342 ctsio->kern_data_len = data_len;
9343 ctsio->kern_total_len = data_len;
9344 } else {
9345 ctsio->residual = 0;
9346 ctsio->kern_data_len = alloc_len;
9347 ctsio->kern_total_len = alloc_len;
9348 }
9349 ctsio->kern_data_resid = 0;
9350 ctsio->kern_rel_offset = 0;
9351 ctsio->kern_sg_entries = 0;
9352
9353 /*
9354 * The control device is always connected. The disk device, on the
9355 * other hand, may not be online all the time. Need to change this
9356 * to figure out whether the disk device is actually online or not.
9357 */
9358 if (lun != NULL)
9359 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9360 lun->be_lun->lun_type;
9361 else
9362 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9363
9364 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
9365 sn_ptr->length = CTL_SN_LEN;
9366 /*
9367 * If we don't have a LUN, we just leave the serial number as
9368 * all spaces.
9369 */
9370 if (lun != NULL) {
9371 strncpy((char *)sn_ptr->serial_num,
9372 (char *)lun->be_lun->serial_num, CTL_SN_LEN);
9373 } else
9374 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN);
9375
9376 ctl_set_success(ctsio);
9377 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9378 ctsio->be_move_done = ctl_config_move_done;
9379 ctl_datamove((union ctl_io *)ctsio);
9380 return (CTL_RETVAL_COMPLETE);
9381}
9382
9383
9384/*
9385 * SCSI VPD page 0x86, the Extended INQUIRY Data page.
9386 */
9387static int
9388ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
9389{
9390 struct scsi_vpd_extended_inquiry_data *eid_ptr;
9391 struct ctl_lun *lun;
9392 int data_len;
9393
9394 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9395
9396 data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
9397 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9398 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
9399 ctsio->kern_sg_entries = 0;
9400
9401 if (data_len < alloc_len) {
9402 ctsio->residual = alloc_len - data_len;
9403 ctsio->kern_data_len = data_len;
9404 ctsio->kern_total_len = data_len;
9405 } else {
9406 ctsio->residual = 0;
9407 ctsio->kern_data_len = alloc_len;
9408 ctsio->kern_total_len = alloc_len;
9409 }
9410 ctsio->kern_data_resid = 0;
9411 ctsio->kern_rel_offset = 0;
9412 ctsio->kern_sg_entries = 0;
9413
9414 /*
9415 * The control device is always connected. The disk device, on the
9416 * other hand, may not be online all the time.
9417 */
9418 if (lun != NULL)
9419 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9420 lun->be_lun->lun_type;
9421 else
9422 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9423 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
9424 scsi_ulto2b(data_len - 4, eid_ptr->page_length);
9425 /*
9426 * We support head of queue, ordered and simple tags.
9427 */
9428 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
9429 /*
9430 * Volatile cache supported.
9431 */
9432 eid_ptr->flags3 = SVPD_EID_V_SUP;
9433
9434 /*
9435 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit
9436 	 * attention for a particular IT nexus on all LUNs once we have
9437 	 * reported it to that nexus. This bit is required as of SPC-4.
9438 */
9439 eid_ptr->flags4 = SVPD_EID_LUICLT;
9440
9441 /*
9442 * XXX KDM in order to correctly answer this, we would need
9443 * information from the SIM to determine how much sense data it
9444 * can send. So this would really be a path inquiry field, most
9445 * likely. This can be set to a maximum of 252 according to SPC-4,
9446 * but the hardware may or may not be able to support that much.
9447 * 0 just means that the maximum sense data length is not reported.
9448 */
9449 eid_ptr->max_sense_length = 0;
9450
9451 ctl_set_success(ctsio);
9452 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9453 ctsio->be_move_done = ctl_config_move_done;
9454 ctl_datamove((union ctl_io *)ctsio);
9455 return (CTL_RETVAL_COMPLETE);
9456}
9457
9458static int
9459ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
9460{
9461 struct scsi_vpd_mode_page_policy *mpp_ptr;
9462 struct ctl_lun *lun;
9463 int data_len;
9464
9465 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9466
9467 data_len = sizeof(struct scsi_vpd_mode_page_policy) +
9468 sizeof(struct scsi_vpd_mode_page_policy_descr);
9469
9470 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9471 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
9472 ctsio->kern_sg_entries = 0;
9473
9474 if (data_len < alloc_len) {
9475 ctsio->residual = alloc_len - data_len;
9476 ctsio->kern_data_len = data_len;
9477 ctsio->kern_total_len = data_len;
9478 } else {
9479 ctsio->residual = 0;
9480 ctsio->kern_data_len = alloc_len;
9481 ctsio->kern_total_len = alloc_len;
9482 }
9483 ctsio->kern_data_resid = 0;
9484 ctsio->kern_rel_offset = 0;
9485 ctsio->kern_sg_entries = 0;
9486
9487 /*
9488 * The control device is always connected. The disk device, on the
9489 * other hand, may not be online all the time.
9490 */
9491 if (lun != NULL)
9492 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9493 lun->be_lun->lun_type;
9494 else
9495 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9496 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
9497 scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
9498 mpp_ptr->descr[0].page_code = 0x3f;
9499 mpp_ptr->descr[0].subpage_code = 0xff;
9500 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;
9501
9502 ctl_set_success(ctsio);
9503 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9504 ctsio->be_move_done = ctl_config_move_done;
9505 ctl_datamove((union ctl_io *)ctsio);
9506 return (CTL_RETVAL_COMPLETE);
9507}
9508
9509/*
9510 * SCSI VPD page 0x83, the Device Identification page.
9511 */
9512static int
9513ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
9514{
9515 struct scsi_vpd_device_id *devid_ptr;
9516 struct scsi_vpd_id_descriptor *desc;
9517 struct ctl_softc *softc;
9518 struct ctl_lun *lun;
9519 struct ctl_port *port;
9520 int data_len;
9521 uint8_t proto;
9522
9523 softc = control_softc;
9524
9525 port = ctl_io_port(&ctsio->io_hdr);
9526 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9527
9528 data_len = sizeof(struct scsi_vpd_device_id) +
9529 sizeof(struct scsi_vpd_id_descriptor) +
9530 sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
9531 sizeof(struct scsi_vpd_id_descriptor) +
9532 sizeof(struct scsi_vpd_id_trgt_port_grp_id);
9533 if (lun && lun->lun_devid)
9534 data_len += lun->lun_devid->len;
9535 if (port && port->port_devid)
9536 data_len += port->port_devid->len;
9537 if (port && port->target_devid)
9538 data_len += port->target_devid->len;
9539
9540 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9541 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
9542 ctsio->kern_sg_entries = 0;
9543
9544 if (data_len < alloc_len) {
9545 ctsio->residual = alloc_len - data_len;
9546 ctsio->kern_data_len = data_len;
9547 ctsio->kern_total_len = data_len;
9548 } else {
9549 ctsio->residual = 0;
9550 ctsio->kern_data_len = alloc_len;
9551 ctsio->kern_total_len = alloc_len;
9552 }
9553 ctsio->kern_data_resid = 0;
9554 ctsio->kern_rel_offset = 0;
9555 ctsio->kern_sg_entries = 0;
9556
9557 /*
9558 * The control device is always connected. The disk device, on the
9559 * other hand, may not be online all the time.
9560 */
9561 if (lun != NULL)
9562 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9563 lun->be_lun->lun_type;
9564 else
9565 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9566 devid_ptr->page_code = SVPD_DEVICE_ID;
9567 scsi_ulto2b(data_len - 4, devid_ptr->length);
9568
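	/*
	 * The transport protocol of the port this command arrived on goes
	 * into the high nibble of each designator's protocol identifier /
	 * code set byte below.
	 */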
9569 if (port && port->port_type == CTL_PORT_FC)
9570 proto = SCSI_PROTO_FC << 4;
9571 else if (port && port->port_type == CTL_PORT_ISCSI)
9572 proto = SCSI_PROTO_ISCSI << 4;
9573 else
9574 proto = SCSI_PROTO_SPI << 4;
9575 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
9576
9577 /*
9578 * We're using a LUN association here. i.e., this device ID is a
9579 * per-LUN identifier.
9580 */
9581 if (lun && lun->lun_devid) {
9582 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
9583 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
9584 lun->lun_devid->len);
9585 }
9586
9587 /*
9588 * This is for the WWPN which is a port association.
9589 */
9590 if (port && port->port_devid) {
9591 memcpy(desc, port->port_devid->data, port->port_devid->len);
9592 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
9593 port->port_devid->len);
9594 }
9595
9596 /*
9597 * This is for the Relative Target Port(type 4h) identifier
9598 */
9599 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
9600 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
9601 SVPD_ID_TYPE_RELTARG;
9602 desc->length = 4;
9603 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
9604 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9605 sizeof(struct scsi_vpd_id_rel_trgt_port_id));
9606
9607 /*
9608 * This is for the Target Port Group(type 5h) identifier
9609 */
9610 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
9611 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
9612 SVPD_ID_TYPE_TPORTGRP;
9613 desc->length = 4;
9614 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1,
9615 &desc->identifier[2]);
9616 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9617 sizeof(struct scsi_vpd_id_trgt_port_grp_id));
9618
9619 /*
9620 * This is for the Target identifier
9621 */
9622 if (port && port->target_devid) {
9623 memcpy(desc, port->target_devid->data, port->target_devid->len);
9624 }
9625
9626 ctl_set_success(ctsio);
9627 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9628 ctsio->be_move_done = ctl_config_move_done;
9629 ctl_datamove((union ctl_io *)ctsio);
9630 return (CTL_RETVAL_COMPLETE);
9631}
9632
9633static int
9634ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
9635{
9636 struct ctl_softc *softc = control_softc;
9637 struct scsi_vpd_scsi_ports *sp;
9638 struct scsi_vpd_port_designation *pd;
9639 struct scsi_vpd_port_designation_cont *pdc;
9640 struct ctl_lun *lun;
9641 struct ctl_port *port;
9642 int data_len, num_target_ports, iid_len, id_len;
9643
9644 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9645
9646 num_target_ports = 0;
9647 iid_len = 0;
9648 id_len = 0;
9649 mtx_lock(&softc->ctl_lock);
9650 STAILQ_FOREACH(port, &softc->port_list, links) {
9651 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9652 continue;
9653 if (lun != NULL &&
9654 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
9655 continue;
9656 num_target_ports++;
9657 if (port->init_devid)
9658 iid_len += port->init_devid->len;
9659 if (port->port_devid)
9660 id_len += port->port_devid->len;
9661 }
9662 mtx_unlock(&softc->ctl_lock);
9663
9664 data_len = sizeof(struct scsi_vpd_scsi_ports) +
9665 num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
9666 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
9667 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9668 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
9669 ctsio->kern_sg_entries = 0;
9670
9671 if (data_len < alloc_len) {
9672 ctsio->residual = alloc_len - data_len;
9673 ctsio->kern_data_len = data_len;
9674 ctsio->kern_total_len = data_len;
9675 } else {
9676 ctsio->residual = 0;
9677 ctsio->kern_data_len = alloc_len;
9678 ctsio->kern_total_len = alloc_len;
9679 }
9680 ctsio->kern_data_resid = 0;
9681 ctsio->kern_rel_offset = 0;
9682 ctsio->kern_sg_entries = 0;
9683
9684 /*
9685 * The control device is always connected. The disk device, on the
9686 * other hand, may not be online all the time. Need to change this
9687 * to figure out whether the disk device is actually online or not.
9688 */
9689 if (lun != NULL)
9690 sp->device = (SID_QUAL_LU_CONNECTED << 5) |
9691 lun->be_lun->lun_type;
9692 else
9693 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9694
9695 sp->page_code = SVPD_SCSI_PORTS;
9696 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
9697 sp->page_length);
9698 pd = &sp->design[0];
9699
9700 mtx_lock(&softc->ctl_lock);
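	/*
	 * Emit one port designation descriptor for every online port that
	 * maps this LUN, including the port's initiator transport ID and
	 * target port designator when they are available.
	 */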
9701 STAILQ_FOREACH(port, &softc->port_list, links) {
9702 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9703 continue;
9704 if (lun != NULL &&
9705 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
9706 continue;
9707 scsi_ulto2b(port->targ_port, pd->relative_port_id);
9708 if (port->init_devid) {
9709 iid_len = port->init_devid->len;
9710 memcpy(pd->initiator_transportid,
9711 port->init_devid->data, port->init_devid->len);
9712 } else
9713 iid_len = 0;
9714 scsi_ulto2b(iid_len, pd->initiator_transportid_length);
9715 pdc = (struct scsi_vpd_port_designation_cont *)
9716 (&pd->initiator_transportid[iid_len]);
9717 if (port->port_devid) {
9718 id_len = port->port_devid->len;
9719 memcpy(pdc->target_port_descriptors,
9720 port->port_devid->data, port->port_devid->len);
9721 } else
9722 id_len = 0;
9723 scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
9724 pd = (struct scsi_vpd_port_designation *)
9725 ((uint8_t *)pdc->target_port_descriptors + id_len);
9726 }
9727 mtx_unlock(&softc->ctl_lock);
9728
9729 ctl_set_success(ctsio);
9730 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9731 ctsio->be_move_done = ctl_config_move_done;
9732 ctl_datamove((union ctl_io *)ctsio);
9733 return (CTL_RETVAL_COMPLETE);
9734}
9735
9736static int
9737ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
9738{
9739 struct scsi_vpd_block_limits *bl_ptr;
9740 struct ctl_lun *lun;
9741 int bs;
9742
9743 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9744
9745 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
9746 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
9747 ctsio->kern_sg_entries = 0;
9748
9749 if (sizeof(*bl_ptr) < alloc_len) {
9750 ctsio->residual = alloc_len - sizeof(*bl_ptr);
9751 ctsio->kern_data_len = sizeof(*bl_ptr);
9752 ctsio->kern_total_len = sizeof(*bl_ptr);
9753 } else {
9754 ctsio->residual = 0;
9755 ctsio->kern_data_len = alloc_len;
9756 ctsio->kern_total_len = alloc_len;
9757 }
9758 ctsio->kern_data_resid = 0;
9759 ctsio->kern_rel_offset = 0;
9760 ctsio->kern_sg_entries = 0;
9761
9762 /*
9763 * The control device is always connected. The disk device, on the
9764 * other hand, may not be online all the time. Need to change this
9765 * to figure out whether the disk device is actually online or not.
9766 */
9767 if (lun != NULL)
9768 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9769 lun->be_lun->lun_type;
9770 else
9771 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9772
9773 bl_ptr->page_code = SVPD_BLOCK_LIMITS;
9774 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
9775 bl_ptr->max_cmp_write_len = 0xff;
9776 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
9777 if (lun != NULL) {
9778 bs = lun->be_lun->blocksize;
9779 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
9780 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
9781 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
9782 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
9783 if (lun->be_lun->ublockexp != 0) {
9784 scsi_ulto4b((1 << lun->be_lun->ublockexp),
9785 bl_ptr->opt_unmap_grain);
9786 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
9787 bl_ptr->unmap_grain_align);
9788 }
9789 }
9790 scsi_ulto4b(lun->be_lun->atomicblock,
9791 bl_ptr->max_atomic_transfer_length);
9792 scsi_ulto4b(0, bl_ptr->atomic_alignment);
9793 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
9794 }
9795 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);
9796
9797 ctl_set_success(ctsio);
9798 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9799 ctsio->be_move_done = ctl_config_move_done;
9800 ctl_datamove((union ctl_io *)ctsio);
9801 return (CTL_RETVAL_COMPLETE);
9802}
9803
9804static int
9805ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
9806{
9807 struct scsi_vpd_block_device_characteristics *bdc_ptr;
9808 struct ctl_lun *lun;
9809 const char *value;
9810 u_int i;
9811
9812 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9813
9814 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
9815 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
9816 ctsio->kern_sg_entries = 0;
9817
9818 if (sizeof(*bdc_ptr) < alloc_len) {
9819 ctsio->residual = alloc_len - sizeof(*bdc_ptr);
9820 ctsio->kern_data_len = sizeof(*bdc_ptr);
9821 ctsio->kern_total_len = sizeof(*bdc_ptr);
9822 } else {
9823 ctsio->residual = 0;
9824 ctsio->kern_data_len = alloc_len;
9825 ctsio->kern_total_len = alloc_len;
9826 }
9827 ctsio->kern_data_resid = 0;
9828 ctsio->kern_rel_offset = 0;
9829 ctsio->kern_sg_entries = 0;
9830
9831 /*
9832 * The control device is always connected. The disk device, on the
9833 * other hand, may not be online all the time. Need to change this
9834 * to figure out whether the disk device is actually online or not.
9835 */
9836 if (lun != NULL)
9837 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9838 lun->be_lun->lun_type;
9839 else
9840 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9841 bdc_ptr->page_code = SVPD_BDC;
9842 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
9843 if (lun != NULL &&
9844 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL)
9845 i = strtol(value, NULL, 0);
9846 else
9847 i = CTL_DEFAULT_ROTATION_RATE;
9848 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
9849 if (lun != NULL &&
9850 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL)
9851 i = strtol(value, NULL, 0);
9852 else
9853 i = 0;
9854 bdc_ptr->wab_wac_ff = (i & 0x0f);
9855 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;
9856
9857 ctl_set_success(ctsio);
9858 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9859 ctsio->be_move_done = ctl_config_move_done;
9860 ctl_datamove((union ctl_io *)ctsio);
9861 return (CTL_RETVAL_COMPLETE);
9862}
9863
9864static int
9865ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
9866{
9867 struct scsi_vpd_logical_block_prov *lbp_ptr;
9868 struct ctl_lun *lun;
9869
9870 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9871
9872 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
9873 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
9874 ctsio->kern_sg_entries = 0;
9875
9876 if (sizeof(*lbp_ptr) < alloc_len) {
9877 ctsio->residual = alloc_len - sizeof(*lbp_ptr);
9878 ctsio->kern_data_len = sizeof(*lbp_ptr);
9879 ctsio->kern_total_len = sizeof(*lbp_ptr);
9880 } else {
9881 ctsio->residual = 0;
9882 ctsio->kern_data_len = alloc_len;
9883 ctsio->kern_total_len = alloc_len;
9884 }
9885 ctsio->kern_data_resid = 0;
9886 ctsio->kern_rel_offset = 0;
9887 ctsio->kern_sg_entries = 0;
9888
9889 /*
9890 * The control device is always connected. The disk device, on the
9891 * other hand, may not be online all the time. Need to change this
9892 * to figure out whether the disk device is actually online or not.
9893 */
9894 if (lun != NULL)
9895 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9896 lun->be_lun->lun_type;
9897 else
9898 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9899
9900 lbp_ptr->page_code = SVPD_LBP;
9901 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
9902 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
9903 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
9904 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
9905 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
9906 lbp_ptr->prov_type = SVPD_LBP_THIN;
9907 }
9908
9909 ctl_set_success(ctsio);
9910 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9911 ctsio->be_move_done = ctl_config_move_done;
9912 ctl_datamove((union ctl_io *)ctsio);
9913 return (CTL_RETVAL_COMPLETE);
9914}
9915
9916/*
9917 * INQUIRY with the EVPD bit set.
9918 */
9919static int
9920ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
9921{
9922 struct ctl_lun *lun;
9923 struct scsi_inquiry *cdb;
9924 int alloc_len, retval;
9925
9926 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9927 cdb = (struct scsi_inquiry *)ctsio->cdb;
9928 alloc_len = scsi_2btoul(cdb->length);
9929
9930 switch (cdb->page_code) {
9931 case SVPD_SUPPORTED_PAGES:
9932 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
9933 break;
9934 case SVPD_UNIT_SERIAL_NUMBER:
9935 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
9936 break;
9937 case SVPD_DEVICE_ID:
9938 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
9939 break;
9940 case SVPD_EXTENDED_INQUIRY_DATA:
9941 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
9942 break;
9943 case SVPD_MODE_PAGE_POLICY:
9944 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
9945 break;
9946 case SVPD_SCSI_PORTS:
9947 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
9948 break;
9949 case SVPD_SCSI_TPC:
9950 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
9951 break;
9952 case SVPD_BLOCK_LIMITS:
9953 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
9954 goto err;
9955 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
9956 break;
9957 case SVPD_BDC:
9958 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
9959 goto err;
9960 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
9961 break;
9962 case SVPD_LBP:
9963 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
9964 goto err;
9965 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
9966 break;
9967 default:
9968err:
9969 ctl_set_invalid_field(ctsio,
9970 /*sks_valid*/ 1,
9971 /*command*/ 1,
9972 /*field*/ 2,
9973 /*bit_valid*/ 0,
9974 /*bit*/ 0);
9975 ctl_done((union ctl_io *)ctsio);
9976 retval = CTL_RETVAL_COMPLETE;
9977 break;
9978 }
9979
9980 return (retval);
9981}
9982
9983/*
9984 * Standard INQUIRY data.
9985 */
9986static int
9987ctl_inquiry_std(struct ctl_scsiio *ctsio)
9988{
9989 struct scsi_inquiry_data *inq_ptr;
9990 struct scsi_inquiry *cdb;
9991 struct ctl_softc *softc;
9992 struct ctl_port *port;
9993 struct ctl_lun *lun;
9994 char *val;
9995 uint32_t alloc_len, data_len;
9996 ctl_port_type port_type;
9997
9998 softc = control_softc;
9999
10000 /*
10001 * Figure out whether we're talking to a Fibre Channel port or not.
10002 * We treat the ioctl front end, and any SCSI adapters, as packetized
10003 * SCSI front ends.
10004 */
10005 port = ctl_io_port(&ctsio->io_hdr);
10006 if (port != NULL)
10007 port_type = port->port_type;
10008 else
10009 port_type = CTL_PORT_SCSI;
10010 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
10011 port_type = CTL_PORT_SCSI;
10012
10013 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10014 cdb = (struct scsi_inquiry *)ctsio->cdb;
10015 alloc_len = scsi_2btoul(cdb->length);
10016
10017 /*
10018 * We malloc the full inquiry data size here and fill it
10019 	 * in. If the user asks for less, we'll return only
10020 	 * that much.
10021 */
10022 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
10023 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10024 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
10025 ctsio->kern_sg_entries = 0;
10026 ctsio->kern_data_resid = 0;
10027 ctsio->kern_rel_offset = 0;
10028
10029 if (data_len < alloc_len) {
10030 ctsio->residual = alloc_len - data_len;
10031 ctsio->kern_data_len = data_len;
10032 ctsio->kern_total_len = data_len;
10033 } else {
10034 ctsio->residual = 0;
10035 ctsio->kern_data_len = alloc_len;
10036 ctsio->kern_total_len = alloc_len;
10037 }
10038
10039 if (lun != NULL) {
10040 if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
10041 softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
10042 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10043 lun->be_lun->lun_type;
10044 } else {
10045 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
10046 lun->be_lun->lun_type;
10047 }
10048 } else
10049 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
10050
10051 /* RMB in byte 2 is 0 */
10052 inq_ptr->version = SCSI_REV_SPC4;
10053
10054 /*
10055 * According to SAM-3, even if a device only supports a single
10056 * level of LUN addressing, it should still set the HISUP bit:
10057 *
10058 * 4.9.1 Logical unit numbers overview
10059 *
10060 * All logical unit number formats described in this standard are
10061 * hierarchical in structure even when only a single level in that
10062 * hierarchy is used. The HISUP bit shall be set to one in the
10063 * standard INQUIRY data (see SPC-2) when any logical unit number
10064 * format described in this standard is used. Non-hierarchical
10065 * formats are outside the scope of this standard.
10066 *
10067 * Therefore we set the HiSup bit here.
10068 *
10069 	 * The response format is 2, per SPC-3.
10070 */
10071 inq_ptr->response_format = SID_HiSup | 2;
10072
10073 inq_ptr->additional_length = data_len -
10074 (offsetof(struct scsi_inquiry_data, additional_length) + 1);
10075 CTL_DEBUG_PRINT(("additional_length = %d\n",
10076 inq_ptr->additional_length));
10077
10078 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
10079 /* 16 bit addressing */
10080 if (port_type == CTL_PORT_SCSI)
10081 inq_ptr->spc2_flags = SPC2_SID_ADDR16;
10082 /* XXX set the SID_MultiP bit here if we're actually going to
10083 respond on multiple ports */
10084 inq_ptr->spc2_flags |= SPC2_SID_MultiP;
10085
10086 /* 16 bit data bus, synchronous transfers */
10087 if (port_type == CTL_PORT_SCSI)
10088 inq_ptr->flags = SID_WBus16 | SID_Sync;
10089 /*
10090 * XXX KDM do we want to support tagged queueing on the control
10091 * device at all?
10092 */
10093 if ((lun == NULL)
10094 || (lun->be_lun->lun_type != T_PROCESSOR))
10095 inq_ptr->flags |= SID_CmdQue;
10096 /*
10097 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
10098 * We have 8 bytes for the vendor name, and 16 bytes for the device
10099 * name and 4 bytes for the revision.
10100 */
10101 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10102 "vendor")) == NULL) {
10103 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
10104 } else {
10105 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
10106 strncpy(inq_ptr->vendor, val,
10107 min(sizeof(inq_ptr->vendor), strlen(val)));
10108 }
10109 if (lun == NULL) {
10110 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10111 sizeof(inq_ptr->product));
10112 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
10113 switch (lun->be_lun->lun_type) {
10114 case T_DIRECT:
10115 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10116 sizeof(inq_ptr->product));
10117 break;
10118 case T_PROCESSOR:
10119 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
10120 sizeof(inq_ptr->product));
10121 break;
10122 default:
10123 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
10124 sizeof(inq_ptr->product));
10125 break;
10126 }
10127 } else {
10128 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
10129 strncpy(inq_ptr->product, val,
10130 min(sizeof(inq_ptr->product), strlen(val)));
10131 }
10132
10133 /*
10134 * XXX make this a macro somewhere so it automatically gets
10135 * incremented when we make changes.
10136 */
10137 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10138 "revision")) == NULL) {
10139 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
10140 } else {
10141 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
10142 strncpy(inq_ptr->revision, val,
10143 min(sizeof(inq_ptr->revision), strlen(val)));
10144 }
10145
10146 /*
10147 * For parallel SCSI, we support double transition and single
10148 * transition clocking. We also support QAS (Quick Arbitration
10149 * and Selection) and Information Unit transfers on both the
10150 * control and array devices.
10151 */
10152 if (port_type == CTL_PORT_SCSI)
10153 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
10154 SID_SPI_IUS;
10155
10156 /* SAM-5 (no version claimed) */
10157 scsi_ulto2b(0x00A0, inq_ptr->version1);
10158 /* SPC-4 (no version claimed) */
10159 scsi_ulto2b(0x0460, inq_ptr->version2);
10160 if (port_type == CTL_PORT_FC) {
10161 /* FCP-2 ANSI INCITS.350:2003 */
10162 scsi_ulto2b(0x0917, inq_ptr->version3);
10163 } else if (port_type == CTL_PORT_SCSI) {
10164 /* SPI-4 ANSI INCITS.362:200x */
10165 scsi_ulto2b(0x0B56, inq_ptr->version3);
10166 } else if (port_type == CTL_PORT_ISCSI) {
10167 /* iSCSI (no version claimed) */
10168 scsi_ulto2b(0x0960, inq_ptr->version3);
10169 } else if (port_type == CTL_PORT_SAS) {
10170 /* SAS (no version claimed) */
10171 scsi_ulto2b(0x0BE0, inq_ptr->version3);
10172 }
10173
10174 if (lun == NULL) {
10175 /* SBC-4 (no version claimed) */
10176 scsi_ulto2b(0x0600, inq_ptr->version4);
10177 } else {
10178 switch (lun->be_lun->lun_type) {
10179 case T_DIRECT:
10180 /* SBC-4 (no version claimed) */
10181 scsi_ulto2b(0x0600, inq_ptr->version4);
10182 break;
10183 case T_PROCESSOR:
10184 default:
10185 break;
10186 }
10187 }
10188
10189 ctl_set_success(ctsio);
10190 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10191 ctsio->be_move_done = ctl_config_move_done;
10192 ctl_datamove((union ctl_io *)ctsio);
10193 return (CTL_RETVAL_COMPLETE);
10194}
10195
10196int
10197ctl_inquiry(struct ctl_scsiio *ctsio)
10198{
10199 struct scsi_inquiry *cdb;
10200 int retval;
10201
10202 CTL_DEBUG_PRINT(("ctl_inquiry\n"));
10203
10204 cdb = (struct scsi_inquiry *)ctsio->cdb;
10205 if (cdb->byte2 & SI_EVPD)
10206 retval = ctl_inquiry_evpd(ctsio);
10207 else if (cdb->page_code == 0)
10208 retval = ctl_inquiry_std(ctsio);
10209 else {
10210 ctl_set_invalid_field(ctsio,
10211 /*sks_valid*/ 1,
10212 /*command*/ 1,
10213 /*field*/ 2,
10214 /*bit_valid*/ 0,
10215 /*bit*/ 0);
10216 ctl_done((union ctl_io *)ctsio);
10217 return (CTL_RETVAL_COMPLETE);
10218 }
10219
10220 return (retval);
10221}
10222
10223/*
10224 * For known CDB types, parse the LBA and length.
10225 */
10226static int
10227ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
10228{
10229 if (io->io_hdr.io_type != CTL_IO_SCSI)
10230 return (1);
10231
10232 switch (io->scsiio.cdb[0]) {
10233 case COMPARE_AND_WRITE: {
10234 struct scsi_compare_and_write *cdb;
10235
10236 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
10237
10238 *lba = scsi_8btou64(cdb->addr);
10239 *len = cdb->length;
10240 break;
10241 }
10242 case READ_6:
10243 case WRITE_6: {
10244 struct scsi_rw_6 *cdb;
10245
10246 cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
10247
10248 *lba = scsi_3btoul(cdb->addr);
10249 /* only 5 bits are valid in the most significant address byte */
10250 *lba &= 0x1fffff;
10251 *len = cdb->length;
10252 break;
10253 }
10254 case READ_10:
10255 case WRITE_10: {
10256 struct scsi_rw_10 *cdb;
10257
10258 cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
10259
10260 *lba = scsi_4btoul(cdb->addr);
10261 *len = scsi_2btoul(cdb->length);
10262 break;
10263 }
10264 case WRITE_VERIFY_10: {
10265 struct scsi_write_verify_10 *cdb;
10266
10267 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
10268
10269 *lba = scsi_4btoul(cdb->addr);
10270 *len = scsi_2btoul(cdb->length);
10271 break;
10272 }
10273 case READ_12:
10274 case WRITE_12: {
10275 struct scsi_rw_12 *cdb;
10276
10277 cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
10278
10279 *lba = scsi_4btoul(cdb->addr);
10280 *len = scsi_4btoul(cdb->length);
10281 break;
10282 }
10283 case WRITE_VERIFY_12: {
10284 struct scsi_write_verify_12 *cdb;
10285
10286 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
10287
10288 *lba = scsi_4btoul(cdb->addr);
10289 *len = scsi_4btoul(cdb->length);
10290 break;
10291 }
10292 case READ_16:
10293 case WRITE_16:
10294 case WRITE_ATOMIC_16: {
10295 struct scsi_rw_16 *cdb;
10296
10297 cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
10298
10299 *lba = scsi_8btou64(cdb->addr);
10300 *len = scsi_4btoul(cdb->length);
10301 break;
10302 }
10303 case WRITE_VERIFY_16: {
10304 struct scsi_write_verify_16 *cdb;
10305
10306 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
10307
10308 *lba = scsi_8btou64(cdb->addr);
10309 *len = scsi_4btoul(cdb->length);
10310 break;
10311 }
10312 case WRITE_SAME_10: {
10313 struct scsi_write_same_10 *cdb;
10314
10315 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;
10316
10317 *lba = scsi_4btoul(cdb->addr);
10318 *len = scsi_2btoul(cdb->length);
10319 break;
10320 }
10321 case WRITE_SAME_16: {
10322 struct scsi_write_same_16 *cdb;
10323
10324 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;
10325
10326 *lba = scsi_8btou64(cdb->addr);
10327 *len = scsi_4btoul(cdb->length);
10328 break;
10329 }
10330 case VERIFY_10: {
10331 struct scsi_verify_10 *cdb;
10332
10333 cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
10334
10335 *lba = scsi_4btoul(cdb->addr);
10336 *len = scsi_2btoul(cdb->length);
10337 break;
10338 }
10339 case VERIFY_12: {
10340 struct scsi_verify_12 *cdb;
10341
10342 cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
10343
10344 *lba = scsi_4btoul(cdb->addr);
10345 *len = scsi_4btoul(cdb->length);
10346 break;
10347 }
10348 case VERIFY_16: {
10349 struct scsi_verify_16 *cdb;
10350
10351 cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
10352
10353 *lba = scsi_8btou64(cdb->addr);
10354 *len = scsi_4btoul(cdb->length);
10355 break;
10356 }
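	/*
	 * UNMAP carries its LBA ranges in the data-out buffer rather than in
	 * the CDB, so report the widest possible range here; the individual
	 * ranges are checked in ctl_extent_check_unmap() once the data has
	 * arrived.
	 */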
10357 case UNMAP: {
10358 *lba = 0;
10359 *len = UINT64_MAX;
10360 break;
10361 }
10362 case SERVICE_ACTION_IN: { /* GET LBA STATUS */
10363 struct scsi_get_lba_status *cdb;
10364
10365 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
10366 *lba = scsi_8btou64(cdb->addr);
10367 *len = UINT32_MAX;
10368 break;
10369 }
10370 default:
10371 return (1);
10372 break; /* NOTREACHED */
10373 }
10374
10375 return (0);
10376}
10377
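/*
 * Decide whether two LBA ranges collide.  With "seq" set the first range is
 * extended by one block, so a request that begins exactly where the previous
 * range ends is also reported as blocking.
 */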
10378static ctl_action
10379ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
10380 bool seq)
10381{
10382 uint64_t endlba1, endlba2;
10383
10384 endlba1 = lba1 + len1 - (seq ? 0 : 1);
10385 endlba2 = lba2 + len2 - 1;
10386
10387 if ((endlba1 < lba2) || (endlba2 < lba1))
10388 return (CTL_ACTION_PASS);
10389 else
10390 return (CTL_ACTION_BLOCK);
10391}
10392
10393static int
10394ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
10395{
10396 struct ctl_ptr_len_flags *ptrlen;
10397 struct scsi_unmap_desc *buf, *end, *range;
10398 uint64_t lba;
10399 uint32_t len;
10400
10401 /* If not UNMAP -- go other way. */
10402 if (io->io_hdr.io_type != CTL_IO_SCSI ||
10403 io->scsiio.cdb[0] != UNMAP)
10404 return (CTL_ACTION_ERROR);
10405
10406 /* If UNMAP without data -- block and wait for data. */
10407 ptrlen = (struct ctl_ptr_len_flags *)
10408 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
10409 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
10410 ptrlen->ptr == NULL)
10411 return (CTL_ACTION_BLOCK);
10412
10413 /* UNMAP with data -- check for collision. */
10414 buf = (struct scsi_unmap_desc *)ptrlen->ptr;
10415 end = buf + ptrlen->len / sizeof(*buf);
10416 for (range = buf; range < end; range++) {
10417 lba = scsi_8btou64(range->lba);
10418 len = scsi_4btoul(range->length);
10419 if ((lba < lba2 + len2) && (lba + len > lba2))
10420 return (CTL_ACTION_BLOCK);
10421 }
10422 return (CTL_ACTION_PASS);
10423}
10424
10425static ctl_action
10426ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
10427{
10428 uint64_t lba1, lba2;
10429 uint64_t len1, len2;
10430 int retval;
10431
10432 if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
10433 return (CTL_ACTION_ERROR);
10434
10435 retval = ctl_extent_check_unmap(io1, lba2, len2);
10436 if (retval != CTL_ACTION_ERROR)
10437 return (retval);
10438
10439 if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
10440 return (CTL_ACTION_ERROR);
10441
10442 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
10443}
10444
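/*
 * Sequential variant: block only when the pending command (io2) starts
 * exactly where the earlier one (io1) ends, e.g. lba1 = 100, len1 = 8,
 * lba2 = 108; any other placement passes.
 */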
10445static ctl_action
10446ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
10447{
10448 uint64_t lba1, lba2;
10449 uint64_t len1, len2;
10450
10451 if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
10452 return (CTL_ACTION_ERROR);
10453 if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
10454 return (CTL_ACTION_ERROR);
10455
10456 if (lba1 + len1 == lba2)
10457 return (CTL_ACTION_BLOCK);
10458 return (CTL_ACTION_PASS);
10459}
10460
10461static ctl_action
10462ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
10463 union ctl_io *ooa_io)
10464{
10465 const struct ctl_cmd_entry *pending_entry, *ooa_entry;
10466 ctl_serialize_action *serialize_row;
10467
10468 /*
10469 * The initiator attempted multiple untagged commands at the same
10470 * time. Can't do that.
10471 */
10472 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
10473 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
10474 && ((pending_io->io_hdr.nexus.targ_port ==
10475 ooa_io->io_hdr.nexus.targ_port)
10476 && (pending_io->io_hdr.nexus.initid ==
10477 ooa_io->io_hdr.nexus.initid))
10478 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
10479 CTL_FLAG_STATUS_SENT)) == 0))
10480 return (CTL_ACTION_OVERLAP);
10481
10482 /*
10483 * The initiator attempted to send multiple tagged commands with
10484 * the same ID. (It's fine if different initiators have the same
10485 * tag ID.)
10486 *
10487 * Even if all of those conditions are true, we don't kill the I/O
10488 * if the command ahead of us has been aborted. We won't end up
10489 * sending it to the FETD, and it's perfectly legal to resend a
10490 * command with the same tag number as long as the previous
10491 * instance of this tag number has been aborted somehow.
10492 */
10493 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
10494 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
10495 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
10496 && ((pending_io->io_hdr.nexus.targ_port ==
10497 ooa_io->io_hdr.nexus.targ_port)
10498 && (pending_io->io_hdr.nexus.initid ==
10499 ooa_io->io_hdr.nexus.initid))
10500 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
10501 CTL_FLAG_STATUS_SENT)) == 0))
10502 return (CTL_ACTION_OVERLAP_TAG);
10503
10504 /*
10505 * If we get a head of queue tag, SAM-3 says that we should
10506 * immediately execute it.
10507 *
10508 * What happens if this command would normally block for some other
10509 * reason? e.g. a request sense with a head of queue tag
10510 * immediately after a write. Normally that would block, but this
10511 * will result in its getting executed immediately...
10512 *
10513 * We currently return "pass" instead of "skip", so we'll end up
10514 * going through the rest of the queue to check for overlapped tags.
10515 *
10516 * XXX KDM check for other types of blockage first??
10517 */
10518 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
10519 return (CTL_ACTION_PASS);
10520
10521 /*
10522 * Ordered tags have to block until all items ahead of them
10523 * have completed. If we get called with an ordered tag, we always
10524 	 * block if something else is ahead of us in the queue.
10525 */
10526 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
10527 return (CTL_ACTION_BLOCK);
10528
10529 /*
10530 * Simple tags get blocked until all head of queue and ordered tags
10531 * ahead of them have completed. I'm lumping untagged commands in
10532 * with simple tags here. XXX KDM is that the right thing to do?
10533 */
10534 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
10535 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
10536 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
10537 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
10538 return (CTL_ACTION_BLOCK);
10539
10540 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
10541 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
10542
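	/*
	 * The serialization table is indexed first by the seridx of the
	 * command already in the OOA queue, then by the seridx of the
	 * pending command; the entry says whether the pending command
	 * passes, blocks, is skipped, or needs an extent check.
	 */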
10543 serialize_row = ctl_serialize_table[ooa_entry->seridx];
10544
10545 switch (serialize_row[pending_entry->seridx]) {
10546 case CTL_SER_BLOCK:
10547 return (CTL_ACTION_BLOCK);
10548 case CTL_SER_EXTENT:
10549 return (ctl_extent_check(ooa_io, pending_io,
10550 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
10551 case CTL_SER_EXTENTOPT:
10552 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
10553 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
10554 return (ctl_extent_check(ooa_io, pending_io,
10555 (lun->be_lun &&
10556 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
10557 return (CTL_ACTION_PASS);
10558 case CTL_SER_EXTENTSEQ:
10559 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
10560 return (ctl_extent_check_seq(ooa_io, pending_io));
10561 return (CTL_ACTION_PASS);
10562 case CTL_SER_PASS:
10563 return (CTL_ACTION_PASS);
10564 case CTL_SER_BLOCKOPT:
10565 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
10566 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
10567 return (CTL_ACTION_BLOCK);
10568 return (CTL_ACTION_PASS);
10569 case CTL_SER_SKIP:
10570 return (CTL_ACTION_SKIP);
10571 default:
10572 panic("invalid serialization value %d",
10573 serialize_row[pending_entry->seridx]);
10574 }
10575
10576 return (CTL_ACTION_ERROR);
10577}
10578
10579/*
10580 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
10581 * Assumptions:
10582 * - pending_io is generally either incoming, or on the blocked queue
10583  * - starting_io is the I/O we want to start the check with.
10584 */
10585static ctl_action
10586ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
10587 union ctl_io *starting_io)
10588{
10589 union ctl_io *ooa_io;
10590 ctl_action action;
10591
10592 mtx_assert(&lun->lun_lock, MA_OWNED);
10593
10594 /*
10595 * Run back along the OOA queue, starting with the current
10596 * blocked I/O and going through every I/O before it on the
10597 * queue. If starting_io is NULL, we'll just end up returning
10598 * CTL_ACTION_PASS.
10599 */
10600 for (ooa_io = starting_io; ooa_io != NULL;
10601 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
10602 ooa_links)){
10603
10604 /*
10605 * This routine just checks to see whether
10606 * cur_blocked is blocked by ooa_io, which is ahead
10607 * of it in the queue. It doesn't queue/dequeue
10608 * cur_blocked.
10609 */
10610 action = ctl_check_for_blockage(lun, pending_io, ooa_io);
10611 switch (action) {
10612 case CTL_ACTION_BLOCK:
10613 case CTL_ACTION_OVERLAP:
10614 case CTL_ACTION_OVERLAP_TAG:
10615 case CTL_ACTION_SKIP:
10616 case CTL_ACTION_ERROR:
10617 return (action);
10618 break; /* NOTREACHED */
10619 case CTL_ACTION_PASS:
10620 break;
10621 default:
10622 panic("invalid action %d", action);
10623 break; /* NOTREACHED */
10624 }
10625 }
10626
10627 return (CTL_ACTION_PASS);
10628}
10629
10630/*
10631 * Assumptions:
10632 * - An I/O has just completed, and has been removed from the per-LUN OOA
10633 * queue, so some items on the blocked queue may now be unblocked.
10634 */
10635static int
10636ctl_check_blocked(struct ctl_lun *lun)
10637{
10638 struct ctl_softc *softc = lun->ctl_softc;
10639 union ctl_io *cur_blocked, *next_blocked;
10640
10641 mtx_assert(&lun->lun_lock, MA_OWNED);
10642
10643 /*
10644 * Run forward from the head of the blocked queue, checking each
10645 * entry against the I/Os prior to it on the OOA queue to see if
10646 * there is still any blockage.
10647 *
10648 	 * We cannot use the TAILQ_FOREACH() macro, because it can't cope
10649 	 * with the current entry being removed from the list while it is
10650 	 * being traversed.
10651 */
10652 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
10653 cur_blocked != NULL; cur_blocked = next_blocked) {
10654 union ctl_io *prev_ooa;
10655 ctl_action action;
10656
10657 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
10658 blocked_links);
10659
10660 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
10661 ctl_ooaq, ooa_links);
10662
10663 /*
10664 * If cur_blocked happens to be the first item in the OOA
10665 * queue now, prev_ooa will be NULL, and the action
10666 * returned will just be CTL_ACTION_PASS.
10667 */
10668 action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
10669
10670 switch (action) {
10671 case CTL_ACTION_BLOCK:
10672 /* Nothing to do here, still blocked */
10673 break;
10674 case CTL_ACTION_OVERLAP:
10675 case CTL_ACTION_OVERLAP_TAG:
10676 /*
10677 * This shouldn't happen! In theory we've already
10678 * checked this command for overlap...
10679 */
10680 break;
10681 case CTL_ACTION_PASS:
10682 case CTL_ACTION_SKIP: {
10683 const struct ctl_cmd_entry *entry;
10642 int isc_retval;
10684
10685 /*
10686 			 * The skip case shouldn't happen; this transaction
10687 			 * should never have made it onto the blocked queue.
10688 */
10689 /*
10690 * This I/O is no longer blocked, we can remove it
10691 * from the blocked queue. Since this is a TAILQ
10692 * (doubly linked list), we can do O(1) removals
10693 * from any place on the list.
10694 */
10695 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
10696 blocked_links);
10697 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
10698
10658 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
10699 if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
10700 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
10701 /*
10702 * Need to send IO back to original side to
10703 * run
10704 */
10705 union ctl_ha_msg msg_info;
10706
10707 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
10708 msg_info.hdr.original_sc =
10709 cur_blocked->io_hdr.original_sc;
10710 msg_info.hdr.serializing_sc = cur_blocked;
10711 msg_info.hdr.msg_type = CTL_MSG_R2R;
10669 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
10670 &msg_info, sizeof(msg_info), 0)) >
10671 CTL_HA_STATUS_SUCCESS) {
10672 printf("CTL:Check Blocked error from "
10673 "ctl_ha_msg_send %d\n",
10674 isc_retval);
10675 }
10712 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
10713 sizeof(msg_info.hdr), M_NOWAIT);
10714 break;
10715 }
10716 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
10717
10718 /*
10719 * Check this I/O for LUN state changes that may
10720 * have happened while this command was blocked.
10721 * The LUN state may have been changed by a command
10722 * ahead of us in the queue, so we need to re-check
10723 * for any states that can be caused by SCSI
10724 * commands.
10725 */
10726 if (ctl_scsiio_lun_check(lun, entry,
10727 &cur_blocked->scsiio) == 0) {
10728 cur_blocked->io_hdr.flags |=
10729 CTL_FLAG_IS_WAS_ON_RTR;
10730 ctl_enqueue_rtr(cur_blocked);
10731 } else
10732 ctl_done(cur_blocked);
10733 break;
10734 }
10735 default:
10736 /*
10737 * This probably shouldn't happen -- we shouldn't
10738 * get CTL_ACTION_ERROR, or anything else.
10739 */
10740 break;
10741 }
10742 }
10743
10744 return (CTL_RETVAL_COMPLETE);
10745}
10746
10747/*
10748 * This routine (with one exception) checks LUN flags that can be set by
10749 * commands ahead of us in the OOA queue. These flags have to be checked
10750 * when a command initially comes in, and when we pull a command off the
10751 * blocked queue and are preparing to execute it. The reason we have to
10752 * check these flags for commands on the blocked queue is that the LUN
10753 * state may have been changed by a command ahead of us while we're on the
10754 * blocked queue.
10755 *
10756 * Ordering is somewhat important with these checks, so please pay
10757 * careful attention to the placement of any new checks.
10758 */
10759static int
10760ctl_scsiio_lun_check(struct ctl_lun *lun,
10761 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
10762{
10763 struct ctl_softc *softc = lun->ctl_softc;
10764 int retval;
10765 uint32_t residx;
10766
10767 retval = 0;
10768
10769 mtx_assert(&lun->lun_lock, MA_OWNED);
10770
10771 /*
10734 * If this shelf is a secondary shelf controller, we have to reject
10735 * any media access commands.
10772 * If this shelf is a secondary shelf controller, we may have to
10773 * reject some commands disallowed by HA mode and link state.
10774 */
10737 if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 &&
10738 (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) {
10739 ctl_set_lun_standby(ctsio);
10740 retval = 1;
10741 goto bailout;
10775 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
10776 if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
10777 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
10778 ctl_set_lun_unavail(ctsio);
10779 retval = 1;
10780 goto bailout;
10781 }
10782 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
10783 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
10784 ctl_set_lun_transit(ctsio);
10785 retval = 1;
10786 goto bailout;
10787 }
10788 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
10789 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
10790 ctl_set_lun_standby(ctsio);
10791 retval = 1;
10792 goto bailout;
10793 }
10794
10795 		/* The rest of the checks are only done on the executing side */
10796 if (softc->ha_mode == CTL_HA_MODE_XFER)
10797 goto bailout;
10798 }
10799
10800 if (entry->pattern & CTL_LUN_PAT_WRITE) {
10801 if (lun->be_lun &&
10802 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
10803 ctl_set_sense(ctsio, /*current_error*/ 1,
10804 /*sense_key*/ SSD_KEY_DATA_PROTECT,
10805 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE);
10806 retval = 1;
10807 goto bailout;
10808 }
10809 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
10810 .eca_and_aen & SCP_SWP) != 0) {
10811 ctl_set_sense(ctsio, /*current_error*/ 1,
10812 /*sense_key*/ SSD_KEY_DATA_PROTECT,
10813 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
10814 retval = 1;
10815 goto bailout;
10816 }
10817 }
10818
10819 /*
10820 * Check for a reservation conflict. If this command isn't allowed
10821 * even on reserved LUNs, and if this initiator isn't the one who
10822 * reserved us, reject the command with a reservation conflict.
10823 */
10768 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
10824 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
10825 if ((lun->flags & CTL_LUN_RESERVED)
10826 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
10827 if (lun->res_idx != residx) {
10828 ctl_set_reservation_conflict(ctsio);
10829 retval = 1;
10830 goto bailout;
10831 }
10832 }
10833
10834 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
10835 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
10836 /* No reservation or command is allowed. */;
10837 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
10838 (lun->res_type == SPR_TYPE_WR_EX ||
10839 lun->res_type == SPR_TYPE_WR_EX_RO ||
10840 lun->res_type == SPR_TYPE_WR_EX_AR)) {
10841 /* The command is allowed for Write Exclusive resv. */;
10842 } else {
10843 /*
10844 		 * If we aren't registered, or this is a single-holder type
10845 		 * reservation (Write Exclusive or Exclusive Access, res_type < 4)
10846 		 * and we aren't the holder, then set a conflict.
10847 */
10848 if (ctl_get_prkey(lun, residx) == 0
10849 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
10850 ctl_set_reservation_conflict(ctsio);
10851 retval = 1;
10852 goto bailout;
10853 }
10798
10854 }
10855
10856 if ((lun->flags & CTL_LUN_OFFLINE)
10802 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
10857 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) {
10858 ctl_set_lun_not_ready(ctsio);
10859 retval = 1;
10860 goto bailout;
10861 }
10862
10808 /*
10809 * If the LUN is stopped, see if this particular command is allowed
10810 * for a stopped lun. Otherwise, reject it with 0x04,0x02.
10811 */
10863 if ((lun->flags & CTL_LUN_STOPPED)
10864 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
10865 /* "Logical unit not ready, initializing cmd. required" */
10866 ctl_set_lun_stopped(ctsio);
10867 retval = 1;
10868 goto bailout;
10869 }
10870
10871 if ((lun->flags & CTL_LUN_INOPERABLE)
10872 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
10873 /* "Medium format corrupted" */
10874 ctl_set_medium_format_corrupted(ctsio);
10875 retval = 1;
10876 goto bailout;
10877 }
10878
10879bailout:
10880 return (retval);
10830
10881}
10882
10883static void
10884ctl_failover_io(union ctl_io *io, int have_lock)
10885{
10886 ctl_set_busy(&io->scsiio);
10887 ctl_done(io);
10888}
10889
10840#ifdef notyet
10890static void
10842ctl_failover(void)
10891ctl_failover_lun(struct ctl_lun *lun)
10892{
10844 struct ctl_lun *lun;
10845 struct ctl_softc *softc;
10846 union ctl_io *next_io, *pending_io;
10847 union ctl_io *io;
10848 int lun_idx;
10893 struct ctl_softc *softc = lun->ctl_softc;
10894 struct ctl_io_hdr *io, *next_io;
10895
10850 softc = control_softc;
10851
10852 mtx_lock(&softc->ctl_lock);
10853 /*
10854 * Remove any cmds from the other SC from the rtr queue. These
10855 * will obviously only be for LUNs for which we're the primary.
10856 * We can't send status or get/send data for these commands.
10857 * Since they haven't been executed yet, we can just remove them.
10858 * We'll either abort them or delete them below, depending on
10859 * which HA mode we're in.
10860 */
10861#ifdef notyet
10862 mtx_lock(&softc->queue_lock);
10863 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
10864 io != NULL; io = next_io) {
10865 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
10866 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
10867 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
10868 ctl_io_hdr, links);
10869 }
10870 mtx_unlock(&softc->queue_lock);
10871#endif
10872
10873 for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) {
10874 lun = softc->ctl_luns[lun_idx];
10875 if (lun==NULL)
10876 continue;
10877
10878 /*
10879 * Processor LUNs are primary on both sides.
10880 * XXX will this always be true?
10881 */
10882 if (lun->be_lun->lun_type == T_PROCESSOR)
10883 continue;
10884
10885 if ((lun->flags & CTL_LUN_PRIMARY_SC)
10886 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
10887 printf("FAILOVER: primary lun %d\n", lun_idx);
10888 /*
10889 * Remove all commands from the other SC. First from the
10890 * blocked queue then from the ooa queue. Once we have
10891 * removed them. Call ctl_check_blocked to see if there
10892 * is anything that can run.
10893 */
10894 for (io = (union ctl_io *)TAILQ_FIRST(
10895 &lun->blocked_queue); io != NULL; io = next_io) {
10896
10897 next_io = (union ctl_io *)TAILQ_NEXT(
10898 &io->io_hdr, blocked_links);
10899
10900 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
10901 TAILQ_REMOVE(&lun->blocked_queue,
10902 &io->io_hdr,blocked_links);
10903 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
10904 TAILQ_REMOVE(&lun->ooa_queue,
10905 &io->io_hdr, ooa_links);
10906
10907 ctl_free_io(io);
10896 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun));
10897 if (softc->ha_mode == CTL_HA_MODE_XFER) {
10898 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
10899 /* We are master */
10900 if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
10901 if (io->flags & CTL_FLAG_IO_ACTIVE) {
10902 io->flags |= CTL_FLAG_ABORT;
10903 				} else { /* This can only be due to DATAMOVE */
10904 io->msg_type = CTL_MSG_DATAMOVE_DONE;
10905 io->flags |= CTL_FLAG_IO_ACTIVE;
10906 io->port_status = 31340;
10907 ctl_enqueue_isc((union ctl_io *)io);
10908 }
10909 }
10910
10911 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
10912 io != NULL; io = next_io) {
10913
10914 next_io = (union ctl_io *)TAILQ_NEXT(
10915 &io->io_hdr, ooa_links);
10916
10917 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
10918
10919 TAILQ_REMOVE(&lun->ooa_queue,
10920 &io->io_hdr,
10921 ooa_links);
10922
10923 ctl_free_io(io);
10924 }
10925 }
10926 ctl_check_blocked(lun);
10927 } else if ((lun->flags & CTL_LUN_PRIMARY_SC)
10928 && (softc->ha_mode == CTL_HA_MODE_XFER)) {
10929
10930 printf("FAILOVER: primary lun %d\n", lun_idx);
10931 /*
10932 * Abort all commands from the other SC. We can't
10933 * send status back for them now. These should get
10934 * cleaned up when they are completed or come out
10935 * for a datamove operation.
10936 */
10937 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
10938 io != NULL; io = next_io) {
10939 next_io = (union ctl_io *)TAILQ_NEXT(
10940 &io->io_hdr, ooa_links);
10941
10942 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
10943 io->io_hdr.flags |= CTL_FLAG_ABORT;
10944 }
10945 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
10946 && (softc->ha_mode == CTL_HA_MODE_XFER)) {
10947
10948 printf("FAILOVER: secondary lun %d\n", lun_idx);
10949
10950 lun->flags |= CTL_LUN_PRIMARY_SC;
10951
10952 /*
10953 * We send all I/O that was sent to this controller
10954 * and redirected to the other side back with
10955 * busy status, and have the initiator retry it.
10956 * Figuring out how much data has been transferred,
10957 * etc. and picking up where we left off would be
10958 * very tricky.
10959 *
10960 * XXX KDM need to remove I/O from the blocked
10961 * queue as well!
10962 */
10963 for (pending_io = (union ctl_io *)TAILQ_FIRST(
10964 &lun->ooa_queue); pending_io != NULL;
10965 pending_io = next_io) {
10966
10967 next_io = (union ctl_io *)TAILQ_NEXT(
10968 &pending_io->io_hdr, ooa_links);
10969
10970 pending_io->io_hdr.flags &=
10971 ~CTL_FLAG_SENT_2OTHER_SC;
10972
10973 if (pending_io->io_hdr.flags &
10974 CTL_FLAG_IO_ACTIVE) {
10975 pending_io->io_hdr.flags |=
10976 CTL_FLAG_FAILOVER;
10910 /* We are slave */
10911 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
10912 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
10913 if (io->flags & CTL_FLAG_IO_ACTIVE) {
10914 io->flags |= CTL_FLAG_FAILOVER;
10915 } else {
10978 ctl_set_busy(&pending_io->scsiio);
10979 ctl_done(pending_io);
10916 ctl_set_busy(&((union ctl_io *)io)->
10917 scsiio);
10918 ctl_done((union ctl_io *)io);
10919 }
10920 }
10982
10983 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
10984 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
10985 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
10986 printf("FAILOVER: secondary lun %d\n", lun_idx);
10987 /*
10988 			 * If the first I/O on the OOA queue is not on the RtR
10989 			 * queue, add it.
10990 */
10991 lun->flags |= CTL_LUN_PRIMARY_SC;
10992
10993 pending_io = (union ctl_io *)TAILQ_FIRST(
10994 &lun->ooa_queue);
10995 if (pending_io==NULL) {
10996 printf("Nothing on OOA queue\n");
10997 continue;
10921 }
10922 } else { /* SERIALIZE modes */
10923 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
10924 next_io) {
10925 /* We are master */
10926 if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
10927 TAILQ_REMOVE(&lun->blocked_queue, io,
10928 blocked_links);
10929 io->flags &= ~CTL_FLAG_BLOCKED;
10930 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
10931 ctl_free_io((union ctl_io *)io);
10932 }
10999
11000 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11001 if ((pending_io->io_hdr.flags &
11002 CTL_FLAG_IS_WAS_ON_RTR) == 0) {
11003 pending_io->io_hdr.flags |=
11004 CTL_FLAG_IS_WAS_ON_RTR;
11005 ctl_enqueue_rtr(pending_io);
10933 }
10934 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
10935 /* We are master */
10936 if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
10937 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
10938 ctl_free_io((union ctl_io *)io);
10939 }
11007#if 0
11008 else
11009 {
11010 printf("Tag 0x%04x is running\n",
11011 pending_io->scsiio.tag_num);
11012 }
11013#endif
11014
11015 next_io = (union ctl_io *)TAILQ_NEXT(
11016 &pending_io->io_hdr, ooa_links);
11017 for (pending_io=next_io; pending_io != NULL;
11018 pending_io = next_io) {
11019 pending_io->io_hdr.flags &=
11020 ~CTL_FLAG_SENT_2OTHER_SC;
11021 next_io = (union ctl_io *)TAILQ_NEXT(
11022 &pending_io->io_hdr, ooa_links);
11023 if (pending_io->io_hdr.flags &
11024 CTL_FLAG_IS_WAS_ON_RTR) {
11025#if 0
11026 printf("Tag 0x%04x is running\n",
11027 pending_io->scsiio.tag_num);
11028#endif
11029 continue;
10940 /* We are slave */
10941 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
10942 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
10943 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
10944 ctl_set_busy(&((union ctl_io *)io)->
10945 scsiio);
10946 ctl_done((union ctl_io *)io);
10947 }
11031
11032 switch (ctl_check_ooa(lun, pending_io,
11033 (union ctl_io *)TAILQ_PREV(
11034 &pending_io->io_hdr, ctl_ooaq,
11035 ooa_links))) {
11036
11037 case CTL_ACTION_BLOCK:
11038 TAILQ_INSERT_TAIL(&lun->blocked_queue,
11039 &pending_io->io_hdr,
11040 blocked_links);
11041 pending_io->io_hdr.flags |=
11042 CTL_FLAG_BLOCKED;
11043 break;
11044 case CTL_ACTION_PASS:
11045 case CTL_ACTION_SKIP:
11046 pending_io->io_hdr.flags |=
11047 CTL_FLAG_IS_WAS_ON_RTR;
11048 ctl_enqueue_rtr(pending_io);
11049 break;
11050 case CTL_ACTION_OVERLAP:
11051 ctl_set_overlapped_cmd(
11052 (struct ctl_scsiio *)pending_io);
11053 ctl_done(pending_io);
11054 break;
11055 case CTL_ACTION_OVERLAP_TAG:
11056 ctl_set_overlapped_tag(
11057 (struct ctl_scsiio *)pending_io,
11058 pending_io->scsiio.tag_num & 0xff);
11059 ctl_done(pending_io);
11060 break;
11061 case CTL_ACTION_ERROR:
11062 default:
11063 ctl_set_internal_failure(
11064 (struct ctl_scsiio *)pending_io,
11065 					/*sks_valid*/ 0,
11066 					/*retry_count*/ 0);
11067 ctl_done(pending_io);
11068 break;
11069 }
10948 }
11071
11072 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
11073 } else {
11074 panic("Unhandled HA mode failover, LUN flags = %#x, "
11075 		    "ha_mode = %#x", lun->flags, softc->ha_mode);
10949 }
10950 ctl_check_blocked(lun);
10951 }
11078 ctl_pause_rtr = 0;
11079 mtx_unlock(&softc->ctl_lock);
10952}
11081#endif
10953
10954static int
10955ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
10956{
10957 struct ctl_lun *lun;
10958 const struct ctl_cmd_entry *entry;
10959 uint32_t initidx, targ_lun;
10960 int retval;
10961
10962 retval = 0;
10963
10964 lun = NULL;
10965
10966 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
10967 if ((targ_lun < CTL_MAX_LUNS)
10968 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
10969 /*
10970 * If the LUN is invalid, pretend that it doesn't exist.
10971 * It will go away as soon as all pending I/O has been
10972 * completed.
10973 */
10974 mtx_lock(&lun->lun_lock);
10975 if (lun->flags & CTL_LUN_DISABLED) {
10976 mtx_unlock(&lun->lun_lock);
10977 lun = NULL;
10978 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
10979 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
10980 } else {
10981 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
10982 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
10983 lun->be_lun;
11113 if (lun->be_lun->lun_type == T_PROCESSOR) {
11114 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
11115 }
10984
10985 /*
10986 * Every I/O goes into the OOA queue for a
10987 * particular LUN, and stays there until completion.
10988 */
10989#ifdef CTL_TIME_IO
10990 if (TAILQ_EMPTY(&lun->ooa_queue)) {
10991 lun->idle_time += getsbinuptime() -
10992 lun->last_busy;
10993 }
10994#endif
10995 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
10996 ooa_links);
10997 }
10998 } else {
10999 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
11000 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
11001 }
11002
11003 	/* Get command entry and return error if it is unsupported. */
11004 entry = ctl_validate_command(ctsio);
11005 if (entry == NULL) {
11006 if (lun)
11007 mtx_unlock(&lun->lun_lock);
11008 return (retval);
11009 }
11010
11011 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11012 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11013
11014 /*
11015 * Check to see whether we can send this command to LUNs that don't
11016 * exist. This should pretty much only be the case for inquiry
11017 	 * and request sense. Further checks below require having a LUN,
11018 	 * so we can't fully validate the command here. Just put it on
11019 	 * the rtr queue.
11020 */
11021 if (lun == NULL) {
11022 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
11023 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11024 ctl_enqueue_rtr((union ctl_io *)ctsio);
11025 return (retval);
11026 }
11027
11028 ctl_set_unsupported_lun(ctsio);
11029 ctl_done((union ctl_io *)ctsio);
11030 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11031 return (retval);
11032 } else {
11033 /*
11034 * Make sure we support this particular command on this LUN.
11035 * e.g., we don't support writes to the control LUN.
11036 */
11037 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11038 mtx_unlock(&lun->lun_lock);
11039 ctl_set_invalid_opcode(ctsio);
11040 ctl_done((union ctl_io *)ctsio);
11041 return (retval);
11042 }
11043 }
11044
11045 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11046
11047#ifdef CTL_WITH_CA
11048 /*
11049 * If we've got a request sense, it'll clear the contingent
11050 * allegiance condition. Otherwise, if we have a CA condition for
11051 * this initiator, clear it, because it sent down a command other
11052 * than request sense.
11053 */
11054 if ((ctsio->cdb[0] != REQUEST_SENSE)
11055 && (ctl_is_set(lun->have_ca, initidx)))
11056 ctl_clear_mask(lun->have_ca, initidx);
11057#endif
11058
11059 /*
11060 * If the command has this flag set, it handles its own unit
11061 	 * attention reporting, so we shouldn't do anything. Otherwise we
11062 * check for any pending unit attentions, and send them back to the
11063 * initiator. We only do this when a command initially comes in,
11064 * not when we pull it off the blocked queue.
11065 *
11066 * According to SAM-3, section 5.3.2, the order that things get
11067 * presented back to the host is basically unit attentions caused
11068 * by some sort of reset event, busy status, reservation conflicts
11069 * or task set full, and finally any other status.
11070 *
11071 * One issue here is that some of the unit attentions we report
11072 * don't fall into the "reset" category (e.g. "reported luns data
11073 * has changed"). So reporting it here, before the reservation
11074 * check, may be technically wrong. I guess the only thing to do
11075 * would be to check for and report the reset events here, and then
11076 * check for the other unit attention types after we check for a
11077 * reservation conflict.
11078 *
11079 * XXX KDM need to fix this
11080 */
11081 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
11082 ctl_ua_type ua_type;
11083 scsi_sense_data_type sense_format;
11084
11085 if (lun->flags & CTL_LUN_SENSE_DESC)
11086 sense_format = SSD_TYPE_DESC;
11087 else
11088 sense_format = SSD_TYPE_FIXED;
11089
11090 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
11091 sense_format);
11092 if (ua_type != CTL_UA_NONE) {
11093 mtx_unlock(&lun->lun_lock);
11094 ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
11095 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
11096 ctsio->sense_len = SSD_FULL_SIZE;
11097 ctl_done((union ctl_io *)ctsio);
11098 return (retval);
11099 }
11100 }
11101
11102
11103 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
11104 mtx_unlock(&lun->lun_lock);
11105 ctl_done((union ctl_io *)ctsio);
11106 return (retval);
11107 }
11108
11109 /*
11110 * XXX CHD this is where we want to send IO to other side if
11111 * this LUN is secondary on this SC. We will need to make a copy
11112 	 * of the IO and flag the IO on this side as SENT_2OTHER and flag
11113 * the copy we send as FROM_OTHER.
11114 * We also need to stuff the address of the original IO so we can
11115 	 * find it easily. Something similar will need to be done on the other
11116 * side so when we are done we can find the copy.
11117 */
11250 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
11118 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
11119 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0) {
11120 union ctl_ha_msg msg_info;
11121 int isc_retval;
11122
11123 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
11124 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11125 mtx_unlock(&lun->lun_lock);
11126
11127 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
11128 msg_info.hdr.original_sc = (union ctl_io *)ctsio;
11258#if 0
11259 printf("1. ctsio %p\n", ctsio);
11260#endif
11129 msg_info.hdr.serializing_sc = NULL;
11130 msg_info.hdr.nexus = ctsio->io_hdr.nexus;
11131 msg_info.scsi.tag_num = ctsio->tag_num;
11132 msg_info.scsi.tag_type = ctsio->tag_type;
11133 msg_info.scsi.cdb_len = ctsio->cdb_len;
11134 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
11135
11267 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11268
11269 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11270 (void *)&msg_info, sizeof(msg_info), 0)) >
11271 CTL_HA_STATUS_SUCCESS) {
11272 printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
11273 isc_retval);
11274 printf("CTL:opcode is %x\n", ctsio->cdb[0]);
11275 } else {
11276#if 0
11277 printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
11278#endif
11136 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11137 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
11138 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
11139 ctl_set_busy(ctsio);
11140 ctl_done((union ctl_io *)ctsio);
11141 return (retval);
11142 }
11280
11281 /*
11282 * XXX KDM this I/O is off the incoming queue, but hasn't
11283 * been inserted on any other queue. We may need to come
11284 * up with a holding queue while we wait for serialization
11285 * so that we have an idea of what we're waiting for from
11286 * the other side.
11287 */
11288 mtx_unlock(&lun->lun_lock);
11143 return (retval);
11144 }
11145
11146 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
11147 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
11148 ctl_ooaq, ooa_links))) {
11149 case CTL_ACTION_BLOCK:
11150 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
11151 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
11152 blocked_links);
11153 mtx_unlock(&lun->lun_lock);
11154 return (retval);
11155 case CTL_ACTION_PASS:
11156 case CTL_ACTION_SKIP:
11157 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11158 mtx_unlock(&lun->lun_lock);
11159 ctl_enqueue_rtr((union ctl_io *)ctsio);
11160 break;
11161 case CTL_ACTION_OVERLAP:
11162 mtx_unlock(&lun->lun_lock);
11163 ctl_set_overlapped_cmd(ctsio);
11164 ctl_done((union ctl_io *)ctsio);
11165 break;
11166 case CTL_ACTION_OVERLAP_TAG:
11167 mtx_unlock(&lun->lun_lock);
11168 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
11169 ctl_done((union ctl_io *)ctsio);
11170 break;
11171 case CTL_ACTION_ERROR:
11172 default:
11173 mtx_unlock(&lun->lun_lock);
11174 ctl_set_internal_failure(ctsio,
11175 /*sks_valid*/ 0,
11176 /*retry_count*/ 0);
11177 ctl_done((union ctl_io *)ctsio);
11178 break;
11179 }
11180 return (retval);
11181}
11182
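/*
 * Look up the command table entry for a CDB.  For opcodes flagged
 * CTL_CMD_FLAG_SA5 (e.g. SERVICE ACTION IN), the execute pointer is
 * reused as a nested table indexed by the CDB's service action field,
 * so a second lookup selects the real entry.
 */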
11183const struct ctl_cmd_entry *
11184ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
11185{
11186 const struct ctl_cmd_entry *entry;
11187 int service_action;
11188
11189 entry = &ctl_cmd_table[ctsio->cdb[0]];
11190 if (sa)
11191 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
11192 if (entry->flags & CTL_CMD_FLAG_SA5) {
11193 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
11194 entry = &((const struct ctl_cmd_entry *)
11195 entry->execute)[service_action];
11196 }
11197 return (entry);
11198}
11199
11200const struct ctl_cmd_entry *
11201ctl_validate_command(struct ctl_scsiio *ctsio)
11202{
11203 const struct ctl_cmd_entry *entry;
11204 int i, sa;
11205 uint8_t diff;
11206
11207 entry = ctl_get_cmd_entry(ctsio, &sa);
11208 if (entry->execute == NULL) {
11209 if (sa)
11210 ctl_set_invalid_field(ctsio,
11211 /*sks_valid*/ 1,
11212 /*command*/ 1,
11213 /*field*/ 1,
11214 /*bit_valid*/ 1,
11215 /*bit*/ 4);
11216 else
11217 ctl_set_invalid_opcode(ctsio);
11218 ctl_done((union ctl_io *)ctsio);
11219 return (NULL);
11220 }
11221 KASSERT(entry->length > 0,
11222 ("Not defined length for command 0x%02x/0x%02x",
11223 ctsio->cdb[0], ctsio->cdb[1]));
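	/*
	 * Validate the CDB against the per-opcode usage mask: any bit the
	 * mask does not allow is reported as an invalid field, with the
	 * sense-key-specific data pointing at the offending byte and the
	 * highest offending bit (fls(diff) - 1).
	 */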
11224 for (i = 1; i < entry->length; i++) {
11225 diff = ctsio->cdb[i] & ~entry->usage[i - 1];
11226 if (diff == 0)
11227 continue;
11228 ctl_set_invalid_field(ctsio,
11229 /*sks_valid*/ 1,
11230 /*command*/ 1,
11231 /*field*/ i,
11232 /*bit_valid*/ 1,
11233 /*bit*/ fls(diff) - 1);
11234 ctl_done((union ctl_io *)ctsio);
11235 return (NULL);
11236 }
11237 return (entry);
11238}
11239
11240static int
11241ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
11242{
11243
11244 switch (lun_type) {
11245 case T_PROCESSOR:
11246 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) &&
11247 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
11248 return (0);
11249 break;
11250 case T_DIRECT:
11251 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) &&
11252 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
11253 return (0);
11254 break;
11255 default:
11256 return (0);
11257 }
11258 return (1);
11259}
11260
11261static int
11262ctl_scsiio(struct ctl_scsiio *ctsio)
11263{
11264 int retval;
11265 const struct ctl_cmd_entry *entry;
11266
11267 retval = CTL_RETVAL_COMPLETE;
11268
11269 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
11270
11271 entry = ctl_get_cmd_entry(ctsio, NULL);
11272
11273 /*
11274 * If this I/O has been aborted, just send it straight to
11275 * ctl_done() without executing it.
11276 */
11277 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
11278 ctl_done((union ctl_io *)ctsio);
11279 goto bailout;
11280 }
11281
11282 /*
11283 * All the checks should have been handled by ctl_scsiio_precheck().
11284 * We should be clear now to just execute the I/O.
11285 */
11286 retval = entry->execute(ctsio);
11287
11288bailout:
11289 return (retval);
11290}
11291
11292/*
11293 * Since we only implement one target right now, a bus reset simply resets
11294 * our single target.
11295 */
11296static int
11297ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io)
11298{
11299 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET));
11300}
11301
11302static int
11303ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
11304 ctl_ua_type ua_type)
11305{
11306 struct ctl_lun *lun;
11307 int retval;
11308
11309 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
11310 union ctl_ha_msg msg_info;
11311
11458 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
11312 msg_info.hdr.nexus = io->io_hdr.nexus;
11313 		if (ua_type == CTL_UA_TARG_RESET)
11314 msg_info.task.task_action = CTL_TASK_TARGET_RESET;
11315 else
11316 msg_info.task.task_action = CTL_TASK_BUS_RESET;
11317 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11318 msg_info.hdr.original_sc = NULL;
11319 msg_info.hdr.serializing_sc = NULL;
11467 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11468 (void *)&msg_info, sizeof(msg_info), 0)) {
11469 }
11320 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11321 sizeof(msg_info.task), M_WAITOK);
11322 }
11323 retval = 0;
11324
11325 mtx_lock(&softc->ctl_lock);
11326 STAILQ_FOREACH(lun, &softc->lun_list, links)
11327 retval += ctl_lun_reset(lun, io, ua_type);
11328 mtx_unlock(&softc->ctl_lock);
11329
11330 return (retval);
11331}
11332
11333/*
11334 * The LUN should always be set. The I/O is optional, and is used to
11335 * distinguish between I/Os sent by this initiator, and by other
11336 * initiators. We set unit attention for initiators other than this one.
11337 * SAM-3 is vague on this point. It does say that a unit attention should
11338 * be established for other initiators when a LUN is reset (see section
11339 * 5.7.3), but it doesn't specifically say that the unit attention should
11340 * be established for this particular initiator when a LUN is reset. Here
11341 * is the relevant text, from SAM-3 rev 8:
11342 *
11343 * 5.7.2 When a SCSI initiator port aborts its own tasks
11344 *
11345 * When a SCSI initiator port causes its own task(s) to be aborted, no
11346 * notification that the task(s) have been aborted shall be returned to
11347 * the SCSI initiator port other than the completion response for the
11348 * command or task management function action that caused the task(s) to
11349 * be aborted and notification(s) associated with related effects of the
11350 * action (e.g., a reset unit attention condition).
11351 *
11352 * XXX KDM for now, we're setting unit attention for all initiators.
11353 */
11354static int
11355ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
11356{
11357 union ctl_io *xio;
11358#if 0
11359 uint32_t initidx;
11360#endif
11361#ifdef CTL_WITH_CA
11362 int i;
11363#endif
11364
11365 mtx_lock(&lun->lun_lock);
11366 /*
11367 * Run through the OOA queue and abort each I/O.
11368 */
11369 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11370 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11371 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
11372 }
11373
11374 /*
11375 	 * This version sets unit attention for every initiator.
11376 */
11377#if 0
11378 initidx = ctl_get_initindex(&io->io_hdr.nexus);
11379 ctl_est_ua_all(lun, initidx, ua_type);
11380#else
11381 ctl_est_ua_all(lun, -1, ua_type);
11382#endif
11383
11384 /*
11385 * A reset (any kind, really) clears reservations established with
11386 * RESERVE/RELEASE. It does not clear reservations established
11387 * with PERSISTENT RESERVE OUT, but we don't support that at the
11388 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address
11389 * reservations made with the RESERVE/RELEASE commands, because
11390 * those commands are obsolete in SPC-3.
11391 */
11392 lun->flags &= ~CTL_LUN_RESERVED;
11393
11394#ifdef CTL_WITH_CA
11395 for (i = 0; i < CTL_MAX_INITIATORS; i++)
11396 ctl_clear_mask(lun->have_ca, i);
11397#endif
11398 mtx_unlock(&lun->lun_lock);
11399
11400 return (0);
11401}
11402
11403static void
11404ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
11405 int other_sc)
11406{
11407 union ctl_io *xio;
11408
11409 mtx_assert(&lun->lun_lock, MA_OWNED);
11410
11411 /*
11412 * Run through the OOA queue and attempt to find the given I/O.
11413 * The target port, initiator ID, tag type and tag number have to
11414 * match the values that we got from the initiator. If we have an
11415 * untagged command to abort, simply abort the first untagged command
11416 * we come to. We only allow one untagged command at a time of course.
11417 */
11418 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11419 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11420
11421 if ((targ_port == UINT32_MAX ||
11422 targ_port == xio->io_hdr.nexus.targ_port) &&
11423 (init_id == UINT32_MAX ||
11424 init_id == xio->io_hdr.nexus.initid)) {
11425 if (targ_port != xio->io_hdr.nexus.targ_port ||
11426 init_id != xio->io_hdr.nexus.initid)
11427 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
11428 xio->io_hdr.flags |= CTL_FLAG_ABORT;
11429 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
11430 union ctl_ha_msg msg_info;
11431
11432 msg_info.hdr.nexus = xio->io_hdr.nexus;
11433 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
11434 msg_info.task.tag_num = xio->scsiio.tag_num;
11435 msg_info.task.tag_type = xio->scsiio.tag_type;
11436 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11437 msg_info.hdr.original_sc = NULL;
11438 msg_info.hdr.serializing_sc = NULL;
11587 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11588 (void *)&msg_info, sizeof(msg_info), 0);
11439 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11440 sizeof(msg_info.task), M_NOWAIT);
11441 }
11442 }
11443 }
11444}
11445
11446static int
11447ctl_abort_task_set(union ctl_io *io)
11448{
11449 struct ctl_softc *softc = control_softc;
11450 struct ctl_lun *lun;
11451 uint32_t targ_lun;
11452
11453 /*
11454 * Look up the LUN.
11455 */
11456 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11457 mtx_lock(&softc->ctl_lock);
11458 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
11459 lun = softc->ctl_luns[targ_lun];
11460 else {
11461 mtx_unlock(&softc->ctl_lock);
11462 return (1);
11463 }
11464
11465 mtx_lock(&lun->lun_lock);
11466 mtx_unlock(&softc->ctl_lock);
11467 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
11468 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
11469 io->io_hdr.nexus.initid,
11470 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
11471 } else { /* CTL_TASK_CLEAR_TASK_SET */
11472 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
11473 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
11474 }
11475 mtx_unlock(&lun->lun_lock);
11476 return (0);
11477}
11478
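/*
 * I_T nexus reset: for every LUN, abort the tasks that belong to this
 * initiator/port nexus, drop any RESERVE/RELEASE reservation it holds,
 * and establish an I_T NEXUS LOSS unit attention for that initiator.
 */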
11479static int
11480ctl_i_t_nexus_reset(union ctl_io *io)
11481{
11482 struct ctl_softc *softc = control_softc;
11483 struct ctl_lun *lun;
11632 uint32_t initidx, residx;
11484 uint32_t initidx;
11485
11486 initidx = ctl_get_initindex(&io->io_hdr.nexus);
11635 residx = ctl_get_resindex(&io->io_hdr.nexus);
11487 mtx_lock(&softc->ctl_lock);
11488 STAILQ_FOREACH(lun, &softc->lun_list, links) {
11489 mtx_lock(&lun->lun_lock);
11490 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
11491 io->io_hdr.nexus.initid,
11492 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
11493#ifdef CTL_WITH_CA
11494 ctl_clear_mask(lun->have_ca, initidx);
11495#endif
11645 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
11496 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
11497 lun->flags &= ~CTL_LUN_RESERVED;
11498 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS);
11499 mtx_unlock(&lun->lun_lock);
11500 }
11501 mtx_unlock(&softc->ctl_lock);
11502 return (0);
11503}
11504
11505static int
11506ctl_abort_task(union ctl_io *io)
11507{
11508 union ctl_io *xio;
11509 struct ctl_lun *lun;
11510 struct ctl_softc *softc;
11511#if 0
11512 struct sbuf sb;
11513 char printbuf[128];
11514#endif
11515 int found;
11516 uint32_t targ_lun;
11517
11518 softc = control_softc;
11519 found = 0;
11520
11521 /*
11522 * Look up the LUN.
11523 */
11524 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11525 mtx_lock(&softc->ctl_lock);
11526 if ((targ_lun < CTL_MAX_LUNS)
11527 && (softc->ctl_luns[targ_lun] != NULL))
11528 lun = softc->ctl_luns[targ_lun];
11529 else {
11530 mtx_unlock(&softc->ctl_lock);
11531 return (1);
11532 }
11533
11534#if 0
11535 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
11536 lun->lun, io->taskio.tag_num, io->taskio.tag_type);
11537#endif
11538
11539 mtx_lock(&lun->lun_lock);
11540 mtx_unlock(&softc->ctl_lock);
11541 /*
11542 * Run through the OOA queue and attempt to find the given I/O.
11543 * The target port, initiator ID, tag type and tag number have to
11544 * match the values that we got from the initiator. If we have an
11545 * untagged command to abort, simply abort the first untagged command
11546 * we come to. We only allow one untagged command at a time of course.
11547 */
11548 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11549 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11550#if 0
11551 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);
11552
11553 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
11554 lun->lun, xio->scsiio.tag_num,
11555 xio->scsiio.tag_type,
11556 (xio->io_hdr.blocked_links.tqe_prev
11557 == NULL) ? "" : " BLOCKED",
11558 (xio->io_hdr.flags &
11559 CTL_FLAG_DMA_INPROG) ? " DMA" : "",
11560 (xio->io_hdr.flags &
11561 CTL_FLAG_ABORT) ? " ABORT" : "",
11562 (xio->io_hdr.flags &
11563 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
11564 ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
11565 sbuf_finish(&sb);
11566 printf("%s\n", sbuf_data(&sb));
11567#endif
11568
11569 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
11570 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
11571 || (xio->io_hdr.flags & CTL_FLAG_ABORT))
11572 continue;
11573
11574 /*
11575 * If the abort says that the task is untagged, the
11576 * task in the queue must be untagged. Otherwise,
11577 * we just check to see whether the tag numbers
11578 * match. This is because the QLogic firmware
11579 * doesn't pass back the tag type in an abort
11580 * request.
11581 */
11582#if 0
11583 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
11584 && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
11585 || (xio->scsiio.tag_num == io->taskio.tag_num))
11586#endif
11587 /*
11588 * XXX KDM we've got problems with FC, because it
11589 * doesn't send down a tag type with aborts. So we
11590 * can only really go by the tag number...
11591 * This may cause problems with parallel SCSI.
11592 * Need to figure that out!!
11593 */
11594 if (xio->scsiio.tag_num == io->taskio.tag_num) {
11595 xio->io_hdr.flags |= CTL_FLAG_ABORT;
11596 found = 1;
11597 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
11598 !(lun->flags & CTL_LUN_PRIMARY_SC)) {
11599 union ctl_ha_msg msg_info;
11600
11750 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
11601 msg_info.hdr.nexus = io->io_hdr.nexus;
11602 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
11603 msg_info.task.tag_num = io->taskio.tag_num;
11604 msg_info.task.tag_type = io->taskio.tag_type;
11605 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11606 msg_info.hdr.original_sc = NULL;
11607 msg_info.hdr.serializing_sc = NULL;
11608#if 0
11609 printf("Sent Abort to other side\n");
11610#endif
11761 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11762 (void *)&msg_info, sizeof(msg_info), 0) !=
11763 CTL_HA_STATUS_SUCCESS) {
11764 }
11611 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11612 sizeof(msg_info.task), M_NOWAIT);
11613 }
11614#if 0
11615 printf("ctl_abort_task: found I/O to abort\n");
11616#endif
11617 }
11618 }
11619 mtx_unlock(&lun->lun_lock);
11620
11621 if (found == 0) {
11622 /*
11623 * This isn't really an error. It's entirely possible for
11624 * the abort and command completion to cross on the wire.
11625 		 * This is more of an informative/diagnostic message.
11626 */
11627#if 0
11628 printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
11629 "%u:%u:%u tag %d type %d\n",
11630 io->io_hdr.nexus.initid,
11631 io->io_hdr.nexus.targ_port,
11632 io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
11633 io->taskio.tag_type);
11634#endif
11635 }
11636 return (0);
11637}
11638
11639static void
11640ctl_run_task(union ctl_io *io)
11641{
11642 struct ctl_softc *softc = control_softc;
11643 int retval = 1;
11644 const char *task_desc;
11645
11646 CTL_DEBUG_PRINT(("ctl_run_task\n"));
11647
11648 KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
11649 	    ("ctl_run_task: Unexpected io_type %d\n",
11650 io->io_hdr.io_type));
11651
11652 task_desc = ctl_scsi_task_string(&io->taskio);
11653 if (task_desc != NULL) {
11654#ifdef NEEDTOPORT
11655 csevent_log(CSC_CTL | CSC_SHELF_SW |
11656 CTL_TASK_REPORT,
11657 csevent_LogType_Trace,
11658 csevent_Severity_Information,
11659 csevent_AlertLevel_Green,
11660 csevent_FRU_Firmware,
11661 csevent_FRU_Unknown,
11662 "CTL: received task: %s",task_desc);
11663#endif
11664 } else {
11665#ifdef NEEDTOPORT
11666 csevent_log(CSC_CTL | CSC_SHELF_SW |
11667 CTL_TASK_REPORT,
11668 csevent_LogType_Trace,
11669 csevent_Severity_Information,
11670 csevent_AlertLevel_Green,
11671 csevent_FRU_Firmware,
11672 csevent_FRU_Unknown,
11673 "CTL: received unknown task "
11674 "type: %d (%#x)",
11675 io->taskio.task_action,
11676 io->taskio.task_action);
11677#endif
11678 }
11679 switch (io->taskio.task_action) {
11680 case CTL_TASK_ABORT_TASK:
11681 retval = ctl_abort_task(io);
11682 break;
11683 case CTL_TASK_ABORT_TASK_SET:
11684 case CTL_TASK_CLEAR_TASK_SET:
11685 retval = ctl_abort_task_set(io);
11686 break;
11687 case CTL_TASK_CLEAR_ACA:
11688 break;
11689 case CTL_TASK_I_T_NEXUS_RESET:
11690 retval = ctl_i_t_nexus_reset(io);
11691 break;
11692 case CTL_TASK_LUN_RESET: {
11693 struct ctl_lun *lun;
11694 uint32_t targ_lun;
11695
11696 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11697 mtx_lock(&softc->ctl_lock);
11698 if ((targ_lun < CTL_MAX_LUNS)
11699 && (softc->ctl_luns[targ_lun] != NULL))
11700 lun = softc->ctl_luns[targ_lun];
11701 else {
11702 mtx_unlock(&softc->ctl_lock);
11703 retval = 1;
11704 break;
11705 }
11706 retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET);
11707 mtx_unlock(&softc->ctl_lock);
11708
11859 if (!(io->io_hdr.flags &
11860 CTL_FLAG_FROM_OTHER_SC)) {
11709 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
11710 union ctl_ha_msg msg_info;
11711
11863 io->io_hdr.flags |=
11864 CTL_FLAG_SENT_2OTHER_SC;
11865 msg_info.hdr.msg_type =
11866 CTL_MSG_MANAGE_TASKS;
11712 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11713 msg_info.hdr.nexus = io->io_hdr.nexus;
11868 msg_info.task.task_action =
11869 CTL_TASK_LUN_RESET;
11714 msg_info.task.task_action = CTL_TASK_LUN_RESET;
11715 msg_info.hdr.original_sc = NULL;
11716 msg_info.hdr.serializing_sc = NULL;
11872 if (CTL_HA_STATUS_SUCCESS !=
11873 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11874 (void *)&msg_info,
11875 sizeof(msg_info), 0)) {
11876 }
11717 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11718 sizeof(msg_info.task), M_WAITOK);
11719 }
11878
11879 retval = ctl_lun_reset(lun, io,
11880 CTL_UA_LUN_RESET);
11881 mtx_unlock(&softc->ctl_lock);
11720 break;
11721 }
11722 case CTL_TASK_TARGET_RESET:
11723 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
11724 break;
11725 case CTL_TASK_BUS_RESET:
11726 retval = ctl_bus_reset(softc, io);
11727 break;
11728 case CTL_TASK_PORT_LOGIN:
11729 break;
11730 case CTL_TASK_PORT_LOGOUT:
11731 break;
11732 default:
11733 printf("ctl_run_task: got unknown task management event %d\n",
11734 io->taskio.task_action);
11735 break;
11736 }
11737 if (retval == 0)
11738 io->io_hdr.status = CTL_SUCCESS;
11739 else
11740 io->io_hdr.status = CTL_ERROR;
11741 ctl_done(io);
11742}
11743
11744/*
11745 * For HA operation. Handle commands that come in from the other
11746 * controller.
11747 */
11748static void
11749ctl_handle_isc(union ctl_io *io)
11750{
11751 int free_io;
11752 struct ctl_lun *lun;
11753 struct ctl_softc *softc;
11754 uint32_t targ_lun;
11755
11756 softc = control_softc;
11757
11758 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11759 lun = softc->ctl_luns[targ_lun];
11760
11761 switch (io->io_hdr.msg_type) {
11762 case CTL_MSG_SERIALIZE:
11763 free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
11764 break;
11765 case CTL_MSG_R2R: {
11766 const struct ctl_cmd_entry *entry;
11767
11768 /*
11769 * This is only used in SER_ONLY mode.
11770 */
11771 free_io = 0;
11772 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
11773 mtx_lock(&lun->lun_lock);
11774 if (ctl_scsiio_lun_check(lun,
11775 entry, (struct ctl_scsiio *)io) != 0) {
11776 mtx_unlock(&lun->lun_lock);
11777 ctl_done(io);
11778 break;
11779 }
11780 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11781 mtx_unlock(&lun->lun_lock);
11782 ctl_enqueue_rtr(io);
11783 break;
11784 }
11785 case CTL_MSG_FINISH_IO:
11786 if (softc->ha_mode == CTL_HA_MODE_XFER) {
11787 free_io = 0;
11788 ctl_done(io);
11789 } else {
11790 free_io = 1;
11791 mtx_lock(&lun->lun_lock);
11792 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
11793 ooa_links);
11794 ctl_check_blocked(lun);
11795 mtx_unlock(&lun->lun_lock);
11796 }
11797 break;
11798 case CTL_MSG_PERS_ACTION:
11799 ctl_hndl_per_res_out_on_other_sc(
11800 (union ctl_ha_msg *)&io->presio.pr_msg);
11801 free_io = 1;
11802 break;
11803 case CTL_MSG_BAD_JUJU:
11804 free_io = 0;
11805 ctl_done(io);
11806 break;
11807 case CTL_MSG_DATAMOVE:
11808 /* Only used in XFER mode */
11809 free_io = 0;
11810 ctl_datamove_remote(io);
11811 break;
11812 case CTL_MSG_DATAMOVE_DONE:
11813 /* Only used in XFER mode */
11814 free_io = 0;
11815 io->scsiio.be_move_done(io);
11816 break;
11817 case CTL_MSG_FAILOVER:
11818 mtx_lock(&lun->lun_lock);
11819 ctl_failover_lun(lun);
11820 mtx_unlock(&lun->lun_lock);
11821 free_io = 1;
11822 break;
11823 default:
11824 free_io = 1;
11825 printf("%s: Invalid message type %d\n",
11826 __func__, io->io_hdr.msg_type);
11827 break;
11828 }
11829 if (free_io)
11830 ctl_free_io(io);
11831
11832}
11833
11834
11835/*
11836 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
11837 * there is no match.
11838 */
11839static ctl_lun_error_pattern
11840ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
11841{
11842 const struct ctl_cmd_entry *entry;
11843 ctl_lun_error_pattern filtered_pattern, pattern;
11844
11845 pattern = desc->error_pattern;
11846
11847 /*
11848 * XXX KDM we need more data passed into this function to match a
11849 * custom pattern, and we actually need to implement custom pattern
11850 * matching.
11851 */
11852 if (pattern & CTL_LUN_PAT_CMD)
11853 return (CTL_LUN_PAT_CMD);
11854
11855 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
11856 return (CTL_LUN_PAT_ANY);
11857
11858 entry = ctl_get_cmd_entry(ctsio, NULL);
11859
11860 filtered_pattern = entry->pattern & pattern;
11861
11862 /*
11863 * If the user requested specific flags in the pattern (e.g.
11864 * CTL_LUN_PAT_RANGE), make sure the command supports all of those
11865 * flags.
11866 *
11867 * If the user did not specify any flags, it doesn't matter whether
11868 * or not the command supports the flags.
11869 */
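	/*
	 * For example, a descriptor asking for CTL_LUN_PAT_WRITE |
	 * CTL_LUN_PAT_RANGE only matches write-class commands that carry
	 * an LBA range, and (below) only if the descriptor's range
	 * actually overlaps the command's.
	 */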
11870 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
11871 (pattern & ~CTL_LUN_PAT_MASK))
11872 return (CTL_LUN_PAT_NONE);
11873
11874 /*
11875 * If the user asked for a range check, see if the requested LBA
11876 * range overlaps with this command's LBA range.
11877 */
11878 if (filtered_pattern & CTL_LUN_PAT_RANGE) {
11879 uint64_t lba1;
11880 uint64_t len1;
11881 ctl_action action;
11882 int retval;
11883
11884 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
11885 if (retval != 0)
11886 return (CTL_LUN_PAT_NONE);
11887
11888 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
11889 desc->lba_range.len, FALSE);
11890 /*
11891 * A "pass" means that the LBA ranges don't overlap, so
11892 * this doesn't match the user's range criteria.
11893 */
11894 if (action == CTL_ACTION_PASS)
11895 return (CTL_LUN_PAT_NONE);
11896 }
11897
11898 return (filtered_pattern);
11899}
11900
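/*
 * Walk the LUN's error injection descriptor list and apply every action
 * whose pattern matches this command, then discard any one-shot
 * descriptors that fired.
 */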
11901static void
11902ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
11903{
11904 struct ctl_error_desc *desc, *desc2;
11905
11906 mtx_assert(&lun->lun_lock, MA_OWNED);
11907
11908 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
11909 ctl_lun_error_pattern pattern;
11910 /*
11911 * Check to see whether this particular command matches
11912 * the pattern in the descriptor.
11913 */
11914 pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
11915 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
11916 continue;
11917
11918 switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
11919 case CTL_LUN_INJ_ABORTED:
11920 ctl_set_aborted(&io->scsiio);
11921 break;
11922 case CTL_LUN_INJ_MEDIUM_ERR:
11923 ctl_set_medium_error(&io->scsiio);
11924 break;
11925 case CTL_LUN_INJ_UA:
11926 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET
11927 * OCCURRED */
11928 ctl_set_ua(&io->scsiio, 0x29, 0x00);
11929 break;
11930 case CTL_LUN_INJ_CUSTOM:
11931 /*
11932 * We're assuming the user knows what he is doing.
11933 * Just copy the sense information without doing
11934 * checks.
11935 */
11936 bcopy(&desc->custom_sense, &io->scsiio.sense_data,
11937 MIN(sizeof(desc->custom_sense),
11938 sizeof(io->scsiio.sense_data)));
11939 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
11940 io->scsiio.sense_len = SSD_FULL_SIZE;
11941 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
11942 break;
11943 case CTL_LUN_INJ_NONE:
11944 default:
11945 /*
11946 * If this is an error injection type we don't know
11947 * about, clear the continuous flag (if it is set)
11948 * so it will get deleted below.
11949 */
11950 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
11951 break;
11952 }
11953 /*
11954 * By default, each error injection action is a one-shot
11955 */
11956 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
11957 continue;
11958
11959 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
11960
11961 free(desc, M_CTL);
11962 }
11963}
11964
11965#ifdef CTL_IO_DELAY
11966static void
11967ctl_datamove_timer_wakeup(void *arg)
11968{
11969 union ctl_io *io;
11970
11971 io = (union ctl_io *)arg;
11972
11973 ctl_datamove(io);
11974}
11975#endif /* CTL_IO_DELAY */
11976
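/*
 * Begin the data phase for an I/O: locate the front end's fe_datamove
 * handler and let it move data between the backend buffers and the
 * initiator.  In HA XFER mode, an I/O that came from the peer controller
 * instead has its buffers described in CTL_MSG_DATAMOVE messages so the
 * host-facing transfer happens on the controller that originally received
 * the command.
 */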
11977void
11978ctl_datamove(union ctl_io *io)
11979{
11980 void (*fe_datamove)(union ctl_io *io);
11981
11982 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
11983
11984 CTL_DEBUG_PRINT(("ctl_datamove\n"));
11985
11986#ifdef CTL_TIME_IO
11987 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
11988 char str[256];
11989 char path_str[64];
11990 struct sbuf sb;
11991
11992 ctl_scsi_path_string(io, path_str, sizeof(path_str));
11993 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
11994
11995 sbuf_cat(&sb, path_str);
11996 switch (io->io_hdr.io_type) {
11997 case CTL_IO_SCSI:
11998 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
11999 sbuf_printf(&sb, "\n");
12000 sbuf_cat(&sb, path_str);
12001 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12002 io->scsiio.tag_num, io->scsiio.tag_type);
12003 break;
12004 case CTL_IO_TASK:
12005 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12006 "Tag Type: %d\n", io->taskio.task_action,
12007 io->taskio.tag_num, io->taskio.tag_type);
12008 break;
12009 default:
12010 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12011 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12012 break;
12013 }
12014 sbuf_cat(&sb, path_str);
12015 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
12016 (intmax_t)time_uptime - io->io_hdr.start_time);
12017 sbuf_finish(&sb);
12018 printf("%s", sbuf_data(&sb));
12019 }
12020#endif /* CTL_TIME_IO */
12021
12022#ifdef CTL_IO_DELAY
12023 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
12024 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
12025 } else {
12026 struct ctl_lun *lun;
12027
12028 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12029 if ((lun != NULL)
12030 && (lun->delay_info.datamove_delay > 0)) {
12031
12032 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
12033 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
12034 callout_reset(&io->io_hdr.delay_callout,
12035 lun->delay_info.datamove_delay * hz,
12036 ctl_datamove_timer_wakeup, io);
12037 if (lun->delay_info.datamove_type ==
12038 CTL_DELAY_TYPE_ONESHOT)
12039 lun->delay_info.datamove_delay = 0;
12040 return;
12041 }
12042 }
12043#endif
12044
12045 /*
12046 * This command has been aborted. Set the port status, so we fail
12047 * the data move.
12048 */
12049 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12050 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n",
12051 io->scsiio.tag_num, io->io_hdr.nexus.initid,
12052 io->io_hdr.nexus.targ_port,
12053 io->io_hdr.nexus.targ_lun);
12054 io->io_hdr.port_status = 31337;
12055 /*
12056 * Note that the backend, in this case, will get the
12057 * callback in its context. In other cases it may get
12058 * called in the frontend's interrupt thread context.
12059 */
12060 io->scsiio.be_move_done(io);
12061 return;
12062 }
12063
12064 /* Don't confuse frontend with zero length data move. */
12065 if (io->scsiio.kern_data_len == 0) {
12066 io->scsiio.be_move_done(io);
12067 return;
12068 }
12069
12070 /*
12071 * If we're in XFER mode and this I/O is from the other shelf
12072 * controller, we need to send the DMA to the other side to
12073 * actually transfer the data to/from the host. In serialize only
12074 * mode the transfer happens below CTL and ctl_datamove() is only
12075 * called on the machine that originally received the I/O.
12076 */
12077 if ((control_softc->ha_mode == CTL_HA_MODE_XFER)
12078 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12079 union ctl_ha_msg msg;
12080 uint32_t sg_entries_sent;
12081 int do_sg_copy;
12082 int i;
12083
12084 memset(&msg, 0, sizeof(msg));
12085 msg.hdr.msg_type = CTL_MSG_DATAMOVE;
12086 msg.hdr.original_sc = io->io_hdr.original_sc;
12087 msg.hdr.serializing_sc = io;
12088 msg.hdr.nexus = io->io_hdr.nexus;
12089 msg.dt.flags = io->io_hdr.flags;
12090 /*
12091 * We convert everything into a S/G list here. We can't
12092 * pass by reference, only by value between controllers.
12093 * So we can't pass a pointer to the S/G list, only as many
12094 * S/G entries as we can fit in here. If it's possible for
12095 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
12096 * then we need to break this up into multiple transfers.
12097 */
12098 if (io->scsiio.kern_sg_entries == 0) {
12099 msg.dt.kern_sg_entries = 1;
12100#if 0
12101 /*
12102 * Convert to a physical address if this is a
12103 * virtual address.
12104 */
12105 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
12106 msg.dt.sg_list[0].addr =
12107 io->scsiio.kern_data_ptr;
12108 } else {
12109 /*
12110 * XXX KDM use busdma here!
12111 */
12112 msg.dt.sg_list[0].addr = (void *)
12113 vtophys(io->scsiio.kern_data_ptr);
12114 }
12115#else
12116 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
12117 ("HA does not support BUS_ADDR"));
12118 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
12119#endif
12120
12121 msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
12122 do_sg_copy = 0;
12123 } else {
12124 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
12125 do_sg_copy = 1;
12126 }
12127
12128 msg.dt.kern_data_len = io->scsiio.kern_data_len;
12129 msg.dt.kern_total_len = io->scsiio.kern_total_len;
12130 msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
12131 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
12132 msg.dt.sg_sequence = 0;
12133
12134 /*
12135 * Loop until we've sent all of the S/G entries. On the
12136 * other end, we'll recompose these S/G entries into one
12137 * contiguous list before passing it to the
12138 */
12139 for (sg_entries_sent = 0; sg_entries_sent <
12140 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
12141 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/
12142 sizeof(msg.dt.sg_list[0])),
12143 msg.dt.kern_sg_entries - sg_entries_sent);
12144
12145 if (do_sg_copy != 0) {
12146 struct ctl_sg_entry *sgl;
12147 int j;
12148
12149 sgl = (struct ctl_sg_entry *)
12150 io->scsiio.kern_data_ptr;
12151				/*
12152				 * If this is in cached memory, flush the cache
12153				 * before we send the DMA request to the other
12154				 * controller.  We want to do this in either
12155				 * the read or the write case.  The read
12156				 * case is straightforward.  In the write
12157				 * case, we want to make sure nothing is
12158				 * in the local cache that could overwrite
12159				 * the DMAed data.
12160				 */
12161
12162 for (i = sg_entries_sent, j = 0;
12163 i < msg.dt.cur_sg_entries; i++, j++) {
12164#if 0
12165 if ((io->io_hdr.flags &
12166 CTL_FLAG_BUS_ADDR) == 0) {
12167 /*
12168 * XXX KDM use busdma.
12169 */
12170 msg.dt.sg_list[j].addr =(void *)
12171 vtophys(sgl[i].addr);
12172 } else {
12173 msg.dt.sg_list[j].addr =
12174 sgl[i].addr;
12175 }
12176#else
12177 KASSERT((io->io_hdr.flags &
12178 CTL_FLAG_BUS_ADDR) == 0,
12179 ("HA does not support BUS_ADDR"));
12180 msg.dt.sg_list[j].addr = sgl[i].addr;
12181#endif
12182 msg.dt.sg_list[j].len = sgl[i].len;
12183 }
12184 }
12185
12186 sg_entries_sent += msg.dt.cur_sg_entries;
12187 if (sg_entries_sent >= msg.dt.kern_sg_entries)
12188 msg.dt.sg_last = 1;
12189 else
12190 msg.dt.sg_last = 0;
12191
12192 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12193 sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
12194 sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries,
12195 M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
12196 io->io_hdr.port_status = 31341;
12197 io->scsiio.be_move_done(io);
12198 return;
12199 }
12200
12201 msg.dt.sent_sg_entries = sg_entries_sent;
12202 }
12203 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12204 } else {
12205
12206 /*
12207 * Lookup the fe_datamove() function for this particular
12208 * front end.
12209 */
12210 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12211
12212 fe_datamove(io);
12213 }
12214}
12215
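/*
 * Tell the peer controller that the local data movement for this I/O has
 * completed, passing along the SCSI status and sense data it needs to
 * finish the command on its side.
 */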
12216static void
12217ctl_send_datamove_done(union ctl_io *io, int have_lock)
12218{
12219 union ctl_ha_msg msg;
12220
12221 memset(&msg, 0, sizeof(msg));
12222
12223 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
12224 msg.hdr.original_sc = io;
12225 msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
12226 msg.hdr.nexus = io->io_hdr.nexus;
12227 msg.hdr.status = io->io_hdr.status;
12228 msg.scsi.tag_num = io->scsiio.tag_num;
12229 msg.scsi.tag_type = io->scsiio.tag_type;
12230 msg.scsi.scsi_status = io->scsiio.scsi_status;
12231 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12232 io->scsiio.sense_len);
12233 msg.scsi.sense_len = io->scsiio.sense_len;
12234 msg.scsi.sense_residual = io->scsiio.sense_residual;
12235 msg.scsi.fetd_status = io->io_hdr.port_status;
12236 msg.scsi.residual = io->scsiio.residual;
12237 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12238
12239 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12240 ctl_failover_io(io, /*have_lock*/ have_lock);
12241 return;
12242 }
12243
12244 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12245 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
12246 msg.scsi.sense_len, M_WAITOK);
12247}
12248
12249/*
12250 * The DMA to the remote side is done, now we need to tell the other side
12251 * we're done so it can continue with its data movement.
12252 */
12253static void
12254ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
12255{
12256 union ctl_io *io;
12257 int i;
12258
12259 io = rq->context;
12260
12261 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12262 printf("%s: ISC DMA write failed with error %d", __func__,
12263 rq->ret);
12264 ctl_set_internal_failure(&io->scsiio,
12265 /*sks_valid*/ 1,
12266 /*retry_count*/ rq->ret);
12267 }
12268
12269 ctl_dt_req_free(rq);
12270
12271 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12272 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12273 free(io->io_hdr.remote_sglist, M_CTL);
12274 io->io_hdr.remote_sglist = NULL;
12275 io->io_hdr.local_sglist = NULL;
12276
12277 /*
12278 * The data is in local and remote memory, so now we need to send
12279	 * status (good or bad) back to the other side.
12280 */
12281 ctl_send_datamove_done(io, /*have_lock*/ 0);
12282}
12283
12284/*
12285 * We've moved the data from the host/controller into local memory. Now we
12286 * need to push it over to the remote controller's memory.
12287 */
12288static int
12289ctl_datamove_remote_dm_write_cb(union ctl_io *io)
12290{
12291 int retval;
12292
12293 retval = 0;
12294
12295 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
12296 ctl_datamove_remote_write_cb);
12297
12298 return (retval);
12299}
12300
12301static void
12302ctl_datamove_remote_write(union ctl_io *io)
12303{
12304 int retval;
12305 void (*fe_datamove)(union ctl_io *io);
12306
12307 /*
12308 * - Get the data from the host/HBA into local memory.
12309 * - DMA memory from the local controller to the remote controller.
12310 * - Send status back to the remote controller.
12311 */
12312
12313 retval = ctl_datamove_remote_sgl_setup(io);
12314 if (retval != 0)
12315 return;
12316
12317 /* Switch the pointer over so the FETD knows what to do */
12318 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12319
12320 /*
12321 * Use a custom move done callback, since we need to send completion
12322 * back to the other controller, not to the backend on this side.
12323 */
12324 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
12325
12326 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12327
12328 fe_datamove(io);
12329
12330 return;
12331
12332}
12333
12334static int
12335ctl_datamove_remote_dm_read_cb(union ctl_io *io)
12336{
12337#if 0
12338 char str[256];
12339 char path_str[64];
12340 struct sbuf sb;
12341#endif
12342 int i;
12343
12344 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12345 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12346 free(io->io_hdr.remote_sglist, M_CTL);
12347 io->io_hdr.remote_sglist = NULL;
12348 io->io_hdr.local_sglist = NULL;
12349
12350#if 0
12351 scsi_path_string(io, path_str, sizeof(path_str));
12352 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12353 sbuf_cat(&sb, path_str);
12354 scsi_command_string(&io->scsiio, NULL, &sb);
12355 sbuf_printf(&sb, "\n");
12356 sbuf_cat(&sb, path_str);
12357 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12358 io->scsiio.tag_num, io->scsiio.tag_type);
12359 sbuf_cat(&sb, path_str);
12360 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
12361 io->io_hdr.flags, io->io_hdr.status);
12362 sbuf_finish(&sb);
12363 printk("%s", sbuf_data(&sb));
12364#endif
12365
12366
12367 /*
12368 * The read is done, now we need to send status (good or bad) back
12369 * to the other side.
12370 */
12371 ctl_send_datamove_done(io, /*have_lock*/ 0);
12372
12373 return (0);
12374}
12375
12376static void
12377ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
12378{
12379 union ctl_io *io;
12380 void (*fe_datamove)(union ctl_io *io);
12381
12382 io = rq->context;
12383
12384 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12385 printf("%s: ISC DMA read failed with error %d\n", __func__,
12386 rq->ret);
12387 ctl_set_internal_failure(&io->scsiio,
12388 /*sks_valid*/ 1,
12389 /*retry_count*/ rq->ret);
12390 }
12391
12392 ctl_dt_req_free(rq);
12393
12394 /* Switch the pointer over so the FETD knows what to do */
12395 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12396
12397 /*
12398 * Use a custom move done callback, since we need to send completion
12399 * back to the other controller, not to the backend on this side.
12400 */
12401 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
12402
12403 /* XXX KDM add checks like the ones in ctl_datamove? */
12404
12405 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12406
12407 fe_datamove(io);
12408}
12409
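/*
 * Build the local scatter/gather list used to shadow the remote
 * controller's buffers.  Segments are allocated in chunks of at most
 * CTL_HA_DATAMOVE_SEGMENT bytes, so the local and remote lists may differ
 * in segment sizes even though they cover the same total length.
 */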
12410static int
12411ctl_datamove_remote_sgl_setup(union ctl_io *io)
12412{
12413 struct ctl_sg_entry *local_sglist, *remote_sglist;
12414 struct ctl_softc *softc;
12415 uint32_t len_to_go;
12416 int retval;
12417 int i;
12418
12419 retval = 0;
12420 softc = control_softc;
12421 local_sglist = io->io_hdr.local_sglist;
12422 remote_sglist = io->io_hdr.remote_sglist;
12423 len_to_go = io->scsiio.kern_data_len;
12424
12425 /*
12426 * The difficult thing here is that the size of the various
12427 * S/G segments may be different than the size from the
12428 * remote controller. That'll make it harder when DMAing
12429 * the data back to the other side.
12430 */
12431 for (i = 0; len_to_go > 0; i++) {
12432 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT);
12433 local_sglist[i].addr =
12434 malloc(local_sglist[i].len, M_CTL, M_WAITOK);
12435
12436 len_to_go -= local_sglist[i].len;
12437 }
12438 /*
12439 * Reset the number of S/G entries accordingly. The original
12440 * number of S/G entries is available in rem_sg_entries.
12441 */
12442 io->scsiio.kern_sg_entries = i;
12443
12444#if 0
12445 printf("%s: kern_sg_entries = %d\n", __func__,
12446 io->scsiio.kern_sg_entries);
12447 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12448 printf("%s: sg[%d] = %p, %d\n", __func__, i,
12449 local_sglist[i].addr, local_sglist[i].len);
12450#endif
12451
12452 return (retval);
12453}
12454
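/*
 * Issue the controller-to-controller DMA described by the local and remote
 * scatter/gather lists.  The two lists may be segmented differently, so
 * both are walked with independent cursors and a transfer request is
 * issued for each overlapping chunk; the completion callback is attached
 * only to the final request.
 */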
12455static int
12456ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
12457 ctl_ha_dt_cb callback)
12458{
12459 struct ctl_ha_dt_req *rq;
12460 struct ctl_sg_entry *remote_sglist, *local_sglist;
12461 uint32_t local_used, remote_used, total_used;
12462 int i, j, isc_ret;
12463
12464 rq = ctl_dt_req_alloc();
12465
12466 /*
12467 * If we failed to allocate the request, and if the DMA didn't fail
12468 * anyway, set busy status. This is just a resource allocation
12469 * failure.
12470 */
12471 if ((rq == NULL)
12472 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE))
12473 ctl_set_busy(&io->scsiio);
12474
12475 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
12476
12477 if (rq != NULL)
12478 ctl_dt_req_free(rq);
12479
12480 /*
12481 * The data move failed. We need to return status back
12482 * to the other controller. No point in trying to DMA
12483 * data to the remote controller.
12484 */
12485
12486 ctl_send_datamove_done(io, /*have_lock*/ 0);
12487
12488 return (1);
12489 }
12490
12491 local_sglist = io->io_hdr.local_sglist;
12492 remote_sglist = io->io_hdr.remote_sglist;
12493 local_used = 0;
12494 remote_used = 0;
12495 total_used = 0;
12496
12497 /*
12498 * Pull/push the data over the wire from/to the other controller.
12499 * This takes into account the possibility that the local and
12500 * remote sglists may not be identical in terms of the size of
12501 * the elements and the number of elements.
12502 *
12503 * One fundamental assumption here is that the length allocated for
12504 * both the local and remote sglists is identical. Otherwise, we've
12505 * essentially got a coding error of some sort.
12506 */
12507 isc_ret = CTL_HA_STATUS_SUCCESS;
12508 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
12509 uint32_t cur_len;
12510 uint8_t *tmp_ptr;
12511
12512 rq->command = command;
12513 rq->context = io;
12514
12515 /*
12516 * Both pointers should be aligned. But it is possible
12517 * that the allocation length is not. They should both
12518 * also have enough slack left over at the end, though,
12519 * to round up to the next 8 byte boundary.
12520 */
12521 cur_len = MIN(local_sglist[i].len - local_used,
12522 remote_sglist[j].len - remote_used);
12523 rq->size = cur_len;
12524
12525 tmp_ptr = (uint8_t *)local_sglist[i].addr;
12526 tmp_ptr += local_used;
12527
12528#if 0
12529 /* Use physical addresses when talking to ISC hardware */
12530 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
12531 /* XXX KDM use busdma */
12532 rq->local = vtophys(tmp_ptr);
12533 } else
12534 rq->local = tmp_ptr;
12535#else
12536 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
12537 ("HA does not support BUS_ADDR"));
12538 rq->local = tmp_ptr;
12539#endif
12540
12541 tmp_ptr = (uint8_t *)remote_sglist[j].addr;
12542 tmp_ptr += remote_used;
12543 rq->remote = tmp_ptr;
12544
12545 rq->callback = NULL;
12546
12547 local_used += cur_len;
12548 if (local_used >= local_sglist[i].len) {
12549 i++;
12550 local_used = 0;
12551 }
12552
12553 remote_used += cur_len;
12554 if (remote_used >= remote_sglist[j].len) {
12555 j++;
12556 remote_used = 0;
12557 }
12558 total_used += cur_len;
12559
12560 if (total_used >= io->scsiio.kern_data_len)
12561 rq->callback = callback;
12562
12563#if 0
12564 printf("%s: %s: local %#x remote %#x size %d\n", __func__,
12565 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
12566 rq->local, rq->remote, rq->size);
12567#endif
12568
12569 isc_ret = ctl_dt_single(rq);
12570 if (isc_ret > CTL_HA_STATUS_SUCCESS)
12571 break;
12572 }
12573 if (isc_ret != CTL_HA_STATUS_WAIT) {
12574 rq->ret = isc_ret;
12575 callback(rq);
12576 }
12577
12578 return (0);
12579}
12580
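/*
 * Read half of the XFER-mode datamove: DMA the data from the peer
 * controller's memory into locally allocated buffers, after which the
 * completion callback hands it to the front end for transfer to the host.
 */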
12581static void
12582ctl_datamove_remote_read(union ctl_io *io)
12583{
12584 int retval;
12585 int i;
12586
12587 /*
12588 * This will send an error to the other controller in the case of a
12589 * failure.
12590 */
12591 retval = ctl_datamove_remote_sgl_setup(io);
12592 if (retval != 0)
12593 return;
12594
12595 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
12596 ctl_datamove_remote_read_cb);
12597 if (retval != 0) {
12598 /*
12599 * Make sure we free memory if there was an error.. The
12600 * ctl_datamove_remote_xfer() function will send the
12601 * datamove done message, or call the callback with an
12602 * error if there is a problem.
12603 */
12604 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12605 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12606 free(io->io_hdr.remote_sglist, M_CTL);
12607 io->io_hdr.remote_sglist = NULL;
12608 io->io_hdr.local_sglist = NULL;
12609 }
12610
12611 return;
12612}
12613
12614/*
12615 * Process a datamove request from the other controller. This is used for
12616 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory
12617 * first. Once that is complete, the data gets DMAed into the remote
12618 * controller's memory. For reads, we DMA from the remote controller's
12619 * memory into our memory first, and then move it out to the FETD.
12620 */
12621static void
12622ctl_datamove_remote(union ctl_io *io)
12623{
12624
12625 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
12626
12627 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12628 ctl_failover_io(io, /*have_lock*/ 0);
12629 return;
12630 }
12631
12632 /*
12633 * Note that we look for an aborted I/O here, but don't do some of
12634 * the other checks that ctl_datamove() normally does.
12635 * We don't need to run the datamove delay code, since that should
12636 * have been done if need be on the other controller.
12637 */
12638 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12639 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__,
12640 io->scsiio.tag_num, io->io_hdr.nexus.initid,
12641 io->io_hdr.nexus.targ_port,
12642 io->io_hdr.nexus.targ_lun);
12643 io->io_hdr.port_status = 31338;
12644 ctl_send_datamove_done(io, /*have_lock*/ 0);
12645 return;
12646 }
12647
12648 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
12649 ctl_datamove_remote_write(io);
12650 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
12651 ctl_datamove_remote_read(io);
12652 else {
12653 io->io_hdr.port_status = 31339;
12654 ctl_send_datamove_done(io, /*have_lock*/ 0);
12655 }
12656}
12657
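/*
 * Final completion processing for an I/O: update per-port statistics, run
 * any configured error injection, remove the request from the OOA queue
 * and unblock commands that were waiting behind it, then hand the I/O back
 * to the front end (or to the peer controller in HA configurations).
 */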
12658static int
12659ctl_process_done(union ctl_io *io)
12660{
12661 struct ctl_lun *lun;
12662 struct ctl_softc *softc = control_softc;
12663 void (*fe_done)(union ctl_io *io);
12664 union ctl_ha_msg msg;
12665 uint32_t targ_port = io->io_hdr.nexus.targ_port;
12666
12667 CTL_DEBUG_PRINT(("ctl_process_done\n"));
12668
12669 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0)
12670 fe_done = softc->ctl_ports[targ_port]->fe_done;
12671 else
12672 fe_done = NULL;
12673
12674#ifdef CTL_TIME_IO
12675 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12676 char str[256];
12677 char path_str[64];
12678 struct sbuf sb;
12679
12680 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12681 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12682
12683 sbuf_cat(&sb, path_str);
12684 switch (io->io_hdr.io_type) {
12685 case CTL_IO_SCSI:
12686 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12687 sbuf_printf(&sb, "\n");
12688 sbuf_cat(&sb, path_str);
12689 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12690 io->scsiio.tag_num, io->scsiio.tag_type);
12691 break;
12692 case CTL_IO_TASK:
12693 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12694 "Tag Type: %d\n", io->taskio.task_action,
12695 io->taskio.tag_num, io->taskio.tag_type);
12696 break;
12697 default:
12698 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12699 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12700 break;
12701 }
12702 sbuf_cat(&sb, path_str);
12703 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
12704 (intmax_t)time_uptime - io->io_hdr.start_time);
12705 sbuf_finish(&sb);
12706 printf("%s", sbuf_data(&sb));
12707 }
12708#endif /* CTL_TIME_IO */
12709
12710 switch (io->io_hdr.io_type) {
12711 case CTL_IO_SCSI:
12712 break;
12713 case CTL_IO_TASK:
12714 if (ctl_debug & CTL_DEBUG_INFO)
12715 ctl_io_error_print(io, NULL);
12716 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
12717 ctl_free_io(io);
12718 else
12719 fe_done(io);
12720 return (CTL_RETVAL_COMPLETE);
12721 default:
12722 panic("ctl_process_done: invalid io type %d\n",
12723 io->io_hdr.io_type);
12724 break; /* NOTREACHED */
12725 }
12726
12727 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12728 if (lun == NULL) {
12729 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
12730 io->io_hdr.nexus.targ_mapped_lun));
12731 goto bailout;
12732 }
12733
12734 mtx_lock(&lun->lun_lock);
12735
12736 /*
12737 * Check to see if we have any errors to inject here. We only
12738 * inject errors for commands that don't already have errors set.
12739 */
12740 if ((STAILQ_FIRST(&lun->error_list) != NULL) &&
12741 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
12742 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
12743 ctl_inject_error(lun, io);
12744
12745 /*
12746 * XXX KDM how do we treat commands that aren't completed
12747 * successfully?
12748 *
12749 * XXX KDM should we also track I/O latency?
12750 */
12751 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
12752 io->io_hdr.io_type == CTL_IO_SCSI) {
12753#ifdef CTL_TIME_IO
12754 struct bintime cur_bt;
12755#endif
12756 int type;
12757
12758 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
12759 CTL_FLAG_DATA_IN)
12760 type = CTL_STATS_READ;
12761 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
12762 CTL_FLAG_DATA_OUT)
12763 type = CTL_STATS_WRITE;
12764 else
12765 type = CTL_STATS_NO_IO;
12766
12767 lun->stats.ports[targ_port].bytes[type] +=
12768 io->scsiio.kern_total_len;
12769 lun->stats.ports[targ_port].operations[type]++;
12770#ifdef CTL_TIME_IO
12771 bintime_add(&lun->stats.ports[targ_port].dma_time[type],
12772 &io->io_hdr.dma_bt);
12773 lun->stats.ports[targ_port].num_dmas[type] +=
12774 io->io_hdr.num_dmas;
12775 getbintime(&cur_bt);
12776 bintime_sub(&cur_bt, &io->io_hdr.start_bt);
12777 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
12778#endif
12779 }
12780
12781 /*
12782 * Remove this from the OOA queue.
12783 */
12784 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
12785#ifdef CTL_TIME_IO
12786 if (TAILQ_EMPTY(&lun->ooa_queue))
12787 lun->last_busy = getsbinuptime();
12788#endif
12789
12790 /*
12791 * Run through the blocked queue on this LUN and see if anything
12792 * has become unblocked, now that this transaction is done.
12793 */
12794 ctl_check_blocked(lun);
12795
12796 /*
12797 * If the LUN has been invalidated, free it if there is nothing
12798 * left on its OOA queue.
12799 */
12800 if ((lun->flags & CTL_LUN_INVALID)
12801 && TAILQ_EMPTY(&lun->ooa_queue)) {
12802 mtx_unlock(&lun->lun_lock);
12803 mtx_lock(&softc->ctl_lock);
12804 ctl_free_lun(lun);
12805 mtx_unlock(&softc->ctl_lock);
12806 } else
12807 mtx_unlock(&lun->lun_lock);
12808
12809bailout:
12810
12811 /*
12812 * If this command has been aborted, make sure we set the status
12813 * properly. The FETD is responsible for freeing the I/O and doing
12814 * whatever it needs to do to clean up its state.
12815 */
12816 if (io->io_hdr.flags & CTL_FLAG_ABORT)
12817 ctl_set_task_aborted(&io->scsiio);
12818
12819 /*
12820 * If enabled, print command error status.
12821 */
12822 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
12823 (ctl_debug & CTL_DEBUG_INFO) != 0)
12824 ctl_io_error_print(io, NULL);
12825
12826 /*
12827 * Tell the FETD or the other shelf controller we're done with this
12828 * command. Note that only SCSI commands get to this point. Task
12829 * management commands are completed above.
12830 */
12831 if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
12832 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
12833 memset(&msg, 0, sizeof(msg));
12834 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
12835 msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
12836 msg.hdr.nexus = io->io_hdr.nexus;
12837 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12838 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
12839 M_WAITOK);
12840 }
12841 if ((softc->ha_mode == CTL_HA_MODE_XFER)
12842 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12843 memset(&msg, 0, sizeof(msg));
12844 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
12845 msg.hdr.original_sc = io->io_hdr.original_sc;
12846 msg.hdr.nexus = io->io_hdr.nexus;
12847 msg.hdr.status = io->io_hdr.status;
12848 msg.scsi.scsi_status = io->scsiio.scsi_status;
12849 msg.scsi.tag_num = io->scsiio.tag_num;
12850 msg.scsi.tag_type = io->scsiio.tag_type;
12851 msg.scsi.sense_len = io->scsiio.sense_len;
12852 msg.scsi.sense_residual = io->scsiio.sense_residual;
12853 msg.scsi.residual = io->scsiio.residual;
12854 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12855 io->scsiio.sense_len);
12856 /*
12857 * We copy this whether or not this is an I/O-related
12858 * command. Otherwise, we'd have to go and check to see
12859 * whether it's a read/write command, and it really isn't
12860 * worth it.
12861 */
12862 memcpy(&msg.scsi.lbalen,
12863 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
12864 sizeof(msg.scsi.lbalen));
12865
12866 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12867 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
12868 msg.scsi.sense_len, M_WAITOK);
12869 ctl_free_io(io);
12870 } else
12871 fe_done(io);
12872
12873 return (CTL_RETVAL_COMPLETE);
12874}
12875
12876#ifdef CTL_WITH_CA
12877/*
12878 * Front end should call this if it doesn't do autosense. When the request
12879 * sense comes back in from the initiator, we'll dequeue this and send it.
12880 */
12881int
12882ctl_queue_sense(union ctl_io *io)
12883{
12884 struct ctl_lun *lun;
12885 struct ctl_port *port;
12886 struct ctl_softc *softc;
12887 uint32_t initidx, targ_lun;
12888
12889 softc = control_softc;
12890
12891 CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
12892
12893 /*
12894 * LUN lookup will likely move to the ctl_work_thread() once we
12895 * have our new queueing infrastructure (that doesn't put things on
12896 * a per-LUN queue initially). That is so that we can handle
12897 * things like an INQUIRY to a LUN that we don't have enabled. We
12898 * can't deal with that right now.
12899 */
12900 mtx_lock(&softc->ctl_lock);
12901
12902 /*
12903 * If we don't have a LUN for this, just toss the sense
12904 * information.
12905 */
12906	port = ctl_io_port(&io->io_hdr);
12907 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
12908 if ((targ_lun < CTL_MAX_LUNS)
12909 && (softc->ctl_luns[targ_lun] != NULL))
12910 lun = softc->ctl_luns[targ_lun];
12911 else
12912 goto bailout;
12913
12914 initidx = ctl_get_initindex(&io->io_hdr.nexus);
12915
12916 mtx_lock(&lun->lun_lock);
12917 /*
12918 * Already have CA set for this LUN...toss the sense information.
12919 */
12920 if (ctl_is_set(lun->have_ca, initidx)) {
12921 mtx_unlock(&lun->lun_lock);
12922 goto bailout;
12923 }
12924
12925 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
12926 MIN(sizeof(lun->pending_sense[initidx]),
12927 sizeof(io->scsiio.sense_data)));
12928 ctl_set_mask(lun->have_ca, initidx);
12929 mtx_unlock(&lun->lun_lock);
12930
12931bailout:
12932 mtx_unlock(&softc->ctl_lock);
12933
12934 ctl_free_io(io);
12935
12936 return (CTL_RETVAL_COMPLETE);
12937}
12938#endif
12939
12940/*
12941 * Primary command inlet from frontend ports. All SCSI and task I/O
12942 * requests must go through this function.
12943 */
12944int
12945ctl_queue(union ctl_io *io)
12946{
12947 struct ctl_port *port;
12948
12949 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
12950
12951#ifdef CTL_TIME_IO
12952 io->io_hdr.start_time = time_uptime;
12953 getbintime(&io->io_hdr.start_bt);
12954#endif /* CTL_TIME_IO */
12955
12956 /* Map FE-specific LUN ID into global one. */
12957 port = ctl_io_port(&io->io_hdr);
12958 io->io_hdr.nexus.targ_mapped_lun =
12959 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
12960
12961 switch (io->io_hdr.io_type) {
12962 case CTL_IO_SCSI:
12963 case CTL_IO_TASK:
12964 if (ctl_debug & CTL_DEBUG_CDB)
12965 ctl_io_print(io);
12966 ctl_enqueue_incoming(io);
12967 break;
12968 default:
12969 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
12970 return (EINVAL);
12971 }
12972
12973 return (CTL_RETVAL_COMPLETE);
12974}
12975
12976#ifdef CTL_IO_DELAY
12977static void
12978ctl_done_timer_wakeup(void *arg)
12979{
12980 union ctl_io *io;
12981
12982 io = (union ctl_io *)arg;
12983 ctl_done(io);
12984}
12985#endif /* CTL_IO_DELAY */
12986
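/*
 * Common completion entry point.  Internal copies of an I/O bypass normal
 * completion handling, optional completion delays are applied, and
 * everything else is queued to a worker thread for final processing in
 * ctl_process_done().
 */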
12987void
12988ctl_done(union ctl_io *io)
12989{
12990
12991 /*
12992 * Enable this to catch duplicate completion issues.
12993 */
12994#if 0
12995 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
12996 printf("%s: type %d msg %d cdb %x iptl: "
12997 "%u:%u:%u tag 0x%04x "
12998 "flag %#x status %x\n",
12999 __func__,
13000 io->io_hdr.io_type,
13001 io->io_hdr.msg_type,
13002 io->scsiio.cdb[0],
13003 io->io_hdr.nexus.initid,
13004 io->io_hdr.nexus.targ_port,
13005 io->io_hdr.nexus.targ_lun,
13006 (io->io_hdr.io_type ==
13007 CTL_IO_TASK) ?
13008 io->taskio.tag_num :
13009 io->scsiio.tag_num,
13010 io->io_hdr.flags,
13011 io->io_hdr.status);
13012 } else
13013 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
13014#endif
13015
13016 /*
13017 * This is an internal copy of an I/O, and should not go through
13018 * the normal done processing logic.
13019 */
13020 if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
13021 return;
13022
13023#ifdef CTL_IO_DELAY
13024 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
13025 struct ctl_lun *lun;
13026
13027 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13028
13029 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
13030 } else {
13031 struct ctl_lun *lun;
13032
13033 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13034
13035 if ((lun != NULL)
13036 && (lun->delay_info.done_delay > 0)) {
13037
13038 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
13039 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
13040 callout_reset(&io->io_hdr.delay_callout,
13041 lun->delay_info.done_delay * hz,
13042 ctl_done_timer_wakeup, io);
13043 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
13044 lun->delay_info.done_delay = 0;
13045 return;
13046 }
13047 }
13048#endif /* CTL_IO_DELAY */
13049
13050 ctl_enqueue_done(io);
13051}
13052
13053static void
13054ctl_work_thread(void *arg)
13055{
13056 struct ctl_thread *thr = (struct ctl_thread *)arg;
13057 struct ctl_softc *softc = thr->ctl_softc;
13058 union ctl_io *io;
13059 int retval;
13060
13061 CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
13062
13063 for (;;) {
13064 retval = 0;
13065
13066 /*
13067 * We handle the queues in this order:
13068 * - ISC
13069 * - done queue (to free up resources, unblock other commands)
13070 * - RtR queue
13071 * - incoming queue
13072 *
13073 * If those queues are empty, we break out of the loop and
13074 * go to sleep.
13075 */
13076 mtx_lock(&thr->queue_lock);
13077 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
13078 if (io != NULL) {
13079 STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
13080 mtx_unlock(&thr->queue_lock);
13081 ctl_handle_isc(io);
13082 continue;
13083 }
13084 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
13085 if (io != NULL) {
13086 STAILQ_REMOVE_HEAD(&thr->done_queue, links);
13087 /* clear any blocked commands, call fe_done */
13088 mtx_unlock(&thr->queue_lock);
13089 retval = ctl_process_done(io);
13090 continue;
13091 }
13092 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
13093 if (io != NULL) {
13094 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
13095 mtx_unlock(&thr->queue_lock);
13096 if (io->io_hdr.io_type == CTL_IO_TASK)
13097 ctl_run_task(io);
13098 else
13099 ctl_scsiio_precheck(softc, &io->scsiio);
13100 continue;
13101 }
13102 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
13103 if (io != NULL) {
13104 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
13105 mtx_unlock(&thr->queue_lock);
13106 retval = ctl_scsiio(&io->scsiio);
13107 if (retval != CTL_RETVAL_COMPLETE)
13108 CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
13109 continue;
13110 }
13111
13112 /* Sleep until we have something to do. */
13113 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
13114 }
13115}
13116
13117static void
13118ctl_lun_thread(void *arg)
13119{
13120 struct ctl_softc *softc = (struct ctl_softc *)arg;
13121 struct ctl_be_lun *be_lun;
13122 int retval;
13123
13124 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
13125
13126 for (;;) {
13127 retval = 0;
13128 mtx_lock(&softc->ctl_lock);
13129 be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
13130 if (be_lun != NULL) {
13131 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
13132 mtx_unlock(&softc->ctl_lock);
13133 ctl_create_lun(be_lun);
13134 continue;
13135 }
13136
13137 /* Sleep until we have something to do. */
13138 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
13139 PDROP | PRIBIO, "-", 0);
13140 }
13141}
13142
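/*
 * Periodically evaluate the logical block provisioning thresholds for each
 * LUN and establish or clear the "thin provisioning soft threshold
 * reached" unit attention, notifying the peer controller when running in
 * HA XFER mode.
 */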
13143static void
13144ctl_thresh_thread(void *arg)
13145{
13146 struct ctl_softc *softc = (struct ctl_softc *)arg;
13147 struct ctl_lun *lun;
13148 struct ctl_be_lun *be_lun;
13149 struct scsi_da_rw_recovery_page *rwpage;
13150 struct ctl_logical_block_provisioning_page *page;
13151 const char *attr;
13152 union ctl_ha_msg msg;
13153 uint64_t thres, val;
13154 int i, e, set;
13155
13156 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
13157
13158 for (;;) {
13159 mtx_lock(&softc->ctl_lock);
13160 STAILQ_FOREACH(lun, &softc->lun_list, links) {
13161 be_lun = lun->be_lun;
13162 if ((lun->flags & CTL_LUN_DISABLED) ||
13163 (lun->flags & CTL_LUN_OFFLINE) ||
13164 lun->backend->lun_attr == NULL)
13165 continue;
13166 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
13167 softc->ha_mode == CTL_HA_MODE_XFER)
13168 continue;
13169 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
13170 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
13171 continue;
13172 e = 0;
13173 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
13174 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
13175 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
13176 continue;
13177 thres = scsi_4btoul(page->descr[i].count);
13178 thres <<= CTL_LBP_EXPONENT;
13179 switch (page->descr[i].resource) {
13180 case 0x01:
13181 attr = "blocksavail";
13182 break;
13183 case 0x02:
13184 attr = "blocksused";
13185 break;
13186 case 0xf1:
13187 attr = "poolblocksavail";
13188 break;
13189 case 0xf2:
13190 attr = "poolblocksused";
13191 break;
13192 default:
13193 continue;
13194 }
13195 mtx_unlock(&softc->ctl_lock); // XXX
13196 val = lun->backend->lun_attr(
13197 lun->be_lun->be_lun, attr);
13198 mtx_lock(&softc->ctl_lock);
13199 if (val == UINT64_MAX)
13200 continue;
13201 if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
13202 == SLBPPD_ARMING_INC)
13203 e |= (val >= thres);
13204 else
13205 e |= (val <= thres);
13206 }
13207 mtx_lock(&lun->lun_lock);
13208 if (e) {
13209 if (lun->lasttpt == 0 ||
13210 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
13211 lun->lasttpt = time_uptime;
13212 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
13213 set = 1;
13214 } else
13215 set = 0;
13216 } else {
13217 lun->lasttpt = 0;
13218 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
13219 set = -1;
13220 }
13221 mtx_unlock(&lun->lun_lock);
13222 if (set != 0 &&
13223 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
13224 /* Send msg to other side. */
13225 bzero(&msg.ua, sizeof(msg.ua));
13226 msg.hdr.msg_type = CTL_MSG_UA;
13227 msg.hdr.nexus.initid = -1;
13228 msg.hdr.nexus.targ_port = -1;
13229 msg.hdr.nexus.targ_lun = lun->lun;
13230 msg.hdr.nexus.targ_mapped_lun = lun->lun;
13231 msg.ua.ua_all = 1;
13232 msg.ua.ua_set = (set > 0);
13233 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
13234 mtx_unlock(&softc->ctl_lock); // XXX
13235 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13236 sizeof(msg.ua), M_WAITOK);
13237 mtx_lock(&softc->ctl_lock);
13238 }
13239 }
13240 mtx_unlock(&softc->ctl_lock);
13241 pause("-", CTL_LBP_PERIOD * hz);
13242 }
13243}
13244
13245static void
13246ctl_enqueue_incoming(union ctl_io *io)
13247{
13248 struct ctl_softc *softc = control_softc;
13249 struct ctl_thread *thr;
13250 u_int idx;
13251
13252 idx = (io->io_hdr.nexus.targ_port * 127 +
13253 io->io_hdr.nexus.initid) % worker_threads;
13254 thr = &softc->threads[idx];
13255 mtx_lock(&thr->queue_lock);
13256 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
13257 mtx_unlock(&thr->queue_lock);
13258 wakeup(thr);
13259}
13260
13261static void
13262ctl_enqueue_rtr(union ctl_io *io)
13263{
13264 struct ctl_softc *softc = control_softc;
13265 struct ctl_thread *thr;
13266
13267 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13268 mtx_lock(&thr->queue_lock);
13269 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
13270 mtx_unlock(&thr->queue_lock);
13271 wakeup(thr);
13272}
13273
13274static void
13275ctl_enqueue_done(union ctl_io *io)
13276{
13277 struct ctl_softc *softc = control_softc;
13278 struct ctl_thread *thr;
13279
13280 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13281 mtx_lock(&thr->queue_lock);
13282 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
13283 mtx_unlock(&thr->queue_lock);
13284 wakeup(thr);
13285}
13286
13287static void
13288ctl_enqueue_isc(union ctl_io *io)
13289{
13290 struct ctl_softc *softc = control_softc;
13291 struct ctl_thread *thr;
13292
13293 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13294 mtx_lock(&thr->queue_lock);
13295 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
13296 mtx_unlock(&thr->queue_lock);
13297 wakeup(thr);
13298}
13299
13300/*
13301 * vim: ts=8
13302 */