Deleted Added
sdiff udiff text old ( 312582 ) new ( 312834 )
full compact
1/*-
2 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
3 * Copyright (c) 2012 The FreeBSD Foundation
4 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Portions of this software were developed by Edward Tomasz Napierala
8 * under sponsorship from the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17 * substantially similar to the "NO WARRANTY" disclaimer below
18 * ("Disclaimer") and any redistribution must be conditioned upon
19 * including a substantially similar Disclaimer requirement for further
20 * binary redistribution.
21 *
22 * NO WARRANTY
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGES.
34 *
35 * $Id$
36 */
37/*
38 * CAM Target Layer, a SCSI device emulation subsystem.
39 *
40 * Author: Ken Merry <ken@FreeBSD.org>
41 */
42
43#define _CTL_C
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: stable/11/sys/cam/ctl/ctl.c 312582 2017-01-21 08:39:01Z mav $");
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/ctype.h>
51#include <sys/kernel.h>
52#include <sys/types.h>
53#include <sys/kthread.h>
54#include <sys/bio.h>
55#include <sys/fcntl.h>
56#include <sys/lock.h>
57#include <sys/module.h>
58#include <sys/mutex.h>
59#include <sys/condvar.h>
60#include <sys/malloc.h>
61#include <sys/conf.h>
62#include <sys/ioccom.h>
63#include <sys/queue.h>
64#include <sys/sbuf.h>
65#include <sys/smp.h>
66#include <sys/endian.h>
67#include <sys/sysctl.h>
68#include <vm/uma.h>
69
70#include <cam/cam.h>
71#include <cam/scsi/scsi_all.h>
72#include <cam/scsi/scsi_cd.h>
73#include <cam/scsi/scsi_da.h>
74#include <cam/ctl/ctl_io.h>
75#include <cam/ctl/ctl.h>
76#include <cam/ctl/ctl_frontend.h>
77#include <cam/ctl/ctl_util.h>
78#include <cam/ctl/ctl_backend.h>
79#include <cam/ctl/ctl_ioctl.h>
80#include <cam/ctl/ctl_ha.h>
81#include <cam/ctl/ctl_private.h>
82#include <cam/ctl/ctl_debug.h>
83#include <cam/ctl/ctl_scsi_all.h>
84#include <cam/ctl/ctl_error.h>
85
86struct ctl_softc *control_softc = NULL;
87
88/*
89 * Template mode pages.
90 */
91
92/*
93 * Note that these are default values only. The actual values will be
94 * filled in when the user does a mode sense.
95 */
96const static struct scsi_da_rw_recovery_page rw_er_page_default = {
97 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
98 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
99 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
100 /*read_retry_count*/0,
101 /*correction_span*/0,
102 /*head_offset_count*/0,
103 /*data_strobe_offset_cnt*/0,
104 /*byte8*/SMS_RWER_LBPERE,
105 /*write_retry_count*/0,
106 /*reserved2*/0,
107 /*recovery_time_limit*/{0, 0},
108};
109
110const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
111 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
112 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
113 /*byte3*/SMS_RWER_PER,
114 /*read_retry_count*/0,
115 /*correction_span*/0,
116 /*head_offset_count*/0,
117 /*data_strobe_offset_cnt*/0,
118 /*byte8*/SMS_RWER_LBPERE,
119 /*write_retry_count*/0,
120 /*reserved2*/0,
121 /*recovery_time_limit*/{0, 0},
122};
123
124const static struct scsi_format_page format_page_default = {
125 /*page_code*/SMS_FORMAT_DEVICE_PAGE,
126 /*page_length*/sizeof(struct scsi_format_page) - 2,
127 /*tracks_per_zone*/ {0, 0},
128 /*alt_sectors_per_zone*/ {0, 0},
129 /*alt_tracks_per_zone*/ {0, 0},
130 /*alt_tracks_per_lun*/ {0, 0},
131 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
132 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
133 /*bytes_per_sector*/ {0, 0},
134 /*interleave*/ {0, 0},
135 /*track_skew*/ {0, 0},
136 /*cylinder_skew*/ {0, 0},
137 /*flags*/ SFP_HSEC,
138 /*reserved*/ {0, 0, 0}
139};
140
141const static struct scsi_format_page format_page_changeable = {
142 /*page_code*/SMS_FORMAT_DEVICE_PAGE,
143 /*page_length*/sizeof(struct scsi_format_page) - 2,
144 /*tracks_per_zone*/ {0, 0},
145 /*alt_sectors_per_zone*/ {0, 0},
146 /*alt_tracks_per_zone*/ {0, 0},
147 /*alt_tracks_per_lun*/ {0, 0},
148 /*sectors_per_track*/ {0, 0},
149 /*bytes_per_sector*/ {0, 0},
150 /*interleave*/ {0, 0},
151 /*track_skew*/ {0, 0},
152 /*cylinder_skew*/ {0, 0},
153 /*flags*/ 0,
154 /*reserved*/ {0, 0, 0}
155};
156
157const static struct scsi_rigid_disk_page rigid_disk_page_default = {
158 /*page_code*/SMS_RIGID_DISK_PAGE,
159 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
160 /*cylinders*/ {0, 0, 0},
161 /*heads*/ CTL_DEFAULT_HEADS,
162 /*start_write_precomp*/ {0, 0, 0},
163 /*start_reduced_current*/ {0, 0, 0},
164 /*step_rate*/ {0, 0},
165 /*landing_zone_cylinder*/ {0, 0, 0},
166 /*rpl*/ SRDP_RPL_DISABLED,
167 /*rotational_offset*/ 0,
168 /*reserved1*/ 0,
169 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
170 CTL_DEFAULT_ROTATION_RATE & 0xff},
171 /*reserved2*/ {0, 0}
172};
173
174const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
175 /*page_code*/SMS_RIGID_DISK_PAGE,
176 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
177 /*cylinders*/ {0, 0, 0},
178 /*heads*/ 0,
179 /*start_write_precomp*/ {0, 0, 0},
180 /*start_reduced_current*/ {0, 0, 0},
181 /*step_rate*/ {0, 0},
182 /*landing_zone_cylinder*/ {0, 0, 0},
183 /*rpl*/ 0,
184 /*rotational_offset*/ 0,
185 /*reserved1*/ 0,
186 /*rotation_rate*/ {0, 0},
187 /*reserved2*/ {0, 0}
188};
189
190const static struct scsi_da_verify_recovery_page verify_er_page_default = {
191 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
192 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
193 /*byte3*/0,
194 /*read_retry_count*/0,
195 /*reserved*/{ 0, 0, 0, 0, 0, 0 },
196 /*recovery_time_limit*/{0, 0},
197};
198
199const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
200 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
201 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
202 /*byte3*/SMS_VER_PER,
203 /*read_retry_count*/0,
204 /*reserved*/{ 0, 0, 0, 0, 0, 0 },
205 /*recovery_time_limit*/{0, 0},
206};
207
208const static struct scsi_caching_page caching_page_default = {
209 /*page_code*/SMS_CACHING_PAGE,
210 /*page_length*/sizeof(struct scsi_caching_page) - 2,
211 /*flags1*/ SCP_DISC | SCP_WCE,
212 /*ret_priority*/ 0,
213 /*disable_pf_transfer_len*/ {0xff, 0xff},
214 /*min_prefetch*/ {0, 0},
215 /*max_prefetch*/ {0xff, 0xff},
216 /*max_pf_ceiling*/ {0xff, 0xff},
217 /*flags2*/ 0,
218 /*cache_segments*/ 0,
219 /*cache_seg_size*/ {0, 0},
220 /*reserved*/ 0,
221 /*non_cache_seg_size*/ {0, 0, 0}
222};
223
224const static struct scsi_caching_page caching_page_changeable = {
225 /*page_code*/SMS_CACHING_PAGE,
226 /*page_length*/sizeof(struct scsi_caching_page) - 2,
227 /*flags1*/ SCP_WCE | SCP_RCD,
228 /*ret_priority*/ 0,
229 /*disable_pf_transfer_len*/ {0, 0},
230 /*min_prefetch*/ {0, 0},
231 /*max_prefetch*/ {0, 0},
232 /*max_pf_ceiling*/ {0, 0},
233 /*flags2*/ 0,
234 /*cache_segments*/ 0,
235 /*cache_seg_size*/ {0, 0},
236 /*reserved*/ 0,
237 /*non_cache_seg_size*/ {0, 0, 0}
238};
239
240const static struct scsi_control_page control_page_default = {
241 /*page_code*/SMS_CONTROL_MODE_PAGE,
242 /*page_length*/sizeof(struct scsi_control_page) - 2,
243 /*rlec*/0,
244 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
245 /*eca_and_aen*/0,
246 /*flags4*/SCP_TAS,
247 /*aen_holdoff_period*/{0, 0},
248 /*busy_timeout_period*/{0, 0},
249 /*extended_selftest_completion_time*/{0, 0}
250};
251
252const static struct scsi_control_page control_page_changeable = {
253 /*page_code*/SMS_CONTROL_MODE_PAGE,
254 /*page_length*/sizeof(struct scsi_control_page) - 2,
255 /*rlec*/SCP_DSENSE,
256 /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
257 /*eca_and_aen*/SCP_SWP,
258 /*flags4*/0,
259 /*aen_holdoff_period*/{0, 0},
260 /*busy_timeout_period*/{0, 0},
261 /*extended_selftest_completion_time*/{0, 0}
262};
263
264#define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4)
265
266const static struct scsi_control_ext_page control_ext_page_default = {
267 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
268 /*subpage_code*/0x01,
269 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
270 /*flags*/0,
271 /*prio*/0,
272 /*max_sense*/0
273};
274
275const static struct scsi_control_ext_page control_ext_page_changeable = {
276 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
277 /*subpage_code*/0x01,
278 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
279 /*flags*/0,
280 /*prio*/0,
281 /*max_sense*/0xff
282};
283
284const static struct scsi_info_exceptions_page ie_page_default = {
285 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
286 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
287 /*info_flags*/SIEP_FLAGS_EWASC,
288 /*mrie*/SIEP_MRIE_NO,
289 /*interval_timer*/{0, 0, 0, 0},
290 /*report_count*/{0, 0, 0, 1}
291};
292
293const static struct scsi_info_exceptions_page ie_page_changeable = {
294 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
295 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
296 /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
297 SIEP_FLAGS_LOGERR,
298 /*mrie*/0x0f,
299 /*interval_timer*/{0xff, 0xff, 0xff, 0xff},
300 /*report_count*/{0xff, 0xff, 0xff, 0xff}
301};
302
303#define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4)
304
305const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
306 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
307 /*subpage_code*/0x02,
308 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
309 /*flags*/0,
310 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
311 /*descr*/{}},
312 {{/*flags*/0,
313 /*resource*/0x01,
314 /*reserved*/{0, 0},
315 /*count*/{0, 0, 0, 0}},
316 {/*flags*/0,
317 /*resource*/0x02,
318 /*reserved*/{0, 0},
319 /*count*/{0, 0, 0, 0}},
320 {/*flags*/0,
321 /*resource*/0xf1,
322 /*reserved*/{0, 0},
323 /*count*/{0, 0, 0, 0}},
324 {/*flags*/0,
325 /*resource*/0xf2,
326 /*reserved*/{0, 0},
327 /*count*/{0, 0, 0, 0}}
328 }
329};
330
331const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
332 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
333 /*subpage_code*/0x02,
334 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
335 /*flags*/SLBPP_SITUA,
336 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
337 /*descr*/{}},
338 {{/*flags*/0,
339 /*resource*/0,
340 /*reserved*/{0, 0},
341 /*count*/{0, 0, 0, 0}},
342 {/*flags*/0,
343 /*resource*/0,
344 /*reserved*/{0, 0},
345 /*count*/{0, 0, 0, 0}},
346 {/*flags*/0,
347 /*resource*/0,
348 /*reserved*/{0, 0},
349 /*count*/{0, 0, 0, 0}},
350 {/*flags*/0,
351 /*resource*/0,
352 /*reserved*/{0, 0},
353 /*count*/{0, 0, 0, 0}}
354 }
355};
356
357const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
358 /*page_code*/SMS_CDDVD_CAPS_PAGE,
359 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
360 /*caps1*/0x3f,
361 /*caps2*/0x00,
362 /*caps3*/0xf0,
363 /*caps4*/0x00,
364 /*caps5*/0x29,
365 /*caps6*/0x00,
366 /*obsolete*/{0, 0},
367 /*nvol_levels*/{0, 0},
368 /*buffer_size*/{8, 0},
369 /*obsolete2*/{0, 0},
370 /*reserved*/0,
371 /*digital*/0,
372 /*obsolete3*/0,
373 /*copy_management*/0,
374 /*reserved2*/0,
375 /*rotation_control*/0,
376 /*cur_write_speed*/0,
377 /*num_speed_descr*/0,
378};
379
380const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
381 /*page_code*/SMS_CDDVD_CAPS_PAGE,
382 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
383 /*caps1*/0,
384 /*caps2*/0,
385 /*caps3*/0,
386 /*caps4*/0,
387 /*caps5*/0,
388 /*caps6*/0,
389 /*obsolete*/{0, 0},
390 /*nvol_levels*/{0, 0},
391 /*buffer_size*/{0, 0},
392 /*obsolete2*/{0, 0},
393 /*reserved*/0,
394 /*digital*/0,
395 /*obsolete3*/0,
396 /*copy_management*/0,
397 /*reserved2*/0,
398 /*rotation_control*/0,
399 /*cur_write_speed*/0,
400 /*num_speed_descr*/0,
401};
402
403SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
404static int worker_threads = -1;
405SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
406 &worker_threads, 1, "Number of worker threads");
407static int ctl_debug = CTL_DEBUG_NONE;
408SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
409 &ctl_debug, 0, "Enabled debug flags");
410static int ctl_lun_map_size = 1024;
411SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
412 &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
413
414/*
415 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
416 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
417 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
418 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
419 */
420#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10
421
422static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
423 int param);
424static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
425static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
426static int ctl_init(void);
427void ctl_shutdown(void);
428static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
429static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
430static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
431static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
432 struct ctl_ooa *ooa_hdr,
433 struct ctl_ooa_entry *kern_entries);
434static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
435 struct thread *td);
436static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
437 struct ctl_be_lun *be_lun);
438static int ctl_free_lun(struct ctl_lun *lun);
439static void ctl_create_lun(struct ctl_be_lun *be_lun);
440static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
441
442static int ctl_do_mode_select(union ctl_io *io);
443static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
444 uint64_t res_key, uint64_t sa_res_key,
445 uint8_t type, uint32_t residx,
446 struct ctl_scsiio *ctsio,
447 struct scsi_per_res_out *cdb,
448 struct scsi_per_res_out_parms* param);
449static void ctl_pro_preempt_other(struct ctl_lun *lun,
450 union ctl_ha_msg *msg);
451static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
452static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
453static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
454static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
455static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
456static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
457static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
458 int alloc_len);
459static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
460 int alloc_len);
461static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
462static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
463static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
464static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
465static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
466static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
467 bool seq);
468static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
469static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
470 union ctl_io *pending_io, union ctl_io *ooa_io);
471static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
472 union ctl_io *starting_io);
473static int ctl_check_blocked(struct ctl_lun *lun);
474static int ctl_scsiio_lun_check(struct ctl_lun *lun,
475 const struct ctl_cmd_entry *entry,
476 struct ctl_scsiio *ctsio);
477static void ctl_failover_lun(union ctl_io *io);
478static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
479 struct ctl_scsiio *ctsio);
480static int ctl_scsiio(struct ctl_scsiio *ctsio);
481
482static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
483static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
484 ctl_ua_type ua_type);
485static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io,
486 ctl_ua_type ua_type);
487static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
488static int ctl_abort_task(union ctl_io *io);
489static int ctl_abort_task_set(union ctl_io *io);
490static int ctl_query_task(union ctl_io *io, int task_set);
491static int ctl_i_t_nexus_reset(union ctl_io *io);
492static int ctl_query_async_event(union ctl_io *io);
493static void ctl_run_task(union ctl_io *io);
494#ifdef CTL_IO_DELAY
495static void ctl_datamove_timer_wakeup(void *arg);
496static void ctl_done_timer_wakeup(void *arg);
497#endif /* CTL_IO_DELAY */
498
499static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
500static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
501static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
502static void ctl_datamove_remote_write(union ctl_io *io);
503static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
504static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
505static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
506static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
507 ctl_ha_dt_cb callback);
508static void ctl_datamove_remote_read(union ctl_io *io);
509static void ctl_datamove_remote(union ctl_io *io);
510static void ctl_process_done(union ctl_io *io);
511static void ctl_lun_thread(void *arg);
512static void ctl_thresh_thread(void *arg);
513static void ctl_work_thread(void *arg);
514static void ctl_enqueue_incoming(union ctl_io *io);
515static void ctl_enqueue_rtr(union ctl_io *io);
516static void ctl_enqueue_done(union ctl_io *io);
517static void ctl_enqueue_isc(union ctl_io *io);
518static const struct ctl_cmd_entry *
519 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
520static const struct ctl_cmd_entry *
521 ctl_validate_command(struct ctl_scsiio *ctsio);
522static int ctl_cmd_applicable(uint8_t lun_type,
523 const struct ctl_cmd_entry *entry);
524
525static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
526static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
527static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
528static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);
529
530/*
531 * Load the serialization table. This isn't very pretty, but is probably
532 * the easiest way to do it.
533 */
534#include "ctl_ser_table.c"
535
536/*
537 * We only need to define open, close and ioctl routines for this driver.
538 */
539static struct cdevsw ctl_cdevsw = {
540 .d_version = D_VERSION,
541 .d_flags = 0,
542 .d_open = ctl_open,
543 .d_close = ctl_close,
544 .d_ioctl = ctl_ioctl,
545 .d_name = "ctl",
546};
547
548
549MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
550
551static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);
552
553static moduledata_t ctl_moduledata = {
554 "ctl",
555 ctl_module_event_handler,
556 NULL
557};
558
559DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
560MODULE_VERSION(ctl, 1);
561
562static struct ctl_frontend ha_frontend =
563{
564 .name = "ha",
565};
566
567static void
568ctl_ha_datamove(union ctl_io *io)
569{
570 struct ctl_lun *lun;
571 struct ctl_sg_entry *sgl;
572 union ctl_ha_msg msg;
573 uint32_t sg_entries_sent;
574 int do_sg_copy, i, j;
575
576 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
577 memset(&msg.dt, 0, sizeof(msg.dt));
578 msg.hdr.msg_type = CTL_MSG_DATAMOVE;
579 msg.hdr.original_sc = io->io_hdr.original_sc;
580 msg.hdr.serializing_sc = io;
581 msg.hdr.nexus = io->io_hdr.nexus;
582 msg.hdr.status = io->io_hdr.status;
583 msg.dt.flags = io->io_hdr.flags;
584
585 /*
586 * We convert everything into a S/G list here. We can't
587 * pass by reference, only by value between controllers.
588 * So we can't pass a pointer to the S/G list, only as many
589 * S/G entries as we can fit in here. If it's possible for
590 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
591 * then we need to break this up into multiple transfers.
592 */
593 if (io->scsiio.kern_sg_entries == 0) {
594 msg.dt.kern_sg_entries = 1;
595#if 0
596 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
597 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
598 } else {
599 /* XXX KDM use busdma here! */
600 msg.dt.sg_list[0].addr =
601 (void *)vtophys(io->scsiio.kern_data_ptr);
602 }
603#else
604 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
605 ("HA does not support BUS_ADDR"));
606 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
607#endif
608 msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
609 do_sg_copy = 0;
610 } else {
611 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
612 do_sg_copy = 1;
613 }
614
615 msg.dt.kern_data_len = io->scsiio.kern_data_len;
616 msg.dt.kern_total_len = io->scsiio.kern_total_len;
617 msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
618 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
619 msg.dt.sg_sequence = 0;
620
621 /*
622 * Loop until we've sent all of the S/G entries. On the
623 * other end, we'll recompose these S/G entries into one
624 * contiguous list before processing.
625 */
626 for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
627 msg.dt.sg_sequence++) {
628 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
629 sizeof(msg.dt.sg_list[0])),
630 msg.dt.kern_sg_entries - sg_entries_sent);
631 if (do_sg_copy != 0) {
632 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
633 for (i = sg_entries_sent, j = 0;
634 i < msg.dt.cur_sg_entries; i++, j++) {
635#if 0
636 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
637 msg.dt.sg_list[j].addr = sgl[i].addr;
638 } else {
639 /* XXX KDM use busdma here! */
640 msg.dt.sg_list[j].addr =
641 (void *)vtophys(sgl[i].addr);
642 }
643#else
644 KASSERT((io->io_hdr.flags &
645 CTL_FLAG_BUS_ADDR) == 0,
646 ("HA does not support BUS_ADDR"));
647 msg.dt.sg_list[j].addr = sgl[i].addr;
648#endif
649 msg.dt.sg_list[j].len = sgl[i].len;
650 }
651 }
652
653 sg_entries_sent += msg.dt.cur_sg_entries;
654 msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
655 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
656 sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
657 sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
658 M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
659 io->io_hdr.port_status = 31341;
660 io->scsiio.be_move_done(io);
661 return;
662 }
663 msg.dt.sent_sg_entries = sg_entries_sent;
664 }
665
666 /*
667 * Officially handover the request from us to peer.
668 * If failover has just happened, then we must return error.
669 * If failover happen just after, then it is not our problem.
670 */
671 if (lun)
672 mtx_lock(&lun->lun_lock);
673 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
674 if (lun)
675 mtx_unlock(&lun->lun_lock);
676 io->io_hdr.port_status = 31342;
677 io->scsiio.be_move_done(io);
678 return;
679 }
680 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
681 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
682 if (lun)
683 mtx_unlock(&lun->lun_lock);
684}
685
686static void
687ctl_ha_done(union ctl_io *io)
688{
689 union ctl_ha_msg msg;
690
691 if (io->io_hdr.io_type == CTL_IO_SCSI) {
692 memset(&msg, 0, sizeof(msg));
693 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
694 msg.hdr.original_sc = io->io_hdr.original_sc;
695 msg.hdr.nexus = io->io_hdr.nexus;
696 msg.hdr.status = io->io_hdr.status;
697 msg.scsi.scsi_status = io->scsiio.scsi_status;
698 msg.scsi.tag_num = io->scsiio.tag_num;
699 msg.scsi.tag_type = io->scsiio.tag_type;
700 msg.scsi.sense_len = io->scsiio.sense_len;
701 msg.scsi.sense_residual = io->scsiio.sense_residual;
702 msg.scsi.residual = io->scsiio.residual;
703 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
704 io->scsiio.sense_len);
705 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
706 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
707 msg.scsi.sense_len, M_WAITOK);
708 }
709 ctl_free_io(io);
710}
711
712static void
713ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
714 union ctl_ha_msg *msg_info)
715{
716 struct ctl_scsiio *ctsio;
717
718 if (msg_info->hdr.original_sc == NULL) {
719 printf("%s: original_sc == NULL!\n", __func__);
720 /* XXX KDM now what? */
721 return;
722 }
723
724 ctsio = &msg_info->hdr.original_sc->scsiio;
725 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
726 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
727 ctsio->io_hdr.status = msg_info->hdr.status;
728 ctsio->scsi_status = msg_info->scsi.scsi_status;
729 ctsio->sense_len = msg_info->scsi.sense_len;
730 ctsio->sense_residual = msg_info->scsi.sense_residual;
731 ctsio->residual = msg_info->scsi.residual;
732 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
733 msg_info->scsi.sense_len);
734 ctl_enqueue_isc((union ctl_io *)ctsio);
735}
736
737static void
738ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
739 union ctl_ha_msg *msg_info)
740{
741 struct ctl_scsiio *ctsio;
742
743 if (msg_info->hdr.serializing_sc == NULL) {
744 printf("%s: serializing_sc == NULL!\n", __func__);
745 /* XXX KDM now what? */
746 return;
747 }
748
749 ctsio = &msg_info->hdr.serializing_sc->scsiio;
750 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
751 ctl_enqueue_isc((union ctl_io *)ctsio);
752}
753
754void
755ctl_isc_announce_lun(struct ctl_lun *lun)
756{
757 struct ctl_softc *softc = lun->ctl_softc;
758 union ctl_ha_msg *msg;
759 struct ctl_ha_msg_lun_pr_key pr_key;
760 int i, k;
761
762 if (softc->ha_link != CTL_HA_LINK_ONLINE)
763 return;
764 mtx_lock(&lun->lun_lock);
765 i = sizeof(msg->lun);
766 if (lun->lun_devid)
767 i += lun->lun_devid->len;
768 i += sizeof(pr_key) * lun->pr_key_count;
769alloc:
770 mtx_unlock(&lun->lun_lock);
771 msg = malloc(i, M_CTL, M_WAITOK);
772 mtx_lock(&lun->lun_lock);
773 k = sizeof(msg->lun);
774 if (lun->lun_devid)
775 k += lun->lun_devid->len;
776 k += sizeof(pr_key) * lun->pr_key_count;
777 if (i < k) {
778 free(msg, M_CTL);
779 i = k;
780 goto alloc;
781 }
782 bzero(&msg->lun, sizeof(msg->lun));
783 msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
784 msg->hdr.nexus.targ_lun = lun->lun;
785 msg->hdr.nexus.targ_mapped_lun = lun->lun;
786 msg->lun.flags = lun->flags;
787 msg->lun.pr_generation = lun->pr_generation;
788 msg->lun.pr_res_idx = lun->pr_res_idx;
789 msg->lun.pr_res_type = lun->pr_res_type;
790 msg->lun.pr_key_count = lun->pr_key_count;
791 i = 0;
792 if (lun->lun_devid) {
793 msg->lun.lun_devid_len = lun->lun_devid->len;
794 memcpy(&msg->lun.data[i], lun->lun_devid->data,
795 msg->lun.lun_devid_len);
796 i += msg->lun.lun_devid_len;
797 }
798 for (k = 0; k < CTL_MAX_INITIATORS; k++) {
799 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
800 continue;
801 pr_key.pr_iid = k;
802 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
803 i += sizeof(pr_key);
804 }
805 mtx_unlock(&lun->lun_lock);
806 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
807 M_WAITOK);
808 free(msg, M_CTL);
809
810 if (lun->flags & CTL_LUN_PRIMARY_SC) {
811 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
812 ctl_isc_announce_mode(lun, -1,
813 lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
814 lun->mode_pages.index[i].subpage);
815 }
816 }
817}
818
819void
820ctl_isc_announce_port(struct ctl_port *port)
821{
822 struct ctl_softc *softc = port->ctl_softc;
823 union ctl_ha_msg *msg;
824 int i;
825
826 if (port->targ_port < softc->port_min ||
827 port->targ_port >= softc->port_max ||
828 softc->ha_link != CTL_HA_LINK_ONLINE)
829 return;
830 i = sizeof(msg->port) + strlen(port->port_name) + 1;
831 if (port->lun_map)
832 i += port->lun_map_size * sizeof(uint32_t);
833 if (port->port_devid)
834 i += port->port_devid->len;
835 if (port->target_devid)
836 i += port->target_devid->len;
837 if (port->init_devid)
838 i += port->init_devid->len;
839 msg = malloc(i, M_CTL, M_WAITOK);
840 bzero(&msg->port, sizeof(msg->port));
841 msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
842 msg->hdr.nexus.targ_port = port->targ_port;
843 msg->port.port_type = port->port_type;
844 msg->port.physical_port = port->physical_port;
845 msg->port.virtual_port = port->virtual_port;
846 msg->port.status = port->status;
847 i = 0;
848 msg->port.name_len = sprintf(&msg->port.data[i],
849 "%d:%s", softc->ha_id, port->port_name) + 1;
850 i += msg->port.name_len;
851 if (port->lun_map) {
852 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
853 memcpy(&msg->port.data[i], port->lun_map,
854 msg->port.lun_map_len);
855 i += msg->port.lun_map_len;
856 }
857 if (port->port_devid) {
858 msg->port.port_devid_len = port->port_devid->len;
859 memcpy(&msg->port.data[i], port->port_devid->data,
860 msg->port.port_devid_len);
861 i += msg->port.port_devid_len;
862 }
863 if (port->target_devid) {
864 msg->port.target_devid_len = port->target_devid->len;
865 memcpy(&msg->port.data[i], port->target_devid->data,
866 msg->port.target_devid_len);
867 i += msg->port.target_devid_len;
868 }
869 if (port->init_devid) {
870 msg->port.init_devid_len = port->init_devid->len;
871 memcpy(&msg->port.data[i], port->init_devid->data,
872 msg->port.init_devid_len);
873 i += msg->port.init_devid_len;
874 }
875 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
876 M_WAITOK);
877 free(msg, M_CTL);
878}
879
880void
881ctl_isc_announce_iid(struct ctl_port *port, int iid)
882{
883 struct ctl_softc *softc = port->ctl_softc;
884 union ctl_ha_msg *msg;
885 int i, l;
886
887 if (port->targ_port < softc->port_min ||
888 port->targ_port >= softc->port_max ||
889 softc->ha_link != CTL_HA_LINK_ONLINE)
890 return;
891 mtx_lock(&softc->ctl_lock);
892 i = sizeof(msg->iid);
893 l = 0;
894 if (port->wwpn_iid[iid].name)
895 l = strlen(port->wwpn_iid[iid].name) + 1;
896 i += l;
897 msg = malloc(i, M_CTL, M_NOWAIT);
898 if (msg == NULL) {
899 mtx_unlock(&softc->ctl_lock);
900 return;
901 }
902 bzero(&msg->iid, sizeof(msg->iid));
903 msg->hdr.msg_type = CTL_MSG_IID_SYNC;
904 msg->hdr.nexus.targ_port = port->targ_port;
905 msg->hdr.nexus.initid = iid;
906 msg->iid.in_use = port->wwpn_iid[iid].in_use;
907 msg->iid.name_len = l;
908 msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
909 if (port->wwpn_iid[iid].name)
910 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
911 mtx_unlock(&softc->ctl_lock);
912 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
913 free(msg, M_CTL);
914}
915
916void
917ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
918 uint8_t page, uint8_t subpage)
919{
920 struct ctl_softc *softc = lun->ctl_softc;
921 union ctl_ha_msg msg;
922 u_int i;
923
924 if (softc->ha_link != CTL_HA_LINK_ONLINE)
925 return;
926 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
927 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
928 page && lun->mode_pages.index[i].subpage == subpage)
929 break;
930 }
931 if (i == CTL_NUM_MODE_PAGES)
932 return;
933
934 /* Don't try to replicate pages not present on this device. */
935 if (lun->mode_pages.index[i].page_data == NULL)
936 return;
937
938 bzero(&msg.mode, sizeof(msg.mode));
939 msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
940 msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
941 msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
942 msg.hdr.nexus.targ_lun = lun->lun;
943 msg.hdr.nexus.targ_mapped_lun = lun->lun;
944 msg.mode.page_code = page;
945 msg.mode.subpage = subpage;
946 msg.mode.page_len = lun->mode_pages.index[i].page_len;
947 memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
948 msg.mode.page_len);
949 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
950 M_WAITOK);
951}
952
953static void
954ctl_isc_ha_link_up(struct ctl_softc *softc)
955{
956 struct ctl_port *port;
957 struct ctl_lun *lun;
958 union ctl_ha_msg msg;
959 int i;
960
961 /* Announce this node parameters to peer for validation. */
962 msg.login.msg_type = CTL_MSG_LOGIN;
963 msg.login.version = CTL_HA_VERSION;
964 msg.login.ha_mode = softc->ha_mode;
965 msg.login.ha_id = softc->ha_id;
966 msg.login.max_luns = CTL_MAX_LUNS;
967 msg.login.max_ports = CTL_MAX_PORTS;
968 msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
969 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
970 M_WAITOK);
971
972 STAILQ_FOREACH(port, &softc->port_list, links) {
973 ctl_isc_announce_port(port);
974 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
975 if (port->wwpn_iid[i].in_use)
976 ctl_isc_announce_iid(port, i);
977 }
978 }
979 STAILQ_FOREACH(lun, &softc->lun_list, links)
980 ctl_isc_announce_lun(lun);
981}
982
983static void
984ctl_isc_ha_link_down(struct ctl_softc *softc)
985{
986 struct ctl_port *port;
987 struct ctl_lun *lun;
988 union ctl_io *io;
989 int i;
990
991 mtx_lock(&softc->ctl_lock);
992 STAILQ_FOREACH(lun, &softc->lun_list, links) {
993 mtx_lock(&lun->lun_lock);
994 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
995 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
996 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
997 }
998 mtx_unlock(&lun->lun_lock);
999
1000 mtx_unlock(&softc->ctl_lock);
1001 io = ctl_alloc_io(softc->othersc_pool);
1002 mtx_lock(&softc->ctl_lock);
1003 ctl_zero_io(io);
1004 io->io_hdr.msg_type = CTL_MSG_FAILOVER;
1005 io->io_hdr.nexus.targ_mapped_lun = lun->lun;
1006 ctl_enqueue_isc(io);
1007 }
1008
1009 STAILQ_FOREACH(port, &softc->port_list, links) {
1010 if (port->targ_port >= softc->port_min &&
1011 port->targ_port < softc->port_max)
1012 continue;
1013 port->status &= ~CTL_PORT_STATUS_ONLINE;
1014 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1015 port->wwpn_iid[i].in_use = 0;
1016 free(port->wwpn_iid[i].name, M_CTL);
1017 port->wwpn_iid[i].name = NULL;
1018 }
1019 }
1020 mtx_unlock(&softc->ctl_lock);
1021}
1022
1023static void
1024ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1025{
1026 struct ctl_lun *lun;
1027 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
1028
1029 mtx_lock(&softc->ctl_lock);
1030 if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS ||
1031 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
1032 mtx_unlock(&softc->ctl_lock);
1033 return;
1034 }
1035 mtx_lock(&lun->lun_lock);
1036 mtx_unlock(&softc->ctl_lock);
1037 if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
1038 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
1039 if (msg->ua.ua_all) {
1040 if (msg->ua.ua_set)
1041 ctl_est_ua_all(lun, iid, msg->ua.ua_type);
1042 else
1043 ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
1044 } else {
1045 if (msg->ua.ua_set)
1046 ctl_est_ua(lun, iid, msg->ua.ua_type);
1047 else
1048 ctl_clr_ua(lun, iid, msg->ua.ua_type);
1049 }
1050 mtx_unlock(&lun->lun_lock);
1051}
1052
1053static void
1054ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1055{
1056 struct ctl_lun *lun;
1057 struct ctl_ha_msg_lun_pr_key pr_key;
1058 int i, k;
1059 ctl_lun_flags oflags;
1060 uint32_t targ_lun;
1061
1062 targ_lun = msg->hdr.nexus.targ_mapped_lun;
1063 mtx_lock(&softc->ctl_lock);
1064 if (targ_lun >= CTL_MAX_LUNS ||
1065 (lun = softc->ctl_luns[targ_lun]) == NULL) {
1066 mtx_unlock(&softc->ctl_lock);
1067 return;
1068 }
1069 mtx_lock(&lun->lun_lock);
1070 mtx_unlock(&softc->ctl_lock);
1071 if (lun->flags & CTL_LUN_DISABLED) {
1072 mtx_unlock(&lun->lun_lock);
1073 return;
1074 }
1075 i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
1076 if (msg->lun.lun_devid_len != i || (i > 0 &&
1077 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
1078 mtx_unlock(&lun->lun_lock);
1079 printf("%s: Received conflicting HA LUN %d\n",
1080 __func__, targ_lun);
1081 return;
1082 } else {
1083 /* Record whether peer is primary. */
1084 oflags = lun->flags;
1085 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
1086 (msg->lun.flags & CTL_LUN_DISABLED) == 0)
1087 lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
1088 else
1089 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
1090 if (oflags != lun->flags)
1091 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
1092
1093 /* If peer is primary and we are not -- use data */
1094 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
1095 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
1096 lun->pr_generation = msg->lun.pr_generation;
1097 lun->pr_res_idx = msg->lun.pr_res_idx;
1098 lun->pr_res_type = msg->lun.pr_res_type;
1099 lun->pr_key_count = msg->lun.pr_key_count;
1100 for (k = 0; k < CTL_MAX_INITIATORS; k++)
1101 ctl_clr_prkey(lun, k);
1102 for (k = 0; k < msg->lun.pr_key_count; k++) {
1103 memcpy(&pr_key, &msg->lun.data[i],
1104 sizeof(pr_key));
1105 ctl_alloc_prkey(lun, pr_key.pr_iid);
1106 ctl_set_prkey(lun, pr_key.pr_iid,
1107 pr_key.pr_key);
1108 i += sizeof(pr_key);
1109 }
1110 }
1111
1112 mtx_unlock(&lun->lun_lock);
1113 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
1114 __func__, targ_lun,
1115 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
1116 "primary" : "secondary"));
1117
1118 /* If we are primary but peer doesn't know -- notify */
1119 if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
1120 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
1121 ctl_isc_announce_lun(lun);
1122 }
1123}
1124
1125static void
1126ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1127{
1128 struct ctl_port *port;
1129 struct ctl_lun *lun;
1130 int i, new;
1131
1132 port = softc->ctl_ports[msg->hdr.nexus.targ_port];
1133 if (port == NULL) {
1134 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
1135 msg->hdr.nexus.targ_port));
1136 new = 1;
1137 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
1138 port->frontend = &ha_frontend;
1139 port->targ_port = msg->hdr.nexus.targ_port;
1140 port->fe_datamove = ctl_ha_datamove;
1141 port->fe_done = ctl_ha_done;
1142 } else if (port->frontend == &ha_frontend) {
1143 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
1144 msg->hdr.nexus.targ_port));
1145 new = 0;
1146 } else {
1147 printf("%s: Received conflicting HA port %d\n",
1148 __func__, msg->hdr.nexus.targ_port);
1149 return;
1150 }
1151 port->port_type = msg->port.port_type;
1152 port->physical_port = msg->port.physical_port;
1153 port->virtual_port = msg->port.virtual_port;
1154 port->status = msg->port.status;
1155 i = 0;
1156 free(port->port_name, M_CTL);
1157 port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
1158 M_CTL);
1159 i += msg->port.name_len;
1160 if (msg->port.lun_map_len != 0) {
1161 if (port->lun_map == NULL ||
1162 port->lun_map_size * sizeof(uint32_t) <
1163 msg->port.lun_map_len) {
1164 port->lun_map_size = 0;
1165 free(port->lun_map, M_CTL);
1166 port->lun_map = malloc(msg->port.lun_map_len,
1167 M_CTL, M_WAITOK);
1168 }
1169 memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
1170 port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
1171 i += msg->port.lun_map_len;
1172 } else {
1173 port->lun_map_size = 0;
1174 free(port->lun_map, M_CTL);
1175 port->lun_map = NULL;
1176 }
1177 if (msg->port.port_devid_len != 0) {
1178 if (port->port_devid == NULL ||
1179 port->port_devid->len < msg->port.port_devid_len) {
1180 free(port->port_devid, M_CTL);
1181 port->port_devid = malloc(sizeof(struct ctl_devid) +
1182 msg->port.port_devid_len, M_CTL, M_WAITOK);
1183 }
1184 memcpy(port->port_devid->data, &msg->port.data[i],
1185 msg->port.port_devid_len);
1186 port->port_devid->len = msg->port.port_devid_len;
1187 i += msg->port.port_devid_len;
1188 } else {
1189 free(port->port_devid, M_CTL);
1190 port->port_devid = NULL;
1191 }
1192 if (msg->port.target_devid_len != 0) {
1193 if (port->target_devid == NULL ||
1194 port->target_devid->len < msg->port.target_devid_len) {
1195 free(port->target_devid, M_CTL);
1196 port->target_devid = malloc(sizeof(struct ctl_devid) +
1197 msg->port.target_devid_len, M_CTL, M_WAITOK);
1198 }
1199 memcpy(port->target_devid->data, &msg->port.data[i],
1200 msg->port.target_devid_len);
1201 port->target_devid->len = msg->port.target_devid_len;
1202 i += msg->port.target_devid_len;
1203 } else {
1204 free(port->target_devid, M_CTL);
1205 port->target_devid = NULL;
1206 }
1207 if (msg->port.init_devid_len != 0) {
1208 if (port->init_devid == NULL ||
1209 port->init_devid->len < msg->port.init_devid_len) {
1210 free(port->init_devid, M_CTL);
1211 port->init_devid = malloc(sizeof(struct ctl_devid) +
1212 msg->port.init_devid_len, M_CTL, M_WAITOK);
1213 }
1214 memcpy(port->init_devid->data, &msg->port.data[i],
1215 msg->port.init_devid_len);
1216 port->init_devid->len = msg->port.init_devid_len;
1217 i += msg->port.init_devid_len;
1218 } else {
1219 free(port->init_devid, M_CTL);
1220 port->init_devid = NULL;
1221 }
1222 if (new) {
1223 if (ctl_port_register(port) != 0) {
1224 printf("%s: ctl_port_register() failed with error\n",
1225 __func__);
1226 }
1227 }
1228 mtx_lock(&softc->ctl_lock);
1229 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1230 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
1231 continue;
1232 mtx_lock(&lun->lun_lock);
1233 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
1234 mtx_unlock(&lun->lun_lock);
1235 }
1236 mtx_unlock(&softc->ctl_lock);
1237}
1238
1239static void
1240ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1241{
1242 struct ctl_port *port;
1243 int iid;
1244
1245 port = softc->ctl_ports[msg->hdr.nexus.targ_port];
1246 if (port == NULL) {
1247 printf("%s: Received IID for unknown port %d\n",
1248 __func__, msg->hdr.nexus.targ_port);
1249 return;
1250 }
1251 iid = msg->hdr.nexus.initid;
1252 port->wwpn_iid[iid].in_use = msg->iid.in_use;
1253 port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
1254 free(port->wwpn_iid[iid].name, M_CTL);
1255 if (msg->iid.name_len) {
1256 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
1257 msg->iid.name_len, M_CTL);
1258 } else
1259 port->wwpn_iid[iid].name = NULL;
1260}
1261
1262static void
1263ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1264{
1265
1266 if (msg->login.version != CTL_HA_VERSION) {
1267 printf("CTL HA peers have different versions %d != %d\n",
1268 msg->login.version, CTL_HA_VERSION);
1269 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1270 return;
1271 }
1272 if (msg->login.ha_mode != softc->ha_mode) {
1273 printf("CTL HA peers have different ha_mode %d != %d\n",
1274 msg->login.ha_mode, softc->ha_mode);
1275 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1276 return;
1277 }
1278 if (msg->login.ha_id == softc->ha_id) {
1279 printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
1280 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1281 return;
1282 }
1283 if (msg->login.max_luns != CTL_MAX_LUNS ||
1284 msg->login.max_ports != CTL_MAX_PORTS ||
1285 msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
1286 printf("CTL HA peers have different limits\n");
1287 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1288 return;
1289 }
1290}
1291
1292static void
1293ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1294{
1295 struct ctl_lun *lun;
1296 u_int i;
1297 uint32_t initidx, targ_lun;
1298
1299 targ_lun = msg->hdr.nexus.targ_mapped_lun;
1300 mtx_lock(&softc->ctl_lock);
1301 if (targ_lun >= CTL_MAX_LUNS ||
1302 (lun = softc->ctl_luns[targ_lun]) == NULL) {
1303 mtx_unlock(&softc->ctl_lock);
1304 return;
1305 }
1306 mtx_lock(&lun->lun_lock);
1307 mtx_unlock(&softc->ctl_lock);
1308 if (lun->flags & CTL_LUN_DISABLED) {
1309 mtx_unlock(&lun->lun_lock);
1310 return;
1311 }
1312 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
1313 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
1314 msg->mode.page_code &&
1315 lun->mode_pages.index[i].subpage == msg->mode.subpage)
1316 break;
1317 }
1318 if (i == CTL_NUM_MODE_PAGES) {
1319 mtx_unlock(&lun->lun_lock);
1320 return;
1321 }
1322 memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
1323 lun->mode_pages.index[i].page_len);
1324 initidx = ctl_get_initindex(&msg->hdr.nexus);
1325 if (initidx != -1)
1326 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
1327 mtx_unlock(&lun->lun_lock);
1328}
1329
1330/*
1331 * ISC (Inter Shelf Communication) event handler. Events from the HA
1332 * subsystem come in here.
1333 */
1334static void
1335ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
1336{
1337 struct ctl_softc *softc = control_softc;
1338 union ctl_io *io;
1339 struct ctl_prio *presio;
1340 ctl_ha_status isc_status;
1341
1342 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
1343 if (event == CTL_HA_EVT_MSG_RECV) {
1344 union ctl_ha_msg *msg, msgbuf;
1345
1346 if (param > sizeof(msgbuf))
1347 msg = malloc(param, M_CTL, M_WAITOK);
1348 else
1349 msg = &msgbuf;
1350 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
1351 M_WAITOK);
1352 if (isc_status != CTL_HA_STATUS_SUCCESS) {
1353 printf("%s: Error receiving message: %d\n",
1354 __func__, isc_status);
1355 if (msg != &msgbuf)
1356 free(msg, M_CTL);
1357 return;
1358 }
1359
1360 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
1361 switch (msg->hdr.msg_type) {
1362 case CTL_MSG_SERIALIZE:
1363 io = ctl_alloc_io(softc->othersc_pool);
1364 ctl_zero_io(io);
1365 // populate ctsio from msg
1366 io->io_hdr.io_type = CTL_IO_SCSI;
1367 io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
1368 io->io_hdr.original_sc = msg->hdr.original_sc;
1369 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
1370 CTL_FLAG_IO_ACTIVE;
1371 /*
1372 * If we're in serialization-only mode, we don't
1373 * want to go through full done processing. Thus
1374 * the COPY flag.
1375 *
1376 * XXX KDM add another flag that is more specific.
1377 */
1378 if (softc->ha_mode != CTL_HA_MODE_XFER)
1379 io->io_hdr.flags |= CTL_FLAG_INT_COPY;
1380 io->io_hdr.nexus = msg->hdr.nexus;
1381#if 0
1382 printf("port %u, iid %u, lun %u\n",
1383 io->io_hdr.nexus.targ_port,
1384 io->io_hdr.nexus.initid,
1385 io->io_hdr.nexus.targ_lun);
1386#endif
1387 io->scsiio.tag_num = msg->scsi.tag_num;
1388 io->scsiio.tag_type = msg->scsi.tag_type;
1389#ifdef CTL_TIME_IO
1390 io->io_hdr.start_time = time_uptime;
1391 getbinuptime(&io->io_hdr.start_bt);
1392#endif /* CTL_TIME_IO */
1393 io->scsiio.cdb_len = msg->scsi.cdb_len;
1394 memcpy(io->scsiio.cdb, msg->scsi.cdb,
1395 CTL_MAX_CDBLEN);
1396 if (softc->ha_mode == CTL_HA_MODE_XFER) {
1397 const struct ctl_cmd_entry *entry;
1398
1399 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
1400 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
1401 io->io_hdr.flags |=
1402 entry->flags & CTL_FLAG_DATA_MASK;
1403 }
1404 ctl_enqueue_isc(io);
1405 break;
1406
1407 /* Performed on the Originating SC, XFER mode only */
1408 case CTL_MSG_DATAMOVE: {
1409 struct ctl_sg_entry *sgl;
1410 int i, j;
1411
1412 io = msg->hdr.original_sc;
1413 if (io == NULL) {
1414 printf("%s: original_sc == NULL!\n", __func__);
1415 /* XXX KDM do something here */
1416 break;
1417 }
1418 io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
1419 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1420 /*
1421 * Keep track of this, we need to send it back over
1422 * when the datamove is complete.
1423 */
1424 io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
1425 if (msg->hdr.status == CTL_SUCCESS)
1426 io->io_hdr.status = msg->hdr.status;
1427
1428 if (msg->dt.sg_sequence == 0) {
1429#ifdef CTL_TIME_IO
1430 getbinuptime(&io->io_hdr.dma_start_bt);
1431#endif
1432 i = msg->dt.kern_sg_entries +
1433 msg->dt.kern_data_len /
1434 CTL_HA_DATAMOVE_SEGMENT + 1;
1435 sgl = malloc(sizeof(*sgl) * i, M_CTL,
1436 M_WAITOK | M_ZERO);
1437 io->io_hdr.remote_sglist = sgl;
1438 io->io_hdr.local_sglist =
1439 &sgl[msg->dt.kern_sg_entries];
1440
1441 io->scsiio.kern_data_ptr = (uint8_t *)sgl;
1442
1443 io->scsiio.kern_sg_entries =
1444 msg->dt.kern_sg_entries;
1445 io->scsiio.rem_sg_entries =
1446 msg->dt.kern_sg_entries;
1447 io->scsiio.kern_data_len =
1448 msg->dt.kern_data_len;
1449 io->scsiio.kern_total_len =
1450 msg->dt.kern_total_len;
1451 io->scsiio.kern_data_resid =
1452 msg->dt.kern_data_resid;
1453 io->scsiio.kern_rel_offset =
1454 msg->dt.kern_rel_offset;
1455 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
1456 io->io_hdr.flags |= msg->dt.flags &
1457 CTL_FLAG_BUS_ADDR;
1458 } else
1459 sgl = (struct ctl_sg_entry *)
1460 io->scsiio.kern_data_ptr;
1461
1462 for (i = msg->dt.sent_sg_entries, j = 0;
1463 i < (msg->dt.sent_sg_entries +
1464 msg->dt.cur_sg_entries); i++, j++) {
1465 sgl[i].addr = msg->dt.sg_list[j].addr;
1466 sgl[i].len = msg->dt.sg_list[j].len;
1467
1468#if 0
1469 printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
1470 __func__, sgl[i].addr, sgl[i].len, j, i);
1471#endif
1472 }
1473
1474 /*
1475 * If this is the last piece of the I/O, we've got
1476 * the full S/G list. Queue processing in the thread.
1477 * Otherwise wait for the next piece.
1478 */
1479 if (msg->dt.sg_last != 0)
1480 ctl_enqueue_isc(io);
1481 break;
1482 }
1483 /* Performed on the Serializing (primary) SC, XFER mode only */
1484 case CTL_MSG_DATAMOVE_DONE: {
1485 if (msg->hdr.serializing_sc == NULL) {
1486 printf("%s: serializing_sc == NULL!\n",
1487 __func__);
1488 /* XXX KDM now what? */
1489 break;
1490 }
1491 /*
1492 * We grab the sense information here in case
1493 * there was a failure, so we can return status
1494 * back to the initiator.
1495 */
1496 io = msg->hdr.serializing_sc;
1497 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
1498 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
1499 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1500 io->io_hdr.port_status = msg->scsi.fetd_status;
1501 io->scsiio.residual = msg->scsi.residual;
1502 if (msg->hdr.status != CTL_STATUS_NONE) {
1503 io->io_hdr.status = msg->hdr.status;
1504 io->scsiio.scsi_status = msg->scsi.scsi_status;
1505 io->scsiio.sense_len = msg->scsi.sense_len;
1506 io->scsiio.sense_residual =msg->scsi.sense_residual;
1507 memcpy(&io->scsiio.sense_data,
1508 &msg->scsi.sense_data,
1509 msg->scsi.sense_len);
1510 if (msg->hdr.status == CTL_SUCCESS)
1511 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
1512 }
1513 ctl_enqueue_isc(io);
1514 break;
1515 }
1516
1517 /* Preformed on Originating SC, SER_ONLY mode */
1518 case CTL_MSG_R2R:
1519 io = msg->hdr.original_sc;
1520 if (io == NULL) {
1521 printf("%s: original_sc == NULL!\n",
1522 __func__);
1523 break;
1524 }
1525 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1526 io->io_hdr.msg_type = CTL_MSG_R2R;
1527 io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
1528 ctl_enqueue_isc(io);
1529 break;
1530
1531 /*
1532 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
1533 * mode.
1534 * Performed on the Originating (i.e. secondary) SC in XFER
1535 * mode
1536 */
1537 case CTL_MSG_FINISH_IO:
1538 if (softc->ha_mode == CTL_HA_MODE_XFER)
1539 ctl_isc_handler_finish_xfer(softc, msg);
1540 else
1541 ctl_isc_handler_finish_ser_only(softc, msg);
1542 break;
1543
1544 /* Preformed on Originating SC */
1545 case CTL_MSG_BAD_JUJU:
1546 io = msg->hdr.original_sc;
1547 if (io == NULL) {
1548 printf("%s: Bad JUJU!, original_sc is NULL!\n",
1549 __func__);
1550 break;
1551 }
1552 ctl_copy_sense_data(msg, io);
1553 /*
1554 * IO should have already been cleaned up on other
1555 * SC so clear this flag so we won't send a message
1556 * back to finish the IO there.
1557 */
1558 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
1559 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1560
1561 /* io = msg->hdr.serializing_sc; */
1562 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
1563 ctl_enqueue_isc(io);
1564 break;
1565
1566 /* Handle resets sent from the other side */
1567 case CTL_MSG_MANAGE_TASKS: {
1568 struct ctl_taskio *taskio;
1569 taskio = (struct ctl_taskio *)ctl_alloc_io(
1570 softc->othersc_pool);
1571 ctl_zero_io((union ctl_io *)taskio);
1572 taskio->io_hdr.io_type = CTL_IO_TASK;
1573 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
1574 taskio->io_hdr.nexus = msg->hdr.nexus;
1575 taskio->task_action = msg->task.task_action;
1576 taskio->tag_num = msg->task.tag_num;
1577 taskio->tag_type = msg->task.tag_type;
1578#ifdef CTL_TIME_IO
1579 taskio->io_hdr.start_time = time_uptime;
1580 getbinuptime(&taskio->io_hdr.start_bt);
1581#endif /* CTL_TIME_IO */
1582 ctl_run_task((union ctl_io *)taskio);
1583 break;
1584 }
1585 /* Persistent Reserve action which needs attention */
1586 case CTL_MSG_PERS_ACTION:
1587 presio = (struct ctl_prio *)ctl_alloc_io(
1588 softc->othersc_pool);
1589 ctl_zero_io((union ctl_io *)presio);
1590 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
1591 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
1592 presio->io_hdr.nexus = msg->hdr.nexus;
1593 presio->pr_msg = msg->pr;
1594 ctl_enqueue_isc((union ctl_io *)presio);
1595 break;
1596 case CTL_MSG_UA:
1597 ctl_isc_ua(softc, msg, param);
1598 break;
1599 case CTL_MSG_PORT_SYNC:
1600 ctl_isc_port_sync(softc, msg, param);
1601 break;
1602 case CTL_MSG_LUN_SYNC:
1603 ctl_isc_lun_sync(softc, msg, param);
1604 break;
1605 case CTL_MSG_IID_SYNC:
1606 ctl_isc_iid_sync(softc, msg, param);
1607 break;
1608 case CTL_MSG_LOGIN:
1609 ctl_isc_login(softc, msg, param);
1610 break;
1611 case CTL_MSG_MODE_SYNC:
1612 ctl_isc_mode_sync(softc, msg, param);
1613 break;
1614 default:
1615 printf("Received HA message of unknown type %d\n",
1616 msg->hdr.msg_type);
1617 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1618 break;
1619 }
1620 if (msg != &msgbuf)
1621 free(msg, M_CTL);
1622 } else if (event == CTL_HA_EVT_LINK_CHANGE) {
1623 printf("CTL: HA link status changed from %d to %d\n",
1624 softc->ha_link, param);
1625 if (param == softc->ha_link)
1626 return;
1627 if (softc->ha_link == CTL_HA_LINK_ONLINE) {
1628 softc->ha_link = param;
1629 ctl_isc_ha_link_down(softc);
1630 } else {
1631 softc->ha_link = param;
1632 if (softc->ha_link == CTL_HA_LINK_ONLINE)
1633 ctl_isc_ha_link_up(softc);
1634 }
1635 return;
1636 } else {
1637 printf("ctl_isc_event_handler: Unknown event %d\n", event);
1638 return;
1639 }
1640}
1641
1642static void
1643ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
1644{
1645
1646 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
1647 src->scsi.sense_len);
1648 dest->scsiio.scsi_status = src->scsi.scsi_status;
1649 dest->scsiio.sense_len = src->scsi.sense_len;
1650 dest->io_hdr.status = src->hdr.status;
1651}
1652
1653static void
1654ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
1655{
1656
1657 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
1658 src->scsiio.sense_len);
1659 dest->scsi.scsi_status = src->scsiio.scsi_status;
1660 dest->scsi.sense_len = src->scsiio.sense_len;
1661 dest->hdr.status = src->io_hdr.status;
1662}
1663
1664void
1665ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
1666{
1667 struct ctl_softc *softc = lun->ctl_softc;
1668 ctl_ua_type *pu;
1669
1670 if (initidx < softc->init_min || initidx >= softc->init_max)
1671 return;
1672 mtx_assert(&lun->lun_lock, MA_OWNED);
1673 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
1674 if (pu == NULL)
1675 return;
1676 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
1677}
1678
1679void
1680ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
1681{
1682 int i;
1683
1684 mtx_assert(&lun->lun_lock, MA_OWNED);
1685 if (lun->pending_ua[port] == NULL)
1686 return;
1687 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1688 if (port * CTL_MAX_INIT_PER_PORT + i == except)
1689 continue;
1690 lun->pending_ua[port][i] |= ua;
1691 }
1692}
1693
1694void
1695ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
1696{
1697 struct ctl_softc *softc = lun->ctl_softc;
1698 int i;
1699
1700 mtx_assert(&lun->lun_lock, MA_OWNED);
1701 for (i = softc->port_min; i < softc->port_max; i++)
1702 ctl_est_ua_port(lun, i, except, ua);
1703}
1704
1705void
1706ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
1707{
1708 struct ctl_softc *softc = lun->ctl_softc;
1709 ctl_ua_type *pu;
1710
1711 if (initidx < softc->init_min || initidx >= softc->init_max)
1712 return;
1713 mtx_assert(&lun->lun_lock, MA_OWNED);
1714 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
1715 if (pu == NULL)
1716 return;
1717 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
1718}
1719
1720void
1721ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
1722{
1723 struct ctl_softc *softc = lun->ctl_softc;
1724 int i, j;
1725
1726 mtx_assert(&lun->lun_lock, MA_OWNED);
1727 for (i = softc->port_min; i < softc->port_max; i++) {
1728 if (lun->pending_ua[i] == NULL)
1729 continue;
1730 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
1731 if (i * CTL_MAX_INIT_PER_PORT + j == except)
1732 continue;
1733 lun->pending_ua[i][j] &= ~ua;
1734 }
1735 }
1736}
1737
1738void
1739ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
1740 ctl_ua_type ua_type)
1741{
1742 struct ctl_lun *lun;
1743
1744 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
1745 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
1746 mtx_lock(&lun->lun_lock);
1747 ctl_clr_ua(lun, initidx, ua_type);
1748 mtx_unlock(&lun->lun_lock);
1749 }
1750}
1751
1752static int
1753ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
1754{
1755 struct ctl_softc *softc = (struct ctl_softc *)arg1;
1756 struct ctl_lun *lun;
1757 struct ctl_lun_req ireq;
1758 int error, value;
1759
1760 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
1761 error = sysctl_handle_int(oidp, &value, 0, req);
1762 if ((error != 0) || (req->newptr == NULL))
1763 return (error);
1764
1765 mtx_lock(&softc->ctl_lock);
1766 if (value == 0)
1767 softc->flags |= CTL_FLAG_ACTIVE_SHELF;
1768 else
1769 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
1770 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1771 mtx_unlock(&softc->ctl_lock);
1772 bzero(&ireq, sizeof(ireq));
1773 ireq.reqtype = CTL_LUNREQ_MODIFY;
1774 ireq.reqdata.modify.lun_id = lun->lun;
1775 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
1776 curthread);
1777 if (ireq.status != CTL_LUN_OK) {
1778 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
1779 __func__, ireq.status, ireq.error_str);
1780 }
1781 mtx_lock(&softc->ctl_lock);
1782 }
1783 mtx_unlock(&softc->ctl_lock);
1784 return (0);
1785}
1786
1787static int
1788ctl_init(void)
1789{
1790 struct make_dev_args args;
1791 struct ctl_softc *softc;
1792 void *other_pool;
1793 int i, error;
1794
1795 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
1796 M_WAITOK | M_ZERO);
1797
1798 make_dev_args_init(&args);
1799 args.mda_devsw = &ctl_cdevsw;
1800 args.mda_uid = UID_ROOT;
1801 args.mda_gid = GID_OPERATOR;
1802 args.mda_mode = 0600;
1803 args.mda_si_drv1 = softc;
1804 error = make_dev_s(&args, &softc->dev, "cam/ctl");
1805 if (error != 0) {
1806 free(control_softc, M_DEVBUF);
1807 return (error);
1808 }
1809
1810 sysctl_ctx_init(&softc->sysctl_ctx);
1811 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1812 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
1813 CTLFLAG_RD, 0, "CAM Target Layer");
1814
1815 if (softc->sysctl_tree == NULL) {
1816 printf("%s: unable to allocate sysctl tree\n", __func__);
1817 destroy_dev(softc->dev);
1818 free(control_softc, M_DEVBUF);
1819 control_softc = NULL;
1820 return (ENOMEM);
1821 }
1822
1823 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
1824 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
1825 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1826 softc->flags = 0;
1827
1828 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1829 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
1830 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");
1831
1832 /*
1833 * In Copan's HA scheme, the "master" and "slave" roles are
1834 * figured out through the slot the controller is in. Although it
1835 * is an active/active system, someone has to be in charge.
1836 */
1837 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1838 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
1839 "HA head ID (0 - no HA)");
1840 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) {
1841 softc->flags |= CTL_FLAG_ACTIVE_SHELF;
1842 softc->is_single = 1;
1843 softc->port_cnt = CTL_MAX_PORTS;
1844 softc->port_min = 0;
1845 } else {
1846 softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES;
1847 softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
1848 }
1849 softc->port_max = softc->port_min + softc->port_cnt;
1850 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
1851 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;
1852
1853 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1854 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
1855 "HA link state (0 - offline, 1 - unknown, 2 - online)");
1856
1857 STAILQ_INIT(&softc->lun_list);
1858 STAILQ_INIT(&softc->pending_lun_queue);
1859 STAILQ_INIT(&softc->fe_list);
1860 STAILQ_INIT(&softc->port_list);
1861 STAILQ_INIT(&softc->be_list);
1862 ctl_tpc_init(softc);
1863
1864 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
1865 &other_pool) != 0)
1866 {
1867 printf("ctl: can't allocate %d entry other SC pool, "
1868 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
1869 return (ENOMEM);
1870 }
1871 softc->othersc_pool = other_pool;
1872
1873 if (worker_threads <= 0)
1874 worker_threads = max(1, mp_ncpus / 4);
1875 if (worker_threads > CTL_MAX_THREADS)
1876 worker_threads = CTL_MAX_THREADS;
1877
1878 for (i = 0; i < worker_threads; i++) {
1879 struct ctl_thread *thr = &softc->threads[i];
1880
1881 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
1882 thr->ctl_softc = softc;
1883 STAILQ_INIT(&thr->incoming_queue);
1884 STAILQ_INIT(&thr->rtr_queue);
1885 STAILQ_INIT(&thr->done_queue);
1886 STAILQ_INIT(&thr->isc_queue);
1887
1888 error = kproc_kthread_add(ctl_work_thread, thr,
1889 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
1890 if (error != 0) {
1891 printf("error creating CTL work thread!\n");
1892 ctl_pool_free(other_pool);
1893 return (error);
1894 }
1895 }
1896 error = kproc_kthread_add(ctl_lun_thread, softc,
1897 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
1898 if (error != 0) {
1899 printf("error creating CTL lun thread!\n");
1900 ctl_pool_free(other_pool);
1901 return (error);
1902 }
1903 error = kproc_kthread_add(ctl_thresh_thread, softc,
1904 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
1905 if (error != 0) {
1906 printf("error creating CTL threshold thread!\n");
1907 ctl_pool_free(other_pool);
1908 return (error);
1909 }
1910
1911 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1912 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
1913 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
1914
1915 if (softc->is_single == 0) {
1916 ctl_frontend_register(&ha_frontend);
1917 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
1918 printf("ctl_init: ctl_ha_msg_init failed.\n");
1919 softc->is_single = 1;
1920 } else
1921 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
1922 != CTL_HA_STATUS_SUCCESS) {
1923 printf("ctl_init: ctl_ha_msg_register failed.\n");
1924 softc->is_single = 1;
1925 }
1926 }
1927 return (0);
1928}
1929
1930void
1931ctl_shutdown(void)
1932{
1933 struct ctl_softc *softc = control_softc;
1934 struct ctl_lun *lun, *next_lun;
1935
1936 if (softc->is_single == 0) {
1937 ctl_ha_msg_shutdown(softc);
1938 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
1939 != CTL_HA_STATUS_SUCCESS)
1940 printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
1941 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
1942 printf("%s: ctl_ha_msg_destroy failed.\n", __func__);
1943 ctl_frontend_deregister(&ha_frontend);
1944 }
1945
1946 mtx_lock(&softc->ctl_lock);
1947
1948 STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun)
1949 ctl_free_lun(lun);
1950
1951 mtx_unlock(&softc->ctl_lock);
1952
1953#if 0
1954 ctl_shutdown_thread(softc->work_thread);
1955 mtx_destroy(&softc->queue_lock);
1956#endif
1957
1958 ctl_tpc_shutdown(softc);
1959 uma_zdestroy(softc->io_zone);
1960 mtx_destroy(&softc->ctl_lock);
1961
1962 destroy_dev(softc->dev);
1963
1964 sysctl_ctx_free(&softc->sysctl_ctx);
1965
1966 free(control_softc, M_DEVBUF);
1967 control_softc = NULL;
1968}
1969
1970static int
1971ctl_module_event_handler(module_t mod, int what, void *arg)
1972{
1973
1974 switch (what) {
1975 case MOD_LOAD:
1976 return (ctl_init());
1977 case MOD_UNLOAD:
1978 return (EBUSY);
1979 default:
1980 return (EOPNOTSUPP);
1981 }
1982}
1983
1984/*
1985 * XXX KDM should we do some access checks here? Bump a reference count to
1986 * prevent a CTL module from being unloaded while someone has it open?
1987 */
1988static int
1989ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
1990{
1991 return (0);
1992}
1993
1994static int
1995ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
1996{
1997 return (0);
1998}
1999
2000/*
2001 * Remove an initiator by port number and initiator ID.
2002 * Returns 0 for success, -1 for failure.
2003 */
2004int
2005ctl_remove_initiator(struct ctl_port *port, int iid)
2006{
2007 struct ctl_softc *softc = port->ctl_softc;
2008
2009 mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
2010
2011 if (iid > CTL_MAX_INIT_PER_PORT) {
2012 printf("%s: initiator ID %u > maximun %u!\n",
2013 __func__, iid, CTL_MAX_INIT_PER_PORT);
2014 return (-1);
2015 }
2016
2017 mtx_lock(&softc->ctl_lock);
2018 port->wwpn_iid[iid].in_use--;
2019 port->wwpn_iid[iid].last_use = time_uptime;
2020 mtx_unlock(&softc->ctl_lock);
2021 ctl_isc_announce_iid(port, iid);
2022
2023 return (0);
2024}
2025
2026/*
2027 * Add an initiator to the initiator map.
2028 * Returns iid for success, < 0 for failure.
2029 */
2030int
2031ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
2032{
2033 struct ctl_softc *softc = port->ctl_softc;
2034 time_t best_time;
2035 int i, best;
2036
2037 mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
2038
2039 if (iid >= CTL_MAX_INIT_PER_PORT) {
2040 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
2041 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
2042 free(name, M_CTL);
2043 return (-1);
2044 }
2045
2046 mtx_lock(&softc->ctl_lock);
2047
2048 if (iid < 0 && (wwpn != 0 || name != NULL)) {
2049 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2050 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
2051 iid = i;
2052 break;
2053 }
2054 if (name != NULL && port->wwpn_iid[i].name != NULL &&
2055 strcmp(name, port->wwpn_iid[i].name) == 0) {
2056 iid = i;
2057 break;
2058 }
2059 }
2060 }
2061
2062 if (iid < 0) {
2063 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2064 if (port->wwpn_iid[i].in_use == 0 &&
2065 port->wwpn_iid[i].wwpn == 0 &&
2066 port->wwpn_iid[i].name == NULL) {
2067 iid = i;
2068 break;
2069 }
2070 }
2071 }
2072
2073 if (iid < 0) {
2074 best = -1;
2075 best_time = INT32_MAX;
2076 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2077 if (port->wwpn_iid[i].in_use == 0) {
2078 if (port->wwpn_iid[i].last_use < best_time) {
2079 best = i;
2080 best_time = port->wwpn_iid[i].last_use;
2081 }
2082 }
2083 }
2084 iid = best;
2085 }
2086
2087 if (iid < 0) {
2088 mtx_unlock(&softc->ctl_lock);
2089 free(name, M_CTL);
2090 return (-2);
2091 }
2092
2093 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
2094 /*
2095 * This is not an error yet.
2096 */
2097 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
2098#if 0
2099 printf("%s: port %d iid %u WWPN %#jx arrived"
2100 " again\n", __func__, port->targ_port,
2101 iid, (uintmax_t)wwpn);
2102#endif
2103 goto take;
2104 }
2105 if (name != NULL && port->wwpn_iid[iid].name != NULL &&
2106 strcmp(name, port->wwpn_iid[iid].name) == 0) {
2107#if 0
2108 printf("%s: port %d iid %u name '%s' arrived"
2109 " again\n", __func__, port->targ_port,
2110 iid, name);
2111#endif
2112 goto take;
2113 }
2114
2115 /*
2116 * This is an error, but what do we do about it? The
2117 * driver is telling us we have a new WWPN for this
2118 * initiator ID, so we pretty much need to use it.
2119 */
2120 printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
2121 " but WWPN %#jx '%s' is still at that address\n",
2122 __func__, port->targ_port, iid, wwpn, name,
2123 (uintmax_t)port->wwpn_iid[iid].wwpn,
2124 port->wwpn_iid[iid].name);
2125
2126 /*
2127 * XXX KDM clear have_ca and ua_pending on each LUN for
2128 * this initiator.
2129 */
2130 }
2131take:
2132 free(port->wwpn_iid[iid].name, M_CTL);
2133 port->wwpn_iid[iid].name = name;
2134 port->wwpn_iid[iid].wwpn = wwpn;
2135 port->wwpn_iid[iid].in_use++;
2136 mtx_unlock(&softc->ctl_lock);
2137 ctl_isc_announce_iid(port, iid);
2138
2139 return (iid);
2140}
2141
2142static int
2143ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
2144{
2145 int len;
2146
2147 switch (port->port_type) {
2148 case CTL_PORT_FC:
2149 {
2150 struct scsi_transportid_fcp *id =
2151 (struct scsi_transportid_fcp *)buf;
2152 if (port->wwpn_iid[iid].wwpn == 0)
2153 return (0);
2154 memset(id, 0, sizeof(*id));
2155 id->format_protocol = SCSI_PROTO_FC;
2156 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
2157 return (sizeof(*id));
2158 }
2159 case CTL_PORT_ISCSI:
2160 {
2161 struct scsi_transportid_iscsi_port *id =
2162 (struct scsi_transportid_iscsi_port *)buf;
2163 if (port->wwpn_iid[iid].name == NULL)
2164 return (0);
2165 memset(id, 0, 256);
2166 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
2167 SCSI_PROTO_ISCSI;
2168 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
2169 len = roundup2(min(len, 252), 4);
2170 scsi_ulto2b(len, id->additional_length);
2171 return (sizeof(*id) + len);
2172 }
2173 case CTL_PORT_SAS:
2174 {
2175 struct scsi_transportid_sas *id =
2176 (struct scsi_transportid_sas *)buf;
2177 if (port->wwpn_iid[iid].wwpn == 0)
2178 return (0);
2179 memset(id, 0, sizeof(*id));
2180 id->format_protocol = SCSI_PROTO_SAS;
2181 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
2182 return (sizeof(*id));
2183 }
2184 default:
2185 {
2186 struct scsi_transportid_spi *id =
2187 (struct scsi_transportid_spi *)buf;
2188 memset(id, 0, sizeof(*id));
2189 id->format_protocol = SCSI_PROTO_SPI;
2190 scsi_ulto2b(iid, id->scsi_addr);
2191 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
2192 return (sizeof(*id));
2193 }
2194 }
2195}
2196
2197/*
2198 * Serialize a command that went down the "wrong" side, and so was sent to
2199 * this controller for execution. The logic is a little different than the
2200 * standard case in ctl_scsiio_precheck(). Errors in this case need to get
2201 * sent back to the other side, but in the success case, we execute the
2202 * command on this side (XFER mode) or tell the other side to execute it
2203 * (SER_ONLY mode).
2204 */
2205static void
2206ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
2207{
2208 struct ctl_softc *softc = control_softc;
2209 union ctl_ha_msg msg_info;
2210 struct ctl_port *port;
2211 struct ctl_lun *lun;
2212 const struct ctl_cmd_entry *entry;
2213 uint32_t targ_lun;
2214
2215 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
2216 mtx_lock(&softc->ctl_lock);
2217
2218 /* Make sure that we know about this port. */
2219 port = ctl_io_port(&ctsio->io_hdr);
2220 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
2221 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2222 /*retry_count*/ 1);
2223 goto badjuju;
2224 }
2225
2226 /* Make sure that we know about this LUN. */
2227 if (targ_lun >= CTL_MAX_LUNS ||
2228 (lun = softc->ctl_luns[targ_lun]) == NULL) {
2229 mtx_unlock(&softc->ctl_lock);
2230
2231 /*
2232 * The other node would not send this request to us unless
2233 * received announce that we are primary node for this LUN.
2234 * If this LUN does not exist now, it is probably result of
2235 * a race, so respond to initiator in the most opaque way.
2236 */
2237 ctl_set_busy(ctsio);
2238 goto badjuju;
2239 }
2240 mtx_lock(&lun->lun_lock);
2241 mtx_unlock(&softc->ctl_lock);
2242
2243 /*
2244 * If the LUN is invalid, pretend that it doesn't exist.
2245 * It will go away as soon as all pending I/Os completed.
2246 */
2247 if (lun->flags & CTL_LUN_DISABLED) {
2248 mtx_unlock(&lun->lun_lock);
2249 ctl_set_busy(ctsio);
2250 goto badjuju;
2251 }
2252
2253 entry = ctl_get_cmd_entry(ctsio, NULL);
2254 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
2255 mtx_unlock(&lun->lun_lock);
2256 goto badjuju;
2257 }
2258
2259 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
2260 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;
2261
2262 /*
2263 * Every I/O goes into the OOA queue for a
2264 * particular LUN, and stays there until completion.
2265 */
2266#ifdef CTL_TIME_IO
2267 if (TAILQ_EMPTY(&lun->ooa_queue))
2268 lun->idle_time += getsbinuptime() - lun->last_busy;
2269#endif
2270 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2271
2272 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
2273 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
2274 ooa_links))) {
2275 case CTL_ACTION_BLOCK:
2276 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
2277 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
2278 blocked_links);
2279 mtx_unlock(&lun->lun_lock);
2280 break;
2281 case CTL_ACTION_PASS:
2282 case CTL_ACTION_SKIP:
2283 if (softc->ha_mode == CTL_HA_MODE_XFER) {
2284 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
2285 ctl_enqueue_rtr((union ctl_io *)ctsio);
2286 mtx_unlock(&lun->lun_lock);
2287 } else {
2288 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
2289 mtx_unlock(&lun->lun_lock);
2290
2291 /* send msg back to other side */
2292 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
2293 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
2294 msg_info.hdr.msg_type = CTL_MSG_R2R;
2295 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
2296 sizeof(msg_info.hdr), M_WAITOK);
2297 }
2298 break;
2299 case CTL_ACTION_OVERLAP:
2300 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2301 mtx_unlock(&lun->lun_lock);
2302 ctl_set_overlapped_cmd(ctsio);
2303 goto badjuju;
2304 case CTL_ACTION_OVERLAP_TAG:
2305 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2306 mtx_unlock(&lun->lun_lock);
2307 ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
2308 goto badjuju;
2309 case CTL_ACTION_ERROR:
2310 default:
2311 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2312 mtx_unlock(&lun->lun_lock);
2313
2314 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2315 /*retry_count*/ 0);
2316badjuju:
2317 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
2318 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
2319 msg_info.hdr.serializing_sc = NULL;
2320 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
2321 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
2322 sizeof(msg_info.scsi), M_WAITOK);
2323 ctl_free_io((union ctl_io *)ctsio);
2324 break;
2325 }
2326}
2327
2328/*
2329 * Returns 0 for success, errno for failure.
2330 */
2331static void
2332ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
2333 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
2334{
2335 union ctl_io *io;
2336
2337 mtx_lock(&lun->lun_lock);
2338 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
2339 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2340 ooa_links)) {
2341 struct ctl_ooa_entry *entry;
2342
2343 /*
2344 * If we've got more than we can fit, just count the
2345 * remaining entries.
2346 */
2347 if (*cur_fill_num >= ooa_hdr->alloc_num)
2348 continue;
2349
2350 entry = &kern_entries[*cur_fill_num];
2351
2352 entry->tag_num = io->scsiio.tag_num;
2353 entry->lun_num = lun->lun;
2354#ifdef CTL_TIME_IO
2355 entry->start_bt = io->io_hdr.start_bt;
2356#endif
2357 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
2358 entry->cdb_len = io->scsiio.cdb_len;
2359 if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
2360 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
2361
2362 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
2363 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;
2364
2365 if (io->io_hdr.flags & CTL_FLAG_ABORT)
2366 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;
2367
2368 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
2369 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;
2370
2371 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
2372 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
2373 }
2374 mtx_unlock(&lun->lun_lock);
2375}
2376
2377static void *
2378ctl_copyin_alloc(void *user_addr, unsigned int len, char *error_str,
2379 size_t error_str_len)
2380{
2381 void *kptr;
2382
2383 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);
2384
2385 if (copyin(user_addr, kptr, len) != 0) {
2386 snprintf(error_str, error_str_len, "Error copying %d bytes "
2387 "from user address %p to kernel address %p", len,
2388 user_addr, kptr);
2389 free(kptr, M_CTL);
2390 return (NULL);
2391 }
2392
2393 return (kptr);
2394}
2395
2396static void
2397ctl_free_args(int num_args, struct ctl_be_arg *args)
2398{
2399 int i;
2400
2401 if (args == NULL)
2402 return;
2403
2404 for (i = 0; i < num_args; i++) {
2405 free(args[i].kname, M_CTL);
2406 free(args[i].kvalue, M_CTL);
2407 }
2408
2409 free(args, M_CTL);
2410}
2411
2412static struct ctl_be_arg *
2413ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
2414 char *error_str, size_t error_str_len)
2415{
2416 struct ctl_be_arg *args;
2417 int i;
2418
2419 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
2420 error_str, error_str_len);
2421
2422 if (args == NULL)
2423 goto bailout;
2424
2425 for (i = 0; i < num_args; i++) {
2426 args[i].kname = NULL;
2427 args[i].kvalue = NULL;
2428 }
2429
2430 for (i = 0; i < num_args; i++) {
2431 uint8_t *tmpptr;
2432
2433 if (args[i].namelen == 0) {
2434 snprintf(error_str, error_str_len, "Argument %d "
2435 "name length is zero", i);
2436 goto bailout;
2437 }
2438
2439 args[i].kname = ctl_copyin_alloc(args[i].name,
2440 args[i].namelen, error_str, error_str_len);
2441 if (args[i].kname == NULL)
2442 goto bailout;
2443
2444 if (args[i].kname[args[i].namelen - 1] != '\0') {
2445 snprintf(error_str, error_str_len, "Argument %d "
2446 "name is not NUL-terminated", i);
2447 goto bailout;
2448 }
2449
2450 if (args[i].flags & CTL_BEARG_RD) {
2451 if (args[i].vallen == 0) {
2452 snprintf(error_str, error_str_len, "Argument %d "
2453 "value length is zero", i);
2454 goto bailout;
2455 }
2456
2457 tmpptr = ctl_copyin_alloc(args[i].value,
2458 args[i].vallen, error_str, error_str_len);
2459 if (tmpptr == NULL)
2460 goto bailout;
2461
2462 if ((args[i].flags & CTL_BEARG_ASCII)
2463 && (tmpptr[args[i].vallen - 1] != '\0')) {
2464 snprintf(error_str, error_str_len, "Argument "
2465 "%d value is not NUL-terminated", i);
2466 free(tmpptr, M_CTL);
2467 goto bailout;
2468 }
2469 args[i].kvalue = tmpptr;
2470 } else {
2471 args[i].kvalue = malloc(args[i].vallen,
2472 M_CTL, M_WAITOK | M_ZERO);
2473 }
2474 }
2475
2476 return (args);
2477bailout:
2478
2479 ctl_free_args(num_args, args);
2480
2481 return (NULL);
2482}
2483
2484static void
2485ctl_copyout_args(int num_args, struct ctl_be_arg *args)
2486{
2487 int i;
2488
2489 for (i = 0; i < num_args; i++) {
2490 if (args[i].flags & CTL_BEARG_WR)
2491 copyout(args[i].kvalue, args[i].value, args[i].vallen);
2492 }
2493}
2494
2495/*
2496 * Escape characters that are illegal or not recommended in XML.
2497 */
2498int
2499ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
2500{
2501 char *end = str + size;
2502 int retval;
2503
2504 retval = 0;
2505
2506 for (; *str && str < end; str++) {
2507 switch (*str) {
2508 case '&':
2509 retval = sbuf_printf(sb, "&amp;");
2510 break;
2511 case '>':
2512 retval = sbuf_printf(sb, "&gt;");
2513 break;
2514 case '<':
2515 retval = sbuf_printf(sb, "&lt;");
2516 break;
2517 default:
2518 retval = sbuf_putc(sb, *str);
2519 break;
2520 }
2521
2522 if (retval != 0)
2523 break;
2524
2525 }
2526
2527 return (retval);
2528}
2529
2530static void
2531ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
2532{
2533 struct scsi_vpd_id_descriptor *desc;
2534 int i;
2535
2536 if (id == NULL || id->len < 4)
2537 return;
2538 desc = (struct scsi_vpd_id_descriptor *)id->data;
2539 switch (desc->id_type & SVPD_ID_TYPE_MASK) {
2540 case SVPD_ID_TYPE_T10:
2541 sbuf_printf(sb, "t10.");
2542 break;
2543 case SVPD_ID_TYPE_EUI64:
2544 sbuf_printf(sb, "eui.");
2545 break;
2546 case SVPD_ID_TYPE_NAA:
2547 sbuf_printf(sb, "naa.");
2548 break;
2549 case SVPD_ID_TYPE_SCSI_NAME:
2550 break;
2551 }
2552 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
2553 case SVPD_ID_CODESET_BINARY:
2554 for (i = 0; i < desc->length; i++)
2555 sbuf_printf(sb, "%02x", desc->identifier[i]);
2556 break;
2557 case SVPD_ID_CODESET_ASCII:
2558 sbuf_printf(sb, "%.*s", (int)desc->length,
2559 (char *)desc->identifier);
2560 break;
2561 case SVPD_ID_CODESET_UTF8:
2562 sbuf_printf(sb, "%s", (char *)desc->identifier);
2563 break;
2564 }
2565}
2566
2567static int
2568ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2569 struct thread *td)
2570{
2571 struct ctl_softc *softc = dev->si_drv1;
2572 struct ctl_lun *lun;
2573 int retval;
2574
2575 retval = 0;
2576
2577 switch (cmd) {
2578 case CTL_IO:
2579 retval = ctl_ioctl_io(dev, cmd, addr, flag, td);
2580 break;
2581 case CTL_ENABLE_PORT:
2582 case CTL_DISABLE_PORT:
2583 case CTL_SET_PORT_WWNS: {
2584 struct ctl_port *port;
2585 struct ctl_port_entry *entry;
2586
2587 entry = (struct ctl_port_entry *)addr;
2588
2589 mtx_lock(&softc->ctl_lock);
2590 STAILQ_FOREACH(port, &softc->port_list, links) {
2591 int action, done;
2592
2593 if (port->targ_port < softc->port_min ||
2594 port->targ_port >= softc->port_max)
2595 continue;
2596
2597 action = 0;
2598 done = 0;
2599 if ((entry->port_type == CTL_PORT_NONE)
2600 && (entry->targ_port == port->targ_port)) {
2601 /*
2602 * If the user only wants to enable or
2603 * disable or set WWNs on a specific port,
2604 * do the operation and we're done.
2605 */
2606 action = 1;
2607 done = 1;
2608 } else if (entry->port_type & port->port_type) {
2609 /*
2610 * Compare the user's type mask with the
2611 * particular frontend type to see if we
2612 * have a match.
2613 */
2614 action = 1;
2615 done = 0;
2616
2617 /*
2618 * Make sure the user isn't trying to set
2619 * WWNs on multiple ports at the same time.
2620 */
2621 if (cmd == CTL_SET_PORT_WWNS) {
2622 printf("%s: Can't set WWNs on "
2623 "multiple ports\n", __func__);
2624 retval = EINVAL;
2625 break;
2626 }
2627 }
2628 if (action == 0)
2629 continue;
2630
2631 /*
2632 * XXX KDM we have to drop the lock here, because
2633 * the online/offline operations can potentially
2634 * block. We need to reference count the frontends
2635 * so they can't go away,
2636 */
2637 if (cmd == CTL_ENABLE_PORT) {
2638 mtx_unlock(&softc->ctl_lock);
2639 ctl_port_online(port);
2640 mtx_lock(&softc->ctl_lock);
2641 } else if (cmd == CTL_DISABLE_PORT) {
2642 mtx_unlock(&softc->ctl_lock);
2643 ctl_port_offline(port);
2644 mtx_lock(&softc->ctl_lock);
2645 } else if (cmd == CTL_SET_PORT_WWNS) {
2646 ctl_port_set_wwns(port,
2647 (entry->flags & CTL_PORT_WWNN_VALID) ?
2648 1 : 0, entry->wwnn,
2649 (entry->flags & CTL_PORT_WWPN_VALID) ?
2650 1 : 0, entry->wwpn);
2651 }
2652 if (done != 0)
2653 break;
2654 }
2655 mtx_unlock(&softc->ctl_lock);
2656 break;
2657 }
2658 case CTL_GET_OOA: {
2659 struct ctl_ooa *ooa_hdr;
2660 struct ctl_ooa_entry *entries;
2661 uint32_t cur_fill_num;
2662
2663 ooa_hdr = (struct ctl_ooa *)addr;
2664
2665 if ((ooa_hdr->alloc_len == 0)
2666 || (ooa_hdr->alloc_num == 0)) {
2667 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
2668 "must be non-zero\n", __func__,
2669 ooa_hdr->alloc_len, ooa_hdr->alloc_num);
2670 retval = EINVAL;
2671 break;
2672 }
2673
2674 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
2675 sizeof(struct ctl_ooa_entry))) {
2676 printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
2677 "num %d * sizeof(struct ctl_ooa_entry) %zd\n",
2678 __func__, ooa_hdr->alloc_len,
2679 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry));
2680 retval = EINVAL;
2681 break;
2682 }
2683
2684 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
2685 if (entries == NULL) {
2686 printf("%s: could not allocate %d bytes for OOA "
2687 "dump\n", __func__, ooa_hdr->alloc_len);
2688 retval = ENOMEM;
2689 break;
2690 }
2691
2692 mtx_lock(&softc->ctl_lock);
2693 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 &&
2694 (ooa_hdr->lun_num >= CTL_MAX_LUNS ||
2695 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) {
2696 mtx_unlock(&softc->ctl_lock);
2697 free(entries, M_CTL);
2698 printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
2699 __func__, (uintmax_t)ooa_hdr->lun_num);
2700 retval = EINVAL;
2701 break;
2702 }
2703
2704 cur_fill_num = 0;
2705
2706 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
2707 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2708 ctl_ioctl_fill_ooa(lun, &cur_fill_num,
2709 ooa_hdr, entries);
2710 }
2711 } else {
2712 lun = softc->ctl_luns[ooa_hdr->lun_num];
2713 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
2714 entries);
2715 }
2716 mtx_unlock(&softc->ctl_lock);
2717
2718 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
2719 ooa_hdr->fill_len = ooa_hdr->fill_num *
2720 sizeof(struct ctl_ooa_entry);
2721 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
2722 if (retval != 0) {
2723 printf("%s: error copying out %d bytes for OOA dump\n",
2724 __func__, ooa_hdr->fill_len);
2725 }
2726
2727 getbinuptime(&ooa_hdr->cur_bt);
2728
2729 if (cur_fill_num > ooa_hdr->alloc_num) {
2730 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num;
2731 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
2732 } else {
2733 ooa_hdr->dropped_num = 0;
2734 ooa_hdr->status = CTL_OOA_OK;
2735 }
2736
2737 free(entries, M_CTL);
2738 break;
2739 }
2740 case CTL_DELAY_IO: {
2741 struct ctl_io_delay_info *delay_info;
2742
2743 delay_info = (struct ctl_io_delay_info *)addr;
2744
2745#ifdef CTL_IO_DELAY
2746 mtx_lock(&softc->ctl_lock);
2747 if (delay_info->lun_id >= CTL_MAX_LUNS ||
2748 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) {
2749 mtx_unlock(&softc->ctl_lock);
2750 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2751 break;
2752 }
2753 mtx_lock(&lun->lun_lock);
2754 mtx_unlock(&softc->ctl_lock);
2755 delay_info->status = CTL_DELAY_STATUS_OK;
2756 switch (delay_info->delay_type) {
2757 case CTL_DELAY_TYPE_CONT:
2758 case CTL_DELAY_TYPE_ONESHOT:
2759 break;
2760 default:
2761 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE;
2762 break;
2763 }
2764 switch (delay_info->delay_loc) {
2765 case CTL_DELAY_LOC_DATAMOVE:
2766 lun->delay_info.datamove_type = delay_info->delay_type;
2767 lun->delay_info.datamove_delay = delay_info->delay_secs;
2768 break;
2769 case CTL_DELAY_LOC_DONE:
2770 lun->delay_info.done_type = delay_info->delay_type;
2771 lun->delay_info.done_delay = delay_info->delay_secs;
2772 break;
2773 default:
2774 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC;
2775 break;
2776 }
2777 mtx_unlock(&lun->lun_lock);
2778#else
2779 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2780#endif /* CTL_IO_DELAY */
2781 break;
2782 }
2783 case CTL_GETSTATS: {
2784 struct ctl_stats *stats;
2785 int i;
2786
2787 stats = (struct ctl_stats *)addr;
2788
2789 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
2790 stats->alloc_len) {
2791 stats->status = CTL_SS_NEED_MORE_SPACE;
2792 stats->num_luns = softc->num_luns;
2793 break;
2794 }
2795 /*
2796 * XXX KDM no locking here. If the LUN list changes,
2797 * things can blow up.
2798 */
2799 i = 0;
2800 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2801 retval = copyout(&lun->stats, &stats->lun_stats[i++],
2802 sizeof(lun->stats));
2803 if (retval != 0)
2804 break;
2805 }
2806 stats->num_luns = softc->num_luns;
2807 stats->fill_len = sizeof(struct ctl_lun_io_stats) *
2808 softc->num_luns;
2809 stats->status = CTL_SS_OK;
2810#ifdef CTL_TIME_IO
2811 stats->flags = CTL_STATS_FLAG_TIME_VALID;
2812#else
2813 stats->flags = CTL_STATS_FLAG_NONE;
2814#endif
2815 getnanouptime(&stats->timestamp);
2816 break;
2817 }
2818 case CTL_ERROR_INJECT: {
2819 struct ctl_error_desc *err_desc, *new_err_desc;
2820
2821 err_desc = (struct ctl_error_desc *)addr;
2822
2823 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
2824 M_WAITOK | M_ZERO);
2825 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
2826
2827 mtx_lock(&softc->ctl_lock);
2828 if (err_desc->lun_id >= CTL_MAX_LUNS ||
2829 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) {
2830 mtx_unlock(&softc->ctl_lock);
2831 free(new_err_desc, M_CTL);
2832 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
2833 __func__, (uintmax_t)err_desc->lun_id);
2834 retval = EINVAL;
2835 break;
2836 }
2837 mtx_lock(&lun->lun_lock);
2838 mtx_unlock(&softc->ctl_lock);
2839
2840 /*
2841 * We could do some checking here to verify the validity
2842 * of the request, but given the complexity of error
2843 * injection requests, the checking logic would be fairly
2844 * complex.
2845 *
2846 * For now, if the request is invalid, it just won't get
2847 * executed and might get deleted.
2848 */
2849 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
2850
2851 /*
2852 * XXX KDM check to make sure the serial number is unique,
2853 * in case we somehow manage to wrap. That shouldn't
2854 * happen for a very long time, but it's the right thing to
2855 * do.
2856 */
2857 new_err_desc->serial = lun->error_serial;
2858 err_desc->serial = lun->error_serial;
2859 lun->error_serial++;
2860
2861 mtx_unlock(&lun->lun_lock);
2862 break;
2863 }
2864 case CTL_ERROR_INJECT_DELETE: {
2865 struct ctl_error_desc *delete_desc, *desc, *desc2;
2866 int delete_done;
2867
2868 delete_desc = (struct ctl_error_desc *)addr;
2869 delete_done = 0;
2870
2871 mtx_lock(&softc->ctl_lock);
2872 if (delete_desc->lun_id >= CTL_MAX_LUNS ||
2873 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) {
2874 mtx_unlock(&softc->ctl_lock);
2875 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
2876 __func__, (uintmax_t)delete_desc->lun_id);
2877 retval = EINVAL;
2878 break;
2879 }
2880 mtx_lock(&lun->lun_lock);
2881 mtx_unlock(&softc->ctl_lock);
2882 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
2883 if (desc->serial != delete_desc->serial)
2884 continue;
2885
2886 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
2887 links);
2888 free(desc, M_CTL);
2889 delete_done = 1;
2890 }
2891 mtx_unlock(&lun->lun_lock);
2892 if (delete_done == 0) {
2893 printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
2894 "error serial %ju on LUN %u\n", __func__,
2895 delete_desc->serial, delete_desc->lun_id);
2896 retval = EINVAL;
2897 break;
2898 }
2899 break;
2900 }
2901 case CTL_DUMP_STRUCTS: {
2902 int j, k;
2903 struct ctl_port *port;
2904 struct ctl_frontend *fe;
2905
2906 mtx_lock(&softc->ctl_lock);
2907 printf("CTL Persistent Reservation information start:\n");
2908 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2909 mtx_lock(&lun->lun_lock);
2910 if ((lun->flags & CTL_LUN_DISABLED) != 0) {
2911 mtx_unlock(&lun->lun_lock);
2912 continue;
2913 }
2914
2915 for (j = 0; j < CTL_MAX_PORTS; j++) {
2916 if (lun->pr_keys[j] == NULL)
2917 continue;
2918 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
2919 if (lun->pr_keys[j][k] == 0)
2920 continue;
2921 printf(" LUN %ju port %d iid %d key "
2922 "%#jx\n", lun->lun, j, k,
2923 (uintmax_t)lun->pr_keys[j][k]);
2924 }
2925 }
2926 mtx_unlock(&lun->lun_lock);
2927 }
2928 printf("CTL Persistent Reservation information end\n");
2929 printf("CTL Ports:\n");
2930 STAILQ_FOREACH(port, &softc->port_list, links) {
2931 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
2932 "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
2933 port->frontend->name, port->port_type,
2934 port->physical_port, port->virtual_port,
2935 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
2936 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
2937 if (port->wwpn_iid[j].in_use == 0 &&
2938 port->wwpn_iid[j].wwpn == 0 &&
2939 port->wwpn_iid[j].name == NULL)
2940 continue;
2941
2942 printf(" iid %u use %d WWPN %#jx '%s'\n",
2943 j, port->wwpn_iid[j].in_use,
2944 (uintmax_t)port->wwpn_iid[j].wwpn,
2945 port->wwpn_iid[j].name);
2946 }
2947 }
2948 printf("CTL Port information end\n");
2949 mtx_unlock(&softc->ctl_lock);
2950 /*
2951 * XXX KDM calling this without a lock. We'd likely want
2952 * to drop the lock before calling the frontend's dump
2953 * routine anyway.
2954 */
2955 printf("CTL Frontends:\n");
2956 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2957 printf(" Frontend '%s'\n", fe->name);
2958 if (fe->fe_dump != NULL)
2959 fe->fe_dump();
2960 }
2961 printf("CTL Frontend information end\n");
2962 break;
2963 }
2964 case CTL_LUN_REQ: {
2965 struct ctl_lun_req *lun_req;
2966 struct ctl_backend_driver *backend;
2967
2968 lun_req = (struct ctl_lun_req *)addr;
2969
2970 backend = ctl_backend_find(lun_req->backend);
2971 if (backend == NULL) {
2972 lun_req->status = CTL_LUN_ERROR;
2973 snprintf(lun_req->error_str,
2974 sizeof(lun_req->error_str),
2975 "Backend \"%s\" not found.",
2976 lun_req->backend);
2977 break;
2978 }
2979 if (lun_req->num_be_args > 0) {
2980 lun_req->kern_be_args = ctl_copyin_args(
2981 lun_req->num_be_args,
2982 lun_req->be_args,
2983 lun_req->error_str,
2984 sizeof(lun_req->error_str));
2985 if (lun_req->kern_be_args == NULL) {
2986 lun_req->status = CTL_LUN_ERROR;
2987 break;
2988 }
2989 }
2990
2991 retval = backend->ioctl(dev, cmd, addr, flag, td);
2992
2993 if (lun_req->num_be_args > 0) {
2994 ctl_copyout_args(lun_req->num_be_args,
2995 lun_req->kern_be_args);
2996 ctl_free_args(lun_req->num_be_args,
2997 lun_req->kern_be_args);
2998 }
2999 break;
3000 }
3001 case CTL_LUN_LIST: {
3002 struct sbuf *sb;
3003 struct ctl_lun_list *list;
3004 struct ctl_option *opt;
3005
3006 list = (struct ctl_lun_list *)addr;
3007
3008 /*
3009 * Allocate a fixed length sbuf here, based on the length
3010 * of the user's buffer. We could allocate an auto-extending
3011 * buffer, and then tell the user how much larger our
3012 * amount of data is than his buffer, but that presents
3013 * some problems:
3014 *
3015 * 1. The sbuf(9) routines use a blocking malloc, and so
3016 * we can't hold a lock while calling them with an
3017 * auto-extending buffer.
3018 *
3019 * 2. There is not currently a LUN reference counting
3020 * mechanism, outside of outstanding transactions on
3021 * the LUN's OOA queue. So a LUN could go away on us
3022 * while we're getting the LUN number, backend-specific
3023 * information, etc. Thus, given the way things
3024 * currently work, we need to hold the CTL lock while
3025 * grabbing LUN information.
3026 *
3027 * So, from the user's standpoint, the best thing to do is
3028 * allocate what he thinks is a reasonable buffer length,
3029 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
3030 * double the buffer length and try again. (And repeat
3031 * that until he succeeds.)
3032 */
3033 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3034 if (sb == NULL) {
3035 list->status = CTL_LUN_LIST_ERROR;
3036 snprintf(list->error_str, sizeof(list->error_str),
3037 "Unable to allocate %d bytes for LUN list",
3038 list->alloc_len);
3039 break;
3040 }
3041
3042 sbuf_printf(sb, "<ctllunlist>\n");
3043
3044 mtx_lock(&softc->ctl_lock);
3045 STAILQ_FOREACH(lun, &softc->lun_list, links) {
3046 mtx_lock(&lun->lun_lock);
3047 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
3048 (uintmax_t)lun->lun);
3049
3050 /*
3051 * Bail out as soon as we see that we've overfilled
3052 * the buffer.
3053 */
3054 if (retval != 0)
3055 break;
3056
3057 retval = sbuf_printf(sb, "\t<backend_type>%s"
3058 "</backend_type>\n",
3059 (lun->backend == NULL) ? "none" :
3060 lun->backend->name);
3061
3062 if (retval != 0)
3063 break;
3064
3065 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
3066 lun->be_lun->lun_type);
3067
3068 if (retval != 0)
3069 break;
3070
3071 if (lun->backend == NULL) {
3072 retval = sbuf_printf(sb, "</lun>\n");
3073 if (retval != 0)
3074 break;
3075 continue;
3076 }
3077
3078 retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
3079 (lun->be_lun->maxlba > 0) ?
3080 lun->be_lun->maxlba + 1 : 0);
3081
3082 if (retval != 0)
3083 break;
3084
3085 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
3086 lun->be_lun->blocksize);
3087
3088 if (retval != 0)
3089 break;
3090
3091 retval = sbuf_printf(sb, "\t<serial_number>");
3092
3093 if (retval != 0)
3094 break;
3095
3096 retval = ctl_sbuf_printf_esc(sb,
3097 lun->be_lun->serial_num,
3098 sizeof(lun->be_lun->serial_num));
3099
3100 if (retval != 0)
3101 break;
3102
3103 retval = sbuf_printf(sb, "</serial_number>\n");
3104
3105 if (retval != 0)
3106 break;
3107
3108 retval = sbuf_printf(sb, "\t<device_id>");
3109
3110 if (retval != 0)
3111 break;
3112
3113 retval = ctl_sbuf_printf_esc(sb,
3114 lun->be_lun->device_id,
3115 sizeof(lun->be_lun->device_id));
3116
3117 if (retval != 0)
3118 break;
3119
3120 retval = sbuf_printf(sb, "</device_id>\n");
3121
3122 if (retval != 0)
3123 break;
3124
3125 if (lun->backend->lun_info != NULL) {
3126 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb);
3127 if (retval != 0)
3128 break;
3129 }
3130 STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
3131 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3132 opt->name, opt->value, opt->name);
3133 if (retval != 0)
3134 break;
3135 }
3136
3137 retval = sbuf_printf(sb, "</lun>\n");
3138
3139 if (retval != 0)
3140 break;
3141 mtx_unlock(&lun->lun_lock);
3142 }
3143 if (lun != NULL)
3144 mtx_unlock(&lun->lun_lock);
3145 mtx_unlock(&softc->ctl_lock);
3146
3147 if ((retval != 0)
3148 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
3149 retval = 0;
3150 sbuf_delete(sb);
3151 list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3152 snprintf(list->error_str, sizeof(list->error_str),
3153 "Out of space, %d bytes is too small",
3154 list->alloc_len);
3155 break;
3156 }
3157
3158 sbuf_finish(sb);
3159
3160 retval = copyout(sbuf_data(sb), list->lun_xml,
3161 sbuf_len(sb) + 1);
3162
3163 list->fill_len = sbuf_len(sb) + 1;
3164 list->status = CTL_LUN_LIST_OK;
3165 sbuf_delete(sb);
3166 break;
3167 }
3168 case CTL_ISCSI: {
3169 struct ctl_iscsi *ci;
3170 struct ctl_frontend *fe;
3171
3172 ci = (struct ctl_iscsi *)addr;
3173
3174 fe = ctl_frontend_find("iscsi");
3175 if (fe == NULL) {
3176 ci->status = CTL_ISCSI_ERROR;
3177 snprintf(ci->error_str, sizeof(ci->error_str),
3178 "Frontend \"iscsi\" not found.");
3179 break;
3180 }
3181
3182 retval = fe->ioctl(dev, cmd, addr, flag, td);
3183 break;
3184 }
3185 case CTL_PORT_REQ: {
3186 struct ctl_req *req;
3187 struct ctl_frontend *fe;
3188
3189 req = (struct ctl_req *)addr;
3190
3191 fe = ctl_frontend_find(req->driver);
3192 if (fe == NULL) {
3193 req->status = CTL_LUN_ERROR;
3194 snprintf(req->error_str, sizeof(req->error_str),
3195 "Frontend \"%s\" not found.", req->driver);
3196 break;
3197 }
3198 if (req->num_args > 0) {
3199 req->kern_args = ctl_copyin_args(req->num_args,
3200 req->args, req->error_str, sizeof(req->error_str));
3201 if (req->kern_args == NULL) {
3202 req->status = CTL_LUN_ERROR;
3203 break;
3204 }
3205 }
3206
3207 if (fe->ioctl)
3208 retval = fe->ioctl(dev, cmd, addr, flag, td);
3209 else
3210 retval = ENODEV;
3211
3212 if (req->num_args > 0) {
3213 ctl_copyout_args(req->num_args, req->kern_args);
3214 ctl_free_args(req->num_args, req->kern_args);
3215 }
3216 break;
3217 }
3218 case CTL_PORT_LIST: {
3219 struct sbuf *sb;
3220 struct ctl_port *port;
3221 struct ctl_lun_list *list;
3222 struct ctl_option *opt;
3223 int j;
3224 uint32_t plun;
3225
3226 list = (struct ctl_lun_list *)addr;
3227
3228 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3229 if (sb == NULL) {
3230 list->status = CTL_LUN_LIST_ERROR;
3231 snprintf(list->error_str, sizeof(list->error_str),
3232 "Unable to allocate %d bytes for LUN list",
3233 list->alloc_len);
3234 break;
3235 }
3236
3237 sbuf_printf(sb, "<ctlportlist>\n");
3238
3239 mtx_lock(&softc->ctl_lock);
3240 STAILQ_FOREACH(port, &softc->port_list, links) {
3241 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
3242 (uintmax_t)port->targ_port);
3243
3244 /*
3245 * Bail out as soon as we see that we've overfilled
3246 * the buffer.
3247 */
3248 if (retval != 0)
3249 break;
3250
3251 retval = sbuf_printf(sb, "\t<frontend_type>%s"
3252 "</frontend_type>\n", port->frontend->name);
3253 if (retval != 0)
3254 break;
3255
3256 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
3257 port->port_type);
3258 if (retval != 0)
3259 break;
3260
3261 retval = sbuf_printf(sb, "\t<online>%s</online>\n",
3262 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
3263 if (retval != 0)
3264 break;
3265
3266 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
3267 port->port_name);
3268 if (retval != 0)
3269 break;
3270
3271 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
3272 port->physical_port);
3273 if (retval != 0)
3274 break;
3275
3276 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
3277 port->virtual_port);
3278 if (retval != 0)
3279 break;
3280
3281 if (port->target_devid != NULL) {
3282 sbuf_printf(sb, "\t<target>");
3283 ctl_id_sbuf(port->target_devid, sb);
3284 sbuf_printf(sb, "</target>\n");
3285 }
3286
3287 if (port->port_devid != NULL) {
3288 sbuf_printf(sb, "\t<port>");
3289 ctl_id_sbuf(port->port_devid, sb);
3290 sbuf_printf(sb, "</port>\n");
3291 }
3292
3293 if (port->port_info != NULL) {
3294 retval = port->port_info(port->onoff_arg, sb);
3295 if (retval != 0)
3296 break;
3297 }
3298 STAILQ_FOREACH(opt, &port->options, links) {
3299 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3300 opt->name, opt->value, opt->name);
3301 if (retval != 0)
3302 break;
3303 }
3304
3305 if (port->lun_map != NULL) {
3306 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
3307 for (j = 0; j < port->lun_map_size; j++) {
3308 plun = ctl_lun_map_from_port(port, j);
3309 if (plun == UINT32_MAX)
3310 continue;
3311 sbuf_printf(sb,
3312 "\t<lun id=\"%u\">%u</lun>\n",
3313 j, plun);
3314 }
3315 }
3316
3317 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
3318 if (port->wwpn_iid[j].in_use == 0 ||
3319 (port->wwpn_iid[j].wwpn == 0 &&
3320 port->wwpn_iid[j].name == NULL))
3321 continue;
3322
3323 if (port->wwpn_iid[j].name != NULL)
3324 retval = sbuf_printf(sb,
3325 "\t<initiator id=\"%u\">%s</initiator>\n",
3326 j, port->wwpn_iid[j].name);
3327 else
3328 retval = sbuf_printf(sb,
3329 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n",
3330 j, port->wwpn_iid[j].wwpn);
3331 if (retval != 0)
3332 break;
3333 }
3334 if (retval != 0)
3335 break;
3336
3337 retval = sbuf_printf(sb, "</targ_port>\n");
3338 if (retval != 0)
3339 break;
3340 }
3341 mtx_unlock(&softc->ctl_lock);
3342
3343 if ((retval != 0)
3344 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
3345 retval = 0;
3346 sbuf_delete(sb);
3347 list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3348 snprintf(list->error_str, sizeof(list->error_str),
3349 "Out of space, %d bytes is too small",
3350 list->alloc_len);
3351 break;
3352 }
3353
3354 sbuf_finish(sb);
3355
3356 retval = copyout(sbuf_data(sb), list->lun_xml,
3357 sbuf_len(sb) + 1);
3358
3359 list->fill_len = sbuf_len(sb) + 1;
3360 list->status = CTL_LUN_LIST_OK;
3361 sbuf_delete(sb);
3362 break;
3363 }
3364 case CTL_LUN_MAP: {
3365 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr;
3366 struct ctl_port *port;
3367
3368 mtx_lock(&softc->ctl_lock);
3369 if (lm->port < softc->port_min ||
3370 lm->port >= softc->port_max ||
3371 (port = softc->ctl_ports[lm->port]) == NULL) {
3372 mtx_unlock(&softc->ctl_lock);
3373 return (ENXIO);
3374 }
3375 if (port->status & CTL_PORT_STATUS_ONLINE) {
3376 STAILQ_FOREACH(lun, &softc->lun_list, links) {
3377 if (ctl_lun_map_to_port(port, lun->lun) ==
3378 UINT32_MAX)
3379 continue;
3380 mtx_lock(&lun->lun_lock);
3381 ctl_est_ua_port(lun, lm->port, -1,
3382 CTL_UA_LUN_CHANGE);
3383 mtx_unlock(&lun->lun_lock);
3384 }
3385 }
3386 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps
3387 if (lm->plun != UINT32_MAX) {
3388 if (lm->lun == UINT32_MAX)
3389 retval = ctl_lun_map_unset(port, lm->plun);
3390 else if (lm->lun < CTL_MAX_LUNS &&
3391 softc->ctl_luns[lm->lun] != NULL)
3392 retval = ctl_lun_map_set(port, lm->plun, lm->lun);
3393 else
3394 return (ENXIO);
3395 } else {
3396 if (lm->lun == UINT32_MAX)
3397 retval = ctl_lun_map_deinit(port);
3398 else
3399 retval = ctl_lun_map_init(port);
3400 }
3401 if (port->status & CTL_PORT_STATUS_ONLINE)
3402 ctl_isc_announce_port(port);
3403 break;
3404 }
3405 default: {
3406 /* XXX KDM should we fix this? */
3407#if 0
3408 struct ctl_backend_driver *backend;
3409 unsigned int type;
3410 int found;
3411
3412 found = 0;
3413
3414 /*
3415 * We encode the backend type as the ioctl type for backend
3416 * ioctls. So parse it out here, and then search for a
3417 * backend of this type.
3418 */
3419 type = _IOC_TYPE(cmd);
3420
3421 STAILQ_FOREACH(backend, &softc->be_list, links) {
3422 if (backend->type == type) {
3423 found = 1;
3424 break;
3425 }
3426 }
3427 if (found == 0) {
3428 printf("ctl: unknown ioctl command %#lx or backend "
3429 "%d\n", cmd, type);
3430 retval = EINVAL;
3431 break;
3432 }
3433 retval = backend->ioctl(dev, cmd, addr, flag, td);
3434#endif
3435 retval = ENOTTY;
3436 break;
3437 }
3438 }
3439 return (retval);
3440}
3441
3442uint32_t
3443ctl_get_initindex(struct ctl_nexus *nexus)
3444{
3445 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3446}
3447
3448int
3449ctl_lun_map_init(struct ctl_port *port)
3450{
3451 struct ctl_softc *softc = port->ctl_softc;
3452 struct ctl_lun *lun;
3453 int size = ctl_lun_map_size;
3454 uint32_t i;
3455
3456 if (port->lun_map == NULL || port->lun_map_size < size) {
3457 port->lun_map_size = 0;
3458 free(port->lun_map, M_CTL);
3459 port->lun_map = malloc(size * sizeof(uint32_t),
3460 M_CTL, M_NOWAIT);
3461 }
3462 if (port->lun_map == NULL)
3463 return (ENOMEM);
3464 for (i = 0; i < size; i++)
3465 port->lun_map[i] = UINT32_MAX;
3466 port->lun_map_size = size;
3467 if (port->status & CTL_PORT_STATUS_ONLINE) {
3468 if (port->lun_disable != NULL) {
3469 STAILQ_FOREACH(lun, &softc->lun_list, links)
3470 port->lun_disable(port->targ_lun_arg, lun->lun);
3471 }
3472 ctl_isc_announce_port(port);
3473 }
3474 return (0);
3475}
3476
3477int
3478ctl_lun_map_deinit(struct ctl_port *port)
3479{
3480 struct ctl_softc *softc = port->ctl_softc;
3481 struct ctl_lun *lun;
3482
3483 if (port->lun_map == NULL)
3484 return (0);
3485 port->lun_map_size = 0;
3486 free(port->lun_map, M_CTL);
3487 port->lun_map = NULL;
3488 if (port->status & CTL_PORT_STATUS_ONLINE) {
3489 if (port->lun_enable != NULL) {
3490 STAILQ_FOREACH(lun, &softc->lun_list, links)
3491 port->lun_enable(port->targ_lun_arg, lun->lun);
3492 }
3493 ctl_isc_announce_port(port);
3494 }
3495 return (0);
3496}
3497
3498int
3499ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
3500{
3501 int status;
3502 uint32_t old;
3503
3504 if (port->lun_map == NULL) {
3505 status = ctl_lun_map_init(port);
3506 if (status != 0)
3507 return (status);
3508 }
3509 if (plun >= port->lun_map_size)
3510 return (EINVAL);
3511 old = port->lun_map[plun];
3512 port->lun_map[plun] = glun;
3513 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) {
3514 if (port->lun_enable != NULL)
3515 port->lun_enable(port->targ_lun_arg, plun);
3516 ctl_isc_announce_port(port);
3517 }
3518 return (0);
3519}
3520
3521int
3522ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
3523{
3524 uint32_t old;
3525
3526 if (port->lun_map == NULL || plun >= port->lun_map_size)
3527 return (0);
3528 old = port->lun_map[plun];
3529 port->lun_map[plun] = UINT32_MAX;
3530 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) {
3531 if (port->lun_disable != NULL)
3532 port->lun_disable(port->targ_lun_arg, plun);
3533 ctl_isc_announce_port(port);
3534 }
3535 return (0);
3536}
3537
3538uint32_t
3539ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
3540{
3541
3542 if (port == NULL)
3543 return (UINT32_MAX);
3544 if (port->lun_map == NULL)
3545 return (lun_id);
3546 if (lun_id > port->lun_map_size)
3547 return (UINT32_MAX);
3548 return (port->lun_map[lun_id]);
3549}
3550
3551uint32_t
3552ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
3553{
3554 uint32_t i;
3555
3556 if (port == NULL)
3557 return (UINT32_MAX);
3558 if (port->lun_map == NULL)
3559 return (lun_id);
3560 for (i = 0; i < port->lun_map_size; i++) {
3561 if (port->lun_map[i] == lun_id)
3562 return (i);
3563 }
3564 return (UINT32_MAX);
3565}
3566
3567uint32_t
3568ctl_decode_lun(uint64_t encoded)
3569{
3570 uint8_t lun[8];
3571 uint32_t result = 0xffffffff;
3572
3573 be64enc(lun, encoded);
3574 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) {
3575 case RPL_LUNDATA_ATYP_PERIPH:
3576 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 &&
3577 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0)
3578 result = lun[1];
3579 break;
3580 case RPL_LUNDATA_ATYP_FLAT:
3581 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 &&
3582 lun[6] == 0 && lun[7] == 0)
3583 result = ((lun[0] & 0x3f) << 8) + lun[1];
3584 break;
3585 case RPL_LUNDATA_ATYP_EXTLUN:
3586 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) {
3587 case 0x02:
3588 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) {
3589 case 0x00:
3590 result = lun[1];
3591 break;
3592 case 0x10:
3593 result = (lun[1] << 16) + (lun[2] << 8) +
3594 lun[3];
3595 break;
3596 case 0x20:
3597 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0)
3598 result = (lun[2] << 24) +
3599 (lun[3] << 16) + (lun[4] << 8) +
3600 lun[5];
3601 break;
3602 }
3603 break;
3604 case RPL_LUNDATA_EXT_EAM_NOT_SPEC:
3605 result = 0xffffffff;
3606 break;
3607 }
3608 break;
3609 }
3610 return (result);
3611}
3612
3613uint64_t
3614ctl_encode_lun(uint32_t decoded)
3615{
3616 uint64_t l = decoded;
3617
3618 if (l <= 0xff)
3619 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48));
3620 if (l <= 0x3fff)
3621 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48));
3622 if (l <= 0xffffff)
3623 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) |
3624 (l << 32));
3625 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
3626}
3627
3628static struct ctl_port *
3629ctl_io_port(struct ctl_io_hdr *io_hdr)
3630{
3631
3632 return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
3633}
3634
3635int
3636ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
3637{
3638 int i;
3639
3640 for (i = first; i < last; i++) {
3641 if ((mask[i / 32] & (1 << (i % 32))) == 0)
3642 return (i);
3643 }
3644 return (-1);
3645}
3646
3647int
3648ctl_set_mask(uint32_t *mask, uint32_t bit)
3649{
3650 uint32_t chunk, piece;
3651
3652 chunk = bit >> 5;
3653 piece = bit % (sizeof(uint32_t) * 8);
3654
3655 if ((mask[chunk] & (1 << piece)) != 0)
3656 return (-1);
3657 else
3658 mask[chunk] |= (1 << piece);
3659
3660 return (0);
3661}
3662
3663int
3664ctl_clear_mask(uint32_t *mask, uint32_t bit)
3665{
3666 uint32_t chunk, piece;
3667
3668 chunk = bit >> 5;
3669 piece = bit % (sizeof(uint32_t) * 8);
3670
3671 if ((mask[chunk] & (1 << piece)) == 0)
3672 return (-1);
3673 else
3674 mask[chunk] &= ~(1 << piece);
3675
3676 return (0);
3677}
3678
3679int
3680ctl_is_set(uint32_t *mask, uint32_t bit)
3681{
3682 uint32_t chunk, piece;
3683
3684 chunk = bit >> 5;
3685 piece = bit % (sizeof(uint32_t) * 8);
3686
3687 if ((mask[chunk] & (1 << piece)) == 0)
3688 return (0);
3689 else
3690 return (1);
3691}
3692
3693static uint64_t
3694ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
3695{
3696 uint64_t *t;
3697
3698 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3699 if (t == NULL)
3700 return (0);
3701 return (t[residx % CTL_MAX_INIT_PER_PORT]);
3702}
3703
3704static void
3705ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
3706{
3707 uint64_t *t;
3708
3709 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3710 if (t == NULL)
3711 return;
3712 t[residx % CTL_MAX_INIT_PER_PORT] = 0;
3713}
3714
3715static void
3716ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
3717{
3718 uint64_t *p;
3719 u_int i;
3720
3721 i = residx/CTL_MAX_INIT_PER_PORT;
3722 if (lun->pr_keys[i] != NULL)
3723 return;
3724 mtx_unlock(&lun->lun_lock);
3725 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
3726 M_WAITOK | M_ZERO);
3727 mtx_lock(&lun->lun_lock);
3728 if (lun->pr_keys[i] == NULL)
3729 lun->pr_keys[i] = p;
3730 else
3731 free(p, M_CTL);
3732}
3733
3734static void
3735ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
3736{
3737 uint64_t *t;
3738
3739 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3740 KASSERT(t != NULL, ("prkey %d is not allocated", residx));
3741 t[residx % CTL_MAX_INIT_PER_PORT] = key;
3742}
3743
3744/*
3745 * ctl_softc, pool_name, total_ctl_io are passed in.
3746 * npool is passed out.
3747 */
3748int
3749ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
3750 uint32_t total_ctl_io, void **npool)
3751{
3752#ifdef IO_POOLS
3753 struct ctl_io_pool *pool;
3754
3755 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3756 M_NOWAIT | M_ZERO);
3757 if (pool == NULL)
3758 return (ENOMEM);
3759
3760 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
3761 pool->ctl_softc = ctl_softc;
3762 pool->zone = uma_zsecond_create(pool->name, NULL,
3763 NULL, NULL, NULL, ctl_softc->io_zone);
3764 /* uma_prealloc(pool->zone, total_ctl_io); */
3765
3766 *npool = pool;
3767#else
3768 *npool = ctl_softc->io_zone;
3769#endif
3770 return (0);
3771}
3772
3773void
3774ctl_pool_free(struct ctl_io_pool *pool)
3775{
3776
3777 if (pool == NULL)
3778 return;
3779
3780#ifdef IO_POOLS
3781 uma_zdestroy(pool->zone);
3782 free(pool, M_CTL);
3783#endif
3784}
3785
3786union ctl_io *
3787ctl_alloc_io(void *pool_ref)
3788{
3789 union ctl_io *io;
3790#ifdef IO_POOLS
3791 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3792
3793 io = uma_zalloc(pool->zone, M_WAITOK);
3794#else
3795 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
3796#endif
3797 if (io != NULL)
3798 io->io_hdr.pool = pool_ref;
3799 return (io);
3800}
3801
3802union ctl_io *
3803ctl_alloc_io_nowait(void *pool_ref)
3804{
3805 union ctl_io *io;
3806#ifdef IO_POOLS
3807 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3808
3809 io = uma_zalloc(pool->zone, M_NOWAIT);
3810#else
3811 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
3812#endif
3813 if (io != NULL)
3814 io->io_hdr.pool = pool_ref;
3815 return (io);
3816}
3817
3818void
3819ctl_free_io(union ctl_io *io)
3820{
3821#ifdef IO_POOLS
3822 struct ctl_io_pool *pool;
3823#endif
3824
3825 if (io == NULL)
3826 return;
3827
3828#ifdef IO_POOLS
3829 pool = (struct ctl_io_pool *)io->io_hdr.pool;
3830 uma_zfree(pool->zone, io);
3831#else
3832 uma_zfree((uma_zone_t)io->io_hdr.pool, io);
3833#endif
3834}
3835
3836void
3837ctl_zero_io(union ctl_io *io)
3838{
3839 void *pool_ref;
3840
3841 if (io == NULL)
3842 return;
3843
3844 /*
3845 * May need to preserve linked list pointers at some point too.
3846 */
3847 pool_ref = io->io_hdr.pool;
3848 memset(io, 0, sizeof(*io));
3849 io->io_hdr.pool = pool_ref;
3850}
3851
3852int
3853ctl_expand_number(const char *buf, uint64_t *num)
3854{
3855 char *endptr;
3856 uint64_t number;
3857 unsigned shift;
3858
3859 number = strtoq(buf, &endptr, 0);
3860
3861 switch (tolower((unsigned char)*endptr)) {
3862 case 'e':
3863 shift = 60;
3864 break;
3865 case 'p':
3866 shift = 50;
3867 break;
3868 case 't':
3869 shift = 40;
3870 break;
3871 case 'g':
3872 shift = 30;
3873 break;
3874 case 'm':
3875 shift = 20;
3876 break;
3877 case 'k':
3878 shift = 10;
3879 break;
3880 case 'b':
3881 case '\0': /* No unit. */
3882 *num = number;
3883 return (0);
3884 default:
3885 /* Unrecognized unit. */
3886 return (-1);
3887 }
3888
3889 if ((number << shift) >> shift != number) {
3890 /* Overflow */
3891 return (-1);
3892 }
3893 *num = number << shift;
3894 return (0);
3895}
3896
3897
3898/*
3899 * This routine could be used in the future to load default and/or saved
3900 * mode page parameters for a particuar lun.
3901 */
3902static int
3903ctl_init_page_index(struct ctl_lun *lun)
3904{
3905 int i, page_code;
3906 struct ctl_page_index *page_index;
3907 const char *value;
3908 uint64_t ival;
3909
3910 memcpy(&lun->mode_pages.index, page_index_template,
3911 sizeof(page_index_template));
3912
3913 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
3914
3915 page_index = &lun->mode_pages.index[i];
3916 if (lun->be_lun->lun_type == T_DIRECT &&
3917 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
3918 continue;
3919 if (lun->be_lun->lun_type == T_PROCESSOR &&
3920 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
3921 continue;
3922 if (lun->be_lun->lun_type == T_CDROM &&
3923 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
3924 continue;
3925
3926 page_code = page_index->page_code & SMPH_PC_MASK;
3927 switch (page_code) {
3928 case SMS_RW_ERROR_RECOVERY_PAGE: {
3929 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
3930 ("subpage %#x for page %#x is incorrect!",
3931 page_index->subpage, page_code));
3932 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
3933 &rw_er_page_default,
3934 sizeof(rw_er_page_default));
3935 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
3936 &rw_er_page_changeable,
3937 sizeof(rw_er_page_changeable));
3938 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
3939 &rw_er_page_default,
3940 sizeof(rw_er_page_default));
3941 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
3942 &rw_er_page_default,
3943 sizeof(rw_er_page_default));
3944 page_index->page_data =
3945 (uint8_t *)lun->mode_pages.rw_er_page;
3946 break;
3947 }
3948 case SMS_FORMAT_DEVICE_PAGE: {
3949 struct scsi_format_page *format_page;
3950
3951 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
3952 ("subpage %#x for page %#x is incorrect!",
3953 page_index->subpage, page_code));
3954
3955 /*
3956 * Sectors per track are set above. Bytes per
3957 * sector need to be set here on a per-LUN basis.
3958 */
3959 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
3960 &format_page_default,
3961 sizeof(format_page_default));
3962 memcpy(&lun->mode_pages.format_page[
3963 CTL_PAGE_CHANGEABLE], &format_page_changeable,
3964 sizeof(format_page_changeable));
3965 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
3966 &format_page_default,
3967 sizeof(format_page_default));
3968 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
3969 &format_page_default,
3970 sizeof(format_page_default));
3971
3972 format_page = &lun->mode_pages.format_page[
3973 CTL_PAGE_CURRENT];
3974 scsi_ulto2b(lun->be_lun->blocksize,
3975 format_page->bytes_per_sector);
3976
3977 format_page = &lun->mode_pages.format_page[
3978 CTL_PAGE_DEFAULT];
3979 scsi_ulto2b(lun->be_lun->blocksize,
3980 format_page->bytes_per_sector);
3981
3982 format_page = &lun->mode_pages.format_page[
3983 CTL_PAGE_SAVED];
3984 scsi_ulto2b(lun->be_lun->blocksize,
3985 format_page->bytes_per_sector);
3986
3987 page_index->page_data =
3988 (uint8_t *)lun->mode_pages.format_page;
3989 break;
3990 }
3991 case SMS_RIGID_DISK_PAGE: {
3992 struct scsi_rigid_disk_page *rigid_disk_page;
3993 uint32_t sectors_per_cylinder;
3994 uint64_t cylinders;
3995#ifndef __XSCALE__
3996 int shift;
3997#endif /* !__XSCALE__ */
3998
3999 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4000 ("subpage %#x for page %#x is incorrect!",
4001 page_index->subpage, page_code));
4002
4003 /*
4004 * Rotation rate and sectors per track are set
4005 * above. We calculate the cylinders here based on
4006 * capacity. Due to the number of heads and
4007 * sectors per track we're using, smaller arrays
4008 * may turn out to have 0 cylinders. Linux and
4009 * FreeBSD don't pay attention to these mode pages
4010 * to figure out capacity, but Solaris does. It
4011 * seems to deal with 0 cylinders just fine, and
4012 * works out a fake geometry based on the capacity.
4013 */
4014 memcpy(&lun->mode_pages.rigid_disk_page[
4015 CTL_PAGE_DEFAULT], &rigid_disk_page_default,
4016 sizeof(rigid_disk_page_default));
4017 memcpy(&lun->mode_pages.rigid_disk_page[
4018 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
4019 sizeof(rigid_disk_page_changeable));
4020
4021 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
4022 CTL_DEFAULT_HEADS;
4023
4024 /*
4025 * The divide method here will be more accurate,
4026 * probably, but results in floating point being
4027 * used in the kernel on i386 (__udivdi3()). On the
4028 * XScale, though, __udivdi3() is implemented in
4029 * software.
4030 *
4031 * The shift method for cylinder calculation is
4032 * accurate if sectors_per_cylinder is a power of
4033 * 2. Otherwise it might be slightly off -- you
4034 * might have a bit of a truncation problem.
4035 */
4036#ifdef __XSCALE__
4037 cylinders = (lun->be_lun->maxlba + 1) /
4038 sectors_per_cylinder;
4039#else
4040 for (shift = 31; shift > 0; shift--) {
4041 if (sectors_per_cylinder & (1 << shift))
4042 break;
4043 }
4044 cylinders = (lun->be_lun->maxlba + 1) >> shift;
4045#endif
4046
4047 /*
4048 * We've basically got 3 bytes, or 24 bits for the
4049 * cylinder size in the mode page. If we're over,
4050 * just round down to 2^24.
4051 */
4052 if (cylinders > 0xffffff)
4053 cylinders = 0xffffff;
4054
4055 rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4056 CTL_PAGE_DEFAULT];
4057 scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4058
4059 if ((value = ctl_get_opt(&lun->be_lun->options,
4060 "rpm")) != NULL) {
4061 scsi_ulto2b(strtol(value, NULL, 0),
4062 rigid_disk_page->rotation_rate);
4063 }
4064
4065 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
4066 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
4067 sizeof(rigid_disk_page_default));
4068 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
4069 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
4070 sizeof(rigid_disk_page_default));
4071
4072 page_index->page_data =
4073 (uint8_t *)lun->mode_pages.rigid_disk_page;
4074 break;
4075 }
4076 case SMS_VERIFY_ERROR_RECOVERY_PAGE: {
4077 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4078 ("subpage %#x for page %#x is incorrect!",
4079 page_index->subpage, page_code));
4080 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT],
4081 &verify_er_page_default,
4082 sizeof(verify_er_page_default));
4083 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE],
4084 &verify_er_page_changeable,
4085 sizeof(verify_er_page_changeable));
4086 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT],
4087 &verify_er_page_default,
4088 sizeof(verify_er_page_default));
4089 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED],
4090 &verify_er_page_default,
4091 sizeof(verify_er_page_default));
4092 page_index->page_data =
4093 (uint8_t *)lun->mode_pages.verify_er_page;
4094 break;
4095 }
4096 case SMS_CACHING_PAGE: {
4097 struct scsi_caching_page *caching_page;
4098
4099 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4100 ("subpage %#x for page %#x is incorrect!",
4101 page_index->subpage, page_code));
4102 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
4103 &caching_page_default,
4104 sizeof(caching_page_default));
4105 memcpy(&lun->mode_pages.caching_page[
4106 CTL_PAGE_CHANGEABLE], &caching_page_changeable,
4107 sizeof(caching_page_changeable));
4108 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4109 &caching_page_default,
4110 sizeof(caching_page_default));
4111 caching_page = &lun->mode_pages.caching_page[
4112 CTL_PAGE_SAVED];
4113 value = ctl_get_opt(&lun->be_lun->options, "writecache");
4114 if (value != NULL && strcmp(value, "off") == 0)
4115 caching_page->flags1 &= ~SCP_WCE;
4116 value = ctl_get_opt(&lun->be_lun->options, "readcache");
4117 if (value != NULL && strcmp(value, "off") == 0)
4118 caching_page->flags1 |= SCP_RCD;
4119 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
4120 &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4121 sizeof(caching_page_default));
4122 page_index->page_data =
4123 (uint8_t *)lun->mode_pages.caching_page;
4124 break;
4125 }
4126 case SMS_CONTROL_MODE_PAGE: {
4127 switch (page_index->subpage) {
4128 case SMS_SUBPAGE_PAGE_0: {
4129 struct scsi_control_page *control_page;
4130
4131 memcpy(&lun->mode_pages.control_page[
4132 CTL_PAGE_DEFAULT],
4133 &control_page_default,
4134 sizeof(control_page_default));
4135 memcpy(&lun->mode_pages.control_page[
4136 CTL_PAGE_CHANGEABLE],
4137 &control_page_changeable,
4138 sizeof(control_page_changeable));
4139 memcpy(&lun->mode_pages.control_page[
4140 CTL_PAGE_SAVED],
4141 &control_page_default,
4142 sizeof(control_page_default));
4143 control_page = &lun->mode_pages.control_page[
4144 CTL_PAGE_SAVED];
4145 value = ctl_get_opt(&lun->be_lun->options,
4146 "reordering");
4147 if (value != NULL &&
4148 strcmp(value, "unrestricted") == 0) {
4149 control_page->queue_flags &=
4150 ~SCP_QUEUE_ALG_MASK;
4151 control_page->queue_flags |=
4152 SCP_QUEUE_ALG_UNRESTRICTED;
4153 }
4154 memcpy(&lun->mode_pages.control_page[
4155 CTL_PAGE_CURRENT],
4156 &lun->mode_pages.control_page[
4157 CTL_PAGE_SAVED],
4158 sizeof(control_page_default));
4159 page_index->page_data =
4160 (uint8_t *)lun->mode_pages.control_page;
4161 break;
4162 }
4163 case 0x01:
4164 memcpy(&lun->mode_pages.control_ext_page[
4165 CTL_PAGE_DEFAULT],
4166 &control_ext_page_default,
4167 sizeof(control_ext_page_default));
4168 memcpy(&lun->mode_pages.control_ext_page[
4169 CTL_PAGE_CHANGEABLE],
4170 &control_ext_page_changeable,
4171 sizeof(control_ext_page_changeable));
4172 memcpy(&lun->mode_pages.control_ext_page[
4173 CTL_PAGE_SAVED],
4174 &control_ext_page_default,
4175 sizeof(control_ext_page_default));
4176 memcpy(&lun->mode_pages.control_ext_page[
4177 CTL_PAGE_CURRENT],
4178 &lun->mode_pages.control_ext_page[
4179 CTL_PAGE_SAVED],
4180 sizeof(control_ext_page_default));
4181 page_index->page_data =
4182 (uint8_t *)lun->mode_pages.control_ext_page;
4183 break;
4184 default:
4185 panic("subpage %#x for page %#x is incorrect!",
4186 page_index->subpage, page_code);
4187 }
4188 break;
4189 }
4190 case SMS_INFO_EXCEPTIONS_PAGE: {
4191 switch (page_index->subpage) {
4192 case SMS_SUBPAGE_PAGE_0:
4193 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
4194 &ie_page_default,
4195 sizeof(ie_page_default));
4196 memcpy(&lun->mode_pages.ie_page[
4197 CTL_PAGE_CHANGEABLE], &ie_page_changeable,
4198 sizeof(ie_page_changeable));
4199 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
4200 &ie_page_default,
4201 sizeof(ie_page_default));
4202 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
4203 &ie_page_default,
4204 sizeof(ie_page_default));
4205 page_index->page_data =
4206 (uint8_t *)lun->mode_pages.ie_page;
4207 break;
4208 case 0x02: {
4209 struct ctl_logical_block_provisioning_page *page;
4210
4211 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
4212 &lbp_page_default,
4213 sizeof(lbp_page_default));
4214 memcpy(&lun->mode_pages.lbp_page[
4215 CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
4216 sizeof(lbp_page_changeable));
4217 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
4218 &lbp_page_default,
4219 sizeof(lbp_page_default));
4220 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
4221 value = ctl_get_opt(&lun->be_lun->options,
4222 "avail-threshold");
4223 if (value != NULL &&
4224 ctl_expand_number(value, &ival) == 0) {
4225 page->descr[0].flags |= SLBPPD_ENABLED |
4226 SLBPPD_ARMING_DEC;
4227 if (lun->be_lun->blocksize)
4228 ival /= lun->be_lun->blocksize;
4229 else
4230 ival /= 512;
4231 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4232 page->descr[0].count);
4233 }
4234 value = ctl_get_opt(&lun->be_lun->options,
4235 "used-threshold");
4236 if (value != NULL &&
4237 ctl_expand_number(value, &ival) == 0) {
4238 page->descr[1].flags |= SLBPPD_ENABLED |
4239 SLBPPD_ARMING_INC;
4240 if (lun->be_lun->blocksize)
4241 ival /= lun->be_lun->blocksize;
4242 else
4243 ival /= 512;
4244 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4245 page->descr[1].count);
4246 }
4247 value = ctl_get_opt(&lun->be_lun->options,
4248 "pool-avail-threshold");
4249 if (value != NULL &&
4250 ctl_expand_number(value, &ival) == 0) {
4251 page->descr[2].flags |= SLBPPD_ENABLED |
4252 SLBPPD_ARMING_DEC;
4253 if (lun->be_lun->blocksize)
4254 ival /= lun->be_lun->blocksize;
4255 else
4256 ival /= 512;
4257 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4258 page->descr[2].count);
4259 }
4260 value = ctl_get_opt(&lun->be_lun->options,
4261 "pool-used-threshold");
4262 if (value != NULL &&
4263 ctl_expand_number(value, &ival) == 0) {
4264 page->descr[3].flags |= SLBPPD_ENABLED |
4265 SLBPPD_ARMING_INC;
4266 if (lun->be_lun->blocksize)
4267 ival /= lun->be_lun->blocksize;
4268 else
4269 ival /= 512;
4270 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4271 page->descr[3].count);
4272 }
4273 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
4274 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
4275 sizeof(lbp_page_default));
4276 page_index->page_data =
4277 (uint8_t *)lun->mode_pages.lbp_page;
4278 break;
4279 }
4280 default:
4281 panic("subpage %#x for page %#x is incorrect!",
4282 page_index->subpage, page_code);
4283 }
4284 break;
4285 }
4286 case SMS_CDDVD_CAPS_PAGE:{
4287 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4288 ("subpage %#x for page %#x is incorrect!",
4289 page_index->subpage, page_code));
4290 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT],
4291 &cddvd_page_default,
4292 sizeof(cddvd_page_default));
4293 memcpy(&lun->mode_pages.cddvd_page[
4294 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable,
4295 sizeof(cddvd_page_changeable));
4296 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
4297 &cddvd_page_default,
4298 sizeof(cddvd_page_default));
4299 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT],
4300 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
4301 sizeof(cddvd_page_default));
4302 page_index->page_data =
4303 (uint8_t *)lun->mode_pages.cddvd_page;
4304 break;
4305 }
4306 default:
4307 panic("invalid page code value %#x", page_code);
4308 }
4309 }
4310
4311 return (CTL_RETVAL_COMPLETE);
4312}
4313
4314static int
4315ctl_init_log_page_index(struct ctl_lun *lun)
4316{
4317 struct ctl_page_index *page_index;
4318 int i, j, k, prev;
4319
4320 memcpy(&lun->log_pages.index, log_page_index_template,
4321 sizeof(log_page_index_template));
4322
4323 prev = -1;
4324 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
4325
4326 page_index = &lun->log_pages.index[i];
4327 if (lun->be_lun->lun_type == T_DIRECT &&
4328 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
4329 continue;
4330 if (lun->be_lun->lun_type == T_PROCESSOR &&
4331 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
4332 continue;
4333 if (lun->be_lun->lun_type == T_CDROM &&
4334 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
4335 continue;
4336
4337 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
4338 lun->backend->lun_attr == NULL)
4339 continue;
4340
4341 if (page_index->page_code != prev) {
4342 lun->log_pages.pages_page[j] = page_index->page_code;
4343 prev = page_index->page_code;
4344 j++;
4345 }
4346 lun->log_pages.subpages_page[k*2] = page_index->page_code;
4347 lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
4348 k++;
4349 }
4350 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
4351 lun->log_pages.index[0].page_len = j;
4352 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
4353 lun->log_pages.index[1].page_len = k * 2;
4354 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
4355 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
4356 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
4357 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
4358 lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page;
4359 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page);
4360
4361 return (CTL_RETVAL_COMPLETE);
4362}
4363
4364static int
4365hex2bin(const char *str, uint8_t *buf, int buf_size)
4366{
4367 int i;
4368 u_char c;
4369
4370 memset(buf, 0, buf_size);
4371 while (isspace(str[0]))
4372 str++;
4373 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
4374 str += 2;
4375 buf_size *= 2;
4376 for (i = 0; str[i] != 0 && i < buf_size; i++) {
4377 while (str[i] == '-') /* Skip dashes in UUIDs. */
4378 str++;
4379 c = str[i];
4380 if (isdigit(c))
4381 c -= '0';
4382 else if (isalpha(c))
4383 c -= isupper(c) ? 'A' - 10 : 'a' - 10;
4384 else
4385 break;
4386 if (c >= 16)
4387 break;
4388 if ((i & 1) == 0)
4389 buf[i / 2] |= (c << 4);
4390 else
4391 buf[i / 2] |= c;
4392 }
4393 return ((i + 1) / 2);
4394}
4395
4396/*
4397 * LUN allocation.
4398 *
4399 * Requirements:
4400 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
4401 * wants us to allocate the LUN and he can block.
4402 * - ctl_softc is always set
4403 * - be_lun is set if the LUN has a backend (needed for disk LUNs)
4404 *
4405 * Returns 0 for success, non-zero (errno) for failure.
4406 */
4407static int
4408ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
4409 struct ctl_be_lun *const be_lun)
4410{
4411 struct ctl_lun *nlun, *lun;
4412 struct scsi_vpd_id_descriptor *desc;
4413 struct scsi_vpd_id_t10 *t10id;
4414 const char *eui, *naa, *scsiname, *uuid, *vendor, *value;
4415 int lun_number, i, lun_malloced;
4416 int devidlen, idlen1, idlen2 = 0, len;
4417
4418 if (be_lun == NULL)
4419 return (EINVAL);
4420
4421 /*
4422 * We currently only support Direct Access or Processor LUN types.
4423 */
4424 switch (be_lun->lun_type) {
4425 case T_DIRECT:
4426 case T_PROCESSOR:
4427 case T_CDROM:
4428 break;
4429 case T_SEQUENTIAL:
4430 case T_CHANGER:
4431 default:
4432 be_lun->lun_config_status(be_lun->be_lun,
4433 CTL_LUN_CONFIG_FAILURE);
4434 break;
4435 }
4436 if (ctl_lun == NULL) {
4437 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
4438 lun_malloced = 1;
4439 } else {
4440 lun_malloced = 0;
4441 lun = ctl_lun;
4442 }
4443
4444 memset(lun, 0, sizeof(*lun));
4445 if (lun_malloced)
4446 lun->flags = CTL_LUN_MALLOCED;
4447
4448 /* Generate LUN ID. */
4449 devidlen = max(CTL_DEVID_MIN_LEN,
4450 strnlen(be_lun->device_id, CTL_DEVID_LEN));
4451 idlen1 = sizeof(*t10id) + devidlen;
4452 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
4453 scsiname = ctl_get_opt(&be_lun->options, "scsiname");
4454 if (scsiname != NULL) {
4455 idlen2 = roundup2(strlen(scsiname) + 1, 4);
4456 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
4457 }
4458 eui = ctl_get_opt(&be_lun->options, "eui");
4459 if (eui != NULL) {
4460 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4461 }
4462 naa = ctl_get_opt(&be_lun->options, "naa");
4463 if (naa != NULL) {
4464 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4465 }
4466 uuid = ctl_get_opt(&be_lun->options, "uuid");
4467 if (uuid != NULL) {
4468 len += sizeof(struct scsi_vpd_id_descriptor) + 18;
4469 }
4470 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
4471 M_CTL, M_WAITOK | M_ZERO);
4472 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
4473 desc->proto_codeset = SVPD_ID_CODESET_ASCII;
4474 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
4475 desc->length = idlen1;
4476 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
4477 memset(t10id->vendor, ' ', sizeof(t10id->vendor));
4478 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
4479 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
4480 } else {
4481 strncpy(t10id->vendor, vendor,
4482 min(sizeof(t10id->vendor), strlen(vendor)));
4483 }
4484 strncpy((char *)t10id->vendor_spec_id,
4485 (char *)be_lun->device_id, devidlen);
4486 if (scsiname != NULL) {
4487 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4488 desc->length);
4489 desc->proto_codeset = SVPD_ID_CODESET_UTF8;
4490 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4491 SVPD_ID_TYPE_SCSI_NAME;
4492 desc->length = idlen2;
4493 strlcpy(desc->identifier, scsiname, idlen2);
4494 }
4495 if (eui != NULL) {
4496 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4497 desc->length);
4498 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4499 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4500 SVPD_ID_TYPE_EUI64;
4501 desc->length = hex2bin(eui, desc->identifier, 16);
4502 desc->length = desc->length > 12 ? 16 :
4503 (desc->length > 8 ? 12 : 8);
4504 len -= 16 - desc->length;
4505 }
4506 if (naa != NULL) {
4507 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4508 desc->length);
4509 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4510 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4511 SVPD_ID_TYPE_NAA;
4512 desc->length = hex2bin(naa, desc->identifier, 16);
4513 desc->length = desc->length > 8 ? 16 : 8;
4514 len -= 16 - desc->length;
4515 }
4516 if (uuid != NULL) {
4517 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4518 desc->length);
4519 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4520 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4521 SVPD_ID_TYPE_UUID;
4522 desc->identifier[0] = 0x10;
4523 hex2bin(uuid, &desc->identifier[2], 16);
4524 desc->length = 18;
4525 }
4526 lun->lun_devid->len = len;
4527
4528 mtx_lock(&ctl_softc->ctl_lock);
4529 /*
4530 * See if the caller requested a particular LUN number. If so, see
4531 * if it is available. Otherwise, allocate the first available LUN.
4532 */
4533 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
4534 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
4535 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
4536 mtx_unlock(&ctl_softc->ctl_lock);
4537 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
4538 printf("ctl: requested LUN ID %d is higher "
4539 "than CTL_MAX_LUNS - 1 (%d)\n",
4540 be_lun->req_lun_id, CTL_MAX_LUNS - 1);
4541 } else {
4542 /*
4543 * XXX KDM return an error, or just assign
4544 * another LUN ID in this case??
4545 */
4546 printf("ctl: requested LUN ID %d is already "
4547 "in use\n", be_lun->req_lun_id);
4548 }
4549 if (lun->flags & CTL_LUN_MALLOCED)
4550 free(lun, M_CTL);
4551 be_lun->lun_config_status(be_lun->be_lun,
4552 CTL_LUN_CONFIG_FAILURE);
4553 return (ENOSPC);
4554 }
4555 lun_number = be_lun->req_lun_id;
4556 } else {
4557 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS);
4558 if (lun_number == -1) {
4559 mtx_unlock(&ctl_softc->ctl_lock);
4560 printf("ctl: can't allocate LUN, out of LUNs\n");
4561 if (lun->flags & CTL_LUN_MALLOCED)
4562 free(lun, M_CTL);
4563 be_lun->lun_config_status(be_lun->be_lun,
4564 CTL_LUN_CONFIG_FAILURE);
4565 return (ENOSPC);
4566 }
4567 }
4568 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
4569
4570 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
4571 lun->lun = lun_number;
4572 lun->be_lun = be_lun;
4573 /*
4574 * The processor LUN is always enabled. Disk LUNs come on line
4575 * disabled, and must be enabled by the backend.
4576 */
4577 lun->flags |= CTL_LUN_DISABLED;
4578 lun->backend = be_lun->be;
4579 be_lun->ctl_lun = lun;
4580 be_lun->lun_id = lun_number;
4581 atomic_add_int(&be_lun->be->num_luns, 1);
4582 if (be_lun->flags & CTL_LUN_FLAG_EJECTED)
4583 lun->flags |= CTL_LUN_EJECTED;
4584 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA)
4585 lun->flags |= CTL_LUN_NO_MEDIA;
4586 if (be_lun->flags & CTL_LUN_FLAG_STOPPED)
4587 lun->flags |= CTL_LUN_STOPPED;
4588
4589 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
4590 lun->flags |= CTL_LUN_PRIMARY_SC;
4591
4592 value = ctl_get_opt(&be_lun->options, "removable");
4593 if (value != NULL) {
4594 if (strcmp(value, "on") == 0)
4595 lun->flags |= CTL_LUN_REMOVABLE;
4596 } else if (be_lun->lun_type == T_CDROM)
4597 lun->flags |= CTL_LUN_REMOVABLE;
4598
4599 lun->ctl_softc = ctl_softc;
4600#ifdef CTL_TIME_IO
4601 lun->last_busy = getsbinuptime();
4602#endif
4603 TAILQ_INIT(&lun->ooa_queue);
4604 TAILQ_INIT(&lun->blocked_queue);
4605 STAILQ_INIT(&lun->error_list);
4606 lun->ie_reported = 1;
4607 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0);
4608 ctl_tpc_lun_init(lun);
4609
4610 /*
4611 * Initialize the mode and log page index.
4612 */
4613 ctl_init_page_index(lun);
4614 ctl_init_log_page_index(lun);
4615
4616 /*
4617 * Now, before we insert this lun on the lun list, set the lun
4618 * inventory changed UA for all other luns.
4619 */
4620 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
4621 mtx_lock(&nlun->lun_lock);
4622 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4623 mtx_unlock(&nlun->lun_lock);
4624 }
4625
4626 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
4627
4628 ctl_softc->ctl_luns[lun_number] = lun;
4629
4630 ctl_softc->num_luns++;
4631
4632 /* Setup statistics gathering */
4633 lun->stats.device_type = be_lun->lun_type;
4634 lun->stats.lun_number = lun_number;
4635 lun->stats.blocksize = be_lun->blocksize;
4636 if (be_lun->blocksize == 0)
4637 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
4638 for (i = 0;i < CTL_MAX_PORTS;i++)
4639 lun->stats.ports[i].targ_port = i;
4640
4641 mtx_unlock(&ctl_softc->ctl_lock);
4642
4643 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
4644 return (0);
4645}
4646
4647/*
4648 * Delete a LUN.
4649 * Assumptions:
4650 * - LUN has already been marked invalid and any pending I/O has been taken
4651 * care of.
4652 */
4653static int
4654ctl_free_lun(struct ctl_lun *lun)
4655{
4656 struct ctl_softc *softc;
4657 struct ctl_lun *nlun;
4658 int i;
4659
4660 softc = lun->ctl_softc;
4661
4662 mtx_assert(&softc->ctl_lock, MA_OWNED);
4663
4664 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4665
4666 ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4667
4668 softc->ctl_luns[lun->lun] = NULL;
4669
4670 if (!TAILQ_EMPTY(&lun->ooa_queue))
4671 panic("Freeing a LUN %p with outstanding I/O!!\n", lun);
4672
4673 softc->num_luns--;
4674
4675 /*
4676 * Tell the backend to free resources, if this LUN has a backend.
4677 */
4678 atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
4679 lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
4680
4681 lun->ie_reportcnt = UINT32_MAX;
4682 callout_drain(&lun->ie_callout);
4683
4684 ctl_tpc_lun_shutdown(lun);
4685 mtx_destroy(&lun->lun_lock);
4686 free(lun->lun_devid, M_CTL);
4687 for (i = 0; i < CTL_MAX_PORTS; i++)
4688 free(lun->pending_ua[i], M_CTL);
4689 for (i = 0; i < CTL_MAX_PORTS; i++)
4690 free(lun->pr_keys[i], M_CTL);
4691 free(lun->write_buffer, M_CTL);
4692 if (lun->flags & CTL_LUN_MALLOCED)
4693 free(lun, M_CTL);
4694
4695 STAILQ_FOREACH(nlun, &softc->lun_list, links) {
4696 mtx_lock(&nlun->lun_lock);
4697 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4698 mtx_unlock(&nlun->lun_lock);
4699 }
4700
4701 return (0);
4702}
4703
4704static void
4705ctl_create_lun(struct ctl_be_lun *be_lun)
4706{
4707
4708 /*
4709 * ctl_alloc_lun() should handle all potential failure cases.
4710 */
4711 ctl_alloc_lun(control_softc, NULL, be_lun);
4712}
4713
4714int
4715ctl_add_lun(struct ctl_be_lun *be_lun)
4716{
4717 struct ctl_softc *softc = control_softc;
4718
4719 mtx_lock(&softc->ctl_lock);
4720 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
4721 mtx_unlock(&softc->ctl_lock);
4722 wakeup(&softc->pending_lun_queue);
4723
4724 return (0);
4725}
4726
4727int
4728ctl_enable_lun(struct ctl_be_lun *be_lun)
4729{
4730 struct ctl_softc *softc;
4731 struct ctl_port *port, *nport;
4732 struct ctl_lun *lun;
4733 int retval;
4734
4735 lun = (struct ctl_lun *)be_lun->ctl_lun;
4736 softc = lun->ctl_softc;
4737
4738 mtx_lock(&softc->ctl_lock);
4739 mtx_lock(&lun->lun_lock);
4740 if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4741 /*
4742 * eh? Why did we get called if the LUN is already
4743 * enabled?
4744 */
4745 mtx_unlock(&lun->lun_lock);
4746 mtx_unlock(&softc->ctl_lock);
4747 return (0);
4748 }
4749 lun->flags &= ~CTL_LUN_DISABLED;
4750 mtx_unlock(&lun->lun_lock);
4751
4752 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) {
4753 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4754 port->lun_map != NULL || port->lun_enable == NULL)
4755 continue;
4756
4757 /*
4758 * Drop the lock while we call the FETD's enable routine.
4759 * This can lead to a callback into CTL (at least in the
4760 * case of the internal initiator frontend.
4761 */
4762 mtx_unlock(&softc->ctl_lock);
4763 retval = port->lun_enable(port->targ_lun_arg, lun->lun);
4764 mtx_lock(&softc->ctl_lock);
4765 if (retval != 0) {
4766 printf("%s: FETD %s port %d returned error "
4767 "%d for lun_enable on lun %jd\n",
4768 __func__, port->port_name, port->targ_port,
4769 retval, (intmax_t)lun->lun);
4770 }
4771 }
4772
4773 mtx_unlock(&softc->ctl_lock);
4774 ctl_isc_announce_lun(lun);
4775
4776 return (0);
4777}
4778
4779int
4780ctl_disable_lun(struct ctl_be_lun *be_lun)
4781{
4782 struct ctl_softc *softc;
4783 struct ctl_port *port;
4784 struct ctl_lun *lun;
4785 int retval;
4786
4787 lun = (struct ctl_lun *)be_lun->ctl_lun;
4788 softc = lun->ctl_softc;
4789
4790 mtx_lock(&softc->ctl_lock);
4791 mtx_lock(&lun->lun_lock);
4792 if (lun->flags & CTL_LUN_DISABLED) {
4793 mtx_unlock(&lun->lun_lock);
4794 mtx_unlock(&softc->ctl_lock);
4795 return (0);
4796 }
4797 lun->flags |= CTL_LUN_DISABLED;
4798 mtx_unlock(&lun->lun_lock);
4799
4800 STAILQ_FOREACH(port, &softc->port_list, links) {
4801 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4802 port->lun_map != NULL || port->lun_disable == NULL)
4803 continue;
4804
4805 /*
4806 * Drop the lock before we call the frontend's disable
4807 * routine, to avoid lock order reversals.
4808 *
4809 * XXX KDM what happens if the frontend list changes while
4810 * we're traversing it? It's unlikely, but should be handled.
4811 */
4812 mtx_unlock(&softc->ctl_lock);
4813 retval = port->lun_disable(port->targ_lun_arg, lun->lun);
4814 mtx_lock(&softc->ctl_lock);
4815 if (retval != 0) {
4816 printf("%s: FETD %s port %d returned error "
4817 "%d for lun_disable on lun %jd\n",
4818 __func__, port->port_name, port->targ_port,
4819 retval, (intmax_t)lun->lun);
4820 }
4821 }
4822
4823 mtx_unlock(&softc->ctl_lock);
4824 ctl_isc_announce_lun(lun);
4825
4826 return (0);
4827}
4828
4829int
4830ctl_start_lun(struct ctl_be_lun *be_lun)
4831{
4832 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4833
4834 mtx_lock(&lun->lun_lock);
4835 lun->flags &= ~CTL_LUN_STOPPED;
4836 mtx_unlock(&lun->lun_lock);
4837 return (0);
4838}
4839
4840int
4841ctl_stop_lun(struct ctl_be_lun *be_lun)
4842{
4843 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4844
4845 mtx_lock(&lun->lun_lock);
4846 lun->flags |= CTL_LUN_STOPPED;
4847 mtx_unlock(&lun->lun_lock);
4848 return (0);
4849}
4850
4851int
4852ctl_lun_no_media(struct ctl_be_lun *be_lun)
4853{
4854 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4855
4856 mtx_lock(&lun->lun_lock);
4857 lun->flags |= CTL_LUN_NO_MEDIA;
4858 mtx_unlock(&lun->lun_lock);
4859 return (0);
4860}
4861
4862int
4863ctl_lun_has_media(struct ctl_be_lun *be_lun)
4864{
4865 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4866 union ctl_ha_msg msg;
4867
4868 mtx_lock(&lun->lun_lock);
4869 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED);
4870 if (lun->flags & CTL_LUN_REMOVABLE)
4871 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE);
4872 mtx_unlock(&lun->lun_lock);
4873 if ((lun->flags & CTL_LUN_REMOVABLE) &&
4874 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
4875 bzero(&msg.ua, sizeof(msg.ua));
4876 msg.hdr.msg_type = CTL_MSG_UA;
4877 msg.hdr.nexus.initid = -1;
4878 msg.hdr.nexus.targ_port = -1;
4879 msg.hdr.nexus.targ_lun = lun->lun;
4880 msg.hdr.nexus.targ_mapped_lun = lun->lun;
4881 msg.ua.ua_all = 1;
4882 msg.ua.ua_set = 1;
4883 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE;
4884 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
4885 M_WAITOK);
4886 }
4887 return (0);
4888}
4889
4890int
4891ctl_lun_ejected(struct ctl_be_lun *be_lun)
4892{
4893 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4894
4895 mtx_lock(&lun->lun_lock);
4896 lun->flags |= CTL_LUN_EJECTED;
4897 mtx_unlock(&lun->lun_lock);
4898 return (0);
4899}
4900
4901int
4902ctl_lun_primary(struct ctl_be_lun *be_lun)
4903{
4904 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4905
4906 mtx_lock(&lun->lun_lock);
4907 lun->flags |= CTL_LUN_PRIMARY_SC;
4908 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
4909 mtx_unlock(&lun->lun_lock);
4910 ctl_isc_announce_lun(lun);
4911 return (0);
4912}
4913
4914int
4915ctl_lun_secondary(struct ctl_be_lun *be_lun)
4916{
4917 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4918
4919 mtx_lock(&lun->lun_lock);
4920 lun->flags &= ~CTL_LUN_PRIMARY_SC;
4921 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
4922 mtx_unlock(&lun->lun_lock);
4923 ctl_isc_announce_lun(lun);
4924 return (0);
4925}
4926
4927int
4928ctl_invalidate_lun(struct ctl_be_lun *be_lun)
4929{
4930 struct ctl_softc *softc;
4931 struct ctl_lun *lun;
4932
4933 lun = (struct ctl_lun *)be_lun->ctl_lun;
4934 softc = lun->ctl_softc;
4935
4936 mtx_lock(&lun->lun_lock);
4937
4938 /*
4939 * The LUN needs to be disabled before it can be marked invalid.
4940 */
4941 if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4942 mtx_unlock(&lun->lun_lock);
4943 return (-1);
4944 }
4945 /*
4946 * Mark the LUN invalid.
4947 */
4948 lun->flags |= CTL_LUN_INVALID;
4949
4950 /*
4951 * If there is nothing in the OOA queue, go ahead and free the LUN.
4952 * If we have something in the OOA queue, we'll free it when the
4953 * last I/O completes.
4954 */
4955 if (TAILQ_EMPTY(&lun->ooa_queue)) {
4956 mtx_unlock(&lun->lun_lock);
4957 mtx_lock(&softc->ctl_lock);
4958 ctl_free_lun(lun);
4959 mtx_unlock(&softc->ctl_lock);
4960 } else
4961 mtx_unlock(&lun->lun_lock);
4962
4963 return (0);
4964}
4965
4966void
4967ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
4968{
4969 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4970 union ctl_ha_msg msg;
4971
4972 mtx_lock(&lun->lun_lock);
4973 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE);
4974 mtx_unlock(&lun->lun_lock);
4975 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
4976 /* Send msg to other side. */
4977 bzero(&msg.ua, sizeof(msg.ua));
4978 msg.hdr.msg_type = CTL_MSG_UA;
4979 msg.hdr.nexus.initid = -1;
4980 msg.hdr.nexus.targ_port = -1;
4981 msg.hdr.nexus.targ_lun = lun->lun;
4982 msg.hdr.nexus.targ_mapped_lun = lun->lun;
4983 msg.ua.ua_all = 1;
4984 msg.ua.ua_set = 1;
4985 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE;
4986 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
4987 M_WAITOK);
4988 }
4989}
4990
4991/*
4992 * Backend "memory move is complete" callback for requests that never
4993 * make it down to say RAIDCore's configuration code.
4994 */
4995int
4996ctl_config_move_done(union ctl_io *io)
4997{
4998 int retval;
4999
5000 CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
5001 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
5002 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type));
5003
5004 if ((io->io_hdr.port_status != 0) &&
5005 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5006 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5007 /*
5008 * For hardware error sense keys, the sense key
5009 * specific value is defined to be a retry count,
5010 * but we use it to pass back an internal FETD
5011 * error code. XXX KDM Hopefully the FETD is only
5012 * using 16 bits for an error code, since that's
5013 * all the space we have in the sks field.
5014 */
5015 ctl_set_internal_failure(&io->scsiio,
5016 /*sks_valid*/ 1,
5017 /*retry_count*/
5018 io->io_hdr.port_status);
5019 }
5020
5021 if (ctl_debug & CTL_DEBUG_CDB_DATA)
5022 ctl_data_print(io);
5023 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
5024 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
5025 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
5026 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
5027 /*
5028 * XXX KDM just assuming a single pointer here, and not a
5029 * S/G list. If we start using S/G lists for config data,
5030 * we'll need to know how to clean them up here as well.
5031 */
5032 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5033 free(io->scsiio.kern_data_ptr, M_CTL);
5034 ctl_done(io);
5035 retval = CTL_RETVAL_COMPLETE;
5036 } else {
5037 /*
5038 * XXX KDM now we need to continue data movement. Some
5039 * options:
5040 * - call ctl_scsiio() again? We don't do this for data
5041 * writes, because for those at least we know ahead of
5042 * time where the write will go and how long it is. For
5043 * config writes, though, that information is largely
5044 * contained within the write itself, thus we need to
5045 * parse out the data again.
5046 *
5047 * - Call some other function once the data is in?
5048 */
5049
5050 /*
5051 * XXX KDM call ctl_scsiio() again for now, and check flag
5052 * bits to see whether we're allocated or not.
5053 */
5054 retval = ctl_scsiio(&io->scsiio);
5055 }
5056 return (retval);
5057}
5058
5059/*
5060 * This gets called by a backend driver when it is done with a
5061 * data_submit method.
5062 */
5063void
5064ctl_data_submit_done(union ctl_io *io)
5065{
5066 /*
5067 * If the IO_CONT flag is set, we need to call the supplied
5068 * function to continue processing the I/O, instead of completing
5069 * the I/O just yet.
5070 *
5071 * If there is an error, though, we don't want to keep processing.
5072 * Instead, just send status back to the initiator.
5073 */
5074 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5075 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5076 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5077 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5078 io->scsiio.io_cont(io);
5079 return;
5080 }
5081 ctl_done(io);
5082}
5083
5084/*
5085 * This gets called by a backend driver when it is done with a
5086 * configuration write.
5087 */
5088void
5089ctl_config_write_done(union ctl_io *io)
5090{
5091 uint8_t *buf;
5092
5093 /*
5094 * If the IO_CONT flag is set, we need to call the supplied
5095 * function to continue processing the I/O, instead of completing
5096 * the I/O just yet.
5097 *
5098 * If there is an error, though, we don't want to keep processing.
5099 * Instead, just send status back to the initiator.
5100 */
5101 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5102 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5103 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5104 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5105 io->scsiio.io_cont(io);
5106 return;
5107 }
5108 /*
5109 * Since a configuration write can be done for commands that actually
5110 * have data allocated, like write buffer, and commands that have
5111 * no data, like start/stop unit, we need to check here.
5112 */
5113 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5114 buf = io->scsiio.kern_data_ptr;
5115 else
5116 buf = NULL;
5117 ctl_done(io);
5118 if (buf)
5119 free(buf, M_CTL);
5120}
5121
5122void
5123ctl_config_read_done(union ctl_io *io)
5124{
5125 uint8_t *buf;
5126
5127 /*
5128 * If there is some error -- we are done, skip data transfer.
5129 */
5130 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
5131 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
5132 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
5133 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5134 buf = io->scsiio.kern_data_ptr;
5135 else
5136 buf = NULL;
5137 ctl_done(io);
5138 if (buf)
5139 free(buf, M_CTL);
5140 return;
5141 }
5142
5143 /*
5144 * If the IO_CONT flag is set, we need to call the supplied
5145 * function to continue processing the I/O, instead of completing
5146 * the I/O just yet.
5147 */
5148 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
5149 io->scsiio.io_cont(io);
5150 return;
5151 }
5152
5153 ctl_datamove(io);
5154}
5155
5156/*
5157 * SCSI release command.
5158 */
5159int
5160ctl_scsi_release(struct ctl_scsiio *ctsio)
5161{
5162 struct ctl_lun *lun;
5163 uint32_t residx;
5164
5165 CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5166
5167 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5168 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5169
5170 /*
5171 * XXX KDM right now, we only support LUN reservation. We don't
5172 * support 3rd party reservations, or extent reservations, which
5173 * might actually need the parameter list. If we've gotten this
5174 * far, we've got a LUN reservation. Anything else got kicked out
5175 * above. So, according to SPC, ignore the length.
5176 */
5177
5178 mtx_lock(&lun->lun_lock);
5179
5180 /*
5181 * According to SPC, it is not an error for an intiator to attempt
5182 * to release a reservation on a LUN that isn't reserved, or that
5183 * is reserved by another initiator. The reservation can only be
5184 * released, though, by the initiator who made it or by one of
5185 * several reset type events.
5186 */
5187 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
5188 lun->flags &= ~CTL_LUN_RESERVED;
5189
5190 mtx_unlock(&lun->lun_lock);
5191
5192 ctl_set_success(ctsio);
5193 ctl_done((union ctl_io *)ctsio);
5194 return (CTL_RETVAL_COMPLETE);
5195}
5196
5197int
5198ctl_scsi_reserve(struct ctl_scsiio *ctsio)
5199{
5200 struct ctl_lun *lun;
5201 uint32_t residx;
5202
5203 CTL_DEBUG_PRINT(("ctl_reserve\n"));
5204
5205 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5206 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5207
5208 /*
5209 * XXX KDM right now, we only support LUN reservation. We don't
5210 * support 3rd party reservations, or extent reservations, which
5211 * might actually need the parameter list. If we've gotten this
5212 * far, we've got a LUN reservation. Anything else got kicked out
5213 * above. So, according to SPC, ignore the length.
5214 */
5215
5216 mtx_lock(&lun->lun_lock);
5217 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
5218 ctl_set_reservation_conflict(ctsio);
5219 goto bailout;
5220 }
5221
5222 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */
5223 if (lun->flags & CTL_LUN_PR_RESERVED) {
5224 ctl_set_success(ctsio);
5225 goto bailout;
5226 }
5227
5228 lun->flags |= CTL_LUN_RESERVED;
5229 lun->res_idx = residx;
5230 ctl_set_success(ctsio);
5231
5232bailout:
5233 mtx_unlock(&lun->lun_lock);
5234 ctl_done((union ctl_io *)ctsio);
5235 return (CTL_RETVAL_COMPLETE);
5236}
5237
5238int
5239ctl_start_stop(struct ctl_scsiio *ctsio)
5240{
5241 struct scsi_start_stop_unit *cdb;
5242 struct ctl_lun *lun;
5243 int retval;
5244
5245 CTL_DEBUG_PRINT(("ctl_start_stop\n"));
5246
5247 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5248 cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
5249
5250 if ((cdb->how & SSS_PC_MASK) == 0) {
5251 if ((lun->flags & CTL_LUN_PR_RESERVED) &&
5252 (cdb->how & SSS_START) == 0) {
5253 uint32_t residx;
5254
5255 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5256 if (ctl_get_prkey(lun, residx) == 0 ||
5257 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) {
5258
5259 ctl_set_reservation_conflict(ctsio);
5260 ctl_done((union ctl_io *)ctsio);
5261 return (CTL_RETVAL_COMPLETE);
5262 }
5263 }
5264
5265 if ((cdb->how & SSS_LOEJ) &&
5266 (lun->flags & CTL_LUN_REMOVABLE) == 0) {
5267 ctl_set_invalid_field(ctsio,
5268 /*sks_valid*/ 1,
5269 /*command*/ 1,
5270 /*field*/ 4,
5271 /*bit_valid*/ 1,
5272 /*bit*/ 1);
5273 ctl_done((union ctl_io *)ctsio);
5274 return (CTL_RETVAL_COMPLETE);
5275 }
5276
5277 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) &&
5278 lun->prevent_count > 0) {
5279 /* "Medium removal prevented" */
5280 ctl_set_sense(ctsio, /*current_error*/ 1,
5281 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ?
5282 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST,
5283 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE);
5284 ctl_done((union ctl_io *)ctsio);
5285 return (CTL_RETVAL_COMPLETE);
5286 }
5287 }
5288
5289 retval = lun->backend->config_write((union ctl_io *)ctsio);
5290 return (retval);
5291}
5292
5293int
5294ctl_prevent_allow(struct ctl_scsiio *ctsio)
5295{
5296 struct ctl_lun *lun;
5297 struct scsi_prevent *cdb;
5298 int retval;
5299 uint32_t initidx;
5300
5301 CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));
5302
5303 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5304 cdb = (struct scsi_prevent *)ctsio->cdb;
5305
5306 if ((lun->flags & CTL_LUN_REMOVABLE) == 0) {
5307 ctl_set_invalid_opcode(ctsio);
5308 ctl_done((union ctl_io *)ctsio);
5309 return (CTL_RETVAL_COMPLETE);
5310 }
5311
5312 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5313 mtx_lock(&lun->lun_lock);
5314 if ((cdb->how & PR_PREVENT) &&
5315 ctl_is_set(lun->prevent, initidx) == 0) {
5316 ctl_set_mask(lun->prevent, initidx);
5317 lun->prevent_count++;
5318 } else if ((cdb->how & PR_PREVENT) == 0 &&
5319 ctl_is_set(lun->prevent, initidx)) {
5320 ctl_clear_mask(lun->prevent, initidx);
5321 lun->prevent_count--;
5322 }
5323 mtx_unlock(&lun->lun_lock);
5324 retval = lun->backend->config_write((union ctl_io *)ctsio);
5325 return (retval);
5326}
5327
5328/*
5329 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5330 * we don't really do anything with the LBA and length fields if the user
5331 * passes them in. Instead we'll just flush out the cache for the entire
5332 * LUN.
5333 */
5334int
5335ctl_sync_cache(struct ctl_scsiio *ctsio)
5336{
5337 struct ctl_lun *lun;
5338 struct ctl_softc *softc;
5339 struct ctl_lba_len_flags *lbalen;
5340 uint64_t starting_lba;
5341 uint32_t block_count;
5342 int retval;
5343 uint8_t byte2;
5344
5345 CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5346
5347 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5348 softc = lun->ctl_softc;
5349 retval = 0;
5350
5351 switch (ctsio->cdb[0]) {
5352 case SYNCHRONIZE_CACHE: {
5353 struct scsi_sync_cache *cdb;
5354 cdb = (struct scsi_sync_cache *)ctsio->cdb;
5355
5356 starting_lba = scsi_4btoul(cdb->begin_lba);
5357 block_count = scsi_2btoul(cdb->lb_count);
5358 byte2 = cdb->byte2;
5359 break;
5360 }
5361 case SYNCHRONIZE_CACHE_16: {
5362 struct scsi_sync_cache_16 *cdb;
5363 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
5364
5365 starting_lba = scsi_8btou64(cdb->begin_lba);
5366 block_count = scsi_4btoul(cdb->lb_count);
5367 byte2 = cdb->byte2;
5368 break;
5369 }
5370 default:
5371 ctl_set_invalid_opcode(ctsio);
5372 ctl_done((union ctl_io *)ctsio);
5373 goto bailout;
5374 break; /* NOTREACHED */
5375 }
5376
5377 /*
5378 * We check the LBA and length, but don't do anything with them.
5379 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
5380 * get flushed. This check will just help satisfy anyone who wants
5381 * to see an error for an out of range LBA.
5382 */
5383 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
5384 ctl_set_lba_out_of_range(ctsio,
5385 MAX(starting_lba, lun->be_lun->maxlba + 1));
5386 ctl_done((union ctl_io *)ctsio);
5387 goto bailout;
5388 }
5389
5390 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5391 lbalen->lba = starting_lba;
5392 lbalen->len = block_count;
5393 lbalen->flags = byte2;
5394 retval = lun->backend->config_write((union ctl_io *)ctsio);
5395
5396bailout:
5397 return (retval);
5398}
5399
5400int
5401ctl_format(struct ctl_scsiio *ctsio)
5402{
5403 struct scsi_format *cdb;
5404 struct ctl_lun *lun;
5405 int length, defect_list_len;
5406
5407 CTL_DEBUG_PRINT(("ctl_format\n"));
5408
5409 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5410
5411 cdb = (struct scsi_format *)ctsio->cdb;
5412
5413 length = 0;
5414 if (cdb->byte2 & SF_FMTDATA) {
5415 if (cdb->byte2 & SF_LONGLIST)
5416 length = sizeof(struct scsi_format_header_long);
5417 else
5418 length = sizeof(struct scsi_format_header_short);
5419 }
5420
5421 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5422 && (length > 0)) {
5423 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5424 ctsio->kern_data_len = length;
5425 ctsio->kern_total_len = length;
5426 ctsio->kern_data_resid = 0;
5427 ctsio->kern_rel_offset = 0;
5428 ctsio->kern_sg_entries = 0;
5429 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5430 ctsio->be_move_done = ctl_config_move_done;
5431 ctl_datamove((union ctl_io *)ctsio);
5432
5433 return (CTL_RETVAL_COMPLETE);
5434 }
5435
5436 defect_list_len = 0;
5437
5438 if (cdb->byte2 & SF_FMTDATA) {
5439 if (cdb->byte2 & SF_LONGLIST) {
5440 struct scsi_format_header_long *header;
5441
5442 header = (struct scsi_format_header_long *)
5443 ctsio->kern_data_ptr;
5444
5445 defect_list_len = scsi_4btoul(header->defect_list_len);
5446 if (defect_list_len != 0) {
5447 ctl_set_invalid_field(ctsio,
5448 /*sks_valid*/ 1,
5449 /*command*/ 0,
5450 /*field*/ 2,
5451 /*bit_valid*/ 0,
5452 /*bit*/ 0);
5453 goto bailout;
5454 }
5455 } else {
5456 struct scsi_format_header_short *header;
5457
5458 header = (struct scsi_format_header_short *)
5459 ctsio->kern_data_ptr;
5460
5461 defect_list_len = scsi_2btoul(header->defect_list_len);
5462 if (defect_list_len != 0) {
5463 ctl_set_invalid_field(ctsio,
5464 /*sks_valid*/ 1,
5465 /*command*/ 0,
5466 /*field*/ 2,
5467 /*bit_valid*/ 0,
5468 /*bit*/ 0);
5469 goto bailout;
5470 }
5471 }
5472 }
5473
5474 ctl_set_success(ctsio);
5475bailout:
5476
5477 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5478 free(ctsio->kern_data_ptr, M_CTL);
5479 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5480 }
5481
5482 ctl_done((union ctl_io *)ctsio);
5483 return (CTL_RETVAL_COMPLETE);
5484}
5485
5486int
5487ctl_read_buffer(struct ctl_scsiio *ctsio)
5488{
5489 struct ctl_lun *lun;
5490 uint64_t buffer_offset;
5491 uint32_t len;
5492 uint8_t byte2;
5493 static uint8_t descr[4];
5494 static uint8_t echo_descr[4] = { 0 };
5495
5496 CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
5497 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5498 switch (ctsio->cdb[0]) {
5499 case READ_BUFFER: {
5500 struct scsi_read_buffer *cdb;
5501
5502 cdb = (struct scsi_read_buffer *)ctsio->cdb;
5503 buffer_offset = scsi_3btoul(cdb->offset);
5504 len = scsi_3btoul(cdb->length);
5505 byte2 = cdb->byte2;
5506 break;
5507 }
5508 case READ_BUFFER_16: {
5509 struct scsi_read_buffer_16 *cdb;
5510
5511 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb;
5512 buffer_offset = scsi_8btou64(cdb->offset);
5513 len = scsi_4btoul(cdb->length);
5514 byte2 = cdb->byte2;
5515 break;
5516 }
5517 default: /* This shouldn't happen. */
5518 ctl_set_invalid_opcode(ctsio);
5519 ctl_done((union ctl_io *)ctsio);
5520 return (CTL_RETVAL_COMPLETE);
5521 }
5522
5523 if (buffer_offset > CTL_WRITE_BUFFER_SIZE ||
5524 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5525 ctl_set_invalid_field(ctsio,
5526 /*sks_valid*/ 1,
5527 /*command*/ 1,
5528 /*field*/ 6,
5529 /*bit_valid*/ 0,
5530 /*bit*/ 0);
5531 ctl_done((union ctl_io *)ctsio);
5532 return (CTL_RETVAL_COMPLETE);
5533 }
5534
5535 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) {
5536 descr[0] = 0;
5537 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
5538 ctsio->kern_data_ptr = descr;
5539 len = min(len, sizeof(descr));
5540 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
5541 ctsio->kern_data_ptr = echo_descr;
5542 len = min(len, sizeof(echo_descr));
5543 } else {
5544 if (lun->write_buffer == NULL) {
5545 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5546 M_CTL, M_WAITOK);
5547 }
5548 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5549 }
5550 ctsio->kern_data_len = len;
5551 ctsio->kern_total_len = len;
5552 ctsio->kern_data_resid = 0;
5553 ctsio->kern_rel_offset = 0;
5554 ctsio->kern_sg_entries = 0;
5555 ctl_set_success(ctsio);
5556 ctsio->be_move_done = ctl_config_move_done;
5557 ctl_datamove((union ctl_io *)ctsio);
5558 return (CTL_RETVAL_COMPLETE);
5559}
5560
5561int
5562ctl_write_buffer(struct ctl_scsiio *ctsio)
5563{
5564 struct scsi_write_buffer *cdb;
5565 struct ctl_lun *lun;
5566 int buffer_offset, len;
5567
5568 CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5569
5570 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5571 cdb = (struct scsi_write_buffer *)ctsio->cdb;
5572
5573 len = scsi_3btoul(cdb->length);
5574 buffer_offset = scsi_3btoul(cdb->offset);
5575
5576 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5577 ctl_set_invalid_field(ctsio,
5578 /*sks_valid*/ 1,
5579 /*command*/ 1,
5580 /*field*/ 6,
5581 /*bit_valid*/ 0,
5582 /*bit*/ 0);
5583 ctl_done((union ctl_io *)ctsio);
5584 return (CTL_RETVAL_COMPLETE);
5585 }
5586
5587 /*
5588 * If we've got a kernel request that hasn't been malloced yet,
5589 * malloc it and tell the caller the data buffer is here.
5590 */
5591 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5592 if (lun->write_buffer == NULL) {
5593 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5594 M_CTL, M_WAITOK);
5595 }
5596 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5597 ctsio->kern_data_len = len;
5598 ctsio->kern_total_len = len;
5599 ctsio->kern_data_resid = 0;
5600 ctsio->kern_rel_offset = 0;
5601 ctsio->kern_sg_entries = 0;
5602 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5603 ctsio->be_move_done = ctl_config_move_done;
5604 ctl_datamove((union ctl_io *)ctsio);
5605
5606 return (CTL_RETVAL_COMPLETE);
5607 }
5608
5609 ctl_set_success(ctsio);
5610 ctl_done((union ctl_io *)ctsio);
5611 return (CTL_RETVAL_COMPLETE);
5612}
5613
5614int
5615ctl_write_same(struct ctl_scsiio *ctsio)
5616{
5617 struct ctl_lun *lun;
5618 struct ctl_lba_len_flags *lbalen;
5619 uint64_t lba;
5620 uint32_t num_blocks;
5621 int len, retval;
5622 uint8_t byte2;
5623
5624 CTL_DEBUG_PRINT(("ctl_write_same\n"));
5625
5626 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5627
5628 switch (ctsio->cdb[0]) {
5629 case WRITE_SAME_10: {
5630 struct scsi_write_same_10 *cdb;
5631
5632 cdb = (struct scsi_write_same_10 *)ctsio->cdb;
5633
5634 lba = scsi_4btoul(cdb->addr);
5635 num_blocks = scsi_2btoul(cdb->length);
5636 byte2 = cdb->byte2;
5637 break;
5638 }
5639 case WRITE_SAME_16: {
5640 struct scsi_write_same_16 *cdb;
5641
5642 cdb = (struct scsi_write_same_16 *)ctsio->cdb;
5643
5644 lba = scsi_8btou64(cdb->addr);
5645 num_blocks = scsi_4btoul(cdb->length);
5646 byte2 = cdb->byte2;
5647 break;
5648 }
5649 default:
5650 /*
5651 * We got a command we don't support. This shouldn't
5652 * happen, commands should be filtered out above us.
5653 */
5654 ctl_set_invalid_opcode(ctsio);
5655 ctl_done((union ctl_io *)ctsio);
5656
5657 return (CTL_RETVAL_COMPLETE);
5658 break; /* NOTREACHED */
5659 }
5660
5661 /* ANCHOR flag can be used only together with UNMAP */
5662 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) {
5663 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
5664 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
5665 ctl_done((union ctl_io *)ctsio);
5666 return (CTL_RETVAL_COMPLETE);
5667 }
5668
5669 /*
5670 * The first check is to make sure we're in bounds, the second
5671 * check is to catch wrap-around problems. If the lba + num blocks
5672 * is less than the lba, then we've wrapped around and the block
5673 * range is invalid anyway.
5674 */
5675 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5676 || ((lba + num_blocks) < lba)) {
5677 ctl_set_lba_out_of_range(ctsio,
5678 MAX(lba, lun->be_lun->maxlba + 1));
5679 ctl_done((union ctl_io *)ctsio);
5680 return (CTL_RETVAL_COMPLETE);
5681 }
5682
5683 /* Zero number of blocks means "to the last logical block" */
5684 if (num_blocks == 0) {
5685 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
5686 ctl_set_invalid_field(ctsio,
5687 /*sks_valid*/ 0,
5688 /*command*/ 1,
5689 /*field*/ 0,
5690 /*bit_valid*/ 0,
5691 /*bit*/ 0);
5692 ctl_done((union ctl_io *)ctsio);
5693 return (CTL_RETVAL_COMPLETE);
5694 }
5695 num_blocks = (lun->be_lun->maxlba + 1) - lba;
5696 }
5697
5698 len = lun->be_lun->blocksize;
5699
5700 /*
5701 * If we've got a kernel request that hasn't been malloced yet,
5702 * malloc it and tell the caller the data buffer is here.
5703 */
5704 if ((byte2 & SWS_NDOB) == 0 &&
5705 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5706 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5707 ctsio->kern_data_len = len;
5708 ctsio->kern_total_len = len;
5709 ctsio->kern_data_resid = 0;
5710 ctsio->kern_rel_offset = 0;
5711 ctsio->kern_sg_entries = 0;
5712 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5713 ctsio->be_move_done = ctl_config_move_done;
5714 ctl_datamove((union ctl_io *)ctsio);
5715
5716 return (CTL_RETVAL_COMPLETE);
5717 }
5718
5719 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5720 lbalen->lba = lba;
5721 lbalen->len = num_blocks;
5722 lbalen->flags = byte2;
5723 retval = lun->backend->config_write((union ctl_io *)ctsio);
5724
5725 return (retval);
5726}
5727
5728int
5729ctl_unmap(struct ctl_scsiio *ctsio)
5730{
5731 struct ctl_lun *lun;
5732 struct scsi_unmap *cdb;
5733 struct ctl_ptr_len_flags *ptrlen;
5734 struct scsi_unmap_header *hdr;
5735 struct scsi_unmap_desc *buf, *end, *endnz, *range;
5736 uint64_t lba;
5737 uint32_t num_blocks;
5738 int len, retval;
5739 uint8_t byte2;
5740
5741 CTL_DEBUG_PRINT(("ctl_unmap\n"));
5742
5743 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5744 cdb = (struct scsi_unmap *)ctsio->cdb;
5745
5746 len = scsi_2btoul(cdb->length);
5747 byte2 = cdb->byte2;
5748
5749 /*
5750 * If we've got a kernel request that hasn't been malloced yet,
5751 * malloc it and tell the caller the data buffer is here.
5752 */
5753 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5754 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5755 ctsio->kern_data_len = len;
5756 ctsio->kern_total_len = len;
5757 ctsio->kern_data_resid = 0;
5758 ctsio->kern_rel_offset = 0;
5759 ctsio->kern_sg_entries = 0;
5760 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5761 ctsio->be_move_done = ctl_config_move_done;
5762 ctl_datamove((union ctl_io *)ctsio);
5763
5764 return (CTL_RETVAL_COMPLETE);
5765 }
5766
5767 len = ctsio->kern_total_len - ctsio->kern_data_resid;
5768 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
5769 if (len < sizeof (*hdr) ||
5770 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
5771 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
5772 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
5773 ctl_set_invalid_field(ctsio,
5774 /*sks_valid*/ 0,
5775 /*command*/ 0,
5776 /*field*/ 0,
5777 /*bit_valid*/ 0,
5778 /*bit*/ 0);
5779 goto done;
5780 }
5781 len = scsi_2btoul(hdr->desc_length);
5782 buf = (struct scsi_unmap_desc *)(hdr + 1);
5783 end = buf + len / sizeof(*buf);
5784
5785 endnz = buf;
5786 for (range = buf; range < end; range++) {
5787 lba = scsi_8btou64(range->lba);
5788 num_blocks = scsi_4btoul(range->length);
5789 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5790 || ((lba + num_blocks) < lba)) {
5791 ctl_set_lba_out_of_range(ctsio,
5792 MAX(lba, lun->be_lun->maxlba + 1));
5793 ctl_done((union ctl_io *)ctsio);
5794 return (CTL_RETVAL_COMPLETE);
5795 }
5796 if (num_blocks != 0)
5797 endnz = range + 1;
5798 }
5799
5800 /*
5801 * Block backend can not handle zero last range.
5802 * Filter it out and return if there is nothing left.
5803 */
5804 len = (uint8_t *)endnz - (uint8_t *)buf;
5805 if (len == 0) {
5806 ctl_set_success(ctsio);
5807 goto done;
5808 }
5809
5810 mtx_lock(&lun->lun_lock);
5811 ptrlen = (struct ctl_ptr_len_flags *)
5812 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5813 ptrlen->ptr = (void *)buf;
5814 ptrlen->len = len;
5815 ptrlen->flags = byte2;
5816 ctl_check_blocked(lun);
5817 mtx_unlock(&lun->lun_lock);
5818
5819 retval = lun->backend->config_write((union ctl_io *)ctsio);
5820 return (retval);
5821
5822done:
5823 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5824 free(ctsio->kern_data_ptr, M_CTL);
5825 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5826 }
5827 ctl_done((union ctl_io *)ctsio);
5828 return (CTL_RETVAL_COMPLETE);
5829}
5830
5831int
5832ctl_default_page_handler(struct ctl_scsiio *ctsio,
5833 struct ctl_page_index *page_index, uint8_t *page_ptr)
5834{
5835 struct ctl_lun *lun;
5836 uint8_t *current_cp;
5837 int set_ua;
5838 uint32_t initidx;
5839
5840 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5841 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5842 set_ua = 0;
5843
5844 current_cp = (page_index->page_data + (page_index->page_len *
5845 CTL_PAGE_CURRENT));
5846
5847 mtx_lock(&lun->lun_lock);
5848 if (memcmp(current_cp, page_ptr, page_index->page_len)) {
5849 memcpy(current_cp, page_ptr, page_index->page_len);
5850 set_ua = 1;
5851 }
5852 if (set_ua != 0)
5853 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
5854 mtx_unlock(&lun->lun_lock);
5855 if (set_ua) {
5856 ctl_isc_announce_mode(lun,
5857 ctl_get_initindex(&ctsio->io_hdr.nexus),
5858 page_index->page_code, page_index->subpage);
5859 }
5860 return (CTL_RETVAL_COMPLETE);
5861}
5862
5863static void
5864ctl_ie_timer(void *arg)
5865{
5866 struct ctl_lun *lun = arg;
5867 uint64_t t;
5868
5869 if (lun->ie_asc == 0)
5870 return;
5871
5872 if (lun->MODE_IE.mrie == SIEP_MRIE_UA)
5873 ctl_est_ua_all(lun, -1, CTL_UA_IE);
5874 else
5875 lun->ie_reported = 0;
5876
5877 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) {
5878 lun->ie_reportcnt++;
5879 t = scsi_4btoul(lun->MODE_IE.interval_timer);
5880 if (t == 0 || t == UINT32_MAX)
5881 t = 3000; /* 5 min */
5882 callout_schedule(&lun->ie_callout, t * hz / 10);
5883 }
5884}
5885
5886int
5887ctl_ie_page_handler(struct ctl_scsiio *ctsio,
5888 struct ctl_page_index *page_index, uint8_t *page_ptr)
5889{
5890 struct scsi_info_exceptions_page *pg;
5891 struct ctl_lun *lun;
5892 uint64_t t;
5893
5894 (void)ctl_default_page_handler(ctsio, page_index, page_ptr);
5895
5896 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5897 pg = (struct scsi_info_exceptions_page *)page_ptr;
5898 mtx_lock(&lun->lun_lock);
5899 if (pg->info_flags & SIEP_FLAGS_TEST) {
5900 lun->ie_asc = 0x5d;
5901 lun->ie_ascq = 0xff;
5902 if (pg->mrie == SIEP_MRIE_UA) {
5903 ctl_est_ua_all(lun, -1, CTL_UA_IE);
5904 lun->ie_reported = 1;
5905 } else {
5906 ctl_clr_ua_all(lun, -1, CTL_UA_IE);
5907 lun->ie_reported = -1;
5908 }
5909 lun->ie_reportcnt = 1;
5910 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) {
5911 lun->ie_reportcnt++;
5912 t = scsi_4btoul(pg->interval_timer);
5913 if (t == 0 || t == UINT32_MAX)
5914 t = 3000; /* 5 min */
5915 callout_reset(&lun->ie_callout, t * hz / 10,
5916 ctl_ie_timer, lun);
5917 }
5918 } else {
5919 lun->ie_asc = 0;
5920 lun->ie_ascq = 0;
5921 lun->ie_reported = 1;
5922 ctl_clr_ua_all(lun, -1, CTL_UA_IE);
5923 lun->ie_reportcnt = UINT32_MAX;
5924 callout_stop(&lun->ie_callout);
5925 }
5926 mtx_unlock(&lun->lun_lock);
5927 return (CTL_RETVAL_COMPLETE);
5928}
5929
5930static int
5931ctl_do_mode_select(union ctl_io *io)
5932{
5933 struct scsi_mode_page_header *page_header;
5934 struct ctl_page_index *page_index;
5935 struct ctl_scsiio *ctsio;
5936 int page_len, page_len_offset, page_len_size;
5937 union ctl_modepage_info *modepage_info;
5938 struct ctl_lun *lun;
5939 uint16_t *len_left, *len_used;
5940 int retval, i;
5941
5942 ctsio = &io->scsiio;
5943 page_index = NULL;
5944 page_len = 0;
5945 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5946
5947 modepage_info = (union ctl_modepage_info *)
5948 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
5949 len_left = &modepage_info->header.len_left;
5950 len_used = &modepage_info->header.len_used;
5951
5952do_next_page:
5953
5954 page_header = (struct scsi_mode_page_header *)
5955 (ctsio->kern_data_ptr + *len_used);
5956
5957 if (*len_left == 0) {
5958 free(ctsio->kern_data_ptr, M_CTL);
5959 ctl_set_success(ctsio);
5960 ctl_done((union ctl_io *)ctsio);
5961 return (CTL_RETVAL_COMPLETE);
5962 } else if (*len_left < sizeof(struct scsi_mode_page_header)) {
5963
5964 free(ctsio->kern_data_ptr, M_CTL);
5965 ctl_set_param_len_error(ctsio);
5966 ctl_done((union ctl_io *)ctsio);
5967 return (CTL_RETVAL_COMPLETE);
5968
5969 } else if ((page_header->page_code & SMPH_SPF)
5970 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
5971
5972 free(ctsio->kern_data_ptr, M_CTL);
5973 ctl_set_param_len_error(ctsio);
5974 ctl_done((union ctl_io *)ctsio);
5975 return (CTL_RETVAL_COMPLETE);
5976 }
5977
5978
5979 /*
5980 * XXX KDM should we do something with the block descriptor?
5981 */
5982 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
5983 page_index = &lun->mode_pages.index[i];
5984 if (lun->be_lun->lun_type == T_DIRECT &&
5985 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
5986 continue;
5987 if (lun->be_lun->lun_type == T_PROCESSOR &&
5988 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
5989 continue;
5990 if (lun->be_lun->lun_type == T_CDROM &&
5991 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
5992 continue;
5993
5994 if ((page_index->page_code & SMPH_PC_MASK) !=
5995 (page_header->page_code & SMPH_PC_MASK))
5996 continue;
5997
5998 /*
5999 * If neither page has a subpage code, then we've got a
6000 * match.
6001 */
6002 if (((page_index->page_code & SMPH_SPF) == 0)
6003 && ((page_header->page_code & SMPH_SPF) == 0)) {
6004 page_len = page_header->page_length;
6005 break;
6006 }
6007
6008 /*
6009 * If both pages have subpages, then the subpage numbers
6010 * have to match.
6011 */
6012 if ((page_index->page_code & SMPH_SPF)
6013 && (page_header->page_code & SMPH_SPF)) {
6014 struct scsi_mode_page_header_sp *sph;
6015
6016 sph = (struct scsi_mode_page_header_sp *)page_header;
6017 if (page_index->subpage == sph->subpage) {
6018 page_len = scsi_2btoul(sph->page_length);
6019 break;
6020 }
6021 }
6022 }
6023
6024 /*
6025 * If we couldn't find the page, or if we don't have a mode select
6026 * handler for it, send back an error to the user.
6027 */
6028 if ((i >= CTL_NUM_MODE_PAGES)
6029 || (page_index->select_handler == NULL)) {
6030 ctl_set_invalid_field(ctsio,
6031 /*sks_valid*/ 1,
6032 /*command*/ 0,
6033 /*field*/ *len_used,
6034 /*bit_valid*/ 0,
6035 /*bit*/ 0);
6036 free(ctsio->kern_data_ptr, M_CTL);
6037 ctl_done((union ctl_io *)ctsio);
6038 return (CTL_RETVAL_COMPLETE);
6039 }
6040
6041 if (page_index->page_code & SMPH_SPF) {
6042 page_len_offset = 2;
6043 page_len_size = 2;
6044 } else {
6045 page_len_size = 1;
6046 page_len_offset = 1;
6047 }
6048
6049 /*
6050 * If the length the initiator gives us isn't the one we specify in
6051 * the mode page header, or if they didn't specify enough data in
6052 * the CDB to avoid truncating this page, kick out the request.
6053 */
6054 if (page_len != page_index->page_len - page_len_offset - page_len_size) {
6055 ctl_set_invalid_field(ctsio,
6056 /*sks_valid*/ 1,
6057 /*command*/ 0,
6058 /*field*/ *len_used + page_len_offset,
6059 /*bit_valid*/ 0,
6060 /*bit*/ 0);
6061 free(ctsio->kern_data_ptr, M_CTL);
6062 ctl_done((union ctl_io *)ctsio);
6063 return (CTL_RETVAL_COMPLETE);
6064 }
6065 if (*len_left < page_index->page_len) {
6066 free(ctsio->kern_data_ptr, M_CTL);
6067 ctl_set_param_len_error(ctsio);
6068 ctl_done((union ctl_io *)ctsio);
6069 return (CTL_RETVAL_COMPLETE);
6070 }
6071
6072 /*
6073 * Run through the mode page, checking to make sure that the bits
6074 * the user changed are actually legal for him to change.
6075 */
6076 for (i = 0; i < page_index->page_len; i++) {
6077 uint8_t *user_byte, *change_mask, *current_byte;
6078 int bad_bit;
6079 int j;
6080
6081 user_byte = (uint8_t *)page_header + i;
6082 change_mask = page_index->page_data +
6083 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6084 current_byte = page_index->page_data +
6085 (page_index->page_len * CTL_PAGE_CURRENT) + i;
6086
6087 /*
6088 * Check to see whether the user set any bits in this byte
6089 * that he is not allowed to set.
6090 */
6091 if ((*user_byte & ~(*change_mask)) ==
6092 (*current_byte & ~(*change_mask)))
6093 continue;
6094
6095 /*
6096 * Go through bit by bit to determine which one is illegal.
6097 */
6098 bad_bit = 0;
6099 for (j = 7; j >= 0; j--) {
6100 if ((((1 << i) & ~(*change_mask)) & *user_byte) !=
6101 (((1 << i) & ~(*change_mask)) & *current_byte)) {
6102 bad_bit = i;
6103 break;
6104 }
6105 }
6106 ctl_set_invalid_field(ctsio,
6107 /*sks_valid*/ 1,
6108 /*command*/ 0,
6109 /*field*/ *len_used + i,
6110 /*bit_valid*/ 1,
6111 /*bit*/ bad_bit);
6112 free(ctsio->kern_data_ptr, M_CTL);
6113 ctl_done((union ctl_io *)ctsio);
6114 return (CTL_RETVAL_COMPLETE);
6115 }
6116
6117 /*
6118 * Decrement these before we call the page handler, since we may
6119 * end up getting called back one way or another before the handler
6120 * returns to this context.
6121 */
6122 *len_left -= page_index->page_len;
6123 *len_used += page_index->page_len;
6124
6125 retval = page_index->select_handler(ctsio, page_index,
6126 (uint8_t *)page_header);
6127
6128 /*
6129 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6130 * wait until this queued command completes to finish processing
6131 * the mode page. If it returns anything other than
6132 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6133 * already set the sense information, freed the data pointer, and
6134 * completed the io for us.
6135 */
6136 if (retval != CTL_RETVAL_COMPLETE)
6137 goto bailout_no_done;
6138
6139 /*
6140 * If the initiator sent us more than one page, parse the next one.
6141 */
6142 if (*len_left > 0)
6143 goto do_next_page;
6144
6145 ctl_set_success(ctsio);
6146 free(ctsio->kern_data_ptr, M_CTL);
6147 ctl_done((union ctl_io *)ctsio);
6148
6149bailout_no_done:
6150
6151 return (CTL_RETVAL_COMPLETE);
6152
6153}
6154
6155int
6156ctl_mode_select(struct ctl_scsiio *ctsio)
6157{
6158 struct ctl_lun *lun;
6159 union ctl_modepage_info *modepage_info;
6160 int bd_len, i, header_size, param_len, pf, rtd, sp;
6161 uint32_t initidx;
6162
6163 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6164 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
6165 switch (ctsio->cdb[0]) {
6166 case MODE_SELECT_6: {
6167 struct scsi_mode_select_6 *cdb;
6168
6169 cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6170
6171 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6172 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
6173 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6174 param_len = cdb->length;
6175 header_size = sizeof(struct scsi_mode_header_6);
6176 break;
6177 }
6178 case MODE_SELECT_10: {
6179 struct scsi_mode_select_10 *cdb;
6180
6181 cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6182
6183 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6184 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
6185 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6186 param_len = scsi_2btoul(cdb->length);
6187 header_size = sizeof(struct scsi_mode_header_10);
6188 break;
6189 }
6190 default:
6191 ctl_set_invalid_opcode(ctsio);
6192 ctl_done((union ctl_io *)ctsio);
6193 return (CTL_RETVAL_COMPLETE);
6194 }
6195
6196 if (rtd) {
6197 if (param_len != 0) {
6198 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
6199 /*command*/ 1, /*field*/ 0,
6200 /*bit_valid*/ 0, /*bit*/ 0);
6201 ctl_done((union ctl_io *)ctsio);
6202 return (CTL_RETVAL_COMPLETE);
6203 }
6204
6205 /* Revert to defaults. */
6206 ctl_init_page_index(lun);
6207 mtx_lock(&lun->lun_lock);
6208 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
6209 mtx_unlock(&lun->lun_lock);
6210 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6211 ctl_isc_announce_mode(lun, -1,
6212 lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
6213 lun->mode_pages.index[i].subpage);
6214 }
6215 ctl_set_success(ctsio);
6216 ctl_done((union ctl_io *)ctsio);
6217 return (CTL_RETVAL_COMPLETE);
6218 }
6219
6220 /*
6221 * From SPC-3:
6222 * "A parameter list length of zero indicates that the Data-Out Buffer
6223 * shall be empty. This condition shall not be considered as an error."
6224 */
6225 if (param_len == 0) {
6226 ctl_set_success(ctsio);
6227 ctl_done((union ctl_io *)ctsio);
6228 return (CTL_RETVAL_COMPLETE);
6229 }
6230
6231 /*
6232 * Since we'll hit this the first time through, prior to
6233 * allocation, we don't need to free a data buffer here.
6234 */
6235 if (param_len < header_size) {
6236 ctl_set_param_len_error(ctsio);
6237 ctl_done((union ctl_io *)ctsio);
6238 return (CTL_RETVAL_COMPLETE);
6239 }
6240
6241 /*
6242 * Allocate the data buffer and grab the user's data. In theory,
6243 * we shouldn't have to sanity check the parameter list length here
6244 * because the maximum size is 64K. We should be able to malloc
6245 * that much without too many problems.
6246 */
6247 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6248 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
6249 ctsio->kern_data_len = param_len;
6250 ctsio->kern_total_len = param_len;
6251 ctsio->kern_data_resid = 0;
6252 ctsio->kern_rel_offset = 0;
6253 ctsio->kern_sg_entries = 0;
6254 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6255 ctsio->be_move_done = ctl_config_move_done;
6256 ctl_datamove((union ctl_io *)ctsio);
6257
6258 return (CTL_RETVAL_COMPLETE);
6259 }
6260
6261 switch (ctsio->cdb[0]) {
6262 case MODE_SELECT_6: {
6263 struct scsi_mode_header_6 *mh6;
6264
6265 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
6266 bd_len = mh6->blk_desc_len;
6267 break;
6268 }
6269 case MODE_SELECT_10: {
6270 struct scsi_mode_header_10 *mh10;
6271
6272 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
6273 bd_len = scsi_2btoul(mh10->blk_desc_len);
6274 break;
6275 }
6276 default:
6277 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
6278 }
6279
6280 if (param_len < (header_size + bd_len)) {
6281 free(ctsio->kern_data_ptr, M_CTL);
6282 ctl_set_param_len_error(ctsio);
6283 ctl_done((union ctl_io *)ctsio);
6284 return (CTL_RETVAL_COMPLETE);
6285 }
6286
6287 /*
6288 * Set the IO_CONT flag, so that if this I/O gets passed to
6289 * ctl_config_write_done(), it'll get passed back to
6290 * ctl_do_mode_select() for further processing, or completion if
6291 * we're all done.
6292 */
6293 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
6294 ctsio->io_cont = ctl_do_mode_select;
6295
6296 modepage_info = (union ctl_modepage_info *)
6297 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6298 memset(modepage_info, 0, sizeof(*modepage_info));
6299 modepage_info->header.len_left = param_len - header_size - bd_len;
6300 modepage_info->header.len_used = header_size + bd_len;
6301
6302 return (ctl_do_mode_select((union ctl_io *)ctsio));
6303}
6304
6305int
6306ctl_mode_sense(struct ctl_scsiio *ctsio)
6307{
6308 struct ctl_lun *lun;
6309 int pc, page_code, dbd, llba, subpage;
6310 int alloc_len, page_len, header_len, total_len;
6311 struct scsi_mode_block_descr *block_desc;
6312 struct ctl_page_index *page_index;
6313
6314 dbd = 0;
6315 llba = 0;
6316 block_desc = NULL;
6317
6318 CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6319
6320 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6321 switch (ctsio->cdb[0]) {
6322 case MODE_SENSE_6: {
6323 struct scsi_mode_sense_6 *cdb;
6324
6325 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6326
6327 header_len = sizeof(struct scsi_mode_hdr_6);
6328 if (cdb->byte2 & SMS_DBD)
6329 dbd = 1;
6330 else
6331 header_len += sizeof(struct scsi_mode_block_descr);
6332
6333 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6334 page_code = cdb->page & SMS_PAGE_CODE;
6335 subpage = cdb->subpage;
6336 alloc_len = cdb->length;
6337 break;
6338 }
6339 case MODE_SENSE_10: {
6340 struct scsi_mode_sense_10 *cdb;
6341
6342 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
6343
6344 header_len = sizeof(struct scsi_mode_hdr_10);
6345
6346 if (cdb->byte2 & SMS_DBD)
6347 dbd = 1;
6348 else
6349 header_len += sizeof(struct scsi_mode_block_descr);
6350 if (cdb->byte2 & SMS10_LLBAA)
6351 llba = 1;
6352 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6353 page_code = cdb->page & SMS_PAGE_CODE;
6354 subpage = cdb->subpage;
6355 alloc_len = scsi_2btoul(cdb->length);
6356 break;
6357 }
6358 default:
6359 ctl_set_invalid_opcode(ctsio);
6360 ctl_done((union ctl_io *)ctsio);
6361 return (CTL_RETVAL_COMPLETE);
6362 break; /* NOTREACHED */
6363 }
6364
6365 /*
6366 * We have to make a first pass through to calculate the size of
6367 * the pages that match the user's query. Then we allocate enough
6368 * memory to hold it, and actually copy the data into the buffer.
6369 */
6370 switch (page_code) {
6371 case SMS_ALL_PAGES_PAGE: {
6372 u_int i;
6373
6374 page_len = 0;
6375
6376 /*
6377 * At the moment, values other than 0 and 0xff here are
6378 * reserved according to SPC-3.
6379 */
6380 if ((subpage != SMS_SUBPAGE_PAGE_0)
6381 && (subpage != SMS_SUBPAGE_ALL)) {
6382 ctl_set_invalid_field(ctsio,
6383 /*sks_valid*/ 1,
6384 /*command*/ 1,
6385 /*field*/ 3,
6386 /*bit_valid*/ 0,
6387 /*bit*/ 0);
6388 ctl_done((union ctl_io *)ctsio);
6389 return (CTL_RETVAL_COMPLETE);
6390 }
6391
6392 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6393 page_index = &lun->mode_pages.index[i];
6394
6395 /* Make sure the page is supported for this dev type */
6396 if (lun->be_lun->lun_type == T_DIRECT &&
6397 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6398 continue;
6399 if (lun->be_lun->lun_type == T_PROCESSOR &&
6400 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6401 continue;
6402 if (lun->be_lun->lun_type == T_CDROM &&
6403 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6404 continue;
6405
6406 /*
6407 * We don't use this subpage if the user didn't
6408 * request all subpages.
6409 */
6410 if ((page_index->subpage != 0)
6411 && (subpage == SMS_SUBPAGE_PAGE_0))
6412 continue;
6413
6414#if 0
6415 printf("found page %#x len %d\n",
6416 page_index->page_code & SMPH_PC_MASK,
6417 page_index->page_len);
6418#endif
6419 page_len += page_index->page_len;
6420 }
6421 break;
6422 }
6423 default: {
6424 u_int i;
6425
6426 page_len = 0;
6427
6428 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6429 page_index = &lun->mode_pages.index[i];
6430
6431 /* Make sure the page is supported for this dev type */
6432 if (lun->be_lun->lun_type == T_DIRECT &&
6433 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6434 continue;
6435 if (lun->be_lun->lun_type == T_PROCESSOR &&
6436 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6437 continue;
6438 if (lun->be_lun->lun_type == T_CDROM &&
6439 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6440 continue;
6441
6442 /* Look for the right page code */
6443 if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6444 continue;
6445
6446 /* Look for the right subpage or the subpage wildcard*/
6447 if ((page_index->subpage != subpage)
6448 && (subpage != SMS_SUBPAGE_ALL))
6449 continue;
6450
6451#if 0
6452 printf("found page %#x len %d\n",
6453 page_index->page_code & SMPH_PC_MASK,
6454 page_index->page_len);
6455#endif
6456
6457 page_len += page_index->page_len;
6458 }
6459
6460 if (page_len == 0) {
6461 ctl_set_invalid_field(ctsio,
6462 /*sks_valid*/ 1,
6463 /*command*/ 1,
6464 /*field*/ 2,
6465 /*bit_valid*/ 1,
6466 /*bit*/ 5);
6467 ctl_done((union ctl_io *)ctsio);
6468 return (CTL_RETVAL_COMPLETE);
6469 }
6470 break;
6471 }
6472 }
6473
6474 total_len = header_len + page_len;
6475#if 0
6476 printf("header_len = %d, page_len = %d, total_len = %d\n",
6477 header_len, page_len, total_len);
6478#endif
6479
6480 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6481 ctsio->kern_sg_entries = 0;
6482 ctsio->kern_data_resid = 0;
6483 ctsio->kern_rel_offset = 0;
6484 if (total_len < alloc_len) {
6485 ctsio->residual = alloc_len - total_len;
6486 ctsio->kern_data_len = total_len;
6487 ctsio->kern_total_len = total_len;
6488 } else {
6489 ctsio->residual = 0;
6490 ctsio->kern_data_len = alloc_len;
6491 ctsio->kern_total_len = alloc_len;
6492 }
6493
6494 switch (ctsio->cdb[0]) {
6495 case MODE_SENSE_6: {
6496 struct scsi_mode_hdr_6 *header;
6497
6498 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
6499
6500 header->datalen = MIN(total_len - 1, 254);
6501 if (lun->be_lun->lun_type == T_DIRECT) {
6502 header->dev_specific = 0x10; /* DPOFUA */
6503 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
6504 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0)
6505 header->dev_specific |= 0x80; /* WP */
6506 }
6507 if (dbd)
6508 header->block_descr_len = 0;
6509 else
6510 header->block_descr_len =
6511 sizeof(struct scsi_mode_block_descr);
6512 block_desc = (struct scsi_mode_block_descr *)&header[1];
6513 break;
6514 }
6515 case MODE_SENSE_10: {
6516 struct scsi_mode_hdr_10 *header;
6517 int datalen;
6518
6519 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
6520
6521 datalen = MIN(total_len - 2, 65533);
6522 scsi_ulto2b(datalen, header->datalen);
6523 if (lun->be_lun->lun_type == T_DIRECT) {
6524 header->dev_specific = 0x10; /* DPOFUA */
6525 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
6526 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0)
6527 header->dev_specific |= 0x80; /* WP */
6528 }
6529 if (dbd)
6530 scsi_ulto2b(0, header->block_descr_len);
6531 else
6532 scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
6533 header->block_descr_len);
6534 block_desc = (struct scsi_mode_block_descr *)&header[1];
6535 break;
6536 }
6537 default:
6538 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
6539 }
6540
6541 /*
6542 * If we've got a disk, use its blocksize in the block
6543 * descriptor. Otherwise, just set it to 0.
6544 */
6545 if (dbd == 0) {
6546 if (lun->be_lun->lun_type == T_DIRECT)
6547 scsi_ulto3b(lun->be_lun->blocksize,
6548 block_desc->block_len);
6549 else
6550 scsi_ulto3b(0, block_desc->block_len);
6551 }
6552
6553 switch (page_code) {
6554 case SMS_ALL_PAGES_PAGE: {
6555 int i, data_used;
6556
6557 data_used = header_len;
6558 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6559 struct ctl_page_index *page_index;
6560
6561 page_index = &lun->mode_pages.index[i];
6562 if (lun->be_lun->lun_type == T_DIRECT &&
6563 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6564 continue;
6565 if (lun->be_lun->lun_type == T_PROCESSOR &&
6566 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6567 continue;
6568 if (lun->be_lun->lun_type == T_CDROM &&
6569 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6570 continue;
6571
6572 /*
6573 * We don't use this subpage if the user didn't
6574 * request all subpages. We already checked (above)
6575 * to make sure the user only specified a subpage
6576 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
6577 */
6578 if ((page_index->subpage != 0)
6579 && (subpage == SMS_SUBPAGE_PAGE_0))
6580 continue;
6581
6582 /*
6583 * Call the handler, if it exists, to update the
6584 * page to the latest values.
6585 */
6586 if (page_index->sense_handler != NULL)
6587 page_index->sense_handler(ctsio, page_index,pc);
6588
6589 memcpy(ctsio->kern_data_ptr + data_used,
6590 page_index->page_data +
6591 (page_index->page_len * pc),
6592 page_index->page_len);
6593 data_used += page_index->page_len;
6594 }
6595 break;
6596 }
6597 default: {
6598 int i, data_used;
6599
6600 data_used = header_len;
6601
6602 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6603 struct ctl_page_index *page_index;
6604
6605 page_index = &lun->mode_pages.index[i];
6606
6607 /* Look for the right page code */
6608 if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6609 continue;
6610
6611 /* Look for the right subpage or the subpage wildcard*/
6612 if ((page_index->subpage != subpage)
6613 && (subpage != SMS_SUBPAGE_ALL))
6614 continue;
6615
6616 /* Make sure the page is supported for this dev type */
6617 if (lun->be_lun->lun_type == T_DIRECT &&
6618 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6619 continue;
6620 if (lun->be_lun->lun_type == T_PROCESSOR &&
6621 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6622 continue;
6623 if (lun->be_lun->lun_type == T_CDROM &&
6624 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6625 continue;
6626
6627 /*
6628 * Call the handler, if it exists, to update the
6629 * page to the latest values.
6630 */
6631 if (page_index->sense_handler != NULL)
6632 page_index->sense_handler(ctsio, page_index,pc);
6633
6634 memcpy(ctsio->kern_data_ptr + data_used,
6635 page_index->page_data +
6636 (page_index->page_len * pc),
6637 page_index->page_len);
6638 data_used += page_index->page_len;
6639 }
6640 break;
6641 }
6642 }
6643
6644 ctl_set_success(ctsio);
6645 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6646 ctsio->be_move_done = ctl_config_move_done;
6647 ctl_datamove((union ctl_io *)ctsio);
6648 return (CTL_RETVAL_COMPLETE);
6649}
6650
6651int
6652ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
6653 struct ctl_page_index *page_index,
6654 int pc)
6655{
6656 struct ctl_lun *lun;
6657 struct scsi_log_param_header *phdr;
6658 uint8_t *data;
6659 uint64_t val;
6660
6661 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6662 data = page_index->page_data;
6663
6664 if (lun->backend->lun_attr != NULL &&
6665 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail"))
6666 != UINT64_MAX) {
6667 phdr = (struct scsi_log_param_header *)data;
6668 scsi_ulto2b(0x0001, phdr->param_code);
6669 phdr->param_control = SLP_LBIN | SLP_LP;
6670 phdr->param_len = 8;
6671 data = (uint8_t *)(phdr + 1);
6672 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6673 data[4] = 0x02; /* per-pool */
6674 data += phdr->param_len;
6675 }
6676
6677 if (lun->backend->lun_attr != NULL &&
6678 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused"))
6679 != UINT64_MAX) {
6680 phdr = (struct scsi_log_param_header *)data;
6681 scsi_ulto2b(0x0002, phdr->param_code);
6682 phdr->param_control = SLP_LBIN | SLP_LP;
6683 phdr->param_len = 8;
6684 data = (uint8_t *)(phdr + 1);
6685 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6686 data[4] = 0x01; /* per-LUN */
6687 data += phdr->param_len;
6688 }
6689
6690 if (lun->backend->lun_attr != NULL &&
6691 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail"))
6692 != UINT64_MAX) {
6693 phdr = (struct scsi_log_param_header *)data;
6694 scsi_ulto2b(0x00f1, phdr->param_code);
6695 phdr->param_control = SLP_LBIN | SLP_LP;
6696 phdr->param_len = 8;
6697 data = (uint8_t *)(phdr + 1);
6698 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6699 data[4] = 0x02; /* per-pool */
6700 data += phdr->param_len;
6701 }
6702
6703 if (lun->backend->lun_attr != NULL &&
6704 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused"))
6705 != UINT64_MAX) {
6706 phdr = (struct scsi_log_param_header *)data;
6707 scsi_ulto2b(0x00f2, phdr->param_code);
6708 phdr->param_control = SLP_LBIN | SLP_LP;
6709 phdr->param_len = 8;
6710 data = (uint8_t *)(phdr + 1);
6711 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6712 data[4] = 0x02; /* per-pool */
6713 data += phdr->param_len;
6714 }
6715
6716 page_index->page_len = data - page_index->page_data;
6717 return (0);
6718}
6719
6720int
6721ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
6722 struct ctl_page_index *page_index,
6723 int pc)
6724{
6725 struct ctl_lun *lun;
6726 struct stat_page *data;
6727 uint64_t rn, wn, rb, wb;
6728 struct bintime rt, wt;
6729 int i;
6730
6731 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6732 data = (struct stat_page *)page_index->page_data;
6733
6734 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
6735 data->sap.hdr.param_control = SLP_LBIN;
6736 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
6737 sizeof(struct scsi_log_param_header);
6738 rn = wn = rb = wb = 0;
6739 bintime_clear(&rt);
6740 bintime_clear(&wt);
6741 for (i = 0; i < CTL_MAX_PORTS; i++) {
6742 rn += lun->stats.ports[i].operations[CTL_STATS_READ];
6743 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE];
6744 rb += lun->stats.ports[i].bytes[CTL_STATS_READ];
6745 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE];
6746 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]);
6747 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]);
6748 }
6749 scsi_u64to8b(rn, data->sap.read_num);
6750 scsi_u64to8b(wn, data->sap.write_num);
6751 if (lun->stats.blocksize > 0) {
6752 scsi_u64to8b(wb / lun->stats.blocksize,
6753 data->sap.recvieved_lba);
6754 scsi_u64to8b(rb / lun->stats.blocksize,
6755 data->sap.transmitted_lba);
6756 }
6757 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000),
6758 data->sap.read_int);
6759 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000),
6760 data->sap.write_int);
6761 scsi_u64to8b(0, data->sap.weighted_num);
6762 scsi_u64to8b(0, data->sap.weighted_int);
6763 scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
6764 data->it.hdr.param_control = SLP_LBIN;
6765 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
6766 sizeof(struct scsi_log_param_header);
6767#ifdef CTL_TIME_IO
6768 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
6769#endif
6770 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
6771 data->it.hdr.param_control = SLP_LBIN;
6772 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
6773 sizeof(struct scsi_log_param_header);
6774 scsi_ulto4b(3, data->ti.exponent);
6775 scsi_ulto4b(1, data->ti.integer);
6776 return (0);
6777}
6778
6779int
6780ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
6781 struct ctl_page_index *page_index,
6782 int pc)
6783{
6784 struct ctl_lun *lun;
6785 struct scsi_log_informational_exceptions *data;
6786
6787 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6788 data = (struct scsi_log_informational_exceptions *)page_index->page_data;
6789
6790 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
6791 data->hdr.param_control = SLP_LBIN;
6792 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) -
6793 sizeof(struct scsi_log_param_header);
6794 data->ie_asc = lun->ie_asc;
6795 data->ie_ascq = lun->ie_ascq;
6796 data->temperature = 0xff;
6797 return (0);
6798}
6799
6800int
6801ctl_log_sense(struct ctl_scsiio *ctsio)
6802{
6803 struct ctl_lun *lun;
6804 int i, pc, page_code, subpage;
6805 int alloc_len, total_len;
6806 struct ctl_page_index *page_index;
6807 struct scsi_log_sense *cdb;
6808 struct scsi_log_header *header;
6809
6810 CTL_DEBUG_PRINT(("ctl_log_sense\n"));
6811
6812 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6813 cdb = (struct scsi_log_sense *)ctsio->cdb;
6814 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
6815 page_code = cdb->page & SLS_PAGE_CODE;
6816 subpage = cdb->subpage;
6817 alloc_len = scsi_2btoul(cdb->length);
6818
6819 page_index = NULL;
6820 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
6821 page_index = &lun->log_pages.index[i];
6822
6823 /* Look for the right page code */
6824 if ((page_index->page_code & SL_PAGE_CODE) != page_code)
6825 continue;
6826
6827 /* Look for the right subpage or the subpage wildcard*/
6828 if (page_index->subpage != subpage)
6829 continue;
6830
6831 break;
6832 }
6833 if (i >= CTL_NUM_LOG_PAGES) {
6834 ctl_set_invalid_field(ctsio,
6835 /*sks_valid*/ 1,
6836 /*command*/ 1,
6837 /*field*/ 2,
6838 /*bit_valid*/ 0,
6839 /*bit*/ 0);
6840 ctl_done((union ctl_io *)ctsio);
6841 return (CTL_RETVAL_COMPLETE);
6842 }
6843
6844 total_len = sizeof(struct scsi_log_header) + page_index->page_len;
6845
6846 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6847 ctsio->kern_sg_entries = 0;
6848 ctsio->kern_data_resid = 0;
6849 ctsio->kern_rel_offset = 0;
6850 if (total_len < alloc_len) {
6851 ctsio->residual = alloc_len - total_len;
6852 ctsio->kern_data_len = total_len;
6853 ctsio->kern_total_len = total_len;
6854 } else {
6855 ctsio->residual = 0;
6856 ctsio->kern_data_len = alloc_len;
6857 ctsio->kern_total_len = alloc_len;
6858 }
6859
6860 header = (struct scsi_log_header *)ctsio->kern_data_ptr;
6861 header->page = page_index->page_code;
6862 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING)
6863 header->page |= SL_DS;
6864 if (page_index->subpage) {
6865 header->page |= SL_SPF;
6866 header->subpage = page_index->subpage;
6867 }
6868 scsi_ulto2b(page_index->page_len, header->datalen);
6869
6870 /*
6871 * Call the handler, if it exists, to update the
6872 * page to the latest values.
6873 */
6874 if (page_index->sense_handler != NULL)
6875 page_index->sense_handler(ctsio, page_index, pc);
6876
6877 memcpy(header + 1, page_index->page_data, page_index->page_len);
6878
6879 ctl_set_success(ctsio);
6880 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6881 ctsio->be_move_done = ctl_config_move_done;
6882 ctl_datamove((union ctl_io *)ctsio);
6883 return (CTL_RETVAL_COMPLETE);
6884}
6885
6886int
6887ctl_read_capacity(struct ctl_scsiio *ctsio)
6888{
6889 struct scsi_read_capacity *cdb;
6890 struct scsi_read_capacity_data *data;
6891 struct ctl_lun *lun;
6892 uint32_t lba;
6893
6894 CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
6895
6896 cdb = (struct scsi_read_capacity *)ctsio->cdb;
6897
6898 lba = scsi_4btoul(cdb->addr);
6899 if (((cdb->pmi & SRC_PMI) == 0)
6900 && (lba != 0)) {
6901 ctl_set_invalid_field(/*ctsio*/ ctsio,
6902 /*sks_valid*/ 1,
6903 /*command*/ 1,
6904 /*field*/ 2,
6905 /*bit_valid*/ 0,
6906 /*bit*/ 0);
6907 ctl_done((union ctl_io *)ctsio);
6908 return (CTL_RETVAL_COMPLETE);
6909 }
6910
6911 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6912
6913 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6914 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
6915 ctsio->residual = 0;
6916 ctsio->kern_data_len = sizeof(*data);
6917 ctsio->kern_total_len = sizeof(*data);
6918 ctsio->kern_data_resid = 0;
6919 ctsio->kern_rel_offset = 0;
6920 ctsio->kern_sg_entries = 0;
6921
6922 /*
6923 * If the maximum LBA is greater than 0xfffffffe, the user must
6924 * issue a SERVICE ACTION IN (16) command, with the read capacity
6925 * serivce action set.
6926 */
6927 if (lun->be_lun->maxlba > 0xfffffffe)
6928 scsi_ulto4b(0xffffffff, data->addr);
6929 else
6930 scsi_ulto4b(lun->be_lun->maxlba, data->addr);
6931
6932 /*
6933 * XXX KDM this may not be 512 bytes...
6934 */
6935 scsi_ulto4b(lun->be_lun->blocksize, data->length);
6936
6937 ctl_set_success(ctsio);
6938 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6939 ctsio->be_move_done = ctl_config_move_done;
6940 ctl_datamove((union ctl_io *)ctsio);
6941 return (CTL_RETVAL_COMPLETE);
6942}
6943
6944int
6945ctl_read_capacity_16(struct ctl_scsiio *ctsio)
6946{
6947 struct scsi_read_capacity_16 *cdb;
6948 struct scsi_read_capacity_data_long *data;
6949 struct ctl_lun *lun;
6950 uint64_t lba;
6951 uint32_t alloc_len;
6952
6953 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
6954
6955 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
6956
6957 alloc_len = scsi_4btoul(cdb->alloc_len);
6958 lba = scsi_8btou64(cdb->addr);
6959
6960 if ((cdb->reladr & SRC16_PMI)
6961 && (lba != 0)) {
6962 ctl_set_invalid_field(/*ctsio*/ ctsio,
6963 /*sks_valid*/ 1,
6964 /*command*/ 1,
6965 /*field*/ 2,
6966 /*bit_valid*/ 0,
6967 /*bit*/ 0);
6968 ctl_done((union ctl_io *)ctsio);
6969 return (CTL_RETVAL_COMPLETE);
6970 }
6971
6972 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6973
6974 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6975 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
6976
6977 if (sizeof(*data) < alloc_len) {
6978 ctsio->residual = alloc_len - sizeof(*data);
6979 ctsio->kern_data_len = sizeof(*data);
6980 ctsio->kern_total_len = sizeof(*data);
6981 } else {
6982 ctsio->residual = 0;
6983 ctsio->kern_data_len = alloc_len;
6984 ctsio->kern_total_len = alloc_len;
6985 }
6986 ctsio->kern_data_resid = 0;
6987 ctsio->kern_rel_offset = 0;
6988 ctsio->kern_sg_entries = 0;
6989
6990 scsi_u64to8b(lun->be_lun->maxlba, data->addr);
6991 /* XXX KDM this may not be 512 bytes... */
6992 scsi_ulto4b(lun->be_lun->blocksize, data->length);
6993 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
6994 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
6995 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
6996 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
6997
6998 ctl_set_success(ctsio);
6999 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7000 ctsio->be_move_done = ctl_config_move_done;
7001 ctl_datamove((union ctl_io *)ctsio);
7002 return (CTL_RETVAL_COMPLETE);
7003}
7004
7005int
7006ctl_get_lba_status(struct ctl_scsiio *ctsio)
7007{
7008 struct scsi_get_lba_status *cdb;
7009 struct scsi_get_lba_status_data *data;
7010 struct ctl_lun *lun;
7011 struct ctl_lba_len_flags *lbalen;
7012 uint64_t lba;
7013 uint32_t alloc_len, total_len;
7014 int retval;
7015
7016 CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
7017
7018 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7019 cdb = (struct scsi_get_lba_status *)ctsio->cdb;
7020 lba = scsi_8btou64(cdb->addr);
7021 alloc_len = scsi_4btoul(cdb->alloc_len);
7022
7023 if (lba > lun->be_lun->maxlba) {
7024 ctl_set_lba_out_of_range(ctsio, lba);
7025 ctl_done((union ctl_io *)ctsio);
7026 return (CTL_RETVAL_COMPLETE);
7027 }
7028
7029 total_len = sizeof(*data) + sizeof(data->descr[0]);
7030 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7031 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;
7032
7033 if (total_len < alloc_len) {
7034 ctsio->residual = alloc_len - total_len;
7035 ctsio->kern_data_len = total_len;
7036 ctsio->kern_total_len = total_len;
7037 } else {
7038 ctsio->residual = 0;
7039 ctsio->kern_data_len = alloc_len;
7040 ctsio->kern_total_len = alloc_len;
7041 }
7042 ctsio->kern_data_resid = 0;
7043 ctsio->kern_rel_offset = 0;
7044 ctsio->kern_sg_entries = 0;
7045
7046 /* Fill dummy data in case backend can't tell anything. */
7047 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
7048 scsi_u64to8b(lba, data->descr[0].addr);
7049 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba),
7050 data->descr[0].length);
7051 data->descr[0].status = 0; /* Mapped or unknown. */
7052
7053 ctl_set_success(ctsio);
7054 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7055 ctsio->be_move_done = ctl_config_move_done;
7056
7057 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
7058 lbalen->lba = lba;
7059 lbalen->len = total_len;
7060 lbalen->flags = 0;
7061 retval = lun->backend->config_read((union ctl_io *)ctsio);
7062 return (CTL_RETVAL_COMPLETE);
7063}
7064
7065int
7066ctl_read_defect(struct ctl_scsiio *ctsio)
7067{
7068 struct scsi_read_defect_data_10 *ccb10;
7069 struct scsi_read_defect_data_12 *ccb12;
7070 struct scsi_read_defect_data_hdr_10 *data10;
7071 struct scsi_read_defect_data_hdr_12 *data12;
7072 uint32_t alloc_len, data_len;
7073 uint8_t format;
7074
7075 CTL_DEBUG_PRINT(("ctl_read_defect\n"));
7076
7077 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
7078 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb;
7079 format = ccb10->format;
7080 alloc_len = scsi_2btoul(ccb10->alloc_length);
7081 data_len = sizeof(*data10);
7082 } else {
7083 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb;
7084 format = ccb12->format;
7085 alloc_len = scsi_4btoul(ccb12->alloc_length);
7086 data_len = sizeof(*data12);
7087 }
7088 if (alloc_len == 0) {
7089 ctl_set_success(ctsio);
7090 ctl_done((union ctl_io *)ctsio);
7091 return (CTL_RETVAL_COMPLETE);
7092 }
7093
7094 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
7095 if (data_len < alloc_len) {
7096 ctsio->residual = alloc_len - data_len;
7097 ctsio->kern_data_len = data_len;
7098 ctsio->kern_total_len = data_len;
7099 } else {
7100 ctsio->residual = 0;
7101 ctsio->kern_data_len = alloc_len;
7102 ctsio->kern_total_len = alloc_len;
7103 }
7104 ctsio->kern_data_resid = 0;
7105 ctsio->kern_rel_offset = 0;
7106 ctsio->kern_sg_entries = 0;
7107
7108 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
7109 data10 = (struct scsi_read_defect_data_hdr_10 *)
7110 ctsio->kern_data_ptr;
7111 data10->format = format;
7112 scsi_ulto2b(0, data10->length);
7113 } else {
7114 data12 = (struct scsi_read_defect_data_hdr_12 *)
7115 ctsio->kern_data_ptr;
7116 data12->format = format;
7117 scsi_ulto2b(0, data12->generation);
7118 scsi_ulto4b(0, data12->length);
7119 }
7120
7121 ctl_set_success(ctsio);
7122 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7123 ctsio->be_move_done = ctl_config_move_done;
7124 ctl_datamove((union ctl_io *)ctsio);
7125 return (CTL_RETVAL_COMPLETE);
7126}
7127
7128int
7129ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
7130{
7131 struct scsi_maintenance_in *cdb;
7132 int retval;
7133 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os;
7134 int num_ha_groups, num_target_ports, shared_group;
7135 struct ctl_lun *lun;
7136 struct ctl_softc *softc;
7137 struct ctl_port *port;
7138 struct scsi_target_group_data *rtg_ptr;
7139 struct scsi_target_group_data_extended *rtg_ext_ptr;
7140 struct scsi_target_port_group_descriptor *tpg_desc;
7141
7142 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
7143
7144 cdb = (struct scsi_maintenance_in *)ctsio->cdb;
7145 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7146 softc = lun->ctl_softc;
7147
7148 retval = CTL_RETVAL_COMPLETE;
7149
7150 switch (cdb->byte2 & STG_PDF_MASK) {
7151 case STG_PDF_LENGTH:
7152 ext = 0;
7153 break;
7154 case STG_PDF_EXTENDED:
7155 ext = 1;
7156 break;
7157 default:
7158 ctl_set_invalid_field(/*ctsio*/ ctsio,
7159 /*sks_valid*/ 1,
7160 /*command*/ 1,
7161 /*field*/ 2,
7162 /*bit_valid*/ 1,
7163 /*bit*/ 5);
7164 ctl_done((union ctl_io *)ctsio);
7165 return(retval);
7166 }
7167
7168 num_target_ports = 0;
7169 shared_group = (softc->is_single != 0);
7170 mtx_lock(&softc->ctl_lock);
7171 STAILQ_FOREACH(port, &softc->port_list, links) {
7172 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7173 continue;
7174 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
7175 continue;
7176 num_target_ports++;
7177 if (port->status & CTL_PORT_STATUS_HA_SHARED)
7178 shared_group = 1;
7179 }
7180 mtx_unlock(&softc->ctl_lock);
7181 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES;
7182
7183 if (ext)
7184 total_len = sizeof(struct scsi_target_group_data_extended);
7185 else
7186 total_len = sizeof(struct scsi_target_group_data);
7187 total_len += sizeof(struct scsi_target_port_group_descriptor) *
7188 (shared_group + num_ha_groups) +
7189 sizeof(struct scsi_target_port_descriptor) * num_target_ports;
7190
7191 alloc_len = scsi_4btoul(cdb->length);
7192
7193 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7194
7195 ctsio->kern_sg_entries = 0;
7196
7197 if (total_len < alloc_len) {
7198 ctsio->residual = alloc_len - total_len;
7199 ctsio->kern_data_len = total_len;
7200 ctsio->kern_total_len = total_len;
7201 } else {
7202 ctsio->residual = 0;
7203 ctsio->kern_data_len = alloc_len;
7204 ctsio->kern_total_len = alloc_len;
7205 }
7206 ctsio->kern_data_resid = 0;
7207 ctsio->kern_rel_offset = 0;
7208
7209 if (ext) {
7210 rtg_ext_ptr = (struct scsi_target_group_data_extended *)
7211 ctsio->kern_data_ptr;
7212 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
7213 rtg_ext_ptr->format_type = 0x10;
7214 rtg_ext_ptr->implicit_transition_time = 0;
7215 tpg_desc = &rtg_ext_ptr->groups[0];
7216 } else {
7217 rtg_ptr = (struct scsi_target_group_data *)
7218 ctsio->kern_data_ptr;
7219 scsi_ulto4b(total_len - 4, rtg_ptr->length);
7220 tpg_desc = &rtg_ptr->groups[0];
7221 }
7222
7223 mtx_lock(&softc->ctl_lock);
7224 pg = softc->port_min / softc->port_cnt;
7225 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) {
7226 /* Some shelf is known to be primary. */
7227 if (softc->ha_link == CTL_HA_LINK_OFFLINE)
7228 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
7229 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
7230 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7231 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
7232 os = TPG_ASYMMETRIC_ACCESS_STANDBY;
7233 else
7234 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7235 if (lun->flags & CTL_LUN_PRIMARY_SC) {
7236 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7237 } else {
7238 ts = os;
7239 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7240 }
7241 } else {
7242 /* No known primary shelf. */
7243 if (softc->ha_link == CTL_HA_LINK_OFFLINE) {
7244 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
7245 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7246 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) {
7247 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7248 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7249 } else {
7250 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7251 }
7252 }
7253 if (shared_group) {
7254 tpg_desc->pref_state = ts;
7255 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
7256 TPG_U_SUP | TPG_T_SUP;
7257 scsi_ulto2b(1, tpg_desc->target_port_group);
7258 tpg_desc->status = TPG_IMPLICIT;
7259 pc = 0;
7260 STAILQ_FOREACH(port, &softc->port_list, links) {
7261 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7262 continue;
7263 if (!softc->is_single &&
7264 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0)
7265 continue;
7266 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
7267 continue;
7268 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
7269 relative_target_port_identifier);
7270 pc++;
7271 }
7272 tpg_desc->target_port_count = pc;
7273 tpg_desc = (struct scsi_target_port_group_descriptor *)
7274 &tpg_desc->descriptors[pc];
7275 }
7276 for (g = 0; g < num_ha_groups; g++) {
7277 tpg_desc->pref_state = (g == pg) ? ts : os;
7278 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
7279 TPG_U_SUP | TPG_T_SUP;
7280 scsi_ulto2b(2 + g, tpg_desc->target_port_group);
7281 tpg_desc->status = TPG_IMPLICIT;
7282 pc = 0;
7283 STAILQ_FOREACH(port, &softc->port_list, links) {
7284 if (port->targ_port < g * softc->port_cnt ||
7285 port->targ_port >= (g + 1) * softc->port_cnt)
7286 continue;
7287 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7288 continue;
7289 if (port->status & CTL_PORT_STATUS_HA_SHARED)
7290 continue;
7291 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
7292 continue;
7293 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
7294 relative_target_port_identifier);
7295 pc++;
7296 }
7297 tpg_desc->target_port_count = pc;
7298 tpg_desc = (struct scsi_target_port_group_descriptor *)
7299 &tpg_desc->descriptors[pc];
7300 }
7301 mtx_unlock(&softc->ctl_lock);
7302
7303 ctl_set_success(ctsio);
7304 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7305 ctsio->be_move_done = ctl_config_move_done;
7306 ctl_datamove((union ctl_io *)ctsio);
7307 return(retval);
7308}
7309
7310int
7311ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
7312{
7313 struct ctl_lun *lun;
7314 struct scsi_report_supported_opcodes *cdb;
7315 const struct ctl_cmd_entry *entry, *sentry;
7316 struct scsi_report_supported_opcodes_all *all;
7317 struct scsi_report_supported_opcodes_descr *descr;
7318 struct scsi_report_supported_opcodes_one *one;
7319 int retval;
7320 int alloc_len, total_len;
7321 int opcode, service_action, i, j, num;
7322
7323 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
7324
7325 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
7326 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7327
7328 retval = CTL_RETVAL_COMPLETE;
7329
7330 opcode = cdb->requested_opcode;
7331 service_action = scsi_2btoul(cdb->requested_service_action);
7332 switch (cdb->options & RSO_OPTIONS_MASK) {
7333 case RSO_OPTIONS_ALL:
7334 num = 0;
7335 for (i = 0; i < 256; i++) {
7336 entry = &ctl_cmd_table[i];
7337 if (entry->flags & CTL_CMD_FLAG_SA5) {
7338 for (j = 0; j < 32; j++) {
7339 sentry = &((const struct ctl_cmd_entry *)
7340 entry->execute)[j];
7341 if (ctl_cmd_applicable(
7342 lun->be_lun->lun_type, sentry))
7343 num++;
7344 }
7345 } else {
7346 if (ctl_cmd_applicable(lun->be_lun->lun_type,
7347 entry))
7348 num++;
7349 }
7350 }
7351 total_len = sizeof(struct scsi_report_supported_opcodes_all) +
7352 num * sizeof(struct scsi_report_supported_opcodes_descr);
7353 break;
7354 case RSO_OPTIONS_OC:
7355 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
7356 ctl_set_invalid_field(/*ctsio*/ ctsio,
7357 /*sks_valid*/ 1,
7358 /*command*/ 1,
7359 /*field*/ 2,
7360 /*bit_valid*/ 1,
7361 /*bit*/ 2);
7362 ctl_done((union ctl_io *)ctsio);
7363 return (CTL_RETVAL_COMPLETE);
7364 }
7365 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7366 break;
7367 case RSO_OPTIONS_OC_SA:
7368 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
7369 service_action >= 32) {
7370 ctl_set_invalid_field(/*ctsio*/ ctsio,
7371 /*sks_valid*/ 1,
7372 /*command*/ 1,
7373 /*field*/ 2,
7374 /*bit_valid*/ 1,
7375 /*bit*/ 2);
7376 ctl_done((union ctl_io *)ctsio);
7377 return (CTL_RETVAL_COMPLETE);
7378 }
7379 /* FALLTHROUGH */
7380 case RSO_OPTIONS_OC_ASA:
7381 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7382 break;
7383 default:
7384 ctl_set_invalid_field(/*ctsio*/ ctsio,
7385 /*sks_valid*/ 1,
7386 /*command*/ 1,
7387 /*field*/ 2,
7388 /*bit_valid*/ 1,
7389 /*bit*/ 2);
7390 ctl_done((union ctl_io *)ctsio);
7391 return (CTL_RETVAL_COMPLETE);
7392 }
7393
7394 alloc_len = scsi_4btoul(cdb->length);
7395
7396 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7397
7398 ctsio->kern_sg_entries = 0;
7399
7400 if (total_len < alloc_len) {
7401 ctsio->residual = alloc_len - total_len;
7402 ctsio->kern_data_len = total_len;
7403 ctsio->kern_total_len = total_len;
7404 } else {
7405 ctsio->residual = 0;
7406 ctsio->kern_data_len = alloc_len;
7407 ctsio->kern_total_len = alloc_len;
7408 }
7409 ctsio->kern_data_resid = 0;
7410 ctsio->kern_rel_offset = 0;
7411
7412 switch (cdb->options & RSO_OPTIONS_MASK) {
7413 case RSO_OPTIONS_ALL:
7414 all = (struct scsi_report_supported_opcodes_all *)
7415 ctsio->kern_data_ptr;
7416 num = 0;
7417 for (i = 0; i < 256; i++) {
7418 entry = &ctl_cmd_table[i];
7419 if (entry->flags & CTL_CMD_FLAG_SA5) {
7420 for (j = 0; j < 32; j++) {
7421 sentry = &((const struct ctl_cmd_entry *)
7422 entry->execute)[j];
7423 if (!ctl_cmd_applicable(
7424 lun->be_lun->lun_type, sentry))
7425 continue;
7426 descr = &all->descr[num++];
7427 descr->opcode = i;
7428 scsi_ulto2b(j, descr->service_action);
7429 descr->flags = RSO_SERVACTV;
7430 scsi_ulto2b(sentry->length,
7431 descr->cdb_length);
7432 }
7433 } else {
7434 if (!ctl_cmd_applicable(lun->be_lun->lun_type,
7435 entry))
7436 continue;
7437 descr = &all->descr[num++];
7438 descr->opcode = i;
7439 scsi_ulto2b(0, descr->service_action);
7440 descr->flags = 0;
7441 scsi_ulto2b(entry->length, descr->cdb_length);
7442 }
7443 }
7444 scsi_ulto4b(
7445 num * sizeof(struct scsi_report_supported_opcodes_descr),
7446 all->length);
7447 break;
7448 case RSO_OPTIONS_OC:
7449 one = (struct scsi_report_supported_opcodes_one *)
7450 ctsio->kern_data_ptr;
7451 entry = &ctl_cmd_table[opcode];
7452 goto fill_one;
7453 case RSO_OPTIONS_OC_SA:
7454 one = (struct scsi_report_supported_opcodes_one *)
7455 ctsio->kern_data_ptr;
7456 entry = &ctl_cmd_table[opcode];
7457 entry = &((const struct ctl_cmd_entry *)
7458 entry->execute)[service_action];
7459fill_one:
7460 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
7461 one->support = 3;
7462 scsi_ulto2b(entry->length, one->cdb_length);
7463 one->cdb_usage[0] = opcode;
7464 memcpy(&one->cdb_usage[1], entry->usage,
7465 entry->length - 1);
7466 } else
7467 one->support = 1;
7468 break;
7469 case RSO_OPTIONS_OC_ASA:
7470 one = (struct scsi_report_supported_opcodes_one *)
7471 ctsio->kern_data_ptr;
7472 entry = &ctl_cmd_table[opcode];
7473 if (entry->flags & CTL_CMD_FLAG_SA5) {
7474 entry = &((const struct ctl_cmd_entry *)
7475 entry->execute)[service_action];
7476 } else if (service_action != 0) {
7477 one->support = 1;
7478 break;
7479 }
7480 goto fill_one;
7481 }
7482
7483 ctl_set_success(ctsio);
7484 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7485 ctsio->be_move_done = ctl_config_move_done;
7486 ctl_datamove((union ctl_io *)ctsio);
7487 return(retval);
7488}
7489
7490int
7491ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
7492{
7493 struct scsi_report_supported_tmf *cdb;
7494 struct scsi_report_supported_tmf_ext_data *data;
7495 int retval;
7496 int alloc_len, total_len;
7497
7498 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
7499
7500 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
7501
7502 retval = CTL_RETVAL_COMPLETE;
7503
7504 if (cdb->options & RST_REPD)
7505 total_len = sizeof(struct scsi_report_supported_tmf_ext_data);
7506 else
7507 total_len = sizeof(struct scsi_report_supported_tmf_data);
7508 alloc_len = scsi_4btoul(cdb->length);
7509
7510 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7511
7512 ctsio->kern_sg_entries = 0;
7513
7514 if (total_len < alloc_len) {
7515 ctsio->residual = alloc_len - total_len;
7516 ctsio->kern_data_len = total_len;
7517 ctsio->kern_total_len = total_len;
7518 } else {
7519 ctsio->residual = 0;
7520 ctsio->kern_data_len = alloc_len;
7521 ctsio->kern_total_len = alloc_len;
7522 }
7523 ctsio->kern_data_resid = 0;
7524 ctsio->kern_rel_offset = 0;
7525
7526 data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
7527 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
7528 RST_TRS;
7529 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS;
7530 data->length = total_len - 4;
7531
7532 ctl_set_success(ctsio);
7533 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7534 ctsio->be_move_done = ctl_config_move_done;
7535 ctl_datamove((union ctl_io *)ctsio);
7536 return (retval);
7537}
7538
7539int
7540ctl_report_timestamp(struct ctl_scsiio *ctsio)
7541{
7542 struct scsi_report_timestamp *cdb;
7543 struct scsi_report_timestamp_data *data;
7544 struct timeval tv;
7545 int64_t timestamp;
7546 int retval;
7547 int alloc_len, total_len;
7548
7549 CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
7550
7551 cdb = (struct scsi_report_timestamp *)ctsio->cdb;
7552
7553 retval = CTL_RETVAL_COMPLETE;
7554
7555 total_len = sizeof(struct scsi_report_timestamp_data);
7556 alloc_len = scsi_4btoul(cdb->length);
7557
7558 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7559
7560 ctsio->kern_sg_entries = 0;
7561
7562 if (total_len < alloc_len) {
7563 ctsio->residual = alloc_len - total_len;
7564 ctsio->kern_data_len = total_len;
7565 ctsio->kern_total_len = total_len;
7566 } else {
7567 ctsio->residual = 0;
7568 ctsio->kern_data_len = alloc_len;
7569 ctsio->kern_total_len = alloc_len;
7570 }
7571 ctsio->kern_data_resid = 0;
7572 ctsio->kern_rel_offset = 0;
7573
7574 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
7575 scsi_ulto2b(sizeof(*data) - 2, data->length);
7576 data->origin = RTS_ORIG_OUTSIDE;
7577 getmicrotime(&tv);
7578 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
7579 scsi_ulto4b(timestamp >> 16, data->timestamp);
7580 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
7581
7582 ctl_set_success(ctsio);
7583 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7584 ctsio->be_move_done = ctl_config_move_done;
7585 ctl_datamove((union ctl_io *)ctsio);
7586 return (retval);
7587}
7588
7589int
7590ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7591{
7592 struct scsi_per_res_in *cdb;
7593 int alloc_len, total_len = 0;
7594 /* struct scsi_per_res_in_rsrv in_data; */
7595 struct ctl_lun *lun;
7596 struct ctl_softc *softc;
7597 uint64_t key;
7598
7599 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7600
7601 cdb = (struct scsi_per_res_in *)ctsio->cdb;
7602
7603 alloc_len = scsi_2btoul(cdb->length);
7604
7605 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7606 softc = lun->ctl_softc;
7607
7608retry:
7609 mtx_lock(&lun->lun_lock);
7610 switch (cdb->action) {
7611 case SPRI_RK: /* read keys */
7612 total_len = sizeof(struct scsi_per_res_in_keys) +
7613 lun->pr_key_count *
7614 sizeof(struct scsi_per_res_key);
7615 break;
7616 case SPRI_RR: /* read reservation */
7617 if (lun->flags & CTL_LUN_PR_RESERVED)
7618 total_len = sizeof(struct scsi_per_res_in_rsrv);
7619 else
7620 total_len = sizeof(struct scsi_per_res_in_header);
7621 break;
7622 case SPRI_RC: /* report capabilities */
7623 total_len = sizeof(struct scsi_per_res_cap);
7624 break;
7625 case SPRI_RS: /* read full status */
7626 total_len = sizeof(struct scsi_per_res_in_header) +
7627 (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7628 lun->pr_key_count;
7629 break;
7630 default:
7631 panic("%s: Invalid PR type %#x", __func__, cdb->action);
7632 }
7633 mtx_unlock(&lun->lun_lock);
7634
7635 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7636
7637 if (total_len < alloc_len) {
7638 ctsio->residual = alloc_len - total_len;
7639 ctsio->kern_data_len = total_len;
7640 ctsio->kern_total_len = total_len;
7641 } else {
7642 ctsio->residual = 0;
7643 ctsio->kern_data_len = alloc_len;
7644 ctsio->kern_total_len = alloc_len;
7645 }
7646
7647 ctsio->kern_data_resid = 0;
7648 ctsio->kern_rel_offset = 0;
7649 ctsio->kern_sg_entries = 0;
7650
7651 mtx_lock(&lun->lun_lock);
7652 switch (cdb->action) {
7653 case SPRI_RK: { // read keys
7654 struct scsi_per_res_in_keys *res_keys;
7655 int i, key_count;
7656
7657 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;
7658
7659 /*
7660 * We had to drop the lock to allocate our buffer, which
7661 * leaves time for someone to come in with another
7662 * persistent reservation. (That is unlikely, though,
7663 * since this should be the only persistent reservation
7664 * command active right now.)
7665 */
7666 if (total_len != (sizeof(struct scsi_per_res_in_keys) +
7667 (lun->pr_key_count *
7668 sizeof(struct scsi_per_res_key)))){
7669 mtx_unlock(&lun->lun_lock);
7670 free(ctsio->kern_data_ptr, M_CTL);
7671 printf("%s: reservation length changed, retrying\n",
7672 __func__);
7673 goto retry;
7674 }
7675
7676 scsi_ulto4b(lun->pr_generation, res_keys->header.generation);
7677
7678 scsi_ulto4b(sizeof(struct scsi_per_res_key) *
7679 lun->pr_key_count, res_keys->header.length);
7680
7681 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
7682 if ((key = ctl_get_prkey(lun, i)) == 0)
7683 continue;
7684
7685 /*
7686 * We used lun->pr_key_count to calculate the
7687 * size to allocate. If it turns out the number of
7688 * initiators with the registered flag set is
7689 * larger than that (i.e. they haven't been kept in
7690 * sync), we've got a problem.
7691 */
7692 if (key_count >= lun->pr_key_count) {
7693 key_count++;
7694 continue;
7695 }
7696 scsi_u64to8b(key, res_keys->keys[key_count].key);
7697 key_count++;
7698 }
7699 break;
7700 }
7701 case SPRI_RR: { // read reservation
7702 struct scsi_per_res_in_rsrv *res;
7703 int tmp_len, header_only;
7704
7705 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
7706
7707 scsi_ulto4b(lun->pr_generation, res->header.generation);
7708
7709 if (lun->flags & CTL_LUN_PR_RESERVED)
7710 {
7711 tmp_len = sizeof(struct scsi_per_res_in_rsrv);
7712 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
7713 res->header.length);
7714 header_only = 0;
7715 } else {
7716 tmp_len = sizeof(struct scsi_per_res_in_header);
7717 scsi_ulto4b(0, res->header.length);
7718 header_only = 1;
7719 }
7720
7721 /*
7722 * We had to drop the lock to allocate our buffer, which
7723 * leaves time for someone to come in with another
7724 * persistent reservation. (That is unlikely, though,
7725 * since this should be the only persistent reservation
7726 * command active right now.)
7727 */
7728 if (tmp_len != total_len) {
7729 mtx_unlock(&lun->lun_lock);
7730 free(ctsio->kern_data_ptr, M_CTL);
7731 printf("%s: reservation status changed, retrying\n",
7732 __func__);
7733 goto retry;
7734 }
7735
7736 /*
7737 * No reservation held, so we're done.
7738 */
7739 if (header_only != 0)
7740 break;
7741
7742 /*
7743 * If the registration is an All Registrants type, the key
7744 * is 0, since it doesn't really matter.
7745 */
7746 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
7747 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx),
7748 res->data.reservation);
7749 }
7750 res->data.scopetype = lun->pr_res_type;
7751 break;
7752 }
7753 case SPRI_RC: //report capabilities
7754 {
7755 struct scsi_per_res_cap *res_cap;
7756 uint16_t type_mask;
7757
7758 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
7759 scsi_ulto2b(sizeof(*res_cap), res_cap->length);
7760 res_cap->flags1 = SPRI_CRH;
7761 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5;
7762 type_mask = SPRI_TM_WR_EX_AR |
7763 SPRI_TM_EX_AC_RO |
7764 SPRI_TM_WR_EX_RO |
7765 SPRI_TM_EX_AC |
7766 SPRI_TM_WR_EX |
7767 SPRI_TM_EX_AC_AR;
7768 scsi_ulto2b(type_mask, res_cap->type_mask);
7769 break;
7770 }
7771 case SPRI_RS: { // read full status
7772 struct scsi_per_res_in_full *res_status;
7773 struct scsi_per_res_in_full_desc *res_desc;
7774 struct ctl_port *port;
7775 int i, len;
7776
7777 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;
7778
7779 /*
7780 * We had to drop the lock to allocate our buffer, which
7781 * leaves time for someone to come in with another
7782 * persistent reservation. (That is unlikely, though,
7783 * since this should be the only persistent reservation
7784 * command active right now.)
7785 */
7786 if (total_len < (sizeof(struct scsi_per_res_in_header) +
7787 (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7788 lun->pr_key_count)){
7789 mtx_unlock(&lun->lun_lock);
7790 free(ctsio->kern_data_ptr, M_CTL);
7791 printf("%s: reservation length changed, retrying\n",
7792 __func__);
7793 goto retry;
7794 }
7795
7796 scsi_ulto4b(lun->pr_generation, res_status->header.generation);
7797
7798 res_desc = &res_status->desc[0];
7799 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7800 if ((key = ctl_get_prkey(lun, i)) == 0)
7801 continue;
7802
7803 scsi_u64to8b(key, res_desc->res_key.key);
7804 if ((lun->flags & CTL_LUN_PR_RESERVED) &&
7805 (lun->pr_res_idx == i ||
7806 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
7807 res_desc->flags = SPRI_FULL_R_HOLDER;
7808 res_desc->scopetype = lun->pr_res_type;
7809 }
7810 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
7811 res_desc->rel_trgt_port_id);
7812 len = 0;
7813 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
7814 if (port != NULL)
7815 len = ctl_create_iid(port,
7816 i % CTL_MAX_INIT_PER_PORT,
7817 res_desc->transport_id);
7818 scsi_ulto4b(len, res_desc->additional_length);
7819 res_desc = (struct scsi_per_res_in_full_desc *)
7820 &res_desc->transport_id[len];
7821 }
7822 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
7823 res_status->header.length);
7824 break;
7825 }
7826 default:
7827 panic("%s: Invalid PR type %#x", __func__, cdb->action);
7828 }
7829 mtx_unlock(&lun->lun_lock);
7830
7831 ctl_set_success(ctsio);
7832 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7833 ctsio->be_move_done = ctl_config_move_done;
7834 ctl_datamove((union ctl_io *)ctsio);
7835 return (CTL_RETVAL_COMPLETE);
7836}
7837
7838/*
7839 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
7840 * it should return.
7841 */
7842static int
7843ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
7844 uint64_t sa_res_key, uint8_t type, uint32_t residx,
7845 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
7846 struct scsi_per_res_out_parms* param)
7847{
7848 union ctl_ha_msg persis_io;
7849 int i;
7850
7851 mtx_lock(&lun->lun_lock);
7852 if (sa_res_key == 0) {
7853 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
7854 /* validate scope and type */
7855 if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7856 SPR_LU_SCOPE) {
7857 mtx_unlock(&lun->lun_lock);
7858 ctl_set_invalid_field(/*ctsio*/ ctsio,
7859 /*sks_valid*/ 1,
7860 /*command*/ 1,
7861 /*field*/ 2,
7862 /*bit_valid*/ 1,
7863 /*bit*/ 4);
7864 ctl_done((union ctl_io *)ctsio);
7865 return (1);
7866 }
7867
7868 if (type>8 || type==2 || type==4 || type==0) {
7869 mtx_unlock(&lun->lun_lock);
7870 ctl_set_invalid_field(/*ctsio*/ ctsio,
7871 /*sks_valid*/ 1,
7872 /*command*/ 1,
7873 /*field*/ 2,
7874 /*bit_valid*/ 1,
7875 /*bit*/ 0);
7876 ctl_done((union ctl_io *)ctsio);
7877 return (1);
7878 }
7879
7880 /*
7881 * Unregister everybody else and build UA for
7882 * them
7883 */
7884 for(i = 0; i < CTL_MAX_INITIATORS; i++) {
7885 if (i == residx || ctl_get_prkey(lun, i) == 0)
7886 continue;
7887
7888 ctl_clr_prkey(lun, i);
7889 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7890 }
7891 lun->pr_key_count = 1;
7892 lun->pr_res_type = type;
7893 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
7894 lun->pr_res_type != SPR_TYPE_EX_AC_AR)
7895 lun->pr_res_idx = residx;
7896 lun->pr_generation++;
7897 mtx_unlock(&lun->lun_lock);
7898
7899 /* send msg to other side */
7900 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7901 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7902 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7903 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7904 persis_io.pr.pr_info.res_type = type;
7905 memcpy(persis_io.pr.pr_info.sa_res_key,
7906 param->serv_act_res_key,
7907 sizeof(param->serv_act_res_key));
7908 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7909 sizeof(persis_io.pr), M_WAITOK);
7910 } else {
7911 /* not all registrants */
7912 mtx_unlock(&lun->lun_lock);
7913 free(ctsio->kern_data_ptr, M_CTL);
7914 ctl_set_invalid_field(ctsio,
7915 /*sks_valid*/ 1,
7916 /*command*/ 0,
7917 /*field*/ 8,
7918 /*bit_valid*/ 0,
7919 /*bit*/ 0);
7920 ctl_done((union ctl_io *)ctsio);
7921 return (1);
7922 }
7923 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7924 || !(lun->flags & CTL_LUN_PR_RESERVED)) {
7925 int found = 0;
7926
7927 if (res_key == sa_res_key) {
7928 /* special case */
7929 /*
7930 * The spec implies this is not good but doesn't
7931 * say what to do. There are two choices either
7932 * generate a res conflict or check condition
7933 * with illegal field in parameter data. Since
7934 * that is what is done when the sa_res_key is
7935 * zero I'll take that approach since this has
7936 * to do with the sa_res_key.
7937 */
7938 mtx_unlock(&lun->lun_lock);
7939 free(ctsio->kern_data_ptr, M_CTL);
7940 ctl_set_invalid_field(ctsio,
7941 /*sks_valid*/ 1,
7942 /*command*/ 0,
7943 /*field*/ 8,
7944 /*bit_valid*/ 0,
7945 /*bit*/ 0);
7946 ctl_done((union ctl_io *)ctsio);
7947 return (1);
7948 }
7949
7950 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7951 if (ctl_get_prkey(lun, i) != sa_res_key)
7952 continue;
7953
7954 found = 1;
7955 ctl_clr_prkey(lun, i);
7956 lun->pr_key_count--;
7957 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7958 }
7959 if (!found) {
7960 mtx_unlock(&lun->lun_lock);
7961 free(ctsio->kern_data_ptr, M_CTL);
7962 ctl_set_reservation_conflict(ctsio);
7963 ctl_done((union ctl_io *)ctsio);
7964 return (CTL_RETVAL_COMPLETE);
7965 }
7966 lun->pr_generation++;
7967 mtx_unlock(&lun->lun_lock);
7968
7969 /* send msg to other side */
7970 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7971 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7972 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7973 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7974 persis_io.pr.pr_info.res_type = type;
7975 memcpy(persis_io.pr.pr_info.sa_res_key,
7976 param->serv_act_res_key,
7977 sizeof(param->serv_act_res_key));
7978 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7979 sizeof(persis_io.pr), M_WAITOK);
7980 } else {
7981 /* Reserved but not all registrants */
7982 /* sa_res_key is res holder */
7983 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) {
7984 /* validate scope and type */
7985 if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7986 SPR_LU_SCOPE) {
7987 mtx_unlock(&lun->lun_lock);
7988 ctl_set_invalid_field(/*ctsio*/ ctsio,
7989 /*sks_valid*/ 1,
7990 /*command*/ 1,
7991 /*field*/ 2,
7992 /*bit_valid*/ 1,
7993 /*bit*/ 4);
7994 ctl_done((union ctl_io *)ctsio);
7995 return (1);
7996 }
7997
7998 if (type>8 || type==2 || type==4 || type==0) {
7999 mtx_unlock(&lun->lun_lock);
8000 ctl_set_invalid_field(/*ctsio*/ ctsio,
8001 /*sks_valid*/ 1,
8002 /*command*/ 1,
8003 /*field*/ 2,
8004 /*bit_valid*/ 1,
8005 /*bit*/ 0);
8006 ctl_done((union ctl_io *)ctsio);
8007 return (1);
8008 }
8009
8010 /*
8011 * Do the following:
8012 * if sa_res_key != res_key remove all
8013 * registrants w/sa_res_key and generate UA
8014 * for these registrants(Registrations
8015 * Preempted) if it wasn't an exclusive
8016 * reservation generate UA(Reservations
8017 * Preempted) for all other registered nexuses
8018 * if the type has changed. Establish the new
8019 * reservation and holder. If res_key and
8020 * sa_res_key are the same do the above
8021 * except don't unregister the res holder.
8022 */
8023
8024 for(i = 0; i < CTL_MAX_INITIATORS; i++) {
8025 if (i == residx || ctl_get_prkey(lun, i) == 0)
8026 continue;
8027
8028 if (sa_res_key == ctl_get_prkey(lun, i)) {
8029 ctl_clr_prkey(lun, i);
8030 lun->pr_key_count--;
8031 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8032 } else if (type != lun->pr_res_type &&
8033 (lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8034 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) {
8035 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8036 }
8037 }
8038 lun->pr_res_type = type;
8039 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
8040 lun->pr_res_type != SPR_TYPE_EX_AC_AR)
8041 lun->pr_res_idx = residx;
8042 else
8043 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8044 lun->pr_generation++;
8045 mtx_unlock(&lun->lun_lock);
8046
8047 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8048 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8049 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8050 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8051 persis_io.pr.pr_info.res_type = type;
8052 memcpy(persis_io.pr.pr_info.sa_res_key,
8053 param->serv_act_res_key,
8054 sizeof(param->serv_act_res_key));
8055 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8056 sizeof(persis_io.pr), M_WAITOK);
8057 } else {
8058 /*
8059 * sa_res_key is not the res holder just
8060 * remove registrants
8061 */
8062 int found=0;
8063
8064 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8065 if (sa_res_key != ctl_get_prkey(lun, i))
8066 continue;
8067
8068 found = 1;
8069 ctl_clr_prkey(lun, i);
8070 lun->pr_key_count--;
8071 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8072 }
8073
8074 if (!found) {
8075 mtx_unlock(&lun->lun_lock);
8076 free(ctsio->kern_data_ptr, M_CTL);
8077 ctl_set_reservation_conflict(ctsio);
8078 ctl_done((union ctl_io *)ctsio);
8079 return (1);
8080 }
8081 lun->pr_generation++;
8082 mtx_unlock(&lun->lun_lock);
8083
8084 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8085 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8086 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8087 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8088 persis_io.pr.pr_info.res_type = type;
8089 memcpy(persis_io.pr.pr_info.sa_res_key,
8090 param->serv_act_res_key,
8091 sizeof(param->serv_act_res_key));
8092 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8093 sizeof(persis_io.pr), M_WAITOK);
8094 }
8095 }
8096 return (0);
8097}
8098
8099static void
8100ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
8101{
8102 uint64_t sa_res_key;
8103 int i;
8104
8105 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
8106
8107 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
8108 || lun->pr_res_idx == CTL_PR_NO_RESERVATION
8109 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) {
8110 if (sa_res_key == 0) {
8111 /*
8112 * Unregister everybody else and build UA for
8113 * them
8114 */
8115 for(i = 0; i < CTL_MAX_INITIATORS; i++) {
8116 if (i == msg->pr.pr_info.residx ||
8117 ctl_get_prkey(lun, i) == 0)
8118 continue;
8119
8120 ctl_clr_prkey(lun, i);
8121 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8122 }
8123
8124 lun->pr_key_count = 1;
8125 lun->pr_res_type = msg->pr.pr_info.res_type;
8126 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
8127 lun->pr_res_type != SPR_TYPE_EX_AC_AR)
8128 lun->pr_res_idx = msg->pr.pr_info.residx;
8129 } else {
8130 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8131 if (sa_res_key == ctl_get_prkey(lun, i))
8132 continue;
8133
8134 ctl_clr_prkey(lun, i);
8135 lun->pr_key_count--;
8136 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8137 }
8138 }
8139 } else {
8140 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8141 if (i == msg->pr.pr_info.residx ||
8142 ctl_get_prkey(lun, i) == 0)
8143 continue;
8144
8145 if (sa_res_key == ctl_get_prkey(lun, i)) {
8146 ctl_clr_prkey(lun, i);
8147 lun->pr_key_count--;
8148 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8149 } else if (msg->pr.pr_info.res_type != lun->pr_res_type
8150 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8151 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) {
8152 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8153 }
8154 }
8155 lun->pr_res_type = msg->pr.pr_info.res_type;
8156 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
8157 lun->pr_res_type != SPR_TYPE_EX_AC_AR)
8158 lun->pr_res_idx = msg->pr.pr_info.residx;
8159 else
8160 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8161 }
8162 lun->pr_generation++;
8163
8164}
8165
8166
8167int
8168ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
8169{
8170 int retval;
8171 u_int32_t param_len;
8172 struct scsi_per_res_out *cdb;
8173 struct ctl_lun *lun;
8174 struct scsi_per_res_out_parms* param;
8175 struct ctl_softc *softc;
8176 uint32_t residx;
8177 uint64_t res_key, sa_res_key, key;
8178 uint8_t type;
8179 union ctl_ha_msg persis_io;
8180 int i;
8181
8182 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
8183
8184 retval = CTL_RETVAL_COMPLETE;
8185
8186 cdb = (struct scsi_per_res_out *)ctsio->cdb;
8187 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8188 softc = lun->ctl_softc;
8189
8190 /*
8191 * We only support whole-LUN scope. The scope & type are ignored for
8192 * register, register and ignore existing key and clear.
8193 * We sometimes ignore scope and type on preempts too!!
8194 * Verify reservation type here as well.
8195 */
8196 type = cdb->scope_type & SPR_TYPE_MASK;
8197 if ((cdb->action == SPRO_RESERVE)
8198 || (cdb->action == SPRO_RELEASE)) {
8199 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
8200 ctl_set_invalid_field(/*ctsio*/ ctsio,
8201 /*sks_valid*/ 1,
8202 /*command*/ 1,
8203 /*field*/ 2,
8204 /*bit_valid*/ 1,
8205 /*bit*/ 4);
8206 ctl_done((union ctl_io *)ctsio);
8207 return (CTL_RETVAL_COMPLETE);
8208 }
8209
8210 if (type>8 || type==2 || type==4 || type==0) {
8211 ctl_set_invalid_field(/*ctsio*/ ctsio,
8212 /*sks_valid*/ 1,
8213 /*command*/ 1,
8214 /*field*/ 2,
8215 /*bit_valid*/ 1,
8216 /*bit*/ 0);
8217 ctl_done((union ctl_io *)ctsio);
8218 return (CTL_RETVAL_COMPLETE);
8219 }
8220 }
8221
8222 param_len = scsi_4btoul(cdb->length);
8223
8224 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
8225 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
8226 ctsio->kern_data_len = param_len;
8227 ctsio->kern_total_len = param_len;
8228 ctsio->kern_data_resid = 0;
8229 ctsio->kern_rel_offset = 0;
8230 ctsio->kern_sg_entries = 0;
8231 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
8232 ctsio->be_move_done = ctl_config_move_done;
8233 ctl_datamove((union ctl_io *)ctsio);
8234
8235 return (CTL_RETVAL_COMPLETE);
8236 }
8237
8238 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
8239
8240 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8241 res_key = scsi_8btou64(param->res_key.key);
8242 sa_res_key = scsi_8btou64(param->serv_act_res_key);
8243
8244 /*
8245 * Validate the reservation key here except for SPRO_REG_IGNO
8246 * This must be done for all other service actions
8247 */
8248 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
8249 mtx_lock(&lun->lun_lock);
8250 if ((key = ctl_get_prkey(lun, residx)) != 0) {
8251 if (res_key != key) {
8252 /*
8253 * The current key passed in doesn't match
8254 * the one the initiator previously
8255 * registered.
8256 */
8257 mtx_unlock(&lun->lun_lock);
8258 free(ctsio->kern_data_ptr, M_CTL);
8259 ctl_set_reservation_conflict(ctsio);
8260 ctl_done((union ctl_io *)ctsio);
8261 return (CTL_RETVAL_COMPLETE);
8262 }
8263 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
8264 /*
8265 * We are not registered
8266 */
8267 mtx_unlock(&lun->lun_lock);
8268 free(ctsio->kern_data_ptr, M_CTL);
8269 ctl_set_reservation_conflict(ctsio);
8270 ctl_done((union ctl_io *)ctsio);
8271 return (CTL_RETVAL_COMPLETE);
8272 } else if (res_key != 0) {
8273 /*
8274 * We are not registered and trying to register but
8275 * the register key isn't zero.
8276 */
8277 mtx_unlock(&lun->lun_lock);
8278 free(ctsio->kern_data_ptr, M_CTL);
8279 ctl_set_reservation_conflict(ctsio);
8280 ctl_done((union ctl_io *)ctsio);
8281 return (CTL_RETVAL_COMPLETE);
8282 }
8283 mtx_unlock(&lun->lun_lock);
8284 }
8285
8286 switch (cdb->action & SPRO_ACTION_MASK) {
8287 case SPRO_REGISTER:
8288 case SPRO_REG_IGNO: {
8289
8290#if 0
8291 printf("Registration received\n");
8292#endif
8293
8294 /*
8295 * We don't support any of these options, as we report in
8296 * the read capabilities request (see
8297 * ctl_persistent_reserve_in(), above).
8298 */
8299 if ((param->flags & SPR_SPEC_I_PT)
8300 || (param->flags & SPR_ALL_TG_PT)
8301 || (param->flags & SPR_APTPL)) {
8302 int bit_ptr;
8303
8304 if (param->flags & SPR_APTPL)
8305 bit_ptr = 0;
8306 else if (param->flags & SPR_ALL_TG_PT)
8307 bit_ptr = 2;
8308 else /* SPR_SPEC_I_PT */
8309 bit_ptr = 3;
8310
8311 free(ctsio->kern_data_ptr, M_CTL);
8312 ctl_set_invalid_field(ctsio,
8313 /*sks_valid*/ 1,
8314 /*command*/ 0,
8315 /*field*/ 20,
8316 /*bit_valid*/ 1,
8317 /*bit*/ bit_ptr);
8318 ctl_done((union ctl_io *)ctsio);
8319 return (CTL_RETVAL_COMPLETE);
8320 }
8321
8322 mtx_lock(&lun->lun_lock);
8323
8324 /*
8325 * The initiator wants to clear the
8326 * key/unregister.
8327 */
8328 if (sa_res_key == 0) {
8329 if ((res_key == 0
8330 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
8331 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
8332 && ctl_get_prkey(lun, residx) == 0)) {
8333 mtx_unlock(&lun->lun_lock);
8334 goto done;
8335 }
8336
8337 ctl_clr_prkey(lun, residx);
8338 lun->pr_key_count--;
8339
8340 if (residx == lun->pr_res_idx) {
8341 lun->flags &= ~CTL_LUN_PR_RESERVED;
8342 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8343
8344 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8345 lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
8346 lun->pr_key_count) {
8347 /*
8348 * If the reservation is a registrants
8349 * only type we need to generate a UA
8350 * for other registered inits. The
8351 * sense code should be RESERVATIONS
8352 * RELEASED
8353 */
8354
8355 for (i = softc->init_min; i < softc->init_max; i++){
8356 if (ctl_get_prkey(lun, i) == 0)
8357 continue;
8358 ctl_est_ua(lun, i,
8359 CTL_UA_RES_RELEASE);
8360 }
8361 }
8362 lun->pr_res_type = 0;
8363 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8364 if (lun->pr_key_count==0) {
8365 lun->flags &= ~CTL_LUN_PR_RESERVED;
8366 lun->pr_res_type = 0;
8367 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8368 }
8369 }
8370 lun->pr_generation++;
8371 mtx_unlock(&lun->lun_lock);
8372
8373 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8374 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8375 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
8376 persis_io.pr.pr_info.residx = residx;
8377 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8378 sizeof(persis_io.pr), M_WAITOK);
8379 } else /* sa_res_key != 0 */ {
8380
8381 /*
8382 * If we aren't registered currently then increment
8383 * the key count and set the registered flag.
8384 */
8385 ctl_alloc_prkey(lun, residx);
8386 if (ctl_get_prkey(lun, residx) == 0)
8387 lun->pr_key_count++;
8388 ctl_set_prkey(lun, residx, sa_res_key);
8389 lun->pr_generation++;
8390 mtx_unlock(&lun->lun_lock);
8391
8392 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8393 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8394 persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
8395 persis_io.pr.pr_info.residx = residx;
8396 memcpy(persis_io.pr.pr_info.sa_res_key,
8397 param->serv_act_res_key,
8398 sizeof(param->serv_act_res_key));
8399 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8400 sizeof(persis_io.pr), M_WAITOK);
8401 }
8402
8403 break;
8404 }
8405 case SPRO_RESERVE:
8406#if 0
8407 printf("Reserve executed type %d\n", type);
8408#endif
8409 mtx_lock(&lun->lun_lock);
8410 if (lun->flags & CTL_LUN_PR_RESERVED) {
8411 /*
8412 * if this isn't the reservation holder and it's
8413 * not a "all registrants" type or if the type is
8414 * different then we have a conflict
8415 */
8416 if ((lun->pr_res_idx != residx
8417 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
8418 || lun->pr_res_type != type) {
8419 mtx_unlock(&lun->lun_lock);
8420 free(ctsio->kern_data_ptr, M_CTL);
8421 ctl_set_reservation_conflict(ctsio);
8422 ctl_done((union ctl_io *)ctsio);
8423 return (CTL_RETVAL_COMPLETE);
8424 }
8425 mtx_unlock(&lun->lun_lock);
8426 } else /* create a reservation */ {
8427 /*
8428 * If it's not an "all registrants" type record
8429 * reservation holder
8430 */
8431 if (type != SPR_TYPE_WR_EX_AR
8432 && type != SPR_TYPE_EX_AC_AR)
8433 lun->pr_res_idx = residx; /* Res holder */
8434 else
8435 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8436
8437 lun->flags |= CTL_LUN_PR_RESERVED;
8438 lun->pr_res_type = type;
8439
8440 mtx_unlock(&lun->lun_lock);
8441
8442 /* send msg to other side */
8443 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8444 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8445 persis_io.pr.pr_info.action = CTL_PR_RESERVE;
8446 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8447 persis_io.pr.pr_info.res_type = type;
8448 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8449 sizeof(persis_io.pr), M_WAITOK);
8450 }
8451 break;
8452
8453 case SPRO_RELEASE:
8454 mtx_lock(&lun->lun_lock);
8455 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
8456 /* No reservation exists return good status */
8457 mtx_unlock(&lun->lun_lock);
8458 goto done;
8459 }
8460 /*
8461 * Is this nexus a reservation holder?
8462 */
8463 if (lun->pr_res_idx != residx
8464 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
8465 /*
8466 * not a res holder return good status but
8467 * do nothing
8468 */
8469 mtx_unlock(&lun->lun_lock);
8470 goto done;
8471 }
8472
8473 if (lun->pr_res_type != type) {
8474 mtx_unlock(&lun->lun_lock);
8475 free(ctsio->kern_data_ptr, M_CTL);
8476 ctl_set_illegal_pr_release(ctsio);
8477 ctl_done((union ctl_io *)ctsio);
8478 return (CTL_RETVAL_COMPLETE);
8479 }
8480
8481 /* okay to release */
8482 lun->flags &= ~CTL_LUN_PR_RESERVED;
8483 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8484 lun->pr_res_type = 0;
8485
8486 /*
8487 * If this isn't an exclusive access reservation and NUAR
8488 * is not set, generate UA for all other registrants.
8489 */
8490 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX &&
8491 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
8492 for (i = softc->init_min; i < softc->init_max; i++) {
8493 if (i == residx || ctl_get_prkey(lun, i) == 0)
8494 continue;
8495 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8496 }
8497 }
8498 mtx_unlock(&lun->lun_lock);
8499
8500 /* Send msg to other side */
8501 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8502 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8503 persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8504 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8505 sizeof(persis_io.pr), M_WAITOK);
8506 break;
8507
8508 case SPRO_CLEAR:
8509 /* send msg to other side */
8510
8511 mtx_lock(&lun->lun_lock);
8512 lun->flags &= ~CTL_LUN_PR_RESERVED;
8513 lun->pr_res_type = 0;
8514 lun->pr_key_count = 0;
8515 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8516
8517 ctl_clr_prkey(lun, residx);
8518 for (i = 0; i < CTL_MAX_INITIATORS; i++)
8519 if (ctl_get_prkey(lun, i) != 0) {
8520 ctl_clr_prkey(lun, i);
8521 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8522 }
8523 lun->pr_generation++;
8524 mtx_unlock(&lun->lun_lock);
8525
8526 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8527 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8528 persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8529 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8530 sizeof(persis_io.pr), M_WAITOK);
8531 break;
8532
8533 case SPRO_PREEMPT:
8534 case SPRO_PRE_ABO: {
8535 int nretval;
8536
8537 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8538 residx, ctsio, cdb, param);
8539 if (nretval != 0)
8540 return (CTL_RETVAL_COMPLETE);
8541 break;
8542 }
8543 default:
8544 panic("%s: Invalid PR type %#x", __func__, cdb->action);
8545 }
8546
8547done:
8548 free(ctsio->kern_data_ptr, M_CTL);
8549 ctl_set_success(ctsio);
8550 ctl_done((union ctl_io *)ctsio);
8551
8552 return (retval);
8553}
8554
8555/*
8556 * This routine is for handling a message from the other SC pertaining to
8557 * persistent reserve out. All the error checking will have been done
8558 * so only perorming the action need be done here to keep the two
8559 * in sync.
8560 */
8561static void
8562ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
8563{
8564 struct ctl_softc *softc = control_softc;
8565 struct ctl_lun *lun;
8566 int i;
8567 uint32_t residx, targ_lun;
8568
8569 targ_lun = msg->hdr.nexus.targ_mapped_lun;
8570 mtx_lock(&softc->ctl_lock);
8571 if (targ_lun >= CTL_MAX_LUNS ||
8572 (lun = softc->ctl_luns[targ_lun]) == NULL) {
8573 mtx_unlock(&softc->ctl_lock);
8574 return;
8575 }
8576 mtx_lock(&lun->lun_lock);
8577 mtx_unlock(&softc->ctl_lock);
8578 if (lun->flags & CTL_LUN_DISABLED) {
8579 mtx_unlock(&lun->lun_lock);
8580 return;
8581 }
8582 residx = ctl_get_initindex(&msg->hdr.nexus);
8583 switch(msg->pr.pr_info.action) {
8584 case CTL_PR_REG_KEY:
8585 ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
8586 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
8587 lun->pr_key_count++;
8588 ctl_set_prkey(lun, msg->pr.pr_info.residx,
8589 scsi_8btou64(msg->pr.pr_info.sa_res_key));
8590 lun->pr_generation++;
8591 break;
8592
8593 case CTL_PR_UNREG_KEY:
8594 ctl_clr_prkey(lun, msg->pr.pr_info.residx);
8595 lun->pr_key_count--;
8596
8597 /* XXX Need to see if the reservation has been released */
8598 /* if so do we need to generate UA? */
8599 if (msg->pr.pr_info.residx == lun->pr_res_idx) {
8600 lun->flags &= ~CTL_LUN_PR_RESERVED;
8601 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8602
8603 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8604 lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
8605 lun->pr_key_count) {
8606 /*
8607 * If the reservation is a registrants
8608 * only type we need to generate a UA
8609 * for other registered inits. The
8610 * sense code should be RESERVATIONS
8611 * RELEASED
8612 */
8613
8614 for (i = softc->init_min; i < softc->init_max; i++) {
8615 if (ctl_get_prkey(lun, i) == 0)
8616 continue;
8617
8618 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8619 }
8620 }
8621 lun->pr_res_type = 0;
8622 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8623 if (lun->pr_key_count==0) {
8624 lun->flags &= ~CTL_LUN_PR_RESERVED;
8625 lun->pr_res_type = 0;
8626 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8627 }
8628 }
8629 lun->pr_generation++;
8630 break;
8631
8632 case CTL_PR_RESERVE:
8633 lun->flags |= CTL_LUN_PR_RESERVED;
8634 lun->pr_res_type = msg->pr.pr_info.res_type;
8635 lun->pr_res_idx = msg->pr.pr_info.residx;
8636
8637 break;
8638
8639 case CTL_PR_RELEASE:
8640 /*
8641 * If this isn't an exclusive access reservation and NUAR
8642 * is not set, generate UA for all other registrants.
8643 */
8644 if (lun->pr_res_type != SPR_TYPE_EX_AC &&
8645 lun->pr_res_type != SPR_TYPE_WR_EX &&
8646 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
8647 for (i = softc->init_min; i < softc->init_max; i++)
8648 if (i == residx || ctl_get_prkey(lun, i) == 0)
8649 continue;
8650 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8651 }
8652
8653 lun->flags &= ~CTL_LUN_PR_RESERVED;
8654 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8655 lun->pr_res_type = 0;
8656 break;
8657
8658 case CTL_PR_PREEMPT:
8659 ctl_pro_preempt_other(lun, msg);
8660 break;
8661 case CTL_PR_CLEAR:
8662 lun->flags &= ~CTL_LUN_PR_RESERVED;
8663 lun->pr_res_type = 0;
8664 lun->pr_key_count = 0;
8665 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8666
8667 for (i=0; i < CTL_MAX_INITIATORS; i++) {
8668 if (ctl_get_prkey(lun, i) == 0)
8669 continue;
8670 ctl_clr_prkey(lun, i);
8671 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8672 }
8673 lun->pr_generation++;
8674 break;
8675 }
8676
8677 mtx_unlock(&lun->lun_lock);
8678}
8679
8680int
8681ctl_read_write(struct ctl_scsiio *ctsio)
8682{
8683 struct ctl_lun *lun;
8684 struct ctl_lba_len_flags *lbalen;
8685 uint64_t lba;
8686 uint32_t num_blocks;
8687 int flags, retval;
8688 int isread;
8689
8690 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8691
8692 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
8693
8694 flags = 0;
8695 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
8696 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
8697 switch (ctsio->cdb[0]) {
8698 case READ_6:
8699 case WRITE_6: {
8700 struct scsi_rw_6 *cdb;
8701
8702 cdb = (struct scsi_rw_6 *)ctsio->cdb;
8703
8704 lba = scsi_3btoul(cdb->addr);
8705 /* only 5 bits are valid in the most significant address byte */
8706 lba &= 0x1fffff;
8707 num_blocks = cdb->length;
8708 /*
8709 * This is correct according to SBC-2.
8710 */
8711 if (num_blocks == 0)
8712 num_blocks = 256;
8713 break;
8714 }
8715 case READ_10:
8716 case WRITE_10: {
8717 struct scsi_rw_10 *cdb;
8718
8719 cdb = (struct scsi_rw_10 *)ctsio->cdb;
8720 if (cdb->byte2 & SRW10_FUA)
8721 flags |= CTL_LLF_FUA;
8722 if (cdb->byte2 & SRW10_DPO)
8723 flags |= CTL_LLF_DPO;
8724 lba = scsi_4btoul(cdb->addr);
8725 num_blocks = scsi_2btoul(cdb->length);
8726 break;
8727 }
8728 case WRITE_VERIFY_10: {
8729 struct scsi_write_verify_10 *cdb;
8730
8731 cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
8732 flags |= CTL_LLF_FUA;
8733 if (cdb->byte2 & SWV_DPO)
8734 flags |= CTL_LLF_DPO;
8735 lba = scsi_4btoul(cdb->addr);
8736 num_blocks = scsi_2btoul(cdb->length);
8737 break;
8738 }
8739 case READ_12:
8740 case WRITE_12: {
8741 struct scsi_rw_12 *cdb;
8742
8743 cdb = (struct scsi_rw_12 *)ctsio->cdb;
8744 if (cdb->byte2 & SRW12_FUA)
8745 flags |= CTL_LLF_FUA;
8746 if (cdb->byte2 & SRW12_DPO)
8747 flags |= CTL_LLF_DPO;
8748 lba = scsi_4btoul(cdb->addr);
8749 num_blocks = scsi_4btoul(cdb->length);
8750 break;
8751 }
8752 case WRITE_VERIFY_12: {
8753 struct scsi_write_verify_12 *cdb;
8754
8755 cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
8756 flags |= CTL_LLF_FUA;
8757 if (cdb->byte2 & SWV_DPO)
8758 flags |= CTL_LLF_DPO;
8759 lba = scsi_4btoul(cdb->addr);
8760 num_blocks = scsi_4btoul(cdb->length);
8761 break;
8762 }
8763 case READ_16:
8764 case WRITE_16: {
8765 struct scsi_rw_16 *cdb;
8766
8767 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8768 if (cdb->byte2 & SRW12_FUA)
8769 flags |= CTL_LLF_FUA;
8770 if (cdb->byte2 & SRW12_DPO)
8771 flags |= CTL_LLF_DPO;
8772 lba = scsi_8btou64(cdb->addr);
8773 num_blocks = scsi_4btoul(cdb->length);
8774 break;
8775 }
8776 case WRITE_ATOMIC_16: {
8777 struct scsi_write_atomic_16 *cdb;
8778
8779 if (lun->be_lun->atomicblock == 0) {
8780 ctl_set_invalid_opcode(ctsio);
8781 ctl_done((union ctl_io *)ctsio);
8782 return (CTL_RETVAL_COMPLETE);
8783 }
8784
8785 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb;
8786 if (cdb->byte2 & SRW12_FUA)
8787 flags |= CTL_LLF_FUA;
8788 if (cdb->byte2 & SRW12_DPO)
8789 flags |= CTL_LLF_DPO;
8790 lba = scsi_8btou64(cdb->addr);
8791 num_blocks = scsi_2btoul(cdb->length);
8792 if (num_blocks > lun->be_lun->atomicblock) {
8793 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
8794 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
8795 /*bit*/ 0);
8796 ctl_done((union ctl_io *)ctsio);
8797 return (CTL_RETVAL_COMPLETE);
8798 }
8799 break;
8800 }
8801 case WRITE_VERIFY_16: {
8802 struct scsi_write_verify_16 *cdb;
8803
8804 cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
8805 flags |= CTL_LLF_FUA;
8806 if (cdb->byte2 & SWV_DPO)
8807 flags |= CTL_LLF_DPO;
8808 lba = scsi_8btou64(cdb->addr);
8809 num_blocks = scsi_4btoul(cdb->length);
8810 break;
8811 }
8812 default:
8813 /*
8814 * We got a command we don't support. This shouldn't
8815 * happen, commands should be filtered out above us.
8816 */
8817 ctl_set_invalid_opcode(ctsio);
8818 ctl_done((union ctl_io *)ctsio);
8819
8820 return (CTL_RETVAL_COMPLETE);
8821 break; /* NOTREACHED */
8822 }
8823
8824 /*
8825 * The first check is to make sure we're in bounds, the second
8826 * check is to catch wrap-around problems. If the lba + num blocks
8827 * is less than the lba, then we've wrapped around and the block
8828 * range is invalid anyway.
8829 */
8830 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8831 || ((lba + num_blocks) < lba)) {
8832 ctl_set_lba_out_of_range(ctsio,
8833 MAX(lba, lun->be_lun->maxlba + 1));
8834 ctl_done((union ctl_io *)ctsio);
8835 return (CTL_RETVAL_COMPLETE);
8836 }
8837
8838 /*
8839 * According to SBC-3, a transfer length of 0 is not an error.
8840 * Note that this cannot happen with WRITE(6) or READ(6), since 0
8841 * translates to 256 blocks for those commands.
8842 */
8843 if (num_blocks == 0) {
8844 ctl_set_success(ctsio);
8845 ctl_done((union ctl_io *)ctsio);
8846 return (CTL_RETVAL_COMPLETE);
8847 }
8848
8849 /* Set FUA and/or DPO if caches are disabled. */
8850 if (isread) {
8851 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0)
8852 flags |= CTL_LLF_FUA | CTL_LLF_DPO;
8853 } else {
8854 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
8855 flags |= CTL_LLF_FUA;
8856 }
8857
8858 lbalen = (struct ctl_lba_len_flags *)
8859 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8860 lbalen->lba = lba;
8861 lbalen->len = num_blocks;
8862 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;
8863
8864 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
8865 ctsio->kern_rel_offset = 0;
8866
8867 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
8868
8869 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8870 return (retval);
8871}
8872
8873static int
8874ctl_cnw_cont(union ctl_io *io)
8875{
8876 struct ctl_scsiio *ctsio;
8877 struct ctl_lun *lun;
8878 struct ctl_lba_len_flags *lbalen;
8879 int retval;
8880
8881 ctsio = &io->scsiio;
8882 ctsio->io_hdr.status = CTL_STATUS_NONE;
8883 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
8884 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8885 lbalen = (struct ctl_lba_len_flags *)
8886 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8887 lbalen->flags &= ~CTL_LLF_COMPARE;
8888 lbalen->flags |= CTL_LLF_WRITE;
8889
8890 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
8891 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8892 return (retval);
8893}
8894
8895int
8896ctl_cnw(struct ctl_scsiio *ctsio)
8897{
8898 struct ctl_lun *lun;
8899 struct ctl_lba_len_flags *lbalen;
8900 uint64_t lba;
8901 uint32_t num_blocks;
8902 int flags, retval;
8903
8904 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8905
8906 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
8907
8908 flags = 0;
8909 switch (ctsio->cdb[0]) {
8910 case COMPARE_AND_WRITE: {
8911 struct scsi_compare_and_write *cdb;
8912
8913 cdb = (struct scsi_compare_and_write *)ctsio->cdb;
8914 if (cdb->byte2 & SRW10_FUA)
8915 flags |= CTL_LLF_FUA;
8916 if (cdb->byte2 & SRW10_DPO)
8917 flags |= CTL_LLF_DPO;
8918 lba = scsi_8btou64(cdb->addr);
8919 num_blocks = cdb->length;
8920 break;
8921 }
8922 default:
8923 /*
8924 * We got a command we don't support. This shouldn't
8925 * happen, commands should be filtered out above us.
8926 */
8927 ctl_set_invalid_opcode(ctsio);
8928 ctl_done((union ctl_io *)ctsio);
8929
8930 return (CTL_RETVAL_COMPLETE);
8931 break; /* NOTREACHED */
8932 }
8933
8934 /*
8935 * The first check is to make sure we're in bounds, the second
8936 * check is to catch wrap-around problems. If the lba + num blocks
8937 * is less than the lba, then we've wrapped around and the block
8938 * range is invalid anyway.
8939 */
8940 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8941 || ((lba + num_blocks) < lba)) {
8942 ctl_set_lba_out_of_range(ctsio,
8943 MAX(lba, lun->be_lun->maxlba + 1));
8944 ctl_done((union ctl_io *)ctsio);
8945 return (CTL_RETVAL_COMPLETE);
8946 }
8947
8948 /*
8949 * According to SBC-3, a transfer length of 0 is not an error.
8950 */
8951 if (num_blocks == 0) {
8952 ctl_set_success(ctsio);
8953 ctl_done((union ctl_io *)ctsio);
8954 return (CTL_RETVAL_COMPLETE);
8955 }
8956
8957 /* Set FUA if write cache is disabled. */
8958 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
8959 flags |= CTL_LLF_FUA;
8960
8961 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
8962 ctsio->kern_rel_offset = 0;
8963
8964 /*
8965 * Set the IO_CONT flag, so that if this I/O gets passed to
8966 * ctl_data_submit_done(), it'll get passed back to
8967 * ctl_ctl_cnw_cont() for further processing.
8968 */
8969 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
8970 ctsio->io_cont = ctl_cnw_cont;
8971
8972 lbalen = (struct ctl_lba_len_flags *)
8973 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8974 lbalen->lba = lba;
8975 lbalen->len = num_blocks;
8976 lbalen->flags = CTL_LLF_COMPARE | flags;
8977
8978 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
8979 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8980 return (retval);
8981}
8982
8983int
8984ctl_verify(struct ctl_scsiio *ctsio)
8985{
8986 struct ctl_lun *lun;
8987 struct ctl_lba_len_flags *lbalen;
8988 uint64_t lba;
8989 uint32_t num_blocks;
8990 int bytchk, flags;
8991 int retval;
8992
8993 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8994
8995 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
8996
8997 bytchk = 0;
8998 flags = CTL_LLF_FUA;
8999 switch (ctsio->cdb[0]) {
9000 case VERIFY_10: {
9001 struct scsi_verify_10 *cdb;
9002
9003 cdb = (struct scsi_verify_10 *)ctsio->cdb;
9004 if (cdb->byte2 & SVFY_BYTCHK)
9005 bytchk = 1;
9006 if (cdb->byte2 & SVFY_DPO)
9007 flags |= CTL_LLF_DPO;
9008 lba = scsi_4btoul(cdb->addr);
9009 num_blocks = scsi_2btoul(cdb->length);
9010 break;
9011 }
9012 case VERIFY_12: {
9013 struct scsi_verify_12 *cdb;
9014
9015 cdb = (struct scsi_verify_12 *)ctsio->cdb;
9016 if (cdb->byte2 & SVFY_BYTCHK)
9017 bytchk = 1;
9018 if (cdb->byte2 & SVFY_DPO)
9019 flags |= CTL_LLF_DPO;
9020 lba = scsi_4btoul(cdb->addr);
9021 num_blocks = scsi_4btoul(cdb->length);
9022 break;
9023 }
9024 case VERIFY_16: {
9025 struct scsi_rw_16 *cdb;
9026
9027 cdb = (struct scsi_rw_16 *)ctsio->cdb;
9028 if (cdb->byte2 & SVFY_BYTCHK)
9029 bytchk = 1;
9030 if (cdb->byte2 & SVFY_DPO)
9031 flags |= CTL_LLF_DPO;
9032 lba = scsi_8btou64(cdb->addr);
9033 num_blocks = scsi_4btoul(cdb->length);
9034 break;
9035 }
9036 default:
9037 /*
9038 * We got a command we don't support. This shouldn't
9039 * happen, commands should be filtered out above us.
9040 */
9041 ctl_set_invalid_opcode(ctsio);
9042 ctl_done((union ctl_io *)ctsio);
9043 return (CTL_RETVAL_COMPLETE);
9044 }
9045
9046 /*
9047 * The first check is to make sure we're in bounds, the second
9048 * check is to catch wrap-around problems. If the lba + num blocks
9049 * is less than the lba, then we've wrapped around and the block
9050 * range is invalid anyway.
9051 */
9052 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9053 || ((lba + num_blocks) < lba)) {
9054 ctl_set_lba_out_of_range(ctsio,
9055 MAX(lba, lun->be_lun->maxlba + 1));
9056 ctl_done((union ctl_io *)ctsio);
9057 return (CTL_RETVAL_COMPLETE);
9058 }
9059
9060 /*
9061 * According to SBC-3, a transfer length of 0 is not an error.
9062 */
9063 if (num_blocks == 0) {
9064 ctl_set_success(ctsio);
9065 ctl_done((union ctl_io *)ctsio);
9066 return (CTL_RETVAL_COMPLETE);
9067 }
9068
9069 lbalen = (struct ctl_lba_len_flags *)
9070 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9071 lbalen->lba = lba;
9072 lbalen->len = num_blocks;
9073 if (bytchk) {
9074 lbalen->flags = CTL_LLF_COMPARE | flags;
9075 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
9076 } else {
9077 lbalen->flags = CTL_LLF_VERIFY | flags;
9078 ctsio->kern_total_len = 0;
9079 }
9080 ctsio->kern_rel_offset = 0;
9081
9082 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
9083 retval = lun->backend->data_submit((union ctl_io *)ctsio);
9084 return (retval);
9085}
9086
9087int
9088ctl_report_luns(struct ctl_scsiio *ctsio)
9089{
9090 struct ctl_softc *softc;
9091 struct scsi_report_luns *cdb;
9092 struct scsi_report_luns_data *lun_data;
9093 struct ctl_lun *lun, *request_lun;
9094 struct ctl_port *port;
9095 int num_filled, num_luns, num_port_luns, retval;
9096 uint32_t alloc_len, lun_datalen;
9097 uint32_t initidx, targ_lun_id, lun_id;
9098
9099 retval = CTL_RETVAL_COMPLETE;
9100 cdb = (struct scsi_report_luns *)ctsio->cdb;
9101 port = ctl_io_port(&ctsio->io_hdr);
9102 softc = port->ctl_softc;
9103
9104 CTL_DEBUG_PRINT(("ctl_report_luns\n"));
9105
9106 num_luns = 0;
9107 num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS;
9108 mtx_lock(&softc->ctl_lock);
9109 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) {
9110 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX)
9111 num_luns++;
9112 }
9113 mtx_unlock(&softc->ctl_lock);
9114
9115 switch (cdb->select_report) {
9116 case RPL_REPORT_DEFAULT:
9117 case RPL_REPORT_ALL:
9118 case RPL_REPORT_NONSUBSID:
9119 break;
9120 case RPL_REPORT_WELLKNOWN:
9121 case RPL_REPORT_ADMIN:
9122 case RPL_REPORT_CONGLOM:
9123 num_luns = 0;
9124 break;
9125 default:
9126 ctl_set_invalid_field(ctsio,
9127 /*sks_valid*/ 1,
9128 /*command*/ 1,
9129 /*field*/ 2,
9130 /*bit_valid*/ 0,
9131 /*bit*/ 0);
9132 ctl_done((union ctl_io *)ctsio);
9133 return (retval);
9134 break; /* NOTREACHED */
9135 }
9136
9137 alloc_len = scsi_4btoul(cdb->length);
9138 /*
9139 * The initiator has to allocate at least 16 bytes for this request,
9140 * so he can at least get the header and the first LUN. Otherwise
9141 * we reject the request (per SPC-3 rev 14, section 6.21).
9142 */
9143 if (alloc_len < (sizeof(struct scsi_report_luns_data) +
9144 sizeof(struct scsi_report_luns_lundata))) {
9145 ctl_set_invalid_field(ctsio,
9146 /*sks_valid*/ 1,
9147 /*command*/ 1,
9148 /*field*/ 6,
9149 /*bit_valid*/ 0,
9150 /*bit*/ 0);
9151 ctl_done((union ctl_io *)ctsio);
9152 return (retval);
9153 }
9154
9155 request_lun = (struct ctl_lun *)
9156 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9157
9158 lun_datalen = sizeof(*lun_data) +
9159 (num_luns * sizeof(struct scsi_report_luns_lundata));
9160
9161 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
9162 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
9163 ctsio->kern_sg_entries = 0;
9164
9165 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9166
9167 mtx_lock(&softc->ctl_lock);
9168 for (targ_lun_id = 0, num_filled = 0;
9169 targ_lun_id < num_port_luns && num_filled < num_luns;
9170 targ_lun_id++) {
9171 lun_id = ctl_lun_map_from_port(port, targ_lun_id);
9172 if (lun_id == UINT32_MAX)
9173 continue;
9174 lun = softc->ctl_luns[lun_id];
9175 if (lun == NULL)
9176 continue;
9177
9178 be64enc(lun_data->luns[num_filled++].lundata,
9179 ctl_encode_lun(targ_lun_id));
9180
9181 /*
9182 * According to SPC-3, rev 14 section 6.21:
9183 *
9184 * "The execution of a REPORT LUNS command to any valid and
9185 * installed logical unit shall clear the REPORTED LUNS DATA
9186 * HAS CHANGED unit attention condition for all logical
9187 * units of that target with respect to the requesting
9188 * initiator. A valid and installed logical unit is one
9189 * having a PERIPHERAL QUALIFIER of 000b in the standard
9190 * INQUIRY data (see 6.4.2)."
9191 *
9192 * If request_lun is NULL, the LUN this report luns command
9193 * was issued to is either disabled or doesn't exist. In that
9194 * case, we shouldn't clear any pending lun change unit
9195 * attention.
9196 */
9197 if (request_lun != NULL) {
9198 mtx_lock(&lun->lun_lock);
9199 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
9200 mtx_unlock(&lun->lun_lock);
9201 }
9202 }
9203 mtx_unlock(&softc->ctl_lock);
9204
9205 /*
9206 * It's quite possible that we've returned fewer LUNs than we allocated
9207 * space for. Trim it.
9208 */
9209 lun_datalen = sizeof(*lun_data) +
9210 (num_filled * sizeof(struct scsi_report_luns_lundata));
9211
9212 if (lun_datalen < alloc_len) {
9213 ctsio->residual = alloc_len - lun_datalen;
9214 ctsio->kern_data_len = lun_datalen;
9215 ctsio->kern_total_len = lun_datalen;
9216 } else {
9217 ctsio->residual = 0;
9218 ctsio->kern_data_len = alloc_len;
9219 ctsio->kern_total_len = alloc_len;
9220 }
9221 ctsio->kern_data_resid = 0;
9222 ctsio->kern_rel_offset = 0;
9223 ctsio->kern_sg_entries = 0;
9224
9225 /*
9226 * We set this to the actual data length, regardless of how much
9227 * space we actually have to return results. If the user looks at
9228 * this value, he'll know whether or not he allocated enough space
9229 * and reissue the command if necessary. We don't support well
9230 * known logical units, so if the user asks for that, return none.
9231 */
9232 scsi_ulto4b(lun_datalen - 8, lun_data->length);
9233
9234 /*
9235 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
9236 * this request.
9237 */
9238 ctl_set_success(ctsio);
9239 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9240 ctsio->be_move_done = ctl_config_move_done;
9241 ctl_datamove((union ctl_io *)ctsio);
9242 return (retval);
9243}
9244
9245int
9246ctl_request_sense(struct ctl_scsiio *ctsio)
9247{
9248 struct scsi_request_sense *cdb;
9249 struct scsi_sense_data *sense_ptr;
9250 struct ctl_softc *softc;
9251 struct ctl_lun *lun;
9252 uint32_t initidx;
9253 int have_error;
9254 u_int sense_len = SSD_FULL_SIZE;
9255 scsi_sense_data_type sense_format;
9256 ctl_ua_type ua_type;
9257 uint8_t asc = 0, ascq = 0;
9258
9259 cdb = (struct scsi_request_sense *)ctsio->cdb;
9260
9261 softc = control_softc;
9262 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9263
9264 CTL_DEBUG_PRINT(("ctl_request_sense\n"));
9265
9266 /*
9267 * Determine which sense format the user wants.
9268 */
9269 if (cdb->byte2 & SRS_DESC)
9270 sense_format = SSD_TYPE_DESC;
9271 else
9272 sense_format = SSD_TYPE_FIXED;
9273
9274 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
9275 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
9276 ctsio->kern_sg_entries = 0;
9277
9278 /*
9279 * struct scsi_sense_data, which is currently set to 256 bytes, is
9280 * larger than the largest allowed value for the length field in the
9281 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
9282 */
9283 ctsio->residual = 0;
9284 ctsio->kern_data_len = cdb->length;
9285 ctsio->kern_total_len = cdb->length;
9286
9287 ctsio->kern_data_resid = 0;
9288 ctsio->kern_rel_offset = 0;
9289 ctsio->kern_sg_entries = 0;
9290
9291 /*
9292 * If we don't have a LUN, we don't have any pending sense.
9293 */
9294 if (lun == NULL ||
9295 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
9296 softc->ha_link < CTL_HA_LINK_UNKNOWN)) {
9297 /* "Logical unit not supported" */
9298 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format,
9299 /*current_error*/ 1,
9300 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
9301 /*asc*/ 0x25,
9302 /*ascq*/ 0x00,
9303 SSD_ELEM_NONE);
9304 goto send;
9305 }
9306
9307 have_error = 0;
9308 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9309 /*
9310 * Check for pending sense, and then for pending unit attentions.
9311 * Pending sense gets returned first, then pending unit attentions.
9312 */
9313 mtx_lock(&lun->lun_lock);
9314#ifdef CTL_WITH_CA
9315 if (ctl_is_set(lun->have_ca, initidx)) {
9316 scsi_sense_data_type stored_format;
9317
9318 /*
9319 * Check to see which sense format was used for the stored
9320 * sense data.
9321 */
9322 stored_format = scsi_sense_type(&lun->pending_sense[initidx]);
9323
9324 /*
9325 * If the user requested a different sense format than the
9326 * one we stored, then we need to convert it to the other
9327 * format. If we're going from descriptor to fixed format
9328 * sense data, we may lose things in translation, depending
9329 * on what options were used.
9330 *
9331 * If the stored format is SSD_TYPE_NONE (i.e. invalid),
9332 * for some reason we'll just copy it out as-is.
9333 */
9334 if ((stored_format == SSD_TYPE_FIXED)
9335 && (sense_format == SSD_TYPE_DESC))
9336 ctl_sense_to_desc((struct scsi_sense_data_fixed *)
9337 &lun->pending_sense[initidx],
9338 (struct scsi_sense_data_desc *)sense_ptr);
9339 else if ((stored_format == SSD_TYPE_DESC)
9340 && (sense_format == SSD_TYPE_FIXED))
9341 ctl_sense_to_fixed((struct scsi_sense_data_desc *)
9342 &lun->pending_sense[initidx],
9343 (struct scsi_sense_data_fixed *)sense_ptr);
9344 else
9345 memcpy(sense_ptr, &lun->pending_sense[initidx],
9346 MIN(sizeof(*sense_ptr),
9347 sizeof(lun->pending_sense[initidx])));
9348
9349 ctl_clear_mask(lun->have_ca, initidx);
9350 have_error = 1;
9351 } else
9352#endif
9353 if (have_error == 0) {
9354 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len,
9355 sense_format);
9356 if (ua_type != CTL_UA_NONE)
9357 have_error = 1;
9358 }
9359 if (have_error == 0) {
9360 /*
9361 * Report informational exception if have one and allowed.
9362 */
9363 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) {
9364 asc = lun->ie_asc;
9365 ascq = lun->ie_ascq;
9366 }
9367 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format,
9368 /*current_error*/ 1,
9369 /*sense_key*/ SSD_KEY_NO_SENSE,
9370 /*asc*/ asc,
9371 /*ascq*/ ascq,
9372 SSD_ELEM_NONE);
9373 }
9374 mtx_unlock(&lun->lun_lock);
9375
9376send:
9377 /*
9378 * We report the SCSI status as OK, since the status of the command
9379 * itself is OK. We're reporting sense as parameter data.
9380 */
9381 ctl_set_success(ctsio);
9382 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9383 ctsio->be_move_done = ctl_config_move_done;
9384 ctl_datamove((union ctl_io *)ctsio);
9385 return (CTL_RETVAL_COMPLETE);
9386}
9387
9388int
9389ctl_tur(struct ctl_scsiio *ctsio)
9390{
9391
9392 CTL_DEBUG_PRINT(("ctl_tur\n"));
9393
9394 ctl_set_success(ctsio);
9395 ctl_done((union ctl_io *)ctsio);
9396
9397 return (CTL_RETVAL_COMPLETE);
9398}
9399
9400/*
9401 * SCSI VPD page 0x00, the Supported VPD Pages page.
9402 */
9403static int
9404ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
9405{
9406 struct scsi_vpd_supported_pages *pages;
9407 int sup_page_size;
9408 struct ctl_lun *lun;
9409 int p;
9410
9411 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9412
9413 sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
9414 SCSI_EVPD_NUM_SUPPORTED_PAGES;
9415 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
9416 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
9417 ctsio->kern_sg_entries = 0;
9418
9419 if (sup_page_size < alloc_len) {
9420 ctsio->residual = alloc_len - sup_page_size;
9421 ctsio->kern_data_len = sup_page_size;
9422 ctsio->kern_total_len = sup_page_size;
9423 } else {
9424 ctsio->residual = 0;
9425 ctsio->kern_data_len = alloc_len;
9426 ctsio->kern_total_len = alloc_len;
9427 }
9428 ctsio->kern_data_resid = 0;
9429 ctsio->kern_rel_offset = 0;
9430 ctsio->kern_sg_entries = 0;
9431
9432 /*
9433 * The control device is always connected. The disk device, on the
9434 * other hand, may not be online all the time. Need to change this
9435 * to figure out whether the disk device is actually online or not.
9436 */
9437 if (lun != NULL)
9438 pages->device = (SID_QUAL_LU_CONNECTED << 5) |
9439 lun->be_lun->lun_type;
9440 else
9441 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9442
9443 p = 0;
9444 /* Supported VPD pages */
9445 pages->page_list[p++] = SVPD_SUPPORTED_PAGES;
9446 /* Serial Number */
9447 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER;
9448 /* Device Identification */
9449 pages->page_list[p++] = SVPD_DEVICE_ID;
9450 /* Extended INQUIRY Data */
9451 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA;
9452 /* Mode Page Policy */
9453 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY;
9454 /* SCSI Ports */
9455 pages->page_list[p++] = SVPD_SCSI_PORTS;
9456 /* Third-party Copy */
9457 pages->page_list[p++] = SVPD_SCSI_TPC;
9458 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
9459 /* Block limits */
9460 pages->page_list[p++] = SVPD_BLOCK_LIMITS;
9461 /* Block Device Characteristics */
9462 pages->page_list[p++] = SVPD_BDC;
9463 /* Logical Block Provisioning */
9464 pages->page_list[p++] = SVPD_LBP;
9465 }
9466 pages->length = p;
9467
9468 ctl_set_success(ctsio);
9469 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9470 ctsio->be_move_done = ctl_config_move_done;
9471 ctl_datamove((union ctl_io *)ctsio);
9472 return (CTL_RETVAL_COMPLETE);
9473}
9474
9475/*
9476 * SCSI VPD page 0x80, the Unit Serial Number page.
9477 */
9478static int
9479ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
9480{
9481 struct scsi_vpd_unit_serial_number *sn_ptr;
9482 struct ctl_lun *lun;
9483 int data_len;
9484
9485 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9486
9487 data_len = 4 + CTL_SN_LEN;
9488 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9489 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
9490 if (data_len < alloc_len) {
9491 ctsio->residual = alloc_len - data_len;
9492 ctsio->kern_data_len = data_len;
9493 ctsio->kern_total_len = data_len;
9494 } else {
9495 ctsio->residual = 0;
9496 ctsio->kern_data_len = alloc_len;
9497 ctsio->kern_total_len = alloc_len;
9498 }
9499 ctsio->kern_data_resid = 0;
9500 ctsio->kern_rel_offset = 0;
9501 ctsio->kern_sg_entries = 0;
9502
9503 /*
9504 * The control device is always connected. The disk device, on the
9505 * other hand, may not be online all the time. Need to change this
9506 * to figure out whether the disk device is actually online or not.
9507 */
9508 if (lun != NULL)
9509 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9510 lun->be_lun->lun_type;
9511 else
9512 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9513
9514 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
9515 sn_ptr->length = CTL_SN_LEN;
9516 /*
9517 * If we don't have a LUN, we just leave the serial number as
9518 * all spaces.
9519 */
9520 if (lun != NULL) {
9521 strncpy((char *)sn_ptr->serial_num,
9522 (char *)lun->be_lun->serial_num, CTL_SN_LEN);
9523 } else
9524 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN);
9525
9526 ctl_set_success(ctsio);
9527 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9528 ctsio->be_move_done = ctl_config_move_done;
9529 ctl_datamove((union ctl_io *)ctsio);
9530 return (CTL_RETVAL_COMPLETE);
9531}
9532
9533
9534/*
9535 * SCSI VPD page 0x86, the Extended INQUIRY Data page.
9536 */
9537static int
9538ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
9539{
9540 struct scsi_vpd_extended_inquiry_data *eid_ptr;
9541 struct ctl_lun *lun;
9542 int data_len;
9543
9544 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9545
9546 data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
9547 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9548 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
9549 ctsio->kern_sg_entries = 0;
9550
9551 if (data_len < alloc_len) {
9552 ctsio->residual = alloc_len - data_len;
9553 ctsio->kern_data_len = data_len;
9554 ctsio->kern_total_len = data_len;
9555 } else {
9556 ctsio->residual = 0;
9557 ctsio->kern_data_len = alloc_len;
9558 ctsio->kern_total_len = alloc_len;
9559 }
9560 ctsio->kern_data_resid = 0;
9561 ctsio->kern_rel_offset = 0;
9562 ctsio->kern_sg_entries = 0;
9563
9564 /*
9565 * The control device is always connected. The disk device, on the
9566 * other hand, may not be online all the time.
9567 */
9568 if (lun != NULL)
9569 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9570 lun->be_lun->lun_type;
9571 else
9572 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9573 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
9574 scsi_ulto2b(data_len - 4, eid_ptr->page_length);
9575 /*
9576 * We support head of queue, ordered and simple tags.
9577 */
9578 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
9579 /*
9580 * Volatile cache supported.
9581 */
9582 eid_ptr->flags3 = SVPD_EID_V_SUP;
9583
9584 /*
9585 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit
9586 * attention for a particular IT nexus on all LUNs once we report
9587 * it to that nexus once. This bit is required as of SPC-4.
9588 */
9589 eid_ptr->flags4 = SVPD_EID_LUICLR;
9590
9591 /*
9592 * We support revert to defaults (RTD) bit in MODE SELECT.
9593 */
9594 eid_ptr->flags5 = SVPD_EID_RTD_SUP;
9595
9596 /*
9597 * XXX KDM in order to correctly answer this, we would need
9598 * information from the SIM to determine how much sense data it
9599 * can send. So this would really be a path inquiry field, most
9600 * likely. This can be set to a maximum of 252 according to SPC-4,
9601 * but the hardware may or may not be able to support that much.
9602 * 0 just means that the maximum sense data length is not reported.
9603 */
9604 eid_ptr->max_sense_length = 0;
9605
9606 ctl_set_success(ctsio);
9607 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9608 ctsio->be_move_done = ctl_config_move_done;
9609 ctl_datamove((union ctl_io *)ctsio);
9610 return (CTL_RETVAL_COMPLETE);
9611}
9612
9613static int
9614ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
9615{
9616 struct scsi_vpd_mode_page_policy *mpp_ptr;
9617 struct ctl_lun *lun;
9618 int data_len;
9619
9620 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9621
9622 data_len = sizeof(struct scsi_vpd_mode_page_policy) +
9623 sizeof(struct scsi_vpd_mode_page_policy_descr);
9624
9625 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9626 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
9627 ctsio->kern_sg_entries = 0;
9628
9629 if (data_len < alloc_len) {
9630 ctsio->residual = alloc_len - data_len;
9631 ctsio->kern_data_len = data_len;
9632 ctsio->kern_total_len = data_len;
9633 } else {
9634 ctsio->residual = 0;
9635 ctsio->kern_data_len = alloc_len;
9636 ctsio->kern_total_len = alloc_len;
9637 }
9638 ctsio->kern_data_resid = 0;
9639 ctsio->kern_rel_offset = 0;
9640 ctsio->kern_sg_entries = 0;
9641
9642 /*
9643 * The control device is always connected. The disk device, on the
9644 * other hand, may not be online all the time.
9645 */
9646 if (lun != NULL)
9647 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9648 lun->be_lun->lun_type;
9649 else
9650 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9651 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
9652 scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
9653 mpp_ptr->descr[0].page_code = 0x3f;
9654 mpp_ptr->descr[0].subpage_code = 0xff;
9655 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;
9656
9657 ctl_set_success(ctsio);
9658 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9659 ctsio->be_move_done = ctl_config_move_done;
9660 ctl_datamove((union ctl_io *)ctsio);
9661 return (CTL_RETVAL_COMPLETE);
9662}
9663
9664/*
9665 * SCSI VPD page 0x83, the Device Identification page.
9666 */
9667static int
9668ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
9669{
9670 struct scsi_vpd_device_id *devid_ptr;
9671 struct scsi_vpd_id_descriptor *desc;
9672 struct ctl_softc *softc;
9673 struct ctl_lun *lun;
9674 struct ctl_port *port;
9675 int data_len, g;
9676 uint8_t proto;
9677
9678 softc = control_softc;
9679
9680 port = ctl_io_port(&ctsio->io_hdr);
9681 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9682
9683 data_len = sizeof(struct scsi_vpd_device_id) +
9684 sizeof(struct scsi_vpd_id_descriptor) +
9685 sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
9686 sizeof(struct scsi_vpd_id_descriptor) +
9687 sizeof(struct scsi_vpd_id_trgt_port_grp_id);
9688 if (lun && lun->lun_devid)
9689 data_len += lun->lun_devid->len;
9690 if (port && port->port_devid)
9691 data_len += port->port_devid->len;
9692 if (port && port->target_devid)
9693 data_len += port->target_devid->len;
9694
9695 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9696 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
9697 ctsio->kern_sg_entries = 0;
9698
9699 if (data_len < alloc_len) {
9700 ctsio->residual = alloc_len - data_len;
9701 ctsio->kern_data_len = data_len;
9702 ctsio->kern_total_len = data_len;
9703 } else {
9704 ctsio->residual = 0;
9705 ctsio->kern_data_len = alloc_len;
9706 ctsio->kern_total_len = alloc_len;
9707 }
9708 ctsio->kern_data_resid = 0;
9709 ctsio->kern_rel_offset = 0;
9710 ctsio->kern_sg_entries = 0;
9711
9712 /*
9713 * The control device is always connected. The disk device, on the
9714 * other hand, may not be online all the time.
9715 */
9716 if (lun != NULL)
9717 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9718 lun->be_lun->lun_type;
9719 else
9720 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9721 devid_ptr->page_code = SVPD_DEVICE_ID;
9722 scsi_ulto2b(data_len - 4, devid_ptr->length);
9723
9724 if (port && port->port_type == CTL_PORT_FC)
9725 proto = SCSI_PROTO_FC << 4;
9726 else if (port && port->port_type == CTL_PORT_ISCSI)
9727 proto = SCSI_PROTO_ISCSI << 4;
9728 else
9729 proto = SCSI_PROTO_SPI << 4;
9730 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
9731
9732 /*
9733 * We're using a LUN association here. i.e., this device ID is a
9734 * per-LUN identifier.
9735 */
9736 if (lun && lun->lun_devid) {
9737 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
9738 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
9739 lun->lun_devid->len);
9740 }
9741
9742 /*
9743 * This is for the WWPN which is a port association.
9744 */
9745 if (port && port->port_devid) {
9746 memcpy(desc, port->port_devid->data, port->port_devid->len);
9747 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
9748 port->port_devid->len);
9749 }
9750
9751 /*
9752 * This is for the Relative Target Port(type 4h) identifier
9753 */
9754 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
9755 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
9756 SVPD_ID_TYPE_RELTARG;
9757 desc->length = 4;
9758 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
9759 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9760 sizeof(struct scsi_vpd_id_rel_trgt_port_id));
9761
9762 /*
9763 * This is for the Target Port Group(type 5h) identifier
9764 */
9765 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
9766 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
9767 SVPD_ID_TYPE_TPORTGRP;
9768 desc->length = 4;
9769 if (softc->is_single ||
9770 (port && port->status & CTL_PORT_STATUS_HA_SHARED))
9771 g = 1;
9772 else
9773 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt;
9774 scsi_ulto2b(g, &desc->identifier[2]);
9775 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9776 sizeof(struct scsi_vpd_id_trgt_port_grp_id));
9777
9778 /*
9779 * This is for the Target identifier
9780 */
9781 if (port && port->target_devid) {
9782 memcpy(desc, port->target_devid->data, port->target_devid->len);
9783 }
9784
9785 ctl_set_success(ctsio);
9786 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9787 ctsio->be_move_done = ctl_config_move_done;
9788 ctl_datamove((union ctl_io *)ctsio);
9789 return (CTL_RETVAL_COMPLETE);
9790}
9791
9792static int
9793ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
9794{
9795 struct ctl_softc *softc = control_softc;
9796 struct scsi_vpd_scsi_ports *sp;
9797 struct scsi_vpd_port_designation *pd;
9798 struct scsi_vpd_port_designation_cont *pdc;
9799 struct ctl_lun *lun;
9800 struct ctl_port *port;
9801 int data_len, num_target_ports, iid_len, id_len;
9802
9803 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9804
9805 num_target_ports = 0;
9806 iid_len = 0;
9807 id_len = 0;
9808 mtx_lock(&softc->ctl_lock);
9809 STAILQ_FOREACH(port, &softc->port_list, links) {
9810 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9811 continue;
9812 if (lun != NULL &&
9813 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
9814 continue;
9815 num_target_ports++;
9816 if (port->init_devid)
9817 iid_len += port->init_devid->len;
9818 if (port->port_devid)
9819 id_len += port->port_devid->len;
9820 }
9821 mtx_unlock(&softc->ctl_lock);
9822
9823 data_len = sizeof(struct scsi_vpd_scsi_ports) +
9824 num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
9825 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
9826 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9827 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
9828 ctsio->kern_sg_entries = 0;
9829
9830 if (data_len < alloc_len) {
9831 ctsio->residual = alloc_len - data_len;
9832 ctsio->kern_data_len = data_len;
9833 ctsio->kern_total_len = data_len;
9834 } else {
9835 ctsio->residual = 0;
9836 ctsio->kern_data_len = alloc_len;
9837 ctsio->kern_total_len = alloc_len;
9838 }
9839 ctsio->kern_data_resid = 0;
9840 ctsio->kern_rel_offset = 0;
9841 ctsio->kern_sg_entries = 0;
9842
9843 /*
9844 * The control device is always connected. The disk device, on the
9845 * other hand, may not be online all the time. Need to change this
9846 * to figure out whether the disk device is actually online or not.
9847 */
9848 if (lun != NULL)
9849 sp->device = (SID_QUAL_LU_CONNECTED << 5) |
9850 lun->be_lun->lun_type;
9851 else
9852 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9853
9854 sp->page_code = SVPD_SCSI_PORTS;
9855 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
9856 sp->page_length);
9857 pd = &sp->design[0];
9858
9859 mtx_lock(&softc->ctl_lock);
9860 STAILQ_FOREACH(port, &softc->port_list, links) {
9861 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9862 continue;
9863 if (lun != NULL &&
9864 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
9865 continue;
9866 scsi_ulto2b(port->targ_port, pd->relative_port_id);
9867 if (port->init_devid) {
9868 iid_len = port->init_devid->len;
9869 memcpy(pd->initiator_transportid,
9870 port->init_devid->data, port->init_devid->len);
9871 } else
9872 iid_len = 0;
9873 scsi_ulto2b(iid_len, pd->initiator_transportid_length);
9874 pdc = (struct scsi_vpd_port_designation_cont *)
9875 (&pd->initiator_transportid[iid_len]);
9876 if (port->port_devid) {
9877 id_len = port->port_devid->len;
9878 memcpy(pdc->target_port_descriptors,
9879 port->port_devid->data, port->port_devid->len);
9880 } else
9881 id_len = 0;
9882 scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
9883 pd = (struct scsi_vpd_port_designation *)
9884 ((uint8_t *)pdc->target_port_descriptors + id_len);
9885 }
9886 mtx_unlock(&softc->ctl_lock);
9887
9888 ctl_set_success(ctsio);
9889 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9890 ctsio->be_move_done = ctl_config_move_done;
9891 ctl_datamove((union ctl_io *)ctsio);
9892 return (CTL_RETVAL_COMPLETE);
9893}
9894
9895static int
9896ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
9897{
9898 struct scsi_vpd_block_limits *bl_ptr;
9899 struct ctl_lun *lun;
9900 uint64_t ival;
9901
9902 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9903
9904 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
9905 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
9906 ctsio->kern_sg_entries = 0;
9907
9908 if (sizeof(*bl_ptr) < alloc_len) {
9909 ctsio->residual = alloc_len - sizeof(*bl_ptr);
9910 ctsio->kern_data_len = sizeof(*bl_ptr);
9911 ctsio->kern_total_len = sizeof(*bl_ptr);
9912 } else {
9913 ctsio->residual = 0;
9914 ctsio->kern_data_len = alloc_len;
9915 ctsio->kern_total_len = alloc_len;
9916 }
9917 ctsio->kern_data_resid = 0;
9918 ctsio->kern_rel_offset = 0;
9919 ctsio->kern_sg_entries = 0;
9920
9921 /*
9922 * The control device is always connected. The disk device, on the
9923 * other hand, may not be online all the time. Need to change this
9924 * to figure out whether the disk device is actually online or not.
9925 */
9926 if (lun != NULL)
9927 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9928 lun->be_lun->lun_type;
9929 else
9930 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9931
9932 bl_ptr->page_code = SVPD_BLOCK_LIMITS;
9933 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
9934 bl_ptr->max_cmp_write_len = 0xff;
9935 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
9936 if (lun != NULL) {
9937 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
9938 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
9939 ival = 0xffffffff;
9940 ctl_get_opt_number(&lun->be_lun->options,
9941 "unmap_max_lba", &ival);
9942 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt);
9943 ival = 0xffffffff;
9944 ctl_get_opt_number(&lun->be_lun->options,
9945 "unmap_max_descr", &ival);
9946 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt);
9947 if (lun->be_lun->ublockexp != 0) {
9948 scsi_ulto4b((1 << lun->be_lun->ublockexp),
9949 bl_ptr->opt_unmap_grain);
9950 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
9951 bl_ptr->unmap_grain_align);
9952 }
9953 }
9954 scsi_ulto4b(lun->be_lun->atomicblock,
9955 bl_ptr->max_atomic_transfer_length);
9956 scsi_ulto4b(0, bl_ptr->atomic_alignment);
9957 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
9958 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary);
9959 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size);
9960 ival = UINT64_MAX;
9961 ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival);
9962 scsi_u64to8b(ival, bl_ptr->max_write_same_length);
9963 }
9964
9965 ctl_set_success(ctsio);
9966 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9967 ctsio->be_move_done = ctl_config_move_done;
9968 ctl_datamove((union ctl_io *)ctsio);
9969 return (CTL_RETVAL_COMPLETE);
9970}
9971
9972static int
9973ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
9974{
9975 struct scsi_vpd_block_device_characteristics *bdc_ptr;
9976 struct ctl_lun *lun;
9977 const char *value;
9978 u_int i;
9979
9980 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9981
9982 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
9983 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
9984 ctsio->kern_sg_entries = 0;
9985
9986 if (sizeof(*bdc_ptr) < alloc_len) {
9987 ctsio->residual = alloc_len - sizeof(*bdc_ptr);
9988 ctsio->kern_data_len = sizeof(*bdc_ptr);
9989 ctsio->kern_total_len = sizeof(*bdc_ptr);
9990 } else {
9991 ctsio->residual = 0;
9992 ctsio->kern_data_len = alloc_len;
9993 ctsio->kern_total_len = alloc_len;
9994 }
9995 ctsio->kern_data_resid = 0;
9996 ctsio->kern_rel_offset = 0;
9997 ctsio->kern_sg_entries = 0;
9998
9999 /*
10000 * The control device is always connected. The disk device, on the
10001 * other hand, may not be online all the time. Need to change this
10002 * to figure out whether the disk device is actually online or not.
10003 */
10004 if (lun != NULL)
10005 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10006 lun->be_lun->lun_type;
10007 else
10008 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10009 bdc_ptr->page_code = SVPD_BDC;
10010 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
10011 if (lun != NULL &&
10012 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL)
10013 i = strtol(value, NULL, 0);
10014 else
10015 i = CTL_DEFAULT_ROTATION_RATE;
10016 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
10017 if (lun != NULL &&
10018 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL)
10019 i = strtol(value, NULL, 0);
10020 else
10021 i = 0;
10022 bdc_ptr->wab_wac_ff = (i & 0x0f);
10023 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;
10024
10025 ctl_set_success(ctsio);
10026 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10027 ctsio->be_move_done = ctl_config_move_done;
10028 ctl_datamove((union ctl_io *)ctsio);
10029 return (CTL_RETVAL_COMPLETE);
10030}
10031
10032static int
10033ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
10034{
10035 struct scsi_vpd_logical_block_prov *lbp_ptr;
10036 struct ctl_lun *lun;
10037 const char *value;
10038
10039 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10040
10041 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
10042 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
10043 ctsio->kern_sg_entries = 0;
10044
10045 if (sizeof(*lbp_ptr) < alloc_len) {
10046 ctsio->residual = alloc_len - sizeof(*lbp_ptr);
10047 ctsio->kern_data_len = sizeof(*lbp_ptr);
10048 ctsio->kern_total_len = sizeof(*lbp_ptr);
10049 } else {
10050 ctsio->residual = 0;
10051 ctsio->kern_data_len = alloc_len;
10052 ctsio->kern_total_len = alloc_len;
10053 }
10054 ctsio->kern_data_resid = 0;
10055 ctsio->kern_rel_offset = 0;
10056 ctsio->kern_sg_entries = 0;
10057
10058 /*
10059 * The control device is always connected. The disk device, on the
10060 * other hand, may not be online all the time. Need to change this
10061 * to figure out whether the disk device is actually online or not.
10062 */
10063 if (lun != NULL)
10064 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10065 lun->be_lun->lun_type;
10066 else
10067 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10068
10069 lbp_ptr->page_code = SVPD_LBP;
10070 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
10071 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
10072 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
10073 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
10074 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
10075 value = ctl_get_opt(&lun->be_lun->options, "provisioning_type");
10076 if (value != NULL) {
10077 if (strcmp(value, "resource") == 0)
10078 lbp_ptr->prov_type = SVPD_LBP_RESOURCE;
10079 else if (strcmp(value, "thin") == 0)
10080 lbp_ptr->prov_type = SVPD_LBP_THIN;
10081 } else
10082 lbp_ptr->prov_type = SVPD_LBP_THIN;
10083 }
10084
10085 ctl_set_success(ctsio);
10086 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10087 ctsio->be_move_done = ctl_config_move_done;
10088 ctl_datamove((union ctl_io *)ctsio);
10089 return (CTL_RETVAL_COMPLETE);
10090}
10091
10092/*
10093 * INQUIRY with the EVPD bit set.
10094 */
10095static int
10096ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
10097{
10098 struct ctl_lun *lun;
10099 struct scsi_inquiry *cdb;
10100 int alloc_len, retval;
10101
10102 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10103 cdb = (struct scsi_inquiry *)ctsio->cdb;
10104 alloc_len = scsi_2btoul(cdb->length);
10105
10106 switch (cdb->page_code) {
10107 case SVPD_SUPPORTED_PAGES:
10108 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
10109 break;
10110 case SVPD_UNIT_SERIAL_NUMBER:
10111 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
10112 break;
10113 case SVPD_DEVICE_ID:
10114 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
10115 break;
10116 case SVPD_EXTENDED_INQUIRY_DATA:
10117 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
10118 break;
10119 case SVPD_MODE_PAGE_POLICY:
10120 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
10121 break;
10122 case SVPD_SCSI_PORTS:
10123 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
10124 break;
10125 case SVPD_SCSI_TPC:
10126 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
10127 break;
10128 case SVPD_BLOCK_LIMITS:
10129 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
10130 goto err;
10131 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
10132 break;
10133 case SVPD_BDC:
10134 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
10135 goto err;
10136 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
10137 break;
10138 case SVPD_LBP:
10139 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
10140 goto err;
10141 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
10142 break;
10143 default:
10144err:
10145 ctl_set_invalid_field(ctsio,
10146 /*sks_valid*/ 1,
10147 /*command*/ 1,
10148 /*field*/ 2,
10149 /*bit_valid*/ 0,
10150 /*bit*/ 0);
10151 ctl_done((union ctl_io *)ctsio);
10152 retval = CTL_RETVAL_COMPLETE;
10153 break;
10154 }
10155
10156 return (retval);
10157}
10158
10159/*
10160 * Standard INQUIRY data.
10161 */
10162static int
10163ctl_inquiry_std(struct ctl_scsiio *ctsio)
10164{
10165 struct scsi_inquiry_data *inq_ptr;
10166 struct scsi_inquiry *cdb;
10167 struct ctl_softc *softc = control_softc;
10168 struct ctl_port *port;
10169 struct ctl_lun *lun;
10170 char *val;
10171 uint32_t alloc_len, data_len;
10172 ctl_port_type port_type;
10173
10174 port = ctl_io_port(&ctsio->io_hdr);
10175 port_type = port->port_type;
10176 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
10177 port_type = CTL_PORT_SCSI;
10178
10179 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10180 cdb = (struct scsi_inquiry *)ctsio->cdb;
10181 alloc_len = scsi_2btoul(cdb->length);
10182
10183 /*
10184 * We malloc the full inquiry data size here and fill it
10185 * in. If the user only asks for less, we'll give him
10186 * that much.
10187 */
10188 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
10189 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10190 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
10191 ctsio->kern_sg_entries = 0;
10192 ctsio->kern_data_resid = 0;
10193 ctsio->kern_rel_offset = 0;
10194
10195 if (data_len < alloc_len) {
10196 ctsio->residual = alloc_len - data_len;
10197 ctsio->kern_data_len = data_len;
10198 ctsio->kern_total_len = data_len;
10199 } else {
10200 ctsio->residual = 0;
10201 ctsio->kern_data_len = alloc_len;
10202 ctsio->kern_total_len = alloc_len;
10203 }
10204
10205 if (lun != NULL) {
10206 if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
10207 softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
10208 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10209 lun->be_lun->lun_type;
10210 } else {
10211 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
10212 lun->be_lun->lun_type;
10213 }
10214 if (lun->flags & CTL_LUN_REMOVABLE)
10215 inq_ptr->dev_qual2 |= SID_RMB;
10216 } else
10217 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
10218
10219 /* RMB in byte 2 is 0 */
10220 inq_ptr->version = SCSI_REV_SPC5;
10221
10222 /*
10223 * According to SAM-3, even if a device only supports a single
10224 * level of LUN addressing, it should still set the HISUP bit:
10225 *
10226 * 4.9.1 Logical unit numbers overview
10227 *
10228 * All logical unit number formats described in this standard are
10229 * hierarchical in structure even when only a single level in that
10230 * hierarchy is used. The HISUP bit shall be set to one in the
10231 * standard INQUIRY data (see SPC-2) when any logical unit number
10232 * format described in this standard is used. Non-hierarchical
10233 * formats are outside the scope of this standard.
10234 *
10235 * Therefore we set the HiSup bit here.
10236 *
10237 * The response format is 2, per SPC-3.
10238 */
10239 inq_ptr->response_format = SID_HiSup | 2;
10240
10241 inq_ptr->additional_length = data_len -
10242 (offsetof(struct scsi_inquiry_data, additional_length) + 1);
10243 CTL_DEBUG_PRINT(("additional_length = %d\n",
10244 inq_ptr->additional_length));
10245
10246 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
10247 if (port_type == CTL_PORT_SCSI)
10248 inq_ptr->spc2_flags = SPC2_SID_ADDR16;
10249 inq_ptr->spc2_flags |= SPC2_SID_MultiP;
10250 inq_ptr->flags = SID_CmdQue;
10251 if (port_type == CTL_PORT_SCSI)
10252 inq_ptr->flags |= SID_WBus16 | SID_Sync;
10253
10254 /*
10255 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
10256 * We have 8 bytes for the vendor name, and 16 bytes for the device
10257 * name and 4 bytes for the revision.
10258 */
10259 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10260 "vendor")) == NULL) {
10261 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
10262 } else {
10263 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
10264 strncpy(inq_ptr->vendor, val,
10265 min(sizeof(inq_ptr->vendor), strlen(val)));
10266 }
10267 if (lun == NULL) {
10268 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10269 sizeof(inq_ptr->product));
10270 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
10271 switch (lun->be_lun->lun_type) {
10272 case T_DIRECT:
10273 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10274 sizeof(inq_ptr->product));
10275 break;
10276 case T_PROCESSOR:
10277 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
10278 sizeof(inq_ptr->product));
10279 break;
10280 case T_CDROM:
10281 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT,
10282 sizeof(inq_ptr->product));
10283 break;
10284 default:
10285 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
10286 sizeof(inq_ptr->product));
10287 break;
10288 }
10289 } else {
10290 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
10291 strncpy(inq_ptr->product, val,
10292 min(sizeof(inq_ptr->product), strlen(val)));
10293 }
10294
10295 /*
10296 * XXX make this a macro somewhere so it automatically gets
10297 * incremented when we make changes.
10298 */
10299 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10300 "revision")) == NULL) {
10301 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
10302 } else {
10303 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
10304 strncpy(inq_ptr->revision, val,
10305 min(sizeof(inq_ptr->revision), strlen(val)));
10306 }
10307
10308 /*
10309 * For parallel SCSI, we support double transition and single
10310 * transition clocking. We also support QAS (Quick Arbitration
10311 * and Selection) and Information Unit transfers on both the
10312 * control and array devices.
10313 */
10314 if (port_type == CTL_PORT_SCSI)
10315 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
10316 SID_SPI_IUS;
10317
10318 /* SAM-6 (no version claimed) */
10319 scsi_ulto2b(0x00C0, inq_ptr->version1);
10320 /* SPC-5 (no version claimed) */
10321 scsi_ulto2b(0x05C0, inq_ptr->version2);
10322 if (port_type == CTL_PORT_FC) {
10323 /* FCP-2 ANSI INCITS.350:2003 */
10324 scsi_ulto2b(0x0917, inq_ptr->version3);
10325 } else if (port_type == CTL_PORT_SCSI) {
10326 /* SPI-4 ANSI INCITS.362:200x */
10327 scsi_ulto2b(0x0B56, inq_ptr->version3);
10328 } else if (port_type == CTL_PORT_ISCSI) {
10329 /* iSCSI (no version claimed) */
10330 scsi_ulto2b(0x0960, inq_ptr->version3);
10331 } else if (port_type == CTL_PORT_SAS) {
10332 /* SAS (no version claimed) */
10333 scsi_ulto2b(0x0BE0, inq_ptr->version3);
10334 }
10335
10336 if (lun == NULL) {
10337 /* SBC-4 (no version claimed) */
10338 scsi_ulto2b(0x0600, inq_ptr->version4);
10339 } else {
10340 switch (lun->be_lun->lun_type) {
10341 case T_DIRECT:
10342 /* SBC-4 (no version claimed) */
10343 scsi_ulto2b(0x0600, inq_ptr->version4);
10344 break;
10345 case T_PROCESSOR:
10346 break;
10347 case T_CDROM:
10348 /* MMC-6 (no version claimed) */
10349 scsi_ulto2b(0x04E0, inq_ptr->version4);
10350 break;
10351 default:
10352 break;
10353 }
10354 }
10355
10356 ctl_set_success(ctsio);
10357 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10358 ctsio->be_move_done = ctl_config_move_done;
10359 ctl_datamove((union ctl_io *)ctsio);
10360 return (CTL_RETVAL_COMPLETE);
10361}
10362
10363int
10364ctl_inquiry(struct ctl_scsiio *ctsio)
10365{
10366 struct scsi_inquiry *cdb;
10367 int retval;
10368
10369 CTL_DEBUG_PRINT(("ctl_inquiry\n"));
10370
10371 cdb = (struct scsi_inquiry *)ctsio->cdb;
10372 if (cdb->byte2 & SI_EVPD)
10373 retval = ctl_inquiry_evpd(ctsio);
10374 else if (cdb->page_code == 0)
10375 retval = ctl_inquiry_std(ctsio);
10376 else {
10377 ctl_set_invalid_field(ctsio,
10378 /*sks_valid*/ 1,
10379 /*command*/ 1,
10380 /*field*/ 2,
10381 /*bit_valid*/ 0,
10382 /*bit*/ 0);
10383 ctl_done((union ctl_io *)ctsio);
10384 return (CTL_RETVAL_COMPLETE);
10385 }
10386
10387 return (retval);
10388}
10389
10390int
10391ctl_get_config(struct ctl_scsiio *ctsio)
10392{
10393 struct scsi_get_config_header *hdr;
10394 struct scsi_get_config_feature *feature;
10395 struct scsi_get_config *cdb;
10396 struct ctl_lun *lun;
10397 uint32_t alloc_len, data_len;
10398 int rt, starting;
10399
10400 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10401 cdb = (struct scsi_get_config *)ctsio->cdb;
10402 rt = (cdb->rt & SGC_RT_MASK);
10403 starting = scsi_2btoul(cdb->starting_feature);
10404 alloc_len = scsi_2btoul(cdb->length);
10405
10406 data_len = sizeof(struct scsi_get_config_header) +
10407 sizeof(struct scsi_get_config_feature) + 8 +
10408 sizeof(struct scsi_get_config_feature) + 8 +
10409 sizeof(struct scsi_get_config_feature) + 4 +
10410 sizeof(struct scsi_get_config_feature) + 4 +
10411 sizeof(struct scsi_get_config_feature) + 8 +
10412 sizeof(struct scsi_get_config_feature) +
10413 sizeof(struct scsi_get_config_feature) + 4 +
10414 sizeof(struct scsi_get_config_feature) + 4 +
10415 sizeof(struct scsi_get_config_feature) + 4 +
10416 sizeof(struct scsi_get_config_feature) + 4 +
10417 sizeof(struct scsi_get_config_feature) + 4 +
10418 sizeof(struct scsi_get_config_feature) + 4;
10419 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10420 ctsio->kern_sg_entries = 0;
10421 ctsio->kern_data_resid = 0;
10422 ctsio->kern_rel_offset = 0;
10423
10424 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
10425 if (lun->flags & CTL_LUN_NO_MEDIA)
10426 scsi_ulto2b(0x0000, hdr->current_profile);
10427 else
10428 scsi_ulto2b(0x0010, hdr->current_profile);
10429 feature = (struct scsi_get_config_feature *)(hdr + 1);
10430
10431 if (starting > 0x003b)
10432 goto done;
10433 if (starting > 0x003a)
10434 goto f3b;
10435 if (starting > 0x002b)
10436 goto f3a;
10437 if (starting > 0x002a)
10438 goto f2b;
10439 if (starting > 0x001f)
10440 goto f2a;
10441 if (starting > 0x001e)
10442 goto f1f;
10443 if (starting > 0x001d)
10444 goto f1e;
10445 if (starting > 0x0010)
10446 goto f1d;
10447 if (starting > 0x0003)
10448 goto f10;
10449 if (starting > 0x0002)
10450 goto f3;
10451 if (starting > 0x0001)
10452 goto f2;
10453 if (starting > 0x0000)
10454 goto f1;
10455
10456 /* Profile List */
10457 scsi_ulto2b(0x0000, feature->feature_code);
10458 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT;
10459 feature->add_length = 8;
10460 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */
10461 feature->feature_data[2] = 0x00;
10462 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */
10463 feature->feature_data[6] = 0x01;
10464 feature = (struct scsi_get_config_feature *)
10465 &feature->feature_data[feature->add_length];
10466
10467f1: /* Core */
10468 scsi_ulto2b(0x0001, feature->feature_code);
10469 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10470 feature->add_length = 8;
10471 scsi_ulto4b(0x00000000, &feature->feature_data[0]);
10472 feature->feature_data[4] = 0x03;
10473 feature = (struct scsi_get_config_feature *)
10474 &feature->feature_data[feature->add_length];
10475
10476f2: /* Morphing */
10477 scsi_ulto2b(0x0002, feature->feature_code);
10478 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10479 feature->add_length = 4;
10480 feature->feature_data[0] = 0x02;
10481 feature = (struct scsi_get_config_feature *)
10482 &feature->feature_data[feature->add_length];
10483
10484f3: /* Removable Medium */
10485 scsi_ulto2b(0x0003, feature->feature_code);
10486 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10487 feature->add_length = 4;
10488 feature->feature_data[0] = 0x39;
10489 feature = (struct scsi_get_config_feature *)
10490 &feature->feature_data[feature->add_length];
10491
10492 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA))
10493 goto done;
10494
10495f10: /* Random Read */
10496 scsi_ulto2b(0x0010, feature->feature_code);
10497 feature->flags = 0x00;
10498 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10499 feature->flags |= SGC_F_CURRENT;
10500 feature->add_length = 8;
10501 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]);
10502 scsi_ulto2b(1, &feature->feature_data[4]);
10503 feature->feature_data[6] = 0x00;
10504 feature = (struct scsi_get_config_feature *)
10505 &feature->feature_data[feature->add_length];
10506
10507f1d: /* Multi-Read */
10508 scsi_ulto2b(0x001D, feature->feature_code);
10509 feature->flags = 0x00;
10510 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10511 feature->flags |= SGC_F_CURRENT;
10512 feature->add_length = 0;
10513 feature = (struct scsi_get_config_feature *)
10514 &feature->feature_data[feature->add_length];
10515
10516f1e: /* CD Read */
10517 scsi_ulto2b(0x001E, feature->feature_code);
10518 feature->flags = 0x00;
10519 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10520 feature->flags |= SGC_F_CURRENT;
10521 feature->add_length = 4;
10522 feature->feature_data[0] = 0x00;
10523 feature = (struct scsi_get_config_feature *)
10524 &feature->feature_data[feature->add_length];
10525
10526f1f: /* DVD Read */
10527 scsi_ulto2b(0x001F, feature->feature_code);
10528 feature->flags = 0x08;
10529 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10530 feature->flags |= SGC_F_CURRENT;
10531 feature->add_length = 4;
10532 feature->feature_data[0] = 0x01;
10533 feature->feature_data[2] = 0x03;
10534 feature = (struct scsi_get_config_feature *)
10535 &feature->feature_data[feature->add_length];
10536
10537f2a: /* DVD+RW */
10538 scsi_ulto2b(0x002A, feature->feature_code);
10539 feature->flags = 0x04;
10540 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10541 feature->flags |= SGC_F_CURRENT;
10542 feature->add_length = 4;
10543 feature->feature_data[0] = 0x00;
10544 feature->feature_data[1] = 0x00;
10545 feature = (struct scsi_get_config_feature *)
10546 &feature->feature_data[feature->add_length];
10547
10548f2b: /* DVD+R */
10549 scsi_ulto2b(0x002B, feature->feature_code);
10550 feature->flags = 0x00;
10551 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10552 feature->flags |= SGC_F_CURRENT;
10553 feature->add_length = 4;
10554 feature->feature_data[0] = 0x00;
10555 feature = (struct scsi_get_config_feature *)
10556 &feature->feature_data[feature->add_length];
10557
10558f3a: /* DVD+RW Dual Layer */
10559 scsi_ulto2b(0x003A, feature->feature_code);
10560 feature->flags = 0x00;
10561 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10562 feature->flags |= SGC_F_CURRENT;
10563 feature->add_length = 4;
10564 feature->feature_data[0] = 0x00;
10565 feature->feature_data[1] = 0x00;
10566 feature = (struct scsi_get_config_feature *)
10567 &feature->feature_data[feature->add_length];
10568
10569f3b: /* DVD+R Dual Layer */
10570 scsi_ulto2b(0x003B, feature->feature_code);
10571 feature->flags = 0x00;
10572 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10573 feature->flags |= SGC_F_CURRENT;
10574 feature->add_length = 4;
10575 feature->feature_data[0] = 0x00;
10576 feature = (struct scsi_get_config_feature *)
10577 &feature->feature_data[feature->add_length];
10578
10579done:
10580 data_len = (uint8_t *)feature - (uint8_t *)hdr;
10581 if (rt == SGC_RT_SPECIFIC && data_len > 4) {
10582 feature = (struct scsi_get_config_feature *)(hdr + 1);
10583 if (scsi_2btoul(feature->feature_code) == starting)
10584 feature = (struct scsi_get_config_feature *)
10585 &feature->feature_data[feature->add_length];
10586 data_len = (uint8_t *)feature - (uint8_t *)hdr;
10587 }
10588 scsi_ulto4b(data_len - 4, hdr->data_length);
10589 if (data_len < alloc_len) {
10590 ctsio->residual = alloc_len - data_len;
10591 ctsio->kern_data_len = data_len;
10592 ctsio->kern_total_len = data_len;
10593 } else {
10594 ctsio->residual = 0;
10595 ctsio->kern_data_len = alloc_len;
10596 ctsio->kern_total_len = alloc_len;
10597 }
10598
10599 ctl_set_success(ctsio);
10600 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10601 ctsio->be_move_done = ctl_config_move_done;
10602 ctl_datamove((union ctl_io *)ctsio);
10603 return (CTL_RETVAL_COMPLETE);
10604}
10605
10606int
10607ctl_get_event_status(struct ctl_scsiio *ctsio)
10608{
10609 struct scsi_get_event_status_header *hdr;
10610 struct scsi_get_event_status *cdb;
10611 struct ctl_lun *lun;
10612 uint32_t alloc_len, data_len;
10613 int notif_class;
10614
10615 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10616 cdb = (struct scsi_get_event_status *)ctsio->cdb;
10617 if ((cdb->byte2 & SGESN_POLLED) == 0) {
10618 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
10619 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
10620 ctl_done((union ctl_io *)ctsio);
10621 return (CTL_RETVAL_COMPLETE);
10622 }
10623 notif_class = cdb->notif_class;
10624 alloc_len = scsi_2btoul(cdb->length);
10625
10626 data_len = sizeof(struct scsi_get_event_status_header);
10627 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10628 ctsio->kern_sg_entries = 0;
10629 ctsio->kern_data_resid = 0;
10630 ctsio->kern_rel_offset = 0;
10631
10632 if (data_len < alloc_len) {
10633 ctsio->residual = alloc_len - data_len;
10634 ctsio->kern_data_len = data_len;
10635 ctsio->kern_total_len = data_len;
10636 } else {
10637 ctsio->residual = 0;
10638 ctsio->kern_data_len = alloc_len;
10639 ctsio->kern_total_len = alloc_len;
10640 }
10641
10642 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
10643 scsi_ulto2b(0, hdr->descr_length);
10644 hdr->nea_class = SGESN_NEA;
10645 hdr->supported_class = 0;
10646
10647 ctl_set_success(ctsio);
10648 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10649 ctsio->be_move_done = ctl_config_move_done;
10650 ctl_datamove((union ctl_io *)ctsio);
10651 return (CTL_RETVAL_COMPLETE);
10652}
10653
10654int
10655ctl_mechanism_status(struct ctl_scsiio *ctsio)
10656{
10657 struct scsi_mechanism_status_header *hdr;
10658 struct scsi_mechanism_status *cdb;
10659 struct ctl_lun *lun;
10660 uint32_t alloc_len, data_len;
10661
10662 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10663 cdb = (struct scsi_mechanism_status *)ctsio->cdb;
10664 alloc_len = scsi_2btoul(cdb->length);
10665
10666 data_len = sizeof(struct scsi_mechanism_status_header);
10667 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10668 ctsio->kern_sg_entries = 0;
10669 ctsio->kern_data_resid = 0;
10670 ctsio->kern_rel_offset = 0;
10671
10672 if (data_len < alloc_len) {
10673 ctsio->residual = alloc_len - data_len;
10674 ctsio->kern_data_len = data_len;
10675 ctsio->kern_total_len = data_len;
10676 } else {
10677 ctsio->residual = 0;
10678 ctsio->kern_data_len = alloc_len;
10679 ctsio->kern_total_len = alloc_len;
10680 }
10681
10682 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
10683 hdr->state1 = 0x00;
10684 hdr->state2 = 0xe0;
10685 scsi_ulto3b(0, hdr->lba);
10686 hdr->slots_num = 0;
10687 scsi_ulto2b(0, hdr->slots_length);
10688
10689 ctl_set_success(ctsio);
10690 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10691 ctsio->be_move_done = ctl_config_move_done;
10692 ctl_datamove((union ctl_io *)ctsio);
10693 return (CTL_RETVAL_COMPLETE);
10694}
10695
10696static void
10697ctl_ultomsf(uint32_t lba, uint8_t *buf)
10698{
10699
10700 lba += 150;
10701 buf[0] = 0;
10702 buf[1] = bin2bcd((lba / 75) / 60);
10703 buf[2] = bin2bcd((lba / 75) % 60);
10704 buf[3] = bin2bcd(lba % 75);
10705}
10706
10707int
10708ctl_read_toc(struct ctl_scsiio *ctsio)
10709{
10710 struct scsi_read_toc_hdr *hdr;
10711 struct scsi_read_toc_type01_descr *descr;
10712 struct scsi_read_toc *cdb;
10713 struct ctl_lun *lun;
10714 uint32_t alloc_len, data_len;
10715 int format, msf;
10716
10717 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10718 cdb = (struct scsi_read_toc *)ctsio->cdb;
10719 msf = (cdb->byte2 & CD_MSF) != 0;
10720 format = cdb->format;
10721 alloc_len = scsi_2btoul(cdb->data_len);
10722
10723 data_len = sizeof(struct scsi_read_toc_hdr);
10724 if (format == 0)
10725 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr);
10726 else
10727 data_len += sizeof(struct scsi_read_toc_type01_descr);
10728 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10729 ctsio->kern_sg_entries = 0;
10730 ctsio->kern_data_resid = 0;
10731 ctsio->kern_rel_offset = 0;
10732
10733 if (data_len < alloc_len) {
10734 ctsio->residual = alloc_len - data_len;
10735 ctsio->kern_data_len = data_len;
10736 ctsio->kern_total_len = data_len;
10737 } else {
10738 ctsio->residual = 0;
10739 ctsio->kern_data_len = alloc_len;
10740 ctsio->kern_total_len = alloc_len;
10741 }
10742
10743 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
10744 if (format == 0) {
10745 scsi_ulto2b(0x12, hdr->data_length);
10746 hdr->first = 1;
10747 hdr->last = 1;
10748 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
10749 descr->addr_ctl = 0x14;
10750 descr->track_number = 1;
10751 if (msf)
10752 ctl_ultomsf(0, descr->track_start);
10753 else
10754 scsi_ulto4b(0, descr->track_start);
10755 descr++;
10756 descr->addr_ctl = 0x14;
10757 descr->track_number = 0xaa;
10758 if (msf)
10759 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start);
10760 else
10761 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start);
10762 } else {
10763 scsi_ulto2b(0x0a, hdr->data_length);
10764 hdr->first = 1;
10765 hdr->last = 1;
10766 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
10767 descr->addr_ctl = 0x14;
10768 descr->track_number = 1;
10769 if (msf)
10770 ctl_ultomsf(0, descr->track_start);
10771 else
10772 scsi_ulto4b(0, descr->track_start);
10773 }
10774
10775 ctl_set_success(ctsio);
10776 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10777 ctsio->be_move_done = ctl_config_move_done;
10778 ctl_datamove((union ctl_io *)ctsio);
10779 return (CTL_RETVAL_COMPLETE);
10780}
10781
10782/*
10783 * For known CDB types, parse the LBA and length.
10784 */
10785static int
10786ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
10787{
10788 if (io->io_hdr.io_type != CTL_IO_SCSI)
10789 return (1);
10790
10791 switch (io->scsiio.cdb[0]) {
10792 case COMPARE_AND_WRITE: {
10793 struct scsi_compare_and_write *cdb;
10794
10795 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
10796
10797 *lba = scsi_8btou64(cdb->addr);
10798 *len = cdb->length;
10799 break;
10800 }
10801 case READ_6:
10802 case WRITE_6: {
10803 struct scsi_rw_6 *cdb;
10804
10805 cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
10806
10807 *lba = scsi_3btoul(cdb->addr);
10808 /* only 5 bits are valid in the most significant address byte */
10809 *lba &= 0x1fffff;
10810 *len = cdb->length;
10811 break;
10812 }
10813 case READ_10:
10814 case WRITE_10: {
10815 struct scsi_rw_10 *cdb;
10816
10817 cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
10818
10819 *lba = scsi_4btoul(cdb->addr);
10820 *len = scsi_2btoul(cdb->length);
10821 break;
10822 }
10823 case WRITE_VERIFY_10: {
10824 struct scsi_write_verify_10 *cdb;
10825
10826 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
10827
10828 *lba = scsi_4btoul(cdb->addr);
10829 *len = scsi_2btoul(cdb->length);
10830 break;
10831 }
10832 case READ_12:
10833 case WRITE_12: {
10834 struct scsi_rw_12 *cdb;
10835
10836 cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
10837
10838 *lba = scsi_4btoul(cdb->addr);
10839 *len = scsi_4btoul(cdb->length);
10840 break;
10841 }
10842 case WRITE_VERIFY_12: {
10843 struct scsi_write_verify_12 *cdb;
10844
10845 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
10846
10847 *lba = scsi_4btoul(cdb->addr);
10848 *len = scsi_4btoul(cdb->length);
10849 break;
10850 }
10851 case READ_16:
10852 case WRITE_16: {
10853 struct scsi_rw_16 *cdb;
10854
10855 cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
10856
10857 *lba = scsi_8btou64(cdb->addr);
10858 *len = scsi_4btoul(cdb->length);
10859 break;
10860 }
10861 case WRITE_ATOMIC_16: {
10862 struct scsi_write_atomic_16 *cdb;
10863
10864 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb;
10865
10866 *lba = scsi_8btou64(cdb->addr);
10867 *len = scsi_2btoul(cdb->length);
10868 break;
10869 }
10870 case WRITE_VERIFY_16: {
10871 struct scsi_write_verify_16 *cdb;
10872
10873 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
10874
10875 *lba = scsi_8btou64(cdb->addr);
10876 *len = scsi_4btoul(cdb->length);
10877 break;
10878 }
10879 case WRITE_SAME_10: {
10880 struct scsi_write_same_10 *cdb;
10881
10882 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;
10883
10884 *lba = scsi_4btoul(cdb->addr);
10885 *len = scsi_2btoul(cdb->length);
10886 break;
10887 }
10888 case WRITE_SAME_16: {
10889 struct scsi_write_same_16 *cdb;
10890
10891 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;
10892
10893 *lba = scsi_8btou64(cdb->addr);
10894 *len = scsi_4btoul(cdb->length);
10895 break;
10896 }
10897 case VERIFY_10: {
10898 struct scsi_verify_10 *cdb;
10899
10900 cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
10901
10902 *lba = scsi_4btoul(cdb->addr);
10903 *len = scsi_2btoul(cdb->length);
10904 break;
10905 }
10906 case VERIFY_12: {
10907 struct scsi_verify_12 *cdb;
10908
10909 cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
10910
10911 *lba = scsi_4btoul(cdb->addr);
10912 *len = scsi_4btoul(cdb->length);
10913 break;
10914 }
10915 case VERIFY_16: {
10916 struct scsi_verify_16 *cdb;
10917
10918 cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
10919
10920 *lba = scsi_8btou64(cdb->addr);
10921 *len = scsi_4btoul(cdb->length);
10922 break;
10923 }
10924 case UNMAP: {
10925 *lba = 0;
10926 *len = UINT64_MAX;
10927 break;
10928 }
10929 case SERVICE_ACTION_IN: { /* GET LBA STATUS */
10930 struct scsi_get_lba_status *cdb;
10931
10932 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
10933 *lba = scsi_8btou64(cdb->addr);
10934 *len = UINT32_MAX;
10935 break;
10936 }
10937 default:
10938 return (1);
10939 break; /* NOTREACHED */
10940 }
10941
10942 return (0);
10943}
10944
10945static ctl_action
10946ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
10947 bool seq)
10948{
10949 uint64_t endlba1, endlba2;
10950
10951 endlba1 = lba1 + len1 - (seq ? 0 : 1);
10952 endlba2 = lba2 + len2 - 1;
10953
10954 if ((endlba1 < lba2) || (endlba2 < lba1))
10955 return (CTL_ACTION_PASS);
10956 else
10957 return (CTL_ACTION_BLOCK);
10958}
10959
10960static int
10961ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
10962{
10963 struct ctl_ptr_len_flags *ptrlen;
10964 struct scsi_unmap_desc *buf, *end, *range;
10965 uint64_t lba;
10966 uint32_t len;
10967
10968 /* If not UNMAP -- go other way. */
10969 if (io->io_hdr.io_type != CTL_IO_SCSI ||
10970 io->scsiio.cdb[0] != UNMAP)
10971 return (CTL_ACTION_ERROR);
10972
10973 /* If UNMAP without data -- block and wait for data. */
10974 ptrlen = (struct ctl_ptr_len_flags *)
10975 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
10976 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
10977 ptrlen->ptr == NULL)
10978 return (CTL_ACTION_BLOCK);
10979
10980 /* UNMAP with data -- check for collision. */
10981 buf = (struct scsi_unmap_desc *)ptrlen->ptr;
10982 end = buf + ptrlen->len / sizeof(*buf);
10983 for (range = buf; range < end; range++) {
10984 lba = scsi_8btou64(range->lba);
10985 len = scsi_4btoul(range->length);
10986 if ((lba < lba2 + len2) && (lba + len > lba2))
10987 return (CTL_ACTION_BLOCK);
10988 }
10989 return (CTL_ACTION_PASS);
10990}
10991
10992static ctl_action
10993ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
10994{
10995 uint64_t lba1, lba2;
10996 uint64_t len1, len2;
10997 int retval;
10998
10999 if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
11000 return (CTL_ACTION_ERROR);
11001
11002 retval = ctl_extent_check_unmap(io1, lba2, len2);
11003 if (retval != CTL_ACTION_ERROR)
11004 return (retval);
11005
11006 if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
11007 return (CTL_ACTION_ERROR);
11008
11009 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
11010 seq = FALSE;
11011 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
11012}
11013
11014static ctl_action
11015ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
11016{
11017 uint64_t lba1, lba2;
11018 uint64_t len1, len2;
11019
11020 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
11021 return (CTL_ACTION_PASS);
11022 if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
11023 return (CTL_ACTION_ERROR);
11024 if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
11025 return (CTL_ACTION_ERROR);
11026
11027 if (lba1 + len1 == lba2)
11028 return (CTL_ACTION_BLOCK);
11029 return (CTL_ACTION_PASS);
11030}
11031
11032static ctl_action
11033ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
11034 union ctl_io *ooa_io)
11035{
11036 const struct ctl_cmd_entry *pending_entry, *ooa_entry;
11037 const ctl_serialize_action *serialize_row;
11038
11039 /*
11040 * The initiator attempted multiple untagged commands at the same
11041 * time. Can't do that.
11042 */
11043 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
11044 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
11045 && ((pending_io->io_hdr.nexus.targ_port ==
11046 ooa_io->io_hdr.nexus.targ_port)
11047 && (pending_io->io_hdr.nexus.initid ==
11048 ooa_io->io_hdr.nexus.initid))
11049 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
11050 CTL_FLAG_STATUS_SENT)) == 0))
11051 return (CTL_ACTION_OVERLAP);
11052
11053 /*
11054 * The initiator attempted to send multiple tagged commands with
11055 * the same ID. (It's fine if different initiators have the same
11056 * tag ID.)
11057 *
11058 * Even if all of those conditions are true, we don't kill the I/O
11059 * if the command ahead of us has been aborted. We won't end up
11060 * sending it to the FETD, and it's perfectly legal to resend a
11061 * command with the same tag number as long as the previous
11062 * instance of this tag number has been aborted somehow.
11063 */
11064 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
11065 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
11066 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
11067 && ((pending_io->io_hdr.nexus.targ_port ==
11068 ooa_io->io_hdr.nexus.targ_port)
11069 && (pending_io->io_hdr.nexus.initid ==
11070 ooa_io->io_hdr.nexus.initid))
11071 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
11072 CTL_FLAG_STATUS_SENT)) == 0))
11073 return (CTL_ACTION_OVERLAP_TAG);
11074
11075 /*
11076 * If we get a head of queue tag, SAM-3 says that we should
11077 * immediately execute it.
11078 *
11079 * What happens if this command would normally block for some other
11080 * reason? e.g. a request sense with a head of queue tag
11081 * immediately after a write. Normally that would block, but this
11082 * will result in its getting executed immediately...
11083 *
11084 * We currently return "pass" instead of "skip", so we'll end up
11085 * going through the rest of the queue to check for overlapped tags.
11086 *
11087 * XXX KDM check for other types of blockage first??
11088 */
11089 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
11090 return (CTL_ACTION_PASS);
11091
11092 /*
11093 * Ordered tags have to block until all items ahead of them
11094 * have completed. If we get called with an ordered tag, we always
11095 * block, if something else is ahead of us in the queue.
11096 */
11097 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
11098 return (CTL_ACTION_BLOCK);
11099
11100 /*
11101 * Simple tags get blocked until all head of queue and ordered tags
11102 * ahead of them have completed. I'm lumping untagged commands in
11103 * with simple tags here. XXX KDM is that the right thing to do?
11104 */
11105 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
11106 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
11107 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
11108 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
11109 return (CTL_ACTION_BLOCK);
11110
11111 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
11112 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT,
11113 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p",
11114 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0],
11115 pending_io->scsiio.cdb[1], pending_io));
11116 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
11117 if (ooa_entry->seridx == CTL_SERIDX_INVLD)
11118 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */
11119 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT,
11120 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p",
11121 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0],
11122 ooa_io->scsiio.cdb[1], ooa_io));
11123
11124 serialize_row = ctl_serialize_table[ooa_entry->seridx];
11125
11126 switch (serialize_row[pending_entry->seridx]) {
11127 case CTL_SER_BLOCK:
11128 return (CTL_ACTION_BLOCK);
11129 case CTL_SER_EXTENT:
11130 return (ctl_extent_check(ooa_io, pending_io,
11131 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
11132 case CTL_SER_EXTENTOPT:
11133 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
11134 SCP_QUEUE_ALG_UNRESTRICTED)
11135 return (ctl_extent_check(ooa_io, pending_io,
11136 (lun->be_lun &&
11137 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
11138 return (CTL_ACTION_PASS);
11139 case CTL_SER_EXTENTSEQ:
11140 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
11141 return (ctl_extent_check_seq(ooa_io, pending_io));
11142 return (CTL_ACTION_PASS);
11143 case CTL_SER_PASS:
11144 return (CTL_ACTION_PASS);
11145 case CTL_SER_BLOCKOPT:
11146 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
11147 SCP_QUEUE_ALG_UNRESTRICTED)
11148 return (CTL_ACTION_BLOCK);
11149 return (CTL_ACTION_PASS);
11150 case CTL_SER_SKIP:
11151 return (CTL_ACTION_SKIP);
11152 default:
11153 panic("%s: Invalid serialization value %d for %d => %d",
11154 __func__, serialize_row[pending_entry->seridx],
11155 pending_entry->seridx, ooa_entry->seridx);
11156 }
11157
11158 return (CTL_ACTION_ERROR);
11159}
11160
11161/*
11162 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
11163 * Assumptions:
11164 * - pending_io is generally either incoming, or on the blocked queue
11165 * - starting I/O is the I/O we want to start the check with.
11166 */
11167static ctl_action
11168ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
11169 union ctl_io *starting_io)
11170{
11171 union ctl_io *ooa_io;
11172 ctl_action action;
11173
11174 mtx_assert(&lun->lun_lock, MA_OWNED);
11175
11176 /*
11177 * Run back along the OOA queue, starting with the current
11178 * blocked I/O and going through every I/O before it on the
11179 * queue. If starting_io is NULL, we'll just end up returning
11180 * CTL_ACTION_PASS.
11181 */
11182 for (ooa_io = starting_io; ooa_io != NULL;
11183 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
11184 ooa_links)){
11185
11186 /*
11187 * This routine just checks to see whether
11188 * cur_blocked is blocked by ooa_io, which is ahead
11189 * of it in the queue. It doesn't queue/dequeue
11190 * cur_blocked.
11191 */
11192 action = ctl_check_for_blockage(lun, pending_io, ooa_io);
11193 switch (action) {
11194 case CTL_ACTION_BLOCK:
11195 case CTL_ACTION_OVERLAP:
11196 case CTL_ACTION_OVERLAP_TAG:
11197 case CTL_ACTION_SKIP:
11198 case CTL_ACTION_ERROR:
11199 return (action);
11200 break; /* NOTREACHED */
11201 case CTL_ACTION_PASS:
11202 break;
11203 default:
11204 panic("%s: Invalid action %d\n", __func__, action);
11205 }
11206 }
11207
11208 return (CTL_ACTION_PASS);
11209}
11210
11211/*
11212 * Assumptions:
11213 * - An I/O has just completed, and has been removed from the per-LUN OOA
11214 * queue, so some items on the blocked queue may now be unblocked.
11215 */
11216static int
11217ctl_check_blocked(struct ctl_lun *lun)
11218{
11219 struct ctl_softc *softc = lun->ctl_softc;
11220 union ctl_io *cur_blocked, *next_blocked;
11221
11222 mtx_assert(&lun->lun_lock, MA_OWNED);
11223
11224 /*
11225 * Run forward from the head of the blocked queue, checking each
11226 * entry against the I/Os prior to it on the OOA queue to see if
11227 * there is still any blockage.
11228 *
11229 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
11230 * with our removing a variable on it while it is traversing the
11231 * list.
11232 */
11233 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
11234 cur_blocked != NULL; cur_blocked = next_blocked) {
11235 union ctl_io *prev_ooa;
11236 ctl_action action;
11237
11238 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
11239 blocked_links);
11240
11241 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
11242 ctl_ooaq, ooa_links);
11243
11244 /*
11245 * If cur_blocked happens to be the first item in the OOA
11246 * queue now, prev_ooa will be NULL, and the action
11247 * returned will just be CTL_ACTION_PASS.
11248 */
11249 action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
11250
11251 switch (action) {
11252 case CTL_ACTION_BLOCK:
11253 /* Nothing to do here, still blocked */
11254 break;
11255 case CTL_ACTION_OVERLAP:
11256 case CTL_ACTION_OVERLAP_TAG:
11257 /*
11258 * This shouldn't happen! In theory we've already
11259 * checked this command for overlap...
11260 */
11261 break;
11262 case CTL_ACTION_PASS:
11263 case CTL_ACTION_SKIP: {
11264 const struct ctl_cmd_entry *entry;
11265
11266 /*
11267 * The skip case shouldn't happen, this transaction
11268 * should have never made it onto the blocked queue.
11269 */
11270 /*
11271 * This I/O is no longer blocked, we can remove it
11272 * from the blocked queue. Since this is a TAILQ
11273 * (doubly linked list), we can do O(1) removals
11274 * from any place on the list.
11275 */
11276 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
11277 blocked_links);
11278 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
11279
11280 if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
11281 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
11282 /*
11283 * Need to send IO back to original side to
11284 * run
11285 */
11286 union ctl_ha_msg msg_info;
11287
11288 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11289 msg_info.hdr.original_sc =
11290 cur_blocked->io_hdr.original_sc;
11291 msg_info.hdr.serializing_sc = cur_blocked;
11292 msg_info.hdr.msg_type = CTL_MSG_R2R;
11293 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11294 sizeof(msg_info.hdr), M_NOWAIT);
11295 break;
11296 }
11297 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
11298
11299 /*
11300 * Check this I/O for LUN state changes that may
11301 * have happened while this command was blocked.
11302 * The LUN state may have been changed by a command
11303 * ahead of us in the queue, so we need to re-check
11304 * for any states that can be caused by SCSI
11305 * commands.
11306 */
11307 if (ctl_scsiio_lun_check(lun, entry,
11308 &cur_blocked->scsiio) == 0) {
11309 cur_blocked->io_hdr.flags |=
11310 CTL_FLAG_IS_WAS_ON_RTR;
11311 ctl_enqueue_rtr(cur_blocked);
11312 } else
11313 ctl_done(cur_blocked);
11314 break;
11315 }
11316 default:
11317 /*
11318 * This probably shouldn't happen -- we shouldn't
11319 * get CTL_ACTION_ERROR, or anything else.
11320 */
11321 break;
11322 }
11323 }
11324
11325 return (CTL_RETVAL_COMPLETE);
11326}
11327
11328/*
11329 * This routine (with one exception) checks LUN flags that can be set by
11330 * commands ahead of us in the OOA queue. These flags have to be checked
11331 * when a command initially comes in, and when we pull a command off the
11332 * blocked queue and are preparing to execute it. The reason we have to
11333 * check these flags for commands on the blocked queue is that the LUN
11334 * state may have been changed by a command ahead of us while we're on the
11335 * blocked queue.
11336 *
11337 * Ordering is somewhat important with these checks, so please pay
11338 * careful attention to the placement of any new checks.
11339 */
11340static int
11341ctl_scsiio_lun_check(struct ctl_lun *lun,
11342 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
11343{
11344 struct ctl_softc *softc = lun->ctl_softc;
11345 int retval;
11346 uint32_t residx;
11347
11348 retval = 0;
11349
11350 mtx_assert(&lun->lun_lock, MA_OWNED);
11351
11352 /*
11353 * If this shelf is a secondary shelf controller, we may have to
11354 * reject some commands disallowed by HA mode and link state.
11355 */
11356 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
11357 if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
11358 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
11359 ctl_set_lun_unavail(ctsio);
11360 retval = 1;
11361 goto bailout;
11362 }
11363 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
11364 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
11365 ctl_set_lun_transit(ctsio);
11366 retval = 1;
11367 goto bailout;
11368 }
11369 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
11370 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
11371 ctl_set_lun_standby(ctsio);
11372 retval = 1;
11373 goto bailout;
11374 }
11375
11376 /* The rest of checks are only done on executing side */
11377 if (softc->ha_mode == CTL_HA_MODE_XFER)
11378 goto bailout;
11379 }
11380
11381 if (entry->pattern & CTL_LUN_PAT_WRITE) {
11382 if (lun->be_lun &&
11383 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
11384 ctl_set_hw_write_protected(ctsio);
11385 retval = 1;
11386 goto bailout;
11387 }
11388 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) {
11389 ctl_set_sense(ctsio, /*current_error*/ 1,
11390 /*sense_key*/ SSD_KEY_DATA_PROTECT,
11391 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
11392 retval = 1;
11393 goto bailout;
11394 }
11395 }
11396
11397 /*
11398 * Check for a reservation conflict. If this command isn't allowed
11399 * even on reserved LUNs, and if this initiator isn't the one who
11400 * reserved us, reject the command with a reservation conflict.
11401 */
11402 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11403 if ((lun->flags & CTL_LUN_RESERVED)
11404 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
11405 if (lun->res_idx != residx) {
11406 ctl_set_reservation_conflict(ctsio);
11407 retval = 1;
11408 goto bailout;
11409 }
11410 }
11411
11412 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
11413 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
11414 /* No reservation or command is allowed. */;
11415 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
11416 (lun->pr_res_type == SPR_TYPE_WR_EX ||
11417 lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
11418 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) {
11419 /* The command is allowed for Write Exclusive resv. */;
11420 } else {
11421 /*
11422 * if we aren't registered or it's a res holder type
11423 * reservation and this isn't the res holder then set a
11424 * conflict.
11425 */
11426 if (ctl_get_prkey(lun, residx) == 0 ||
11427 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) {
11428 ctl_set_reservation_conflict(ctsio);
11429 retval = 1;
11430 goto bailout;
11431 }
11432 }
11433
11434 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) {
11435 if (lun->flags & CTL_LUN_EJECTED)
11436 ctl_set_lun_ejected(ctsio);
11437 else if (lun->flags & CTL_LUN_NO_MEDIA) {
11438 if (lun->flags & CTL_LUN_REMOVABLE)
11439 ctl_set_lun_no_media(ctsio);
11440 else
11441 ctl_set_lun_int_reqd(ctsio);
11442 } else if (lun->flags & CTL_LUN_STOPPED)
11443 ctl_set_lun_stopped(ctsio);
11444 else
11445 goto bailout;
11446 retval = 1;
11447 goto bailout;
11448 }
11449
11450bailout:
11451 return (retval);
11452}
11453
11454static void
11455ctl_failover_io(union ctl_io *io, int have_lock)
11456{
11457 ctl_set_busy(&io->scsiio);
11458 ctl_done(io);
11459}
11460
11461static void
11462ctl_failover_lun(union ctl_io *rio)
11463{
11464 struct ctl_softc *softc = control_softc;
11465 struct ctl_lun *lun;
11466 struct ctl_io_hdr *io, *next_io;
11467 uint32_t targ_lun;
11468
11469 targ_lun = rio->io_hdr.nexus.targ_mapped_lun;
11470 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun));
11471
11472 /* Find and lock the LUN. */
11473 mtx_lock(&softc->ctl_lock);
11474 if (targ_lun > CTL_MAX_LUNS ||
11475 (lun = softc->ctl_luns[targ_lun]) == NULL) {
11476 mtx_unlock(&softc->ctl_lock);
11477 return;
11478 }
11479 mtx_lock(&lun->lun_lock);
11480 mtx_unlock(&softc->ctl_lock);
11481 if (lun->flags & CTL_LUN_DISABLED) {
11482 mtx_unlock(&lun->lun_lock);
11483 return;
11484 }
11485
11486 if (softc->ha_mode == CTL_HA_MODE_XFER) {
11487 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
11488 /* We are master */
11489 if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
11490 if (io->flags & CTL_FLAG_IO_ACTIVE) {
11491 io->flags |= CTL_FLAG_ABORT;
11492 io->flags |= CTL_FLAG_FAILOVER;
11493 } else { /* This can be only due to DATAMOVE */
11494 io->msg_type = CTL_MSG_DATAMOVE_DONE;
11495 io->flags &= ~CTL_FLAG_DMA_INPROG;
11496 io->flags |= CTL_FLAG_IO_ACTIVE;
11497 io->port_status = 31340;
11498 ctl_enqueue_isc((union ctl_io *)io);
11499 }
11500 }
11501 /* We are slave */
11502 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
11503 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11504 if (io->flags & CTL_FLAG_IO_ACTIVE) {
11505 io->flags |= CTL_FLAG_FAILOVER;
11506 } else {
11507 ctl_set_busy(&((union ctl_io *)io)->
11508 scsiio);
11509 ctl_done((union ctl_io *)io);
11510 }
11511 }
11512 }
11513 } else { /* SERIALIZE modes */
11514 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
11515 next_io) {
11516 /* We are master */
11517 if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
11518 TAILQ_REMOVE(&lun->blocked_queue, io,
11519 blocked_links);
11520 io->flags &= ~CTL_FLAG_BLOCKED;
11521 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
11522 ctl_free_io((union ctl_io *)io);
11523 }
11524 }
11525 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
11526 /* We are master */
11527 if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
11528 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
11529 ctl_free_io((union ctl_io *)io);
11530 }
11531 /* We are slave */
11532 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
11533 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11534 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
11535 ctl_set_busy(&((union ctl_io *)io)->
11536 scsiio);
11537 ctl_done((union ctl_io *)io);
11538 }
11539 }
11540 }
11541 ctl_check_blocked(lun);
11542 }
11543 mtx_unlock(&lun->lun_lock);
11544}
11545
11546static int
11547ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
11548{
11549 struct ctl_lun *lun;
11550 const struct ctl_cmd_entry *entry;
11551 uint32_t initidx, targ_lun;
11552 int retval = 0;
11553
11554 lun = NULL;
11555 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
11556 if (targ_lun < CTL_MAX_LUNS)
11557 lun = softc->ctl_luns[targ_lun];
11558 if (lun) {
11559 /*
11560 * If the LUN is invalid, pretend that it doesn't exist.
11561 * It will go away as soon as all pending I/O has been
11562 * completed.
11563 */
11564 mtx_lock(&lun->lun_lock);
11565 if (lun->flags & CTL_LUN_DISABLED) {
11566 mtx_unlock(&lun->lun_lock);
11567 lun = NULL;
11568 }
11569 }
11570 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
11571 if (lun) {
11572 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
11573 lun->be_lun;
11574
11575 /*
11576 * Every I/O goes into the OOA queue for a particular LUN,
11577 * and stays there until completion.
11578 */
11579#ifdef CTL_TIME_IO
11580 if (TAILQ_EMPTY(&lun->ooa_queue))
11581 lun->idle_time += getsbinuptime() - lun->last_busy;
11582#endif
11583 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
11584 }
11585
11586 /* Get command entry and return error if it is unsuppotyed. */
11587 entry = ctl_validate_command(ctsio);
11588 if (entry == NULL) {
11589 if (lun)
11590 mtx_unlock(&lun->lun_lock);
11591 return (retval);
11592 }
11593
11594 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11595 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11596
11597 /*
11598 * Check to see whether we can send this command to LUNs that don't
11599 * exist. This should pretty much only be the case for inquiry
11600 * and request sense. Further checks, below, really require having
11601 * a LUN, so we can't really check the command anymore. Just put
11602 * it on the rtr queue.
11603 */
11604 if (lun == NULL) {
11605 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
11606 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11607 ctl_enqueue_rtr((union ctl_io *)ctsio);
11608 return (retval);
11609 }
11610
11611 ctl_set_unsupported_lun(ctsio);
11612 ctl_done((union ctl_io *)ctsio);
11613 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11614 return (retval);
11615 } else {
11616 /*
11617 * Make sure we support this particular command on this LUN.
11618 * e.g., we don't support writes to the control LUN.
11619 */
11620 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11621 mtx_unlock(&lun->lun_lock);
11622 ctl_set_invalid_opcode(ctsio);
11623 ctl_done((union ctl_io *)ctsio);
11624 return (retval);
11625 }
11626 }
11627
11628 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11629
11630#ifdef CTL_WITH_CA
11631 /*
11632 * If we've got a request sense, it'll clear the contingent
11633 * allegiance condition. Otherwise, if we have a CA condition for
11634 * this initiator, clear it, because it sent down a command other
11635 * than request sense.
11636 */
11637 if ((ctsio->cdb[0] != REQUEST_SENSE)
11638 && (ctl_is_set(lun->have_ca, initidx)))
11639 ctl_clear_mask(lun->have_ca, initidx);
11640#endif
11641
11642 /*
11643 * If the command has this flag set, it handles its own unit
11644 * attention reporting, we shouldn't do anything. Otherwise we
11645 * check for any pending unit attentions, and send them back to the
11646 * initiator. We only do this when a command initially comes in,
11647 * not when we pull it off the blocked queue.
11648 *
11649 * According to SAM-3, section 5.3.2, the order that things get
11650 * presented back to the host is basically unit attentions caused
11651 * by some sort of reset event, busy status, reservation conflicts
11652 * or task set full, and finally any other status.
11653 *
11654 * One issue here is that some of the unit attentions we report
11655 * don't fall into the "reset" category (e.g. "reported luns data
11656 * has changed"). So reporting it here, before the reservation
11657 * check, may be technically wrong. I guess the only thing to do
11658 * would be to check for and report the reset events here, and then
11659 * check for the other unit attention types after we check for a
11660 * reservation conflict.
11661 *
11662 * XXX KDM need to fix this
11663 */
11664 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
11665 ctl_ua_type ua_type;
11666 u_int sense_len = 0;
11667
11668 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
11669 &sense_len, SSD_TYPE_NONE);
11670 if (ua_type != CTL_UA_NONE) {
11671 mtx_unlock(&lun->lun_lock);
11672 ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
11673 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
11674 ctsio->sense_len = sense_len;
11675 ctl_done((union ctl_io *)ctsio);
11676 return (retval);
11677 }
11678 }
11679
11680
11681 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
11682 mtx_unlock(&lun->lun_lock);
11683 ctl_done((union ctl_io *)ctsio);
11684 return (retval);
11685 }
11686
11687 /*
11688 * XXX CHD this is where we want to send IO to other side if
11689 * this LUN is secondary on this SC. We will need to make a copy
11690 * of the IO and flag the IO on this side as SENT_2OTHER and the flag
11691 * the copy we send as FROM_OTHER.
11692 * We also need to stuff the address of the original IO so we can
11693 * find it easily. Something similar will need be done on the other
11694 * side so when we are done we can find the copy.
11695 */
11696 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
11697 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 &&
11698 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) {
11699 union ctl_ha_msg msg_info;
11700 int isc_retval;
11701
11702 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
11703 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11704 mtx_unlock(&lun->lun_lock);
11705
11706 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
11707 msg_info.hdr.original_sc = (union ctl_io *)ctsio;
11708 msg_info.hdr.serializing_sc = NULL;
11709 msg_info.hdr.nexus = ctsio->io_hdr.nexus;
11710 msg_info.scsi.tag_num = ctsio->tag_num;
11711 msg_info.scsi.tag_type = ctsio->tag_type;
11712 msg_info.scsi.cdb_len = ctsio->cdb_len;
11713 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
11714
11715 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11716 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
11717 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
11718 ctl_set_busy(ctsio);
11719 ctl_done((union ctl_io *)ctsio);
11720 return (retval);
11721 }
11722 return (retval);
11723 }
11724
11725 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
11726 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
11727 ctl_ooaq, ooa_links))) {
11728 case CTL_ACTION_BLOCK:
11729 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
11730 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
11731 blocked_links);
11732 mtx_unlock(&lun->lun_lock);
11733 return (retval);
11734 case CTL_ACTION_PASS:
11735 case CTL_ACTION_SKIP:
11736 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11737 mtx_unlock(&lun->lun_lock);
11738 ctl_enqueue_rtr((union ctl_io *)ctsio);
11739 break;
11740 case CTL_ACTION_OVERLAP:
11741 mtx_unlock(&lun->lun_lock);
11742 ctl_set_overlapped_cmd(ctsio);
11743 ctl_done((union ctl_io *)ctsio);
11744 break;
11745 case CTL_ACTION_OVERLAP_TAG:
11746 mtx_unlock(&lun->lun_lock);
11747 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
11748 ctl_done((union ctl_io *)ctsio);
11749 break;
11750 case CTL_ACTION_ERROR:
11751 default:
11752 mtx_unlock(&lun->lun_lock);
11753 ctl_set_internal_failure(ctsio,
11754 /*sks_valid*/ 0,
11755 /*retry_count*/ 0);
11756 ctl_done((union ctl_io *)ctsio);
11757 break;
11758 }
11759 return (retval);
11760}
11761
11762const struct ctl_cmd_entry *
11763ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
11764{
11765 const struct ctl_cmd_entry *entry;
11766 int service_action;
11767
11768 entry = &ctl_cmd_table[ctsio->cdb[0]];
11769 if (sa)
11770 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
11771 if (entry->flags & CTL_CMD_FLAG_SA5) {
11772 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
11773 entry = &((const struct ctl_cmd_entry *)
11774 entry->execute)[service_action];
11775 }
11776 return (entry);
11777}
11778
11779const struct ctl_cmd_entry *
11780ctl_validate_command(struct ctl_scsiio *ctsio)
11781{
11782 const struct ctl_cmd_entry *entry;
11783 int i, sa;
11784 uint8_t diff;
11785
11786 entry = ctl_get_cmd_entry(ctsio, &sa);
11787 if (entry->execute == NULL) {
11788 if (sa)
11789 ctl_set_invalid_field(ctsio,
11790 /*sks_valid*/ 1,
11791 /*command*/ 1,
11792 /*field*/ 1,
11793 /*bit_valid*/ 1,
11794 /*bit*/ 4);
11795 else
11796 ctl_set_invalid_opcode(ctsio);
11797 ctl_done((union ctl_io *)ctsio);
11798 return (NULL);
11799 }
11800 KASSERT(entry->length > 0,
11801 ("Not defined length for command 0x%02x/0x%02x",
11802 ctsio->cdb[0], ctsio->cdb[1]));
11803 for (i = 1; i < entry->length; i++) {
11804 diff = ctsio->cdb[i] & ~entry->usage[i - 1];
11805 if (diff == 0)
11806 continue;
11807 ctl_set_invalid_field(ctsio,
11808 /*sks_valid*/ 1,
11809 /*command*/ 1,
11810 /*field*/ i,
11811 /*bit_valid*/ 1,
11812 /*bit*/ fls(diff) - 1);
11813 ctl_done((union ctl_io *)ctsio);
11814 return (NULL);
11815 }
11816 return (entry);
11817}
11818
11819static int
11820ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
11821{
11822
11823 switch (lun_type) {
11824 case T_DIRECT:
11825 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0)
11826 return (0);
11827 break;
11828 case T_PROCESSOR:
11829 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
11830 return (0);
11831 break;
11832 case T_CDROM:
11833 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0)
11834 return (0);
11835 break;
11836 default:
11837 return (0);
11838 }
11839 return (1);
11840}
11841
11842static int
11843ctl_scsiio(struct ctl_scsiio *ctsio)
11844{
11845 int retval;
11846 const struct ctl_cmd_entry *entry;
11847
11848 retval = CTL_RETVAL_COMPLETE;
11849
11850 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
11851
11852 entry = ctl_get_cmd_entry(ctsio, NULL);
11853
11854 /*
11855 * If this I/O has been aborted, just send it straight to
11856 * ctl_done() without executing it.
11857 */
11858 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
11859 ctl_done((union ctl_io *)ctsio);
11860 goto bailout;
11861 }
11862
11863 /*
11864 * All the checks should have been handled by ctl_scsiio_precheck().
11865 * We should be clear now to just execute the I/O.
11866 */
11867 retval = entry->execute(ctsio);
11868
11869bailout:
11870 return (retval);
11871}
11872
11873/*
11874 * Since we only implement one target right now, a bus reset simply resets
11875 * our single target.
11876 */
11877static int
11878ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io)
11879{
11880 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET));
11881}
11882
11883static int
11884ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
11885 ctl_ua_type ua_type)
11886{
11887 struct ctl_port *port;
11888 struct ctl_lun *lun;
11889 int retval;
11890
11891 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
11892 union ctl_ha_msg msg_info;
11893
11894 msg_info.hdr.nexus = io->io_hdr.nexus;
11895 if (ua_type==CTL_UA_TARG_RESET)
11896 msg_info.task.task_action = CTL_TASK_TARGET_RESET;
11897 else
11898 msg_info.task.task_action = CTL_TASK_BUS_RESET;
11899 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11900 msg_info.hdr.original_sc = NULL;
11901 msg_info.hdr.serializing_sc = NULL;
11902 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11903 sizeof(msg_info.task), M_WAITOK);
11904 }
11905 retval = 0;
11906
11907 mtx_lock(&softc->ctl_lock);
11908 port = ctl_io_port(&io->io_hdr);
11909 STAILQ_FOREACH(lun, &softc->lun_list, links) {
11910 if (port != NULL &&
11911 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
11912 continue;
11913 retval += ctl_do_lun_reset(lun, io, ua_type);
11914 }
11915 mtx_unlock(&softc->ctl_lock);
11916 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
11917 return (retval);
11918}
11919
11920/*
11921 * The LUN should always be set. The I/O is optional, and is used to
11922 * distinguish between I/Os sent by this initiator, and by other
11923 * initiators. We set unit attention for initiators other than this one.
11924 * SAM-3 is vague on this point. It does say that a unit attention should
11925 * be established for other initiators when a LUN is reset (see section
11926 * 5.7.3), but it doesn't specifically say that the unit attention should
11927 * be established for this particular initiator when a LUN is reset. Here
11928 * is the relevant text, from SAM-3 rev 8:
11929 *
11930 * 5.7.2 When a SCSI initiator port aborts its own tasks
11931 *
11932 * When a SCSI initiator port causes its own task(s) to be aborted, no
11933 * notification that the task(s) have been aborted shall be returned to
11934 * the SCSI initiator port other than the completion response for the
11935 * command or task management function action that caused the task(s) to
11936 * be aborted and notification(s) associated with related effects of the
11937 * action (e.g., a reset unit attention condition).
11938 *
11939 * XXX KDM for now, we're setting unit attention for all initiators.
11940 */
11941static int
11942ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
11943{
11944 union ctl_io *xio;
11945#if 0
11946 uint32_t initidx;
11947#endif
11948 int i;
11949
11950 mtx_lock(&lun->lun_lock);
11951 /*
11952 * Run through the OOA queue and abort each I/O.
11953 */
11954 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11955 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11956 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
11957 }
11958
11959 /*
11960 * This version sets unit attention for every
11961 */
11962#if 0
11963 initidx = ctl_get_initindex(&io->io_hdr.nexus);
11964 ctl_est_ua_all(lun, initidx, ua_type);
11965#else
11966 ctl_est_ua_all(lun, -1, ua_type);
11967#endif
11968
11969 /*
11970 * A reset (any kind, really) clears reservations established with
11971 * RESERVE/RELEASE. It does not clear reservations established
11972 * with PERSISTENT RESERVE OUT, but we don't support that at the
11973 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address
11974 * reservations made with the RESERVE/RELEASE commands, because
11975 * those commands are obsolete in SPC-3.
11976 */
11977 lun->flags &= ~CTL_LUN_RESERVED;
11978
11979#ifdef CTL_WITH_CA
11980 for (i = 0; i < CTL_MAX_INITIATORS; i++)
11981 ctl_clear_mask(lun->have_ca, i);
11982#endif
11983 lun->prevent_count = 0;
11984 for (i = 0; i < CTL_MAX_INITIATORS; i++)
11985 ctl_clear_mask(lun->prevent, i);
11986 mtx_unlock(&lun->lun_lock);
11987
11988 return (0);
11989}
11990
11991static int
11992ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io)
11993{
11994 struct ctl_lun *lun;
11995 uint32_t targ_lun;
11996 int retval;
11997
11998 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11999 mtx_lock(&softc->ctl_lock);
12000 if (targ_lun >= CTL_MAX_LUNS ||
12001 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12002 mtx_unlock(&softc->ctl_lock);
12003 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12004 return (1);
12005 }
12006 retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET);
12007 mtx_unlock(&softc->ctl_lock);
12008 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12009
12010 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
12011 union ctl_ha_msg msg_info;
12012
12013 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12014 msg_info.hdr.nexus = io->io_hdr.nexus;
12015 msg_info.task.task_action = CTL_TASK_LUN_RESET;
12016 msg_info.hdr.original_sc = NULL;
12017 msg_info.hdr.serializing_sc = NULL;
12018 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12019 sizeof(msg_info.task), M_WAITOK);
12020 }
12021 return (retval);
12022}
12023
12024static void
12025ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
12026 int other_sc)
12027{
12028 union ctl_io *xio;
12029
12030 mtx_assert(&lun->lun_lock, MA_OWNED);
12031
12032 /*
12033 * Run through the OOA queue and attempt to find the given I/O.
12034 * The target port, initiator ID, tag type and tag number have to
12035 * match the values that we got from the initiator. If we have an
12036 * untagged command to abort, simply abort the first untagged command
12037 * we come to. We only allow one untagged command at a time of course.
12038 */
12039 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12040 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12041
12042 if ((targ_port == UINT32_MAX ||
12043 targ_port == xio->io_hdr.nexus.targ_port) &&
12044 (init_id == UINT32_MAX ||
12045 init_id == xio->io_hdr.nexus.initid)) {
12046 if (targ_port != xio->io_hdr.nexus.targ_port ||
12047 init_id != xio->io_hdr.nexus.initid)
12048 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
12049 xio->io_hdr.flags |= CTL_FLAG_ABORT;
12050 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12051 union ctl_ha_msg msg_info;
12052
12053 msg_info.hdr.nexus = xio->io_hdr.nexus;
12054 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12055 msg_info.task.tag_num = xio->scsiio.tag_num;
12056 msg_info.task.tag_type = xio->scsiio.tag_type;
12057 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12058 msg_info.hdr.original_sc = NULL;
12059 msg_info.hdr.serializing_sc = NULL;
12060 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12061 sizeof(msg_info.task), M_NOWAIT);
12062 }
12063 }
12064 }
12065}
12066
12067static int
12068ctl_abort_task_set(union ctl_io *io)
12069{
12070 struct ctl_softc *softc = control_softc;
12071 struct ctl_lun *lun;
12072 uint32_t targ_lun;
12073
12074 /*
12075 * Look up the LUN.
12076 */
12077 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12078 mtx_lock(&softc->ctl_lock);
12079 if (targ_lun >= CTL_MAX_LUNS ||
12080 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12081 mtx_unlock(&softc->ctl_lock);
12082 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12083 return (1);
12084 }
12085
12086 mtx_lock(&lun->lun_lock);
12087 mtx_unlock(&softc->ctl_lock);
12088 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
12089 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
12090 io->io_hdr.nexus.initid,
12091 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12092 } else { /* CTL_TASK_CLEAR_TASK_SET */
12093 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
12094 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12095 }
12096 mtx_unlock(&lun->lun_lock);
12097 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12098 return (0);
12099}
12100
12101static int
12102ctl_i_t_nexus_reset(union ctl_io *io)
12103{
12104 struct ctl_softc *softc = control_softc;
12105 struct ctl_lun *lun;
12106 uint32_t initidx;
12107
12108 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12109 union ctl_ha_msg msg_info;
12110
12111 msg_info.hdr.nexus = io->io_hdr.nexus;
12112 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET;
12113 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12114 msg_info.hdr.original_sc = NULL;
12115 msg_info.hdr.serializing_sc = NULL;
12116 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12117 sizeof(msg_info.task), M_WAITOK);
12118 }
12119
12120 initidx = ctl_get_initindex(&io->io_hdr.nexus);
12121 mtx_lock(&softc->ctl_lock);
12122 STAILQ_FOREACH(lun, &softc->lun_list, links) {
12123 mtx_lock(&lun->lun_lock);
12124 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
12125 io->io_hdr.nexus.initid, 1);
12126#ifdef CTL_WITH_CA
12127 ctl_clear_mask(lun->have_ca, initidx);
12128#endif
12129 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
12130 lun->flags &= ~CTL_LUN_RESERVED;
12131 if (ctl_is_set(lun->prevent, initidx)) {
12132 ctl_clear_mask(lun->prevent, initidx);
12133 lun->prevent_count--;
12134 }
12135 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS);
12136 mtx_unlock(&lun->lun_lock);
12137 }
12138 mtx_unlock(&softc->ctl_lock);
12139 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12140 return (0);
12141}
12142
12143static int
12144ctl_abort_task(union ctl_io *io)
12145{
12146 union ctl_io *xio;
12147 struct ctl_lun *lun;
12148 struct ctl_softc *softc;
12149#if 0
12150 struct sbuf sb;
12151 char printbuf[128];
12152#endif
12153 int found;
12154 uint32_t targ_lun;
12155
12156 softc = control_softc;
12157 found = 0;
12158
12159 /*
12160 * Look up the LUN.
12161 */
12162 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12163 mtx_lock(&softc->ctl_lock);
12164 if (targ_lun >= CTL_MAX_LUNS ||
12165 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12166 mtx_unlock(&softc->ctl_lock);
12167 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12168 return (1);
12169 }
12170
12171#if 0
12172 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
12173 lun->lun, io->taskio.tag_num, io->taskio.tag_type);
12174#endif
12175
12176 mtx_lock(&lun->lun_lock);
12177 mtx_unlock(&softc->ctl_lock);
12178 /*
12179 * Run through the OOA queue and attempt to find the given I/O.
12180 * The target port, initiator ID, tag type and tag number have to
12181 * match the values that we got from the initiator. If we have an
12182 * untagged command to abort, simply abort the first untagged command
12183 * we come to. We only allow one untagged command at a time of course.
12184 */
12185 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12186 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12187#if 0
12188 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);
12189
12190 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
12191 lun->lun, xio->scsiio.tag_num,
12192 xio->scsiio.tag_type,
12193 (xio->io_hdr.blocked_links.tqe_prev
12194 == NULL) ? "" : " BLOCKED",
12195 (xio->io_hdr.flags &
12196 CTL_FLAG_DMA_INPROG) ? " DMA" : "",
12197 (xio->io_hdr.flags &
12198 CTL_FLAG_ABORT) ? " ABORT" : "",
12199 (xio->io_hdr.flags &
12200 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
12201 ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
12202 sbuf_finish(&sb);
12203 printf("%s\n", sbuf_data(&sb));
12204#endif
12205
12206 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
12207 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
12208 || (xio->io_hdr.flags & CTL_FLAG_ABORT))
12209 continue;
12210
12211 /*
12212 * If the abort says that the task is untagged, the
12213 * task in the queue must be untagged. Otherwise,
12214 * we just check to see whether the tag numbers
12215 * match. This is because the QLogic firmware
12216 * doesn't pass back the tag type in an abort
12217 * request.
12218 */
12219#if 0
12220 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
12221 && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
12222 || (xio->scsiio.tag_num == io->taskio.tag_num))
12223#endif
12224 /*
12225 * XXX KDM we've got problems with FC, because it
12226 * doesn't send down a tag type with aborts. So we
12227 * can only really go by the tag number...
12228 * This may cause problems with parallel SCSI.
12229 * Need to figure that out!!
12230 */
12231 if (xio->scsiio.tag_num == io->taskio.tag_num) {
12232 xio->io_hdr.flags |= CTL_FLAG_ABORT;
12233 found = 1;
12234 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
12235 !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12236 union ctl_ha_msg msg_info;
12237
12238 msg_info.hdr.nexus = io->io_hdr.nexus;
12239 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12240 msg_info.task.tag_num = io->taskio.tag_num;
12241 msg_info.task.tag_type = io->taskio.tag_type;
12242 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12243 msg_info.hdr.original_sc = NULL;
12244 msg_info.hdr.serializing_sc = NULL;
12245#if 0
12246 printf("Sent Abort to other side\n");
12247#endif
12248 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12249 sizeof(msg_info.task), M_NOWAIT);
12250 }
12251#if 0
12252 printf("ctl_abort_task: found I/O to abort\n");
12253#endif
12254 }
12255 }
12256 mtx_unlock(&lun->lun_lock);
12257
12258 if (found == 0) {
12259 /*
12260 * This isn't really an error. It's entirely possible for
12261 * the abort and command completion to cross on the wire.
12262 * This is more of an informative/diagnostic error.
12263 */
12264#if 0
12265 printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
12266 "%u:%u:%u tag %d type %d\n",
12267 io->io_hdr.nexus.initid,
12268 io->io_hdr.nexus.targ_port,
12269 io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
12270 io->taskio.tag_type);
12271#endif
12272 }
12273 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12274 return (0);
12275}
12276
12277static int
12278ctl_query_task(union ctl_io *io, int task_set)
12279{
12280 union ctl_io *xio;
12281 struct ctl_lun *lun;
12282 struct ctl_softc *softc;
12283 int found = 0;
12284 uint32_t targ_lun;
12285
12286 softc = control_softc;
12287 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12288 mtx_lock(&softc->ctl_lock);
12289 if (targ_lun >= CTL_MAX_LUNS ||
12290 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12291 mtx_unlock(&softc->ctl_lock);
12292 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12293 return (1);
12294 }
12295 mtx_lock(&lun->lun_lock);
12296 mtx_unlock(&softc->ctl_lock);
12297 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12298 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12299
12300 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
12301 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
12302 || (xio->io_hdr.flags & CTL_FLAG_ABORT))
12303 continue;
12304
12305 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
12306 found = 1;
12307 break;
12308 }
12309 }
12310 mtx_unlock(&lun->lun_lock);
12311 if (found)
12312 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12313 else
12314 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12315 return (0);
12316}
12317
12318static int
12319ctl_query_async_event(union ctl_io *io)
12320{
12321 struct ctl_lun *lun;
12322 struct ctl_softc *softc;
12323 ctl_ua_type ua;
12324 uint32_t targ_lun, initidx;
12325
12326 softc = control_softc;
12327 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12328 mtx_lock(&softc->ctl_lock);
12329 if (targ_lun >= CTL_MAX_LUNS ||
12330 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12331 mtx_unlock(&softc->ctl_lock);
12332 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12333 return (1);
12334 }
12335 mtx_lock(&lun->lun_lock);
12336 mtx_unlock(&softc->ctl_lock);
12337 initidx = ctl_get_initindex(&io->io_hdr.nexus);
12338 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
12339 mtx_unlock(&lun->lun_lock);
12340 if (ua != CTL_UA_NONE)
12341 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12342 else
12343 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12344 return (0);
12345}
12346
12347static void
12348ctl_run_task(union ctl_io *io)
12349{
12350 struct ctl_softc *softc = control_softc;
12351 int retval = 1;
12352
12353 CTL_DEBUG_PRINT(("ctl_run_task\n"));
12354 KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
12355 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type));
12356 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
12357 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
12358 switch (io->taskio.task_action) {
12359 case CTL_TASK_ABORT_TASK:
12360 retval = ctl_abort_task(io);
12361 break;
12362 case CTL_TASK_ABORT_TASK_SET:
12363 case CTL_TASK_CLEAR_TASK_SET:
12364 retval = ctl_abort_task_set(io);
12365 break;
12366 case CTL_TASK_CLEAR_ACA:
12367 break;
12368 case CTL_TASK_I_T_NEXUS_RESET:
12369 retval = ctl_i_t_nexus_reset(io);
12370 break;
12371 case CTL_TASK_LUN_RESET:
12372 retval = ctl_lun_reset(softc, io);
12373 break;
12374 case CTL_TASK_TARGET_RESET:
12375 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
12376 break;
12377 case CTL_TASK_BUS_RESET:
12378 retval = ctl_bus_reset(softc, io);
12379 break;
12380 case CTL_TASK_PORT_LOGIN:
12381 break;
12382 case CTL_TASK_PORT_LOGOUT:
12383 break;
12384 case CTL_TASK_QUERY_TASK:
12385 retval = ctl_query_task(io, 0);
12386 break;
12387 case CTL_TASK_QUERY_TASK_SET:
12388 retval = ctl_query_task(io, 1);
12389 break;
12390 case CTL_TASK_QUERY_ASYNC_EVENT:
12391 retval = ctl_query_async_event(io);
12392 break;
12393 default:
12394 printf("%s: got unknown task management event %d\n",
12395 __func__, io->taskio.task_action);
12396 break;
12397 }
12398 if (retval == 0)
12399 io->io_hdr.status = CTL_SUCCESS;
12400 else
12401 io->io_hdr.status = CTL_ERROR;
12402 ctl_done(io);
12403}
12404
12405/*
12406 * For HA operation. Handle commands that come in from the other
12407 * controller.
12408 */
12409static void
12410ctl_handle_isc(union ctl_io *io)
12411{
12412 struct ctl_softc *softc = control_softc;
12413 struct ctl_lun *lun;
12414 const struct ctl_cmd_entry *entry;
12415 uint32_t targ_lun;
12416
12417 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12418 switch (io->io_hdr.msg_type) {
12419 case CTL_MSG_SERIALIZE:
12420 ctl_serialize_other_sc_cmd(&io->scsiio);
12421 break;
12422 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */
12423 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
12424 if (targ_lun >= CTL_MAX_LUNS ||
12425 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12426 ctl_done(io);
12427 break;
12428 }
12429 mtx_lock(&lun->lun_lock);
12430 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
12431 mtx_unlock(&lun->lun_lock);
12432 ctl_done(io);
12433 break;
12434 }
12435 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
12436 mtx_unlock(&lun->lun_lock);
12437 ctl_enqueue_rtr(io);
12438 break;
12439 case CTL_MSG_FINISH_IO:
12440 if (softc->ha_mode == CTL_HA_MODE_XFER) {
12441 ctl_done(io);
12442 break;
12443 }
12444 if (targ_lun >= CTL_MAX_LUNS ||
12445 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12446 ctl_free_io(io);
12447 break;
12448 }
12449 mtx_lock(&lun->lun_lock);
12450 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
12451 ctl_check_blocked(lun);
12452 mtx_unlock(&lun->lun_lock);
12453 ctl_free_io(io);
12454 break;
12455 case CTL_MSG_PERS_ACTION:
12456 ctl_hndl_per_res_out_on_other_sc(
12457 (union ctl_ha_msg *)&io->presio.pr_msg);
12458 ctl_free_io(io);
12459 break;
12460 case CTL_MSG_BAD_JUJU:
12461 ctl_done(io);
12462 break;
12463 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */
12464 ctl_datamove_remote(io);
12465 break;
12466 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */
12467 io->scsiio.be_move_done(io);
12468 break;
12469 case CTL_MSG_FAILOVER:
12470 ctl_failover_lun(io);
12471 ctl_free_io(io);
12472 break;
12473 default:
12474 printf("%s: Invalid message type %d\n",
12475 __func__, io->io_hdr.msg_type);
12476 ctl_free_io(io);
12477 break;
12478 }
12479
12480}
12481
12482
12483/*
12484 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
12485 * there is no match.
12486 */
12487static ctl_lun_error_pattern
12488ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
12489{
12490 const struct ctl_cmd_entry *entry;
12491 ctl_lun_error_pattern filtered_pattern, pattern;
12492
12493 pattern = desc->error_pattern;
12494
12495 /*
12496 * XXX KDM we need more data passed into this function to match a
12497 * custom pattern, and we actually need to implement custom pattern
12498 * matching.
12499 */
12500 if (pattern & CTL_LUN_PAT_CMD)
12501 return (CTL_LUN_PAT_CMD);
12502
12503 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
12504 return (CTL_LUN_PAT_ANY);
12505
12506 entry = ctl_get_cmd_entry(ctsio, NULL);
12507
12508 filtered_pattern = entry->pattern & pattern;
12509
12510 /*
12511 * If the user requested specific flags in the pattern (e.g.
12512 * CTL_LUN_PAT_RANGE), make sure the command supports all of those
12513 * flags.
12514 *
12515 * If the user did not specify any flags, it doesn't matter whether
12516 * or not the command supports the flags.
12517 */
12518 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
12519 (pattern & ~CTL_LUN_PAT_MASK))
12520 return (CTL_LUN_PAT_NONE);
12521
12522 /*
12523 * If the user asked for a range check, see if the requested LBA
12524 * range overlaps with this command's LBA range.
12525 */
12526 if (filtered_pattern & CTL_LUN_PAT_RANGE) {
12527 uint64_t lba1;
12528 uint64_t len1;
12529 ctl_action action;
12530 int retval;
12531
12532 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
12533 if (retval != 0)
12534 return (CTL_LUN_PAT_NONE);
12535
12536 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
12537 desc->lba_range.len, FALSE);
12538 /*
12539 * A "pass" means that the LBA ranges don't overlap, so
12540 * this doesn't match the user's range criteria.
12541 */
12542 if (action == CTL_ACTION_PASS)
12543 return (CTL_LUN_PAT_NONE);
12544 }
12545
12546 return (filtered_pattern);
12547}
12548
12549static void
12550ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
12551{
12552 struct ctl_error_desc *desc, *desc2;
12553
12554 mtx_assert(&lun->lun_lock, MA_OWNED);
12555
12556 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
12557 ctl_lun_error_pattern pattern;
12558 /*
12559 * Check to see whether this particular command matches
12560 * the pattern in the descriptor.
12561 */
12562 pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
12563 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
12564 continue;
12565
12566 switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
12567 case CTL_LUN_INJ_ABORTED:
12568 ctl_set_aborted(&io->scsiio);
12569 break;
12570 case CTL_LUN_INJ_MEDIUM_ERR:
12571 ctl_set_medium_error(&io->scsiio,
12572 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) !=
12573 CTL_FLAG_DATA_OUT);
12574 break;
12575 case CTL_LUN_INJ_UA:
12576 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET
12577 * OCCURRED */
12578 ctl_set_ua(&io->scsiio, 0x29, 0x00);
12579 break;
12580 case CTL_LUN_INJ_CUSTOM:
12581 /*
12582 * We're assuming the user knows what he is doing.
12583 * Just copy the sense information without doing
12584 * checks.
12585 */
12586 bcopy(&desc->custom_sense, &io->scsiio.sense_data,
12587 MIN(sizeof(desc->custom_sense),
12588 sizeof(io->scsiio.sense_data)));
12589 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
12590 io->scsiio.sense_len = SSD_FULL_SIZE;
12591 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
12592 break;
12593 case CTL_LUN_INJ_NONE:
12594 default:
12595 /*
12596 * If this is an error injection type we don't know
12597 * about, clear the continuous flag (if it is set)
12598 * so it will get deleted below.
12599 */
12600 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
12601 break;
12602 }
12603 /*
12604 * By default, each error injection action is a one-shot
12605 */
12606 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
12607 continue;
12608
12609 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
12610
12611 free(desc, M_CTL);
12612 }
12613}
12614
12615#ifdef CTL_IO_DELAY
12616static void
12617ctl_datamove_timer_wakeup(void *arg)
12618{
12619 union ctl_io *io;
12620
12621 io = (union ctl_io *)arg;
12622
12623 ctl_datamove(io);
12624}
12625#endif /* CTL_IO_DELAY */
12626
12627void
12628ctl_datamove(union ctl_io *io)
12629{
12630 struct ctl_lun *lun;
12631 void (*fe_datamove)(union ctl_io *io);
12632
12633 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
12634
12635 CTL_DEBUG_PRINT(("ctl_datamove\n"));
12636
12637 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12638#ifdef CTL_TIME_IO
12639 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12640 char str[256];
12641 char path_str[64];
12642 struct sbuf sb;
12643
12644 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12645 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12646
12647 sbuf_cat(&sb, path_str);
12648 switch (io->io_hdr.io_type) {
12649 case CTL_IO_SCSI:
12650 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12651 sbuf_printf(&sb, "\n");
12652 sbuf_cat(&sb, path_str);
12653 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12654 io->scsiio.tag_num, io->scsiio.tag_type);
12655 break;
12656 case CTL_IO_TASK:
12657 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12658 "Tag Type: %d\n", io->taskio.task_action,
12659 io->taskio.tag_num, io->taskio.tag_type);
12660 break;
12661 default:
12662 panic("%s: Invalid CTL I/O type %d\n",
12663 __func__, io->io_hdr.io_type);
12664 }
12665 sbuf_cat(&sb, path_str);
12666 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
12667 (intmax_t)time_uptime - io->io_hdr.start_time);
12668 sbuf_finish(&sb);
12669 printf("%s", sbuf_data(&sb));
12670 }
12671#endif /* CTL_TIME_IO */
12672
12673#ifdef CTL_IO_DELAY
12674 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
12675 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
12676 } else {
12677 if ((lun != NULL)
12678 && (lun->delay_info.datamove_delay > 0)) {
12679
12680 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
12681 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
12682 callout_reset(&io->io_hdr.delay_callout,
12683 lun->delay_info.datamove_delay * hz,
12684 ctl_datamove_timer_wakeup, io);
12685 if (lun->delay_info.datamove_type ==
12686 CTL_DELAY_TYPE_ONESHOT)
12687 lun->delay_info.datamove_delay = 0;
12688 return;
12689 }
12690 }
12691#endif
12692
12693 /*
12694 * This command has been aborted. Set the port status, so we fail
12695 * the data move.
12696 */
12697 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12698 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n",
12699 io->scsiio.tag_num, io->io_hdr.nexus.initid,
12700 io->io_hdr.nexus.targ_port,
12701 io->io_hdr.nexus.targ_lun);
12702 io->io_hdr.port_status = 31337;
12703 /*
12704 * Note that the backend, in this case, will get the
12705 * callback in its context. In other cases it may get
12706 * called in the frontend's interrupt thread context.
12707 */
12708 io->scsiio.be_move_done(io);
12709 return;
12710 }
12711
12712 /* Don't confuse frontend with zero length data move. */
12713 if (io->scsiio.kern_data_len == 0) {
12714 io->scsiio.be_move_done(io);
12715 return;
12716 }
12717
12718 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12719 fe_datamove(io);
12720}
12721
12722static void
12723ctl_send_datamove_done(union ctl_io *io, int have_lock)
12724{
12725 union ctl_ha_msg msg;
12726#ifdef CTL_TIME_IO
12727 struct bintime cur_bt;
12728#endif
12729
12730 memset(&msg, 0, sizeof(msg));
12731 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
12732 msg.hdr.original_sc = io;
12733 msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
12734 msg.hdr.nexus = io->io_hdr.nexus;
12735 msg.hdr.status = io->io_hdr.status;
12736 msg.scsi.tag_num = io->scsiio.tag_num;
12737 msg.scsi.tag_type = io->scsiio.tag_type;
12738 msg.scsi.scsi_status = io->scsiio.scsi_status;
12739 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12740 io->scsiio.sense_len);
12741 msg.scsi.sense_len = io->scsiio.sense_len;
12742 msg.scsi.sense_residual = io->scsiio.sense_residual;
12743 msg.scsi.fetd_status = io->io_hdr.port_status;
12744 msg.scsi.residual = io->scsiio.residual;
12745 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12746 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12747 ctl_failover_io(io, /*have_lock*/ have_lock);
12748 return;
12749 }
12750 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12751 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
12752 msg.scsi.sense_len, M_WAITOK);
12753
12754#ifdef CTL_TIME_IO
12755 getbinuptime(&cur_bt);
12756 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
12757 bintime_add(&io->io_hdr.dma_bt, &cur_bt);
12758#endif
12759 io->io_hdr.num_dmas++;
12760}
12761
12762/*
12763 * The DMA to the remote side is done, now we need to tell the other side
12764 * we're done so it can continue with its data movement.
12765 */
12766static void
12767ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
12768{
12769 union ctl_io *io;
12770 uint32_t i;
12771
12772 io = rq->context;
12773
12774 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12775 printf("%s: ISC DMA write failed with error %d", __func__,
12776 rq->ret);
12777 ctl_set_internal_failure(&io->scsiio,
12778 /*sks_valid*/ 1,
12779 /*retry_count*/ rq->ret);
12780 }
12781
12782 ctl_dt_req_free(rq);
12783
12784 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12785 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12786 free(io->io_hdr.remote_sglist, M_CTL);
12787 io->io_hdr.remote_sglist = NULL;
12788 io->io_hdr.local_sglist = NULL;
12789
12790 /*
12791 * The data is in local and remote memory, so now we need to send
12792 * status (good or back) back to the other side.
12793 */
12794 ctl_send_datamove_done(io, /*have_lock*/ 0);
12795}
12796
12797/*
12798 * We've moved the data from the host/controller into local memory. Now we
12799 * need to push it over to the remote controller's memory.
12800 */
12801static int
12802ctl_datamove_remote_dm_write_cb(union ctl_io *io)
12803{
12804 int retval;
12805
12806 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
12807 ctl_datamove_remote_write_cb);
12808 return (retval);
12809}
12810
12811static void
12812ctl_datamove_remote_write(union ctl_io *io)
12813{
12814 int retval;
12815 void (*fe_datamove)(union ctl_io *io);
12816
12817 /*
12818 * - Get the data from the host/HBA into local memory.
12819 * - DMA memory from the local controller to the remote controller.
12820 * - Send status back to the remote controller.
12821 */
12822
12823 retval = ctl_datamove_remote_sgl_setup(io);
12824 if (retval != 0)
12825 return;
12826
12827 /* Switch the pointer over so the FETD knows what to do */
12828 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12829
12830 /*
12831 * Use a custom move done callback, since we need to send completion
12832 * back to the other controller, not to the backend on this side.
12833 */
12834 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
12835
12836 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12837 fe_datamove(io);
12838}
12839
12840static int
12841ctl_datamove_remote_dm_read_cb(union ctl_io *io)
12842{
12843#if 0
12844 char str[256];
12845 char path_str[64];
12846 struct sbuf sb;
12847#endif
12848 uint32_t i;
12849
12850 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12851 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12852 free(io->io_hdr.remote_sglist, M_CTL);
12853 io->io_hdr.remote_sglist = NULL;
12854 io->io_hdr.local_sglist = NULL;
12855
12856#if 0
12857 scsi_path_string(io, path_str, sizeof(path_str));
12858 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12859 sbuf_cat(&sb, path_str);
12860 scsi_command_string(&io->scsiio, NULL, &sb);
12861 sbuf_printf(&sb, "\n");
12862 sbuf_cat(&sb, path_str);
12863 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12864 io->scsiio.tag_num, io->scsiio.tag_type);
12865 sbuf_cat(&sb, path_str);
12866 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
12867 io->io_hdr.flags, io->io_hdr.status);
12868 sbuf_finish(&sb);
12869 printk("%s", sbuf_data(&sb));
12870#endif
12871
12872
12873 /*
12874 * The read is done, now we need to send status (good or bad) back
12875 * to the other side.
12876 */
12877 ctl_send_datamove_done(io, /*have_lock*/ 0);
12878
12879 return (0);
12880}
12881
12882static void
12883ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
12884{
12885 union ctl_io *io;
12886 void (*fe_datamove)(union ctl_io *io);
12887
12888 io = rq->context;
12889
12890 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12891 printf("%s: ISC DMA read failed with error %d\n", __func__,
12892 rq->ret);
12893 ctl_set_internal_failure(&io->scsiio,
12894 /*sks_valid*/ 1,
12895 /*retry_count*/ rq->ret);
12896 }
12897
12898 ctl_dt_req_free(rq);
12899
12900 /* Switch the pointer over so the FETD knows what to do */
12901 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12902
12903 /*
12904 * Use a custom move done callback, since we need to send completion
12905 * back to the other controller, not to the backend on this side.
12906 */
12907 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
12908
12909 /* XXX KDM add checks like the ones in ctl_datamove? */
12910
12911 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12912 fe_datamove(io);
12913}
12914
12915static int
12916ctl_datamove_remote_sgl_setup(union ctl_io *io)
12917{
12918 struct ctl_sg_entry *local_sglist;
12919 uint32_t len_to_go;
12920 int retval;
12921 int i;
12922
12923 retval = 0;
12924 local_sglist = io->io_hdr.local_sglist;
12925 len_to_go = io->scsiio.kern_data_len;
12926
12927 /*
12928 * The difficult thing here is that the size of the various
12929 * S/G segments may be different than the size from the
12930 * remote controller. That'll make it harder when DMAing
12931 * the data back to the other side.
12932 */
12933 for (i = 0; len_to_go > 0; i++) {
12934 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT);
12935 local_sglist[i].addr =
12936 malloc(local_sglist[i].len, M_CTL, M_WAITOK);
12937
12938 len_to_go -= local_sglist[i].len;
12939 }
12940 /*
12941 * Reset the number of S/G entries accordingly. The original
12942 * number of S/G entries is available in rem_sg_entries.
12943 */
12944 io->scsiio.kern_sg_entries = i;
12945
12946#if 0
12947 printf("%s: kern_sg_entries = %d\n", __func__,
12948 io->scsiio.kern_sg_entries);
12949 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12950 printf("%s: sg[%d] = %p, %lu\n", __func__, i,
12951 local_sglist[i].addr, local_sglist[i].len);
12952#endif
12953
12954 return (retval);
12955}
12956
12957static int
12958ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
12959 ctl_ha_dt_cb callback)
12960{
12961 struct ctl_ha_dt_req *rq;
12962 struct ctl_sg_entry *remote_sglist, *local_sglist;
12963 uint32_t local_used, remote_used, total_used;
12964 int i, j, isc_ret;
12965
12966 rq = ctl_dt_req_alloc();
12967
12968 /*
12969 * If we failed to allocate the request, and if the DMA didn't fail
12970 * anyway, set busy status. This is just a resource allocation
12971 * failure.
12972 */
12973 if ((rq == NULL)
12974 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
12975 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS))
12976 ctl_set_busy(&io->scsiio);
12977
12978 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
12979 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
12980
12981 if (rq != NULL)
12982 ctl_dt_req_free(rq);
12983
12984 /*
12985 * The data move failed. We need to return status back
12986 * to the other controller. No point in trying to DMA
12987 * data to the remote controller.
12988 */
12989
12990 ctl_send_datamove_done(io, /*have_lock*/ 0);
12991
12992 return (1);
12993 }
12994
12995 local_sglist = io->io_hdr.local_sglist;
12996 remote_sglist = io->io_hdr.remote_sglist;
12997 local_used = 0;
12998 remote_used = 0;
12999 total_used = 0;
13000
13001 /*
13002 * Pull/push the data over the wire from/to the other controller.
13003 * This takes into account the possibility that the local and
13004 * remote sglists may not be identical in terms of the size of
13005 * the elements and the number of elements.
13006 *
13007 * One fundamental assumption here is that the length allocated for
13008 * both the local and remote sglists is identical. Otherwise, we've
13009 * essentially got a coding error of some sort.
13010 */
13011 isc_ret = CTL_HA_STATUS_SUCCESS;
13012 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
13013 uint32_t cur_len;
13014 uint8_t *tmp_ptr;
13015
13016 rq->command = command;
13017 rq->context = io;
13018
13019 /*
13020 * Both pointers should be aligned. But it is possible
13021 * that the allocation length is not. They should both
13022 * also have enough slack left over at the end, though,
13023 * to round up to the next 8 byte boundary.
13024 */
13025 cur_len = MIN(local_sglist[i].len - local_used,
13026 remote_sglist[j].len - remote_used);
13027 rq->size = cur_len;
13028
13029 tmp_ptr = (uint8_t *)local_sglist[i].addr;
13030 tmp_ptr += local_used;
13031
13032#if 0
13033 /* Use physical addresses when talking to ISC hardware */
13034 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
13035 /* XXX KDM use busdma */
13036 rq->local = vtophys(tmp_ptr);
13037 } else
13038 rq->local = tmp_ptr;
13039#else
13040 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
13041 ("HA does not support BUS_ADDR"));
13042 rq->local = tmp_ptr;
13043#endif
13044
13045 tmp_ptr = (uint8_t *)remote_sglist[j].addr;
13046 tmp_ptr += remote_used;
13047 rq->remote = tmp_ptr;
13048
13049 rq->callback = NULL;
13050
13051 local_used += cur_len;
13052 if (local_used >= local_sglist[i].len) {
13053 i++;
13054 local_used = 0;
13055 }
13056
13057 remote_used += cur_len;
13058 if (remote_used >= remote_sglist[j].len) {
13059 j++;
13060 remote_used = 0;
13061 }
13062 total_used += cur_len;
13063
13064 if (total_used >= io->scsiio.kern_data_len)
13065 rq->callback = callback;
13066
13067#if 0
13068 printf("%s: %s: local %p remote %p size %d\n", __func__,
13069 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
13070 rq->local, rq->remote, rq->size);
13071#endif
13072
13073 isc_ret = ctl_dt_single(rq);
13074 if (isc_ret > CTL_HA_STATUS_SUCCESS)
13075 break;
13076 }
13077 if (isc_ret != CTL_HA_STATUS_WAIT) {
13078 rq->ret = isc_ret;
13079 callback(rq);
13080 }
13081
13082 return (0);
13083}
13084
13085static void
13086ctl_datamove_remote_read(union ctl_io *io)
13087{
13088 int retval;
13089 uint32_t i;
13090
13091 /*
13092 * This will send an error to the other controller in the case of a
13093 * failure.
13094 */
13095 retval = ctl_datamove_remote_sgl_setup(io);
13096 if (retval != 0)
13097 return;
13098
13099 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
13100 ctl_datamove_remote_read_cb);
13101 if (retval != 0) {
13102 /*
13103 * Make sure we free memory if there was an error.. The
13104 * ctl_datamove_remote_xfer() function will send the
13105 * datamove done message, or call the callback with an
13106 * error if there is a problem.
13107 */
13108 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13109 free(io->io_hdr.local_sglist[i].addr, M_CTL);
13110 free(io->io_hdr.remote_sglist, M_CTL);
13111 io->io_hdr.remote_sglist = NULL;
13112 io->io_hdr.local_sglist = NULL;
13113 }
13114}
13115
13116/*
13117 * Process a datamove request from the other controller. This is used for
13118 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory
13119 * first. Once that is complete, the data gets DMAed into the remote
13120 * controller's memory. For reads, we DMA from the remote controller's
13121 * memory into our memory first, and then move it out to the FETD.
13122 */
13123static void
13124ctl_datamove_remote(union ctl_io *io)
13125{
13126
13127 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
13128
13129 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
13130 ctl_failover_io(io, /*have_lock*/ 0);
13131 return;
13132 }
13133
13134 /*
13135 * Note that we look for an aborted I/O here, but don't do some of
13136 * the other checks that ctl_datamove() normally does.
13137 * We don't need to run the datamove delay code, since that should
13138 * have been done if need be on the other controller.
13139 */
13140 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
13141 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__,
13142 io->scsiio.tag_num, io->io_hdr.nexus.initid,
13143 io->io_hdr.nexus.targ_port,
13144 io->io_hdr.nexus.targ_lun);
13145 io->io_hdr.port_status = 31338;
13146 ctl_send_datamove_done(io, /*have_lock*/ 0);
13147 return;
13148 }
13149
13150 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
13151 ctl_datamove_remote_write(io);
13152 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
13153 ctl_datamove_remote_read(io);
13154 else {
13155 io->io_hdr.port_status = 31339;
13156 ctl_send_datamove_done(io, /*have_lock*/ 0);
13157 }
13158}
13159
13160static void
13161ctl_process_done(union ctl_io *io)
13162{
13163 struct ctl_lun *lun;
13164 struct ctl_softc *softc = control_softc;
13165 void (*fe_done)(union ctl_io *io);
13166 union ctl_ha_msg msg;
13167 uint32_t targ_port = io->io_hdr.nexus.targ_port;
13168
13169 CTL_DEBUG_PRINT(("ctl_process_done\n"));
13170 fe_done = softc->ctl_ports[targ_port]->fe_done;
13171
13172#ifdef CTL_TIME_IO
13173 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
13174 char str[256];
13175 char path_str[64];
13176 struct sbuf sb;
13177
13178 ctl_scsi_path_string(io, path_str, sizeof(path_str));
13179 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
13180
13181 sbuf_cat(&sb, path_str);
13182 switch (io->io_hdr.io_type) {
13183 case CTL_IO_SCSI:
13184 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
13185 sbuf_printf(&sb, "\n");
13186 sbuf_cat(&sb, path_str);
13187 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
13188 io->scsiio.tag_num, io->scsiio.tag_type);
13189 break;
13190 case CTL_IO_TASK:
13191 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
13192 "Tag Type: %d\n", io->taskio.task_action,
13193 io->taskio.tag_num, io->taskio.tag_type);
13194 break;
13195 default:
13196 panic("%s: Invalid CTL I/O type %d\n",
13197 __func__, io->io_hdr.io_type);
13198 }
13199 sbuf_cat(&sb, path_str);
13200 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
13201 (intmax_t)time_uptime - io->io_hdr.start_time);
13202 sbuf_finish(&sb);
13203 printf("%s", sbuf_data(&sb));
13204 }
13205#endif /* CTL_TIME_IO */
13206
13207 switch (io->io_hdr.io_type) {
13208 case CTL_IO_SCSI:
13209 break;
13210 case CTL_IO_TASK:
13211 if (ctl_debug & CTL_DEBUG_INFO)
13212 ctl_io_error_print(io, NULL);
13213 fe_done(io);
13214 return;
13215 default:
13216 panic("%s: Invalid CTL I/O type %d\n",
13217 __func__, io->io_hdr.io_type);
13218 }
13219
13220 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13221 if (lun == NULL) {
13222 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
13223 io->io_hdr.nexus.targ_mapped_lun));
13224 goto bailout;
13225 }
13226
13227 mtx_lock(&lun->lun_lock);
13228
13229 /*
13230 * Check to see if we have any informational exception and status
13231 * of this command can be modified to report it in form of either
13232 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field.
13233 */
13234 if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
13235 io->io_hdr.status == CTL_SUCCESS &&
13236 (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
13237 uint8_t mrie = lun->MODE_IE.mrie;
13238 uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
13239 (lun->MODE_VER.byte3 & SMS_VER_PER));
13240 if (((mrie == SIEP_MRIE_REC_COND && per) ||
13241 mrie == SIEP_MRIE_REC_UNCOND ||
13242 mrie == SIEP_MRIE_NO_SENSE) &&
13243 (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
13244 CTL_CMD_FLAG_NO_SENSE) == 0) {
13245 ctl_set_sense(&io->scsiio,
13246 /*current_error*/ 1,
13247 /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
13248 SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
13249 /*asc*/ lun->ie_asc,
13250 /*ascq*/ lun->ie_ascq,
13251 SSD_ELEM_NONE);
13252 lun->ie_reported = 1;
13253 }
13254 } else if (lun->ie_reported < 0)
13255 lun->ie_reported = 0;
13256
13257 /*
13258 * Check to see if we have any errors to inject here. We only
13259 * inject errors for commands that don't already have errors set.
13260 */
13261 if (!STAILQ_EMPTY(&lun->error_list) &&
13262 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
13263 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
13264 ctl_inject_error(lun, io);
13265
13266 /*
13267 * XXX KDM how do we treat commands that aren't completed
13268 * successfully?
13269 *
13270 * XXX KDM should we also track I/O latency?
13271 */
13272 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
13273 io->io_hdr.io_type == CTL_IO_SCSI) {
13274#ifdef CTL_TIME_IO
13275 struct bintime cur_bt;
13276#endif
13277 int type;
13278
13279 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13280 CTL_FLAG_DATA_IN)
13281 type = CTL_STATS_READ;
13282 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13283 CTL_FLAG_DATA_OUT)
13284 type = CTL_STATS_WRITE;
13285 else
13286 type = CTL_STATS_NO_IO;
13287
13288 lun->stats.ports[targ_port].bytes[type] +=
13289 io->scsiio.kern_total_len;
13290 lun->stats.ports[targ_port].operations[type]++;
13291#ifdef CTL_TIME_IO
13292 bintime_add(&lun->stats.ports[targ_port].dma_time[type],
13293 &io->io_hdr.dma_bt);
13294 getbinuptime(&cur_bt);
13295 bintime_sub(&cur_bt, &io->io_hdr.start_bt);
13296 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
13297#endif
13298 lun->stats.ports[targ_port].num_dmas[type] +=
13299 io->io_hdr.num_dmas;
13300 }
13301
13302 /*
13303 * Remove this from the OOA queue.
13304 */
13305 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
13306#ifdef CTL_TIME_IO
13307 if (TAILQ_EMPTY(&lun->ooa_queue))
13308 lun->last_busy = getsbinuptime();
13309#endif
13310
13311 /*
13312 * Run through the blocked queue on this LUN and see if anything
13313 * has become unblocked, now that this transaction is done.
13314 */
13315 ctl_check_blocked(lun);
13316
13317 /*
13318 * If the LUN has been invalidated, free it if there is nothing
13319 * left on its OOA queue.
13320 */
13321 if ((lun->flags & CTL_LUN_INVALID)
13322 && TAILQ_EMPTY(&lun->ooa_queue)) {
13323 mtx_unlock(&lun->lun_lock);
13324 mtx_lock(&softc->ctl_lock);
13325 ctl_free_lun(lun);
13326 mtx_unlock(&softc->ctl_lock);
13327 } else
13328 mtx_unlock(&lun->lun_lock);
13329
13330bailout:
13331
13332 /*
13333 * If this command has been aborted, make sure we set the status
13334 * properly. The FETD is responsible for freeing the I/O and doing
13335 * whatever it needs to do to clean up its state.
13336 */
13337 if (io->io_hdr.flags & CTL_FLAG_ABORT)
13338 ctl_set_task_aborted(&io->scsiio);
13339
13340 /*
13341 * If enabled, print command error status.
13342 */
13343 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
13344 (ctl_debug & CTL_DEBUG_INFO) != 0)
13345 ctl_io_error_print(io, NULL);
13346
13347 /*
13348 * Tell the FETD or the other shelf controller we're done with this
13349 * command. Note that only SCSI commands get to this point. Task
13350 * management commands are completed above.
13351 */
13352 if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
13353 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
13354 memset(&msg, 0, sizeof(msg));
13355 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
13356 msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
13357 msg.hdr.nexus = io->io_hdr.nexus;
13358 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13359 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
13360 M_WAITOK);
13361 }
13362
13363 fe_done(io);
13364}
13365
13366#ifdef CTL_WITH_CA
13367/*
13368 * Front end should call this if it doesn't do autosense. When the request
13369 * sense comes back in from the initiator, we'll dequeue this and send it.
13370 */
13371int
13372ctl_queue_sense(union ctl_io *io)
13373{
13374 struct ctl_lun *lun;
13375 struct ctl_port *port;
13376 struct ctl_softc *softc;
13377 uint32_t initidx, targ_lun;
13378
13379 CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
13380
13381 softc = control_softc;
13382 port = ctl_io_port(&ctsio->io_hdr);
13383 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13384
13385 /*
13386 * LUN lookup will likely move to the ctl_work_thread() once we
13387 * have our new queueing infrastructure (that doesn't put things on
13388 * a per-LUN queue initially). That is so that we can handle
13389 * things like an INQUIRY to a LUN that we don't have enabled. We
13390 * can't deal with that right now.
13391 * If we don't have a LUN for this, just toss the sense information.
13392 */
13393 mtx_lock(&softc->ctl_lock);
13394 if (targ_lun >= CTL_MAX_LUNS ||
13395 (lun = softc->ctl_luns[targ_lun]) == NULL) {
13396 mtx_unlock(&softc->ctl_lock);
13397 goto bailout;
13398 }
13399 mtx_lock(&lun->lun_lock);
13400 mtx_unlock(&softc->ctl_lock);
13401
13402 /*
13403 * Already have CA set for this LUN...toss the sense information.
13404 */
13405 initidx = ctl_get_initindex(&io->io_hdr.nexus);
13406 if (ctl_is_set(lun->have_ca, initidx)) {
13407 mtx_unlock(&lun->lun_lock);
13408 goto bailout;
13409 }
13410
13411 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
13412 MIN(sizeof(lun->pending_sense[initidx]),
13413 sizeof(io->scsiio.sense_data)));
13414 ctl_set_mask(lun->have_ca, initidx);
13415 mtx_unlock(&lun->lun_lock);
13416
13417bailout:
13418 ctl_free_io(io);
13419 return (CTL_RETVAL_COMPLETE);
13420}
13421#endif
13422
13423/*
13424 * Primary command inlet from frontend ports. All SCSI and task I/O
13425 * requests must go through this function.
13426 */
13427int
13428ctl_queue(union ctl_io *io)
13429{
13430 struct ctl_port *port;
13431
13432 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
13433
13434#ifdef CTL_TIME_IO
13435 io->io_hdr.start_time = time_uptime;
13436 getbinuptime(&io->io_hdr.start_bt);
13437#endif /* CTL_TIME_IO */
13438
13439 /* Map FE-specific LUN ID into global one. */
13440 port = ctl_io_port(&io->io_hdr);
13441 io->io_hdr.nexus.targ_mapped_lun =
13442 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13443
13444 switch (io->io_hdr.io_type) {
13445 case CTL_IO_SCSI:
13446 case CTL_IO_TASK:
13447 if (ctl_debug & CTL_DEBUG_CDB)
13448 ctl_io_print(io);
13449 ctl_enqueue_incoming(io);
13450 break;
13451 default:
13452 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
13453 return (EINVAL);
13454 }
13455
13456 return (CTL_RETVAL_COMPLETE);
13457}
13458
13459#ifdef CTL_IO_DELAY
13460static void
13461ctl_done_timer_wakeup(void *arg)
13462{
13463 union ctl_io *io;
13464
13465 io = (union ctl_io *)arg;
13466 ctl_done(io);
13467}
13468#endif /* CTL_IO_DELAY */
13469
13470void
13471ctl_serseq_done(union ctl_io *io)
13472{
13473 struct ctl_lun *lun;
13474
13475 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13476 if (lun->be_lun == NULL ||
13477 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
13478 return;
13479 mtx_lock(&lun->lun_lock);
13480 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
13481 ctl_check_blocked(lun);
13482 mtx_unlock(&lun->lun_lock);
13483}
13484
13485void
13486ctl_done(union ctl_io *io)
13487{
13488
13489 /*
13490 * Enable this to catch duplicate completion issues.
13491 */
13492#if 0
13493 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
13494 printf("%s: type %d msg %d cdb %x iptl: "
13495 "%u:%u:%u tag 0x%04x "
13496 "flag %#x status %x\n",
13497 __func__,
13498 io->io_hdr.io_type,
13499 io->io_hdr.msg_type,
13500 io->scsiio.cdb[0],
13501 io->io_hdr.nexus.initid,
13502 io->io_hdr.nexus.targ_port,
13503 io->io_hdr.nexus.targ_lun,
13504 (io->io_hdr.io_type ==
13505 CTL_IO_TASK) ?
13506 io->taskio.tag_num :
13507 io->scsiio.tag_num,
13508 io->io_hdr.flags,
13509 io->io_hdr.status);
13510 } else
13511 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
13512#endif
13513
13514 /*
13515 * This is an internal copy of an I/O, and should not go through
13516 * the normal done processing logic.
13517 */
13518 if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
13519 return;
13520
13521#ifdef CTL_IO_DELAY
13522 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
13523 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
13524 } else {
13525 struct ctl_lun *lun;
13526
13527 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13528
13529 if ((lun != NULL)
13530 && (lun->delay_info.done_delay > 0)) {
13531
13532 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
13533 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
13534 callout_reset(&io->io_hdr.delay_callout,
13535 lun->delay_info.done_delay * hz,
13536 ctl_done_timer_wakeup, io);
13537 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
13538 lun->delay_info.done_delay = 0;
13539 return;
13540 }
13541 }
13542#endif /* CTL_IO_DELAY */
13543
13544 ctl_enqueue_done(io);
13545}
13546
13547static void
13548ctl_work_thread(void *arg)
13549{
13550 struct ctl_thread *thr = (struct ctl_thread *)arg;
13551 struct ctl_softc *softc = thr->ctl_softc;
13552 union ctl_io *io;
13553 int retval;
13554
13555 CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
13556
13557 for (;;) {
13558 /*
13559 * We handle the queues in this order:
13560 * - ISC
13561 * - done queue (to free up resources, unblock other commands)
13562 * - RtR queue
13563 * - incoming queue
13564 *
13565 * If those queues are empty, we break out of the loop and
13566 * go to sleep.
13567 */
13568 mtx_lock(&thr->queue_lock);
13569 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
13570 if (io != NULL) {
13571 STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
13572 mtx_unlock(&thr->queue_lock);
13573 ctl_handle_isc(io);
13574 continue;
13575 }
13576 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
13577 if (io != NULL) {
13578 STAILQ_REMOVE_HEAD(&thr->done_queue, links);
13579 /* clear any blocked commands, call fe_done */
13580 mtx_unlock(&thr->queue_lock);
13581 ctl_process_done(io);
13582 continue;
13583 }
13584 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
13585 if (io != NULL) {
13586 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
13587 mtx_unlock(&thr->queue_lock);
13588 if (io->io_hdr.io_type == CTL_IO_TASK)
13589 ctl_run_task(io);
13590 else
13591 ctl_scsiio_precheck(softc, &io->scsiio);
13592 continue;
13593 }
13594 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
13595 if (io != NULL) {
13596 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
13597 mtx_unlock(&thr->queue_lock);
13598 retval = ctl_scsiio(&io->scsiio);
13599 if (retval != CTL_RETVAL_COMPLETE)
13600 CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
13601 continue;
13602 }
13603
13604 /* Sleep until we have something to do. */
13605 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
13606 }
13607}
13608
13609static void
13610ctl_lun_thread(void *arg)
13611{
13612 struct ctl_softc *softc = (struct ctl_softc *)arg;
13613 struct ctl_be_lun *be_lun;
13614
13615 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
13616
13617 for (;;) {
13618 mtx_lock(&softc->ctl_lock);
13619 be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
13620 if (be_lun != NULL) {
13621 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
13622 mtx_unlock(&softc->ctl_lock);
13623 ctl_create_lun(be_lun);
13624 continue;
13625 }
13626
13627 /* Sleep until we have something to do. */
13628 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
13629 PDROP | PRIBIO, "-", 0);
13630 }
13631}
13632
13633static void
13634ctl_thresh_thread(void *arg)
13635{
13636 struct ctl_softc *softc = (struct ctl_softc *)arg;
13637 struct ctl_lun *lun;
13638 struct ctl_logical_block_provisioning_page *page;
13639 const char *attr;
13640 union ctl_ha_msg msg;
13641 uint64_t thres, val;
13642 int i, e, set;
13643
13644 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
13645
13646 for (;;) {
13647 mtx_lock(&softc->ctl_lock);
13648 STAILQ_FOREACH(lun, &softc->lun_list, links) {
13649 if ((lun->flags & CTL_LUN_DISABLED) ||
13650 (lun->flags & CTL_LUN_NO_MEDIA) ||
13651 lun->backend->lun_attr == NULL)
13652 continue;
13653 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
13654 softc->ha_mode == CTL_HA_MODE_XFER)
13655 continue;
13656 if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
13657 continue;
13658 e = 0;
13659 page = &lun->MODE_LBP;
13660 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
13661 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
13662 continue;
13663 thres = scsi_4btoul(page->descr[i].count);
13664 thres <<= CTL_LBP_EXPONENT;
13665 switch (page->descr[i].resource) {
13666 case 0x01:
13667 attr = "blocksavail";
13668 break;
13669 case 0x02:
13670 attr = "blocksused";
13671 break;
13672 case 0xf1:
13673 attr = "poolblocksavail";
13674 break;
13675 case 0xf2:
13676 attr = "poolblocksused";
13677 break;
13678 default:
13679 continue;
13680 }
13681 mtx_unlock(&softc->ctl_lock); // XXX
13682 val = lun->backend->lun_attr(
13683 lun->be_lun->be_lun, attr);
13684 mtx_lock(&softc->ctl_lock);
13685 if (val == UINT64_MAX)
13686 continue;
13687 if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
13688 == SLBPPD_ARMING_INC)
13689 e = (val >= thres);
13690 else
13691 e = (val <= thres);
13692 if (e)
13693 break;
13694 }
13695 mtx_lock(&lun->lun_lock);
13696 if (e) {
13697 scsi_u64to8b((uint8_t *)&page->descr[i] -
13698 (uint8_t *)page, lun->ua_tpt_info);
13699 if (lun->lasttpt == 0 ||
13700 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
13701 lun->lasttpt = time_uptime;
13702 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
13703 set = 1;
13704 } else
13705 set = 0;
13706 } else {
13707 lun->lasttpt = 0;
13708 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
13709 set = -1;
13710 }
13711 mtx_unlock(&lun->lun_lock);
13712 if (set != 0 &&
13713 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
13714 /* Send msg to other side. */
13715 bzero(&msg.ua, sizeof(msg.ua));
13716 msg.hdr.msg_type = CTL_MSG_UA;
13717 msg.hdr.nexus.initid = -1;
13718 msg.hdr.nexus.targ_port = -1;
13719 msg.hdr.nexus.targ_lun = lun->lun;
13720 msg.hdr.nexus.targ_mapped_lun = lun->lun;
13721 msg.ua.ua_all = 1;
13722 msg.ua.ua_set = (set > 0);
13723 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
13724 memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
13725 mtx_unlock(&softc->ctl_lock); // XXX
13726 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13727 sizeof(msg.ua), M_WAITOK);
13728 mtx_lock(&softc->ctl_lock);
13729 }
13730 }
13731 mtx_unlock(&softc->ctl_lock);
13732 pause("-", CTL_LBP_PERIOD * hz);
13733 }
13734}
13735
13736static void
13737ctl_enqueue_incoming(union ctl_io *io)
13738{
13739 struct ctl_softc *softc = control_softc;
13740 struct ctl_thread *thr;
13741 u_int idx;
13742
13743 idx = (io->io_hdr.nexus.targ_port * 127 +
13744 io->io_hdr.nexus.initid) % worker_threads;
13745 thr = &softc->threads[idx];
13746 mtx_lock(&thr->queue_lock);
13747 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
13748 mtx_unlock(&thr->queue_lock);
13749 wakeup(thr);
13750}
13751
13752static void
13753ctl_enqueue_rtr(union ctl_io *io)
13754{
13755 struct ctl_softc *softc = control_softc;
13756 struct ctl_thread *thr;
13757
13758 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13759 mtx_lock(&thr->queue_lock);
13760 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
13761 mtx_unlock(&thr->queue_lock);
13762 wakeup(thr);
13763}
13764
13765static void
13766ctl_enqueue_done(union ctl_io *io)
13767{
13768 struct ctl_softc *softc = control_softc;
13769 struct ctl_thread *thr;
13770
13771 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13772 mtx_lock(&thr->queue_lock);
13773 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
13774 mtx_unlock(&thr->queue_lock);
13775 wakeup(thr);
13776}
13777
13778static void
13779ctl_enqueue_isc(union ctl_io *io)
13780{
13781 struct ctl_softc *softc = control_softc;
13782 struct ctl_thread *thr;
13783
13784 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13785 mtx_lock(&thr->queue_lock);
13786 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
13787 mtx_unlock(&thr->queue_lock);
13788 wakeup(thr);
13789}
13790
13791/*
13792 * vim: ts=8
13793 */