1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * SCSI Primary Commands (SPC) parsing and emulation.
4 *
5 * (c) Copyright 2002-2013 Datera, Inc.
6 *
7 * Nicholas A. Bellinger <nab@kernel.org>
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <asm/unaligned.h>
13
14#include <scsi/scsi_proto.h>
15#include <scsi/scsi_common.h>
16#include <scsi/scsi_tcq.h>
17
18#include <target/target_core_base.h>
19#include <target/target_core_backend.h>
20#include <target/target_core_fabric.h>
21
22#include "target_core_internal.h"
23#include "target_core_alua.h"
24#include "target_core_pr.h"
25#include "target_core_ua.h"
26#include "target_core_xcopy.h"
27
28static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
29{
30	struct t10_alua_tg_pt_gp *tg_pt_gp;
31
32	/*
33	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
34	 */
35	buf[5]	= 0x80;
36
37	/*
38	 * Set TPGS field for explicit and/or implicit ALUA access type
39	 * and operation.
40	 *
41	 * See spc4r17 section 6.4.2 Table 135
42	 */
43	rcu_read_lock();
44	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
45	if (tg_pt_gp)
46		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
47	rcu_read_unlock();
48}
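
/*
 * Illustrative sketch (not part of the original source), assuming the usual
 * TPGS_IMPLICIT_ALUA (0x10) and TPGS_EXPLICIT_ALUA (0x20) definitions: the
 * access type ORed in above lands directly in the TPGS field of INQUIRY
 * byte 5, so a LUN whose target port group allows only implicit ALUA
 * reports buf[5] = 0x80 | 0x10 = 0x90 (SCCS=1, TPGS=01b), while a group
 * allowing both implicit and explicit ALUA reports 0x80 | 0x30 = 0xb0.
 */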
49
50static u16
51spc_find_scsi_transport_vd(int proto_id)
52{
53	switch (proto_id) {
54	case SCSI_PROTOCOL_FCP:
55		return SCSI_VERSION_DESCRIPTOR_FCP4;
56	case SCSI_PROTOCOL_ISCSI:
57		return SCSI_VERSION_DESCRIPTOR_ISCSI;
58	case SCSI_PROTOCOL_SAS:
59		return SCSI_VERSION_DESCRIPTOR_SAS3;
60	case SCSI_PROTOCOL_SBP:
61		return SCSI_VERSION_DESCRIPTOR_SBP3;
62	case SCSI_PROTOCOL_SRP:
63		return SCSI_VERSION_DESCRIPTOR_SRP;
64	default:
65		pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI"
66			" transport PROTOCOL IDENTIFIER %#x\n", proto_id);
67		return 0;
68	}
69}
70
71sense_reason_t
72spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
73{
74	struct se_lun *lun = cmd->se_lun;
75	struct se_portal_group *tpg = lun->lun_tpg;
76	struct se_device *dev = cmd->se_dev;
77	struct se_session *sess = cmd->se_sess;
78
79	/* Set RMB (removable media) for tape devices */
80	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
81		buf[1] = 0x80;
82
83	buf[2] = 0x06; /* SPC-4 */
84
85	/*
86	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
87	 *
88	 * SPC4 says:
89	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
90	 *   standard INQUIRY data is in the format defined in this
91	 *   standard. Response data format values less than 2h are
92	 *   obsolete. Response data format values greater than 2h are
93	 *   reserved.
94	 */
95	buf[3] = 2;
96
97	/*
98	 * Enable SCCS and TPGS fields for Emulated ALUA
99	 */
100	spc_fill_alua_data(lun, buf);
101
102	/*
103	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
104	 */
105	if (dev->dev_attrib.emulate_3pc)
106		buf[5] |= 0x8;
107	/*
108	 * Set Protection (PROTECT) bit when DIF has been enabled on the
109	 * device, and the fabric supports VERIFY + PASS.  Also report
110	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
111	 * to be exposed to unprotected devices.
112	 */
113	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
114		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
115			buf[5] |= 0x1;
116	}
117
118	/*
119	 * Set MULTIP bit to indicate presence of multiple SCSI target ports
120	 */
121	if (dev->export_count > 1)
122		buf[6] |= 0x10;
123
124	buf[7] = 0x2; /* CmdQue=1 */
125
126	/*
127	 * ASCII data fields described as being left-aligned shall have any
128	 * unused bytes at the end of the field (i.e., highest offset) and the
129	 * unused bytes shall be filled with ASCII space characters (20h).
130	 */
131	memset(&buf[8], 0x20,
132	       INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
133	memcpy(&buf[8], dev->t10_wwn.vendor,
134	       strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
135	memcpy(&buf[16], dev->t10_wwn.model,
136	       strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
137	memcpy(&buf[32], dev->t10_wwn.revision,
138	       strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
139
140	/*
141	 * Set the VERSION DESCRIPTOR fields
142	 */
143	put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]);
144	put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]);
145	put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]);
146	if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK)
147		put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]);
148
149	buf[4] = 91; /* ADDITIONAL LENGTH: bytes 5..95, i.e. a 96-byte response */
150
151	return 0;
152}
153EXPORT_SYMBOL(spc_emulate_inquiry_std);
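
/*
 * Byte map of the 96-byte standard INQUIRY payload built above, derived
 * from the code (not an exhaustive SPC-4 listing):
 *
 *	0	PERIPHERAL DEVICE TYPE
 *	1	RMB (set for TYPE_TAPE)
 *	2	VERSION (0x06, SPC-4)
 *	3	RESPONSE DATA FORMAT (2)
 *	4	ADDITIONAL LENGTH (91, i.e. bytes 5..95)
 *	5	SCCS / TPGS / 3PC / PROTECT
 *	6	MULTIP
 *	7	CmdQue
 *	8-15	T10 VENDOR IDENTIFICATION
 *	16-31	PRODUCT IDENTIFICATION
 *	32-35	PRODUCT REVISION LEVEL
 *	58-65	VERSION DESCRIPTORs (SAM-5, transport, SPC-4, SBC-3)
 */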
154
155/* unit serial number */
156static sense_reason_t
157spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
158{
159	struct se_device *dev = cmd->se_dev;
160	u16 len;
161
162	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
163		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
164		len++; /* Extra Byte for NULL Terminator */
165		buf[3] = len;
166	}
167	return 0;
168}
169
170/*
171 * Generate NAA IEEE Registered Extended designator
172 */
173void spc_gen_naa_6h_vendor_specific(struct se_device *dev,
174				    unsigned char *buf)
175{
176	unsigned char *p = &dev->t10_wwn.unit_serial[0];
177	u32 company_id = dev->t10_wwn.company_id;
178	int cnt, off = 0;
179	bool next = true;
180
181	/*
182	 * Start NAA IEEE Registered Extended Identifier/Designator
183	 */
184	buf[off] = 0x6 << 4;
185
186	/* IEEE COMPANY_ID */
187	buf[off++] |= (company_id >> 20) & 0xf;
188	buf[off++] = (company_id >> 12) & 0xff;
189	buf[off++] = (company_id >> 4) & 0xff;
190	buf[off] = (company_id & 0xf) << 4;
191
192	/*
193	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
194	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
195	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
196	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT SERIAL
197	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure per
198	 * device uniqueness; a worked encoding example follows this function.
199	 */
200	for (cnt = off + 13; *p && off < cnt; p++) {
201		int val = hex_to_bin(*p);
202
203		if (val < 0)
204			continue;
205
206		if (next) {
207			next = false;
208			buf[off++] |= val;
209		} else {
210			next = true;
211			buf[off] = val << 4;
212		}
213	}
214}
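
/*
 * Worked example with hypothetical values, for illustration only: with
 * company_id = 0x123456 and unit_serial = "FEDCBA98-7654-3210", the loop
 * above skips the '-' characters (hex_to_bin() returns -1 for them) and
 * packs one hex digit per nibble, producing:
 *
 *	buf[0..3]  = 0x61 0x23 0x45 0x6f	(NAA=6h + 24-bit company_id)
 *	buf[4..11] = 0xed 0xcb 0xa9 0x87 0x65 0x43 0x21 0x00
 *	buf[12..15] stay 0x00 (the EVPD response buffer is zero-initialized)
 */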
215
216/*
217 * Device identification VPD, for a complete list of
218 * DESIGNATOR TYPEs see spc4r17 Table 459.
219 */
220sense_reason_t
221spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
222{
223	struct se_device *dev = cmd->se_dev;
224	struct se_lun *lun = cmd->se_lun;
225	struct se_portal_group *tpg = NULL;
226	struct t10_alua_lu_gp_member *lu_gp_mem;
227	struct t10_alua_tg_pt_gp *tg_pt_gp;
228	unsigned char *prod = &dev->t10_wwn.model[0];
229	u32 off = 0;
230	u16 len = 0, id_len;
231
232	off = 4;
233
234	/*
235	 * NAA IEEE Registered Extended Assigned designator format, see
236	 * spc4r17 section 7.7.3.6.5
237	 *
238	 * We depend upon a target_core_mod/ConfigFS provided
239	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
240	 * value in order to return the NAA id.
241	 */
242	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
243		goto check_t10_vend_desc;
244
245	/* CODE SET == Binary */
246	buf[off++] = 0x1;
247
248	/* Set ASSOCIATION == addressed logical unit: 00b */
249	buf[off] = 0x00;
250
251	/* Identifier/Designator type == NAA identifier */
252	buf[off++] |= 0x3;
253	off++;
254
255	/* Identifier/Designator length */
256	buf[off++] = 0x10;
257
258	/* NAA IEEE Registered Extended designator */
259	spc_gen_naa_6h_vendor_specific(dev, &buf[off]);
260
261	len = 20;
262	off = (len + 4);
263
264check_t10_vend_desc:
265	/*
266	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
267	 */
268	id_len = 8; /* For Vendor field */
269
270	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)
271		id_len += sprintf(&buf[off+12], "%s:%s", prod,
272				&dev->t10_wwn.unit_serial[0]);
273	buf[off] = 0x2; /* ASCII */
274	buf[off+1] = 0x1; /* T10 Vendor ID */
275	buf[off+2] = 0x0;
276	/* left align Vendor ID and pad with spaces */
277	memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
278	memcpy(&buf[off+4], dev->t10_wwn.vendor,
279	       strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
280	/* Extra Byte for NULL Terminator */
281	id_len++;
282	/* Identifier Length */
283	buf[off+3] = id_len;
284	/* Header size for Designation descriptor */
285	len += (id_len + 4);
286	off += (id_len + 4);
287
288	if (1) {
289		struct t10_alua_lu_gp *lu_gp;
290		u32 padding, scsi_name_len, scsi_target_len;
291		u16 lu_gp_id = 0;
292		u16 tg_pt_gp_id = 0;
293		u16 tpgt;
294
295		tpg = lun->lun_tpg;
296		/*
297		 * Relative target port identifier, see spc4r17
298		 * section 7.7.3.7
299		 *
300		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
301		 * section 7.5.1 Table 362
302		 */
303		buf[off] = tpg->proto_id << 4;
304		buf[off++] |= 0x1; /* CODE SET == Binary */
305		buf[off] = 0x80; /* Set PIV=1 */
306		/* Set ASSOCIATION == target port: 01b */
307		buf[off] |= 0x10;
308		/* DESIGNATOR TYPE == Relative target port identifier */
309		buf[off++] |= 0x4;
310		off++; /* Skip over Reserved */
311		buf[off++] = 4; /* DESIGNATOR LENGTH */
312		/* Skip over Obsolete field in RTPI payload
313		 * in Table 472 */
314		off += 2;
315		put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
316		off += 2;
317		len += 8; /* Header size + Designation descriptor */
318		/*
319		 * Target port group identifier, see spc4r17
320		 * section 7.7.3.8
321		 *
322		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
323		 * section 7.5.1 Table 362
324		 */
325		rcu_read_lock();
326		tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
327		if (!tg_pt_gp) {
328			rcu_read_unlock();
329			goto check_lu_gp;
330		}
331		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
332		rcu_read_unlock();
333
334		buf[off] = tpg->proto_id << 4;
335		buf[off++] |= 0x1; /* CODE SET == Binary */
336		buf[off] = 0x80; /* Set PIV=1 */
337		/* Set ASSOCIATION == target port: 01b */
338		buf[off] |= 0x10;
339		/* DESIGNATOR TYPE == Target port group identifier */
340		buf[off++] |= 0x5;
341		off++; /* Skip over Reserved */
342		buf[off++] = 4; /* DESIGNATOR LENGTH */
343		off += 2; /* Skip over Reserved Field */
344		put_unaligned_be16(tg_pt_gp_id, &buf[off]);
345		off += 2;
346		len += 8; /* Header size + Designation descriptor */
347		/*
348		 * Logical Unit Group identifier, see spc4r17
349		 * section 7.7.3.8
350		 */
351check_lu_gp:
352		lu_gp_mem = dev->dev_alua_lu_gp_mem;
353		if (!lu_gp_mem)
354			goto check_scsi_name;
355
356		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
357		lu_gp = lu_gp_mem->lu_gp;
358		if (!lu_gp) {
359			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
360			goto check_scsi_name;
361		}
362		lu_gp_id = lu_gp->lu_gp_id;
363		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
364
365		buf[off++] |= 0x1; /* CODE SET == Binary */
366		/* DESIGNATOR TYPE == Logical Unit Group identifier */
367		buf[off++] |= 0x6;
368		off++; /* Skip over Reserved */
369		buf[off++] = 4; /* DESIGNATOR LENGTH */
370		off += 2; /* Skip over Reserved Field */
371		put_unaligned_be16(lu_gp_id, &buf[off]);
372		off += 2;
373		len += 8; /* Header size + Designation descriptor */
374		/*
375		 * SCSI name string designator, see spc4r17
376		 * section 7.7.3.11
377		 *
378		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
379		 * section 7.5.1 Table 362
380		 */
381check_scsi_name:
382		buf[off] = tpg->proto_id << 4;
383		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
384		buf[off] = 0x80; /* Set PIV=1 */
385		/* Set ASSOCIATION == target port: 01b */
386		buf[off] |= 0x10;
387		/* DESIGNATOR TYPE == SCSI name string */
388		buf[off++] |= 0x8;
389		off += 2; /* Skip over Reserved and length */
390		/*
391		 * SCSI name string identifier containing $FABRIC_MOD
392		 * dependent information.  For LIO-Target and iSCSI
393		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
394		 * UTF-8 encoding.
395		 */
396		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
397		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
398					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
399		scsi_name_len += 1 /* Include NULL terminator */;
400		/*
401		 * The null-terminated, null-padded (see 4.4.2) SCSI
402		 * NAME STRING field contains a UTF-8 format string.
403		 * The number of bytes in the SCSI NAME STRING field
404		 * (i.e., the value in the DESIGNATOR LENGTH field)
405		 * shall be no larger than 256 and shall be a multiple
406		 * of four.
407		 */
408		padding = ((-scsi_name_len) & 3);
409		if (padding)
410			scsi_name_len += padding;
411		if (scsi_name_len > 256)
412			scsi_name_len = 256;
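		/*
		 * Worked example of the rounding above (illustrative only):
		 * a 53-byte name string gives padding = (-53) & 3 = 3, so
		 * the DESIGNATOR LENGTH written below becomes 56, the next
		 * multiple of four.
		 */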
413
414		buf[off-1] = scsi_name_len;
415		off += scsi_name_len;
416		/* Header size + Designation descriptor */
417		len += (scsi_name_len + 4);
418
419		/*
420		 * Target device designator
421		 */
422		buf[off] = tpg->proto_id << 4;
423		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
424		buf[off] = 0x80; /* Set PIV=1 */
425		/* Set ASSOCIATION == target device: 10b */
426		buf[off] |= 0x20;
427		/* DESIGNATOR TYPE == SCSI name string */
428		buf[off++] |= 0x8;
429		off += 2; /* Skip over Reserved and length */
430		/*
431		 * SCSI name string identifier containing $FABRIC_MOD
432		 * dependent information.  For LIO-Target and iSCSI
433		 * Target Port, this means "<iSCSI name>" in
434		 * UTF-8 encoding.
435		 */
436		scsi_target_len = sprintf(&buf[off], "%s",
437					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
438		scsi_target_len += 1 /* Include NULL terminator */;
439		/*
440		 * The null-terminated, null-padded (see 4.4.2) SCSI
441		 * NAME STRING field contains a UTF-8 format string.
442		 * The number of bytes in the SCSI NAME STRING field
443		 * (i.e., the value in the DESIGNATOR LENGTH field)
444		 * shall be no larger than 256 and shall be a multiple
445		 * of four.
446		 */
447		padding = ((-scsi_target_len) & 3);
448		if (padding)
449			scsi_target_len += padding;
450		if (scsi_target_len > 256)
451			scsi_target_len = 256;
452
453		buf[off-1] = scsi_target_len;
454		off += scsi_target_len;
455
456		/* Header size + Designation descriptor */
457		len += (scsi_target_len + 4);
458	}
459	put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
460	return 0;
461}
462EXPORT_SYMBOL(spc_emulate_evpd_83);
463
464/* Extended INQUIRY Data VPD Page */
465static sense_reason_t
466spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
467{
468	struct se_device *dev = cmd->se_dev;
469	struct se_session *sess = cmd->se_sess;
470
471	buf[3] = 0x3c;
472	/*
473	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
474	 * only for TYPE3 protection.
475	 */
476	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
477		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
478		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
479			buf[4] = 0x5;
480		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
481			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
482			buf[4] = 0x4;
483	}
484
485	/* logical unit supports type 1 and type 3 protection */
486	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
487	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
488	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
489		buf[4] |= (0x3 << 3);
490	}
491
492	/* Set HEADSUP, ORDSUP, SIMPSUP */
493	buf[5] = 0x07;
494
495	/* If WriteCache emulation is enabled, set V_SUP */
496	if (target_check_wce(dev))
497		buf[6] = 0x01;
498	/* If an LBA map is present set R_SUP */
499	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
500	if (!list_empty(&dev->t10_alua.lba_map_list))
501		buf[8] = 0x10;
502	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
503	return 0;
504}
505
506/* Block Limits VPD page */
507static sense_reason_t
508spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
509{
510	struct se_device *dev = cmd->se_dev;
511	u32 mtl = 0;
512	int have_tp = 0, opt, min;
513	u32 io_max_blocks;
514
515	/*
516	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
517	 * emulate_tpu=1 or emulate_tpws=1 we report a
518	 * different page length for Thin Provisioning.
519	 */
520	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
521		have_tp = 1;
522
523	buf[0] = dev->transport->get_device_type(dev);
524	buf[3] = have_tp ? 0x3c : 0x10;
525
526	/* Set WSNZ to 1 */
527	buf[4] = 0x01;
528	/*
529	 * Set MAXIMUM COMPARE AND WRITE LENGTH
530	 */
531	if (dev->dev_attrib.emulate_caw)
532		buf[5] = 0x01;
533
534	/*
535	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
536	 */
537	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
538		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
539	else
540		put_unaligned_be16(1, &buf[6]);
541
542	/*
543	 * Set MAXIMUM TRANSFER LENGTH
544	 *
545	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
546	 * enforcing maximum HW scatter-gather-list entry limit
547	 */
548	if (cmd->se_tfo->max_data_sg_nents) {
549		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
550		       dev->dev_attrib.block_size;
551	}
552	io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors,
553			dev->dev_attrib.hw_block_size,
554			dev->dev_attrib.block_size);
555	put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);
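	/*
	 * Worked example with hypothetical attribute values: hw_max_sectors =
	 * 16384, hw_block_size = 512 and block_size = 4096 give io_max_blocks
	 * = 16384 * 512 / 4096 = 2048; a fabric advertising max_data_sg_nents
	 * = 168 with 4K pages gives mtl = 168 * 4096 / 4096 = 168, so the
	 * MAXIMUM TRANSFER LENGTH above is min_not_zero(168, 2048) = 168.
	 */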
556
557	/*
558	 * Set OPTIMAL TRANSFER LENGTH
559	 */
560	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
561		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
562	else
563		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
564
565	/*
566	 * Exit now if we don't support TP.
567	 */
568	if (!have_tp)
569		goto max_write_same;
570
571	/*
572	 * Set MAXIMUM UNMAP LBA COUNT
573	 */
574	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
575
576	/*
577	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
578	 */
579	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
580			   &buf[24]);
581
582	/*
583	 * Set OPTIMAL UNMAP GRANULARITY
584	 */
585	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
586
587	/*
588	 * UNMAP GRANULARITY ALIGNMENT
589	 */
590	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
591			   &buf[32]);
592	if (dev->dev_attrib.unmap_granularity_alignment != 0)
593		buf[32] |= 0x80; /* Set the UGAVALID bit */
594
595	/*
596	 * MAXIMUM WRITE SAME LENGTH
597	 */
598max_write_same:
599	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
600
601	return 0;
602}
603
604/* Block Device Characteristics VPD page */
605static sense_reason_t
606spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
607{
608	struct se_device *dev = cmd->se_dev;
609
610	buf[0] = dev->transport->get_device_type(dev);
611	buf[3] = 0x3c;
612	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
613
614	return 0;
615}
616
617/* Thin Provisioning VPD */
618static sense_reason_t
619spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
620{
621	struct se_device *dev = cmd->se_dev;
622
623	/*
624	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
625	 *
626	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
627	 * zero, then the page length shall be set to 0004h.  If the DP bit
628	 * is set to one, then the page length shall be set to the value
629	 * defined in table 162.
630	 */
631	buf[0] = dev->transport->get_device_type(dev);
632
633	/*
634	 * Set Hardcoded length mentioned above for DP=0
635	 */
636	put_unaligned_be16(0x0004, &buf[2]);
637
638	/*
639	 * The THRESHOLD EXPONENT field indicates the threshold set size in
640	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
641	 * 2^(threshold exponent)).
642	 *
643	 * Note that this is currently set to 0x00 as mkp says it will be
644	 * changing again.  We can enable this once it has settled in T10
645	 * and is actually used by Linux/SCSI ML code.
646	 */
647	buf[4] = 0x00;
648
649	/*
650	 * A TPU bit set to one indicates that the device server supports
651	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
652	 * that the device server does not support the UNMAP command.
653	 */
654	if (dev->dev_attrib.emulate_tpu != 0)
655		buf[5] = 0x80;
656
657	/*
658	 * A TPWS bit set to one indicates that the device server supports
659	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
660	 * A TPWS bit set to zero indicates that the device server does not
661	 * support the use of the WRITE SAME (16) command to unmap LBAs.
662	 */
663	if (dev->dev_attrib.emulate_tpws != 0)
664		buf[5] |= 0x40 | 0x20;
665
666	/*
667	 * A set unmap_zeroes_data attribute means that the underlying device supports
668	 * REQ_OP_DISCARD and has the discard_zeroes_data bit set. This
669	 * satisfies the SBC requirements for LBPRZ, meaning that a subsequent
670	 * read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA.
671	 * See sbc4r36 6.6.4.
672	 */
673	if (((dev->dev_attrib.emulate_tpu != 0) ||
674	     (dev->dev_attrib.emulate_tpws != 0)) &&
675	     (dev->dev_attrib.unmap_zeroes_data != 0))
676		buf[5] |= 0x04;
677
678	return 0;
679}
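
/*
 * Putting the bits above together (illustration only): a device configured
 * with emulate_tpu=1, emulate_tpws=1 and unmap_zeroes_data=1 returns
 * buf[5] = 0x80 | 0x40 | 0x20 | 0x04 = 0xe4, i.e. the UNMAP (TPU),
 * WRITE SAME (TPWS) and LBPRZ indications all set.
 */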
680
681/* Referrals VPD page */
682static sense_reason_t
683spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
684{
685	struct se_device *dev = cmd->se_dev;
686
687	buf[0] = dev->transport->get_device_type(dev);
688	buf[3] = 0x0c;
689	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
690	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
691
692	return 0;
693}
694
695static sense_reason_t
696spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
697
698static struct {
699	uint8_t		page;
700	sense_reason_t	(*emulate)(struct se_cmd *, unsigned char *);
701} evpd_handlers[] = {
702	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
703	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
704	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
705	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
706	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
707	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
708	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
709	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
710};
711
712/* supported vital product data pages */
713static sense_reason_t
714spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
715{
716	int p;
717
718	/*
719	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
720	 * Registered Extended LUN WWN has been set via ConfigFS
721	 * during device creation/restart.
722	 */
723	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
724		buf[3] = ARRAY_SIZE(evpd_handlers);
725		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
726			buf[p + 4] = evpd_handlers[p].page;
727	}
728
729	return 0;
730}
731
732static sense_reason_t
733spc_emulate_inquiry(struct se_cmd *cmd)
734{
735	struct se_device *dev = cmd->se_dev;
736	unsigned char *rbuf;
737	unsigned char *cdb = cmd->t_task_cdb;
738	unsigned char *buf;
739	sense_reason_t ret;
740	int p;
741	int len = 0;
742
743	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
744	if (!buf) {
745		pr_err("Unable to allocate response buffer for INQUIRY\n");
746		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
747	}
748
749	buf[0] = dev->transport->get_device_type(dev);
750
751	if (!(cdb[1] & 0x1)) {
752		if (cdb[2]) {
753			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
754			       cdb[2]);
755			ret = TCM_INVALID_CDB_FIELD;
756			goto out;
757		}
758
759		ret = spc_emulate_inquiry_std(cmd, buf);
760		len = buf[4] + 5;
761		goto out;
762	}
763
764	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
765		if (cdb[2] == evpd_handlers[p].page) {
766			buf[1] = cdb[2];
767			ret = evpd_handlers[p].emulate(cmd, buf);
768			len = get_unaligned_be16(&buf[2]) + 4;
769			goto out;
770		}
771	}
772
773	pr_debug("Unknown VPD Code: 0x%02x\n", cdb[2]);
774	ret = TCM_INVALID_CDB_FIELD;
775
776out:
777	rbuf = transport_kmap_data_sg(cmd);
778	if (rbuf) {
779		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
780		transport_kunmap_data_sg(cmd);
781	}
782	kfree(buf);
783
784	if (!ret)
785		target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len);
786	return ret;
787}
788
789static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
790{
791	p[0] = 0x01;
792	p[1] = 0x0a;
793
794	/* No changeable values for now */
795	if (pc == 1)
796		goto out;
797
798out:
799	return 12;
800}
801
802static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
803{
804	struct se_device *dev = cmd->se_dev;
805	struct se_session *sess = cmd->se_sess;
806
807	p[0] = 0x0a;
808	p[1] = 0x0a;
809
810	/* No changeable values for now */
811	if (pc == 1)
812		goto out;
813
814	/* GLTSD: No implicit save of log parameters */
815	p[2] = (1 << 1);
816	if (target_sense_desc_format(dev))
817		/* D_SENSE: Descriptor format sense data for 64bit sectors */
818		p[2] |= (1 << 2);
819
820	/*
821	 * From spc4r23, 7.4.7 Control mode page
822	 *
823	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
824	 * restrictions on the algorithm used for reordering commands
825	 * having the SIMPLE task attribute (see SAM-4).
826	 *
827	 *                    Table 368 -- QUEUE ALGORITHM MODIFIER field
828	 *                         Code      Description
829	 *                          0h       Restricted reordering
830	 *                          1h       Unrestricted reordering allowed
831	 *                          2h to 7h    Reserved
832	 *                          8h to Fh    Vendor specific
833	 *
834	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
835	 * the device server shall order the processing sequence of commands
836	 * having the SIMPLE task attribute such that data integrity is maintained
837	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
838	 * requests is halted at any time, the final value of all data observable
839	 * on the medium shall be the same as if all the commands had been processed
840	 * with the ORDERED task attribute).
841	 *
842	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
843	 * device server may reorder the processing sequence of commands having the
844	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
845	 * command sequence order shall be explicitly handled by the application client
846	 * through the selection of appropriate commands and task attributes.
847	 */
848	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
849	/*
850	 * From spc4r17, section 7.4.6 Control mode Page
851	 *
852	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
853	 *
854	 * 00b: The logical unit shall clear any unit attention condition
855	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
856	 * status and shall not establish a unit attention condition when a com-
857	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
858	 * status.
859	 *
860	 * 10b: The logical unit shall not clear any unit attention condition
861	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
862	 * status and shall not establish a unit attention condition when
863	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
864	 * CONFLICT status.
865	 *
866	 * 11b: The logical unit shall not clear any unit attention condition
867	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
868	 * status and shall establish a unit attention condition for the
869	 * initiator port associated with the I_T nexus on which the BUSY,
870	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
871	 * Depending on the status, the additional sense code shall be set to
872	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
873	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
874	 * command, a unit attention condition shall be established only once
875	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
876	 * of the number of commands completed with one of those status codes.
877	 */
878	switch (dev->dev_attrib.emulate_ua_intlck_ctrl) {
879	case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA:
880		p[4] = 0x30;
881		break;
882	case TARGET_UA_INTLCK_CTRL_NO_CLEAR:
883		p[4] = 0x20;
884		break;
885	default:	/* TARGET_UA_INTLCK_CTRL_CLEAR */
886		p[4] = 0x00;
887		break;
888	}
889	/*
890	 * From spc4r17, section 7.4.6 Control mode Page
891	 *
892	 * Task Aborted Status (TAS) bit set to zero.
893	 *
894	 * A task aborted status (TAS) bit set to zero specifies that aborted
895	 * tasks shall be terminated by the device server without any response
896	 * to the application client. A TAS bit set to one specifies that tasks
897	 * aborted by the actions of an I_T nexus other than the I_T nexus on
898	 * which the command was received shall be completed with TASK ABORTED
899	 * status (see SAM-4).
900	 */
901	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
902	/*
903	 * From spc4r30, section 7.5.7 Control mode page
904	 *
905	 * Application Tag Owner (ATO) bit set to one.
906	 *
907	 * If the ATO bit is set to one the device server shall not modify the
908	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
909	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
910	 * TAG field.
911	 */
912	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
913		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
914			p[5] |= 0x80;
915	}
916
917	p[8] = 0xff;
918	p[9] = 0xff;
919	p[11] = 30;
920
921out:
922	return 12;
923}
924
925static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
926{
927	struct se_device *dev = cmd->se_dev;
928
929	p[0] = 0x08;
930	p[1] = 0x12;
931
932	/* No changeable values for now */
933	if (pc == 1)
934		goto out;
935
936	if (target_check_wce(dev))
937		p[2] = 0x04; /* Write Cache Enable */
938	p[12] = 0x20; /* Disabled Read Ahead */
939
940out:
941	return 20;
942}
943
944static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
945{
946	p[0] = 0x1c;
947	p[1] = 0x0a;
948
949	/* No changeable values for now */
950	if (pc == 1)
951		goto out;
952
953out:
954	return 12;
955}
956
957static struct {
958	uint8_t		page;
959	uint8_t		subpage;
960	int		(*emulate)(struct se_cmd *, u8, unsigned char *);
961} modesense_handlers[] = {
962	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
963	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
964	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
965	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
966};
967
968static void spc_modesense_write_protect(unsigned char *buf, int type)
969{
970	/*
971	 * I believe that the WP bit (bit 7) in the mode header is the same for
972	 * all device types.
973	 */
974	switch (type) {
975	case TYPE_DISK:
976	case TYPE_TAPE:
977	default:
978		buf[0] |= 0x80; /* WP bit */
979		break;
980	}
981}
982
983static void spc_modesense_dpofua(unsigned char *buf, int type)
984{
985	switch (type) {
986	case TYPE_DISK:
987		buf[0] |= 0x10; /* DPOFUA bit */
988		break;
989	default:
990		break;
991	}
992}
993
994static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
995{
996	*buf++ = 8;
997	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
998	buf += 4;
999	put_unaligned_be32(block_size, buf);
1000	return 9;
1001}
1002
1003static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
1004{
1005	if (blocks <= 0xffffffff)
1006		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
1007
1008	*buf++ = 1;		/* LONGLBA */
1009	buf += 2;
1010	*buf++ = 16;
1011	put_unaligned_be64(blocks, buf);
1012	buf += 12;
1013	put_unaligned_be32(block_size, buf);
1014
1015	return 17;
1016}
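
/*
 * Resulting layout for the common MODE SENSE(10) case (block count fits in
 * 32 bits), assuming for illustration a 512-byte block size and 0x100000
 * blocks: bytes 0-7 form the mode parameter header with BLOCK DESCRIPTOR
 * LENGTH = 8 in byte 7, bytes 8-11 carry 0x00100000 (NUMBER OF BLOCKS),
 * bytes 12-15 carry 0x00000200 (BLOCK LENGTH) and the mode pages start at
 * byte 16.
 */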
1017
1018static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
1019{
1020	struct se_device *dev = cmd->se_dev;
1021	char *cdb = cmd->t_task_cdb;
1022	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
1023	int type = dev->transport->get_device_type(dev);
1024	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
1025	bool dbd = !!(cdb[1] & 0x08);
1026	bool llba = ten ? !!(cdb[1] & 0x10) : false;
1027	u8 pc = cdb[2] >> 6;
1028	u8 page = cdb[2] & 0x3f;
1029	u8 subpage = cdb[3];
1030	int length = 0;
1031	int ret;
1032	int i;
1033
1034	memset(buf, 0, SE_MODE_PAGE_BUF);
1035
1036	/*
1037	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
1038	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
1039	 */
1040	length = ten ? 3 : 2;
1041
1042	/* DEVICE-SPECIFIC PARAMETER */
1043	if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
1044		spc_modesense_write_protect(&buf[length], type);
1045
1046	/*
1047	 * SBC only allows us to enable FUA and DPO together.  Fortunately
1048	 * DPO is explicitly specified as a hint, so a noop is a perfectly
1049	 * valid implementation.
1050	 */
1051	if (target_check_fua(dev))
1052		spc_modesense_dpofua(&buf[length], type);
1053
1054	++length;
1055
1056	/* BLOCK DESCRIPTOR */
1057
1058	/*
1059	 * For now we only include a block descriptor for disk (SBC)
1060	 * devices; other command sets use a slightly different format.
1061	 */
1062	if (!dbd && type == TYPE_DISK) {
1063		u64 blocks = dev->transport->get_blocks(dev);
1064		u32 block_size = dev->dev_attrib.block_size;
1065
1066		if (ten) {
1067			if (llba) {
1068				length += spc_modesense_long_blockdesc(&buf[length],
1069								       blocks, block_size);
1070			} else {
1071				length += 3;
1072				length += spc_modesense_blockdesc(&buf[length],
1073								  blocks, block_size);
1074			}
1075		} else {
1076			length += spc_modesense_blockdesc(&buf[length], blocks,
1077							  block_size);
1078		}
1079	} else {
1080		if (ten)
1081			length += 4;
1082		else
1083			length += 1;
1084	}
1085
1086	if (page == 0x3f) {
1087		if (subpage != 0x00 && subpage != 0xff) {
1088			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
1089			return TCM_INVALID_CDB_FIELD;
1090		}
1091
1092		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
1093			/*
1094			 * Tricky way to say all subpage 00h for
1095			 * subpage==0, all subpages for subpage==0xff
1096			 * (and we just checked above that those are
1097			 * the only two possibilities).
1098			 */
1099			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
1100				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1101				if (!ten && length + ret >= 255)
1102					break;
1103				length += ret;
1104			}
1105		}
1106
1107		goto set_length;
1108	}
1109
1110	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1111		if (modesense_handlers[i].page == page &&
1112		    modesense_handlers[i].subpage == subpage) {
1113			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1114			goto set_length;
1115		}
1116
1117	/*
1118	 * We don't intend to implement:
1119	 *  - obsolete page 03h "format parameters" (checked by Solaris)
1120	 */
1121	if (page != 0x03)
1122		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
1123		       page, subpage);
1124
1125	return TCM_UNKNOWN_MODE_PAGE;
1126
1127set_length:
1128	if (ten)
1129		put_unaligned_be16(length - 2, buf);
1130	else
1131		buf[0] = length - 1;
1132
1133	rbuf = transport_kmap_data_sg(cmd);
1134	if (rbuf) {
1135		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
1136		transport_kunmap_data_sg(cmd);
1137	}
1138
1139	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length);
1140	return 0;
1141}
1142
1143static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
1144{
1145	char *cdb = cmd->t_task_cdb;
1146	bool ten = cdb[0] == MODE_SELECT_10;
1147	int off = ten ? 8 : 4;
1148	bool pf = !!(cdb[1] & 0x10);
1149	u8 page, subpage;
1150	unsigned char *buf;
1151	unsigned char tbuf[SE_MODE_PAGE_BUF];
1152	int length;
1153	sense_reason_t ret = 0;
1154	int i;
1155
1156	if (!cmd->data_length) {
1157		target_complete_cmd(cmd, SAM_STAT_GOOD);
1158		return 0;
1159	}
1160
1161	if (cmd->data_length < off + 2)
1162		return TCM_PARAMETER_LIST_LENGTH_ERROR;
1163
1164	buf = transport_kmap_data_sg(cmd);
1165	if (!buf)
1166		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1167
1168	if (!pf) {
1169		ret = TCM_INVALID_CDB_FIELD;
1170		goto out;
1171	}
1172
1173	page = buf[off] & 0x3f;
1174	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
1175
1176	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1177		if (modesense_handlers[i].page == page &&
1178		    modesense_handlers[i].subpage == subpage) {
1179			memset(tbuf, 0, SE_MODE_PAGE_BUF);
1180			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
1181			goto check_contents;
1182		}
1183
1184	ret = TCM_UNKNOWN_MODE_PAGE;
1185	goto out;
1186
1187check_contents:
1188	if (cmd->data_length < off + length) {
1189		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
1190		goto out;
1191	}
1192
1193	if (memcmp(buf + off, tbuf, length))
1194		ret = TCM_INVALID_PARAMETER_LIST;
1195
1196out:
1197	transport_kunmap_data_sg(cmd);
1198
1199	if (!ret)
1200		target_complete_cmd(cmd, SAM_STAT_GOOD);
1201	return ret;
1202}
1203
1204static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
1205{
1206	unsigned char *cdb = cmd->t_task_cdb;
1207	unsigned char *rbuf;
1208	u8 ua_asc = 0, ua_ascq = 0;
1209	unsigned char buf[SE_SENSE_BUF];
1210	bool desc_format = target_sense_desc_format(cmd->se_dev);
1211
1212	memset(buf, 0, SE_SENSE_BUF);
1213
1214	if (cdb[1] & 0x01) {
1215		pr_err("REQUEST_SENSE description emulation not"
1216			" supported\n");
1217		return TCM_INVALID_CDB_FIELD;
1218	}
1219
1220	rbuf = transport_kmap_data_sg(cmd);
1221	if (!rbuf)
1222		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1223
1224	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
1225		scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
1226					ua_asc, ua_ascq);
1227	else
1228		scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);
1229
1230	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
1231	transport_kunmap_data_sg(cmd);
1232
1233	target_complete_cmd(cmd, SAM_STAT_GOOD);
1234	return 0;
1235}
1236
1237sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1238{
1239	struct se_dev_entry *deve;
1240	struct se_session *sess = cmd->se_sess;
1241	struct se_node_acl *nacl;
1242	struct scsi_lun slun;
1243	unsigned char *buf;
1244	u32 lun_count = 0, offset = 8;
1245	__be32 len;
1246
1247	buf = transport_kmap_data_sg(cmd);
1248	if (cmd->data_length && !buf)
1249		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1250
1251	/*
1252	 * If no struct se_session pointer is present, this struct se_cmd is
1253	 * coming via a target_core_mod PASSTHROUGH op, and not through
1254	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
1255	 */
1256	if (!sess)
1257		goto done;
1258
1259	nacl = sess->se_node_acl;
1260
1261	rcu_read_lock();
1262	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
1263		/*
1264		 * We determine the correct LUN LIST LENGTH even once we
1265		 * have reached the initial allocation length.
1266		 * See SPC2-R20 7.19.
1267		 */
1268		lun_count++;
1269		if (offset >= cmd->data_length)
1270			continue;
1271
1272		int_to_scsilun(deve->mapped_lun, &slun);
1273		memcpy(buf + offset, &slun,
1274		       min(8u, cmd->data_length - offset));
1275		offset += 8;
1276	}
1277	rcu_read_unlock();
1278
1279	/*
1280	 * See SPC3 r07, page 159.
1281	 */
1282done:
1283	/*
1284	 * If no LUNs are accessible, report virtual LUN 0.
1285	 */
1286	if (lun_count == 0) {
1287		int_to_scsilun(0, &slun);
1288		if (cmd->data_length > 8)
1289			memcpy(buf + offset, &slun,
1290			       min(8u, cmd->data_length - offset));
1291		lun_count = 1;
1292	}
1293
1294	if (buf) {
1295		len = cpu_to_be32(lun_count * 8);
1296		memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
1297		transport_kunmap_data_sg(cmd);
1298	}
1299
1300	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8);
1301	return 0;
1302}
1303EXPORT_SYMBOL(spc_emulate_report_luns);
1304
1305static sense_reason_t
1306spc_emulate_testunitready(struct se_cmd *cmd)
1307{
1308	target_complete_cmd(cmd, SAM_STAT_GOOD);
1309	return 0;
1310}
1311
1312static void set_dpofua_usage_bits(u8 *usage_bits, struct se_device *dev)
1313{
1314	if (!target_check_fua(dev))
1315		usage_bits[1] &= ~0x18;
1316	else
1317		usage_bits[1] |= 0x18;
1318}
1319
1320static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev)
1321{
1322	if (!target_check_fua(dev))
1323		usage_bits[10] &= ~0x18;
1324	else
1325		usage_bits[10] |= 0x18;
1326}
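
/*
 * Illustration: the READ(10) descriptor below advertises byte 1 of the CDB
 * as 0xf8 (RDPROTECT, DPO and FUA usable).  When the backend does not
 * honour FUA, set_dpofua_usage_bits() clears the DPO/FUA bits (0x18), so
 * REPORT SUPPORTED OPERATION CODES returns that byte as 0xe0 instead.
 */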
1327
1328static struct target_opcode_descriptor tcm_opcode_read6 = {
1329	.support = SCSI_SUPPORT_FULL,
1330	.opcode = READ_6,
1331	.cdb_size = 6,
1332	.usage_bits = {READ_6, 0x1f, 0xff, 0xff,
1333		       0xff, SCSI_CONTROL_MASK},
1334};
1335
1336static struct target_opcode_descriptor tcm_opcode_read10 = {
1337	.support = SCSI_SUPPORT_FULL,
1338	.opcode = READ_10,
1339	.cdb_size = 10,
1340	.usage_bits = {READ_10, 0xf8, 0xff, 0xff,
1341		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
1342		       0xff, SCSI_CONTROL_MASK},
1343	.update_usage_bits = set_dpofua_usage_bits,
1344};
1345
1346static struct target_opcode_descriptor tcm_opcode_read12 = {
1347	.support = SCSI_SUPPORT_FULL,
1348	.opcode = READ_12,
1349	.cdb_size = 12,
1350	.usage_bits = {READ_12, 0xf8, 0xff, 0xff,
1351		       0xff, 0xff, 0xff, 0xff,
1352		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1353	.update_usage_bits = set_dpofua_usage_bits,
1354};
1355
1356static struct target_opcode_descriptor tcm_opcode_read16 = {
1357	.support = SCSI_SUPPORT_FULL,
1358	.opcode = READ_16,
1359	.cdb_size = 16,
1360	.usage_bits = {READ_16, 0xf8, 0xff, 0xff,
1361		       0xff, 0xff, 0xff, 0xff,
1362		       0xff, 0xff, 0xff, 0xff,
1363		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1364	.update_usage_bits = set_dpofua_usage_bits,
1365};
1366
1367static struct target_opcode_descriptor tcm_opcode_write6 = {
1368	.support = SCSI_SUPPORT_FULL,
1369	.opcode = WRITE_6,
1370	.cdb_size = 6,
1371	.usage_bits = {WRITE_6, 0x1f, 0xff, 0xff,
1372		       0xff, SCSI_CONTROL_MASK},
1373};
1374
1375static struct target_opcode_descriptor tcm_opcode_write10 = {
1376	.support = SCSI_SUPPORT_FULL,
1377	.opcode = WRITE_10,
1378	.cdb_size = 10,
1379	.usage_bits = {WRITE_10, 0xf8, 0xff, 0xff,
1380		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
1381		       0xff, SCSI_CONTROL_MASK},
1382	.update_usage_bits = set_dpofua_usage_bits,
1383};
1384
1385static struct target_opcode_descriptor tcm_opcode_write_verify10 = {
1386	.support = SCSI_SUPPORT_FULL,
1387	.opcode = WRITE_VERIFY,
1388	.cdb_size = 10,
1389	.usage_bits = {WRITE_VERIFY, 0xf0, 0xff, 0xff,
1390		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
1391		       0xff, SCSI_CONTROL_MASK},
1392	.update_usage_bits = set_dpofua_usage_bits,
1393};
1394
1395static struct target_opcode_descriptor tcm_opcode_write12 = {
1396	.support = SCSI_SUPPORT_FULL,
1397	.opcode = WRITE_12,
1398	.cdb_size = 12,
1399	.usage_bits = {WRITE_12, 0xf8, 0xff, 0xff,
1400		       0xff, 0xff, 0xff, 0xff,
1401		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1402	.update_usage_bits = set_dpofua_usage_bits,
1403};
1404
1405static struct target_opcode_descriptor tcm_opcode_write16 = {
1406	.support = SCSI_SUPPORT_FULL,
1407	.opcode = WRITE_16,
1408	.cdb_size = 16,
1409	.usage_bits = {WRITE_16, 0xf8, 0xff, 0xff,
1410		       0xff, 0xff, 0xff, 0xff,
1411		       0xff, 0xff, 0xff, 0xff,
1412		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1413	.update_usage_bits = set_dpofua_usage_bits,
1414};
1415
1416static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
1417	.support = SCSI_SUPPORT_FULL,
1418	.opcode = WRITE_VERIFY_16,
1419	.cdb_size = 16,
1420	.usage_bits = {WRITE_VERIFY_16, 0xf0, 0xff, 0xff,
1421		       0xff, 0xff, 0xff, 0xff,
1422		       0xff, 0xff, 0xff, 0xff,
1423		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1424	.update_usage_bits = set_dpofua_usage_bits,
1425};
1426
1427static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
1428			      struct se_cmd *cmd)
1429{
1430	struct exec_cmd_ops *ops = cmd->protocol_data;
1431	struct se_device *dev = cmd->se_dev;
1432
1433	return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
1434	       !!ops->execute_write_same;
1435}
1436
1437static struct target_opcode_descriptor tcm_opcode_write_same32 = {
1438	.support = SCSI_SUPPORT_FULL,
1439	.serv_action_valid = 1,
1440	.opcode = VARIABLE_LENGTH_CMD,
1441	.service_action = WRITE_SAME_32,
1442	.cdb_size = 32,
1443	.usage_bits = {VARIABLE_LENGTH_CMD, SCSI_CONTROL_MASK, 0x00, 0x00,
1444		       0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0x18,
1445		       0x00, WRITE_SAME_32, 0xe8, 0x00,
1446		       0xff, 0xff, 0xff, 0xff,
1447		       0xff, 0xff, 0xff, 0xff,
1448		       0x00, 0x00, 0x00, 0x00,
1449		       0x00, 0x00, 0x00, 0x00,
1450		       0xff, 0xff, 0xff, 0xff},
1451	.enabled = tcm_is_ws_enabled,
1452	.update_usage_bits = set_dpofua_usage_bits32,
1453};
1454
1455static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
1456			       struct se_cmd *cmd)
1457{
1458	struct se_device *dev = cmd->se_dev;
1459
1460	return dev->dev_attrib.emulate_caw;
1461}
1462
1463static struct target_opcode_descriptor tcm_opcode_compare_write = {
1464	.support = SCSI_SUPPORT_FULL,
1465	.opcode = COMPARE_AND_WRITE,
1466	.cdb_size = 16,
1467	.usage_bits = {COMPARE_AND_WRITE, 0x18, 0xff, 0xff,
1468		       0xff, 0xff, 0xff, 0xff,
1469		       0xff, 0xff, 0x00, 0x00,
1470		       0x00, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1471	.enabled = tcm_is_caw_enabled,
1472	.update_usage_bits = set_dpofua_usage_bits,
1473};
1474
1475static struct target_opcode_descriptor tcm_opcode_read_capacity = {
1476	.support = SCSI_SUPPORT_FULL,
1477	.opcode = READ_CAPACITY,
1478	.cdb_size = 10,
1479	.usage_bits = {READ_CAPACITY, 0x00, 0xff, 0xff,
1480		       0xff, 0xff, 0x00, 0x00,
1481		       0x01, SCSI_CONTROL_MASK},
1482};
1483
1484static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
1485	.support = SCSI_SUPPORT_FULL,
1486	.serv_action_valid = 1,
1487	.opcode = SERVICE_ACTION_IN_16,
1488	.service_action = SAI_READ_CAPACITY_16,
1489	.cdb_size = 16,
1490	.usage_bits = {SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16, 0x00, 0x00,
1491		       0x00, 0x00, 0x00, 0x00,
1492		       0x00, 0x00, 0xff, 0xff,
1493		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
1494};
1495
1496static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
1497				   struct se_cmd *cmd)
1498{
1499	struct se_device *dev = cmd->se_dev;
1500
1501	spin_lock(&dev->t10_alua.lba_map_lock);
1502	if (list_empty(&dev->t10_alua.lba_map_list)) {
1503		spin_unlock(&dev->t10_alua.lba_map_lock);
1504		return false;
1505	}
1506	spin_unlock(&dev->t10_alua.lba_map_lock);
1507	return true;
1508}
1509
1510static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
1511	.support = SCSI_SUPPORT_FULL,
1512	.serv_action_valid = 1,
1513	.opcode = SERVICE_ACTION_IN_16,
1514	.service_action = SAI_REPORT_REFERRALS,
1515	.cdb_size = 16,
1516	.usage_bits = {SERVICE_ACTION_IN_16, SAI_REPORT_REFERRALS, 0x00, 0x00,
1517		       0x00, 0x00, 0x00, 0x00,
1518		       0x00, 0x00, 0xff, 0xff,
1519		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
1520	.enabled = tcm_is_rep_ref_enabled,
1521};
1522
1523static struct target_opcode_descriptor tcm_opcode_sync_cache = {
1524	.support = SCSI_SUPPORT_FULL,
1525	.opcode = SYNCHRONIZE_CACHE,
1526	.cdb_size = 10,
1527	.usage_bits = {SYNCHRONIZE_CACHE, 0x02, 0xff, 0xff,
1528		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
1529		       0xff, SCSI_CONTROL_MASK},
1530};
1531
1532static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
1533	.support = SCSI_SUPPORT_FULL,
1534	.opcode = SYNCHRONIZE_CACHE_16,
1535	.cdb_size = 16,
1536	.usage_bits = {SYNCHRONIZE_CACHE_16, 0x02, 0xff, 0xff,
1537		       0xff, 0xff, 0xff, 0xff,
1538		       0xff, 0xff, 0xff, 0xff,
1539		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1540};
1541
1542static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
1543				 struct se_cmd *cmd)
1544{
1545	struct exec_cmd_ops *ops = cmd->protocol_data;
1546	struct se_device *dev = cmd->se_dev;
1547
1548	return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
1549}
1550
1551static struct target_opcode_descriptor tcm_opcode_unmap = {
1552	.support = SCSI_SUPPORT_FULL,
1553	.opcode = UNMAP,
1554	.cdb_size = 10,
1555	.usage_bits = {UNMAP, 0x00, 0x00, 0x00,
1556		       0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0xff,
1557		       0xff, SCSI_CONTROL_MASK},
1558	.enabled = tcm_is_unmap_enabled,
1559};
1560
1561static struct target_opcode_descriptor tcm_opcode_write_same = {
1562	.support = SCSI_SUPPORT_FULL,
1563	.opcode = WRITE_SAME,
1564	.cdb_size = 10,
1565	.usage_bits = {WRITE_SAME, 0xe8, 0xff, 0xff,
1566		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
1567		       0xff, SCSI_CONTROL_MASK},
1568	.enabled = tcm_is_ws_enabled,
1569};
1570
1571static struct target_opcode_descriptor tcm_opcode_write_same16 = {
1572	.support = SCSI_SUPPORT_FULL,
1573	.opcode = WRITE_SAME_16,
1574	.cdb_size = 16,
1575	.usage_bits = {WRITE_SAME_16, 0xe8, 0xff, 0xff,
1576		       0xff, 0xff, 0xff, 0xff,
1577		       0xff, 0xff, 0xff, 0xff,
1578		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1579	.enabled = tcm_is_ws_enabled,
1580};
1581
1582static struct target_opcode_descriptor tcm_opcode_verify = {
1583	.support = SCSI_SUPPORT_FULL,
1584	.opcode = VERIFY,
1585	.cdb_size = 10,
1586	.usage_bits = {VERIFY, 0x00, 0xff, 0xff,
1587		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
1588		       0xff, SCSI_CONTROL_MASK},
1589};
1590
1591static struct target_opcode_descriptor tcm_opcode_verify16 = {
1592	.support = SCSI_SUPPORT_FULL,
1593	.opcode = VERIFY_16,
1594	.cdb_size = 16,
1595	.usage_bits = {VERIFY_16, 0x00, 0xff, 0xff,
1596		       0xff, 0xff, 0xff, 0xff,
1597		       0xff, 0xff, 0xff, 0xff,
1598		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
1599};
1600
1601static struct target_opcode_descriptor tcm_opcode_start_stop = {
1602	.support = SCSI_SUPPORT_FULL,
1603	.opcode = START_STOP,
1604	.cdb_size = 6,
1605	.usage_bits = {START_STOP, 0x01, 0x00, 0x00,
1606		       0x01, SCSI_CONTROL_MASK},
1607};
1608
1609static struct target_opcode_descriptor tcm_opcode_mode_select = {
1610	.support = SCSI_SUPPORT_FULL,
1611	.opcode = MODE_SELECT,
1612	.cdb_size = 6,
1613	.usage_bits = {MODE_SELECT, 0x10, 0x00, 0x00,
1614		       0xff, SCSI_CONTROL_MASK},
1615};
1616
1617static struct target_opcode_descriptor tcm_opcode_mode_select10 = {
1618	.support = SCSI_SUPPORT_FULL,
1619	.opcode = MODE_SELECT_10,
1620	.cdb_size = 10,
1621	.usage_bits = {MODE_SELECT_10, 0x10, 0x00, 0x00,
1622		       0x00, 0x00, 0x00, 0xff,
1623		       0xff, SCSI_CONTROL_MASK},
1624};
1625
1626static struct target_opcode_descriptor tcm_opcode_mode_sense = {
1627	.support = SCSI_SUPPORT_FULL,
1628	.opcode = MODE_SENSE,
1629	.cdb_size = 6,
1630	.usage_bits = {MODE_SENSE, 0x08, 0xff, 0xff,
1631		       0xff, SCSI_CONTROL_MASK},
1632};
1633
1634static struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
1635	.support = SCSI_SUPPORT_FULL,
1636	.opcode = MODE_SENSE_10,
1637	.cdb_size = 10,
1638	.usage_bits = {MODE_SENSE_10, 0x18, 0xff, 0xff,
1639		       0x00, 0x00, 0x00, 0xff,
1640		       0xff, SCSI_CONTROL_MASK},
1641};
1642
1643static struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
1644	.support = SCSI_SUPPORT_FULL,
1645	.serv_action_valid = 1,
1646	.opcode = PERSISTENT_RESERVE_IN,
1647	.service_action = PRI_READ_KEYS,
1648	.cdb_size = 10,
1649	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_KEYS, 0x00, 0x00,
1650		       0x00, 0x00, 0x00, 0xff,
1651		       0xff, SCSI_CONTROL_MASK},
1652};
1653
1654static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
1655	.support = SCSI_SUPPORT_FULL,
1656	.serv_action_valid = 1,
1657	.opcode = PERSISTENT_RESERVE_IN,
1658	.service_action = PRI_READ_RESERVATION,
1659	.cdb_size = 10,
1660	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_RESERVATION, 0x00, 0x00,
1661		       0x00, 0x00, 0x00, 0xff,
1662		       0xff, SCSI_CONTROL_MASK},
1663};
1664
1665static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
1666			      struct se_cmd *cmd)
1667{
1668	struct se_device *dev = cmd->se_dev;
1669
1670	if (!dev->dev_attrib.emulate_pr)
1671		return false;
1672
1673	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
1674		return true;
1675
1676	switch (descr->opcode) {
1677	case RESERVE:
1678	case RESERVE_10:
1679	case RELEASE:
1680	case RELEASE_10:
1681		/*
1682		 * The pr_ops which are used by the backend modules don't
1683		 * support these commands.
1684		 */
1685		return false;
1686	case PERSISTENT_RESERVE_OUT:
1687		switch (descr->service_action) {
1688		case PRO_REGISTER_AND_MOVE:
1689		case PRO_REPLACE_LOST_RESERVATION:
1690			/*
1691			 * The backend modules don't have access to ports and
1692			 * I_T nexuses so they can't handle these type of
1693			 * requests.
1694			 */
1695			return false;
1696		}
1697		break;
1698	case PERSISTENT_RESERVE_IN:
1699		if (descr->service_action == PRI_READ_FULL_STATUS)
1700			return false;
1701		break;
1702	}
1703
1704	return true;
1705}
1706
static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_REPORT_CAPABILITIES,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_REPORT_CAPABILITIES, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_READ_FULL_STATUS,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_FULL_STATUS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_register = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_reserve = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_RESERVE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RESERVE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_release = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_RELEASE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RELEASE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_clear = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_CLEAR,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_CLEAR, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_preempt = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_PREEMPT,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_PREEMPT_AND_ABORT,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT_AND_ABORT, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
	.cdb_size = 10,
	.usage_bits = {
		PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
		0xff, 0x00,
		0x00, 0xff, 0xff, 0xff,
		0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER_AND_MOVE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_MOVE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_release = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RELEASE,
	.cdb_size = 6,
	.usage_bits = {RELEASE, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_release10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RELEASE_10,
	.cdb_size = 10,
	.usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_reserve = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RESERVE,
	.cdb_size = 6,
	.usage_bits = {RESERVE, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_reserve10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RESERVE_10,
	.cdb_size = 10,
	.usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_request_sense = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = REQUEST_SENSE,
	.cdb_size = 6,
	.usage_bits = {REQUEST_SENSE, 0x00, 0x00, 0x00,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_inquiry = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = INQUIRY,
	.cdb_size = 6,
	.usage_bits = {INQUIRY, 0x01, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

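/*
 * EXTENDED COPY (LID1) and RECEIVE COPY RESULTS are only reported as
 * supported when the emulate_3pc device attribute is enabled.
 */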
static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
			       struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	return dev->dev_attrib.emulate_3pc;
}

static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = EXTENDED_COPY,
	.cdb_size = 16,
	.usage_bits = {EXTENDED_COPY, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_3pc_enabled,
};

static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = RECEIVE_COPY_RESULTS,
	.service_action = RCR_SA_OPERATING_PARAMETERS,
	.cdb_size = 16,
	.usage_bits = {RECEIVE_COPY_RESULTS, RCR_SA_OPERATING_PARAMETERS,
		       0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_3pc_enabled,
};

static struct target_opcode_descriptor tcm_opcode_report_luns = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = REPORT_LUNS,
	.cdb_size = 12,
	.usage_bits = {REPORT_LUNS, 0x00, 0xff, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = TEST_UNIT_READY,
	.cdb_size = 6,
	.usage_bits = {TEST_UNIT_READY, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_IN,
	.service_action = MI_REPORT_TARGET_PGS,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_IN, 0xE0 | MI_REPORT_TARGET_PGS, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};

static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
			     struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	return dev->dev_attrib.emulate_rsoc;
}

static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_IN,
	.service_action = MI_REPORT_SUPPORTED_OPERATION_CODES,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_IN, MI_REPORT_SUPPORTED_OPERATION_CODES,
		       0x87, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = spc_rsoc_enabled,
};

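/*
 * SET TARGET PORT GROUPS is only reported as supported when the LUN is
 * associated with a target port group that allows explicit ALUA
 * management.
 */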
static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
				   struct se_cmd *cmd)
{
	struct t10_alua_tg_pt_gp *l_tg_pt_gp;
	struct se_lun *l_lun = cmd->se_lun;

	rcu_read_lock();
	l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
	if (!l_tg_pt_gp) {
		rcu_read_unlock();
		return false;
	}
	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
		rcu_read_unlock();
		return false;
	}
	rcu_read_unlock();

	return true;
}

static struct target_opcode_descriptor tcm_opcode_set_tpg = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_OUT,
	.service_action = MO_SET_TARGET_PGS,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_OUT, MO_SET_TARGET_PGS, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_set_tpg_enabled,
};

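/*
 * Master table of opcode descriptors used to build REPORT SUPPORTED
 * OPERATION CODES responses.  spc_emulate_report_supp_op_codes() walks
 * this table for the all_commands format, and spc_rsoc_get_descr()
 * matches a single entry for the one_command format; descriptors whose
 * ->enabled() callback returns false for the current device/session are
 * skipped.
 */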
static struct target_opcode_descriptor *tcm_supported_opcodes[] = {
	&tcm_opcode_read6,
	&tcm_opcode_read10,
	&tcm_opcode_read12,
	&tcm_opcode_read16,
	&tcm_opcode_write6,
	&tcm_opcode_write10,
	&tcm_opcode_write_verify10,
	&tcm_opcode_write12,
	&tcm_opcode_write16,
	&tcm_opcode_write_verify16,
	&tcm_opcode_write_same32,
	&tcm_opcode_compare_write,
	&tcm_opcode_read_capacity,
	&tcm_opcode_read_capacity16,
	&tcm_opcode_read_report_refferals,
	&tcm_opcode_sync_cache,
	&tcm_opcode_sync_cache16,
	&tcm_opcode_unmap,
	&tcm_opcode_write_same,
	&tcm_opcode_write_same16,
	&tcm_opcode_verify,
	&tcm_opcode_verify16,
	&tcm_opcode_start_stop,
	&tcm_opcode_mode_select,
	&tcm_opcode_mode_select10,
	&tcm_opcode_mode_sense,
	&tcm_opcode_mode_sense10,
	&tcm_opcode_pri_read_keys,
	&tcm_opcode_pri_read_resrv,
	&tcm_opcode_pri_read_caps,
	&tcm_opcode_pri_read_full_status,
	&tcm_opcode_pro_register,
	&tcm_opcode_pro_reserve,
	&tcm_opcode_pro_release,
	&tcm_opcode_pro_clear,
	&tcm_opcode_pro_preempt,
	&tcm_opcode_pro_preempt_abort,
	&tcm_opcode_pro_reg_ign_exist,
	&tcm_opcode_pro_register_move,
	&tcm_opcode_release,
	&tcm_opcode_release10,
	&tcm_opcode_reserve,
	&tcm_opcode_reserve10,
	&tcm_opcode_request_sense,
	&tcm_opcode_inquiry,
	&tcm_opcode_extended_copy_lid1,
	&tcm_opcode_rcv_copy_res_op_params,
	&tcm_opcode_report_luns,
	&tcm_opcode_test_unit_ready,
	&tcm_opcode_report_target_pgs,
	&tcm_opcode_report_supp_opcodes,
	&tcm_opcode_set_tpg,
};

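/*
 * Encode a command timeouts descriptor, returned when the initiator set
 * the RCTD bit in the CDB.  The 12-byte layout written below is:
 *
 *   bytes 0-1 : DESCRIPTOR LENGTH (0x000a)
 *   byte  3   : COMMAND SPECIFIC
 *   bytes 4-7 : NOMINAL COMMAND PROCESSING TIMEOUT
 *   bytes 8-11: RECOMMENDED COMMAND TIMEOUT
 *
 * See the command timeouts descriptor format in SPC-4.
 */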
static int
spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
				struct target_opcode_descriptor *descr)
{
	if (!ctdp)
		return 0;

	put_unaligned_be16(0xa, buf);
	buf[3] = descr->specific_timeout;
	put_unaligned_be32(descr->nominal_timeout, &buf[4]);
	put_unaligned_be32(descr->recommended_timeout, &buf[8]);

	return 12;
}

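/*
 * Encode one entry of the all_commands command descriptor list:
 *
 *   byte  0   : OPERATION CODE
 *   bytes 2-3 : SERVICE ACTION
 *   byte  5   : CTDP and SERVACTV flags
 *   bytes 6-7 : CDB LENGTH
 *   bytes 8+  : command timeouts descriptor, if CTDP is set
 */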
static int
spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
				   struct target_opcode_descriptor *descr)
{
	int td_size = 0;

	buf[0] = descr->opcode;

	put_unaligned_be16(descr->service_action, &buf[2]);

	buf[5] = (ctdp << 1) | descr->serv_action_valid;
	put_unaligned_be16(descr->cdb_size, &buf[6]);

	td_size = spc_rsoc_encode_command_timeouts_descriptor(&buf[8], ctdp,
							      descr);

	return 8 + td_size;
}

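/*
 * Encode the one_command parameter data format: byte 1 carries the CTDP
 * flag and the SUPPORT field, bytes 2-3 the CDB SIZE, followed by the
 * CDB usage bitmap (with any device-specific bits filled in by
 * ->update_usage_bits()) and an optional command timeouts descriptor.
 * A NULL descriptor means the requested command is not supported, so
 * only the two-byte header is returned.
 */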
static int
spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
				       struct target_opcode_descriptor *descr,
				       struct se_device *dev)
{
	int td_size = 0;

	if (!descr) {
		buf[1] = (ctdp << 7) | SCSI_SUPPORT_NOT_SUPPORTED;
		return 2;
	}

	buf[1] = (ctdp << 7) | SCSI_SUPPORT_FULL;
	put_unaligned_be16(descr->cdb_size, &buf[2]);
	memcpy(&buf[4], descr->usage_bits, descr->cdb_size);
	if (descr->update_usage_bits)
		descr->update_usage_bits(&buf[4], dev);

	td_size = spc_rsoc_encode_command_timeouts_descriptor(
			&buf[4 + descr->cdb_size], ctdp, descr);

	return 4 + descr->cdb_size + td_size;
}

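/*
 * Look up the descriptor matching the REQUESTED OPERATION CODE (and,
 * depending on the REPORTING OPTIONS field, the REQUESTED SERVICE
 * ACTION) from the CDB.  *opcode is left NULL when the command is not
 * supported, and TCM_INVALID_CDB_FIELD is returned for the illegal
 * combinations of reporting option and service action spelled out in
 * the comments below.
 */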
static sense_reason_t
spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
{
	struct target_opcode_descriptor *descr;
	struct se_session *sess = cmd->se_sess;
	unsigned char *cdb = cmd->t_task_cdb;
	u8 opts = cdb[2] & 0x3;
	u8 requested_opcode;
	u16 requested_sa;
	int i;

	requested_opcode = cdb[3];
	requested_sa = ((u16)cdb[4]) << 8 | cdb[5];
	*opcode = NULL;

	if (opts > 3) {
		pr_debug("TARGET_CORE[%s]: Invalid REPORT SUPPORTED OPERATION CODES"
			" with unsupported REPORTING OPTIONS %#x for 0x%08llx from %s\n",
			cmd->se_tfo->fabric_name, opts,
			cmd->se_lun->unpacked_lun,
			sess->se_node_acl->initiatorname);
		return TCM_INVALID_CDB_FIELD;
	}

	for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
		descr = tcm_supported_opcodes[i];
		if (descr->opcode != requested_opcode)
			continue;

		switch (opts) {
		case 0x1:
			/*
			 * If the REQUESTED OPERATION CODE field specifies an
			 * operation code for which the device server implements
			 * service actions, then the device server shall
			 * terminate the command with CHECK CONDITION status,
			 * with the sense key set to ILLEGAL REQUEST, and the
			 * additional sense code set to INVALID FIELD IN CDB.
			 */
			if (descr->serv_action_valid)
				return TCM_INVALID_CDB_FIELD;

			if (!descr->enabled || descr->enabled(descr, cmd))
				*opcode = descr;
			break;
		case 0x2:
			/*
			 * If the REQUESTED OPERATION CODE field specifies an
			 * operation code for which the device server does not
			 * implement service actions, then the device server
			 * shall terminate the command with CHECK CONDITION
			 * status, with the sense key set to ILLEGAL REQUEST,
			 * and the additional sense code set to INVALID FIELD IN CDB.
			 */
			if (descr->serv_action_valid &&
			    descr->service_action == requested_sa) {
				if (!descr->enabled || descr->enabled(descr,
								      cmd))
					*opcode = descr;
			} else if (!descr->serv_action_valid)
				return TCM_INVALID_CDB_FIELD;
			break;
		case 0x3:
			/*
			 * The command support data for the operation code and
			 * service action as specified in the REQUESTED OPERATION
			 * CODE field and REQUESTED SERVICE ACTION field shall
			 * be returned in the one_command parameter data format.
			 */
			if (descr->service_action == requested_sa)
				if (!descr->enabled || descr->enabled(descr,
								      cmd))
					*opcode = descr;
			break;
		}
	}

	return 0;
}

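/*
 * Emulate MAINTENANCE IN / REPORT SUPPORTED OPERATION CODES.  The
 * response size is computed up front (all_commands vs. one_command
 * format, plus 12 bytes per command timeouts descriptor when RCTD is
 * set), the descriptors are encoded into a scratch buffer, and the
 * result is copied back truncated to the initiator's allocation length.
 */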
static sense_reason_t
spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
{
	int descr_num = ARRAY_SIZE(tcm_supported_opcodes);
	struct target_opcode_descriptor *descr = NULL;
	unsigned char *cdb = cmd->t_task_cdb;
	u8 rctd = (cdb[2] >> 7) & 0x1;
	unsigned char *buf = NULL;
	int response_length = 0;
	u8 opts = cdb[2] & 0x3;
	unsigned char *rbuf;
	sense_reason_t ret = 0;
	int i;

	if (!cmd->se_dev->dev_attrib.emulate_rsoc)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	rbuf = transport_kmap_data_sg(cmd);
	if (cmd->data_length && !rbuf) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	if (opts == 0)
		response_length = 4 + (8 + rctd * 12) * descr_num;
	else {
		ret = spc_rsoc_get_descr(cmd, &descr);
		if (ret)
			goto out;

		if (descr)
			response_length = 4 + descr->cdb_size + rctd * 12;
		else
			response_length = 2;
	}

	buf = kzalloc(response_length, GFP_KERNEL);
	if (!buf) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}
	response_length = 0;

	if (opts == 0) {
		response_length += 4;

		for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
			descr = tcm_supported_opcodes[i];
			if (descr->enabled && !descr->enabled(descr, cmd))
				continue;

			response_length += spc_rsoc_encode_command_descriptor(
					&buf[response_length], rctd, descr);
		}
		put_unaligned_be32(response_length - 3, buf);
	} else {
		response_length = spc_rsoc_encode_one_command_descriptor(
				&buf[response_length], rctd, descr,
				cmd->se_dev);
	}

	memcpy(rbuf, buf, min_t(u32, response_length, cmd->data_length));
out:
	kfree(buf);
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, response_length);
	return ret;
}

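/*
 * Parse CDBs for the SPC commands shared by all device types.  On
 * success *size is set to the expected transfer length taken from the
 * CDB and, where the command is emulated here, cmd->execute_cmd is
 * pointed at the matching handler; opcodes not handled in this file are
 * returned to the caller as TCM_UNSUPPORTED_SCSI_OPCODE.
 */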
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case RESERVE:
	case RESERVE_10:
	case RELEASE:
	case RELEASE_10:
		if (!dev->dev_attrib.emulate_pr)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
		if (!dev->dev_attrib.emulate_pr)
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = get_unaligned_be16(&cdb[7]);
		break;
	case PERSISTENT_RESERVE_IN:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = get_unaligned_be32(&cdb[5]);
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = get_unaligned_be16(&cdb[7]);
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = get_unaligned_be16(&cdb[7]);
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = get_unaligned_be16(&cdb[3]);

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = get_unaligned_be32(&cdb[6]);
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = get_unaligned_be32(&cdb[10]);
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = get_unaligned_be16(&cdb[3]);
		break;
	case WRITE_BUFFER:
		*size = get_unaligned_be24(&cdb[6]);
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = get_unaligned_be32(&cdb[6]);
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			if ((cdb[1] & 0x1f) ==
			    MI_REPORT_SUPPORTED_OPERATION_CODES)
				cmd->execute_cmd =
					spc_emulate_report_supp_op_codes;
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multimedia commands
			 * (shares the MAINTENANCE_IN opcode value)
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_REPORT_KEY from multimedia commands
			 * (shares the MAINTENANCE_OUT opcode value)
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);