1// SPDX-License-Identifier: GPL-2.0-or-later
2/*******************************************************************************
3 * Filename:  target_core_configfs.c
4 *
5 * This file contains ConfigFS logic for the Generic Target Engine project.
6 *
7 * (c) Copyright 2008-2013 Datera, Inc.
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
12 *
13 ****************************************************************************/
14
15#include <linux/kstrtox.h>
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <generated/utsrelease.h>
19#include <linux/utsname.h>
20#include <linux/init.h>
21#include <linux/fs.h>
22#include <linux/namei.h>
23#include <linux/slab.h>
24#include <linux/types.h>
25#include <linux/delay.h>
26#include <linux/unistd.h>
27#include <linux/string.h>
28#include <linux/parser.h>
29#include <linux/syscalls.h>
30#include <linux/configfs.h>
31#include <linux/spinlock.h>
32
33#include <target/target_core_base.h>
34#include <target/target_core_backend.h>
35#include <target/target_core_fabric.h>
36
37#include "target_core_internal.h"
38#include "target_core_alua.h"
39#include "target_core_pr.h"
40#include "target_core_rd.h"
41#include "target_core_xcopy.h"
42
43#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
44static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
45{									\
46	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
47									\
48	cit->ct_item_ops = _item_ops;					\
49	cit->ct_group_ops = _group_ops;					\
50	cit->ct_attrs = _attrs;						\
51	cit->ct_owner = tb->ops->owner;					\
52	pr_debug("Setup generic %s\n", __stringify(_name));		\
53}
54
55#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
56static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
57{									\
58	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
59									\
60	cit->ct_item_ops = _item_ops;					\
61	cit->ct_group_ops = _group_ops;					\
62	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
63	cit->ct_owner = tb->ops->owner;					\
64	pr_debug("Setup generic %s\n", __stringify(_name));		\
65}
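/*
 * Illustrative expansion only (not compiled as-is): TB_CIT_SETUP_DRV(dev_attrib,
 * NULL, NULL) used further below roughly generates the following helper, wiring
 * the backend driver's own attribute array into its config_item_type:
 *
 *	static void target_core_setup_dev_attrib_cit(struct target_backend *tb)
 *	{
 *		struct config_item_type *cit = &tb->tb_dev_attrib_cit;
 *
 *		cit->ct_item_ops = NULL;
 *		cit->ct_group_ops = NULL;
 *		cit->ct_attrs = tb->ops->tb_dev_attrib_attrs;
 *		cit->ct_owner = tb->ops->owner;
 *		pr_debug("Setup generic %s\n", "dev_attrib");
 *	}
 */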
66
67extern struct t10_alua_lu_gp *default_lu_gp;
68
69static LIST_HEAD(g_tf_list);
70static DEFINE_MUTEX(g_tf_lock);
71
72static struct config_group target_core_hbagroup;
73static struct config_group alua_group;
74static struct config_group alua_lu_gps_group;
75
76static unsigned int target_devices;
77static DEFINE_MUTEX(target_devices_lock);
78
79static inline struct se_hba *
80item_to_hba(struct config_item *item)
81{
82	return container_of(to_config_group(item), struct se_hba, hba_group);
83}
84
85/*
86 * Attributes for /sys/kernel/config/target/
87 */
88static ssize_t target_core_item_version_show(struct config_item *item,
89		char *page)
90{
91	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
92		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
93		utsname()->sysname, utsname()->machine);
94}
95
96CONFIGFS_ATTR_RO(target_core_item_, version);
97
98char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
99static char db_root_stage[DB_ROOT_LEN];
100
101static ssize_t target_core_item_dbroot_show(struct config_item *item,
102					    char *page)
103{
104	return sprintf(page, "%s\n", db_root);
105}
106
107static ssize_t target_core_item_dbroot_store(struct config_item *item,
108					const char *page, size_t count)
109{
110	ssize_t read_bytes;
111	struct file *fp;
112	ssize_t r = -EINVAL;
113
114	mutex_lock(&target_devices_lock);
115	if (target_devices) {
116		pr_err("db_root: cannot be changed because it's in use\n");
117		goto unlock;
118	}
119
120	if (count > (DB_ROOT_LEN - 1)) {
121		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
122		       (int)count, DB_ROOT_LEN - 1);
123		goto unlock;
124	}
125
126	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
127	if (!read_bytes)
128		goto unlock;
129
130	if (db_root_stage[read_bytes - 1] == '\n')
131		db_root_stage[read_bytes - 1] = '\0';
132
133	/* validate new db root before accepting it */
134	fp = filp_open(db_root_stage, O_RDONLY, 0);
135	if (IS_ERR(fp)) {
136		pr_err("db_root: cannot open: %s\n", db_root_stage);
137		goto unlock;
138	}
139	if (!S_ISDIR(file_inode(fp)->i_mode)) {
140		filp_close(fp, NULL);
141		pr_err("db_root: not a directory: %s\n", db_root_stage);
142		goto unlock;
143	}
144	filp_close(fp, NULL);
145
146	strncpy(db_root, db_root_stage, read_bytes);
147	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
148
149	r = read_bytes;
150
151unlock:
152	mutex_unlock(&target_devices_lock);
153	return r;
154}
155
156CONFIGFS_ATTR(target_core_item_, dbroot);
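/*
 * Example usage (illustrative; /etc/target is just an example path): db_root
 * can only be changed while no devices are configured, and the new value must
 * name an existing directory, e.g.
 *
 *	mount -t configfs configfs /sys/kernel/config
 *	echo /etc/target > /sys/kernel/config/target/dbroot
 *	cat /sys/kernel/config/target/dbroot
 */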
157
158static struct target_fabric_configfs *target_core_get_fabric(
159	const char *name)
160{
161	struct target_fabric_configfs *tf;
162
163	if (!name)
164		return NULL;
165
166	mutex_lock(&g_tf_lock);
167	list_for_each_entry(tf, &g_tf_list, tf_list) {
168		const char *cmp_name = tf->tf_ops->fabric_alias;
169		if (!cmp_name)
170			cmp_name = tf->tf_ops->fabric_name;
171		if (!strcmp(cmp_name, name)) {
172			atomic_inc(&tf->tf_access_cnt);
173			mutex_unlock(&g_tf_lock);
174			return tf;
175		}
176	}
177	mutex_unlock(&g_tf_lock);
178
179	return NULL;
180}
181
/*
 * Called from struct configfs_group_operations->make_group()
 */
185static struct config_group *target_core_register_fabric(
186	struct config_group *group,
187	const char *name)
188{
189	struct target_fabric_configfs *tf;
190	int ret;
191
192	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
193			" %s\n", group, name);
194
195	tf = target_core_get_fabric(name);
196	if (!tf) {
197		pr_debug("target_core_register_fabric() trying autoload for %s\n",
198			 name);
199
		/*
		 * Below are some hardcoded request_module() calls to automatically
		 * load fabric modules when the following is called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric modules can be
		 * registered, but simply provides auto-loading logic for mkdir(2)
		 * system calls naming known TCM fabric modules.
		 */
210
211		if (!strncmp(name, "iscsi", 5)) {
212			/*
213			 * Automatically load the LIO Target fabric module when the
214			 * following is called:
215			 *
216			 * mkdir -p $CONFIGFS/target/iscsi
217			 */
218			ret = request_module("iscsi_target_mod");
219			if (ret < 0) {
220				pr_debug("request_module() failed for"
221				         " iscsi_target_mod.ko: %d\n", ret);
222				return ERR_PTR(-EINVAL);
223			}
224		} else if (!strncmp(name, "loopback", 8)) {
225			/*
226			 * Automatically load the tcm_loop fabric module when the
227			 * following is called:
228			 *
229			 * mkdir -p $CONFIGFS/target/loopback
230			 */
231			ret = request_module("tcm_loop");
232			if (ret < 0) {
233				pr_debug("request_module() failed for"
234				         " tcm_loop.ko: %d\n", ret);
235				return ERR_PTR(-EINVAL);
236			}
237		}
238
239		tf = target_core_get_fabric(name);
240	}
241
242	if (!tf) {
243		pr_debug("target_core_get_fabric() failed for %s\n",
244		         name);
245		return ERR_PTR(-EINVAL);
246	}
247	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
248			" %s\n", tf->tf_ops->fabric_name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
253	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
254			&tf->tf_wwn_cit);
255
256	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
257
258	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
259			&tf->tf_discovery_cit);
260	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
261
262	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
263		 config_item_name(&tf->tf_group.cg_item));
264	return &tf->tf_group;
265}
266
/*
 * Called from struct configfs_group_operations->drop_item()
 */
270static void target_core_deregister_fabric(
271	struct config_group *group,
272	struct config_item *item)
273{
274	struct target_fabric_configfs *tf = container_of(
275		to_config_group(item), struct target_fabric_configfs, tf_group);
276
277	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
278		" tf list\n", config_item_name(item));
279
280	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
281			" %s\n", tf->tf_ops->fabric_name);
282	atomic_dec(&tf->tf_access_cnt);
283
284	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
285			" %s\n", config_item_name(item));
286
287	configfs_remove_default_groups(&tf->tf_group);
288	config_item_put(item);
289}
290
291static struct configfs_group_operations target_core_fabric_group_ops = {
292	.make_group	= &target_core_register_fabric,
293	.drop_item	= &target_core_deregister_fabric,
294};
295
/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
299static struct configfs_attribute *target_core_fabric_item_attrs[] = {
300	&target_core_item_attr_version,
301	&target_core_item_attr_dbroot,
302	NULL,
303};
304
305/*
306 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
307 */
308static const struct config_item_type target_core_fabrics_item = {
309	.ct_group_ops	= &target_core_fabric_group_ops,
310	.ct_attrs	= target_core_fabric_item_attrs,
311	.ct_owner	= THIS_MODULE,
312};
313
314static struct configfs_subsystem target_core_fabrics = {
315	.su_group = {
316		.cg_item = {
317			.ci_namebuf = "target",
318			.ci_type = &target_core_fabrics_item,
319		},
320	},
321};
322
323int target_depend_item(struct config_item *item)
324{
325	return configfs_depend_item(&target_core_fabrics, item);
326}
327EXPORT_SYMBOL(target_depend_item);
328
329void target_undepend_item(struct config_item *item)
330{
331	return configfs_undepend_item(item);
332}
333EXPORT_SYMBOL(target_undepend_item);
334
335/*##############################################################################
336// Start functions called by external Target Fabrics Modules
337//############################################################################*/
338static int target_disable_feature(struct se_portal_group *se_tpg)
339{
340	return 0;
341}
342
343static u32 target_default_get_inst_index(struct se_portal_group *se_tpg)
344{
345	return 1;
346}
347
348static u32 target_default_sess_get_index(struct se_session *se_sess)
349{
350	return 0;
351}
352
353static void target_set_default_node_attributes(struct se_node_acl *se_acl)
354{
355}
356
357static int target_default_get_cmd_state(struct se_cmd *se_cmd)
358{
359	return 0;
360}
361
362static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
363{
364	if (tfo->fabric_alias) {
365		if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
366			pr_err("Passed alias: %s exceeds "
367				"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
368			return -EINVAL;
369		}
370	}
371	if (!tfo->fabric_name) {
372		pr_err("Missing tfo->fabric_name\n");
373		return -EINVAL;
374	}
375	if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
376		pr_err("Passed name: %s exceeds "
377			"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
378		return -EINVAL;
379	}
380	if (!tfo->tpg_get_wwn) {
381		pr_err("Missing tfo->tpg_get_wwn()\n");
382		return -EINVAL;
383	}
384	if (!tfo->tpg_get_tag) {
385		pr_err("Missing tfo->tpg_get_tag()\n");
386		return -EINVAL;
387	}
388	if (!tfo->release_cmd) {
389		pr_err("Missing tfo->release_cmd()\n");
390		return -EINVAL;
391	}
392	if (!tfo->write_pending) {
393		pr_err("Missing tfo->write_pending()\n");
394		return -EINVAL;
395	}
396	if (!tfo->queue_data_in) {
397		pr_err("Missing tfo->queue_data_in()\n");
398		return -EINVAL;
399	}
400	if (!tfo->queue_status) {
401		pr_err("Missing tfo->queue_status()\n");
402		return -EINVAL;
403	}
404	if (!tfo->queue_tm_rsp) {
405		pr_err("Missing tfo->queue_tm_rsp()\n");
406		return -EINVAL;
407	}
408	if (!tfo->aborted_task) {
409		pr_err("Missing tfo->aborted_task()\n");
410		return -EINVAL;
411	}
412	if (!tfo->check_stop_free) {
413		pr_err("Missing tfo->check_stop_free()\n");
414		return -EINVAL;
415	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() for the
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
421	if (!tfo->fabric_make_wwn) {
422		pr_err("Missing tfo->fabric_make_wwn()\n");
423		return -EINVAL;
424	}
425	if (!tfo->fabric_drop_wwn) {
426		pr_err("Missing tfo->fabric_drop_wwn()\n");
427		return -EINVAL;
428	}
429	if (!tfo->fabric_make_tpg) {
430		pr_err("Missing tfo->fabric_make_tpg()\n");
431		return -EINVAL;
432	}
433	if (!tfo->fabric_drop_tpg) {
434		pr_err("Missing tfo->fabric_drop_tpg()\n");
435		return -EINVAL;
436	}
437
438	return 0;
439}
440
441static void target_set_default_ops(struct target_core_fabric_ops *tfo)
442{
443	if (!tfo->tpg_check_demo_mode)
444		tfo->tpg_check_demo_mode = target_disable_feature;
445
446	if (!tfo->tpg_check_demo_mode_cache)
447		tfo->tpg_check_demo_mode_cache = target_disable_feature;
448
449	if (!tfo->tpg_check_demo_mode_write_protect)
450		tfo->tpg_check_demo_mode_write_protect = target_disable_feature;
451
452	if (!tfo->tpg_check_prod_mode_write_protect)
453		tfo->tpg_check_prod_mode_write_protect = target_disable_feature;
454
455	if (!tfo->tpg_get_inst_index)
456		tfo->tpg_get_inst_index = target_default_get_inst_index;
457
458	if (!tfo->sess_get_index)
459		tfo->sess_get_index = target_default_sess_get_index;
460
461	if (!tfo->set_default_node_attributes)
462		tfo->set_default_node_attributes = target_set_default_node_attributes;
463
464	if (!tfo->get_cmd_state)
465		tfo->get_cmd_state = target_default_get_cmd_state;
466}
467
468int target_register_template(const struct target_core_fabric_ops *fo)
469{
470	struct target_core_fabric_ops *tfo;
471	struct target_fabric_configfs *tf;
472	int ret;
473
474	ret = target_fabric_tf_ops_check(fo);
475	if (ret)
476		return ret;
477
478	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
479	if (!tf) {
480		pr_err("%s: could not allocate memory!\n", __func__);
481		return -ENOMEM;
482	}
483	tfo = kzalloc(sizeof(struct target_core_fabric_ops), GFP_KERNEL);
484	if (!tfo) {
485		kfree(tf);
486		pr_err("%s: could not allocate memory!\n", __func__);
487		return -ENOMEM;
488	}
489	memcpy(tfo, fo, sizeof(*tfo));
490	target_set_default_ops(tfo);
491
492	INIT_LIST_HEAD(&tf->tf_list);
493	atomic_set(&tf->tf_access_cnt, 0);
494	tf->tf_ops = tfo;
495	target_fabric_setup_cits(tf);
496
497	mutex_lock(&g_tf_lock);
498	list_add_tail(&tf->tf_list, &g_tf_list);
499	mutex_unlock(&g_tf_lock);
500
501	return 0;
502}
503EXPORT_SYMBOL(target_register_template);
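/*
 * Minimal registration sketch for a fabric driver (illustrative only; the
 * "my_fabric"/my_* names are hypothetical, and the ops shown are roughly the
 * ones target_fabric_tf_ops_check() insists on plus the owning module):
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		.module			= THIS_MODULE,
 *		.fabric_name		= "my_fabric",
 *		.tpg_get_wwn		= my_get_wwn,
 *		.tpg_get_tag		= my_get_tag,
 *		.check_stop_free	= my_check_stop_free,
 *		.release_cmd		= my_release_cmd,
 *		.write_pending		= my_write_pending,
 *		.queue_data_in		= my_queue_data_in,
 *		.queue_status		= my_queue_status,
 *		.queue_tm_rsp		= my_queue_tm_rsp,
 *		.aborted_task		= my_aborted_task,
 *		.fabric_make_wwn	= my_make_wwn,
 *		.fabric_drop_wwn	= my_drop_wwn,
 *		.fabric_make_tpg	= my_make_tpg,
 *		.fabric_drop_tpg	= my_drop_tpg,
 *	};
 *
 *	ret = target_register_template(&my_fabric_ops);
 *	...
 *	target_unregister_template(&my_fabric_ops);
 */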
504
505void target_unregister_template(const struct target_core_fabric_ops *fo)
506{
507	struct target_fabric_configfs *t;
508
509	mutex_lock(&g_tf_lock);
510	list_for_each_entry(t, &g_tf_list, tf_list) {
511		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
512			BUG_ON(atomic_read(&t->tf_access_cnt));
513			list_del(&t->tf_list);
514			mutex_unlock(&g_tf_lock);
515			/*
516			 * Wait for any outstanding fabric se_deve_entry->rcu_head
517			 * callbacks to complete post kfree_rcu(), before allowing
518			 * fabric driver unload of TFO->module to proceed.
519			 */
520			rcu_barrier();
521			kfree(t->tf_tpg_base_cit.ct_attrs);
522			kfree(t->tf_ops);
523			kfree(t);
524			return;
525		}
526	}
527	mutex_unlock(&g_tf_lock);
528}
529EXPORT_SYMBOL(target_unregister_template);
530
531/*##############################################################################
532// Stop functions called by external Target Fabrics Modules
533//############################################################################*/
534
535static inline struct se_dev_attrib *to_attrib(struct config_item *item)
536{
537	return container_of(to_config_group(item), struct se_dev_attrib,
538			da_group);
539}
540
541/* Start functions for struct config_item_type tb_dev_attrib_cit */
542#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
543static ssize_t _name##_show(struct config_item *item, char *page)	\
544{									\
545	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
546}
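/*
 * Illustrative expansion: DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas) below
 * generates the read side of that configfs attribute, i.e.
 *
 *	static ssize_t emulate_tas_show(struct config_item *item, char *page)
 *	{
 *		return snprintf(page, PAGE_SIZE, "%u\n",
 *				to_attrib(item)->emulate_tas);
 *	}
 */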
547
548DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
549DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
550DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
551DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
552DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
553DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
554DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
555DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
556DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
557DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
558DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
559DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
560DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
561DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
562DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
563DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
564DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
565DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
566DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
567DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
568DEF_CONFIGFS_ATTRIB_SHOW(block_size);
569DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
570DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
571DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
572DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
573DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
574DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
575DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
576DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
577DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
578DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
579DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
580DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
581
582#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
583static ssize_t _name##_store(struct config_item *item, const char *page,\
584		size_t count)						\
585{									\
586	struct se_dev_attrib *da = to_attrib(item);			\
587	u32 val;							\
588	int ret;							\
589									\
590	ret = kstrtou32(page, 0, &val);					\
591	if (ret < 0)							\
592		return ret;						\
593	da->_name = val;						\
594	return count;							\
595}
596
597DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
598DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
599DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
600DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
601DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
602
603#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
604static ssize_t _name##_store(struct config_item *item, const char *page,	\
605		size_t count)						\
606{									\
607	struct se_dev_attrib *da = to_attrib(item);			\
608	bool flag;							\
609	int ret;							\
610									\
611	ret = kstrtobool(page, &flag);					\
612	if (ret < 0)							\
613		return ret;						\
614	da->_name = flag;						\
615	return count;							\
616}
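/*
 * Illustrative expansion: DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw) below
 * generates a simple write handler that accepts the usual kstrtobool()
 * spellings (e.g. 0/1 or y/n):
 *
 *	static ssize_t emulate_caw_store(struct config_item *item,
 *			const char *page, size_t count)
 *	{
 *		struct se_dev_attrib *da = to_attrib(item);
 *		bool flag;
 *		int ret;
 *
 *		ret = kstrtobool(page, &flag);
 *		if (ret < 0)
 *			return ret;
 *		da->emulate_caw = flag;
 *		return count;
 *	}
 */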
617
618DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
619DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
620DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
621DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
622DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
623DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
624
625#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
626static ssize_t _name##_store(struct config_item *item, const char *page,\
627		size_t count)						\
628{									\
629	printk_once(KERN_WARNING					\
630		"ignoring deprecated %s attribute\n",			\
631		__stringify(_name));					\
632	return count;							\
633}
634
635DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
636DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
637
638static void dev_set_t10_wwn_model_alias(struct se_device *dev)
639{
640	const char *configname;
641
642	configname = config_item_name(&dev->dev_group.cg_item);
643	if (strlen(configname) >= INQUIRY_MODEL_LEN) {
644		pr_warn("dev[%p]: Backstore name '%s' is too long for "
645			"INQUIRY_MODEL, truncating to 15 characters\n", dev,
646			configname);
647	}
648	/*
649	 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
650	 * here without potentially breaking existing setups, so continue to
651	 * truncate one byte shorter than what can be carried in INQUIRY.
652	 */
653	strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
654}
655
656static ssize_t emulate_model_alias_store(struct config_item *item,
657		const char *page, size_t count)
658{
659	struct se_dev_attrib *da = to_attrib(item);
660	struct se_device *dev = da->da_dev;
661	bool flag;
662	int ret;
663
664	if (dev->export_count) {
665		pr_err("dev[%p]: Unable to change model alias"
666			" while export_count is %d\n",
667			dev, dev->export_count);
668		return -EINVAL;
669	}
670
671	ret = kstrtobool(page, &flag);
672	if (ret < 0)
673		return ret;
674
675	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
676	if (flag) {
677		dev_set_t10_wwn_model_alias(dev);
678	} else {
679		strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
680			sizeof(dev->t10_wwn.model));
681	}
682	da->emulate_model_alias = flag;
683	return count;
684}
685
686static ssize_t emulate_write_cache_store(struct config_item *item,
687		const char *page, size_t count)
688{
689	struct se_dev_attrib *da = to_attrib(item);
690	bool flag;
691	int ret;
692
693	ret = kstrtobool(page, &flag);
694	if (ret < 0)
695		return ret;
696
697	if (flag && da->da_dev->transport->get_write_cache) {
698		pr_err("emulate_write_cache not supported for this device\n");
699		return -EINVAL;
700	}
701
702	da->emulate_write_cache = flag;
703	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
704			da->da_dev, flag);
705	return count;
706}
707
708static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
709		const char *page, size_t count)
710{
711	struct se_dev_attrib *da = to_attrib(item);
712	u32 val;
713	int ret;
714
715	ret = kstrtou32(page, 0, &val);
716	if (ret < 0)
717		return ret;
718
719	if (val != TARGET_UA_INTLCK_CTRL_CLEAR
720	 && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
721	 && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
722		pr_err("Illegal value %d\n", val);
723		return -EINVAL;
724	}
725
726	if (da->da_dev->export_count) {
727		pr_err("dev[%p]: Unable to change SE Device"
728			" UA_INTRLCK_CTRL while export_count is %d\n",
729			da->da_dev, da->da_dev->export_count);
730		return -EINVAL;
731	}
732	da->emulate_ua_intlck_ctrl = val;
733	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
734		da->da_dev, val);
735	return count;
736}
737
738static ssize_t emulate_tas_store(struct config_item *item,
739		const char *page, size_t count)
740{
741	struct se_dev_attrib *da = to_attrib(item);
742	bool flag;
743	int ret;
744
745	ret = kstrtobool(page, &flag);
746	if (ret < 0)
747		return ret;
748
749	if (da->da_dev->export_count) {
750		pr_err("dev[%p]: Unable to change SE Device TAS while"
751			" export_count is %d\n",
752			da->da_dev, da->da_dev->export_count);
753		return -EINVAL;
754	}
755	da->emulate_tas = flag;
756	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
757		da->da_dev, flag ? "Enabled" : "Disabled");
758
759	return count;
760}
761
762static int target_try_configure_unmap(struct se_device *dev,
763				      const char *config_opt)
764{
765	if (!dev->transport->configure_unmap) {
766		pr_err("Generic Block Discard not supported\n");
767		return -ENOSYS;
768	}
769
770	if (!target_dev_configured(dev)) {
771		pr_err("Generic Block Discard setup for %s requires device to be configured\n",
772		       config_opt);
773		return -ENODEV;
774	}
775
776	if (!dev->transport->configure_unmap(dev)) {
777		pr_err("Generic Block Discard setup for %s failed\n",
778		       config_opt);
779		return -ENOSYS;
780	}
781
782	return 0;
783}
784
785static ssize_t emulate_tpu_store(struct config_item *item,
786		const char *page, size_t count)
787{
788	struct se_dev_attrib *da = to_attrib(item);
789	struct se_device *dev = da->da_dev;
790	bool flag;
791	int ret;
792
793	ret = kstrtobool(page, &flag);
794	if (ret < 0)
795		return ret;
796
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
801	if (flag && !da->max_unmap_block_desc_count) {
802		ret = target_try_configure_unmap(dev, "emulate_tpu");
803		if (ret)
804			return ret;
805	}
806
807	da->emulate_tpu = flag;
808	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
809		da->da_dev, flag);
810	return count;
811}
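/*
 * Example (illustrative; "iblock_0"/"mydev" are hypothetical configfs names):
 * when the backend has not yet reported discard limits (i.e.
 * max_unmap_block_desc_count is still zero), enabling UNMAP asks
 * target_try_configure_unmap() to set them up first:
 *
 *	echo 1 > /sys/kernel/config/target/core/iblock_0/mydev/attrib/emulate_tpu
 */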
812
813static ssize_t emulate_tpws_store(struct config_item *item,
814		const char *page, size_t count)
815{
816	struct se_dev_attrib *da = to_attrib(item);
817	struct se_device *dev = da->da_dev;
818	bool flag;
819	int ret;
820
821	ret = kstrtobool(page, &flag);
822	if (ret < 0)
823		return ret;
824
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
829	if (flag && !da->max_unmap_block_desc_count) {
830		ret = target_try_configure_unmap(dev, "emulate_tpws");
831		if (ret)
832			return ret;
833	}
834
835	da->emulate_tpws = flag;
836	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
837				da->da_dev, flag);
838	return count;
839}
840
841static ssize_t pi_prot_type_store(struct config_item *item,
842		const char *page, size_t count)
843{
844	struct se_dev_attrib *da = to_attrib(item);
845	int old_prot = da->pi_prot_type, ret;
846	struct se_device *dev = da->da_dev;
847	u32 flag;
848
849	ret = kstrtou32(page, 0, &flag);
850	if (ret < 0)
851		return ret;
852
853	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
854		pr_err("Illegal value %d for pi_prot_type\n", flag);
855		return -EINVAL;
856	}
857	if (flag == 2) {
858		pr_err("DIF TYPE2 protection currently not supported\n");
859		return -ENOSYS;
860	}
861	if (da->hw_pi_prot_type) {
862		pr_warn("DIF protection enabled on underlying hardware,"
863			" ignoring\n");
864		return count;
865	}
866	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is the only allowed value for non-supporting backends */
868		if (flag == 0)
869			return count;
870
871		pr_err("DIF protection not supported by backend: %s\n",
872		       dev->transport->name);
873		return -ENOSYS;
874	}
875	if (!target_dev_configured(dev)) {
876		pr_err("DIF protection requires device to be configured\n");
877		return -ENODEV;
878	}
879	if (dev->export_count) {
880		pr_err("dev[%p]: Unable to change SE Device PROT type while"
881		       " export_count is %d\n", dev, dev->export_count);
882		return -EINVAL;
883	}
884
885	da->pi_prot_type = flag;
886
887	if (flag && !old_prot) {
888		ret = dev->transport->init_prot(dev);
889		if (ret) {
890			da->pi_prot_type = old_prot;
891			da->pi_prot_verify = (bool) da->pi_prot_type;
892			return ret;
893		}
894
895	} else if (!flag && old_prot) {
896		dev->transport->free_prot(dev);
897	}
898
899	da->pi_prot_verify = (bool) da->pi_prot_type;
900	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
901	return count;
902}
903
904/* always zero, but attr needs to remain RW to avoid userspace breakage */
905static ssize_t pi_prot_format_show(struct config_item *item, char *page)
906{
907	return snprintf(page, PAGE_SIZE, "0\n");
908}
909
910static ssize_t pi_prot_format_store(struct config_item *item,
911		const char *page, size_t count)
912{
913	struct se_dev_attrib *da = to_attrib(item);
914	struct se_device *dev = da->da_dev;
915	bool flag;
916	int ret;
917
918	ret = kstrtobool(page, &flag);
919	if (ret < 0)
920		return ret;
921
922	if (!flag)
923		return count;
924
925	if (!dev->transport->format_prot) {
926		pr_err("DIF protection format not supported by backend %s\n",
927		       dev->transport->name);
928		return -ENOSYS;
929	}
930	if (!target_dev_configured(dev)) {
931		pr_err("DIF protection format requires device to be configured\n");
932		return -ENODEV;
933	}
934	if (dev->export_count) {
935		pr_err("dev[%p]: Unable to format SE Device PROT type while"
936		       " export_count is %d\n", dev, dev->export_count);
937		return -EINVAL;
938	}
939
940	ret = dev->transport->format_prot(dev);
941	if (ret)
942		return ret;
943
944	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
945	return count;
946}
947
948static ssize_t pi_prot_verify_store(struct config_item *item,
949		const char *page, size_t count)
950{
951	struct se_dev_attrib *da = to_attrib(item);
952	bool flag;
953	int ret;
954
955	ret = kstrtobool(page, &flag);
956	if (ret < 0)
957		return ret;
958
959	if (!flag) {
960		da->pi_prot_verify = flag;
961		return count;
962	}
963	if (da->hw_pi_prot_type) {
964		pr_warn("DIF protection enabled on underlying hardware,"
965			" ignoring\n");
966		return count;
967	}
968	if (!da->pi_prot_type) {
969		pr_warn("DIF protection not supported by backend, ignoring\n");
970		return count;
971	}
972	da->pi_prot_verify = flag;
973
974	return count;
975}
976
977static ssize_t force_pr_aptpl_store(struct config_item *item,
978		const char *page, size_t count)
979{
980	struct se_dev_attrib *da = to_attrib(item);
981	bool flag;
982	int ret;
983
984	ret = kstrtobool(page, &flag);
985	if (ret < 0)
986		return ret;
987	if (da->da_dev->export_count) {
988		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
989		       " export_count is %d\n",
990		       da->da_dev, da->da_dev->export_count);
991		return -EINVAL;
992	}
993
994	da->force_pr_aptpl = flag;
995	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
996	return count;
997}
998
999static ssize_t emulate_rest_reord_store(struct config_item *item,
1000		const char *page, size_t count)
1001{
1002	struct se_dev_attrib *da = to_attrib(item);
1003	bool flag;
1004	int ret;
1005
1006	ret = kstrtobool(page, &flag);
1007	if (ret < 0)
1008		return ret;
1009
1010	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", da->da_dev);
1013		return -ENOSYS;
1014	}
1015	da->emulate_rest_reord = flag;
1016	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
1017		da->da_dev, flag);
1018	return count;
1019}
1020
1021static ssize_t unmap_zeroes_data_store(struct config_item *item,
1022		const char *page, size_t count)
1023{
1024	struct se_dev_attrib *da = to_attrib(item);
1025	struct se_device *dev = da->da_dev;
1026	bool flag;
1027	int ret;
1028
1029	ret = kstrtobool(page, &flag);
1030	if (ret < 0)
1031		return ret;
1032
1033	if (da->da_dev->export_count) {
1034		pr_err("dev[%p]: Unable to change SE Device"
1035		       " unmap_zeroes_data while export_count is %d\n",
1036		       da->da_dev, da->da_dev->export_count);
1037		return -EINVAL;
1038	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
	 */
1043	if (flag && !da->max_unmap_block_desc_count) {
1044		ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
1045		if (ret)
1046			return ret;
1047	}
1048	da->unmap_zeroes_data = flag;
1049	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
1050		 da->da_dev, flag);
1051	return count;
1052}
1053
/*
 * Note: this can only be called on an unexported SE Device object.
 */
1057static ssize_t queue_depth_store(struct config_item *item,
1058		const char *page, size_t count)
1059{
1060	struct se_dev_attrib *da = to_attrib(item);
1061	struct se_device *dev = da->da_dev;
1062	u32 val;
1063	int ret;
1064
1065	ret = kstrtou32(page, 0, &val);
1066	if (ret < 0)
1067		return ret;
1068
1069	if (dev->export_count) {
1070		pr_err("dev[%p]: Unable to change SE Device TCQ while"
1071			" export_count is %d\n",
1072			dev, dev->export_count);
1073		return -EINVAL;
1074	}
1075	if (!val) {
1076		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
1077		return -EINVAL;
1078	}
1079
1080	if (val > dev->dev_attrib.queue_depth) {
1081		if (val > dev->dev_attrib.hw_queue_depth) {
1082			pr_err("dev[%p]: Passed queue_depth:"
1083				" %u exceeds TCM/SE_Device MAX"
1084				" TCQ: %u\n", dev, val,
1085				dev->dev_attrib.hw_queue_depth);
1086			return -EINVAL;
1087		}
1088	}
1089	da->queue_depth = dev->queue_depth = val;
1090	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
1091	return count;
1092}
1093
1094static ssize_t optimal_sectors_store(struct config_item *item,
1095		const char *page, size_t count)
1096{
1097	struct se_dev_attrib *da = to_attrib(item);
1098	u32 val;
1099	int ret;
1100
1101	ret = kstrtou32(page, 0, &val);
1102	if (ret < 0)
1103		return ret;
1104
1105	if (da->da_dev->export_count) {
1106		pr_err("dev[%p]: Unable to change SE Device"
1107			" optimal_sectors while export_count is %d\n",
1108			da->da_dev, da->da_dev->export_count);
1109		return -EINVAL;
1110	}
1111	if (val > da->hw_max_sectors) {
1112		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1113			" greater than hw_max_sectors: %u\n",
1114			da->da_dev, val, da->hw_max_sectors);
1115		return -EINVAL;
1116	}
1117
1118	da->optimal_sectors = val;
1119	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1120			da->da_dev, val);
1121	return count;
1122}
1123
1124static ssize_t block_size_store(struct config_item *item,
1125		const char *page, size_t count)
1126{
1127	struct se_dev_attrib *da = to_attrib(item);
1128	u32 val;
1129	int ret;
1130
1131	ret = kstrtou32(page, 0, &val);
1132	if (ret < 0)
1133		return ret;
1134
1135	if (da->da_dev->export_count) {
1136		pr_err("dev[%p]: Unable to change SE Device block_size"
1137			" while export_count is %d\n",
1138			da->da_dev, da->da_dev->export_count);
1139		return -EINVAL;
1140	}
1141
1142	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
1144			" for SE device, must be 512, 1024, 2048 or 4096\n",
1145			da->da_dev, val);
1146		return -EINVAL;
1147	}
1148
1149	da->block_size = val;
1150
1151	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1152			da->da_dev, val);
1153	return count;
1154}
1155
1156static ssize_t alua_support_show(struct config_item *item, char *page)
1157{
1158	struct se_dev_attrib *da = to_attrib(item);
1159	u8 flags = da->da_dev->transport_flags;
1160
1161	return snprintf(page, PAGE_SIZE, "%d\n",
1162			flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
1163}
1164
1165static ssize_t alua_support_store(struct config_item *item,
1166		const char *page, size_t count)
1167{
1168	struct se_dev_attrib *da = to_attrib(item);
1169	struct se_device *dev = da->da_dev;
1170	bool flag, oldflag;
1171	int ret;
1172
1173	ret = kstrtobool(page, &flag);
1174	if (ret < 0)
1175		return ret;
1176
1177	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
1178	if (flag == oldflag)
1179		return count;
1180
1181	if (!(dev->transport->transport_flags_changeable &
1182	      TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
1183		pr_err("dev[%p]: Unable to change SE Device alua_support:"
1184			" alua_support has fixed value\n", dev);
1185		return -ENOSYS;
1186	}
1187
1188	if (flag)
1189		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
1190	else
1191		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
1192	return count;
1193}
1194
1195static ssize_t pgr_support_show(struct config_item *item, char *page)
1196{
1197	struct se_dev_attrib *da = to_attrib(item);
1198	u8 flags = da->da_dev->transport_flags;
1199
1200	return snprintf(page, PAGE_SIZE, "%d\n",
1201			flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
1202}
1203
1204static ssize_t pgr_support_store(struct config_item *item,
1205		const char *page, size_t count)
1206{
1207	struct se_dev_attrib *da = to_attrib(item);
1208	struct se_device *dev = da->da_dev;
1209	bool flag, oldflag;
1210	int ret;
1211
1212	ret = kstrtobool(page, &flag);
1213	if (ret < 0)
1214		return ret;
1215
1216	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
1217	if (flag == oldflag)
1218		return count;
1219
1220	if (!(dev->transport->transport_flags_changeable &
1221	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
1222		pr_err("dev[%p]: Unable to change SE Device pgr_support:"
1223			" pgr_support has fixed value\n", dev);
1224		return -ENOSYS;
1225	}
1226
1227	if (flag)
1228		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
1229	else
1230		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
1231	return count;
1232}
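/*
 * Illustrative usage (hypothetical pscsi backstore path): backends that mark
 * TRANSPORT_FLAG_PASSTHROUGH_ALUA/_PGR as changeable let userspace flip
 * between passthrough and in-core emulation per device, e.g.
 *
 *	echo 1 > /sys/kernel/config/target/core/pscsi_0/mydev/attrib/alua_support
 *	echo 0 > /sys/kernel/config/target/core/pscsi_0/mydev/attrib/pgr_support
 */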
1233
1234static ssize_t emulate_rsoc_store(struct config_item *item,
1235		const char *page, size_t count)
1236{
1237	struct se_dev_attrib *da = to_attrib(item);
1238	bool flag;
1239	int ret;
1240
1241	ret = kstrtobool(page, &flag);
1242	if (ret < 0)
1243		return ret;
1244
1245	da->emulate_rsoc = flag;
1246	pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n",
1247			da->da_dev, flag);
1248	return count;
1249}
1250
1251static ssize_t submit_type_store(struct config_item *item, const char *page,
1252				 size_t count)
1253{
1254	struct se_dev_attrib *da = to_attrib(item);
1255	int ret;
1256	u8 val;
1257
1258	ret = kstrtou8(page, 0, &val);
1259	if (ret < 0)
1260		return ret;
1261
1262	if (val > TARGET_QUEUE_SUBMIT)
1263		return -EINVAL;
1264
1265	da->submit_type = val;
1266	return count;
1267}
1268
1269CONFIGFS_ATTR(, emulate_model_alias);
1270CONFIGFS_ATTR(, emulate_dpo);
1271CONFIGFS_ATTR(, emulate_fua_write);
1272CONFIGFS_ATTR(, emulate_fua_read);
1273CONFIGFS_ATTR(, emulate_write_cache);
1274CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
1275CONFIGFS_ATTR(, emulate_tas);
1276CONFIGFS_ATTR(, emulate_tpu);
1277CONFIGFS_ATTR(, emulate_tpws);
1278CONFIGFS_ATTR(, emulate_caw);
1279CONFIGFS_ATTR(, emulate_3pc);
1280CONFIGFS_ATTR(, emulate_pr);
1281CONFIGFS_ATTR(, emulate_rsoc);
1282CONFIGFS_ATTR(, pi_prot_type);
1283CONFIGFS_ATTR_RO(, hw_pi_prot_type);
1284CONFIGFS_ATTR(, pi_prot_format);
1285CONFIGFS_ATTR(, pi_prot_verify);
1286CONFIGFS_ATTR(, enforce_pr_isids);
1287CONFIGFS_ATTR(, is_nonrot);
1288CONFIGFS_ATTR(, emulate_rest_reord);
1289CONFIGFS_ATTR(, force_pr_aptpl);
1290CONFIGFS_ATTR_RO(, hw_block_size);
1291CONFIGFS_ATTR(, block_size);
1292CONFIGFS_ATTR_RO(, hw_max_sectors);
1293CONFIGFS_ATTR(, optimal_sectors);
1294CONFIGFS_ATTR_RO(, hw_queue_depth);
1295CONFIGFS_ATTR(, queue_depth);
1296CONFIGFS_ATTR(, max_unmap_lba_count);
1297CONFIGFS_ATTR(, max_unmap_block_desc_count);
1298CONFIGFS_ATTR(, unmap_granularity);
1299CONFIGFS_ATTR(, unmap_granularity_alignment);
1300CONFIGFS_ATTR(, unmap_zeroes_data);
1301CONFIGFS_ATTR(, max_write_same_len);
1302CONFIGFS_ATTR(, alua_support);
1303CONFIGFS_ATTR(, pgr_support);
1304CONFIGFS_ATTR(, submit_type);
1305
1306/*
1307 * dev_attrib attributes for devices using the target core SBC/SPC
1308 * interpreter.  Any backend using spc_parse_cdb should be using
1309 * these.
1310 */
1311struct configfs_attribute *sbc_attrib_attrs[] = {
1312	&attr_emulate_model_alias,
1313	&attr_emulate_dpo,
1314	&attr_emulate_fua_write,
1315	&attr_emulate_fua_read,
1316	&attr_emulate_write_cache,
1317	&attr_emulate_ua_intlck_ctrl,
1318	&attr_emulate_tas,
1319	&attr_emulate_tpu,
1320	&attr_emulate_tpws,
1321	&attr_emulate_caw,
1322	&attr_emulate_3pc,
1323	&attr_emulate_pr,
1324	&attr_pi_prot_type,
1325	&attr_hw_pi_prot_type,
1326	&attr_pi_prot_format,
1327	&attr_pi_prot_verify,
1328	&attr_enforce_pr_isids,
1329	&attr_is_nonrot,
1330	&attr_emulate_rest_reord,
1331	&attr_force_pr_aptpl,
1332	&attr_hw_block_size,
1333	&attr_block_size,
1334	&attr_hw_max_sectors,
1335	&attr_optimal_sectors,
1336	&attr_hw_queue_depth,
1337	&attr_queue_depth,
1338	&attr_max_unmap_lba_count,
1339	&attr_max_unmap_block_desc_count,
1340	&attr_unmap_granularity,
1341	&attr_unmap_granularity_alignment,
1342	&attr_unmap_zeroes_data,
1343	&attr_max_write_same_len,
1344	&attr_alua_support,
1345	&attr_pgr_support,
1346	&attr_emulate_rsoc,
1347	&attr_submit_type,
1348	NULL,
1349};
1350EXPORT_SYMBOL(sbc_attrib_attrs);
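/*
 * Illustrative hookup (names are hypothetical): a backend using the in-core
 * SBC/SPC emulation points its target_backend_ops at this array so that
 * TB_CIT_SETUP_DRV(dev_attrib, ...) picks it up:
 *
 *	static const struct target_backend_ops my_backend_ops = {
 *		.name			= "my_backend",
 *		.owner			= THIS_MODULE,
 *		...
 *		.tb_dev_attrib_attrs	= sbc_attrib_attrs,
 *	};
 */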
1351
1352/*
1353 * Minimal dev_attrib attributes for devices passing through CDBs.
1354 * In this case we only provide a few read-only attributes for
1355 * backwards compatibility.
1356 */
1357struct configfs_attribute *passthrough_attrib_attrs[] = {
1358	&attr_hw_pi_prot_type,
1359	&attr_hw_block_size,
1360	&attr_hw_max_sectors,
1361	&attr_hw_queue_depth,
1362	&attr_emulate_pr,
1363	&attr_alua_support,
1364	&attr_pgr_support,
1365	&attr_submit_type,
1366	NULL,
1367};
1368EXPORT_SYMBOL(passthrough_attrib_attrs);
1369
/*
 * PR-related dev_attrib attributes for devices passing through CDBs,
 * but allowing in-core PR emulation.
 */
1374struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
1375	&attr_enforce_pr_isids,
1376	&attr_force_pr_aptpl,
1377	NULL,
1378};
1379EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
1380
1381TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
1382TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
1383
1384/* End functions for struct config_item_type tb_dev_attrib_cit */
1385
1386/*  Start functions for struct config_item_type tb_dev_wwn_cit */
1387
1388static struct t10_wwn *to_t10_wwn(struct config_item *item)
1389{
1390	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
1391}
1392
1393static ssize_t target_check_inquiry_data(char *buf)
1394{
1395	size_t len;
1396	int i;
1397
1398	len = strlen(buf);
1399
1400	/*
1401	 * SPC 4.3.1:
1402	 * ASCII data fields shall contain only ASCII printable characters
1403	 * (i.e., code values 20h to 7Eh) and may be terminated with one or
1404	 * more ASCII null (00h) characters.
1405	 */
1406	for (i = 0; i < len; i++) {
1407		if (buf[i] < 0x20 || buf[i] > 0x7E) {
1408			pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
1409			return -EINVAL;
1410		}
1411	}
1412
1413	return len;
1414}
1415
1416/*
1417 * STANDARD and VPD page 0x83 T10 Vendor Identification
1418 */
1419static ssize_t target_wwn_vendor_id_show(struct config_item *item,
1420		char *page)
1421{
1422	return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
1423}
1424
1425static ssize_t target_wwn_vendor_id_store(struct config_item *item,
1426		const char *page, size_t count)
1427{
1428	struct t10_wwn *t10_wwn = to_t10_wwn(item);
1429	struct se_device *dev = t10_wwn->t10_dev;
1430	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1431	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
1432	char *stripped = NULL;
1433	ssize_t len;
1434	ssize_t ret;
1435
1436	len = strscpy(buf, page, sizeof(buf));
1437	if (len > 0) {
1438		/* Strip any newline added from userspace. */
1439		stripped = strstrip(buf);
1440		len = strlen(stripped);
1441	}
1442	if (len < 0 || len > INQUIRY_VENDOR_LEN) {
1443		pr_err("Emulated T10 Vendor Identification exceeds"
1444			" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
1445			"\n");
1446		return -EOVERFLOW;
1447	}
1448
1449	ret = target_check_inquiry_data(stripped);
1450
1451	if (ret < 0)
1452		return ret;
1453
1454	/*
1455	 * Check to see if any active exports exist.  If they do exist, fail
1456	 * here as changing this information on the fly (underneath the
1457	 * initiator side OS dependent multipath code) could cause negative
1458	 * effects.
1459	 */
1460	if (dev->export_count) {
1461		pr_err("Unable to set T10 Vendor Identification while"
1462			" active %d exports exist\n", dev->export_count);
1463		return -EINVAL;
1464	}
1465
1466	BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
1467	strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
1468
1469	pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
1470		 " %s\n", dev->t10_wwn.vendor);
1471
1472	return count;
1473}
1474
1475static ssize_t target_wwn_product_id_show(struct config_item *item,
1476		char *page)
1477{
1478	return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
1479}
1480
1481static ssize_t target_wwn_product_id_store(struct config_item *item,
1482		const char *page, size_t count)
1483{
1484	struct t10_wwn *t10_wwn = to_t10_wwn(item);
1485	struct se_device *dev = t10_wwn->t10_dev;
1486	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1487	unsigned char buf[INQUIRY_MODEL_LEN + 2];
1488	char *stripped = NULL;
1489	ssize_t len;
1490	ssize_t ret;
1491
1492	len = strscpy(buf, page, sizeof(buf));
1493	if (len > 0) {
1494		/* Strip any newline added from userspace. */
1495		stripped = strstrip(buf);
1496		len = strlen(stripped);
1497	}
1498	if (len < 0 || len > INQUIRY_MODEL_LEN) {
		pr_err("Emulated T10 Model exceeds INQUIRY_MODEL_LEN: "
1500			 __stringify(INQUIRY_MODEL_LEN)
1501			"\n");
1502		return -EOVERFLOW;
1503	}
1504
1505	ret = target_check_inquiry_data(stripped);
1506
1507	if (ret < 0)
1508		return ret;
1509
1510	/*
1511	 * Check to see if any active exports exist.  If they do exist, fail
1512	 * here as changing this information on the fly (underneath the
1513	 * initiator side OS dependent multipath code) could cause negative
1514	 * effects.
1515	 */
1516	if (dev->export_count) {
1517		pr_err("Unable to set T10 Model while active %d exports exist\n",
1518			dev->export_count);
1519		return -EINVAL;
1520	}
1521
1522	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
1523	strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
1524
1525	pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
1526		 dev->t10_wwn.model);
1527
1528	return count;
1529}
1530
1531static ssize_t target_wwn_revision_show(struct config_item *item,
1532		char *page)
1533{
1534	return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
1535}
1536
1537static ssize_t target_wwn_revision_store(struct config_item *item,
1538		const char *page, size_t count)
1539{
1540	struct t10_wwn *t10_wwn = to_t10_wwn(item);
1541	struct se_device *dev = t10_wwn->t10_dev;
1542	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1543	unsigned char buf[INQUIRY_REVISION_LEN + 2];
1544	char *stripped = NULL;
1545	ssize_t len;
1546	ssize_t ret;
1547
1548	len = strscpy(buf, page, sizeof(buf));
1549	if (len > 0) {
1550		/* Strip any newline added from userspace. */
1551		stripped = strstrip(buf);
1552		len = strlen(stripped);
1553	}
1554	if (len < 0 || len > INQUIRY_REVISION_LEN) {
1555		pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
1556			 __stringify(INQUIRY_REVISION_LEN)
1557			"\n");
1558		return -EOVERFLOW;
1559	}
1560
1561	ret = target_check_inquiry_data(stripped);
1562
1563	if (ret < 0)
1564		return ret;
1565
1566	/*
1567	 * Check to see if any active exports exist.  If they do exist, fail
1568	 * here as changing this information on the fly (underneath the
1569	 * initiator side OS dependent multipath code) could cause negative
1570	 * effects.
1571	 */
1572	if (dev->export_count) {
1573		pr_err("Unable to set T10 Revision while active %d exports exist\n",
1574			dev->export_count);
1575		return -EINVAL;
1576	}
1577
1578	BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
1579	strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
1580
1581	pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
1582		 dev->t10_wwn.revision);
1583
1584	return count;
1585}
1586
1587static ssize_t
1588target_wwn_company_id_show(struct config_item *item,
1589				char *page)
1590{
1591	return snprintf(page, PAGE_SIZE, "%#08x\n",
1592			to_t10_wwn(item)->company_id);
1593}
1594
1595static ssize_t
1596target_wwn_company_id_store(struct config_item *item,
1597				 const char *page, size_t count)
1598{
1599	struct t10_wwn *t10_wwn = to_t10_wwn(item);
1600	struct se_device *dev = t10_wwn->t10_dev;
1601	u32 val;
1602	int ret;
1603
1604	/*
1605	 * The IEEE COMPANY_ID field should contain a 24-bit canonical
1606	 * form OUI assigned by the IEEE.
1607	 */
1608	ret = kstrtou32(page, 0, &val);
1609	if (ret < 0)
1610		return ret;
1611
1612	if (val >= 0x1000000)
1613		return -EOVERFLOW;
1614
1615	/*
1616	 * Check to see if any active exports exist. If they do exist, fail
1617	 * here as changing this information on the fly (underneath the
1618	 * initiator side OS dependent multipath code) could cause negative
1619	 * effects.
1620	 */
1621	if (dev->export_count) {
1622		pr_err("Unable to set Company ID while %u exports exist\n",
1623		       dev->export_count);
1624		return -EINVAL;
1625	}
1626
1627	t10_wwn->company_id = val;
1628
1629	pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
1630		 t10_wwn->company_id);
1631
1632	return count;
1633}
1634
1635/*
1636 * VPD page 0x80 Unit serial
1637 */
1638static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
1639		char *page)
1640{
1641	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
1642		&to_t10_wwn(item)->unit_serial[0]);
1643}
1644
1645static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
1646		const char *page, size_t count)
1647{
1648	struct t10_wwn *t10_wwn = to_t10_wwn(item);
1649	struct se_device *dev = t10_wwn->t10_dev;
1650	unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };
1651
1652	/*
1653	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
1654	 * from the struct scsi_device level firmware, do not allow
1655	 * VPD Unit Serial to be emulated.
1656	 *
1657	 * Note this struct scsi_device could also be emulating VPD
1658	 * information from its drivers/scsi LLD.  But for now we assume
1659	 * it is doing 'the right thing' wrt a world wide unique
1660	 * VPD Unit Serial Number that OS dependent multipath can depend on.
1661	 */
1662	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
1663		pr_err("Underlying SCSI device firmware provided VPD"
1664			" Unit Serial, ignoring request\n");
1665		return -EOPNOTSUPP;
1666	}
1667
1668	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
1669		pr_err("Emulated VPD Unit Serial exceeds"
1670		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
1671		return -EOVERFLOW;
1672	}
1673	/*
1674	 * Check to see if any active $FABRIC_MOD exports exist.  If they
1675	 * do exist, fail here as changing this information on the fly
1676	 * (underneath the initiator side OS dependent multipath code)
1677	 * could cause negative effects.
1678	 */
1679	if (dev->export_count) {
1680		pr_err("Unable to set VPD Unit Serial while"
1681			" active %d $FABRIC_MOD exports exist\n",
1682			dev->export_count);
1683		return -EINVAL;
1684	}
1685
	/*
	 * This currently assumes ASCII encoding for the emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added by userspace via:
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
1692	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
1693	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
1694			"%s", strstrip(buf));
1695	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
1696
1697	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
1698			" %s\n", dev->t10_wwn.unit_serial);
1699
1700	return count;
1701}
1702
1703/*
1704 * VPD page 0x83 Protocol Identifier
1705 */
1706static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
1707		char *page)
1708{
1709	struct t10_wwn *t10_wwn = to_t10_wwn(item);
1710	struct t10_vpd *vpd;
1711	unsigned char buf[VPD_TMP_BUF_SIZE] = { };
1712	ssize_t len = 0;
1713
1714	spin_lock(&t10_wwn->t10_vpd_lock);
1715	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
1716		if (!vpd->protocol_identifier_set)
1717			continue;
1718
1719		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
1720
1721		if (len + strlen(buf) >= PAGE_SIZE)
1722			break;
1723
1724		len += sprintf(page+len, "%s", buf);
1725	}
1726	spin_unlock(&t10_wwn->t10_vpd_lock);
1727
1728	return len;
1729}
1730
1731/*
1732 * Generic wrapper for dumping VPD identifiers by association.
1733 */
1734#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
1735static ssize_t target_wwn_##_name##_show(struct config_item *item,	\
1736		char *page)						\
1737{									\
1738	struct t10_wwn *t10_wwn = to_t10_wwn(item);			\
1739	struct t10_vpd *vpd;						\
1740	unsigned char buf[VPD_TMP_BUF_SIZE];				\
1741	ssize_t len = 0;						\
1742									\
1743	spin_lock(&t10_wwn->t10_vpd_lock);				\
1744	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
1745		if (vpd->association != _assoc)				\
1746			continue;					\
1747									\
1748		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
1749		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
1750		if (len + strlen(buf) >= PAGE_SIZE)			\
1751			break;						\
1752		len += sprintf(page+len, "%s", buf);			\
1753									\
1754		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
1755		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
1756		if (len + strlen(buf) >= PAGE_SIZE)			\
1757			break;						\
1758		len += sprintf(page+len, "%s", buf);			\
1759									\
1760		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
1761		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
1762		if (len + strlen(buf) >= PAGE_SIZE)			\
1763			break;						\
1764		len += sprintf(page+len, "%s", buf);			\
1765	}								\
1766	spin_unlock(&t10_wwn->t10_vpd_lock);				\
1767									\
1768	return len;							\
1769}
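/*
 * Illustrative expansion: each DEF_DEV_WWN_ASSOC_SHOW() instance below
 * generates one show handler; e.g. the 0x00 variant becomes
 * target_wwn_vpd_assoc_logical_unit_show(), which walks t10_vpd_list and
 * dumps only descriptors whose association field equals 0x00 (logical unit).
 */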
1770
1771/* VPD page 0x83 Association: Logical Unit */
1772DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
1773/* VPD page 0x83 Association: Target Port */
1774DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
1775/* VPD page 0x83 Association: SCSI Target Device */
1776DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
1777
1778CONFIGFS_ATTR(target_wwn_, vendor_id);
1779CONFIGFS_ATTR(target_wwn_, product_id);
1780CONFIGFS_ATTR(target_wwn_, revision);
1781CONFIGFS_ATTR(target_wwn_, company_id);
1782CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
1783CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
1784CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
1785CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
1786CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
1787
1788static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
1789	&target_wwn_attr_vendor_id,
1790	&target_wwn_attr_product_id,
1791	&target_wwn_attr_revision,
1792	&target_wwn_attr_company_id,
1793	&target_wwn_attr_vpd_unit_serial,
1794	&target_wwn_attr_vpd_protocol_identifier,
1795	&target_wwn_attr_vpd_assoc_logical_unit,
1796	&target_wwn_attr_vpd_assoc_target_port,
1797	&target_wwn_attr_vpd_assoc_scsi_target_device,
1798	NULL,
1799};
1800
1801TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
1802
1803/*  End functions for struct config_item_type tb_dev_wwn_cit */
1804
1805/*  Start functions for struct config_item_type tb_dev_pr_cit */
1806
1807static struct se_device *pr_to_dev(struct config_item *item)
1808{
1809	return container_of(to_config_group(item), struct se_device,
1810			dev_pr_group);
1811}
1812
1813static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
1814		char *page)
1815{
1816	struct se_node_acl *se_nacl;
1817	struct t10_pr_registration *pr_reg;
1818	char i_buf[PR_REG_ISID_ID_LEN] = { };
1819
1820	pr_reg = dev->dev_pr_res_holder;
1821	if (!pr_reg)
1822		return sprintf(page, "No SPC-3 Reservation holder\n");
1823
1824	se_nacl = pr_reg->pr_reg_nacl;
1825	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
1826
1827	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
1828		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
1829		se_nacl->initiatorname, i_buf);
1830}
1831
1832static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
1833		char *page)
1834{
1835	struct se_session *sess = dev->reservation_holder;
1836	struct se_node_acl *se_nacl;
1837	ssize_t len;
1838
1839	if (sess) {
1840		se_nacl = sess->se_node_acl;
1841		len = sprintf(page,
1842			      "SPC-2 Reservation: %s Initiator: %s\n",
1843			      se_nacl->se_tpg->se_tpg_tfo->fabric_name,
1844			      se_nacl->initiatorname);
1845	} else {
1846		len = sprintf(page, "No SPC-2 Reservation holder\n");
1847	}
1848	return len;
1849}
1850
1851static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
1852{
1853	struct se_device *dev = pr_to_dev(item);
1854	int ret;
1855
1856	if (!dev->dev_attrib.emulate_pr)
1857		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
1858
1859	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
1860		return sprintf(page, "Passthrough\n");
1861
1862	spin_lock(&dev->dev_reservation_lock);
1863	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1864		ret = target_core_dev_pr_show_spc2_res(dev, page);
1865	else
1866		ret = target_core_dev_pr_show_spc3_res(dev, page);
1867	spin_unlock(&dev->dev_reservation_lock);
1868	return ret;
1869}
1870
1871static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
1872		char *page)
1873{
1874	struct se_device *dev = pr_to_dev(item);
1875	ssize_t len = 0;
1876
1877	spin_lock(&dev->dev_reservation_lock);
1878	if (!dev->dev_pr_res_holder) {
1879		len = sprintf(page, "No SPC-3 Reservation holder\n");
1880	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
1881		len = sprintf(page, "SPC-3 Reservation: All Target"
1882			" Ports registration\n");
1883	} else {
1884		len = sprintf(page, "SPC-3 Reservation: Single"
1885			" Target Port registration\n");
1886	}
1887
1888	spin_unlock(&dev->dev_reservation_lock);
1889	return len;
1890}
1891
1892static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
1893		char *page)
1894{
1895	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
1896}
1897
1898
1899static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
1900		char *page)
1901{
1902	struct se_device *dev = pr_to_dev(item);
1903	struct se_node_acl *se_nacl;
1904	struct se_portal_group *se_tpg;
1905	struct t10_pr_registration *pr_reg;
1906	const struct target_core_fabric_ops *tfo;
1907	ssize_t len = 0;
1908
1909	spin_lock(&dev->dev_reservation_lock);
1910	pr_reg = dev->dev_pr_res_holder;
1911	if (!pr_reg) {
1912		len = sprintf(page, "No SPC-3 Reservation holder\n");
1913		goto out_unlock;
1914	}
1915
1916	se_nacl = pr_reg->pr_reg_nacl;
1917	se_tpg = se_nacl->se_tpg;
1918	tfo = se_tpg->se_tpg_tfo;
1919
1920	len += sprintf(page+len, "SPC-3 Reservation: %s"
1921		" Target Node Endpoint: %s\n", tfo->fabric_name,
1922		tfo->tpg_get_wwn(se_tpg));
1923	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
1924		" Identifier Tag: %hu %s Portal Group Tag: %hu"
1925		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
1926		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
1927		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);
1928
1929out_unlock:
1930	spin_unlock(&dev->dev_reservation_lock);
1931	return len;
1932}
1933
1934
1935static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
1936		char *page)
1937{
1938	struct se_device *dev = pr_to_dev(item);
1939	const struct target_core_fabric_ops *tfo;
1940	struct t10_pr_registration *pr_reg;
1941	unsigned char buf[384];
1942	char i_buf[PR_REG_ISID_ID_LEN];
1943	ssize_t len = 0;
1944	int reg_count = 0;
1945
1946	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
1947
1948	spin_lock(&dev->t10_pr.registration_lock);
1949	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
1950			pr_reg_list) {
1951
1952		memset(buf, 0, 384);
1953		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1954		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1955		core_pr_dump_initiator_port(pr_reg, i_buf,
1956					PR_REG_ISID_ID_LEN);
1957		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
1958			tfo->fabric_name,
1959			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
1960			pr_reg->pr_res_generation);
1961
1962		if (len + strlen(buf) >= PAGE_SIZE)
1963			break;
1964
1965		len += sprintf(page+len, "%s", buf);
1966		reg_count++;
1967	}
1968	spin_unlock(&dev->t10_pr.registration_lock);
1969
1970	if (!reg_count)
1971		len += sprintf(page+len, "None\n");
1972
1973	return len;
1974}
1975
1976static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
1977{
1978	struct se_device *dev = pr_to_dev(item);
1979	struct t10_pr_registration *pr_reg;
1980	ssize_t len = 0;
1981
1982	spin_lock(&dev->dev_reservation_lock);
1983	pr_reg = dev->dev_pr_res_holder;
1984	if (pr_reg) {
1985		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
1986			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
1987	} else {
1988		len = sprintf(page, "No SPC-3 Reservation holder\n");
1989	}
1990
1991	spin_unlock(&dev->dev_reservation_lock);
1992	return len;
1993}
1994
1995static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
1996{
1997	struct se_device *dev = pr_to_dev(item);
1998
1999	if (!dev->dev_attrib.emulate_pr)
2000		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
2001	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
2002		return sprintf(page, "SPC_PASSTHROUGH\n");
2003	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
2004		return sprintf(page, "SPC2_RESERVATIONS\n");
2005
2006	return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
2007}
2008
2009static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
2010		char *page)
2011{
2012	struct se_device *dev = pr_to_dev(item);
2013
2014	if (!dev->dev_attrib.emulate_pr ||
2015	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
2016		return 0;
2017
2018	return sprintf(page, "APTPL Bit Status: %s\n",
2019		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
2020}
2021
2022static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
2023		char *page)
2024{
2025	struct se_device *dev = pr_to_dev(item);
2026
2027	if (!dev->dev_attrib.emulate_pr ||
2028	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
2029		return 0;
2030
2031	return sprintf(page, "Ready to process PR APTPL metadata..\n");
2032}
2033
2034enum {
2035	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
2036	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
2037	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
2038	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
2039};
2040
2041static match_table_t tokens = {
2042	{Opt_initiator_fabric, "initiator_fabric=%s"},
2043	{Opt_initiator_node, "initiator_node=%s"},
2044	{Opt_initiator_sid, "initiator_sid=%s"},
2045	{Opt_sa_res_key, "sa_res_key=%s"},
2046	{Opt_res_holder, "res_holder=%d"},
2047	{Opt_res_type, "res_type=%d"},
2048	{Opt_res_scope, "res_scope=%d"},
2049	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
2050	{Opt_mapped_lun, "mapped_lun=%u"},
2051	{Opt_target_fabric, "target_fabric=%s"},
2052	{Opt_target_node, "target_node=%s"},
2053	{Opt_tpgt, "tpgt=%d"},
2054	{Opt_port_rtpi, "port_rtpi=%d"},
2055	{Opt_target_lun, "target_lun=%u"},
2056	{Opt_err, NULL}
2057};
2058
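/*
 * Illustrative APTPL metadata input for the store handler below.  Tokens are
 * separated by ',' or '\n' and must match the table above; initiator_node,
 * target_node and sa_res_key are mandatory.  All values here are hypothetical
 * examples, not defaults:
 *
 *   initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:abcd,
 *   initiator_sid=10,sa_res_key=0x12ab,res_holder=1,res_type=3,mapped_lun=0,
 *   target_fabric=iSCSI,target_node=iqn.2003-01.org.example.target:t1,
 *   tpgt=1,port_rtpi=1,target_lun=0
 */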
2059static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
2060		const char *page, size_t count)
2061{
2062	struct se_device *dev = pr_to_dev(item);
2063	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
2064	unsigned char *t_fabric = NULL, *t_port = NULL;
2065	char *orig, *ptr, *opts;
2066	substring_t args[MAX_OPT_ARGS];
2067	unsigned long long tmp_ll;
2068	u64 sa_res_key = 0;
2069	u64 mapped_lun = 0, target_lun = 0;
2070	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
2071	u16 tpgt = 0;
2072	u8 type = 0;
2073
2074	if (!dev->dev_attrib.emulate_pr ||
2075	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
2076		return count;
2077	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
2078		return count;
2079
2080	if (dev->export_count) {
2081		pr_debug("Unable to process APTPL metadata while"
2082			" active fabric exports exist\n");
2083		return -EINVAL;
2084	}
2085
2086	opts = kstrdup(page, GFP_KERNEL);
2087	if (!opts)
2088		return -ENOMEM;
2089
2090	orig = opts;
2091	while ((ptr = strsep(&opts, ",\n")) != NULL) {
2092		if (!*ptr)
2093			continue;
2094
2095		token = match_token(ptr, tokens, args);
2096		switch (token) {
2097		case Opt_initiator_fabric:
2098			i_fabric = match_strdup(args);
2099			if (!i_fabric) {
2100				ret = -ENOMEM;
2101				goto out;
2102			}
2103			break;
2104		case Opt_initiator_node:
2105			i_port = match_strdup(args);
2106			if (!i_port) {
2107				ret = -ENOMEM;
2108				goto out;
2109			}
2110			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
2111				pr_err("APTPL metadata initiator_node="
2112					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
2113					PR_APTPL_MAX_IPORT_LEN);
2114				ret = -EINVAL;
2115				break;
2116			}
2117			break;
2118		case Opt_initiator_sid:
2119			isid = match_strdup(args);
2120			if (!isid) {
2121				ret = -ENOMEM;
2122				goto out;
2123			}
2124			if (strlen(isid) >= PR_REG_ISID_LEN) {
2125				pr_err("APTPL metadata initiator_isid"
2126					"= exceeds PR_REG_ISID_LEN: %d\n",
2127					PR_REG_ISID_LEN);
2128				ret = -EINVAL;
2129				break;
2130			}
2131			break;
2132		case Opt_sa_res_key:
			ret = match_u64(args, &tmp_ll);
			if (ret < 0) {
				pr_err("match_u64() failed for sa_res_key=\n");
2136				goto out;
2137			}
2138			sa_res_key = (u64)tmp_ll;
2139			break;
2140		/*
2141		 * PR APTPL Metadata for Reservation
2142		 */
2143		case Opt_res_holder:
2144			ret = match_int(args, &arg);
2145			if (ret)
2146				goto out;
2147			res_holder = arg;
2148			break;
2149		case Opt_res_type:
2150			ret = match_int(args, &arg);
2151			if (ret)
2152				goto out;
2153			type = (u8)arg;
2154			break;
2155		case Opt_res_scope:
2156			ret = match_int(args, &arg);
2157			if (ret)
2158				goto out;
2159			break;
2160		case Opt_res_all_tg_pt:
2161			ret = match_int(args, &arg);
2162			if (ret)
2163				goto out;
2164			all_tg_pt = (int)arg;
2165			break;
2166		case Opt_mapped_lun:
2167			ret = match_u64(args, &tmp_ll);
2168			if (ret)
2169				goto out;
2170			mapped_lun = (u64)tmp_ll;
2171			break;
2172		/*
2173		 * PR APTPL Metadata for Target Port
2174		 */
2175		case Opt_target_fabric:
2176			t_fabric = match_strdup(args);
2177			if (!t_fabric) {
2178				ret = -ENOMEM;
2179				goto out;
2180			}
2181			break;
2182		case Opt_target_node:
2183			t_port = match_strdup(args);
2184			if (!t_port) {
2185				ret = -ENOMEM;
2186				goto out;
2187			}
2188			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
2189				pr_err("APTPL metadata target_node="
2190					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
2191					PR_APTPL_MAX_TPORT_LEN);
2192				ret = -EINVAL;
2193				break;
2194			}
2195			break;
2196		case Opt_tpgt:
2197			ret = match_int(args, &arg);
2198			if (ret)
2199				goto out;
2200			tpgt = (u16)arg;
2201			break;
2202		case Opt_port_rtpi:
2203			ret = match_int(args, &arg);
2204			if (ret)
2205				goto out;
2206			break;
2207		case Opt_target_lun:
2208			ret = match_u64(args, &tmp_ll);
2209			if (ret)
2210				goto out;
2211			target_lun = (u64)tmp_ll;
2212			break;
2213		default:
2214			break;
2215		}
2216	}
2217
2218	if (!i_port || !t_port || !sa_res_key) {
2219		pr_err("Illegal parameters for APTPL registration\n");
2220		ret = -EINVAL;
2221		goto out;
2222	}
2223
2224	if (res_holder && !(type)) {
2225		pr_err("Illegal PR type: 0x%02x for reservation"
2226				" holder\n", type);
2227		ret = -EINVAL;
2228		goto out;
2229	}
2230
2231	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
2232			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
2233			res_holder, all_tg_pt, type);
2234out:
2235	kfree(i_fabric);
2236	kfree(i_port);
2237	kfree(isid);
2238	kfree(t_fabric);
2239	kfree(t_port);
2240	kfree(orig);
2241	return (ret == 0) ? count : ret;
2242}
2243
2244
2245CONFIGFS_ATTR_RO(target_pr_, res_holder);
2246CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
2247CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
2248CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
2249CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
2250CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
2251CONFIGFS_ATTR_RO(target_pr_, res_type);
2252CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
2253CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
2254
2255static struct configfs_attribute *target_core_dev_pr_attrs[] = {
2256	&target_pr_attr_res_holder,
2257	&target_pr_attr_res_pr_all_tgt_pts,
2258	&target_pr_attr_res_pr_generation,
2259	&target_pr_attr_res_pr_holder_tg_port,
2260	&target_pr_attr_res_pr_registered_i_pts,
2261	&target_pr_attr_res_pr_type,
2262	&target_pr_attr_res_type,
2263	&target_pr_attr_res_aptpl_active,
2264	&target_pr_attr_res_aptpl_metadata,
2265	NULL,
2266};
2267
2268TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
2269
2270/*  End functions for struct config_item_type tb_dev_pr_cit */
2271
2272/*  Start functions for struct config_item_type tb_dev_cit */
2273
2274static inline struct se_device *to_device(struct config_item *item)
2275{
2276	return container_of(to_config_group(item), struct se_device, dev_group);
2277}
2278
2279static ssize_t target_dev_info_show(struct config_item *item, char *page)
2280{
2281	struct se_device *dev = to_device(item);
2282	int bl = 0;
2283	ssize_t read_bytes = 0;
2284
2285	transport_dump_dev_state(dev, page, &bl);
2286	read_bytes += bl;
2287	read_bytes += dev->transport->show_configfs_dev_params(dev,
2288			page+read_bytes);
2289	return read_bytes;
2290}
2291
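/*
 * The control attribute passes backend-specific "key=value" strings straight
 * to the backend's set_configfs_dev_params().  Illustrative usage for the
 * fileio backend (option names belong to that backend, values hypothetical):
 *
 *   echo "fd_dev_name=/tmp/file.img,fd_dev_size=1073741824" > \
 *       /sys/kernel/config/target/core/$HBA/$DEV/control
 */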
2292static ssize_t target_dev_control_store(struct config_item *item,
2293		const char *page, size_t count)
2294{
2295	struct se_device *dev = to_device(item);
2296
2297	return dev->transport->set_configfs_dev_params(dev, page, count);
2298}
2299
2300static ssize_t target_dev_alias_show(struct config_item *item, char *page)
2301{
2302	struct se_device *dev = to_device(item);
2303
2304	if (!(dev->dev_flags & DF_USING_ALIAS))
2305		return 0;
2306
2307	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
2308}
2309
2310static ssize_t target_dev_alias_store(struct config_item *item,
2311		const char *page, size_t count)
2312{
2313	struct se_device *dev = to_device(item);
2314	struct se_hba *hba = dev->se_hba;
2315	ssize_t read_bytes;
2316
2317	if (count > (SE_DEV_ALIAS_LEN-1)) {
2318		pr_err("alias count: %d exceeds"
2319			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
2320			SE_DEV_ALIAS_LEN-1);
2321		return -EINVAL;
2322	}
2323
2324	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
2325	if (!read_bytes)
2326		return -EINVAL;
2327	if (dev->dev_alias[read_bytes - 1] == '\n')
2328		dev->dev_alias[read_bytes - 1] = '\0';
2329
2330	dev->dev_flags |= DF_USING_ALIAS;
2331
2332	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
2333		config_item_name(&hba->hba_group.cg_item),
2334		config_item_name(&dev->dev_group.cg_item),
2335		dev->dev_alias);
2336
2337	return read_bytes;
2338}
2339
2340static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
2341{
2342	struct se_device *dev = to_device(item);
2343
2344	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
2345		return 0;
2346
2347	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
2348}
2349
2350static ssize_t target_dev_udev_path_store(struct config_item *item,
2351		const char *page, size_t count)
2352{
2353	struct se_device *dev = to_device(item);
2354	struct se_hba *hba = dev->se_hba;
2355	ssize_t read_bytes;
2356
2357	if (count > (SE_UDEV_PATH_LEN-1)) {
2358		pr_err("udev_path count: %d exceeds"
2359			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
2360			SE_UDEV_PATH_LEN-1);
2361		return -EINVAL;
2362	}
2363
2364	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
2365			"%s", page);
2366	if (!read_bytes)
2367		return -EINVAL;
2368	if (dev->udev_path[read_bytes - 1] == '\n')
2369		dev->udev_path[read_bytes - 1] = '\0';
2370
2371	dev->dev_flags |= DF_USING_UDEV_PATH;
2372
2373	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
2374		config_item_name(&hba->hba_group.cg_item),
2375		config_item_name(&dev->dev_group.cg_item),
2376		dev->udev_path);
2377
2378	return read_bytes;
2379}
2380
2381static ssize_t target_dev_enable_show(struct config_item *item, char *page)
2382{
2383	struct se_device *dev = to_device(item);
2384
2385	return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
2386}
2387
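/*
 * Writing "1" configures the backend device once its control parameters have
 * been set.  Illustrative usage:
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
 */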
2388static ssize_t target_dev_enable_store(struct config_item *item,
2389		const char *page, size_t count)
2390{
2391	struct se_device *dev = to_device(item);
2392	char *ptr;
2393	int ret;
2394
2395	ptr = strstr(page, "1");
2396	if (!ptr) {
2397		pr_err("For dev_enable ops, only valid value"
2398				" is \"1\"\n");
2399		return -EINVAL;
2400	}
2401
2402	ret = target_configure_device(dev);
2403	if (ret)
2404		return ret;
2405	return count;
2406}
2407
2408static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
2409{
2410	struct se_device *dev = to_device(item);
2411	struct config_item *lu_ci;
2412	struct t10_alua_lu_gp *lu_gp;
2413	struct t10_alua_lu_gp_member *lu_gp_mem;
2414	ssize_t len = 0;
2415
2416	lu_gp_mem = dev->dev_alua_lu_gp_mem;
2417	if (!lu_gp_mem)
2418		return 0;
2419
2420	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2421	lu_gp = lu_gp_mem->lu_gp;
2422	if (lu_gp) {
2423		lu_ci = &lu_gp->lu_gp_group.cg_item;
2424		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
2425			config_item_name(lu_ci), lu_gp->lu_gp_id);
2426	}
2427	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2428
2429	return len;
2430}
2431
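/*
 * Illustrative usage of the alua_lu_gp attribute: writing the name of an
 * existing LU group (e.g. "default_lu_gp") associates this device with that
 * group, while writing "NULL" clears any existing association:
 *
 *   echo default_lu_gp > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 */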
2432static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
2433		const char *page, size_t count)
2434{
2435	struct se_device *dev = to_device(item);
2436	struct se_hba *hba = dev->se_hba;
2437	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
2438	struct t10_alua_lu_gp_member *lu_gp_mem;
2439	unsigned char buf[LU_GROUP_NAME_BUF] = { };
2440	int move = 0;
2441
2442	lu_gp_mem = dev->dev_alua_lu_gp_mem;
2443	if (!lu_gp_mem)
2444		return count;
2445
	if (count >= LU_GROUP_NAME_BUF) {
2447		pr_err("ALUA LU Group Alias too large!\n");
2448		return -EINVAL;
2449	}
2450	memcpy(buf, page, count);
2451	/*
2452	 * Any ALUA logical unit alias besides "NULL" means we will be
2453	 * making a new group association.
2454	 */
2455	if (strcmp(strstrip(buf), "NULL")) {
2456		/*
2457		 * core_alua_get_lu_gp_by_name() will increment reference to
		 * struct t10_alua_lu_gp.  This reference is released with
		 * core_alua_put_lu_gp_from_name() below.
2460		 */
2461		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
2462		if (!lu_gp_new)
2463			return -ENODEV;
2464	}
2465
2466	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2467	lu_gp = lu_gp_mem->lu_gp;
2468	if (lu_gp) {
2469		/*
2470		 * Clearing an existing lu_gp association, and replacing
2471		 * with NULL
2472		 */
2473		if (!lu_gp_new) {
2474			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
2475				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
2476				" %hu\n",
2477				config_item_name(&hba->hba_group.cg_item),
2478				config_item_name(&dev->dev_group.cg_item),
2479				config_item_name(&lu_gp->lu_gp_group.cg_item),
2480				lu_gp->lu_gp_id);
2481
2482			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
2483			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2484
2485			return count;
2486		}
2487		/*
2488		 * Removing existing association of lu_gp_mem with lu_gp
2489		 */
2490		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
2491		move = 1;
2492	}
2493	/*
2494	 * Associate lu_gp_mem with lu_gp_new.
2495	 */
2496	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
2497	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2498
2499	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
2500		" core/alua/lu_gps/%s, ID: %hu\n",
2501		(move) ? "Moving" : "Adding",
2502		config_item_name(&hba->hba_group.cg_item),
2503		config_item_name(&dev->dev_group.cg_item),
2504		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
2505		lu_gp_new->lu_gp_id);
2506
2507	core_alua_put_lu_gp_from_name(lu_gp_new);
2508	return count;
2509}
2510
2511static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
2512{
2513	struct se_device *dev = to_device(item);
2514	struct t10_alua_lba_map *map;
2515	struct t10_alua_lba_map_member *mem;
2516	char *b = page;
2517	int bl = 0;
2518	char state;
2519
2520	spin_lock(&dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		bl += sprintf(b + bl, "%u %u\n",
			      dev->t10_alua.lba_map_segment_size,
			      dev->t10_alua.lba_map_segment_multiplier);
2525	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
2526		bl += sprintf(b + bl, "%llu %llu",
2527			      map->lba_map_first_lba, map->lba_map_last_lba);
2528		list_for_each_entry(mem, &map->lba_map_mem_list,
2529				    lba_map_mem_list) {
2530			switch (mem->lba_map_mem_alua_state) {
2531			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
2532				state = 'O';
2533				break;
2534			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
2535				state = 'A';
2536				break;
2537			case ALUA_ACCESS_STATE_STANDBY:
2538				state = 'S';
2539				break;
2540			case ALUA_ACCESS_STATE_UNAVAILABLE:
2541				state = 'U';
2542				break;
2543			default:
2544				state = '.';
2545				break;
2546			}
2547			bl += sprintf(b + bl, " %d:%c",
2548				      mem->lba_map_mem_alua_pg_id, state);
2549		}
2550		bl += sprintf(b + bl, "\n");
2551	}
2552	spin_unlock(&dev->t10_alua.lba_map_lock);
2553	return bl;
2554}
2555
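/*
 * The lba_map store handler below expects one header line followed by one
 * line per LBA segment.  Illustrative input (all numbers hypothetical):
 *
 *   64 1
 *   0 131071 0:O 1:A
 *   131072 262143 0:A 1:O
 *
 * The header is "<segment_size> <segment_multiplier>"; every other line is
 * "<first_lba> <last_lba>" followed by one "<tg_pt_gp_id>:<state>" pair per
 * target port group, with <state> one of O/A/S/U as decoded below.  Each
 * segment line must carry the same number of port group entries.
 */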
2556static ssize_t target_dev_lba_map_store(struct config_item *item,
2557		const char *page, size_t count)
2558{
2559	struct se_device *dev = to_device(item);
2560	struct t10_alua_lba_map *lba_map = NULL;
2561	struct list_head lba_list;
2562	char *map_entries, *orig, *ptr;
2563	char state;
2564	int pg_num = -1, pg;
2565	int ret = 0, num = 0, pg_id, alua_state;
2566	unsigned long start_lba = -1, end_lba = -1;
2567	unsigned long segment_size = -1, segment_mult = -1;
2568
2569	orig = map_entries = kstrdup(page, GFP_KERNEL);
2570	if (!map_entries)
2571		return -ENOMEM;
2572
2573	INIT_LIST_HEAD(&lba_list);
2574	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
2575		if (!*ptr)
2576			continue;
2577
2578		if (num == 0) {
2579			if (sscanf(ptr, "%lu %lu\n",
2580				   &segment_size, &segment_mult) != 2) {
2581				pr_err("Invalid line %d\n", num);
2582				ret = -EINVAL;
2583				break;
2584			}
2585			num++;
2586			continue;
2587		}
2588		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
2589			pr_err("Invalid line %d\n", num);
2590			ret = -EINVAL;
2591			break;
2592		}
2593		ptr = strchr(ptr, ' ');
2594		if (!ptr) {
2595			pr_err("Invalid line %d, missing end lba\n", num);
2596			ret = -EINVAL;
2597			break;
2598		}
2599		ptr++;
2600		ptr = strchr(ptr, ' ');
2601		if (!ptr) {
2602			pr_err("Invalid line %d, missing state definitions\n",
2603			       num);
2604			ret = -EINVAL;
2605			break;
2606		}
2607		ptr++;
2608		lba_map = core_alua_allocate_lba_map(&lba_list,
2609						     start_lba, end_lba);
2610		if (IS_ERR(lba_map)) {
2611			ret = PTR_ERR(lba_map);
2612			break;
2613		}
2614		pg = 0;
2615		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
2616			switch (state) {
2617			case 'O':
2618				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
2619				break;
2620			case 'A':
2621				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
2622				break;
2623			case 'S':
2624				alua_state = ALUA_ACCESS_STATE_STANDBY;
2625				break;
2626			case 'U':
2627				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
2628				break;
2629			default:
2630				pr_err("Invalid ALUA state '%c'\n", state);
2631				ret = -EINVAL;
2632				goto out;
2633			}
2634
2635			ret = core_alua_allocate_lba_map_mem(lba_map,
2636							     pg_id, alua_state);
2637			if (ret) {
2638				pr_err("Invalid target descriptor %d:%c "
2639				       "at line %d\n",
2640				       pg_id, state, num);
2641				break;
2642			}
2643			pg++;
2644			ptr = strchr(ptr, ' ');
2645			if (ptr)
2646				ptr++;
2647			else
2648				break;
2649		}
		if (pg_num == -1) {
			pg_num = pg;
		} else if (pg != pg_num) {
			pr_err("Only %d of %d port group definitions at line %d\n",
			       pg, pg_num, num);
			ret = -EINVAL;
			break;
		}
2658		num++;
2659	}
2660out:
2661	if (ret) {
2662		core_alua_free_lba_map(&lba_list);
2663		count = ret;
2664	} else
2665		core_alua_set_lba_map(dev, &lba_list,
2666				      segment_size, segment_mult);
2667	kfree(orig);
2668	return count;
2669}
2670
2671CONFIGFS_ATTR_RO(target_dev_, info);
2672CONFIGFS_ATTR_WO(target_dev_, control);
2673CONFIGFS_ATTR(target_dev_, alias);
2674CONFIGFS_ATTR(target_dev_, udev_path);
2675CONFIGFS_ATTR(target_dev_, enable);
2676CONFIGFS_ATTR(target_dev_, alua_lu_gp);
2677CONFIGFS_ATTR(target_dev_, lba_map);
2678
2679static struct configfs_attribute *target_core_dev_attrs[] = {
2680	&target_dev_attr_info,
2681	&target_dev_attr_control,
2682	&target_dev_attr_alias,
2683	&target_dev_attr_udev_path,
2684	&target_dev_attr_enable,
2685	&target_dev_attr_alua_lu_gp,
2686	&target_dev_attr_lba_map,
2687	NULL,
2688};
2689
2690static void target_core_dev_release(struct config_item *item)
2691{
2692	struct config_group *dev_cg = to_config_group(item);
2693	struct se_device *dev =
2694		container_of(dev_cg, struct se_device, dev_group);
2695
2696	target_free_device(dev);
2697}
2698
2699/*
2700 * Used in target_core_fabric_configfs.c to verify valid se_device symlink
2701 * within target_fabric_port_link()
2702 */
2703struct configfs_item_operations target_core_dev_item_ops = {
2704	.release		= target_core_dev_release,
2705};
2706
2707TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
2708
2709/* End functions for struct config_item_type tb_dev_cit */
2710
2711/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
2712
2713static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
2714{
2715	return container_of(to_config_group(item), struct t10_alua_lu_gp,
2716			lu_gp_group);
2717}
2718
2719static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
2720{
2721	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
2722
2723	if (!lu_gp->lu_gp_valid_id)
2724		return 0;
2725	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
2726}
2727
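/*
 * Illustrative usage: assign a 16-bit ID (at most 0x0000ffff) to an ALUA LU
 * group created under core/alua/lu_gps/:
 *
 *   echo 1 > /sys/kernel/config/target/core/alua/lu_gps/$GP/lu_gp_id
 */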
2728static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
2729		const char *page, size_t count)
2730{
2731	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
2732	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
2733	unsigned long lu_gp_id;
2734	int ret;
2735
2736	ret = kstrtoul(page, 0, &lu_gp_id);
2737	if (ret < 0) {
2738		pr_err("kstrtoul() returned %d for"
2739			" lu_gp_id\n", ret);
2740		return ret;
2741	}
2742	if (lu_gp_id > 0x0000ffff) {
2743		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
2744			" 0x0000ffff\n", lu_gp_id);
2745		return -EINVAL;
2746	}
2747
2748	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
2749	if (ret < 0)
2750		return -EINVAL;
2751
2752	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
2753		" Group: core/alua/lu_gps/%s to ID: %hu\n",
2754		config_item_name(&alua_lu_gp_cg->cg_item),
2755		lu_gp->lu_gp_id);
2756
2757	return count;
2758}
2759
2760static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
2761{
2762	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
2763	struct se_device *dev;
2764	struct se_hba *hba;
2765	struct t10_alua_lu_gp_member *lu_gp_mem;
2766	ssize_t len = 0, cur_len;
2767	unsigned char buf[LU_GROUP_NAME_BUF] = { };
2768
2769	spin_lock(&lu_gp->lu_gp_lock);
2770	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
2771		dev = lu_gp_mem->lu_gp_mem_dev;
2772		hba = dev->se_hba;
2773
2774		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
2775			config_item_name(&hba->hba_group.cg_item),
2776			config_item_name(&dev->dev_group.cg_item));
2777		cur_len++; /* Extra byte for NULL terminator */
2778
2779		if ((cur_len + len) > PAGE_SIZE) {
2780			pr_warn("Ran out of lu_gp_show_attr"
2781				"_members buffer\n");
2782			break;
2783		}
2784		memcpy(page+len, buf, cur_len);
2785		len += cur_len;
2786	}
2787	spin_unlock(&lu_gp->lu_gp_lock);
2788
2789	return len;
2790}
2791
2792CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
2793CONFIGFS_ATTR_RO(target_lu_gp_, members);
2794
2795static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
2796	&target_lu_gp_attr_lu_gp_id,
2797	&target_lu_gp_attr_members,
2798	NULL,
2799};
2800
2801static void target_core_alua_lu_gp_release(struct config_item *item)
2802{
2803	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2804			struct t10_alua_lu_gp, lu_gp_group);
2805
2806	core_alua_free_lu_gp(lu_gp);
2807}
2808
2809static struct configfs_item_operations target_core_alua_lu_gp_ops = {
2810	.release		= target_core_alua_lu_gp_release,
2811};
2812
2813static const struct config_item_type target_core_alua_lu_gp_cit = {
2814	.ct_item_ops		= &target_core_alua_lu_gp_ops,
2815	.ct_attrs		= target_core_alua_lu_gp_attrs,
2816	.ct_owner		= THIS_MODULE,
2817};
2818
2819/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
2820
2821/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
2822
2823static struct config_group *target_core_alua_create_lu_gp(
2824	struct config_group *group,
2825	const char *name)
2826{
2827	struct t10_alua_lu_gp *lu_gp;
2828	struct config_group *alua_lu_gp_cg = NULL;
2829	struct config_item *alua_lu_gp_ci = NULL;
2830
2831	lu_gp = core_alua_allocate_lu_gp(name, 0);
2832	if (IS_ERR(lu_gp))
2833		return NULL;
2834
2835	alua_lu_gp_cg = &lu_gp->lu_gp_group;
2836	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
2837
2838	config_group_init_type_name(alua_lu_gp_cg, name,
2839			&target_core_alua_lu_gp_cit);
2840
2841	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
2842		" Group: core/alua/lu_gps/%s\n",
2843		config_item_name(alua_lu_gp_ci));
2844
	return alua_lu_gp_cg;
}
2848
2849static void target_core_alua_drop_lu_gp(
2850	struct config_group *group,
2851	struct config_item *item)
2852{
2853	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2854			struct t10_alua_lu_gp, lu_gp_group);
2855
2856	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
2857		" Group: core/alua/lu_gps/%s, ID: %hu\n",
2858		config_item_name(item), lu_gp->lu_gp_id);
2859	/*
2860	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
2861	 * -> target_core_alua_lu_gp_release()
2862	 */
2863	config_item_put(item);
2864}
2865
2866static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
2867	.make_group		= &target_core_alua_create_lu_gp,
2868	.drop_item		= &target_core_alua_drop_lu_gp,
2869};
2870
2871static const struct config_item_type target_core_alua_lu_gps_cit = {
2872	.ct_item_ops		= NULL,
2873	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
2874	.ct_owner		= THIS_MODULE,
2875};
2876
2877/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
2878
2879/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2880
2881static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
2882{
2883	return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
2884			tg_pt_gp_group);
2885}
2886
2887static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
2888		char *page)
2889{
2890	return sprintf(page, "%d\n",
2891		       to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
2892}
2893
2894static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
2895		const char *page, size_t count)
2896{
2897	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2898	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
2899	unsigned long tmp;
2900	int new_state, ret;
2901
2902	if (!tg_pt_gp->tg_pt_gp_valid_id) {
2903		pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n");
2904		return -EINVAL;
2905	}
2906	if (!target_dev_configured(dev)) {
2907		pr_err("Unable to set alua_access_state while device is"
2908		       " not configured\n");
2909		return -ENODEV;
2910	}
2911
2912	ret = kstrtoul(page, 0, &tmp);
2913	if (ret < 0) {
2914		pr_err("Unable to extract new ALUA access state from"
2915				" %s\n", page);
2916		return ret;
2917	}
2918	new_state = (int)tmp;
2919
2920	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
2921		pr_err("Unable to process implicit configfs ALUA"
2922			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
2923		return -EINVAL;
2924	}
2925	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
2926	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
2927		/* LBA DEPENDENT is only allowed with implicit ALUA */
2928		pr_err("Unable to process implicit configfs ALUA transition"
2929		       " while explicit ALUA management is enabled\n");
2930		return -EINVAL;
2931	}
2932
2933	ret = core_alua_do_port_transition(tg_pt_gp, dev,
2934					NULL, NULL, new_state, 0);
2935	return (!ret) ? count : -EINVAL;
2936}
2937
2938static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
2939		char *page)
2940{
2941	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2942	return sprintf(page, "%s\n",
2943		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
2944}
2945
2946static ssize_t target_tg_pt_gp_alua_access_status_store(
2947		struct config_item *item, const char *page, size_t count)
2948{
2949	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2950	unsigned long tmp;
2951	int new_status, ret;
2952
2953	if (!tg_pt_gp->tg_pt_gp_valid_id) {
2954		pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n");
2955		return -EINVAL;
2956	}
2957
2958	ret = kstrtoul(page, 0, &tmp);
2959	if (ret < 0) {
2960		pr_err("Unable to extract new ALUA access status"
2961				" from %s\n", page);
2962		return ret;
2963	}
2964	new_status = (int)tmp;
2965
2966	if ((new_status != ALUA_STATUS_NONE) &&
2967	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2968	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2969		pr_err("Illegal ALUA access status: 0x%02x\n",
2970				new_status);
2971		return -EINVAL;
2972	}
2973
2974	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
2975	return count;
2976}
2977
2978static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
2979		char *page)
2980{
2981	return core_alua_show_access_type(to_tg_pt_gp(item), page);
2982}
2983
2984static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
2985		const char *page, size_t count)
2986{
2987	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
2988}
2989
2990#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit)				\
2991static ssize_t target_tg_pt_gp_alua_support_##_name##_show(		\
2992		struct config_item *item, char *p)			\
2993{									\
2994	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
2995	return sprintf(p, "%d\n",					\
2996		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
2997}									\
2998									\
2999static ssize_t target_tg_pt_gp_alua_support_##_name##_store(		\
3000		struct config_item *item, const char *p, size_t c)	\
3001{									\
3002	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
3003	unsigned long tmp;						\
3004	int ret;							\
3005									\
3006	if (!t->tg_pt_gp_valid_id) {					\
3007		pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \
3008		return -EINVAL;						\
3009	}								\
3010									\
3011	ret = kstrtoul(p, 0, &tmp);					\
3012	if (ret < 0) {							\
3013		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
3014		return -EINVAL;						\
3015	}								\
3016	if (tmp > 1) {							\
3017		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
3018		return -EINVAL;						\
3019	}								\
3020	if (tmp)							\
3021		t->tg_pt_gp_alua_supported_states |= _bit;		\
3022	else								\
3023		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
3024									\
3025	return c;							\
3026}
3027
3028ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
3029ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
3030ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
3031ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
3032ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
3033ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
3034ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
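
/*
 * Each ALUA_SUPPORTED_STATE_ATTR() instance above generates a show/store pair
 * that reads or toggles a single bit in tg_pt_gp_alua_supported_states.
 * Illustrative usage, assuming a tg_pt_gp created under the device's alua/
 * group:
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/alua/$GP/alua_support_standby
 */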
3035
3036static ssize_t target_tg_pt_gp_alua_write_metadata_show(
3037		struct config_item *item, char *page)
3038{
3039	return sprintf(page, "%d\n",
3040		to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
3041}
3042
3043static ssize_t target_tg_pt_gp_alua_write_metadata_store(
3044		struct config_item *item, const char *page, size_t count)
3045{
3046	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3047	unsigned long tmp;
3048	int ret;
3049
3050	ret = kstrtoul(page, 0, &tmp);
3051	if (ret < 0) {
3052		pr_err("Unable to extract alua_write_metadata\n");
3053		return ret;
3054	}
3055
3056	if ((tmp != 0) && (tmp != 1)) {
3057		pr_err("Illegal value for alua_write_metadata:"
3058			" %lu\n", tmp);
3059		return -EINVAL;
3060	}
3061	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
3062
3063	return count;
3064}
3065
3066static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
3067		char *page)
3068{
3069	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
3070}
3071
3072static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
3073		const char *page, size_t count)
3074{
3075	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
3076			count);
3077}
3078
3079static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
3080		char *page)
3081{
3082	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
3083}
3084
3085static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
3086		const char *page, size_t count)
3087{
3088	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
3089			count);
3090}
3091
3092static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
3093		struct config_item *item, char *page)
3094{
3095	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
3096}
3097
3098static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
3099		struct config_item *item, const char *page, size_t count)
3100{
3101	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
3102			count);
3103}
3104
3105static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
3106		char *page)
3107{
3108	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
3109}
3110
3111static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
3112		const char *page, size_t count)
3113{
3114	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
3115}
3116
3117static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
3118		char *page)
3119{
3120	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3121
3122	if (!tg_pt_gp->tg_pt_gp_valid_id)
3123		return 0;
3124	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
3125}
3126
3127static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
3128		const char *page, size_t count)
3129{
3130	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3131	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
3132	unsigned long tg_pt_gp_id;
3133	int ret;
3134
3135	ret = kstrtoul(page, 0, &tg_pt_gp_id);
3136	if (ret < 0) {
3137		pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
3138		       page);
3139		return ret;
3140	}
3141	if (tg_pt_gp_id > 0x0000ffff) {
3142		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
3143		       tg_pt_gp_id);
3144		return -EINVAL;
3145	}
3146
3147	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
3148	if (ret < 0)
3149		return -EINVAL;
3150
3151	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
3152		"core/alua/tg_pt_gps/%s to ID: %hu\n",
3153		config_item_name(&alua_tg_pt_gp_cg->cg_item),
3154		tg_pt_gp->tg_pt_gp_id);
3155
3156	return count;
3157}
3158
3159static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
3160		char *page)
3161{
3162	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3163	struct se_lun *lun;
3164	ssize_t len = 0, cur_len;
3165	unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
3166
3167	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
3168	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
3169			lun_tg_pt_gp_link) {
3170		struct se_portal_group *tpg = lun->lun_tpg;
3171
3172		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
3173			"/%s\n", tpg->se_tpg_tfo->fabric_name,
3174			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
3175			tpg->se_tpg_tfo->tpg_get_tag(tpg),
3176			config_item_name(&lun->lun_group.cg_item));
3177		cur_len++; /* Extra byte for NULL terminator */
3178
3179		if ((cur_len + len) > PAGE_SIZE) {
3180			pr_warn("Ran out of lu_gp_show_attr"
3181				"_members buffer\n");
3182			break;
3183		}
3184		memcpy(page+len, buf, cur_len);
3185		len += cur_len;
3186	}
3187	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
3188
3189	return len;
3190}
3191
3192CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
3193CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
3194CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
3195CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
3196CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
3197CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
3198CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
3199CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
3200CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
3201CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
3202CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
3203CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
3204CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
3205CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
3206CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
3207CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
3208CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
3209
3210static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
3211	&target_tg_pt_gp_attr_alua_access_state,
3212	&target_tg_pt_gp_attr_alua_access_status,
3213	&target_tg_pt_gp_attr_alua_access_type,
3214	&target_tg_pt_gp_attr_alua_support_transitioning,
3215	&target_tg_pt_gp_attr_alua_support_offline,
3216	&target_tg_pt_gp_attr_alua_support_lba_dependent,
3217	&target_tg_pt_gp_attr_alua_support_unavailable,
3218	&target_tg_pt_gp_attr_alua_support_standby,
3219	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
3220	&target_tg_pt_gp_attr_alua_support_active_optimized,
3221	&target_tg_pt_gp_attr_alua_write_metadata,
3222	&target_tg_pt_gp_attr_nonop_delay_msecs,
3223	&target_tg_pt_gp_attr_trans_delay_msecs,
3224	&target_tg_pt_gp_attr_implicit_trans_secs,
3225	&target_tg_pt_gp_attr_preferred,
3226	&target_tg_pt_gp_attr_tg_pt_gp_id,
3227	&target_tg_pt_gp_attr_members,
3228	NULL,
3229};
3230
3231static void target_core_alua_tg_pt_gp_release(struct config_item *item)
3232{
3233	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
3234			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
3235
3236	core_alua_free_tg_pt_gp(tg_pt_gp);
3237}
3238
3239static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
3240	.release		= target_core_alua_tg_pt_gp_release,
3241};
3242
3243static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
3244	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
3245	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
3246	.ct_owner		= THIS_MODULE,
3247};
3248
3249/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
3250
3251/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
3252
3253static struct config_group *target_core_alua_create_tg_pt_gp(
3254	struct config_group *group,
3255	const char *name)
3256{
3257	struct t10_alua *alua = container_of(group, struct t10_alua,
3258					alua_tg_pt_gps_group);
3259	struct t10_alua_tg_pt_gp *tg_pt_gp;
3260	struct config_group *alua_tg_pt_gp_cg = NULL;
3261	struct config_item *alua_tg_pt_gp_ci = NULL;
3262
3263	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
3264	if (!tg_pt_gp)
3265		return NULL;
3266
3267	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
3268	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
3269
3270	config_group_init_type_name(alua_tg_pt_gp_cg, name,
3271			&target_core_alua_tg_pt_gp_cit);
3272
3273	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
3274		" Group: alua/tg_pt_gps/%s\n",
3275		config_item_name(alua_tg_pt_gp_ci));
3276
3277	return alua_tg_pt_gp_cg;
3278}
3279
3280static void target_core_alua_drop_tg_pt_gp(
3281	struct config_group *group,
3282	struct config_item *item)
3283{
3284	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
3285			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
3286
3287	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
3288		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
3289		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
3290	/*
3291	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
3292	 * -> target_core_alua_tg_pt_gp_release().
3293	 */
3294	config_item_put(item);
3295}
3296
3297static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
3298	.make_group		= &target_core_alua_create_tg_pt_gp,
3299	.drop_item		= &target_core_alua_drop_tg_pt_gp,
3300};
3301
3302TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
3303
3304/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
3305
3306/* Start functions for struct config_item_type target_core_alua_cit */
3307
3308/*
3309 * target_core_alua_cit is a ConfigFS group that lives under
3310 * /sys/kernel/config/target/core/alua.  There are default groups
3311 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
3312 * target_core_alua_cit in target_core_init_configfs() below.
3313 */
3314static const struct config_item_type target_core_alua_cit = {
3315	.ct_item_ops		= NULL,
3316	.ct_attrs		= NULL,
3317	.ct_owner		= THIS_MODULE,
3318};
3319
3320/* End functions for struct config_item_type target_core_alua_cit */
3321
3322/* Start functions for struct config_item_type tb_dev_stat_cit */
3323
3324static struct config_group *target_core_stat_mkdir(
3325	struct config_group *group,
3326	const char *name)
3327{
3328	return ERR_PTR(-ENOSYS);
3329}
3330
3331static void target_core_stat_rmdir(
3332	struct config_group *group,
3333	struct config_item *item)
3334{
3335	return;
3336}
3337
3338static struct configfs_group_operations target_core_stat_group_ops = {
3339	.make_group		= &target_core_stat_mkdir,
3340	.drop_item		= &target_core_stat_rmdir,
3341};
3342
3343TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
3344
3345/* End functions for struct config_item_type tb_dev_stat_cit */
3346
3347/* Start functions for struct config_item_type target_core_hba_cit */
3348
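/*
 * target_core_make_subdev() backs "mkdir $DEV" under core/$HBA/, creating the
 * per-device group together with its default groups.  Illustrative resulting
 * layout (names taken from the config_group_init_type_name() calls below):
 *
 *   core/$HBA/$DEV/
 *       action/  attrib/  pr/  wwn/  alua/default_tg_pt_gp/  statistics/
 */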
3349static struct config_group *target_core_make_subdev(
3350	struct config_group *group,
3351	const char *name)
3352{
3353	struct t10_alua_tg_pt_gp *tg_pt_gp;
3354	struct config_item *hba_ci = &group->cg_item;
3355	struct se_hba *hba = item_to_hba(hba_ci);
3356	struct target_backend *tb = hba->backend;
3357	struct se_device *dev;
3358	int errno = -ENOMEM, ret;
3359
3360	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
3361	if (ret)
3362		return ERR_PTR(ret);
3363
3364	dev = target_alloc_device(hba, name);
3365	if (!dev)
3366		goto out_unlock;
3367
3368	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
3369
3370	config_group_init_type_name(&dev->dev_action_group, "action",
3371			&tb->tb_dev_action_cit);
3372	configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);
3373
3374	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
3375			&tb->tb_dev_attrib_cit);
3376	configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);
3377
3378	config_group_init_type_name(&dev->dev_pr_group, "pr",
3379			&tb->tb_dev_pr_cit);
3380	configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);
3381
3382	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
3383			&tb->tb_dev_wwn_cit);
3384	configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
3385			&dev->dev_group);
3386
3387	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
3388			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
3389	configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
3390			&dev->dev_group);
3391
3392	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
3393			"statistics", &tb->tb_dev_stat_cit);
3394	configfs_add_default_group(&dev->dev_stat_grps.stat_group,
3395			&dev->dev_group);
3396
3397	/*
3398	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
3399	 */
3400	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
3401	if (!tg_pt_gp)
3402		goto out_free_device;
3403	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
3404
3405	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
3406			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
3407	configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
3408			&dev->t10_alua.alua_tg_pt_gps_group);
3409
3410	/*
3411	 * Add core/$HBA/$DEV/statistics/ default groups
3412	 */
3413	target_stat_setup_dev_default_groups(dev);
3414
3415	mutex_lock(&target_devices_lock);
3416	target_devices++;
3417	mutex_unlock(&target_devices_lock);
3418
3419	mutex_unlock(&hba->hba_access_mutex);
3420	return &dev->dev_group;
3421
3422out_free_device:
3423	target_free_device(dev);
3424out_unlock:
3425	mutex_unlock(&hba->hba_access_mutex);
3426	return ERR_PTR(errno);
3427}
3428
3429static void target_core_drop_subdev(
3430	struct config_group *group,
3431	struct config_item *item)
3432{
3433	struct config_group *dev_cg = to_config_group(item);
3434	struct se_device *dev =
3435		container_of(dev_cg, struct se_device, dev_group);
3436	struct se_hba *hba;
3437
3438	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
3439
3440	mutex_lock(&hba->hba_access_mutex);
3441
3442	configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
3443	configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);
3444
3445	/*
3446	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
3447	 * directly from target_core_alua_tg_pt_gp_release().
3448	 */
3449	dev->t10_alua.default_tg_pt_gp = NULL;
3450
3451	configfs_remove_default_groups(dev_cg);
3452
3453	/*
3454	 * se_dev is released from target_core_dev_item_ops->release()
3455	 */
3456	config_item_put(item);
3457
3458	mutex_lock(&target_devices_lock);
3459	target_devices--;
3460	mutex_unlock(&target_devices_lock);
3461
3462	mutex_unlock(&hba->hba_access_mutex);
3463}
3464
3465static struct configfs_group_operations target_core_hba_group_ops = {
3466	.make_group		= target_core_make_subdev,
3467	.drop_item		= target_core_drop_subdev,
3468};
3469
3470
3471static inline struct se_hba *to_hba(struct config_item *item)
3472{
3473	return container_of(to_config_group(item), struct se_hba, hba_group);
3474}
3475
3476static ssize_t target_hba_info_show(struct config_item *item, char *page)
3477{
3478	struct se_hba *hba = to_hba(item);
3479
3480	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
3481			hba->hba_id, hba->backend->ops->name,
3482			TARGET_CORE_VERSION);
3483}
3484
3485static ssize_t target_hba_mode_show(struct config_item *item, char *page)
3486{
3487	struct se_hba *hba = to_hba(item);
3488	int hba_mode = 0;
3489
3490	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
3491		hba_mode = 1;
3492
3493	return sprintf(page, "%d\n", hba_mode);
3494}
3495
3496static ssize_t target_hba_mode_store(struct config_item *item,
3497		const char *page, size_t count)
3498{
3499	struct se_hba *hba = to_hba(item);
3500	unsigned long mode_flag;
3501	int ret;
3502
3503	if (hba->backend->ops->pmode_enable_hba == NULL)
3504		return -EINVAL;
3505
3506	ret = kstrtoul(page, 0, &mode_flag);
3507	if (ret < 0) {
3508		pr_err("Unable to extract hba mode flag: %d\n", ret);
3509		return ret;
3510	}
3511
3512	if (hba->dev_count) {
3513		pr_err("Unable to set hba_mode with active devices\n");
3514		return -EINVAL;
3515	}
3516
3517	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
3518	if (ret < 0)
3519		return -EINVAL;
3520	if (ret > 0)
3521		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
3522	else if (ret == 0)
3523		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
3524
3525	return count;
3526}
3527
3528CONFIGFS_ATTR_RO(target_, hba_info);
3529CONFIGFS_ATTR(target_, hba_mode);
3530
3531static void target_core_hba_release(struct config_item *item)
3532{
3533	struct se_hba *hba = container_of(to_config_group(item),
3534				struct se_hba, hba_group);
3535	core_delete_hba(hba);
3536}
3537
3538static struct configfs_attribute *target_core_hba_attrs[] = {
3539	&target_attr_hba_info,
3540	&target_attr_hba_mode,
3541	NULL,
3542};
3543
3544static struct configfs_item_operations target_core_hba_item_ops = {
3545	.release		= target_core_hba_release,
3546};
3547
3548static const struct config_item_type target_core_hba_cit = {
3549	.ct_item_ops		= &target_core_hba_item_ops,
3550	.ct_group_ops		= &target_core_hba_group_ops,
3551	.ct_attrs		= target_core_hba_attrs,
3552	.ct_owner		= THIS_MODULE,
3553};
3554
3555static struct config_group *target_core_call_addhbatotarget(
3556	struct config_group *group,
3557	const char *name)
3558{
3559	char *se_plugin_str, *str, *str2;
3560	struct se_hba *hba;
3561	char buf[TARGET_CORE_NAME_MAX_LEN] = { };
3562	unsigned long plugin_dep_id = 0;
3563	int ret;
3564
3565	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3566		pr_err("Passed *name strlen(): %d exceeds"
3567			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3568			TARGET_CORE_NAME_MAX_LEN);
3569		return ERR_PTR(-ENAMETOOLONG);
3570	}
3571	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
3572
3573	str = strstr(buf, "_");
3574	if (!str) {
3575		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
3576		return ERR_PTR(-EINVAL);
3577	}
3578	se_plugin_str = buf;
3579	/*
3580	 * Special case for subsystem plugins that have "_" in their names.
3581	 * Namely rd_direct and rd_mcp..
3582	 */
3583	str2 = strstr(str+1, "_");
3584	if (str2) {
3585		*str2 = '\0'; /* Terminate for *se_plugin_str */
3586		str2++; /* Skip to start of plugin dependent ID */
3587		str = str2;
3588	} else {
3589		*str = '\0'; /* Terminate for *se_plugin_str */
3590		str++; /* Skip to start of plugin dependent ID */
3591	}
3592
3593	ret = kstrtoul(str, 0, &plugin_dep_id);
3594	if (ret < 0) {
3595		pr_err("kstrtoul() returned %d for"
3596				" plugin_dep_id\n", ret);
3597		return ERR_PTR(ret);
3598	}
3599	/*
3600	 * Load up TCM subsystem plugins if they have not already been loaded.
3601	 */
3602	transport_subsystem_check_init();
3603
3604	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
3605	if (IS_ERR(hba))
3606		return ERR_CAST(hba);
3607
3608	config_group_init_type_name(&hba->hba_group, name,
3609			&target_core_hba_cit);
3610
3611	return &hba->hba_group;
3612}
3613
3614static void target_core_call_delhbafromtarget(
3615	struct config_group *group,
3616	struct config_item *item)
3617{
3618	/*
3619	 * core_delete_hba() is called from target_core_hba_item_ops->release()
3620	 * -> target_core_hba_release()
3621	 */
3622	config_item_put(item);
3623}
3624
3625static struct configfs_group_operations target_core_group_ops = {
3626	.make_group	= target_core_call_addhbatotarget,
3627	.drop_item	= target_core_call_delhbafromtarget,
3628};
3629
3630static const struct config_item_type target_core_cit = {
3631	.ct_item_ops	= NULL,
3632	.ct_group_ops	= &target_core_group_ops,
3633	.ct_attrs	= NULL,
3634	.ct_owner	= THIS_MODULE,
3635};
3636
3637/* Stop functions for struct config_item_type target_core_hba_cit */
3638
3639void target_setup_backend_cits(struct target_backend *tb)
3640{
3641	target_core_setup_dev_cit(tb);
3642	target_core_setup_dev_action_cit(tb);
3643	target_core_setup_dev_attrib_cit(tb);
3644	target_core_setup_dev_pr_cit(tb);
3645	target_core_setup_dev_wwn_cit(tb);
3646	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
3647	target_core_setup_dev_stat_cit(tb);
3648}
3649
3650static void target_init_dbroot(void)
3651{
3652	struct file *fp;
3653
3654	snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
3655	fp = filp_open(db_root_stage, O_RDONLY, 0);
3656	if (IS_ERR(fp)) {
3657		pr_err("db_root: cannot open: %s\n", db_root_stage);
3658		return;
3659	}
3660	if (!S_ISDIR(file_inode(fp)->i_mode)) {
3661		filp_close(fp, NULL);
3662		pr_err("db_root: not a valid directory: %s\n", db_root_stage);
3663		return;
3664	}
3665	filp_close(fp, NULL);
3666
3667	strncpy(db_root, db_root_stage, DB_ROOT_LEN);
3668	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
3669}
3670
3671static int __init target_core_init_configfs(void)
3672{
3673	struct configfs_subsystem *subsys = &target_core_fabrics;
3674	struct t10_alua_lu_gp *lu_gp;
3675	struct cred *kern_cred;
3676	const struct cred *old_cred;
3677	int ret;
3678
3679	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
3680		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
3681		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
3682
3683	config_group_init(&subsys->su_group);
3684	mutex_init(&subsys->su_mutex);
3685
3686	ret = init_se_kmem_caches();
3687	if (ret < 0)
3688		return ret;
3689	/*
3690	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
3691	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
3692	 */
3693	config_group_init_type_name(&target_core_hbagroup, "core",
3694			&target_core_cit);
3695	configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);
3696
3697	/*
3698	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
3699	 */
3700	config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
3701	configfs_add_default_group(&alua_group, &target_core_hbagroup);
3702
3703	/*
3704	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
3705	 * groups under /sys/kernel/config/target/core/alua/
3706	 */
3707	config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
3708			&target_core_alua_lu_gps_cit);
3709	configfs_add_default_group(&alua_lu_gps_group, &alua_group);
3710
3711	/*
3712	 * Add core/alua/lu_gps/default_lu_gp
3713	 */
3714	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
3715	if (IS_ERR(lu_gp)) {
3716		ret = -ENOMEM;
3717		goto out_global;
3718	}
3719
3720	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
3721				&target_core_alua_lu_gp_cit);
3722	configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);
3723
3724	default_lu_gp = lu_gp;
3725
3726	/*
3727	 * Register the target_core_mod subsystem with configfs.
3728	 */
3729	ret = configfs_register_subsystem(subsys);
3730	if (ret < 0) {
3731		pr_err("Error %d while registering subsystem %s\n",
3732			ret, subsys->su_group.cg_item.ci_namebuf);
3733		goto out_global;
3734	}
3735	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
3736		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
3737		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
3738	/*
3739	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
3740	 */
3741	ret = rd_module_init();
3742	if (ret < 0)
3743		goto out;
3744
3745	ret = core_dev_setup_virtual_lun0();
3746	if (ret < 0)
3747		goto out;
3748
3749	ret = target_xcopy_setup_pt();
3750	if (ret < 0)
3751		goto out;
3752
3753	/* We use the kernel credentials to access the target directory */
3754	kern_cred = prepare_kernel_cred(&init_task);
3755	if (!kern_cred) {
3756		ret = -ENOMEM;
3757		goto out;
3758	}
3759	old_cred = override_creds(kern_cred);
3760	target_init_dbroot();
3761	revert_creds(old_cred);
3762	put_cred(kern_cred);
3763
3764	return 0;
3765
3766out:
3767	target_xcopy_release_pt();
3768	configfs_unregister_subsystem(subsys);
3769	core_dev_release_virtual_lun0();
3770	rd_module_exit();
3771out_global:
3772	if (default_lu_gp) {
3773		core_alua_free_lu_gp(default_lu_gp);
3774		default_lu_gp = NULL;
3775	}
3776	release_se_kmem_caches();
3777	return ret;
3778}
3779
3780static void __exit target_core_exit_configfs(void)
3781{
3782	configfs_remove_default_groups(&alua_lu_gps_group);
3783	configfs_remove_default_groups(&alua_group);
3784	configfs_remove_default_groups(&target_core_hbagroup);
3785
3786	/*
3787	 * We expect subsys->su_group.default_groups to be released
3788	 * by configfs subsystem provider logic..
3789	 */
3790	configfs_unregister_subsystem(&target_core_fabrics);
3791
3792	core_alua_free_lu_gp(default_lu_gp);
3793	default_lu_gp = NULL;
3794
3795	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
3796			" Infrastructure\n");
3797
3798	core_dev_release_virtual_lun0();
3799	rd_module_exit();
3800	target_xcopy_release_pt();
3801	release_se_kmem_caches();
3802}
3803
3804MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
3805MODULE_AUTHOR("nab@Linux-iSCSI.org");
3806MODULE_LICENSE("GPL");
3807
3808module_init(target_core_init_configfs);
3809module_exit(target_core_exit_configfs);
3810