// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/nvme-auth.h>

#include "nvme.h"
#include "fabrics.h"

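/*
 * Writing any value to the controller's "reset_controller" attribute kicks
 * off a controller reset via nvme_reset_ctrl_sync() and only returns once
 * the reset has completed (or failed).
 */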
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf,
			  ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;

	ctrl->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

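/*
 * Resolve a namespace sysfs device to its nvme_ns_head, working both for the
 * multipath ns-head gendisk and for the per-path (per-controller) namespace
 * gendisks.
 */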
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (nvme_disk_is_ns_head(disk))
		return disk->private_data;
	return nvme_get_ns_from_dev(dev)->head;
}

static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);

	return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;
	head->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

static struct device_attribute dev_attr_adm_passthru_err_log_enabled =
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR,
	       nvme_adm_passthru_err_log_enabled_show,
	       nvme_adm_passthru_err_log_enabled_store);

static struct device_attribute dev_attr_io_passthru_err_log_enabled =
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR,
	       nvme_io_passthru_err_log_enabled_show,
	       nvme_io_passthru_err_log_enabled_store);

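/*
 * WWID precedence: prefer the namespace UUID, then the NGUID, then the
 * EUI-64, and finally fall back to a string built from the vendor ID, serial
 * number, model (with trailing spaces/NULs trimmed) and the NSID.
 */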
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/*
	 * For backward compatibility, expose the NGUID to userspace if
	 * no UUID is set.
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_once(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
}
static DEVICE_ATTR_RO(csi);

static ssize_t metadata_bytes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
}
static DEVICE_ATTR_RO(metadata_bytes);

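/*
 * The "nuse" attribute reports the namespace utilization.  Refreshing it
 * requires an Identify Namespace command, so updates are rate limited via
 * head->rs_nuse; a rate-limited read simply returns the cached value.
 */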
static int ns_head_update_nuse(struct nvme_ns_head *head)
{
	struct nvme_id_ns *id;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	/* Avoid issuing commands too often by rate limiting the update */
	if (!__ratelimit(&head->rs_nuse))
		return 0;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
	if (ret)
		goto out_unlock;

	head->nuse = le64_to_cpu(id->nuse);
	kfree(id);

out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int ns_update_nuse(struct nvme_ns *ns)
{
	struct nvme_id_ns *id;
	int ret;

	/* Avoid issuing commands too often by rate limiting the update. */
	if (!__ratelimit(&ns->head->rs_nuse))
		return 0;

	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
	if (ret)
		return ret;

	ns->head->nuse = le64_to_cpu(id->nuse);
	kfree(id);
	return 0;
}

static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct gendisk *disk = dev_to_disk(dev);
	struct block_device *bdev = disk->part0;
	int ret;

	if (nvme_disk_is_ns_head(bdev->bd_disk))
		ret = ns_head_update_nuse(head);
	else
		ret = ns_update_nuse(bdev->bd_disk->private_data);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", head->nuse);
}
static DEVICE_ATTR_RO(nuse);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_csi.attr,
	&dev_attr_nsid.attr,
	&dev_attr_metadata_bytes.attr,
	&dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	&dev_attr_io_passthru_err_log_enabled.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		/* per-path attr */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

const struct attribute_group *nvme_ns_attr_groups[] = {
	&nvme_ns_attr_group,
	NULL,
};

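/*
 * Helper macros generating read-only controller attributes: the string
 * variants print fixed-size, space-padded fields from the owning subsystem
 * (e.g. "model"), the integer variants print plain controller fields
 * (e.g. "cntlid").
 */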
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

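/*
 * Deleting a controller through sysfs uses device_remove_file_self() so the
 * attribute can remove itself before the synchronous controller deletion
 * runs, avoiding a deadlock between the write and the teardown of the sysfs
 * file being written to.
 */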
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned state = (unsigned)nvme_ctrl_state(ctrl);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if (state < ARRAY_SIZE(state_name) && state_name[state])
		return sysfs_emit(buf, "%s\n", state_name[state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

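/*
 * Fabrics reconnect tuning: "ctrl_loss_tmo" is exposed in seconds and mapped
 * to opts->max_reconnects (ctrl_loss_tmo / reconnect_delay, rounded up); a
 * negative value (shown as "off") means reconnect forever.
 */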
static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

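/*
 * "cntrltype" and "dctype" translate the CNTRLTYPE and DCTYPE fields from
 * Identify Controller into human readable strings; values outside the known
 * range are reported as "reserved".
 */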
static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

#ifdef CONFIG_NVME_HOST_AUTH
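/*
 * DH-HMAC-CHAP secrets: reading returns the configured secret (or "none"),
 * writing accepts a "DHHC-1:"-prefixed secret, swaps in a freshly generated
 * key under dhchap_auth_mutex and queues re-authentication.  A secret can
 * only be replaced here, not set for the first time.
 */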
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

#ifdef CONFIG_NVME_TCP_TLS
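/*
 * "tls_key" reports the serial number of the TLS PSK the controller is
 * using; an empty read means no key is in use.
 */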
static ssize_t tls_key_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->tls_key)
		return 0;
	return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key));
}
static DEVICE_ATTR_RO(tls_key);
#endif

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_HOST_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
#ifdef CONFIG_NVME_TCP_TLS
	&dev_attr_tls_key.attr,
#endif
	&dev_attr_adm_passthru_err_log_enabled.attr,
	NULL
};

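/*
 * Hide controller attributes the transport cannot support: callbacks the
 * ops structure does not provide, and fabrics-only options when ctrl->opts
 * is NULL (i.e. PCIe controllers).  The TLS key is only shown for the TCP
 * transport.
 */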
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_HOST_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif
#ifdef CONFIG_NVME_TCP_TLS
	if (a == &dev_attr_tls_key.attr &&
	    (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp")))
		return 0;
#endif

	return a->mode;
}

const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

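/*
 * Subsystem-level attributes, registered on the nvme_subsystem device rather
 * than on an individual controller.  They mirror the controller attributes
 * of the same name but are backed by the shared nvme_subsystem structure.
 */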
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};