/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
/*
 * Helper macros for when the NVMe status is NVME_SC_CONNECT_INVALID_PARAM.
 * The shift by 16 sets the IATTR bit to 1, which means the offending
 * offset is within the data portion of the Connect command (rather than
 * within the SQE itself).
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
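
/*
 * Illustration only (a hypothetical connect-handler snippet, not part of
 * this header): flagging an invalid cntlid in the connect data would
 * report the offending offset in the completion result:
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *	nvmet_req_complete(req, NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR);
 */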

struct nvmet_ns {
	struct percpu_ref	ref;
	struct file		*bdev_file;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	int			use_p2pmem;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool			authenticated;
	struct delayed_work	auth_expired_work;
	u16			dhchap_tid;
	u16			dhchap_status;
	int			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	struct key			*keyring;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	int				max_queue_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}
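
/*
 * Illustration only (hypothetical transport code; tls_active is an assumed
 * local flag, not something this header defines): a transport could reject
 * hosts that connect without a secure channel on ports that require one:
 *
 *	if (nvmet_port_secure_channel_required(port) && !tls_active)
 *		return NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
 */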

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
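
/*
 * Sketch (assumed, minimal transport skeleton; nvmet_foo_* are hypothetical
 * names): a fabrics driver fills in the mandatory callbacks and registers
 * itself at module init with nvmet_register_transport(), declared below:
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *	};
 *	ret = nvmet_register_transport(&nvmet_foo_ops);
 */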

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
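
/*
 * Sketch of assumed transport-side usage (not defined in this file): a
 * fabrics driver mapping the request's scatterlist would pass the
 * direction derived above, e.g.:
 *
 *	nents = dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 */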

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	/* Retain Asynchronous Event (RAE) bit in Get Log Page cdw10 */
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
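
/*
 * Typical usage (illustrative, mirroring how a changed-namespace log page
 * handler would behave): clear the corresponding AEN mask bit once the
 * host has read the log, unless the host asked to retain the event:
 *
 *	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
 */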

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES((ctrl)->cap) + 1)

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
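
/*
 * Worked example: a Read with rw.length == 7 (0's based) on a namespace
 * with 512-byte blocks (blksize_shift == 9) transfers (7 + 1) << 9 ==
 * 4096 bytes.
 */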

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
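
/*
 * e.g. to0based(1) == cpu_to_le16(0) and to0based(65536) == cpu_to_le16(65535);
 * inputs of 0 or above 65536 are clamped into range before the conversion.
 */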

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
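
/*
 * For a namespace with 4096-byte blocks (blksize_shift == 12), LBA 1 maps
 * to 512-byte sector 8: nvmet_lba_to_sect(ns, cpu_to_le64(1)) == 8. Both
 * helpers assume blksize_shift >= SECTOR_SHIFT (blocks of at least 512B).
 */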

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

#endif /* _NVMET_H */