nvme.h revision 328690
/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/nvme/nvme.h 328690 2018-02-01 16:51:11Z mav $
 */

#ifndef __NVME_H__
#define __NVME_H__

#ifdef _KERNEL
#include <sys/types.h>
#endif

#include <sys/param.h>

#define	NVME_PASSTHROUGH_CMD		_IOWR('n', 0, struct nvme_pt_command)
#define	NVME_RESET_CONTROLLER		_IO('n', 1)

#define	NVME_IO_TEST			_IOWR('n', 100, struct nvme_io_test)
#define	NVME_BIO_TEST			_IOWR('n', 101, struct nvme_io_test)

/*
 * Used to mark a command as applying to all namespaces, or to retrieve
 * global log pages.
 */
#define NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)

/* Cap nvme to 1MB transfers; the driver explodes with larger sizes. */
#define NVME_MAX_XFER_SIZE		(MAXPHYS < (1<<20) ? MAXPHYS : (1<<20))

union cap_lo_register {
	uint32_t	raw;
	struct {
		/** maximum queue entries supported */
		uint32_t mqes		: 16;

		/** contiguous queues required */
		uint32_t cqr		: 1;

		/** arbitration mechanism supported */
		uint32_t ams		: 2;

		uint32_t reserved1	: 5;

		/** timeout */
		uint32_t to		: 8;
	} bits __packed;
} __packed;

_Static_assert(sizeof(union cap_lo_register) == 4, "bad size for cap_lo_register");

union cap_hi_register {
	uint32_t	raw;
	struct {
		/** doorbell stride */
		uint32_t dstrd		: 4;

		uint32_t reserved3	: 1;

		/** command sets supported */
		uint32_t css_nvm	: 1;

		uint32_t css_reserved	: 3;
		uint32_t reserved2	: 7;

		/** memory page size minimum */
		uint32_t mpsmin		: 4;

		/** memory page size maximum */
		uint32_t mpsmax		: 4;

		uint32_t reserved1	: 8;
	} bits __packed;
} __packed;

_Static_assert(sizeof(union cap_hi_register) == 4, "bad size for cap_hi_register");
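
/*
 * Example (an illustrative sketch, not part of this header's API): once the
 * controller's BAR has been mapped, the read-only CAP fields above are
 * typically decoded as follows.  CAP.TO is in 500 millisecond units, MQES is
 * zero-based, and MPSMIN/MPSMAX encode powers of two added to 12.
 *
 *	struct nvme_registers *regs = ...;	(mapped register file)
 *	uint32_t ready_timeout_ms = regs->cap_lo.bits.to * 500;
 *	uint32_t max_queue_entries = regs->cap_lo.bits.mqes + 1;
 *	uint32_t min_page_size = 1u << (12 + regs->cap_hi.bits.mpsmin);
 *	uint32_t max_page_size = 1u << (12 + regs->cap_hi.bits.mpsmax);
 */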

union cc_register {
	uint32_t	raw;
	struct {
		/** enable */
		uint32_t en		: 1;

		uint32_t reserved1	: 3;

		/** i/o command set selected */
		uint32_t css		: 3;

		/** memory page size */
		uint32_t mps		: 4;

		/** arbitration mechanism selected */
		uint32_t ams		: 3;

		/** shutdown notification */
		uint32_t shn		: 2;

		/** i/o submission queue entry size */
		uint32_t iosqes		: 4;

		/** i/o completion queue entry size */
		uint32_t iocqes		: 4;

		uint32_t reserved2	: 8;
	} bits __packed;
} __packed;

_Static_assert(sizeof(union cc_register) == 4, "bad size for cc_register");
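
/*
 * Example (an illustrative sketch only; the admin queue registers must also
 * be programmed first): a minimal enable sequence sets the queue entry sizes
 * and memory page size before setting CC.EN, then waits for CSTS.RDY.
 * IOSQES/IOCQES are log2 of the entry sizes (64 and 16 bytes for the NVM
 * command set) and MPS is a power of two added to 12.
 *
 *	union cc_register cc;
 *
 *	cc.raw = 0;
 *	cc.bits.css = 0;		(NVM command set)
 *	cc.bits.mps = 0;		(4KB memory pages)
 *	cc.bits.iosqes = 6;		(64-byte submission queue entries)
 *	cc.bits.iocqes = 4;		(16-byte completion queue entries)
 *	cc.bits.en = 1;
 *	regs->cc.raw = cc.raw;		(then poll csts.bits.rdy until 1)
 */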

enum shn_value {
	NVME_SHN_NORMAL		= 0x1,
	NVME_SHN_ABRUPT		= 0x2,
};

union csts_register {
	uint32_t	raw;
	struct {
		/** ready */
		uint32_t rdy		: 1;

		/** controller fatal status */
		uint32_t cfs		: 1;

		/** shutdown status */
		uint32_t shst		: 2;

		uint32_t reserved1	: 28;
	} bits __packed;
} __packed;

_Static_assert(sizeof(union csts_register) == 4, "bad size for csts_register");

enum shst_value {
	NVME_SHST_NORMAL	= 0x0,
	NVME_SHST_OCCURRING	= 0x1,
	NVME_SHST_COMPLETE	= 0x2,
};
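
/*
 * Example (an illustrative sketch only): an orderly shutdown writes CC.SHN
 * and then polls CSTS.SHST until the controller reports completion.
 *
 *	union cc_register cc = regs->cc;
 *
 *	cc.bits.shn = NVME_SHN_NORMAL;
 *	regs->cc.raw = cc.raw;
 *	while (regs->csts.bits.shst != NVME_SHST_COMPLETE)
 *		DELAY(1000);
 */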

union aqa_register {
	uint32_t	raw;
	struct {
		/** admin submission queue size */
		uint32_t asqs		: 12;

		uint32_t reserved1	: 4;

		/** admin completion queue size */
		uint32_t acqs		: 12;

		uint32_t reserved2	: 4;
	} bits __packed;
} __packed;

_Static_assert(sizeof(union aqa_register) == 4, "bad size for aqa_register");

struct nvme_registers
{
	/** controller capabilities */
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;

	uint32_t		vs;	/* version */
	uint32_t		intms;	/* interrupt mask set */
	uint32_t		intmc;	/* interrupt mask clear */

	/** controller configuration */
	union cc_register	cc;

	uint32_t		reserved1;

	/** controller status */
	union csts_register	csts;

	uint32_t		reserved2;

	/** admin queue attributes */
	union aqa_register	aqa;

	uint64_t		asq;	/* admin submission queue base addr */
	uint64_t		acq;	/* admin completion queue base addr */
	uint32_t		reserved3[0x3f2];

	struct {
	    uint32_t		sq_tdbl; /* submission queue tail doorbell */
	    uint32_t		cq_hdbl; /* completion queue head doorbell */
	} doorbell[1] __packed;
} __packed;

_Static_assert(sizeof(struct nvme_registers) == 0x1008, "bad size for nvme_registers");
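
/*
 * Note: the doorbell[] array above only describes the layout for a doorbell
 * stride of 0 (CAP.DSTRD == 0).  In general, the doorbells for queue pair
 * "qid" live at offsets derived from the stride, e.g. (a sketch):
 *
 *	uint32_t stride = 4 << regs->cap_hi.bits.dstrd;
 *	uint32_t sq_tdbl_offset = 0x1000 + (2 * qid) * stride;
 *	uint32_t cq_hdbl_offset = 0x1000 + (2 * qid + 1) * stride;
 */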

struct nvme_command
{
	/* dword 0 */
	uint16_t opc	:  8;	/* opcode */
	uint16_t fuse	:  2;	/* fused operation */
	uint16_t rsvd1	:  6;
	uint16_t cid;		/* command identifier */

	/* dword 1 */
	uint32_t nsid;		/* namespace identifier */

	/* dword 2-3 */
	uint32_t rsvd2;
	uint32_t rsvd3;

	/* dword 4-5 */
	uint64_t mptr;		/* metadata pointer */

	/* dword 6-7 */
	uint64_t prp1;		/* prp entry 1 */

	/* dword 8-9 */
	uint64_t prp2;		/* prp entry 2 */

	/* dword 10-15 */
	uint32_t cdw10;		/* command-specific */
	uint32_t cdw11;		/* command-specific */
	uint32_t cdw12;		/* command-specific */
	uint32_t cdw13;		/* command-specific */
	uint32_t cdw14;		/* command-specific */
	uint32_t cdw15;		/* command-specific */
} __packed;

_Static_assert(sizeof(struct nvme_command) == 16 * 4, "bad size for nvme_command");
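
/*
 * Example (an illustrative sketch, not driver API): filling in an IDENTIFY
 * admin command.  For IDENTIFY, CDW10 carries the CNS value (0 = identify
 * namespace, 1 = identify controller) and PRP1 points to a 4KB payload.
 *
 *	struct nvme_command cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opc = NVME_OPC_IDENTIFY;
 *	cmd.cdw10 = 1;			(CNS = 1: identify controller)
 *	cmd.prp1 = payload_bus_addr;	(physically contiguous 4KB buffer)
 */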

struct nvme_status {

	uint16_t p	:  1;	/* phase tag */
	uint16_t sc	:  8;	/* status code */
	uint16_t sct	:  3;	/* status code type */
	uint16_t rsvd2	:  2;
	uint16_t m	:  1;	/* more */
	uint16_t dnr	:  1;	/* do not retry */
} __packed;

_Static_assert(sizeof(struct nvme_status) == 2, "bad size for nvme_status");

struct nvme_completion {

	/* dword 0 */
	uint32_t		cdw0;	/* command-specific */

	/* dword 1 */
	uint32_t		rsvd1;

	/* dword 2 */
	uint16_t		sqhd;	/* submission queue head pointer */
	uint16_t		sqid;	/* submission queue identifier */

	/* dword 3 */
	uint16_t		cid;	/* command identifier */
	struct nvme_status	status;
} __packed;

_Static_assert(sizeof(struct nvme_completion) == 4 * 4, "bad size for nvme_completion");

struct nvme_dsm_range {

	uint32_t attributes;
	uint32_t length;
	uint64_t starting_lba;
} __packed;

_Static_assert(sizeof(struct nvme_dsm_range) == 16, "bad size for nvme_dsm_range");
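
/*
 * Example (an illustrative sketch): a deallocate (TRIM) request passes an
 * array of these ranges; "length" is in logical blocks and each entry covers
 * one contiguous LBA range.
 *
 *	struct nvme_dsm_range range;
 *
 *	memset(&range, 0, sizeof(range));
 *	range.attributes = 0;
 *	range.starting_lba = lba;
 *	range.length = nblocks;
 *	(pass &range with a range count of 1 to nvme_ns_cmd_deallocate())
 */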

/* status code types */
enum nvme_status_code_type {
	NVME_SCT_GENERIC		= 0x0,
	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
	NVME_SCT_MEDIA_ERROR		= 0x2,
	/* 0x3-0x6 - reserved */
	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
};

/* generic command status codes */
enum nvme_generic_command_status_code {
	NVME_SC_SUCCESS				= 0x00,
	NVME_SC_INVALID_OPCODE			= 0x01,
	NVME_SC_INVALID_FIELD			= 0x02,
	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,

	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
};

/* command specific status codes */
enum nvme_command_specific_status_code {
	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
	/* 0x04 - reserved */
	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
	NVME_SC_INVALID_LOG_PAGE		= 0x09,
	NVME_SC_INVALID_FORMAT			= 0x0a,
	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,

	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
};

/* media error status codes */
enum nvme_media_error_status_code {
	NVME_SC_WRITE_FAULTS			= 0x80,
	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
	NVME_SC_COMPARE_FAILURE			= 0x85,
	NVME_SC_ACCESS_DENIED			= 0x86,
};

/* admin opcodes */
enum nvme_admin_opcode {
	NVME_OPC_DELETE_IO_SQ			= 0x00,
	NVME_OPC_CREATE_IO_SQ			= 0x01,
	NVME_OPC_GET_LOG_PAGE			= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_DELETE_IO_CQ			= 0x04,
	NVME_OPC_CREATE_IO_CQ			= 0x05,
	NVME_OPC_IDENTIFY			= 0x06,
	/* 0x07 - reserved */
	NVME_OPC_ABORT				= 0x08,
	NVME_OPC_SET_FEATURES			= 0x09,
	NVME_OPC_GET_FEATURES			= 0x0a,
	/* 0x0b - reserved */
	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
	NVME_OPC_NAMESPACE_MANAGEMENT		= 0x0d,
	/* 0x0e-0x0f - reserved */
	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,
	NVME_OPC_NAMESPACE_ATTACHMENT		= 0x15,

	NVME_OPC_FORMAT_NVM			= 0x80,
	NVME_OPC_SECURITY_SEND			= 0x81,
	NVME_OPC_SECURITY_RECEIVE		= 0x82,
};

/* nvme nvm opcodes */
enum nvme_nvm_opcode {
	NVME_OPC_FLUSH				= 0x00,
	NVME_OPC_WRITE				= 0x01,
	NVME_OPC_READ				= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
	NVME_OPC_COMPARE			= 0x05,
	/* 0x06-0x07 - reserved */
	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
};

enum nvme_feature {
	/* 0x00 - reserved */
	NVME_FEAT_ARBITRATION			= 0x01,
	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
	NVME_FEAT_ERROR_RECOVERY		= 0x05,
	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
	NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION = 0x0C,
	NVME_FEAT_HOST_MEMORY_BUFFER		= 0x0D,
	NVME_FEAT_TIMESTAMP			= 0x0E,
	NVME_FEAT_KEEP_ALIVE_TIMER		= 0x0F,
	NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT	= 0x10,
	NVME_FEAT_NON_OP_POWER_STATE_CONFIG	= 0x11,
	/* 0x12-0x77 - reserved */
	/* 0x78-0x7f - NVMe Management Interface */
	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
	/* 0x81-0xBF - command set specific (reserved) */
	/* 0xC0-0xFF - vendor specific */
};

enum nvme_dsm_attribute {
	NVME_DSM_ATTR_INTEGRAL_READ		= 0x1,
	NVME_DSM_ATTR_INTEGRAL_WRITE		= 0x2,
	NVME_DSM_ATTR_DEALLOCATE		= 0x4,
};

enum nvme_activate_action {
	NVME_AA_REPLACE_NO_ACTIVATE		= 0x0,
	NVME_AA_REPLACE_ACTIVATE		= 0x1,
	NVME_AA_ACTIVATE			= 0x2,
};

struct nvme_power_state {
	/** Maximum Power */
	uint16_t	mp;			/* Maximum Power */
	uint8_t		ps_rsvd1;
	uint8_t		mps      : 1;		/* Max Power Scale */
	uint8_t		nops     : 1;		/* Non-Operational State */
	uint8_t		ps_rsvd2 : 6;
	uint32_t	enlat;			/* Entry Latency */
	uint32_t	exlat;			/* Exit Latency */
	uint8_t		rrt      : 5;		/* Relative Read Throughput */
	uint8_t		ps_rsvd3 : 3;
	uint8_t		rrl      : 5;		/* Relative Read Latency */
	uint8_t		ps_rsvd4 : 3;
	uint8_t		rwt      : 5;		/* Relative Write Throughput */
	uint8_t		ps_rsvd5 : 3;
	uint8_t		rwl      : 5;		/* Relative Write Latency */
	uint8_t		ps_rsvd6 : 3;
	uint16_t	idlp;			/* Idle Power */
	uint8_t		ps_rsvd7 : 6;
	uint8_t		ips      : 2;		/* Idle Power Scale */
	uint8_t		ps_rsvd8;
	uint16_t	actp;			/* Active Power */
	uint8_t		apw      : 3;		/* Active Power Workload */
	uint8_t		ps_rsvd9 : 3;
	uint8_t		aps      : 2;		/* Active Power Scale */
	uint8_t		ps_rsvd10[9];
} __packed;

_Static_assert(sizeof(struct nvme_power_state) == 32, "bad size for nvme_power_state");

#define NVME_SERIAL_NUMBER_LENGTH	20
#define NVME_MODEL_NUMBER_LENGTH	40
#define NVME_FIRMWARE_REVISION_LENGTH	8

struct nvme_controller_data {

	/* bytes 0-255: controller capabilities and features */

	/** pci vendor id */
	uint16_t		vid;

	/** pci subsystem vendor id */
	uint16_t		ssvid;

	/** serial number */
	uint8_t			sn[NVME_SERIAL_NUMBER_LENGTH];

	/** model number */
	uint8_t			mn[NVME_MODEL_NUMBER_LENGTH];

	/** firmware revision */
	uint8_t			fr[NVME_FIRMWARE_REVISION_LENGTH];

	/** recommended arbitration burst */
	uint8_t			rab;

	/** ieee oui identifier */
	uint8_t			ieee[3];

	/** multi-interface capabilities */
	uint8_t			mic;

	/** maximum data transfer size */
	uint8_t			mdts;

	/** Controller ID */
	uint16_t		ctrlr_id;

	/** Version */
	uint32_t		ver;

	/** RTD3 Resume Latency */
	uint32_t		rtd3r;

	/** RTD3 Enter Latency */
	uint32_t		rtd3e;

	/** Optional Asynchronous Events Supported */
	uint32_t		oaes;	/* bitfield really */

	/** Controller Attributes */
	uint32_t		ctratt;	/* bitfield really */

	uint8_t			reserved1[12];

	/** FRU Globally Unique Identifier */
	uint8_t			fguid[16];

	uint8_t			reserved2[128];

	/* bytes 256-511: admin command set attributes */

	/** optional admin command support */
	struct {
		/* supports security send/receive commands */
		uint16_t	security  : 1;

		/* supports format nvm command */
		uint16_t	format    : 1;

		/* supports firmware activate/download commands */
		uint16_t	firmware  : 1;

		/* supports namespace management commands */
		uint16_t	nsmgmt	  : 1;

		uint16_t	oacs_rsvd : 12;
	} __packed oacs;

	/** abort command limit */
	uint8_t			acl;

	/** asynchronous event request limit */
	uint8_t			aerl;

	/** firmware updates */
	struct {
		/* first slot is read-only */
		uint8_t		slot1_ro  : 1;

		/* number of firmware slots */
		uint8_t		num_slots : 3;

		uint8_t		frmw_rsvd : 4;
	} __packed frmw;

	/** log page attributes */
	struct {
		/* per namespace smart/health log page */
		uint8_t		ns_smart : 1;

		uint8_t		lpa_rsvd : 7;
	} __packed lpa;

	/** error log page entries */
	uint8_t			elpe;

	/** number of power states supported */
	uint8_t			npss;

	/** admin vendor specific command configuration */
	struct {
		/* admin vendor specific commands use spec format */
		uint8_t		spec_format : 1;

		uint8_t		avscc_rsvd  : 7;
	} __packed avscc;

	/** Autonomous Power State Transition Attributes */
	struct {
		/* Autonomous Power State Transitions supported */
		uint8_t		apst_supp : 1;

		uint8_t		apsta_rsvd : 7;
	} __packed apsta;

	/** Warning Composite Temperature Threshold */
	uint16_t		wctemp;

	/** Critical Composite Temperature Threshold */
	uint16_t		cctemp;

	/** Maximum Time for Firmware Activation */
	uint16_t		mtfa;

	/** Host Memory Buffer Preferred Size */
	uint32_t		hmpre;

	/** Host Memory Buffer Minimum Size */
	uint32_t		hmmin;

	/** Namespace capabilities */
	struct {
		/* if nsmgmt, report tnvmcap and unvmcap */
		uint8_t    tnvmcap[16];
		uint8_t    unvmcap[16];
	} __packed untncap;

	/** Replay Protected Memory Block Support */
	uint32_t		rpmbs; /* Really a bitfield */

	/** Extended Device Self-test Time */
	uint16_t		edstt;

	/** Device Self-test Options */
	uint8_t			dsto; /* Really a bitfield */

	/** Firmware Update Granularity */
	uint8_t			fwug;

	/** Keep Alive Support */
	uint16_t		kas;

	/** Host Controlled Thermal Management Attributes */
	uint16_t		hctma; /* Really a bitfield */

	/** Minimum Thermal Management Temperature */
	uint16_t		mntmt;

	/** Maximum Thermal Management Temperature */
	uint16_t		mxtmt;

	/** Sanitize Capabilities */
	uint32_t		sanicap; /* Really a bitfield */

	uint8_t reserved3[180];
	/* bytes 512-703: nvm command set attributes */

	/** submission queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed sqes;

	/** completion queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed cqes;

	/** Maximum Outstanding Commands */
	uint16_t		maxcmd;

	/** number of namespaces */
	uint32_t		nn;

	/** optional nvm command support */
	struct {
		uint16_t	compare : 1;
		uint16_t	write_unc : 1;
		uint16_t	dsm: 1;
		uint16_t	reserved: 13;
	} __packed oncs;

	/** fused operation support */
	uint16_t		fuses;

	/** format nvm attributes */
	uint8_t			fna;

	/** volatile write cache */
	struct {
		uint8_t		present : 1;
		uint8_t		reserved : 7;
	} __packed vwc;

	/* TODO: flesh out remaining nvm command set attributes */
	uint8_t			reserved5[178];

	/* bytes 704-2047: i/o command set attributes */
	uint8_t			reserved6[1344];

	/* bytes 2048-3071: power state descriptors */
	struct nvme_power_state power_state[32];

	/* bytes 3072-4095: vendor specific */
	uint8_t			vs[1024];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_controller_data) == 4096, "bad size for nvme_controller_data");

struct nvme_namespace_data {

	/** namespace size */
	uint64_t		nsze;

	/** namespace capacity */
	uint64_t		ncap;

	/** namespace utilization */
	uint64_t		nuse;

	/** namespace features */
	struct {
		/** thin provisioning */
		uint8_t		thin_prov : 1;
		uint8_t		reserved1 : 7;
	} __packed nsfeat;

	/** number of lba formats */
	uint8_t			nlbaf;

	/** formatted lba size */
	struct {
		uint8_t		format    : 4;
		uint8_t		extended  : 1;
		uint8_t		reserved2 : 3;
	} __packed flbas;

	/** metadata capabilities */
	struct {
		/* metadata can be transferred as part of data prp list */
		uint8_t		extended  : 1;

		/* metadata can be transferred with separate metadata pointer */
		uint8_t		pointer   : 1;

		uint8_t		reserved3 : 6;
	} __packed mc;

	/** end-to-end data protection capabilities */
	struct {
		/* protection information type 1 */
		uint8_t		pit1     : 1;

		/* protection information type 2 */
		uint8_t		pit2     : 1;

		/* protection information type 3 */
		uint8_t		pit3     : 1;

		/* first eight bytes of metadata */
		uint8_t		md_start : 1;

		/* last eight bytes of metadata */
		uint8_t		md_end   : 1;
	} __packed dpc;

	/** end-to-end data protection type settings */
	struct {
		/* protection information type */
		uint8_t		pit       : 3;

		/* 1 == protection info transferred at start of metadata */
		/* 0 == protection info transferred at end of metadata */
		uint8_t		md_start  : 1;

		uint8_t		reserved4 : 4;
	} __packed dps;

	uint8_t			reserved5[98];

	/** lba format support */
	struct {
		/** metadata size */
		uint32_t	ms	  : 16;

		/** lba data size */
		uint32_t	lbads	  : 8;

		/** relative performance */
		uint32_t	rp	  : 2;

		uint32_t	reserved6 : 6;
	} __packed lbaf[16];

	uint8_t			reserved6[192];

	uint8_t			vendor_specific[3712];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_namespace_data) == 4096, "bad size for nvme_namespace_data");

enum nvme_log_page {

	/* 0x00 - reserved */
	NVME_LOG_ERROR			= 0x01,
	NVME_LOG_HEALTH_INFORMATION	= 0x02,
	NVME_LOG_FIRMWARE_SLOT		= 0x03,
	NVME_LOG_CHANGED_NAMESPACE	= 0x04,
	NVME_LOG_COMMAND_EFFECT		= 0x05,
	/* 0x06-0x7F - reserved */
	/* 0x80-0xBF - I/O command set specific */
	NVME_LOG_RES_NOTIFICATION	= 0x80,
	/* 0xC0-0xFF - vendor specific */

	/*
	 * The following are Intel specific log pages, but they seem
	 * to be widely implemented.
	 */
	INTEL_LOG_READ_LAT_LOG		= 0xc1,
	INTEL_LOG_WRITE_LAT_LOG		= 0xc2,
	INTEL_LOG_TEMP_STATS		= 0xc5,
	INTEL_LOG_ADD_SMART		= 0xca,
	INTEL_LOG_DRIVE_MKT_NAME	= 0xdd,

	/*
	 * HGST log page, with lots of sub-pages.
	 */
	HGST_INFO_LOG			= 0xc1,
};

struct nvme_error_information_entry {

	uint64_t		error_count;
	uint16_t		sqid;
	uint16_t		cid;
	struct nvme_status	status;
	uint16_t		error_location;
	uint64_t		lba;
	uint32_t		nsid;
	uint8_t			vendor_specific;
	uint8_t			reserved[35];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_error_information_entry) == 64, "bad size for nvme_error_information_entry");

union nvme_critical_warning_state {

	uint8_t		raw;

	struct {
		uint8_t	available_spare		: 1;
		uint8_t	temperature		: 1;
		uint8_t	device_reliability	: 1;
		uint8_t	read_only		: 1;
		uint8_t	volatile_memory_backup	: 1;
		uint8_t	reserved		: 3;
	} __packed bits;
} __packed;

_Static_assert(sizeof(union nvme_critical_warning_state) == 1, "bad size for nvme_critical_warning_state");

struct nvme_health_information_page {

	union nvme_critical_warning_state	critical_warning;

	uint16_t		temperature;
	uint8_t			available_spare;
	uint8_t			available_spare_threshold;
	uint8_t			percentage_used;

	uint8_t			reserved[26];

	/*
	 * Note that the following are 128-bit values, but are
	 *  defined as an array of 2 64-bit values.
	 */
	/* Data Units Read is always in 512-byte units. */
	uint64_t		data_units_read[2];
	/* Data Units Written is always in 512-byte units. */
	uint64_t		data_units_written[2];
	/* For NVM command set, this includes Compare commands. */
	uint64_t		host_read_commands[2];
	uint64_t		host_write_commands[2];
	/* Controller Busy Time is reported in minutes. */
	uint64_t		controller_busy_time[2];
	uint64_t		power_cycles[2];
	uint64_t		power_on_hours[2];
	uint64_t		unsafe_shutdowns[2];
	uint64_t		media_errors[2];
	uint64_t		num_error_info_log_entries[2];
	uint32_t		warning_temp_time;
	uint32_t		error_temp_time;
	uint16_t		temp_sensor[8];

	uint8_t			reserved2[296];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_health_information_page) == 512, "bad size for nvme_health_information_page");
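
/*
 * Note: temperature and temp_sensor[] above are reported in degrees Kelvin
 * per the NVMe specification.  A consumer converting to Celsius would
 * typically do something like:
 *
 *	int temp_c = (int)health->temperature - 273;
 */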

struct nvme_firmware_page {

	struct {
		uint8_t	slot		: 3; /* slot for current FW */
		uint8_t	reserved	: 5;
	} __packed afi;

	uint8_t			reserved[7];
	uint64_t		revision[7]; /* revisions for 7 slots */
	uint8_t			reserved2[448];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_firmware_page) == 512, "bad size for nvme_firmware_page");

struct intel_log_temp_stats
{
	uint64_t	current;
	uint64_t	overtemp_flag_last;
	uint64_t	overtemp_flag_life;
	uint64_t	max_temp;
	uint64_t	min_temp;
	uint64_t	_rsvd[5];
	uint64_t	max_oper_temp;
	uint64_t	min_oper_temp;
	uint64_t	est_offset;
} __packed __aligned(4);

_Static_assert(sizeof(struct intel_log_temp_stats) == 13 * 8, "bad size for intel_log_temp_stats");

#define NVME_TEST_MAX_THREADS	128

struct nvme_io_test {

	enum nvme_nvm_opcode	opc;
	uint32_t		size;
	uint32_t		time;	/* in seconds */
	uint32_t		num_threads;
	uint32_t		flags;
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

enum nvme_io_test_flags {

	/*
	 * Specifies whether dev_refthread/dev_relthread should be
	 *  called during NVME_BIO_TEST.  Ignored for other test
	 *  types.
	 */
	NVME_TEST_FLAG_REFTHREAD =	0x1,
};
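
/*
 * Example (an illustrative userland sketch, assuming "fd" is an open nvme
 * namespace device): the NVME_IO_TEST and NVME_BIO_TEST ioctls take a
 * struct nvme_io_test describing the workload and return per-thread
 * completion counts in io_completed[].
 *
 *	struct nvme_io_test io_test;
 *
 *	memset(&io_test, 0, sizeof(io_test));
 *	io_test.opc = NVME_OPC_READ;
 *	io_test.size = 4096;		(bytes per I/O)
 *	io_test.time = 10;		(seconds)
 *	io_test.num_threads = 4;
 *	if (ioctl(fd, NVME_IO_TEST, &io_test) < 0)
 *		err(1, "NVME_IO_TEST");
 */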

struct nvme_pt_command {

	/*
	 * cmd is used to specify a passthrough command to a controller or
	 *  namespace.
	 *
	 * The following fields from cmd may be specified by the caller:
	 *	* opc  (opcode)
	 *	* nsid (namespace id) - for admin commands only
	 *	* cdw10-cdw15
	 *
	 * Remaining fields must be set to 0 by the caller.
	 */
	struct nvme_command	cmd;

	/*
	 * cpl returns completion status for the passthrough command
	 *  specified by cmd.
	 *
	 * The following fields will be filled out by the driver, for
	 *  consumption by the caller:
	 *	* cdw0
	 *	* status (except for phase)
	 *
	 * Remaining fields will be set to 0 by the driver.
	 */
	struct nvme_completion	cpl;

	/* buf is the data buffer associated with this passthrough command. */
	void *			buf;

	/*
	 * len is the length of the data buffer associated with this
	 *  passthrough command.
	 */
	uint32_t		len;

	/*
	 * is_read = 1 if the passthrough command will read data into the
	 *  supplied buffer from the controller.
	 *
	 * is_read = 0 if the passthrough command will write data from the
	 *  supplied buffer to the controller.
	 */
	uint32_t		is_read;

	/*
	 * driver_lock is used by the driver only.  It must be set to 0
	 *  by the caller.
	 */
	struct mtx *		driver_lock;
};

#define nvme_completion_is_error(cpl)					\
	((cpl)->status.sc != 0 || (cpl)->status.sct != 0)

void	nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);
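
/*
 * Example (an illustrative userland sketch, assuming "fd" is an open
 * controller device such as /dev/nvme0): issuing an IDENTIFY controller
 * command through NVME_PASSTHROUGH_CMD and printing the model number with
 * nvme_strvis().
 *
 *	struct nvme_pt_command pt;
 *	struct nvme_controller_data cdata;
 *	uint8_t mn[NVME_MODEL_NUMBER_LENGTH + 1];
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_IDENTIFY;
 *	pt.cmd.cdw10 = 1;		(CNS = 1: identify controller)
 *	pt.buf = &cdata;
 *	pt.len = sizeof(cdata);
 *	pt.is_read = 1;
 *	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0 ||
 *	    nvme_completion_is_error(&pt.cpl))
 *		err(1, "identify failed");
 *	nvme_strvis(mn, cdata.mn, sizeof(mn), NVME_MODEL_NUMBER_LENGTH);
 *	printf("%s\n", mn);
 */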

#ifdef _KERNEL

struct bio;

struct nvme_namespace;
struct nvme_controller;
struct nvme_consumer;

typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);

typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
				     uint32_t, void *, uint32_t);
typedef void (*nvme_cons_fail_fn_t)(void *);

enum nvme_namespace_flags {
	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
	NVME_NS_FLUSH_SUPPORTED		= 0x2,
};

int	nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
				   struct nvme_pt_command *pt,
				   uint32_t nsid, int is_user_buffer,
				   int is_admin_cmd);

/* Admin functions */
void	nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
				    uint8_t log_page, uint32_t nsid,
				    void *payload, uint32_t payload_size,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
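
/*
 * Example (an illustrative kernel-consumer sketch; "health_done" is a
 * caller-supplied nvme_cb_fn_t callback): fetching the controller-wide
 * SMART / health log.  NVME_GLOBAL_NAMESPACE_TAG selects the global log
 * rather than a per-namespace one.
 *
 *	static struct nvme_health_information_page health;
 *
 *	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
 *	    NVME_GLOBAL_NAMESPACE_TAG, &health, sizeof(health),
 *	    health_done, &health);
 */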

/* NVM I/O functions */
int	nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
			  uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
			 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			 void *cb_arg);
int	nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
			       uint8_t num_ranges, nvme_cb_fn_t cb_fn,
			       void *cb_arg);
int	nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset,
		     size_t len);

/* Registration functions */
struct nvme_consumer *	nvme_register_consumer(nvme_cons_ns_fn_t    ns_fn,
					       nvme_cons_ctrlr_fn_t ctrlr_fn,
					       nvme_cons_async_fn_t async_fn,
					       nvme_cons_fail_fn_t  fail_fn);
void		nvme_unregister_consumer(struct nvme_consumer *consumer);

/* Controller helper functions */
device_t	nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
const struct nvme_controller_data *
		nvme_ctrlr_get_data(struct nvme_controller *ctrlr);

/* Namespace helper functions */
uint32_t	nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_sector_size(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_num_sectors(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_flags(struct nvme_namespace *ns);
const char *	nvme_ns_get_serial_number(struct nvme_namespace *ns);
const char *	nvme_ns_get_model_number(struct nvme_namespace *ns);
const struct nvme_namespace_data *
		nvme_ns_get_data(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_stripesize(struct nvme_namespace *ns);

int	nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
			    nvme_cb_fn_t cb_fn);

/* Command building helper functions -- shared with CAM */
static inline
void	nvme_ns_flush_cmd(struct nvme_command *cmd, uint16_t nsid)
{

	cmd->opc = NVME_OPC_FLUSH;
	cmd->nsid = nsid;
}

static inline
void	nvme_ns_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd, uint16_t nsid,
    uint64_t lba, uint32_t count)
{
	cmd->opc = rwcmd;
	cmd->nsid = nsid;
	cmd->cdw10 = lba & 0xffffffffu;
	cmd->cdw11 = lba >> 32;
	cmd->cdw12 = count-1;
	cmd->cdw13 = 0;
	cmd->cdw14 = 0;
	cmd->cdw15 = 0;
}

static inline
void	nvme_ns_write_cmd(struct nvme_command *cmd, uint16_t nsid,
    uint64_t lba, uint32_t count)
{
	nvme_ns_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count);
}

static inline
void	nvme_ns_read_cmd(struct nvme_command *cmd, uint16_t nsid,
    uint64_t lba, uint32_t count)
{
	nvme_ns_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count);
}

static inline
void	nvme_ns_trim_cmd(struct nvme_command *cmd, uint16_t nsid,
    uint32_t num_ranges)
{
	cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = nsid;
	cmd->cdw10 = num_ranges - 1;
	cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;
}

extern int nvme_use_nvd;

#endif /* _KERNEL */

#endif /* __NVME_H__ */