/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/nvme/nvme_ctrlr.c 328677 2018-02-01 16:27:10Z mav $");

#include "opt_cam.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
						struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 *  BAR 4/5, separate from the control/doorbell registers.  Always
	 *  try to map this bar, because it must be mapped prior to calling
	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 *  bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, RF_ACTIVE);

	return (0);
}

static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	uint32_t		num_entries;
	int			error;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 *  back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	error = nvme_qpair_construct(qpair,
				     0, /* qpair ID */
				     0, /* vector */
				     num_entries,
				     NVME_ADMIN_TRACKERS,
				     ctrlr);
	return (error);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	union cap_lo_register	cap_lo;
	int			i, error, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 *  devices may specify a smaller limit, so we need to check
	 *  the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes+1);
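	/* MQES is 0-based: a raw value of 1023 means 1024 entries per queue. */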

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 *  Note also that for a queue size of N, we can only have (N-1)
	 *  commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries-1));

	/*
	 * This was calculated previously when setting up interrupts, but
	 *  a controller could theoretically support fewer I/O queues than
	 *  MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0. IO queues start at ID=1 -
		 *  hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 *  calculated in nvme_attach().
		 */
		error = nvme_qpair_construct(qpair,
				     i+1, /* qpair ID */
				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
				     num_entries,
				     num_trackers,
				     ctrlr);
		if (error)
			return (error);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 *  interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_qpair_fail(&ctrlr->ioq[i]);
	}
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	struct nvme_request	*req;

	mtx_lock(&ctrlr->lock);
	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
		req = STAILQ_FIRST(&ctrlr->fail_req);
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
	mtx_unlock(&ctrlr->lock);
}

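/*
 * Poll CSTS.RDY until it matches desired_val, checking once per millisecond
 *  until ready_timeout_in_ms (derived from CAP.TO) expires.
 */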
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en != desired_val) {
		nvme_printf(ctrlr, "%s called with desired_val = %d "
		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
		return (ENXIO);
	}

	ms_waited = 0;

	while (csts.bits.rdy != desired_val) {
		DELAY(1000);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1 && csts.bits.rdy == 0)
		nvme_ctrlr_wait_for_ready(ctrlr, 1);

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);
	nvme_ctrlr_wait_for_ready(ctrlr, 0);
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	union aqa_register	aqa;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

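	/*
	 * Program the admin submission and completion queue base addresses
	 *  before enabling the controller; ASQ/ACQ must point at valid queue
	 *  memory when CC.EN is set to 1 below.
	 */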
	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/*
	 * MPS encodes the memory page size as 2^(12 + MPS), so for the base
	 *  4KB PAGE_SIZE this evaluates to 0, as the spec requires.
	 */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 *  reset, so do not try to disable them.  Use is_initialized
	 *  to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}

	DELAY(100*1000);

	nvme_ctrlr_disable(ctrlr);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 *  immediately since there is no need to kick off another
		 *  reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 *  controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					cq_allocated, sq_allocated;

	status.done = FALSE;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
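	/* For example, cdw0 == 0x00030003 reports 4 CQs and 4 SQs allocated. */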
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 *  so use the minimum of the number requested and what was
	 *  actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

	return (0);
}

static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;
	int					i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	int			i;

	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
		ns = &ctrlr->ns[i];
		nvme_ns_construct(ns, i+1, ctrlr);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t	log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    ctrlr->cdata.elpe,
		    NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

	if (state.bits.available_spare == 1)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state.bits.temperature == 1)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state.bits.device_reliability == 1)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state.bits.read_only == 1)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state.bits.volatile_memory_backup == 1)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state.bits.reserved != 0)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request		*aer = arg;
	struct nvme_health_information_page	*health_info;

	/*
	 * If the log page fetch for some reason completed with an error,
	 *  don't pass log page data to the consumers.  In practice, this case
	 *  should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 *  SMART/health log page are persistent, so
			 *  clear the associated bits in the async event
			 *  config so that we do not receive repeated
			 *  notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 *  not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 *  that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request	*aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 *  Do not retry failed async event requests.  This avoids
		 *  infinite loops where a new async event request is submitted
		 *  to replace the one just failed, only to fail again and
		 *  perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 *  that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 *  nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_async_event_request		*aer;
	uint32_t				i;

	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = FALSE;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
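	/* For example, aerl == 3 permits up to 4 outstanding AER commands. */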
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 *  controller after a reset.  During initialization,
	 *  we have already submitted admin commands to get
	 *  the number of I/O queues supported, so cannot reset
	 *  the adminq again here.
	 */
	if (ctrlr->is_resetting) {
		nvme_qpair_reset(&ctrlr->adminq);
	}

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 *  HW limit.  We call SET_FEATURES again here so that it gets called
	 *  after any reset, for controllers that depend on the driver to
	 *  explicitly specify how many queues it will use.  This value should
	 *  never change between resets, so panic if it somehow does.
	 */
	if (ctrlr->is_resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			      old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	int			status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 *  handlers on this CPU that were blocked on a qpair lock. We want
	 *  all nvme interrupts completed before proceeding with restarting the
	 *  controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

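/*
 * Legacy INTx handler: mask further interrupts via INTMS, drain the admin
 *  queue and the single I/O queue, then unmask via INTMC.
 */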
void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);

	nvme_qpair_process_completions(&ctrlr->adminq);

	if (ctrlr->ioq && ctrlr->ioq[0].cpl)
		nvme_qpair_process_completions(&ctrlr->ioq[0]);

	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(pt->driver_lock);
	wakeup(pt);
	mtx_unlock(pt->driver_lock);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request	*req;
	struct mtx		*mtx;
	struct buf		*buf = NULL;
	int			ret = 0;
	vm_offset_t		addr, end;

	if (pt->len > 0) {
		/*
		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
		 * pages. Ensure this request has fewer than MAXPHYS bytes when
		 * extended to full pages.
		 */
		addr = (vm_offset_t)pt->buf;
		end = round_page(addr + pt->len);
		addr = trunc_page(addr);
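		/*
		 * Example: a buffer starting just before a page boundary spans
		 *  an extra page once rounded, so the check below uses the
		 *  page-rounded length rather than pt->len itself.
		 */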
		if (end - addr > MAXPHYS)
			return (EIO);

		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return (EIO);
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 *  this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc	= pt->cmd.opc;
	req->cmd.cdw10	= pt->cmd.cdw10;
	req->cmd.cdw11	= pt->cmd.cdw11;
	req->cmd.cdw12	= pt->cmd.cdw12;
	req->cmd.cdw13	= pt->cmd.cdw13;
	req->cmd.cdw14	= pt->cmd.cdw14;
	req->cmd.cdw15	= pt->cmd.cdw15;

	req->cmd.nsid = nsid;

	if (is_admin_cmd)
		mtx = &ctrlr->lock;
	else
		mtx = &ctrlr->ns[nsid-1].lock;

	mtx_lock(mtx);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

	pt->driver_lock = NULL;

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller			*ctrlr;
	struct nvme_pt_command			*pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t	dev;
	int		per_cpu_io_queues;
	int		min_cpus_per_ioq;
	int		num_vectors_requested, num_vectors_allocated;
	int		num_vectors_available;

	dev = ctrlr->dev;
	min_cpus_per_ioq = 1;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

	if (min_cpus_per_ioq < 1) {
		min_cpus_per_ioq = 1;
	} else if (min_cpus_per_ioq > mp_ncpus) {
		min_cpus_per_ioq = mp_ncpus;
	}

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	if (per_cpu_io_queues == 0) {
		min_cpus_per_ioq = mp_ncpus;
	}

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 *  boot, meaning that systems with high core count and many devices
	 *  requesting per-CPU interrupt vectors will not get their full
	 *  allotment.  So first, try to allocate as many as we may need to
	 *  understand what is available, then immediately release them.
	 *  Then figure out how many of those we will actually use, based on
	 *  assigning an equal number of cores to each I/O queue.
	 */

	/* One vector per core for I/O queues, plus one vector for the admin queue. */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/*
	 * Do not use all vectors for I/O queues - one must be saved for the
	 *  admin queue.
	 */
	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));

	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
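	/*
	 * Example: with 8 CPUs and 5 available vectors, 4 vectors remain for
	 *  I/O, giving num_cpus_per_ioq = 2 and num_io_queues = 4.
	 */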
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;

	/*
	 * Now just allocate the number of vectors we need.  This should
	 *  succeed, since we previously called pci_alloc_msix()
	 *  successfully returning at least this many vectors, but just to
	 *  be safe, if something goes wrong just revert to INTx.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;
	int			status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 *  other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);
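	/* For example, MPSMIN of 0 yields a 4KB minimum page size. */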

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	nvme_ctrlr_setup_interrupts(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
		return (ENXIO);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int				i;

	/*
	 *  Notify the controller of a shutdown, even though this is due to
	 *   a driver unload, not a system shutdown (this path is not invoked
	 *   during shutdown).  This ensures the controller receives a
	 *   shutdown notification in case the system is shutdown before
	 *   reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);
	taskqueue_free(ctrlr->taskqueue);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}

	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	int			ticks = 0;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair       *qpair;

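	/*
	 * Map the submitting CPU to an I/O queue: with num_cpus_per_ioq == 2,
	 *  CPUs 0-1 share ioq[0], CPUs 2-3 share ioq[1], and so on.
	 */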
	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}