/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
						struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

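	/*
	 * BAR 0/1 maps the controller's register file (capabilities,
	 *  configuration and doorbell registers), per the NVMe spec.
	 */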
	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 *  BAR 4/5, separate from the control/doorbell registers.  Always
	 *  try to map this bar, because it must be mapped prior to calling
	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 *  bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

	return (0);
}

static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	uint32_t		num_entries;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert to the
	 *  default.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently from the
	 *  max I/O xfer size.  16KB is sufficient here, and possibly even
	 *  less would suffice.
	 */
	nvme_qpair_construct(qpair,
			     0, /* qpair ID */
			     0, /* vector */
			     num_entries,
			     NVME_ADMIN_TRACKERS,
			     ctrlr);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	union cap_lo_register	cap_lo;
	int			i, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 *  devices may specify a smaller limit, so we need to check
	 *  the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes+1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 *  Note also that for a queue size of N, we can only have (N-1)
	 *  commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries-1));

	/*
	 * This was calculated previously when setting up interrupts, but
	 *  a controller could theoretically support fewer I/O queues than
	 *  MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0. IO queues start at ID=1 -
		 *  hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 *  calculated in nvme_attach().
		 */
		nvme_qpair_construct(qpair,
				     i+1, /* qpair ID */
				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
				     num_entries,
				     num_trackers,
				     ctrlr);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 *  interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_fail(&ctrlr->ioq[i]);
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	struct nvme_request	*req;

	mtx_lock(&ctrlr->lock);
	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
		req = STAILQ_FIRST(&ctrlr->fail_req);
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
	mtx_unlock(&ctrlr->lock);
}

static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en != desired_val) {
		nvme_printf(ctrlr, "%s called with desired_val = %d "
		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
		return (ENXIO);
	}

	ms_waited = 0;

	while (csts.bits.rdy != desired_val) {
		DELAY(1000);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1 && csts.bits.rdy == 0)
		nvme_ctrlr_wait_for_ready(ctrlr, 1);

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);
	nvme_ctrlr_wait_for_ready(ctrlr, 0);
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	union aqa_register	aqa;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

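	/*
	 * Program the admin queue base addresses: ASQ holds the physical
	 *  address of the admin submission queue, ACQ that of the admin
	 *  completion queue.  Both must be set before enabling the
	 *  controller.
	 */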
	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

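	/*
	 * Select the NVM command set (css = 0) and round robin arbitration
	 *  (ams = 0), request no shutdown notification (shn = 0), and set
	 *  the enable bit.
	 */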
	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* MPS is encoded as 2^(12+mps); for 4KB pages this evaluates to 0. */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 *  reset, so do not try to disable them.  Use is_initialized
	 *  to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}

	DELAY(100*1000);

	nvme_ctrlr_disable(ctrlr);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 *  immediately since there is no need to kick off another
		 *  reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 *  controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					cq_allocated, sq_allocated;

	status.done = FALSE;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 *  so use the minimum of the number requested and what was
	 *  actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

	return (0);
}

static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;
	int					i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	int			i, status;

	for (i = 0; i < ctrlr->cdata.nn; i++) {
		ns = &ctrlr->ns[i];
		status = nvme_ns_construct(ns, i+1, ctrlr);
		if (status != 0)
			return (status);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t	log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    ctrlr->cdata.elpe,
		    NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

	if (state.bits.available_spare == 1)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state.bits.temperature == 1)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state.bits.device_reliability == 1)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state.bits.read_only == 1)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state.bits.volatile_memory_backup == 1)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state.bits.reserved != 0)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request		*aer = arg;
	struct nvme_health_information_page	*health_info;

	/*
	 * If the log page fetch for some reason completed with an error,
	 *  don't pass log page data to the consumers.  In practice, this case
	 *  should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 *  SMART/health log page are persistent, so
			 *  clear the associated bits in the async event
			 *  config so that we do not receive repeated
			 *  notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 *  not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 *  that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request	*aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 *  infinite loops where a new async event request is submitted
		 *  to replace the one just failed, only to fail again and
		 *  perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 *  that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 *  nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_async_event_request		*aer;
	uint32_t				i;

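	/*
	 * Request notification for all critical warning types.  Reserved
	 *  bits are cleared, and any type the controller does not support
	 *  (e.g. temperature, below) is masked off before the config is
	 *  written.
	 */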
	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = FALSE;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 *  controller after a reset.  During initialization,
	 *  we have already submitted admin commands to get
	 *  the number of I/O queues supported, so cannot reset
	 *  the adminq again here.
	 */
	if (ctrlr->is_resetting) {
		nvme_qpair_reset(&ctrlr->adminq);
	}

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 *  HW limit.  We call SET_FEATURES again here so that it gets called
	 *  after any reset for controllers that depend on the driver to
	 *  explicitly specify how many queues it will use.  This value should
	 *  never change between resets, so panic if it somehow does.
	 */
	if (ctrlr->is_resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			      old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	int			status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 *  handlers on this CPU that were blocked on a qpair lock. We want
	 *  all nvme interrupts completed before proceeding with restarting the
	 *  controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

static void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

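	/*
	 * Writing 1 to INTMS masks the controller's interrupt while
	 *  completions are processed; the INTMC write below clears the
	 *  mask again.
	 */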
	nvme_mmio_write_4(ctrlr, intms, 1);

	nvme_qpair_process_completions(&ctrlr->adminq);

	if (ctrlr->ioq && ctrlr->ioq[0].cpl)
		nvme_qpair_process_completions(&ctrlr->ioq[0]);

	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;

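	/*
	 * Copy the completion status back to the caller-visible structure,
	 *  clearing the phase bit, which is only meaningful to the driver's
	 *  completion queue processing.
	 */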
	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(pt->driver_lock);
	wakeup(pt);
	mtx_unlock(pt->driver_lock);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request	*req;
	struct mtx		*mtx;
	struct buf		*buf = NULL;
	int			ret = 0;

	if (pt->len > 0) {
		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return (EIO);
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 *  this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_saveaddr = buf->b_data;
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc	= pt->cmd.opc;
	req->cmd.cdw10	= pt->cmd.cdw10;
	req->cmd.cdw11	= pt->cmd.cdw11;
	req->cmd.cdw12	= pt->cmd.cdw12;
	req->cmd.cdw13	= pt->cmd.cdw13;
	req->cmd.cdw14	= pt->cmd.cdw14;
	req->cmd.cdw15	= pt->cmd.cdw15;

	req->cmd.nsid = nsid;

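	/*
	 * Admin commands synchronize on the controller lock, I/O commands
	 *  on the lock of the target namespace.  Note that namespace IDs
	 *  are 1-based while the ns[] array is 0-based.
	 */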
	if (is_admin_cmd)
		mtx = &ctrlr->lock;
	else
		mtx = &ctrlr->ns[nsid-1].lock;

	mtx_lock(mtx);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

	pt->driver_lock = NULL;

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller			*ctrlr;
	struct nvme_pt_command			*pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t	dev;
	int		per_cpu_io_queues;
	int		min_cpus_per_ioq;
	int		num_vectors_requested, num_vectors_allocated;
	int		num_vectors_available;

	dev = ctrlr->dev;
	min_cpus_per_ioq = 1;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

	if (min_cpus_per_ioq < 1) {
		min_cpus_per_ioq = 1;
	} else if (min_cpus_per_ioq > mp_ncpus) {
		min_cpus_per_ioq = mp_ncpus;
	}

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	if (per_cpu_io_queues == 0) {
		min_cpus_per_ioq = mp_ncpus;
	}

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 *  boot, meaning that systems with high core count and many devices
	 *  requesting per-CPU interrupt vectors will not get their full
	 *  allotment.  So first, try to allocate as many as we may need to
	 *  understand what is available, then immediately release them.
	 *  Then figure out how many of those we will actually use, based on
	 *  assigning an equal number of cores to each I/O queue.
	 */

	/* One vector per core for I/O queues, plus one for the admin queue. */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/*
	 * Do not use all vectors for I/O queues - one must be saved for the
	 *  admin queue.
	 */
	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));

	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;

	/*
	 * Now just allocate the number of vectors we need.  This should
	 *  succeed, since we previously called pci_alloc_msix()
	 *  successfully returning at least this many vectors, but just to
	 *  be safe, if something goes wrong just revert to INTx.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;
	int			status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 *  other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

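	/* CAP.MPSMIN encodes the minimum memory page size as 2^(12+MPSMIN). */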
	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	nvme_ctrlr_setup_interrupts(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	nvme_ctrlr_construct_admin_qpair(ctrlr);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int				i;

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 *  a driver unload, not a system shutdown (this path is not invoked
	 *  during shutdown).  This ensures the controller receives a
	 *  shutdown notification in case the system is shut down before
	 *  reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);
	taskqueue_free(ctrlr->taskqueue);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}

	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	int			ticks = 0;

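	/*
	 * Per the NVMe spec, set CC.SHN to request a normal shutdown, then
	 *  poll CSTS.SHST until the controller reports shutdown processing
	 *  complete.
	 */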
	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair       *qpair;

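	/*
	 * Each I/O queue serves a contiguous block of num_cpus_per_ioq
	 *  CPUs, so the submitting CPU indexes directly to its assigned
	 *  queue.
	 */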
	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}