nvme_ctrlr.c revision 248769
1/*-
2 * Copyright (C) 2012 Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_ctrlr.c 248769 2013-03-26 22:09:51Z jimharris $");
29
30#include <sys/param.h>
31#include <sys/bus.h>
32#include <sys/conf.h>
33#include <sys/ioccom.h>
34#include <sys/smp.h>
35
36#include <dev/pci/pcireg.h>
37#include <dev/pci/pcivar.h>
38
39#include "nvme_private.h"
40
41static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
42						struct nvme_async_event_request *aer);
43
44static int
45nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
46{
47
48	/* Chatham puts the NVMe MMRs behind BAR 2/3, not BAR 0/1. */
49	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
50		ctrlr->resource_id = PCIR_BAR(2);
51	else
52		ctrlr->resource_id = PCIR_BAR(0);
53
54	ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
55	    &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);
56
57	if (ctrlr->resource == NULL) {
58		device_printf(ctrlr->dev, "unable to allocate pci resource\n");
59		return (ENOMEM);
60	}
61
62	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
63	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
64	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
65
66	/*
67	 * The NVMe spec allows for the MSI-X table to be placed behind
68	 *  BAR 4/5, separate from the control/doorbell registers.  Always
69	 *  try to map this bar, because it must be mapped prior to calling
70	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
71	 *  bus_alloc_resource() will just return NULL which is OK.
72	 */
73	ctrlr->bar4_resource_id = PCIR_BAR(4);
74	ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
75	    &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);
76
77	return (0);
78}
79
80#ifdef CHATHAM2
81static int
82nvme_ctrlr_allocate_chatham_bar(struct nvme_controller *ctrlr)
83{
84
85	ctrlr->chatham_resource_id = PCIR_BAR(CHATHAM_CONTROL_BAR);
86	ctrlr->chatham_resource = bus_alloc_resource(ctrlr->dev,
87	    SYS_RES_MEMORY, &ctrlr->chatham_resource_id, 0, ~0, 1,
88	    RF_ACTIVE);
89
90	if (ctrlr->chatham_resource == NULL) {
91		device_printf(ctrlr->dev, "unable to alloc pci resource\n");
92		return (ENOMEM);
93	}
94
95	ctrlr->chatham_bus_tag = rman_get_bustag(ctrlr->chatham_resource);
96	ctrlr->chatham_bus_handle =
97	    rman_get_bushandle(ctrlr->chatham_resource);
98
99	return (0);
100}
101
102static void
103nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
104{
105	uint64_t reg1, reg2, reg3;
106	uint64_t temp1, temp2;
107	uint32_t temp3;
108	uint32_t use_flash_timings = 0;
109
110	DELAY(10000);
111
112	temp3 = chatham_read_4(ctrlr, 0x8080);
113
114	device_printf(ctrlr->dev, "Chatham version: 0x%x\n", temp3);
115
116	ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
117	ctrlr->chatham_size = ctrlr->chatham_lbas * 512;
118
119	device_printf(ctrlr->dev, "Chatham size: %jd\n",
120	    (intmax_t)ctrlr->chatham_size);
121
122	reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;
123
124	TUNABLE_INT_FETCH("hw.nvme.use_flash_timings", &use_flash_timings);
125	if (use_flash_timings) {
126		device_printf(ctrlr->dev, "Chatham: using flash timings\n");
127		temp1 = 0x00001b58000007d0LL;
128		temp2 = 0x000000cb00000131LL;
129	} else {
130		device_printf(ctrlr->dev, "Chatham: using DDR timings\n");
131		temp1 = temp2 = 0x0LL;
132	}
133
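	/*
	 * Program the Chatham-specific control registers: 0x8000-0x8010
	 *  hold the reported device size minus one, and 0x8020-0x8058 hold
	 *  the flash or DDR timing parameters selected above.
	 */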
134	chatham_write_8(ctrlr, 0x8000, reg1);
135	chatham_write_8(ctrlr, 0x8008, reg2);
136	chatham_write_8(ctrlr, 0x8010, reg3);
137
138	chatham_write_8(ctrlr, 0x8020, temp1);
139	temp3 = chatham_read_4(ctrlr, 0x8020);
140
141	chatham_write_8(ctrlr, 0x8028, temp2);
142	temp3 = chatham_read_4(ctrlr, 0x8028);
143
144	chatham_write_8(ctrlr, 0x8030, temp1);
145	chatham_write_8(ctrlr, 0x8038, temp2);
146	chatham_write_8(ctrlr, 0x8040, temp1);
147	chatham_write_8(ctrlr, 0x8048, temp2);
148	chatham_write_8(ctrlr, 0x8050, temp1);
149	chatham_write_8(ctrlr, 0x8058, temp2);
150
151	DELAY(10000);
152}
153
154static void
155nvme_chatham_populate_cdata(struct nvme_controller *ctrlr)
156{
157	struct nvme_controller_data *cdata;
158
159	cdata = &ctrlr->cdata;
160
161	cdata->vid = 0x8086;
162	cdata->ssvid = 0x2011;
163
164	/*
165	 * Chatham2 puts garbage data in these fields when we
166	 *  invoke IDENTIFY_CONTROLLER, so we need to re-zero
167	 *  the fields before copying in the identification strings.
168	 */
169	memset(cdata->sn, 0, sizeof(cdata->sn));
170	memcpy(cdata->sn, "2012", strlen("2012"));
171	memset(cdata->mn, 0, sizeof(cdata->mn));
172	memcpy(cdata->mn, "CHATHAM2", strlen("CHATHAM2"));
173	memset(cdata->fr, 0, sizeof(cdata->fr));
174	memcpy(cdata->fr, "0", strlen("0"));
175	cdata->rab = 8;
176	cdata->aerl = 3;
177	cdata->lpa.ns_smart = 1;
178	cdata->sqes.min = 6; /* SQ entry size == 64 == 2^6 */
179	cdata->sqes.max = 6;
180	cdata->cqes.min = 4; /* CQ entry size == 16 == 2^4 */
181	cdata->cqes.max = 4;
182	cdata->nn = 1;
183
184	/* Chatham2 doesn't support DSM command */
185	cdata->oncs.dsm = 0;
186
187	cdata->vwc.present = 1;
188}
189#endif /* CHATHAM2 */
190
191static void
192nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
193{
194	struct nvme_qpair	*qpair;
195	uint32_t		num_entries;
196
197	qpair = &ctrlr->adminq;
198
199	num_entries = NVME_ADMIN_ENTRIES;
200	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
201	/*
202	 * If admin_entries was overridden to an invalid value, revert to
203	 *  the default value.
204	 */
205	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
206	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
207		printf("nvme: invalid hw.nvme.admin_entries=%d specified\n",
208		    num_entries);
209		num_entries = NVME_ADMIN_ENTRIES;
210	}
211
212	/*
213	 * The admin queue's max xfer size is treated differently from the
214	 *  max I/O xfer size.  16KB is sufficient here.
215	 */
216	nvme_qpair_construct(qpair,
217			     0, /* qpair ID */
218			     0, /* vector */
219			     num_entries,
220			     NVME_ADMIN_TRACKERS,
221			     16*1024, /* max xfer size */
222			     ctrlr);
223}
224
225static int
226nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
227{
228	struct nvme_qpair	*qpair;
229	union cap_lo_register	cap_lo;
230	int			i, num_entries, num_trackers;
231
232	num_entries = NVME_IO_ENTRIES;
233	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
234
235	/*
236	 * NVMe spec sets a hard limit of 64K max entries, but
237	 *  devices may specify a smaller limit, so we need to check
238	 *  the MQES field in the capabilities register.
239	 */
240	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
241	num_entries = min(num_entries, cap_lo.bits.mqes+1);
242
243	num_trackers = NVME_IO_TRACKERS;
244	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
245
246	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
247	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
248	/*
249	 * No need to have more trackers than entries in the submit queue.
250	 *  Note also that for a queue size of N, we can only have (N-1)
251	 *  commands outstanding, hence the "-1" here.
252	 */
253	num_trackers = min(num_trackers, (num_entries-1));
254
255	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
256	TUNABLE_INT_FETCH("hw.nvme.max_xfer_size", &ctrlr->max_xfer_size);
257	/*
258	 * Check that tunable doesn't specify a size greater than what our
259	 *  driver supports, and is an even PAGE_SIZE multiple.
260	 */
261	if (ctrlr->max_xfer_size > NVME_MAX_XFER_SIZE ||
262	    ctrlr->max_xfer_size % PAGE_SIZE)
263		ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
264
265	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
266	    M_NVME, M_ZERO | M_NOWAIT);
267
268	if (ctrlr->ioq == NULL)
269		return (ENOMEM);
270
271	for (i = 0; i < ctrlr->num_io_queues; i++) {
272		qpair = &ctrlr->ioq[i];
273
274		/*
275		 * The admin queue has ID 0; I/O queues start at ID 1,
276		 *  hence the 'i+1' here.
277		 *
278		 * For I/O queues, use the controller-wide max_xfer_size
279		 *  calculated in nvme_attach().
280		 */
281		nvme_qpair_construct(qpair,
282				     i+1, /* qpair ID */
283				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
284				     num_entries,
285				     num_trackers,
286				     ctrlr->max_xfer_size,
287				     ctrlr);
288
289		if (ctrlr->per_cpu_io_queues)
290			bus_bind_intr(ctrlr->dev, qpair->res, i);
291	}
292
293	return (0);
294}
295
296static void
297nvme_ctrlr_fail(struct nvme_controller *ctrlr)
298{
299	int i;
300
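	/*
	 * Mark the controller as failed so no new requests are accepted,
	 *  fail any requests still queued on the admin and I/O qpairs, and
	 *  notify registered consumers of the failure.
	 */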
301	ctrlr->is_failed = TRUE;
302	nvme_qpair_fail(&ctrlr->adminq);
303	for (i = 0; i < ctrlr->num_io_queues; i++)
304		nvme_qpair_fail(&ctrlr->ioq[i]);
305	nvme_notify_fail_consumers(ctrlr);
306}
307
308void
309nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
310    struct nvme_request *req)
311{
312
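	/*
	 * Queue the request on the failed request list and let the
	 *  fail_req task complete it from taskqueue context, rather than
	 *  completing it directly in the caller's context.
	 */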
313	mtx_lock(&ctrlr->fail_req_lock);
314	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
315	mtx_unlock(&ctrlr->fail_req_lock);
316	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
317}
318
319static void
320nvme_ctrlr_fail_req_task(void *arg, int pending)
321{
322	struct nvme_controller	*ctrlr = arg;
323	struct nvme_request	*req;
324
325	mtx_lock(&ctrlr->fail_req_lock);
326	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
327		req = STAILQ_FIRST(&ctrlr->fail_req);
328		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
329		nvme_qpair_manual_complete_request(req->qpair, req,
330		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
331	}
332	mtx_unlock(&ctrlr->fail_req_lock);
333}
334
335static int
336nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
337{
338	int ms_waited;
339	union cc_register cc;
340	union csts_register csts;
341
342	cc.raw = nvme_mmio_read_4(ctrlr, cc);
343	csts.raw = nvme_mmio_read_4(ctrlr, csts);
344
345	if (!cc.bits.en) {
346		device_printf(ctrlr->dev, "%s called with cc.en = 0\n",
347		    __func__);
348		return (ENXIO);
349	}
350
351	ms_waited = 0;
352
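	/*
	 * Poll CSTS.RDY once per millisecond until it is set, giving up
	 *  after the timeout derived from CAP.TO during attach.
	 */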
353	while (!csts.bits.rdy) {
354		DELAY(1000);
355		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
356			device_printf(ctrlr->dev, "controller did not become "
357			    "ready within %d ms\n", ctrlr->ready_timeout_in_ms);
358			return (ENXIO);
359		}
360		csts.raw = nvme_mmio_read_4(ctrlr, csts);
361	}
362
363	return (0);
364}
365
366static void
367nvme_ctrlr_disable(struct nvme_controller *ctrlr)
368{
369	union cc_register cc;
370	union csts_register csts;
371
372	cc.raw = nvme_mmio_read_4(ctrlr, cc);
373	csts.raw = nvme_mmio_read_4(ctrlr, csts);
374
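	/*
	 * If the controller is enabled but RDY has not asserted yet, wait
	 *  for RDY before clearing EN, so the disable is not issued in the
	 *  middle of an enable transition.
	 */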
375	if (cc.bits.en == 1 && csts.bits.rdy == 0)
376		nvme_ctrlr_wait_for_ready(ctrlr);
377
378	cc.bits.en = 0;
379	nvme_mmio_write_4(ctrlr, cc, cc.raw);
380	DELAY(5000);
381}
382
383static int
384nvme_ctrlr_enable(struct nvme_controller *ctrlr)
385{
386	union cc_register	cc;
387	union csts_register	csts;
388	union aqa_register	aqa;
389
390	cc.raw = nvme_mmio_read_4(ctrlr, cc);
391	csts.raw = nvme_mmio_read_4(ctrlr, csts);
392
393	if (cc.bits.en == 1) {
394		if (csts.bits.rdy == 1)
395			return (0);
396		else
397			return (nvme_ctrlr_wait_for_ready(ctrlr));
398	}
399
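	/*
	 * Program the admin submission/completion queue base addresses
	 *  (ASQ/ACQ) and sizes (AQA) before setting CC.EN below.
	 */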
400	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
401	DELAY(5000);
402	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
403	DELAY(5000);
404
405	aqa.raw = 0;
406	/* acqs and asqs are 0-based. */
407	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
408	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
409	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
410	DELAY(5000);
411
412	cc.bits.en = 1;
413	cc.bits.css = 0;
414	cc.bits.ams = 0;
415	cc.bits.shn = 0;
416	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
417	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
418
419	/* CC.MPS encodes a page size of 2^(12 + MPS), so 4KB pages yield 0. */
420	cc.bits.mps = (PAGE_SIZE >> 13);
421
422	nvme_mmio_write_4(ctrlr, cc, cc.raw);
423	DELAY(5000);
424
425	return (nvme_ctrlr_wait_for_ready(ctrlr));
426}
427
428int
429nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
430{
431	int i;
432
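	/*
	 * Disable the admin and I/O qpairs so no new commands are submitted
	 *  during the reset, then give in-flight completions a moment to
	 *  drain before toggling CC.EN.
	 */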
433	nvme_admin_qpair_disable(&ctrlr->adminq);
434	for (i = 0; i < ctrlr->num_io_queues; i++)
435		nvme_io_qpair_disable(&ctrlr->ioq[i]);
436
437	DELAY(100*1000);
438
439	nvme_ctrlr_disable(ctrlr);
440	return (nvme_ctrlr_enable(ctrlr));
441}
442
443void
444nvme_ctrlr_reset(struct nvme_controller *ctrlr)
445{
446	int cmpset;
447
448	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
449
450	if (cmpset == 0 || ctrlr->is_failed)
451		/*
452		 * Controller is already resetting or has failed.  Return
453		 *  immediately since there is no need to kick off another
454		 *  reset in these cases.
455		 */
456		return;
457
458	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
459}
460
461static int
462nvme_ctrlr_identify(struct nvme_controller *ctrlr)
463{
464	struct nvme_completion_poll_status	status;
465
466	status.done = FALSE;
467	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
468	    nvme_completion_poll_cb, &status);
469	while (status.done == FALSE)
470		DELAY(5);
471	if (nvme_completion_is_error(&status.cpl)) {
472		printf("nvme_identify_controller failed!\n");
473		return (ENXIO);
474	}
475
476#ifdef CHATHAM2
477	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
478		nvme_chatham_populate_cdata(ctrlr);
479#endif
480
481	/*
482	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
483	 *  controller supports.
484	 */
485	if (ctrlr->cdata.mdts > 0)
486		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
487		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
488
489	return (0);
490}
491
492static int
493nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
494{
495	struct nvme_completion_poll_status	status;
496	int					cq_allocated, sq_allocated;
497
498	status.done = FALSE;
499	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
500	    nvme_completion_poll_cb, &status);
501	while (status.done == FALSE)
502		DELAY(5);
503	if (nvme_completion_is_error(&status.cpl)) {
504		printf("nvme_set_num_queues failed!\n");
505		return (ENXIO);
506	}
507
508	/*
509	 * Data in cdw0 is 0-based.
510	 * The lower 16 bits indicate the number of submission queues allocated.
511	 * The upper 16 bits indicate the number of completion queues allocated.
512	 */
513	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
514	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
515
516	/*
517	 * Check that the controller was able to allocate the number of
518	 *  queues we requested.  If not, revert to one IO queue.
519	 */
520	if (sq_allocated < ctrlr->num_io_queues ||
521	    cq_allocated < ctrlr->num_io_queues) {
522		ctrlr->num_io_queues = 1;
523		ctrlr->per_cpu_io_queues = 0;
524
525		/* TODO: destroy extra queues that were created
526		 *  previously but are no longer needed.
527		 */
528	}
529
530	return (0);
531}
532
533static int
534nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
535{
536	struct nvme_completion_poll_status	status;
537	struct nvme_qpair			*qpair;
538	int					i;
539
540	for (i = 0; i < ctrlr->num_io_queues; i++) {
541		qpair = &ctrlr->ioq[i];
542
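		/*
		 * Create the completion queue first, then the submission
		 *  queue that posts to it, as the NVMe spec requires the
		 *  CQ to exist before any SQ that references it.
		 */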
543		status.done = FALSE;
544		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
545		    nvme_completion_poll_cb, &status);
546		while (status.done == FALSE)
547			DELAY(5);
548		if (nvme_completion_is_error(&status.cpl)) {
549			printf("nvme_create_io_cq failed!\n");
550			return (ENXIO);
551		}
552
553		status.done = FALSE;
554		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
555		    nvme_completion_poll_cb, &status);
556		while (status.done == FALSE)
557			DELAY(5);
558		if (nvme_completion_is_error(&status.cpl)) {
559			printf("nvme_create_io_sq failed!\n");
560			return (ENXIO);
561		}
562	}
563
564	return (0);
565}
566
567static int
568nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
569{
570	struct nvme_namespace	*ns;
571	int			i, status;
572
573	for (i = 0; i < ctrlr->cdata.nn; i++) {
574		ns = &ctrlr->ns[i];
575		status = nvme_ns_construct(ns, i+1, ctrlr);
576		if (status != 0)
577			return (status);
578	}
579
580	return (0);
581}
582
583static boolean_t
584is_log_page_id_valid(uint8_t page_id)
585{
586
587	switch (page_id) {
588	case NVME_LOG_ERROR:
589	case NVME_LOG_HEALTH_INFORMATION:
590	case NVME_LOG_FIRMWARE_SLOT:
591		return (TRUE);
592	}
593
594	return (FALSE);
595}
596
597static uint32_t
598nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
599{
600	uint32_t	log_page_size;
601
602	switch (page_id) {
603	case NVME_LOG_ERROR:
604		log_page_size = min(
605		    sizeof(struct nvme_error_information_entry) *
606		    (ctrlr->cdata.elpe + 1), /* elpe is zero-based */
607		    NVME_MAX_AER_LOG_SIZE);
608		break;
609	case NVME_LOG_HEALTH_INFORMATION:
610		log_page_size = sizeof(struct nvme_health_information_page);
611		break;
612	case NVME_LOG_FIRMWARE_SLOT:
613		log_page_size = sizeof(struct nvme_firmware_page);
614		break;
615	default:
616		log_page_size = 0;
617		break;
618	}
619
620	return (log_page_size);
621}
622
623static void
624nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
625{
626	struct nvme_async_event_request	*aer = arg;
627
628	/*
629	 * If the log page fetch for some reason completed with an error,
630	 *  don't pass log page data to the consumers.  In practice, this case
631	 *  should never happen.
632	 */
633	if (nvme_completion_is_error(cpl))
634		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
635		    aer->log_page_id, NULL, 0);
636	else
637		/*
638		 * Pass the cpl data from the original async event completion,
639		 *  not the log page fetch.
640		 */
641		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
642		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
643
644	/*
645	 * Repost another asynchronous event request to replace the one
646	 *  that just completed.
647	 */
648	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
649}
650
651static void
652nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
653{
654	struct nvme_async_event_request	*aer = arg;
655
656	if (cpl->status.sc == NVME_SC_ABORTED_SQ_DELETION) {
657		/*
658		 *  This is simulated when the controller is being shut down, to
659		 *  effectively abort outstanding asynchronous event requests
660		 *  and make sure all memory is freed.  Do not repost the
661		 *  request in this case.
662		 */
663		return;
664	}
665
666	printf("Asynchronous event occurred.\n");
667
668	/* Associated log page is in bits 23:16 of completion entry dw0. */
669	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
670
671	if (is_log_page_id_valid(aer->log_page_id)) {
672		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
673		    aer->log_page_id);
674		memcpy(&aer->cpl, cpl, sizeof(*cpl));
675		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
676		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
677		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
678		    aer);
679		/* Wait to notify consumers until after log page is fetched. */
680	} else {
681		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
682		    NULL, 0);
683
684		/*
685		 * Repost another asynchronous event request to replace the one
686		 *  that just completed.
687		 */
688		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
689	}
690}
691
692static void
693nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
694    struct nvme_async_event_request *aer)
695{
696	struct nvme_request *req;
697
698	aer->ctrlr = ctrlr;
699	req = nvme_allocate_request(NULL, 0, nvme_ctrlr_async_event_cb, aer);
700	aer->req = req;
701
702	/*
703	 * Disable the timeout here, since asynchronous event requests are,
704	 *  by their nature, never expected to time out.
705	 */
706	req->timeout = FALSE;
707	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
708	nvme_ctrlr_submit_admin_request(ctrlr, req);
709}
710
711static void
712nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
713{
714	union nvme_critical_warning_state	state;
715	struct nvme_async_event_request		*aer;
716	uint32_t				i;
717
718	state.raw = 0xFF;
719	state.bits.reserved = 0;
720	nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, NULL, NULL);
721
722	/* aerl is a zero-based value, so we need to add 1 here. */
723	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
724
725	/* Chatham doesn't support AERs. */
726	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
727		ctrlr->num_aers = 0;
728
729	for (i = 0; i < ctrlr->num_aers; i++) {
730		aer = &ctrlr->aer[i];
731		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
732	}
733}
734
735static void
736nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
737{
738
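	/*
	 * Both the coalescing time and threshold default to 0 (effectively
	 *  no coalescing) unless overridden by the hw.nvme.int_coal_*
	 *  tunables.
	 */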
739	ctrlr->int_coal_time = 0;
740	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
741	    &ctrlr->int_coal_time);
742
743	ctrlr->int_coal_threshold = 0;
744	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
745	    &ctrlr->int_coal_threshold);
746
747	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
748	    ctrlr->int_coal_threshold, NULL, NULL);
749}
750
751static void
752nvme_ctrlr_start(void *ctrlr_arg)
753{
754	struct nvme_controller *ctrlr = ctrlr_arg;
755	int i;
756
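	/*
	 * nvme_ctrlr_start() runs both at initial attach (via the config
	 *  intrhook) and after a controller reset, so reset every qpair to
	 *  its initial state before re-enabling it.
	 */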
757	nvme_qpair_reset(&ctrlr->adminq);
758	for (i = 0; i < ctrlr->num_io_queues; i++)
759		nvme_qpair_reset(&ctrlr->ioq[i]);
760
761	nvme_admin_qpair_enable(&ctrlr->adminq);
762
763	if (nvme_ctrlr_identify(ctrlr) != 0) {
764		nvme_ctrlr_fail(ctrlr);
765		return;
766	}
767
768	if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
769		nvme_ctrlr_fail(ctrlr);
770		return;
771	}
772
773	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
774		nvme_ctrlr_fail(ctrlr);
775		return;
776	}
777
778	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
779		nvme_ctrlr_fail(ctrlr);
780		return;
781	}
782
783	nvme_ctrlr_configure_aer(ctrlr);
784	nvme_ctrlr_configure_int_coalescing(ctrlr);
785
786	for (i = 0; i < ctrlr->num_io_queues; i++)
787		nvme_io_qpair_enable(&ctrlr->ioq[i]);
788
789	/*
790	 * Clear the software progress marker to 0, to indicate to pre-boot
791	 *  software that the OS driver loaded successfully.
792	 *
793	 * Chatham does not support this feature.
794	 */
795	if (pci_get_devid(ctrlr->dev) != CHATHAM_PCI_ID)
796		nvme_ctrlr_cmd_set_feature(ctrlr,
797		    NVME_FEAT_SOFTWARE_PROGRESS_MARKER, 0, NULL, 0, NULL, NULL);
798}
799
800void
801nvme_ctrlr_start_config_hook(void *arg)
802{
803	struct nvme_controller *ctrlr = arg;
804
805	nvme_ctrlr_start(ctrlr);
806	config_intrhook_disestablish(&ctrlr->config_hook);
807}
808
809static void
810nvme_ctrlr_reset_task(void *arg, int pending)
811{
812	struct nvme_controller	*ctrlr = arg;
813	int			status;
814
815	device_printf(ctrlr->dev, "resetting controller");
816	status = nvme_ctrlr_hw_reset(ctrlr);
817	/*
818	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
819	 *  handlers on this CPU that were blocked on a qpair lock. We want
820	 *  all nvme interrupts completed before proceeding with restarting the
821	 *  controller.
822	 *
823	 * XXX - any way to guarantee the interrupt handlers have quiesced?
824	 */
825	pause("nvmereset", hz / 10);
826	if (status == 0)
827		nvme_ctrlr_start(ctrlr);
828	else
829		nvme_ctrlr_fail(ctrlr);
830
831	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
832}
833
834static void
835nvme_ctrlr_intx_handler(void *arg)
836{
837	struct nvme_controller *ctrlr = arg;
838
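	/*
	 * Mask interrupts via INTMS while processing completions on the
	 *  admin and I/O queues, then unmask them via INTMC when done.
	 */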
839	nvme_mmio_write_4(ctrlr, intms, 1);
840
841	nvme_qpair_process_completions(&ctrlr->adminq);
842
843	if (ctrlr->ioq[0].cpl)
844		nvme_qpair_process_completions(&ctrlr->ioq[0]);
845
846	nvme_mmio_write_4(ctrlr, intmc, 1);
847}
848
849static int
850nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
851{
852
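	/*
	 * With a single shared INTx interrupt, fall back to one I/O queue;
	 *  the admin queue and ioq[0] are both serviced by the same handler.
	 */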
853	ctrlr->num_io_queues = 1;
854	ctrlr->per_cpu_io_queues = 0;
855	ctrlr->rid = 0;
856	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
857	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
858
859	if (ctrlr->res == NULL) {
860		device_printf(ctrlr->dev, "unable to allocate shared IRQ\n");
861		return (ENOMEM);
862	}
863
864	bus_setup_intr(ctrlr->dev, ctrlr->res,
865	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
866	    ctrlr, &ctrlr->tag);
867
868	if (ctrlr->tag == NULL) {
869		device_printf(ctrlr->dev,
870		    "unable to setup legacy interrupt handler\n");
871		return (ENOMEM);
872	}
873
874	return (0);
875}
876
877static int
878nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
879    struct thread *td)
880{
881	struct nvme_completion_poll_status	status;
882	struct nvme_controller			*ctrlr;
883
884	ctrlr = cdev->si_drv1;
885
886	switch (cmd) {
887	case NVME_IDENTIFY_CONTROLLER:
888#ifdef CHATHAM2
889		/*
890		 * Don't refresh data on Chatham, since Chatham returns
891		 *  garbage on IDENTIFY anyway.
892		 */
893		if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID) {
894			memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
895			break;
896		}
897#endif
898		/* Refresh data before returning to user. */
899		status.done = FALSE;
900		nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
901		    nvme_completion_poll_cb, &status);
902		while (status.done == FALSE)
903			DELAY(5);
904		if (nvme_completion_is_error(&status.cpl))
905			return (ENXIO);
906		memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
907		break;
908	case NVME_RESET_CONTROLLER:
909		nvme_ctrlr_reset(ctrlr);
910		break;
911	default:
912		return (ENOTTY);
913	}
914
915	return (0);
916}
917
918static struct cdevsw nvme_ctrlr_cdevsw = {
919	.d_version =	D_VERSION,
920	.d_flags =	0,
921	.d_ioctl =	nvme_ctrlr_ioctl
922};
923
924int
925nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
926{
927	union cap_lo_register	cap_lo;
928	union cap_hi_register	cap_hi;
929	int			num_vectors, per_cpu_io_queues, status = 0;
930	int			timeout_period;
931
932	ctrlr->dev = dev;
933
934	status = nvme_ctrlr_allocate_bar(ctrlr);
935
936	if (status != 0)
937		return (status);
938
939#ifdef CHATHAM2
940	if (pci_get_devid(dev) == CHATHAM_PCI_ID) {
941		status = nvme_ctrlr_allocate_chatham_bar(ctrlr);
942		if (status != 0)
943			return (status);
944		nvme_ctrlr_setup_chatham(ctrlr);
945	}
946#endif
947
948	/*
949	 * Software emulators may set the doorbell stride to something
950	 *  other than zero, but this driver is not set up to handle that.
951	 */
952	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
953	if (cap_hi.bits.dstrd != 0)
954		return (ENXIO);
955
956	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);
957
958	/* Get ready timeout value from controller, in units of 500ms. */
959	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
960	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;
961
962	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
963	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
964	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
965	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
966	ctrlr->timeout_period = timeout_period;
967
968	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
969	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
970
971	per_cpu_io_queues = 1;
972	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
973	ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;
974
975	if (ctrlr->per_cpu_io_queues)
976		ctrlr->num_io_queues = mp_ncpus;
977	else
978		ctrlr->num_io_queues = 1;
979
980	ctrlr->force_intx = 0;
981	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
982
983	ctrlr->enable_aborts = 0;
984	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
985
986	ctrlr->msix_enabled = 1;
987
988	if (ctrlr->force_intx) {
989		ctrlr->msix_enabled = 0;
990		goto intx;
991	}
992
993	/* One vector per IO queue, plus one vector for admin queue. */
994	num_vectors = ctrlr->num_io_queues + 1;
995
996	if (pci_msix_count(dev) < num_vectors) {
997		ctrlr->msix_enabled = 0;
998		goto intx;
999	}
1000
1001	if (pci_alloc_msix(dev, &num_vectors) != 0)
1002		ctrlr->msix_enabled = 0;
1003
1004intx:
1005
1006	if (!ctrlr->msix_enabled)
1007		nvme_ctrlr_configure_intx(ctrlr);
1008
1009	nvme_ctrlr_construct_admin_qpair(ctrlr);
1010
1011	status = nvme_ctrlr_construct_io_qpairs(ctrlr);
1012
1013	if (status != 0)
1014		return (status);
1015
1016	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
1017	    "nvme%d", device_get_unit(dev));
1018
1019	if (ctrlr->cdev == NULL)
1020		return (ENXIO);
1021
1022	ctrlr->cdev->si_drv1 = (void *)ctrlr;
1023
1024	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1025	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
1026	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1027
1028	ctrlr->is_resetting = 0;
1029	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1030
1031	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1032	mtx_init(&ctrlr->fail_req_lock, "nvme ctrlr fail req lock", NULL,
1033	    MTX_DEF);
1034	STAILQ_INIT(&ctrlr->fail_req);
1035	ctrlr->is_failed = FALSE;
1036
1037	return (0);
1038}
1039
1040void
1041nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1042{
1043	int				i;
1044
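	/*
	 * Quiesce the controller and free the taskqueue first, then tear
	 *  down namespaces, qpairs, and bus resources.
	 */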
1045	nvme_ctrlr_disable(ctrlr);
1046	taskqueue_free(ctrlr->taskqueue);
1047
1048	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1049		nvme_ns_destruct(&ctrlr->ns[i]);
1050
1051	if (ctrlr->cdev)
1052		destroy_dev(ctrlr->cdev);
1053
1054	for (i = 0; i < ctrlr->num_io_queues; i++) {
1055		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1056	}
1057
1058	free(ctrlr->ioq, M_NVME);
1059
1060	nvme_admin_qpair_destroy(&ctrlr->adminq);
1061
1062	if (ctrlr->resource != NULL) {
1063		bus_release_resource(dev, SYS_RES_MEMORY,
1064		    ctrlr->resource_id, ctrlr->resource);
1065	}
1066
1067	if (ctrlr->bar4_resource != NULL) {
1068		bus_release_resource(dev, SYS_RES_MEMORY,
1069		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1070	}
1071
1072#ifdef CHATHAM2
1073	if (ctrlr->chatham_resource != NULL) {
1074		bus_release_resource(dev, SYS_RES_MEMORY,
1075		    ctrlr->chatham_resource_id, ctrlr->chatham_resource);
1076	}
1077#endif
1078
1079	if (ctrlr->tag)
1080		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1081
1082	if (ctrlr->res)
1083		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1084		    rman_get_rid(ctrlr->res), ctrlr->res);
1085
1086	if (ctrlr->msix_enabled)
1087		pci_release_msi(dev);
1088}
1089
1090void
1091nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1092    struct nvme_request *req)
1093{
1094
1095	nvme_qpair_submit_request(&ctrlr->adminq, req);
1096}
1097
1098void
1099nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1100    struct nvme_request *req)
1101{
1102	struct nvme_qpair       *qpair;
1103
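	/*
	 * With per-CPU I/O queues, submit on the queue associated with the
	 *  current CPU; otherwise all I/O funnels through the single queue.
	 */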
1104	if (ctrlr->per_cpu_io_queues)
1105		qpair = &ctrlr->ioq[curcpu];
1106	else
1107		qpair = &ctrlr->ioq[0];
1108
1109	nvme_qpair_submit_request(qpair, req);
1110}
1111
1112device_t
1113nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1114{
1115
1116	return (ctrlr->dev);
1117}
1118
1119const struct nvme_controller_data *
1120nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1121{
1122
1123	return (&ctrlr->cdata);
1124}
1125