/*-
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

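/*
 * An NVMe consumer (e.g. nvd(4)) registers a set of callbacks that the
 * driver invokes as controllers and namespaces are discovered, when
 * asynchronous events arrive, and when a controller fails.
 */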
struct nvme_consumer {
	uint32_t		id;
	nvme_cons_ns_fn_t	ns_fn;
	nvme_cons_ctrlr_fn_t	ctrlr_fn;
	nvme_cons_async_fn_t	async_fn;
	nvme_cons_fail_fn_t	fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define	INVALID_CONSUMER_ID	0xFFFF

uma_zone_t	nvme_request_zone;
int32_t		nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static int    nvme_probe(device_t);
static int    nvme_attach(device_t);
static int    nvme_detach(device_t);
static int    nvme_modevent(module_t mod, int type, void *arg);

static devclass_t nvme_devclass;

static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     nvme_probe),
	DEVMETHOD(device_attach,    nvme_attach),
	DEVMETHOD(device_detach,    nvme_detach),
	{ 0, 0 }
};

static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, nvme_modevent, 0);
MODULE_VERSION(nvme, 1);

static struct _pcsid
{
	u_int32_t   type;
	const char  *desc;
} pci_ids[] = {
	{ 0x01118086,		"NVMe Controller"  },
	{ CHATHAM_PCI_ID,	"Chatham Prototype NVMe Controller"  },
	{ IDT32_PCI_ID,		"IDT NVMe Controller (32 channel)"  },
	{ IDT8_PCI_ID,		"IDT NVMe Controller (8 channel)" },
	{ 0x00000000,		NULL  }
};

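/*
 * Match first on an exact PCI device ID from the table above; failing that,
 * claim any device that advertises the NVMe storage class, subclass and
 * programming interface (when those constants are defined by the headers).
 */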
static int
nvme_probe (device_t device)
{
	struct _pcsid	*ep;
	u_int32_t	type;

	type = pci_get_devid(device);
	ep = pci_ids;

	while (ep->type && ep->type != type)
		++ep;

	if (ep->desc) {
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}

#if defined(PCIS_STORAGE_NVM)
	if (pci_get_class(device)    == PCIC_STORAGE &&
	    pci_get_subclass(device) == PCIS_STORAGE_NVM &&
	    pci_get_progif(device)   == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
		device_set_desc(device, "Generic NVMe Device");
		return (BUS_PROBE_GENERIC);
	}
#endif

	return (ENXIO);
}

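/*
 * Module-wide initialization, run once at SI_SUB_DRIVERS: create the UMA
 * zone used to allocate per-I/O request structures and mark every consumer
 * slot as free.
 */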
static void
nvme_init(void)
{
	uint32_t	i;

	nvme_request_zone = uma_zcreate("nvme_request",
	    sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);

	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
	uma_zdestroy(nvme_request_zone);
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

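/*
 * MOD_LOAD and MOD_UNLOAD currently have nothing to do; module-wide setup
 * and teardown are handled by the SYSINIT/SYSUNINIT hooks above.
 */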
static void
nvme_load(void)
{
}

static void
nvme_unload(void)
{
}

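/*
 * MOD_SHUTDOWN handler: ask every attached controller to perform an orderly
 * NVMe shutdown so the devices can flush any volatile state before the
 * system powers off or reboots.
 */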
static void
nvme_shutdown(void)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev = 0; dev < devcount; dev++) {
		ctrlr = DEVICE2SOFTC(devlist[dev]);
		nvme_ctrlr_shutdown(ctrlr);
	}

	free(devlist, M_TEMP);
}

static int
nvme_modevent(module_t mod, int type, void *arg)
{

	switch (type) {
	case MOD_LOAD:
		nvme_load();
		break;
	case MOD_UNLOAD:
		nvme_unload();
		break;
	case MOD_SHUTDOWN:
		nvme_shutdown();
		break;
	default:
		break;
	}

	return (0);
}

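/* Print the fields of a submission queue entry, for debugging. */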
void
nvme_dump_command(struct nvme_command *cmd)
{
	printf(
"opc:%x f:%x r1:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
	    cmd->opc, cmd->fuse, cmd->rsvd1, cmd->cid, cmd->nsid,
	    cmd->rsvd2, cmd->rsvd3,
	    (uintmax_t)cmd->mptr, (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2,
	    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
	    cmd->cdw15);
}

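/* Print the fields of a completion queue entry, including the status bits. */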
void
nvme_dump_completion(struct nvme_completion *cpl)
{
	printf("cdw0:%08x sqhd:%04x sqid:%04x "
	    "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
	    cpl->cdw0, cpl->sqhd, cpl->sqid,
	    cpl->cid, cpl->status.p, cpl->status.sc, cpl->status.sct,
	    cpl->status.m, cpl->status.dnr);
}

static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);

	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	/*
	 * Reset the controller twice to ensure we do a transition from
	 *  cc.en==1 to cc.en==0.  This is because we don't really know
	 *  what state the controller was left in when boot handed off
	 *  to the OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	nvme_sysctl_initialize_ctrlr(ctrlr);

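	/*
	 * Enable PCI bus mastering so the controller can DMA, then defer
	 *  the rest of controller initialization (and consumer
	 *  notification) to a config intrhook, which runs once interrupts
	 *  are enabled.
	 */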
	pci_enable_busmaster(dev);

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}

static int
nvme_detach (device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);

	nvme_ctrlr_destruct(ctrlr, dev);
	pci_disable_busmaster(dev);
	return (0);
}

static void
nvme_notify(struct nvme_consumer *cons,
	    struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			cmpset, ns_idx;

	/*
	 * The consumer may register itself after the nvme devices
	 *  have registered with the kernel, but before the
	 *  driver has completed initialization.  In that case,
	 *  return here, and when initialization completes, the
	 *  controller will make sure the consumer gets notified.
	 */
	if (!ctrlr->is_initialized)
		return;

	cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);

	if (cmpset == 0)
		return;

	if (cons->ctrlr_fn != NULL)
		ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
	else
		ctrlr_cookie = NULL;
	ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
	if (ctrlr->is_failed) {
		if (cons->fail_fn != NULL)
			(*cons->fail_fn)(ctrlr_cookie);
		/*
		 * Do not notify consumers about the namespaces of a
		 *  failed controller.
		 */
		return;
	}
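	/*
	 * Hand each namespace to the consumer and record the
	 *  per-namespace cookie it returns.
	 */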
	for (ns_idx = 0; ns_idx < ctrlr->cdata.nn; ns_idx++) {
		ns = &ctrlr->ns[ns_idx];
		if (cons->ns_fn != NULL)
			ns->cons_cookie[cons->id] =
			    (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

void
nvme_notify_new_controller(struct nvme_controller *ctrlr)
{
	int i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
			nvme_notify(&nvme_consumer[i], ctrlr);
		}
	}
}

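/*
 * Notify a newly registered consumer about every controller that has
 * already attached.
 */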
static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		nvme_notify(cons, ctrlr);
	}

	free(devlist, M_TEMP);
}

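/*
 * Fan an asynchronous event (and any associated log page data) out to every
 * registered consumer that supplied an async callback.
 */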
void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
			    const struct nvme_completion *async_cpl,
			    uint32_t log_page_id, void *log_page_buffer,
			    uint32_t log_page_size)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL)
			(*cons->async_fn)(ctrlr->cons_cookie[i], async_cpl,
			    log_page_id, log_page_buffer, log_page_size);
	}
}

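/*
 * Tell every registered consumer with a failure callback that this
 * controller has failed.
 */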
void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
			cons->fail_fn(ctrlr->cons_cookie[i]);
	}
}

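/*
 * Claim the first free consumer slot, record the supplied callbacks, and
 * immediately notify the new consumer about any controllers that have
 * already attached.
 */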
struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
		       nvme_cons_async_fn_t async_fn,
		       nvme_cons_fail_fn_t fail_fn)
{
	int i;

	/*
	 * TODO: add locking around consumer registration.  Not an issue
	 *  right now since we only have one nvme consumer - nvd(4).
	 */
	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
			nvme_consumer[i].id = i;
			nvme_consumer[i].ns_fn = ns_fn;
			nvme_consumer[i].ctrlr_fn = ctrlr_fn;
			nvme_consumer[i].async_fn = async_fn;
			nvme_consumer[i].fail_fn = fail_fn;

			nvme_notify_new_consumer(&nvme_consumer[i]);
			return (&nvme_consumer[i]);
		}

	printf("nvme(4): consumer not registered - no slots available\n");
	return (NULL);
}

void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

	consumer->id = INVALID_CONSUMER_ID;
}

void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/*
	 * Copy status into the argument passed by the caller, so that
	 *  the caller can check the status to determine if the
	 *  request passed or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
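	/*
	 * Make sure the copied completion is visible before the polling
	 *  thread observes done == TRUE.
	 */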
	wmb();
	status->done = TRUE;
}