nvd.c revision 328664
/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/nvd/nvd.c 328664 2018-02-01 15:27:47Z mav $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <dev/nvme/nvme.h>

#define NVD_STR		"nvd"

struct nvd_disk;

static disk_ioctl_t nvd_ioctl;
static disk_strategy_t nvd_strategy;
static dumper_t nvd_dump;

static void nvd_done(void *arg, const struct nvme_completion *cpl);

static void *nvd_new_disk(struct nvme_namespace *ns, void *ctrlr);
static void destroy_geom_disk(struct nvd_disk *ndisk);

static void *nvd_new_controller(struct nvme_controller *ctrlr);
static void nvd_controller_fail(void *ctrlr);

static int nvd_load(void);
static void nvd_unload(void);

MALLOC_DEFINE(M_NVD, "nvd", "nvd(4) allocations");

struct nvme_consumer *consumer_handle;

struct nvd_disk {

	struct bio_queue_head	bioq;
	struct task		bioqtask;
	struct mtx		bioqlock;

	struct disk		*disk;
	struct taskqueue	*tq;
	struct nvme_namespace	*ns;

	uint32_t		cur_depth;
	uint32_t		ordered_in_flight;

	TAILQ_ENTRY(nvd_disk)	global_tailq;
	TAILQ_ENTRY(nvd_disk)	ctrlr_tailq;
};

struct nvd_controller {

	TAILQ_ENTRY(nvd_controller)	tailq;
	TAILQ_HEAD(, nvd_disk)		disk_head;
};

static TAILQ_HEAD(, nvd_controller)	ctrlr_head;
static TAILQ_HEAD(disk_list, nvd_disk)	disk_head;

static SYSCTL_NODE(_hw, OID_AUTO, nvd, CTLFLAG_RD, 0, "nvd driver parameters");
/*
 * The NVMe specification does not define a maximum or optimal delete size, so
 *  technically the maximum delete size is min(full size of the namespace,
 *  2^32 - 1 LBAs).  However, a single delete covering a multi-TB namespace
 *  may take much longer to complete than the nvme(4) I/O timeout period, so
 *  choose a default that is still large enough to minimize the total number
 *  of delete operations.
 */
static uint64_t nvd_delete_max = (1024 * 1024 * 1024);  /* 1GB */
SYSCTL_UQUAD(_hw_nvd, OID_AUTO, delete_max, CTLFLAG_RDTUN, &nvd_delete_max, 0,
	     "nvd maximum BIO_DELETE size in bytes");

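/*
 * Module event handler: set up consumer state on load and tear it down
 * again on unload; all other events are ignored.
 */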
static int
nvd_modevent(module_t mod, int type, void *arg)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = nvd_load();
		break;
	case MOD_UNLOAD:
		nvd_unload();
		break;
	default:
		break;
	}

	return (error);
}

moduledata_t nvd_mod = {
	NVD_STR,
	(modeventhand_t)nvd_modevent,
	0
};

DECLARE_MODULE(nvd, nvd_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(nvd, 1);
MODULE_DEPEND(nvd, nvme, 1, 1, 1);

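/*
 * Register nvd as an nvme(4) consumer.  nvme(4) will invoke
 * nvd_new_controller for each attached controller, nvd_new_disk for each
 * namespace discovered on it, and nvd_controller_fail if a controller is
 * lost.
 */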
static int
nvd_load(void)
{

	TAILQ_INIT(&ctrlr_head);
	TAILQ_INIT(&disk_head);

	consumer_handle = nvme_register_consumer(nvd_new_disk,
	    nvd_new_controller, NULL, nvd_controller_fail);

	return (consumer_handle != NULL ? 0 : -1);
}

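/*
 * Undo nvd_load: free all controller state, destroy and free every disk,
 * then drop our nvme(4) consumer registration.
 */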
static void
nvd_unload(void)
{
	struct nvd_controller	*ctrlr;
	struct nvd_disk		*disk;

	while (!TAILQ_EMPTY(&ctrlr_head)) {
		ctrlr = TAILQ_FIRST(&ctrlr_head);
		TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
		free(ctrlr, M_NVD);
	}

	while (!TAILQ_EMPTY(&disk_head)) {
		disk = TAILQ_FIRST(&disk_head);
		TAILQ_REMOVE(&disk_head, disk, global_tailq);
		destroy_geom_disk(disk);
		free(disk, M_NVD);
	}

	nvme_unregister_consumer(consumer_handle);
}

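/*
 * Hand a bio to nvme(4) for processing.  cur_depth tracks the number of
 * bios outstanding on this namespace; if submission fails, the depth and
 * ordered-bio accounting is rolled back and the bio is completed with an
 * error.
 */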
static int
nvd_bio_submit(struct nvd_disk *ndisk, struct bio *bp)
{
	int err;

	bp->bio_driver1 = NULL;
	atomic_add_int(&ndisk->cur_depth, 1);
	err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);
	if (err) {
		atomic_add_int(&ndisk->cur_depth, -1);
		if (__predict_false(bp->bio_flags & BIO_ORDERED))
			atomic_add_int(&ndisk->ordered_in_flight, -1);
		bp->bio_error = err;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
		return (-1);
	}

	return (0);
}

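/*
 * GEOM strategy routine.  In the common case the bio is submitted directly
 * from the caller's context.  While any BIO_ORDERED bio is in flight, all
 * bios are instead routed through the per-disk task queue, which serializes
 * submission so the ordering barrier is preserved.
 */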
static void
nvd_strategy(struct bio *bp)
{
	struct nvd_disk *ndisk;

	ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;

	if (__predict_false(bp->bio_flags & BIO_ORDERED))
		atomic_add_int(&ndisk->ordered_in_flight, 1);

	if (__predict_true(ndisk->ordered_in_flight == 0)) {
		nvd_bio_submit(ndisk, bp);
		return;
	}

	/*
	 * There are ordered bios in flight, so we need to submit
	 *  bios through the task queue to enforce ordering.
	 */
	mtx_lock(&ndisk->bioqlock);
	bioq_insert_tail(&ndisk->bioq, bp);
	mtx_unlock(&ndisk->bioqlock);
	taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
}

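/*
 * No ioctls are currently supported; fail any request that reaches us.
 */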
static int
nvd_ioctl(struct disk *ndisk, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	int ret = 0;

	switch (cmd) {
	default:
		ret = EIO;
	}

	return (ret);
}

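/*
 * Kernel crash-dump entry point: write the buffer at the given media offset
 * via nvme_ns_dump(), bypassing the normal bio submission path.
 */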
static int
nvd_dump(void *arg, void *virt, vm_offset_t phys, off_t offset, size_t len)
{
	struct nvd_disk *ndisk;
	struct disk *dp;

	dp = arg;
	ndisk = dp->d_drv1;

	return (nvme_ns_dump(ndisk->ns, virt, offset, len));
}

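/*
 * Completion callback passed to nvme_ns_bio_process(): unwind the depth and
 * ordered-bio accounting taken at submission time, then finish the bio.
 */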
static void
nvd_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio *bp;
	struct nvd_disk *ndisk;

	bp = (struct bio *)arg;

	ndisk = bp->bio_disk->d_drv1;

	atomic_add_int(&ndisk->cur_depth, -1);
	if (__predict_false(bp->bio_flags & BIO_ORDERED))
		atomic_add_int(&ndisk->ordered_in_flight, -1);

	biodone(bp);
}

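/*
 * Task queue handler: drain the per-disk bio queue in order.  After
 * submitting an ordered bio, stall until the queue depth drops to zero so
 * that everything before the barrier completes before anything after it is
 * issued.
 */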
static void
nvd_bioq_process(void *arg, int pending)
{
	struct nvd_disk *ndisk = arg;
	struct bio *bp;

	for (;;) {
		mtx_lock(&ndisk->bioqlock);
		bp = bioq_takefirst(&ndisk->bioq);
		mtx_unlock(&ndisk->bioqlock);
		if (bp == NULL)
			break;

		if (nvd_bio_submit(ndisk, bp) != 0)
			continue;

#ifdef BIO_ORDERED
		/*
		 * A bio with BIO_ORDERED set must complete before any
		 *  subsequent bios may be submitted.
		 */
		if (bp->bio_flags & BIO_ORDERED) {
			while (ndisk->cur_depth > 0) {
				pause("nvd flush", 1);
			}
		}
#endif
	}
}

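/*
 * nvme(4) consumer callback: a new controller has attached.  Allocate the
 * per-controller state and link it onto the global controller list; the
 * returned cookie is handed back to us in later per-namespace callbacks.
 */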
static void *
nvd_new_controller(struct nvme_controller *ctrlr)
{
	struct nvd_controller	*nvd_ctrlr;

	nvd_ctrlr = malloc(sizeof(struct nvd_controller), M_NVD,
	    M_ZERO | M_WAITOK);

	TAILQ_INIT(&nvd_ctrlr->disk_head);
	TAILQ_INSERT_TAIL(&ctrlr_head, nvd_ctrlr, tailq);

	return (nvd_ctrlr);
}

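/*
 * nvme(4) consumer callback: a namespace has been discovered on the given
 * controller.  Allocate an nvd_disk, populate a GEOM disk from the
 * namespace properties (transfer size, sector size, capacity, TRIM and
 * flush support), create the disk and its submission task queue, and link
 * the new disk onto the global and per-controller lists.
 */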
static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
	uint8_t			descr[NVME_MODEL_NUMBER_LENGTH+1];
	struct nvd_disk		*ndisk;
	struct disk		*disk;
	struct nvd_controller	*ctrlr = ctrlr_arg;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

	disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_dump = nvd_dump;
	disk->d_name = NVD_STR;
	disk->d_drv1 = ndisk;

	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);
	disk->d_delmaxsize = (off_t)nvme_ns_get_size(ns);
	if (disk->d_delmaxsize > nvd_delete_max)
		disk->d_delmaxsize = nvd_delete_max;
	disk->d_stripesize = nvme_ns_get_stripesize(ns);

	if (TAILQ_EMPTY(&disk_head))
		disk->d_unit = 0;
	else
		disk->d_unit =
		    TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

	disk->d_flags = DISKFLAG_DIRECT_COMPLETION;

	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;

	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
	disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

	/*
	 * d_ident and d_descr are both far bigger than the length of either
	 *  the serial or model number strings.
	 */
	nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
	nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
	    NVME_MODEL_NUMBER_LENGTH);
	strlcpy(disk->d_descr, descr, sizeof(descr));

	disk->d_rotation_rate = DISK_RR_NON_ROTATING;

	ndisk->ns = ns;
	ndisk->disk = disk;
	ndisk->cur_depth = 0;
	ndisk->ordered_in_flight = 0;

	mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);

	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

	disk_create(disk, DISK_VERSION);

	printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
	printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
		(uintmax_t)disk->d_mediasize / (1024*1024),
		(uintmax_t)disk->d_mediasize / disk->d_sectorsize,
		disk->d_sectorsize);

	return (NULL);
}

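/*
 * Tear down one nvd disk: free its task queue, destroy the GEOM disk, and
 * fail any bios still sitting on the queue with EIO, reporting how many
 * were lost.
 */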
static void
destroy_geom_disk(struct nvd_disk *ndisk)
{
	struct bio	*bp;
	struct disk	*disk;
	uint32_t	unit;
	int		cnt = 0;

	disk = ndisk->disk;
	unit = disk->d_unit;
	taskqueue_free(ndisk->tq);

	disk_destroy(ndisk->disk);

	mtx_lock(&ndisk->bioqlock);
	for (;;) {
		bp = bioq_takefirst(&ndisk->bioq);
		if (bp == NULL)
			break;
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		cnt++;
		biodone(bp);
	}

	printf(NVD_STR"%u: lost device - %d outstanding\n", unit, cnt);
	printf(NVD_STR"%u: removing device entry\n", unit);

	mtx_unlock(&ndisk->bioqlock);

	mtx_destroy(&ndisk->bioqlock);
}

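/*
 * nvme(4) consumer callback: the controller has failed.  Detach and free
 * every disk that hangs off it, then remove and free the controller state
 * itself.
 */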
static void
nvd_controller_fail(void *ctrlr_arg)
{
	struct nvd_controller	*ctrlr = ctrlr_arg;
	struct nvd_disk		*disk;

	while (!TAILQ_EMPTY(&ctrlr->disk_head)) {
		disk = TAILQ_FIRST(&ctrlr->disk_head);
		TAILQ_REMOVE(&disk_head, disk, global_tailq);
		TAILQ_REMOVE(&ctrlr->disk_head, disk, ctrlr_tailq);
		destroy_geom_disk(disk);
		free(disk, M_NVD);
	}

	TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
	free(ctrlr, M_NVD);
}