/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <dev/nvme/nvme.h>

#define NVD_STR		"nvd"

struct nvd_disk;

static disk_ioctl_t nvd_ioctl;
static disk_strategy_t nvd_strategy;

static void *nvd_new_disk(struct nvme_namespace *ns, void *ctrlr);
static void destroy_geom_disk(struct nvd_disk *ndisk);

static void *nvd_new_controller(struct nvme_controller *ctrlr);
static void nvd_controller_fail(void *ctrlr);

static int nvd_load(void);
static void nvd_unload(void);

MALLOC_DEFINE(M_NVD, "nvd", "nvd(4) allocations");

struct nvme_consumer *consumer_handle;

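/*
 * Per-namespace state: the GEOM disk, the namespace handle, and a bio
 * queue drained by a dedicated taskqueue thread.
 */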
struct nvd_disk {

	struct bio_queue_head	bioq;
	struct task		bioqtask;
	struct mtx		bioqlock;

	struct disk		*disk;
	struct taskqueue	*tq;
	struct nvme_namespace	*ns;

	uint32_t		cur_depth;

	TAILQ_ENTRY(nvd_disk)	global_tailq;
	TAILQ_ENTRY(nvd_disk)	ctrlr_tailq;
};

struct nvd_controller {

	TAILQ_ENTRY(nvd_controller)	tailq;
	TAILQ_HEAD(, nvd_disk)		disk_head;
};

static TAILQ_HEAD(, nvd_controller)	ctrlr_head;
static TAILQ_HEAD(disk_list, nvd_disk)	disk_head;

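/* Handle kernel module load/unload events. */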
static int
nvd_modevent(module_t mod, int type, void *arg)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = nvd_load();
		break;
	case MOD_UNLOAD:
		nvd_unload();
		break;
	default:
		break;
	}

	return (error);
}

moduledata_t nvd_mod = {
	NVD_STR,
	(modeventhand_t)nvd_modevent,
	0
};

DECLARE_MODULE(nvd, nvd_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(nvd, 1);
MODULE_DEPEND(nvd, nvme, 1, 1, 1);

static int
nvd_load()
{

	TAILQ_INIT(&ctrlr_head);
	TAILQ_INIT(&disk_head);

	consumer_handle = nvme_register_consumer(nvd_new_disk,
	    nvd_new_controller, NULL, nvd_controller_fail);

	return (consumer_handle != NULL ? 0 : -1);
}

static void
nvd_unload()
{
	struct nvd_controller	*ctrlr;
	struct nvd_disk		*disk;

	while (!TAILQ_EMPTY(&ctrlr_head)) {
		ctrlr = TAILQ_FIRST(&ctrlr_head);
		TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
		free(ctrlr, M_NVD);
	}

	while (!TAILQ_EMPTY(&disk_head)) {
		disk = TAILQ_FIRST(&disk_head);
		TAILQ_REMOVE(&disk_head, disk, global_tailq);
		destroy_geom_disk(disk);
		free(disk, M_NVD);
	}

	nvme_unregister_consumer(consumer_handle);
}

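/*
 * GEOM strategy routine: queue the bio and hand it off to the per-disk
 * taskqueue so the caller is not blocked during command submission.
 */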
static void
nvd_strategy(struct bio *bp)
{
	struct nvd_disk *ndisk;

	ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;

	mtx_lock(&ndisk->bioqlock);
	bioq_insert_tail(&ndisk->bioq, bp);
	mtx_unlock(&ndisk->bioqlock);
	taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
}

static int
nvd_ioctl(struct disk *ndisk, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	int ret = 0;

	switch (cmd) {
	default:
		ret = EIO;
	}

	return (ret);
}

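/* I/O completion callback invoked by the nvme(4) driver. */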
static void
nvd_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio *bp;
	struct nvd_disk *ndisk;

	bp = (struct bio *)arg;

	ndisk = bp->bio_disk->d_drv1;

	atomic_add_int(&ndisk->cur_depth, -1);

	biodone(bp);
}

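/*
 * Taskqueue handler: drain the bio queue and submit each bio to the
 * underlying NVMe namespace.
 */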
static void
nvd_bioq_process(void *arg, int pending)
{
	struct nvd_disk *ndisk = arg;
	struct bio *bp;
	int err;

	for (;;) {
		mtx_lock(&ndisk->bioqlock);
		bp = bioq_takefirst(&ndisk->bioq);
		mtx_unlock(&ndisk->bioqlock);
		if (bp == NULL)
			break;

#ifdef BIO_ORDERED
		/*
		 * BIO_ORDERED flag dictates that all outstanding bios
		 *  must be completed before processing the bio with
		 *  BIO_ORDERED flag set.
		 */
		if (bp->bio_flags & BIO_ORDERED) {
			while (ndisk->cur_depth > 0) {
				pause("nvd flush", 1);
			}
		}
#endif

		bp->bio_driver1 = NULL;
		atomic_add_int(&ndisk->cur_depth, 1);

		err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);

		if (err) {
			atomic_add_int(&ndisk->cur_depth, -1);
			bp->bio_error = err;
			bp->bio_flags |= BIO_ERROR;
			bp->bio_resid = bp->bio_bcount;
			biodone(bp);
		}

#ifdef BIO_ORDERED
		/*
		 * BIO_ORDERED flag dictates that the bio with BIO_ORDERED
		 *  flag set must be completed before proceeding with
		 *  additional bios.
		 */
		if (bp->bio_flags & BIO_ORDERED) {
			while (ndisk->cur_depth > 0) {
				pause("nvd flush", 1);
			}
		}
#endif
	}
}

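/* Consumer callback: a new NVMe controller has been attached. */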
static void *
nvd_new_controller(struct nvme_controller *ctrlr)
{
	struct nvd_controller	*nvd_ctrlr;

	nvd_ctrlr = malloc(sizeof(struct nvd_controller), M_NVD,
	    M_ZERO | M_WAITOK);

	TAILQ_INIT(&nvd_ctrlr->disk_head);
	TAILQ_INSERT_TAIL(&ctrlr_head, nvd_ctrlr, tailq);

	return (nvd_ctrlr);
}

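/*
 * Consumer callback: a new namespace has been attached.  Allocate and
 * configure a GEOM disk that fronts the namespace.
 */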
static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
	uint8_t			descr[NVME_MODEL_NUMBER_LENGTH+1];
	struct nvd_disk		*ndisk;
	struct disk		*disk;
	struct nvd_controller	*ctrlr = ctrlr_arg;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

	disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_name = NVD_STR;
	disk->d_drv1 = ndisk;

	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);

	if (TAILQ_EMPTY(&disk_head))
		disk->d_unit = 0;
	else
		disk->d_unit =
		    TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

	disk->d_flags = 0;

	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;

	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
	disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

	/*
	 * d_ident and d_descr are both far bigger than the length of either
	 *  the serial or model number strings.
	 */
	nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);

	nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
	    NVME_MODEL_NUMBER_LENGTH);

#if __FreeBSD_version >= 900034
	strlcpy(disk->d_descr, descr, sizeof(descr));
#endif

	ndisk->ns = ns;
	ndisk->disk = disk;
	ndisk->cur_depth = 0;

	mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);

	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

	disk_create(disk, DISK_VERSION);

	printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
	printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
		(uintmax_t)disk->d_mediasize / (1024*1024),
		(uintmax_t)disk->d_mediasize / disk->d_sectorsize,
		disk->d_sectorsize);

	return (NULL);
}

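/* Destroy the GEOM disk and fail any bios still waiting in the queue. */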
static void
destroy_geom_disk(struct nvd_disk *ndisk)
{
	struct bio	*bp;
	struct disk	*disk;
	uint32_t	unit;
	int		cnt = 0;

	disk = ndisk->disk;
	unit = disk->d_unit;
	taskqueue_free(ndisk->tq);

	disk_destroy(ndisk->disk);

	mtx_lock(&ndisk->bioqlock);
	for (;;) {
		bp = bioq_takefirst(&ndisk->bioq);
		if (bp == NULL)
			break;
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		cnt++;
		biodone(bp);
	}

	printf(NVD_STR"%u: lost device - %d outstanding\n", unit, cnt);
	printf(NVD_STR"%u: removing device entry\n", unit);

	mtx_unlock(&ndisk->bioqlock);

	mtx_destroy(&ndisk->bioqlock);
}

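/*
 * Consumer callback: the controller has failed.  Tear down all disks
 * attached to it.
 */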
static void
nvd_controller_fail(void *ctrlr_arg)
{
	struct nvd_controller	*ctrlr = ctrlr_arg;
	struct nvd_disk		*disk;

	while (!TAILQ_EMPTY(&ctrlr->disk_head)) {
		disk = TAILQ_FIRST(&ctrlr->disk_head);
		TAILQ_REMOVE(&disk_head, disk, global_tailq);
		TAILQ_REMOVE(&ctrlr->disk_head, disk, ctrlr_tailq);
		destroy_geom_disk(disk);
		free(disk, M_NVD);
	}

	TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
	free(ctrlr, M_NVD);
}