/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/unistd.h>

#include <geom/geom.h>

#include "nvme_private.h"

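/*
 * Per-thread state for the NVME_IO_TEST path, which submits commands
 * directly via nvme_ns_cmd_read()/nvme_ns_cmd_write() and reissues the
 * next command from the completion callback.
 */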
struct nvme_io_test_thread {
	uint32_t		idx;
	struct nvme_namespace	*ns;
	enum nvme_nvm_opcode	opc;
	struct timeval		start;
	void			*buf;
	uint32_t		size;
	uint32_t		time;
	uint64_t		io_completed;
};

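/*
 * State shared between nvme_ns_test() and its worker threads.  Each
 * worker claims a slot via td_idx and reports its completion count in
 * io_completed[]; td_active tracks how many workers are still running.
 */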
struct nvme_io_test_internal {
	struct nvme_namespace	*ns;
	enum nvme_nvm_opcode	opc;
	struct timeval		start;
	uint32_t		time;
	uint32_t		size;
	uint32_t		td_active;
	uint32_t		td_idx;
	uint32_t		flags;
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

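/*
 * bio completion handler for the NVME_BIO_TEST path: wake up the worker
 * thread sleeping on this bio.  The pool mutex is taken around wakeup()
 * so the wakeup cannot race with the worker entering msleep().
 */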
static void
nvme_ns_bio_test_cb(struct bio *bio)
{
	struct mtx *mtx;

	mtx = mtx_pool_find(mtxpool_sleep, bio);
	mtx_lock(mtx);
	wakeup(bio);
	mtx_unlock(mtx);
}

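/*
 * Worker thread for NVME_BIO_TEST: build struct bio requests and push
 * them through the namespace cdev's d_strategy routine one at a time,
 * sleeping for each completion, until the test time expires or an I/O
 * fails.
 */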
static void
nvme_ns_bio_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct cdevsw			*csw;
	struct mtx			*mtx;
	struct bio			*bio;
	struct cdev			*dev;
	void				*buf;
	struct timeval			t;
	uint64_t			io_completed = 0, offset;
	uint32_t			idx;
	int				ref;

	buf = malloc(io_test->size, M_NVME, M_WAITOK);
	idx = atomic_fetchadd_int(&io_test->td_idx, 1);
	dev = io_test->ns->cdev;

	offset = idx * 2048ULL * nvme_ns_get_sector_size(io_test->ns);

	while (1) {
		bio = g_alloc_bio();

		memset(bio, 0, sizeof(*bio));
		bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
		    BIO_READ : BIO_WRITE;
		bio->bio_done = nvme_ns_bio_test_cb;
		bio->bio_dev = dev;
		bio->bio_offset = offset;
		bio->bio_data = buf;
		bio->bio_bcount = io_test->size;

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
			csw = dev_refthread(dev, &ref);
		} else
			csw = dev->si_devsw;

		if (csw == NULL)
			panic("Unable to retrieve device switch");
		mtx = mtx_pool_find(mtxpool_sleep, bio);
		mtx_lock(mtx);
		(*csw->d_strategy)(bio);
		msleep(bio, mtx, PRIBIO, "biotestwait", 0);
		mtx_unlock(mtx);

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
			dev_relthread(dev, ref);
		}

		if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0))
			break;

		g_destroy_bio(bio);

		io_completed++;

		getmicrouptime(&t);
		timevalsub(&t, &io_test->start);

		if (t.tv_sec >= io_test->time)
			break;

		offset += io_test->size;
		if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
			offset = 0;
	}

	io_test->io_completed[idx] = io_completed;
	wakeup_one(io_test);

	free(buf, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

	kthread_exit();
}

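/*
 * Completion handler for the NVME_IO_TEST path.  Count the completion,
 * then either wake the worker (on error or once the test time has
 * elapsed) or immediately issue the next read/write from completion
 * context, keeping one command outstanding per thread.
 */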
static void
nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_io_test_thread	*tth = arg;
	struct timeval			t;

	tth->io_completed++;

	if (nvme_completion_is_error(cpl)) {
		printf("%s: error occurred\n", __func__);
		wakeup_one(tth);
		return;
	}

	getmicrouptime(&t);
	timevalsub(&t, &tth->start);

	if (t.tv_sec >= tth->time) {
		wakeup_one(tth);
		return;
	}

	switch (tth->opc) {
	case NVME_OPC_WRITE:
		nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	case NVME_OPC_READ:
		nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	default:
		break;
	}
}

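/*
 * Worker thread for NVME_IO_TEST: set up per-thread state, kick off the
 * first command by invoking the completion callback with a zeroed
 * (successful) completion, then sleep until the callback reports that
 * the test interval has elapsed.
 */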
static void
nvme_ns_io_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct nvme_io_test_thread	*tth;
	struct nvme_completion		cpl;
	int				error;

	tth = malloc(sizeof(*tth), M_NVME, M_WAITOK | M_ZERO);
	tth->ns = io_test->ns;
	tth->opc = io_test->opc;
	memcpy(&tth->start, &io_test->start, sizeof(tth->start));
	tth->buf = malloc(io_test->size, M_NVME, M_WAITOK);
	tth->size = io_test->size;
	tth->time = io_test->time;
	tth->idx = atomic_fetchadd_int(&io_test->td_idx, 1);

	memset(&cpl, 0, sizeof(cpl));

	nvme_ns_io_test_cb(tth, &cpl);

	error = tsleep(tth, 0, "test_wait", tth->time*hz*2);

	if (error)
		printf("%s: error = %d\n", __func__, error);

	io_test->io_completed[tth->idx] = tth->io_completed;
	wakeup_one(io_test);

	free(tth->buf, M_NVME);
	free(tth, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

	kthread_exit();
}

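/*
 * Handler for the NVME_IO_TEST and NVME_BIO_TEST ioctls: validate the
 * request, spawn one kernel thread per requested test thread, wait for
 * them all to finish, and copy the per-thread completion counts back to
 * the caller.
 */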
void
nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg)
{
	struct nvme_io_test		*io_test;
	struct nvme_io_test_internal	*io_test_internal;
	void				(*fn)(void *);
	int				i;

	io_test = (struct nvme_io_test *)arg;

	if ((io_test->opc != NVME_OPC_READ) &&
	    (io_test->opc != NVME_OPC_WRITE))
		return;

	if (io_test->size % nvme_ns_get_sector_size(ns))
		return;

	io_test_internal = malloc(sizeof(*io_test_internal), M_NVME,
	    M_WAITOK | M_ZERO);
	io_test_internal->opc = io_test->opc;
	io_test_internal->ns = ns;
	io_test_internal->td_active = io_test->num_threads;
	io_test_internal->time = io_test->time;
	io_test_internal->size = io_test->size;
	io_test_internal->flags = io_test->flags;

	if (cmd == NVME_IO_TEST)
		fn = nvme_ns_io_test;
	else
		fn = nvme_ns_bio_test;

	getmicrouptime(&io_test_internal->start);

	for (i = 0; i < io_test->num_threads; i++)
		kthread_add(fn, io_test_internal,
		    NULL, NULL, 0, 0, "nvme_io_test[%d]", i);

	tsleep(io_test_internal, 0, "nvme_test", io_test->time * 2 * hz);

	while (io_test_internal->td_active > 0)
		DELAY(10);

	memcpy(io_test->io_completed, io_test_internal->io_completed,
	    sizeof(io_test->io_completed));

	free(io_test_internal, M_NVME);
}