libnvmm.c revision 1.4
1/*	$NetBSD: libnvmm.c,v 1.4 2018/12/12 10:42:34 maxv Exp $	*/
2
3/*
4 * Copyright (c) 2018 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32#include <sys/cdefs.h>
33
34#include <stdio.h>
35#include <stdlib.h>
36#include <string.h>
37#include <unistd.h>
38#include <fcntl.h>
39#include <errno.h>
40#include <sys/ioctl.h>
41#include <sys/mman.h>
42#include <sys/queue.h>
43
44#include "nvmm.h"
45
/*
 * One contiguous guest-physical <-> host-virtual mapping, registered by
 * nvmm_gpa_map() and tracked per machine so later calls can validate
 * overlaps and translate GPAs back to HVAs.
 */
typedef struct __area {
	LIST_ENTRY(__area) list;	/* linkage in the machine's area list */
	gpaddr_t gpa;			/* guest-physical base address */
	uintptr_t hva;			/* host-virtual base address */
	size_t size;			/* length of the mapping, in bytes */
} area_t;

typedef LIST_HEAD(, __area) area_list_t;

/* Descriptor on /dev/nvmm; -1 until nvmm_init() has succeeded. */
static int nvmm_fd = -1;
/* Host page size, filled in by nvmm_init(). */
static size_t nvmm_page_size = 0;
57
58/* -------------------------------------------------------------------------- */
59
60static bool
61__area_isvalid(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
62    size_t size)
63{
64	area_list_t *areas = mach->areas;
65	area_t *ent;
66
67	LIST_FOREACH(ent, areas, list) {
68		/* Collision on HVA */
69		if (hva >= ent->hva && hva < ent->hva + ent->size) {
70			return false;
71		}
72		if (hva + size >= ent->hva &&
73		    hva + size < ent->hva + ent->size) {
74			return false;
75		}
76		if (hva <= ent->hva && hva + size >= ent->hva + ent->size) {
77			return false;
78		}
79
80		/* Collision on GPA */
81		if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
82			return false;
83		}
84		if (gpa + size >= ent->gpa &&
85		    gpa + size < ent->gpa + ent->size) {
86			return false;
87		}
88		if (gpa <= ent->gpa && gpa + size >= ent->gpa + ent->size) {
89			return false;
90		}
91	}
92
93	return true;
94}
95
96static int
97__area_add(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa, size_t size)
98{
99	area_list_t *areas = mach->areas;
100	area_t *area;
101
102	if (!__area_isvalid(mach, hva, gpa, size)) {
103		errno = EINVAL;
104		return -1;
105	}
106
107	area = malloc(sizeof(*area));
108	if (area == NULL)
109		return -1;
110	area->gpa = gpa;
111	area->hva = hva;
112	area->size = size;
113
114	LIST_INSERT_HEAD(areas, area, list);
115
116	return 0;
117}
118
119static int
120__area_delete(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
121    size_t size)
122{
123	area_list_t *areas = mach->areas;
124	area_t *ent, *nxt;
125
126	LIST_FOREACH_SAFE(ent, areas, list, nxt) {
127		if (hva == ent->hva && gpa == ent->gpa && size == ent->size) {
128			LIST_REMOVE(ent, list);
129			free(ent);
130			return 0;
131		}
132	}
133
134	return -1;
135}
136
137static void
138__area_remove_all(struct nvmm_machine *mach)
139{
140	area_list_t *areas = mach->areas;
141	area_t *ent;
142
143	while ((ent = LIST_FIRST(areas)) != NULL) {
144		LIST_REMOVE(ent, list);
145		free(ent);
146	}
147
148	free(areas);
149}
150
151/* -------------------------------------------------------------------------- */
152
153static int
154nvmm_init(void)
155{
156	if (nvmm_fd != -1)
157		return 0;
158	nvmm_fd = open("/dev/nvmm", O_RDWR);
159	if (nvmm_fd == -1)
160		return -1;
161	nvmm_page_size = sysconf(_SC_PAGESIZE);
162	return 0;
163}
164
165int
166nvmm_capability(struct nvmm_capability *cap)
167{
168	struct nvmm_ioc_capability args;
169	int ret;
170
171	if (nvmm_init() == -1) {
172		return -1;
173	}
174
175	ret = ioctl(nvmm_fd, NVMM_IOC_CAPABILITY, &args);
176	if (ret == -1)
177		return -1;
178
179	memcpy(cap, &args.cap, sizeof(args.cap));
180
181	return 0;
182}
183
184int
185nvmm_machine_create(struct nvmm_machine *mach)
186{
187	struct nvmm_ioc_machine_create args;
188	area_list_t *areas;
189	int ret;
190
191	if (nvmm_init() == -1) {
192		return -1;
193	}
194
195	areas = calloc(1, sizeof(*areas));
196	if (areas == NULL)
197		return -1;
198
199	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CREATE, &args);
200	if (ret == -1) {
201		free(areas);
202		return -1;
203	}
204
205	memset(mach, 0, sizeof(*mach));
206	LIST_INIT(areas);
207	mach->areas = areas;
208	mach->machid = args.machid;
209
210	return 0;
211}
212
213int
214nvmm_machine_destroy(struct nvmm_machine *mach)
215{
216	struct nvmm_ioc_machine_destroy args;
217	int ret;
218
219	if (nvmm_init() == -1) {
220		return -1;
221	}
222
223	args.machid = mach->machid;
224
225	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_DESTROY, &args);
226	if (ret == -1)
227		return -1;
228
229	__area_remove_all(mach);
230
231	return 0;
232}
233
234int
235nvmm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *conf)
236{
237	struct nvmm_ioc_machine_configure args;
238	int ret;
239
240	if (nvmm_init() == -1) {
241		return -1;
242	}
243
244	args.machid = mach->machid;
245	args.op = op;
246	args.conf = conf;
247
248	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CONFIGURE, &args);
249	if (ret == -1)
250		return -1;
251
252	return 0;
253}
254
255int
256nvmm_vcpu_create(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
257{
258	struct nvmm_ioc_vcpu_create args;
259	int ret;
260
261	if (nvmm_init() == -1) {
262		return -1;
263	}
264
265	args.machid = mach->machid;
266	args.cpuid = cpuid;
267
268	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_CREATE, &args);
269	if (ret == -1)
270		return -1;
271
272	return 0;
273}
274
275int
276nvmm_vcpu_destroy(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
277{
278	struct nvmm_ioc_vcpu_destroy args;
279	int ret;
280
281	if (nvmm_init() == -1) {
282		return -1;
283	}
284
285	args.machid = mach->machid;
286	args.cpuid = cpuid;
287
288	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_DESTROY, &args);
289	if (ret == -1)
290		return -1;
291
292	return 0;
293}
294
295int
296nvmm_vcpu_setstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
297    void *state, uint64_t flags)
298{
299	struct nvmm_ioc_vcpu_setstate args;
300	int ret;
301
302	if (nvmm_init() == -1) {
303		return -1;
304	}
305
306	args.machid = mach->machid;
307	args.cpuid = cpuid;
308	args.state = state;
309	args.flags = flags;
310
311	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_SETSTATE, &args);
312	if (ret == -1)
313		return -1;
314
315	return 0;
316}
317
318int
319nvmm_vcpu_getstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
320    void *state, uint64_t flags)
321{
322	struct nvmm_ioc_vcpu_getstate args;
323	int ret;
324
325	if (nvmm_init() == -1) {
326		return -1;
327	}
328
329	args.machid = mach->machid;
330	args.cpuid = cpuid;
331	args.state = state;
332	args.flags = flags;
333
334	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_GETSTATE, &args);
335	if (ret == -1)
336		return -1;
337
338	return 0;
339}
340
341int
342nvmm_vcpu_inject(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
343    struct nvmm_event *event)
344{
345	struct nvmm_ioc_vcpu_inject args;
346	int ret;
347
348	if (nvmm_init() == -1) {
349		return -1;
350	}
351
352	args.machid = mach->machid;
353	args.cpuid = cpuid;
354	memcpy(&args.event, event, sizeof(args.event));
355
356	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_INJECT, &args);
357	if (ret == -1)
358		return -1;
359
360	return 0;
361}
362
363int
364nvmm_vcpu_run(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
365    struct nvmm_exit *exit)
366{
367	struct nvmm_ioc_vcpu_run args;
368	int ret;
369
370	if (nvmm_init() == -1) {
371		return -1;
372	}
373
374	args.machid = mach->machid;
375	args.cpuid = cpuid;
376	memset(&args.exit, 0, sizeof(args.exit));
377
378	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_RUN, &args);
379	if (ret == -1)
380		return -1;
381
382	memcpy(exit, &args.exit, sizeof(args.exit));
383
384	return 0;
385}
386
387int
388nvmm_gpa_map(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
389    size_t size, int flags)
390{
391	struct nvmm_ioc_gpa_map args;
392	int ret;
393
394	if (nvmm_init() == -1) {
395		return -1;
396	}
397
398	ret = __area_add(mach, hva, gpa, size);
399	if (ret == -1)
400		return -1;
401
402	args.machid = mach->machid;
403	args.hva = hva;
404	args.gpa = gpa;
405	args.size = size;
406	args.flags = flags;
407
408	ret = ioctl(nvmm_fd, NVMM_IOC_GPA_MAP, &args);
409	if (ret == -1) {
410		/* Can't recover. */
411		abort();
412	}
413
414	return 0;
415}
416
417int
418nvmm_gpa_unmap(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
419    size_t size)
420{
421	struct nvmm_ioc_gpa_unmap args;
422	int ret;
423
424	if (nvmm_init() == -1) {
425		return -1;
426	}
427
428	ret = __area_delete(mach, hva, gpa, size);
429	if (ret == -1)
430		return -1;
431
432	args.machid = mach->machid;
433	args.gpa = gpa;
434	args.size = size;
435
436	ret = ioctl(nvmm_fd, NVMM_IOC_GPA_UNMAP, &args);
437	if (ret == -1)
438		return -1;
439
440	ret = munmap((void *)hva, size);
441
442	return ret;
443}
444
445/*
446 * nvmm_gva_to_gpa(): architecture-specific.
447 */
448
449int
450nvmm_gpa_to_hva(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t *hva)
451{
452	area_list_t *areas = mach->areas;
453	area_t *ent;
454
455	if (gpa % nvmm_page_size != 0) {
456		errno = EINVAL;
457		return -1;
458	}
459
460	LIST_FOREACH(ent, areas, list) {
461		if (gpa < ent->gpa) {
462			continue;
463		}
464		if (gpa >= ent->gpa + ent->size) {
465			continue;
466		}
467
468		*hva = ent->hva + (gpa - ent->gpa);
469		return 0;
470	}
471
472	errno = ENOENT;
473	return -1;
474}
475
476/*
477 * nvmm_assist_io(): architecture-specific.
478 */
479