/*	$NetBSD: libnvmm.c,v 1.2 2018/11/19 21:45:37 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "nvmm.h"

static int nvmm_fd = -1;
static size_t nvmm_page_size = 0;

/* -------------------------------------------------------------------------- */

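/*
 * _nvmm_area_add(): register a GPA->HVA mapping in the local area list,
 * after checking that [gpa, gpa+size) does not overlap an existing area.
 * Returns -1 and sets errno to EEXIST on overlap.
 */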
static int
_nvmm_area_add(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t hva,
    size_t size)
{
	struct nvmm_area *area;
	void *ptr;
	size_t i;

	for (i = 0; i < mach->nareas; i++) {
		if (gpa >= mach->areas[i].gpa &&
		    gpa < mach->areas[i].gpa + mach->areas[i].size) {
			goto error;
		}
		if (gpa + size > mach->areas[i].gpa &&
		    gpa + size <= mach->areas[i].gpa + mach->areas[i].size) {
			goto error;
		}
		if (gpa < mach->areas[i].gpa &&
		    gpa + size >= mach->areas[i].gpa + mach->areas[i].size) {
			goto error;
		}
	}

	ptr = realloc(mach->areas, (mach->nareas + 1) *
	    sizeof(struct nvmm_area));
	if (ptr == NULL)
		return -1;
	mach->areas = ptr;

	area = &mach->areas[mach->nareas++];
	area->gpa = gpa;
	area->hva = hva;
	area->size = size;

	return 0;

error:
	errno = EEXIST;
	return -1;
}

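/*
 * _nvmm_area_delete(): remove the area that exactly matches (gpa, hva, size)
 * from the local area list. Returns -1 and sets errno to ENOENT if no such
 * area is registered.
 */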
static int
_nvmm_area_delete(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t hva,
    size_t size)
{
	size_t i;

	for (i = 0; i < mach->nareas; i++) {
		if (gpa == mach->areas[i].gpa &&
		    hva == mach->areas[i].hva &&
		    size == mach->areas[i].size) {
			break;
		}
	}
	if (i == mach->nareas) {
		errno = ENOENT;
		return -1;
	}

	memmove(&mach->areas[i], &mach->areas[i+1],
	    (mach->nareas - i - 1) * sizeof(struct nvmm_area));
	mach->nareas--;

	return 0;
}

/* -------------------------------------------------------------------------- */

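/*
 * nvmm_init(): lazily open /dev/nvmm and cache the host page size. Called
 * by every entry point; a no-op once the descriptor is already open.
 */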
static int
nvmm_init(void)
{
	if (nvmm_fd != -1)
		return 0;
	nvmm_fd = open("/dev/nvmm", O_RDWR);
	if (nvmm_fd == -1)
		return -1;
	nvmm_page_size = sysconf(_SC_PAGESIZE);
	return 0;
}

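/*
 * nvmm_capability(): fetch the capabilities of the kernel NVMM driver
 * through the NVMM_IOC_CAPABILITY ioctl.
 */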
int
nvmm_capability(struct nvmm_capability *cap)
{
	struct nvmm_ioc_capability args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = ioctl(nvmm_fd, NVMM_IOC_CAPABILITY, &args);
	if (ret == -1)
		return -1;

	memcpy(cap, &args.cap, sizeof(args.cap));

	return 0;
}

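/*
 * nvmm_machine_create(): create a virtual machine in the kernel, and
 * initialize the libnvmm-side descriptor with the returned machine ID.
 */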
int
nvmm_machine_create(struct nvmm_machine *mach)
{
	struct nvmm_ioc_machine_create args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CREATE, &args);
	if (ret == -1)
		return -1;

	memset(mach, 0, sizeof(*mach));
	mach->machid = args.machid;

	return 0;
}

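/*
 * nvmm_machine_destroy(): destroy the virtual machine in the kernel, and
 * release the area list kept by libnvmm.
 */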
int
nvmm_machine_destroy(struct nvmm_machine *mach)
{
	struct nvmm_ioc_machine_destroy args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_DESTROY, &args);
	if (ret == -1)
		return -1;

	free(mach->areas);

	return 0;
}

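/*
 * nvmm_machine_configure(): apply the configuration operation 'op' with
 * payload 'conf' to the machine; the operations are architecture-specific.
 */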
int
nvmm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *conf)
{
	struct nvmm_ioc_machine_configure args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.op = op;
	args.conf = conf;

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CONFIGURE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

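/*
 * nvmm_vcpu_create(): create the virtual CPU 'cpuid' inside the machine.
 */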
int
nvmm_vcpu_create(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
{
	struct nvmm_ioc_vcpu_create args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_CREATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

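/*
 * nvmm_vcpu_destroy(): destroy the virtual CPU 'cpuid'.
 */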
int
nvmm_vcpu_destroy(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
{
	struct nvmm_ioc_vcpu_destroy args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_DESTROY, &args);
	if (ret == -1)
		return -1;

	return 0;
}

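/*
 * nvmm_vcpu_setstate(): set the state components selected by 'flags' on the
 * virtual CPU; the state layout is architecture-specific.
 */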
int
nvmm_vcpu_setstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    void *state, uint64_t flags)
{
	struct nvmm_ioc_vcpu_setstate args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	args.state = state;
	args.flags = flags;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_SETSTATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

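/*
 * nvmm_vcpu_getstate(): retrieve the state components selected by 'flags'
 * from the virtual CPU.
 */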
int
nvmm_vcpu_getstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    void *state, uint64_t flags)
{
	struct nvmm_ioc_vcpu_getstate args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	args.state = state;
	args.flags = flags;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_GETSTATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

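/*
 * nvmm_vcpu_inject(): inject an event (such as an interrupt or an exception)
 * into the virtual CPU.
 */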
int
nvmm_vcpu_inject(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_event *event)
{
	struct nvmm_ioc_vcpu_inject args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	memcpy(&args.event, event, sizeof(args.event));

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_INJECT, &args);
	if (ret == -1)
		return -1;

	return 0;
}

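/*
 * nvmm_vcpu_run(): run the virtual CPU until it exits to userland, and
 * return the exit reason in 'exit'.
 */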
int
nvmm_vcpu_run(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_exit *exit)
{
	struct nvmm_ioc_vcpu_run args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	memset(&args.exit, 0, sizeof(args.exit));

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_RUN, &args);
	if (ret == -1)
		return -1;

	memcpy(exit, &args.exit, sizeof(args.exit));

	return 0;
}

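/*
 * nvmm_gpa_map(): map 'size' bytes of host virtual memory at 'hva' into the
 * guest physical address space at 'gpa', and record the mapping in the local
 * area list. The kernel mapping is undone if the local bookkeeping fails.
 */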
int
nvmm_gpa_map(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size, int flags)
{
	struct nvmm_ioc_gpa_map args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.hva = hva;
	args.gpa = gpa;
	args.size = size;
	args.flags = flags;

	ret = ioctl(nvmm_fd, NVMM_IOC_GPA_MAP, &args);
	if (ret == -1)
		return -1;

	ret = _nvmm_area_add(mach, gpa, hva, size);
	if (ret == -1) {
		nvmm_gpa_unmap(mach, hva, gpa, size);
		return -1;
	}

	return 0;
}

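/*
 * nvmm_gpa_unmap(): remove the (gpa, hva, size) mapping from the local area
 * list, unmap it from the guest physical address space, and munmap the host
 * virtual range.
 */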
int
nvmm_gpa_unmap(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
	struct nvmm_ioc_gpa_unmap args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = _nvmm_area_delete(mach, gpa, hva, size);
	if (ret == -1)
		return -1;

	args.machid = mach->machid;
	args.gpa = gpa;
	args.size = size;

	ret = ioctl(nvmm_fd, NVMM_IOC_GPA_UNMAP, &args);
	if (ret == -1)
		return -1;

	ret = munmap((void *)hva, size);

	return ret;
}

/*
 * nvmm_gva_to_gpa(): architecture-specific.
 */

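/*
 * nvmm_gpa_to_hva(): translate a page-aligned guest physical address into
 * the corresponding host virtual address, by looking it up in the local
 * area list. Expects nvmm_init() to have run already, so that
 * nvmm_page_size is set.
 */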
int
nvmm_gpa_to_hva(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t *hva)
{
	size_t i;

	if (gpa % nvmm_page_size != 0) {
		errno = EINVAL;
		return -1;
	}

	for (i = 0; i < mach->nareas; i++) {
		if (gpa < mach->areas[i].gpa) {
			continue;
		}
		if (gpa >= mach->areas[i].gpa + mach->areas[i].size) {
			continue;
		}

		*hva = mach->areas[i].hva + (gpa - mach->areas[i].gpa);
		return 0;
	}

	errno = ENOENT;
	return -1;
}

/*
 * nvmm_assist_io(): architecture-specific.
 */
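
/*
 * Illustrative usage sketch, not part of the library: a minimal lifecycle of
 * the API above, with error handling omitted. The exit-reason names, state
 * layouts and the exact meaning of the nvmm_gpa_map() 'flags' argument are
 * architecture-specific and come from nvmm.h; treat the constants below as
 * placeholders.
 *
 *	struct nvmm_machine mach;
 *	struct nvmm_exit exit;
 *	size_t size = 0x1000;
 *	uintptr_t hva;
 *
 *	nvmm_machine_create(&mach);
 *	nvmm_vcpu_create(&mach, 0);
 *
 *	hva = (uintptr_t)mmap(NULL, size, PROT_READ|PROT_WRITE,
 *	    MAP_ANON|MAP_PRIVATE, -1, 0);
 *	nvmm_gpa_map(&mach, hva, 0, size, 0);
 *
 *	while (nvmm_vcpu_run(&mach, 0, &exit) == 0) {
 *		switch (exit.reason) {
 *		case NVMM_EXIT_NONE:
 *			continue;
 *		default:
 *			... dispatch on the other exit reasons ...
 *			break;
 *		}
 *	}
 *
 *	nvmm_gpa_unmap(&mach, hva, 0, size);
 *	nvmm_machine_destroy(&mach);
 */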
434