vmm_dev.c revision 249435
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/vmm_dev.c 249435 2013-04-13 05:11:21Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/vmm_dev.c 249435 2013-04-13 05:11:21Z neel $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/libkern.h>
#include <sys/ioccom.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pmap.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_mem.h"
#include "io/ppt.h"
#include <machine/vmm_dev.h>

struct vmmdev_softc {
        struct vm       *vm;            /* vm instance cookie */
        struct cdev     *cdev;
        SLIST_ENTRY(vmmdev_softc) link;
};
static SLIST_HEAD(, vmmdev_softc) head;

static struct mtx vmmdev_mtx;

static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");

SYSCTL_DECL(_hw_vmm);

static struct vmmdev_softc *
vmmdev_lookup(const char *name)
{
        struct vmmdev_softc *sc;

#ifdef notyet   /* XXX kernel is not compiled with invariants */
        mtx_assert(&vmmdev_mtx, MA_OWNED);
#endif

        SLIST_FOREACH(sc, &head, link) {
                if (strcmp(name, vm_name(sc->vm)) == 0)
                        break;
        }

        return (sc);
}

static struct vmmdev_softc *
vmmdev_lookup2(struct cdev *cdev)
{

        return (cdev->si_drv1);
}

static int
vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
{
        int error, off, c;
        vm_paddr_t hpa, gpa;
        struct vmmdev_softc *sc;

        static char zerobuf[PAGE_SIZE];

        error = 0;
        mtx_lock(&vmmdev_mtx);
        sc = vmmdev_lookup2(cdev);
        if (sc == NULL)
                error = ENXIO;

        while (uio->uio_resid > 0 && error == 0) {
                gpa = uio->uio_offset;
                off = gpa & PAGE_MASK;
                c = min(uio->uio_resid, PAGE_SIZE - off);

                /*
                 * The VM has a hole in its physical memory map. If we want to
                 * use 'dd' to inspect memory beyond the hole we need to
                 * provide bogus data for memory that lies in the hole.
                 *
                 * Since this device does not support lseek(2), dd(1) will
                 * read(2) blocks of data to simulate the lseek(2).
                 */
                hpa = vm_gpa2hpa(sc->vm, gpa, c);
                if (hpa == (vm_paddr_t)-1) {
                        if (uio->uio_rw == UIO_READ)
                                error = uiomove(zerobuf, c, uio);
                        else
                                error = EFAULT;
                } else
                        error = uiomove((void *)PHYS_TO_DMAP(hpa), c, uio);
        }

        mtx_unlock(&vmmdev_mtx);
        return (error);
}
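/*
 * Illustrative usage (a sketch, not part of this file; "testvm" is a
 * hypothetical VM name): since the device does not support lseek(2),
 * dd(1) reaches an offset by read(2)ing up to it, so dumping one page
 * of guest-physical memory at the 1MB mark looks like:
 *
 *      dd if=/dev/vmm/testvm bs=4096 skip=256 count=1 | hexdump -C
 */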
static int
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        int error, vcpu, state_changed;
        enum vcpu_state new_state;
        struct vmmdev_softc *sc;
        struct vm_memory_segment *seg;
        struct vm_register *vmreg;
        struct vm_seg_desc *vmsegdesc;
        struct vm_run *vmrun;
        struct vm_event *vmevent;
        struct vm_lapic_irq *vmirq;
        struct vm_capability *vmcap;
        struct vm_pptdev *pptdev;
        struct vm_pptdev_mmio *pptmmio;
        struct vm_pptdev_msi *pptmsi;
        struct vm_pptdev_msix *pptmsix;
        struct vm_nmi *vmnmi;
        struct vm_stats *vmstats;
        struct vm_stat_desc *statdesc;
        struct vm_x2apic *x2apic;

        sc = vmmdev_lookup2(cdev);
        if (sc == NULL)
                return (ENXIO);

        vcpu = -1;
        state_changed = 0;

        /*
         * Some VMM ioctls can operate only on vcpus that are not running.
         */
        switch (cmd) {
        case VM_RUN:
        case VM_GET_REGISTER:
        case VM_SET_REGISTER:
        case VM_GET_SEGMENT_DESCRIPTOR:
        case VM_SET_SEGMENT_DESCRIPTOR:
        case VM_INJECT_EVENT:
        case VM_GET_CAPABILITY:
        case VM_SET_CAPABILITY:
        case VM_PPTDEV_MSI:
        case VM_PPTDEV_MSIX:
        case VM_SET_X2APIC_STATE:
                /*
                 * XXX fragile, handle with care
                 * Assumes that the first field of the ioctl data is the vcpu.
                 */
                vcpu = *(int *)data;
                if (vcpu < 0 || vcpu >= VM_MAXCPU) {
                        error = EINVAL;
                        goto done;
                }

                if (cmd == VM_RUN)
                        new_state = VCPU_RUNNING;
                else
                        new_state = VCPU_CANNOT_RUN;

                error = vcpu_set_state(sc->vm, vcpu, new_state);
                if (error)
                        goto done;

                state_changed = 1;
                break;

        case VM_MAP_PPTDEV_MMIO:
        case VM_BIND_PPTDEV:
        case VM_UNBIND_PPTDEV:
        case VM_MAP_MEMORY:
                /*
                 * ioctls that operate on the entire virtual machine must
                 * prevent all vcpus from running.
                 */
                error = 0;
                for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
                        error = vcpu_set_state(sc->vm, vcpu, VCPU_CANNOT_RUN);
                        if (error)
                                break;
                }

                if (error) {
                        while (--vcpu >= 0)
                                vcpu_set_state(sc->vm, vcpu, VCPU_IDLE);
                        goto done;
                }

                state_changed = 2;
                break;

        default:
                break;
        }

        switch (cmd) {
        case VM_RUN:
                vmrun = (struct vm_run *)data;
                error = vm_run(sc->vm, vmrun);
                break;
        case VM_STAT_DESC: {
                const char *desc;
                statdesc = (struct vm_stat_desc *)data;
                desc = vmm_stat_desc(statdesc->index);
                if (desc != NULL) {
                        error = 0;
                        strlcpy(statdesc->desc, desc, sizeof(statdesc->desc));
                } else
                        error = EINVAL;
                break;
        }
        case VM_STATS: {
                CTASSERT(MAX_VM_STATS >= MAX_VMM_STAT_TYPES);
                vmstats = (struct vm_stats *)data;
                getmicrotime(&vmstats->tv);
                error = vmm_stat_copy(sc->vm, vmstats->cpuid,
                    &vmstats->num_entries, vmstats->statbuf);
                break;
        }
        case VM_PPTDEV_MSI:
                pptmsi = (struct vm_pptdev_msi *)data;
                error = ppt_setup_msi(sc->vm, pptmsi->vcpu,
                    pptmsi->bus, pptmsi->slot, pptmsi->func,
                    pptmsi->destcpu, pptmsi->vector,
                    pptmsi->numvec);
                break;
        case VM_PPTDEV_MSIX:
                pptmsix = (struct vm_pptdev_msix *)data;
                error = ppt_setup_msix(sc->vm, pptmsix->vcpu,
                    pptmsix->bus, pptmsix->slot,
                    pptmsix->func, pptmsix->idx,
                    pptmsix->msg, pptmsix->vector_control,
                    pptmsix->addr);
                break;
        case VM_MAP_PPTDEV_MMIO:
                pptmmio = (struct vm_pptdev_mmio *)data;
                error = ppt_map_mmio(sc->vm, pptmmio->bus, pptmmio->slot,
                    pptmmio->func, pptmmio->gpa, pptmmio->len,
                    pptmmio->hpa);
                break;
        case VM_BIND_PPTDEV:
                pptdev = (struct vm_pptdev *)data;
                error = ppt_assign_device(sc->vm, pptdev->bus, pptdev->slot,
                    pptdev->func);
                break;
        case VM_UNBIND_PPTDEV:
                pptdev = (struct vm_pptdev *)data;
                error = ppt_unassign_device(sc->vm, pptdev->bus, pptdev->slot,
                    pptdev->func);
                break;
        case VM_INJECT_EVENT:
                vmevent = (struct vm_event *)data;
                error = vm_inject_event(sc->vm, vmevent->cpuid, vmevent->type,
                    vmevent->vector,
                    vmevent->error_code,
                    vmevent->error_code_valid);
                break;
        case VM_INJECT_NMI:
                vmnmi = (struct vm_nmi *)data;
                error = vm_inject_nmi(sc->vm, vmnmi->cpuid);
                break;
        case VM_LAPIC_IRQ:
                vmirq = (struct vm_lapic_irq *)data;
                error = lapic_set_intr(sc->vm, vmirq->cpuid, vmirq->vector);
                break;
        case VM_MAP_MEMORY:
                seg = (struct vm_memory_segment *)data;
                error = vm_malloc(sc->vm, seg->gpa, seg->len);
                break;
        case VM_GET_MEMORY_SEG:
                seg = (struct vm_memory_segment *)data;
                seg->len = 0;
                (void)vm_gpabase2memseg(sc->vm, seg->gpa, seg);
                error = 0;
                break;
        case VM_GET_REGISTER:
                vmreg = (struct vm_register *)data;
                error = vm_get_register(sc->vm, vmreg->cpuid, vmreg->regnum,
                    &vmreg->regval);
                break;
        case VM_SET_REGISTER:
                vmreg = (struct vm_register *)data;
                error = vm_set_register(sc->vm, vmreg->cpuid, vmreg->regnum,
                    vmreg->regval);
                break;
        case VM_SET_SEGMENT_DESCRIPTOR:
                vmsegdesc = (struct vm_seg_desc *)data;
                error = vm_set_seg_desc(sc->vm, vmsegdesc->cpuid,
                    vmsegdesc->regnum,
                    &vmsegdesc->desc);
                break;
        case VM_GET_SEGMENT_DESCRIPTOR:
                vmsegdesc = (struct vm_seg_desc *)data;
                error = vm_get_seg_desc(sc->vm, vmsegdesc->cpuid,
                    vmsegdesc->regnum,
                    &vmsegdesc->desc);
                break;
        case VM_GET_CAPABILITY:
                vmcap = (struct vm_capability *)data;
                error = vm_get_capability(sc->vm, vmcap->cpuid,
                    vmcap->captype,
                    &vmcap->capval);
                break;
        case VM_SET_CAPABILITY:
                vmcap = (struct vm_capability *)data;
                error = vm_set_capability(sc->vm, vmcap->cpuid,
                    vmcap->captype,
                    vmcap->capval);
                break;
        case VM_SET_X2APIC_STATE:
                x2apic = (struct vm_x2apic *)data;
                error = vm_set_x2apic_state(sc->vm,
                    x2apic->cpuid, x2apic->state);
                break;
        case VM_GET_X2APIC_STATE:
                x2apic = (struct vm_x2apic *)data;
                error = vm_get_x2apic_state(sc->vm,
                    x2apic->cpuid, &x2apic->state);
                break;
        default:
                error = ENOTTY;
                break;
        }

        if (state_changed == 1) {
                vcpu_set_state(sc->vm, vcpu, VCPU_IDLE);
        } else if (state_changed == 2) {
                for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
                        vcpu_set_state(sc->vm, vcpu, VCPU_IDLE);
        }

done:
        return (error);
}
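/*
 * Illustrative userland call (a sketch; 'fd' is a hypothetical descriptor
 * opened on /dev/vmm/<name>, and the structures come from
 * <machine/vmm_dev.h>): VM_MAP_MEMORY hands the handler above a
 * vm_memory_segment describing the guest-physical range to back with
 * memory, e.g. 64MB at guest-physical address 0:
 *
 *      struct vm_memory_segment seg;
 *
 *      bzero(&seg, sizeof(seg));
 *      seg.gpa = 0;
 *      seg.len = 64 * 1024 * 1024;
 *      if (ioctl(fd, VM_MAP_MEMORY, &seg) != 0)
 *              err(1, "VM_MAP_MEMORY");
 */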
static int
vmmdev_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
        int error;
        struct vmmdev_softc *sc;

        error = -1;
        mtx_lock(&vmmdev_mtx);

        sc = vmmdev_lookup2(cdev);
        if (sc != NULL && (nprot & PROT_EXEC) == 0) {
                *paddr = vm_gpa2hpa(sc->vm, (vm_paddr_t)offset, PAGE_SIZE);
                if (*paddr != (vm_paddr_t)-1)
                        error = 0;
        }

        mtx_unlock(&vmmdev_mtx);

        return (error);
}
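/*
 * Illustrative mapping (a sketch; 'fd', 'gpa' and 'len' are hypothetical):
 * vmmdev_mmap() above is invoked a page at a time and refuses PROT_EXEC,
 * so a process can window guest memory with an ordinary mmap(2) at the
 * guest-physical offset:
 *
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *          fd, gpa);
 */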
static void
vmmdev_destroy(struct vmmdev_softc *sc, boolean_t unlink)
{

        /*
         * XXX must stop virtual machine instances that may be still
         * running and cleanup their state.
         */
        if (sc->cdev)
                destroy_dev(sc->cdev);

        if (sc->vm)
                vm_destroy(sc->vm);

        if (unlink) {
                mtx_lock(&vmmdev_mtx);
                SLIST_REMOVE(&head, sc, vmmdev_softc, link);
                mtx_unlock(&vmmdev_mtx);
        }

        free(sc, M_VMMDEV);
}

static int
sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
{
        int error;
        char buf[VM_MAX_NAMELEN];
        struct vmmdev_softc *sc;

        strlcpy(buf, "beavis", sizeof(buf));
        error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        if (error != 0 || req->newptr == NULL)
                return (error);

        /*
         * XXX TODO if any process has this device open then fail
         */

        mtx_lock(&vmmdev_mtx);
        sc = vmmdev_lookup(buf);
        if (sc == NULL) {
                mtx_unlock(&vmmdev_mtx);
                return (EINVAL);
        }

        sc->cdev->si_drv1 = NULL;
        mtx_unlock(&vmmdev_mtx);

        vmmdev_destroy(sc, TRUE);

        return (0);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy, CTLTYPE_STRING | CTLFLAG_RW,
    NULL, 0, sysctl_vmm_destroy, "A", NULL);

static struct cdevsw vmmdevsw = {
        .d_name         = "vmmdev",
        .d_version      = D_VERSION,
        .d_ioctl        = vmmdev_ioctl,
        .d_mmap         = vmmdev_mmap,
        .d_read         = vmmdev_rw,
        .d_write        = vmmdev_rw,
};
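/*
 * Illustrative lifecycle (a sketch; "testvm" is a hypothetical name): the
 * sysctl handlers in this file expose creation and teardown of VM
 * instances, e.g.
 *
 *      sysctl hw.vmm.create=testvm     (creates /dev/vmm/testvm)
 *      sysctl hw.vmm.destroy=testvm
 */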
static int
sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
{
        int error;
        struct vm *vm;
        struct vmmdev_softc *sc, *sc2;
        char buf[VM_MAX_NAMELEN];

        strlcpy(buf, "beavis", sizeof(buf));
        error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        if (error != 0 || req->newptr == NULL)
                return (error);

        mtx_lock(&vmmdev_mtx);
        sc = vmmdev_lookup(buf);
        mtx_unlock(&vmmdev_mtx);
        if (sc != NULL)
                return (EEXIST);

        error = vm_create(buf, &vm);
        if (error != 0)
                return (error);

        sc = malloc(sizeof(struct vmmdev_softc), M_VMMDEV, M_WAITOK | M_ZERO);
        sc->vm = vm;

        /*
         * Lookup the name again just in case somebody sneaked in when we
         * dropped the lock.
         */
        mtx_lock(&vmmdev_mtx);
        sc2 = vmmdev_lookup(buf);
        if (sc2 == NULL)
                SLIST_INSERT_HEAD(&head, sc, link);
        mtx_unlock(&vmmdev_mtx);

        if (sc2 != NULL) {
                vmmdev_destroy(sc, FALSE);
                return (EEXIST);
        }

        error = make_dev_p(MAKEDEV_CHECKNAME, &sc->cdev, &vmmdevsw, NULL,
            UID_ROOT, GID_WHEEL, 0600, "vmm/%s", buf);
        if (error != 0) {
                vmmdev_destroy(sc, TRUE);
                return (error);
        }
        sc->cdev->si_drv1 = sc;

        return (0);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, create, CTLTYPE_STRING | CTLFLAG_RW,
    NULL, 0, sysctl_vmm_create, "A", NULL);

void
vmmdev_init(void)
{
        mtx_init(&vmmdev_mtx, "vmm device mutex", NULL, MTX_DEF);
}

int
vmmdev_cleanup(void)
{
        int error;

        if (SLIST_EMPTY(&head))
                error = 0;
        else
                error = EBUSY;

        return (error);
}
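/*
 * Note: vmmdev_init() and vmmdev_cleanup() have no callers in this file;
 * they are intended to be invoked from the vmm module's load/unload path
 * (see vmm.c), where the EBUSY return from vmmdev_cleanup() keeps the
 * module loaded while any VM instance still exists.
 */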