/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/libkern.h>
#include <sys/ioccom.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pmap.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_mem.h"
#include "io/ppt.h"
#include <machine/vmm_dev.h>

struct vmmdev_softc {
	struct vm	*vm;		/* vm instance cookie */
	struct cdev	*cdev;
	SLIST_ENTRY(vmmdev_softc) link;
};
static SLIST_HEAD(, vmmdev_softc) head;

static struct mtx vmmdev_mtx;

static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");

SYSCTL_DECL(_hw_vmm);

static struct vmmdev_softc *
vmmdev_lookup(const char *name)
{
	struct vmmdev_softc *sc;

#ifdef notyet	/* XXX kernel is not compiled with invariants */
	mtx_assert(&vmmdev_mtx, MA_OWNED);
#endif

	SLIST_FOREACH(sc, &head, link) {
		if (strcmp(name, vm_name(sc->vm)) == 0)
			break;
	}

	return (sc);
}

static struct vmmdev_softc *
vmmdev_lookup2(struct cdev *cdev)
{
	struct vmmdev_softc *sc;

#ifdef notyet	/* XXX kernel is not compiled with invariants */
	mtx_assert(&vmmdev_mtx, MA_OWNED);
#endif

	SLIST_FOREACH(sc, &head, link) {
		if (sc->cdev == cdev)
			break;
	}

	return (sc);
}

static int
vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
{
	int error, off, c;
	vm_paddr_t hpa, gpa;
	struct vmmdev_softc *sc;

	static char zerobuf[PAGE_SIZE];

	error = 0;
	mtx_lock(&vmmdev_mtx);
	sc = vmmdev_lookup2(cdev);
	if (sc == NULL) {
		/* Bail out if the cdev is not associated with a VM. */
		mtx_unlock(&vmmdev_mtx);
		return (ENXIO);
	}

	while (uio->uio_resid > 0 && error == 0) {
		gpa = uio->uio_offset;
		off = gpa & PAGE_MASK;
		c = min(uio->uio_resid, PAGE_SIZE - off);

		/*
		 * The VM has a hole in its physical memory map. If we want to
		 * use 'dd' to inspect memory beyond the hole we need to
		 * provide bogus data for memory that lies in the hole.
		 *
		 * Since this device does not support lseek(2), dd(1) will
		 * read(2) blocks of data to simulate the lseek(2).
		 */
		hpa = vm_gpa2hpa(sc->vm, gpa, c);
		if (hpa == (vm_paddr_t)-1) {
			if (uio->uio_rw == UIO_READ)
				error = uiomove(zerobuf, c, uio);
			else
				error = EFAULT;
		} else
			error = uiomove((void *)PHYS_TO_DMAP(hpa), c, uio);
	}

	mtx_unlock(&vmmdev_mtx);
	return (error);
}

static int
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
	     struct thread *td)
{
	int error, vcpu;
	struct vmmdev_softc *sc;
	struct vm_memory_segment *seg;
	struct vm_register *vmreg;
	struct vm_seg_desc *vmsegdesc;
	struct vm_pin *vmpin;
	struct vm_run *vmrun;
	struct vm_event *vmevent;
	struct vm_lapic_irq *vmirq;
	struct vm_capability *vmcap;
	struct vm_pptdev *pptdev;
	struct vm_pptdev_mmio *pptmmio;
	struct vm_pptdev_msi *pptmsi;
	struct vm_pptdev_msix *pptmsix;
	struct vm_nmi *vmnmi;
	struct vm_stats *vmstats;
	struct vm_stat_desc *statdesc;
	struct vm_x2apic *x2apic;
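
	/*
	 * All ioctl handling below is serialized by the global vmmdev
	 * mutex. The VM_RUN case drops the mutex around the potentially
	 * long-running vm_run() call and reacquires it once the vcpu
	 * stops.
	 */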

	mtx_lock(&vmmdev_mtx);
	sc = vmmdev_lookup2(cdev);
	if (sc == NULL) {
		mtx_unlock(&vmmdev_mtx);
		return (ENXIO);
	}

	/*
	 * Some VMM ioctls can operate only on vcpus that are not running.
	 */
	switch (cmd) {
	case VM_RUN:
	case VM_SET_PINNING:
	case VM_GET_REGISTER:
	case VM_SET_REGISTER:
	case VM_GET_SEGMENT_DESCRIPTOR:
	case VM_SET_SEGMENT_DESCRIPTOR:
	case VM_INJECT_EVENT:
	case VM_GET_CAPABILITY:
	case VM_SET_CAPABILITY:
	case VM_PPTDEV_MSI:
	case VM_SET_X2APIC_STATE:
		/*
		 * XXX fragile, handle with care
		 * Assumes that the first field of the ioctl data is the vcpu.
		 */
		vcpu = *(int *)data;
		if (vcpu < 0 || vcpu >= VM_MAXCPU) {
			error = EINVAL;
			goto done;
		}

		if (vcpu_is_running(sc->vm, vcpu, NULL)) {
			error = EBUSY;
			goto done;
		}
		break;
	default:
		break;
	}

	switch (cmd) {
	case VM_RUN:
		vmrun = (struct vm_run *)data;

		vm_set_run_state(sc->vm, vmrun->cpuid, VCPU_RUNNING);
		mtx_unlock(&vmmdev_mtx);

		error = vm_run(sc->vm, vmrun);

		mtx_lock(&vmmdev_mtx);
		vm_set_run_state(sc->vm, vmrun->cpuid, VCPU_STOPPED);
		break;
	case VM_STAT_DESC: {
		const char *desc;
		statdesc = (struct vm_stat_desc *)data;
		desc = vmm_stat_desc(statdesc->index);
		if (desc != NULL) {
			error = 0;
			strlcpy(statdesc->desc, desc, sizeof(statdesc->desc));
		} else
			error = EINVAL;
		break;
	}
	case VM_STATS: {
		CTASSERT(MAX_VM_STATS >= MAX_VMM_STAT_TYPES);
		vmstats = (struct vm_stats *)data;
		getmicrotime(&vmstats->tv);
		error = vmm_stat_copy(sc->vm, vmstats->cpuid,
				      &vmstats->num_entries,
				      vmstats->statbuf);
		break;
	}
	case VM_PPTDEV_MSI:
		pptmsi = (struct vm_pptdev_msi *)data;
		error = ppt_setup_msi(sc->vm, pptmsi->vcpu,
				      pptmsi->bus, pptmsi->slot, pptmsi->func,
				      pptmsi->destcpu, pptmsi->vector,
				      pptmsi->numvec);
		break;
	case VM_PPTDEV_MSIX:
		pptmsix = (struct vm_pptdev_msix *)data;
		error = ppt_setup_msix(sc->vm, pptmsix->vcpu,
				       pptmsix->bus, pptmsix->slot,
				       pptmsix->func, pptmsix->idx,
				       pptmsix->msg, pptmsix->vector_control,
				       pptmsix->addr);
		break;
	case VM_MAP_PPTDEV_MMIO:
		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_map_mmio(sc->vm, pptmmio->bus, pptmmio->slot,
				     pptmmio->func, pptmmio->gpa, pptmmio->len,
				     pptmmio->hpa);
		break;
	case VM_BIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = ppt_assign_device(sc->vm, pptdev->bus, pptdev->slot,
					  pptdev->func);
		break;
	case VM_UNBIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = ppt_unassign_device(sc->vm, pptdev->bus, pptdev->slot,
					    pptdev->func);
		break;
	case VM_INJECT_EVENT:
		vmevent = (struct vm_event *)data;
		error = vm_inject_event(sc->vm, vmevent->cpuid, vmevent->type,
					vmevent->vector,
					vmevent->error_code,
					vmevent->error_code_valid);
		break;
	case VM_INJECT_NMI:
		vmnmi = (struct vm_nmi *)data;
		error = vm_inject_nmi(sc->vm, vmnmi->cpuid);
		break;
	case VM_LAPIC_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_set_intr(sc->vm, vmirq->cpuid, vmirq->vector);
		break;
	case VM_SET_PINNING:
		vmpin = (struct vm_pin *)data;
		error = vm_set_pinning(sc->vm, vmpin->vm_cpuid,
				       vmpin->host_cpuid);
		break;
	case VM_GET_PINNING:
		vmpin = (struct vm_pin *)data;
		error = vm_get_pinning(sc->vm, vmpin->vm_cpuid,
				       &vmpin->host_cpuid);
		break;
	case VM_MAP_MEMORY:
		seg = (struct vm_memory_segment *)data;
		error = vm_malloc(sc->vm, seg->gpa, seg->len, &seg->hpa);
		break;
	case VM_GET_MEMORY_SEG:
		seg = (struct vm_memory_segment *)data;
		seg->hpa = seg->len = 0;
		(void)vm_gpabase2memseg(sc->vm, seg->gpa, seg);
		error = 0;
		break;
	case VM_GET_REGISTER:
		vmreg = (struct vm_register *)data;
		error = vm_get_register(sc->vm, vmreg->cpuid, vmreg->regnum,
					&vmreg->regval);
		break;
	case VM_SET_REGISTER:
		vmreg = (struct vm_register *)data;
		error = vm_set_register(sc->vm, vmreg->cpuid, vmreg->regnum,
					vmreg->regval);
		break;
	case VM_SET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_set_seg_desc(sc->vm, vmsegdesc->cpuid,
					vmsegdesc->regnum,
					&vmsegdesc->desc);
		break;
	case VM_GET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_get_seg_desc(sc->vm, vmsegdesc->cpuid,
					vmsegdesc->regnum,
					&vmsegdesc->desc);
		break;
	case VM_GET_CAPABILITY:
		vmcap = (struct vm_capability *)data;
		error = vm_get_capability(sc->vm, vmcap->cpuid,
					  vmcap->captype,
					  &vmcap->capval);
		break;
	case VM_SET_CAPABILITY:
		vmcap = (struct vm_capability *)data;
		error = vm_set_capability(sc->vm, vmcap->cpuid,
					  vmcap->captype,
					  vmcap->capval);
		break;
	case VM_SET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_set_x2apic_state(sc->vm,
					    x2apic->cpuid, x2apic->state);
		break;
	case VM_GET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_get_x2apic_state(sc->vm,
					    x2apic->cpuid, &x2apic->state);
		break;
	default:
		error = ENOTTY;
		break;
	}
done:
	mtx_unlock(&vmmdev_mtx);

	return (error);
}

static int
vmmdev_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
	    int nprot, vm_memattr_t *memattr)
{
	int error;
	struct vmmdev_softc *sc;

	error = -1;
	mtx_lock(&vmmdev_mtx);

	sc = vmmdev_lookup2(cdev);
	if (sc != NULL && (nprot & PROT_EXEC) == 0) {
		*paddr = vm_gpa2hpa(sc->vm, (vm_paddr_t)offset, PAGE_SIZE);
		if (*paddr != (vm_paddr_t)-1)
			error = 0;
	}

	mtx_unlock(&vmmdev_mtx);

	return (error);
}

static void
vmmdev_destroy(struct vmmdev_softc *sc)
{

#ifdef notyet	/* XXX kernel is not compiled with invariants */
	mtx_assert(&vmmdev_mtx, MA_OWNED);
#endif

	/*
	 * XXX must stop virtual machine instances that may be still
	 * running and cleanup their state.
	 */
	SLIST_REMOVE(&head, sc, vmmdev_softc, link);
	destroy_dev(sc->cdev);
	vm_destroy(sc->vm);
	free(sc, M_VMMDEV);
}

static int
sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
{
	int error;
	char buf[VM_MAX_NAMELEN];
	struct vmmdev_softc *sc;

	strlcpy(buf, "beavis", sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vmmdev_mtx);
	sc = vmmdev_lookup(buf);
	if (sc == NULL) {
		mtx_unlock(&vmmdev_mtx);
		return (EINVAL);
	}
	vmmdev_destroy(sc);
	mtx_unlock(&vmmdev_mtx);
	return (0);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy, CTLTYPE_STRING | CTLFLAG_RW,
	    NULL, 0, sysctl_vmm_destroy, "A", NULL);

static struct cdevsw vmmdevsw = {
	.d_name		= "vmmdev",
	.d_version	= D_VERSION,
	.d_ioctl	= vmmdev_ioctl,
	.d_mmap		= vmmdev_mmap,
	.d_read		= vmmdev_rw,
	.d_write	= vmmdev_rw,
};

static int
sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct vm *vm;
	struct vmmdev_softc *sc;
	char buf[VM_MAX_NAMELEN];

	strlcpy(buf, "beavis", sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vmmdev_mtx);

	sc = vmmdev_lookup(buf);
	if (sc != NULL) {
		mtx_unlock(&vmmdev_mtx);
		return (EEXIST);
	}

	vm = vm_create(buf);
	if (vm == NULL) {
		mtx_unlock(&vmmdev_mtx);
		return (EINVAL);
	}

	sc = malloc(sizeof(struct vmmdev_softc), M_VMMDEV, M_WAITOK | M_ZERO);
	sc->vm = vm;
	sc->cdev = make_dev(&vmmdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
			    "vmm/%s", buf);
	sc->cdev->si_drv1 = sc;
	SLIST_INSERT_HEAD(&head, sc, link);

	mtx_unlock(&vmmdev_mtx);
	return (0);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, create, CTLTYPE_STRING | CTLFLAG_RW,
	    NULL, 0, sysctl_vmm_create, "A", NULL);

static int
sysctl_vmm_mem_total(SYSCTL_HANDLER_ARGS)
{
	size_t val = vmm_mem_get_mem_total();

	return (sysctl_handle_long(oidp, &val, 0, req));
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, mem_total, CTLTYPE_LONG | CTLFLAG_RD,
	    0, 0, sysctl_vmm_mem_total, "LU", "Amount of Total memory");

static int
sysctl_vmm_mem_free(SYSCTL_HANDLER_ARGS)
{
	size_t val = vmm_mem_get_mem_free();

	return (sysctl_handle_long(oidp, &val, 0, req));
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, mem_free, CTLTYPE_LONG | CTLFLAG_RD,
	    0, 0, sysctl_vmm_mem_free, "LU", "Amount of Free memory");

void
vmmdev_init(void)
{
	mtx_init(&vmmdev_mtx, "vmm device mutex", NULL, MTX_DEF);
}

void
vmmdev_cleanup(void)
{
	struct vmmdev_softc *sc, *sc2;

	mtx_lock(&vmmdev_mtx);

	SLIST_FOREACH_SAFE(sc, &head, link, sc2)
		vmmdev_destroy(sc);

	mtx_unlock(&vmmdev_mtx);
}