/* vmt_subr.c revision 1.7 */
1/* $NetBSD: vmt_subr.c,v 1.7 2024/03/20 23:33:22 msaitoh Exp $ */ 2/* $OpenBSD: vmt.c,v 1.11 2011/01/27 21:29:25 dtucker Exp $ */ 3 4/* 5 * Copyright (c) 2007 David Crawshaw <david@zentus.com> 6 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21/* 22 * Protocol reverse engineered by Ken Kato: 23 * https://sites.google.com/site/chitchatvmback/backdoor 24 */ 25 26#include <sys/param.h> 27#include <sys/types.h> 28#include <sys/callout.h> 29#include <sys/device.h> 30#include <sys/endian.h> 31#include <sys/kernel.h> 32#include <sys/kmem.h> 33#include <sys/module.h> 34#include <sys/proc.h> 35#include <sys/reboot.h> 36#include <sys/socket.h> 37#include <sys/sysctl.h> 38#include <sys/syslog.h> 39#include <sys/systm.h> 40#include <sys/timetc.h> 41 42#include <net/if.h> 43#include <netinet/in.h> 44 45#include <dev/sysmon/sysmonvar.h> 46#include <dev/sysmon/sysmon_taskq.h> 47#include <dev/vmt/vmtreg.h> 48#include <dev/vmt/vmtvar.h> 49 50/* #define VMT_DEBUG */ 51 52static int vmt_sysctl_setup_root(device_t); 53static int vmt_sysctl_setup_clock_sync(device_t, const struct sysctlnode *); 54static int vmt_sysctl_update_clock_sync_period(SYSCTLFN_PROTO); 55 56static void vm_cmd(struct vm_backdoor *); 57static void vm_ins(struct 
vm_backdoor *); 58static void vm_outs(struct vm_backdoor *); 59 60/* Functions for communicating with the VM Host. */ 61static int vm_rpc_open(struct vm_rpc *, uint32_t); 62static int vm_rpc_close(struct vm_rpc *); 63static int vm_rpc_send(const struct vm_rpc *, const uint8_t *, uint32_t); 64static int vm_rpc_send_str(const struct vm_rpc *, const uint8_t *); 65static int vm_rpc_get_length(const struct vm_rpc *, uint32_t *, uint16_t *); 66static int vm_rpc_get_data(const struct vm_rpc *, char *, uint32_t, uint16_t); 67static int vm_rpc_send_rpci_tx_buf(struct vmt_softc *, const uint8_t *, uint32_t); 68static int vm_rpc_send_rpci_tx(struct vmt_softc *, const char *, ...) 69 __printflike(2, 3); 70static int vm_rpci_response_successful(struct vmt_softc *); 71 72static void vmt_tclo_state_change_success(struct vmt_softc *, int, char); 73static void vmt_do_reboot(struct vmt_softc *); 74static void vmt_do_shutdown(struct vmt_softc *); 75static bool vmt_shutdown(device_t, int); 76 77static void vmt_update_guest_info(struct vmt_softc *); 78static void vmt_update_guest_uptime(struct vmt_softc *); 79static void vmt_sync_guest_clock(struct vmt_softc *); 80 81static void vmt_tick(void *); 82static void vmt_clock_sync_tick(void *); 83static void vmt_pswitch_event(void *); 84 85static void vmt_tclo_tick(void *); 86static int vmt_tclo_process(struct vmt_softc *, const char *); 87static void vmt_tclo_reset(struct vmt_softc *); 88static void vmt_tclo_ping(struct vmt_softc *); 89static void vmt_tclo_halt(struct vmt_softc *); 90static void vmt_tclo_reboot(struct vmt_softc *); 91static void vmt_tclo_poweron(struct vmt_softc *); 92static void vmt_tclo_suspend(struct vmt_softc *); 93static void vmt_tclo_resume(struct vmt_softc *); 94static void vmt_tclo_capreg(struct vmt_softc *); 95static void vmt_tclo_broadcastip(struct vmt_softc *); 96 97struct vmt_tclo_rpc { 98 const char *name; 99 void (*cb)(struct vmt_softc *); 100} vmt_tclo_rpc[] = { 101 /* Keep sorted by name (case-sensitive) */ 
102 { "Capabilities_Register", vmt_tclo_capreg }, 103 { "OS_Halt", vmt_tclo_halt }, 104 { "OS_PowerOn", vmt_tclo_poweron }, 105 { "OS_Reboot", vmt_tclo_reboot }, 106 { "OS_Resume", vmt_tclo_resume }, 107 { "OS_Suspend", vmt_tclo_suspend }, 108 { "Set_Option broadcastIP 1", vmt_tclo_broadcastip }, 109 { "ping", vmt_tclo_ping }, 110 { "reset", vmt_tclo_reset }, 111 { NULL }, 112#if 0 113 /* Various unsupported commands */ 114 { "Set_Option autohide 0" }, 115 { "Set_Option copypaste 1" }, 116 { "Set_Option enableDnD 1" }, 117 { "Set_Option enableMessageBusTunnel 0" }, 118 { "Set_Option linkRootHgfsShare 0" }, 119 { "Set_Option mapRootHgfsShare 0" }, 120 { "Set_Option synctime 1" }, 121 { "Set_Option synctime.period 0" }, 122 { "Set_Option time.synchronize.tools.enable 1" }, 123 { "Set_Option time.synchronize.tools.percentCorrection 0" }, 124 { "Set_Option time.synchronize.tools.slewCorrection 1" }, 125 { "Set_Option time.synchronize.tools.startup 1" }, 126 { "Set_Option toolScripts.afterPowerOn 1" }, 127 { "Set_Option toolScripts.afterResume 1" }, 128 { "Set_Option toolScripts.beforePowerOff 1" }, 129 { "Set_Option toolScripts.beforeSuspend 1" }, 130 { "Time_Synchronize 0" }, 131 { "Vix_1_Relayed_Command \"38cdcae40e075d66\"" }, 132#endif 133}; 134 135extern char hostname[MAXHOSTNAMELEN]; 136 137static void 138vmt_probe_cmd(struct vm_backdoor *frame, uint16_t cmd) 139{ 140 memset(frame, 0, sizeof(*frame)); 141 142 frame->eax = VM_MAGIC; 143 frame->ebx = ~VM_MAGIC & VM_REG_WORD_MASK; 144 frame->ecx = VM_REG_CMD(0xffff, cmd); 145 frame->edx = VM_REG_CMD(0, VM_PORT_CMD); 146 147 vm_cmd(frame); 148} 149 150bool 151vmt_probe(void) 152{ 153 struct vm_backdoor frame; 154 155 vmt_probe_cmd(&frame, VM_CMD_GET_VERSION); 156 if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == 0xffffffff || 157 __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_MAGIC) 158 return false; 159 160 vmt_probe_cmd(&frame, VM_CMD_GET_SPEED); 161 if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == VM_MAGIC) 162 return 
false; 163 164 return true; 165} 166 167void 168vmt_common_attach(struct vmt_softc *sc) 169{ 170 device_t self; 171 struct vm_backdoor frame; 172 int rv; 173 174 self = sc->sc_dev; 175 sc->sc_log = NULL; 176 177 /* check again */ 178 vmt_probe_cmd(&frame, VM_CMD_GET_VERSION); 179 if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == 0xffffffff || 180 __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_MAGIC) { 181 aprint_error_dev(self, "failed to get VMware version\n"); 182 return; 183 } 184 185 /* show uuid */ 186 { 187 struct uuid uuid; 188 uint32_t u; 189 190 vmt_probe_cmd(&frame, VM_CMD_GET_BIOS_UUID); 191 uuid.time_low = 192 bswap32(__SHIFTOUT(frame.eax, VM_REG_WORD_MASK)); 193 u = bswap32(__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK)); 194 uuid.time_mid = u >> 16; 195 uuid.time_hi_and_version = u; 196 u = bswap32(__SHIFTOUT(frame.ecx, VM_REG_WORD_MASK)); 197 uuid.clock_seq_hi_and_reserved = u >> 24; 198 uuid.clock_seq_low = u >> 16; 199 uuid.node[0] = u >> 8; 200 uuid.node[1] = u; 201 u = bswap32(__SHIFTOUT(frame.edx, VM_REG_WORD_MASK)); 202 uuid.node[2] = u >> 24; 203 uuid.node[3] = u >> 16; 204 uuid.node[4] = u >> 8; 205 uuid.node[5] = u; 206 207 uuid_snprintf(sc->sc_uuid, sizeof(sc->sc_uuid), &uuid); 208 aprint_verbose_dev(sc->sc_dev, "UUID: %s\n", sc->sc_uuid); 209 } 210 211 callout_init(&sc->sc_tick, 0); 212 callout_init(&sc->sc_tclo_tick, 0); 213 callout_init(&sc->sc_clock_sync_tick, 0); 214 215 sc->sc_clock_sync_period_seconds = VMT_CLOCK_SYNC_PERIOD_SECONDS; 216 217 rv = vmt_sysctl_setup_root(self); 218 if (rv != 0) { 219 aprint_error_dev(self, "failed to initialize sysctl " 220 "(err %d)\n", rv); 221 goto free; 222 } 223 224 sc->sc_rpc_buf = kmem_alloc(VMT_RPC_BUFLEN, KM_SLEEP); 225 226 if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) { 227 aprint_error_dev(self, "failed to open backdoor RPC channel " 228 "(TCLO protocol)\n"); 229 goto free; 230 } 231 sc->sc_tclo_rpc_open = true; 232 233 /* don't know if this is important at all yet */ 234 if 
(vm_rpc_send_rpci_tx(sc, 235 "tools.capability.hgfs_server toolbox 1") != 0) { 236 aprint_error_dev(self, 237 "failed to set HGFS server capability\n"); 238 goto free; 239 } 240 241 pmf_device_register1(self, NULL, NULL, vmt_shutdown); 242 243 sysmon_task_queue_init(); 244 245 sc->sc_ev_power.ev_smpsw.smpsw_type = PSWITCH_TYPE_POWER; 246 sc->sc_ev_power.ev_smpsw.smpsw_name = device_xname(self); 247 sc->sc_ev_power.ev_code = PSWITCH_EVENT_PRESSED; 248 sysmon_pswitch_register(&sc->sc_ev_power.ev_smpsw); 249 sc->sc_ev_reset.ev_smpsw.smpsw_type = PSWITCH_TYPE_RESET; 250 sc->sc_ev_reset.ev_smpsw.smpsw_name = device_xname(self); 251 sc->sc_ev_reset.ev_code = PSWITCH_EVENT_PRESSED; 252 sysmon_pswitch_register(&sc->sc_ev_reset.ev_smpsw); 253 sc->sc_ev_sleep.ev_smpsw.smpsw_type = PSWITCH_TYPE_SLEEP; 254 sc->sc_ev_sleep.ev_smpsw.smpsw_name = device_xname(self); 255 sc->sc_ev_sleep.ev_code = PSWITCH_EVENT_RELEASED; 256 sysmon_pswitch_register(&sc->sc_ev_sleep.ev_smpsw); 257 sc->sc_smpsw_valid = true; 258 259 callout_setfunc(&sc->sc_tick, vmt_tick, sc); 260 callout_schedule(&sc->sc_tick, hz); 261 262 callout_setfunc(&sc->sc_tclo_tick, vmt_tclo_tick, sc); 263 callout_schedule(&sc->sc_tclo_tick, hz); 264 sc->sc_tclo_ping = 1; 265 266 callout_setfunc(&sc->sc_clock_sync_tick, vmt_clock_sync_tick, sc); 267 callout_schedule(&sc->sc_clock_sync_tick, 268 mstohz(sc->sc_clock_sync_period_seconds * 1000)); 269 270 vmt_sync_guest_clock(sc); 271 272 return; 273 274free: 275 if (sc->sc_rpc_buf) 276 kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN); 277 pmf_device_register(self, NULL, NULL); 278 if (sc->sc_log) 279 sysctl_teardown(&sc->sc_log); 280} 281 282int 283vmt_common_detach(struct vmt_softc *sc) 284{ 285 if (sc->sc_tclo_rpc_open) 286 vm_rpc_close(&sc->sc_tclo_rpc); 287 288 if (sc->sc_smpsw_valid) { 289 sysmon_pswitch_unregister(&sc->sc_ev_sleep.ev_smpsw); 290 sysmon_pswitch_unregister(&sc->sc_ev_reset.ev_smpsw); 291 sysmon_pswitch_unregister(&sc->sc_ev_power.ev_smpsw); 292 } 293 294 
callout_halt(&sc->sc_tick, NULL); 295 callout_destroy(&sc->sc_tick); 296 297 callout_halt(&sc->sc_tclo_tick, NULL); 298 callout_destroy(&sc->sc_tclo_tick); 299 300 callout_halt(&sc->sc_clock_sync_tick, NULL); 301 callout_destroy(&sc->sc_clock_sync_tick); 302 303 if (sc->sc_rpc_buf) 304 kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN); 305 306 if (sc->sc_log) { 307 sysctl_teardown(&sc->sc_log); 308 sc->sc_log = NULL; 309 } 310 311 return 0; 312} 313 314static int 315vmt_sysctl_setup_root(device_t self) 316{ 317 const struct sysctlnode *machdep_node, *vmt_node; 318 struct vmt_softc *sc = device_private(self); 319 int rv; 320 321 rv = sysctl_createv(&sc->sc_log, 0, NULL, &machdep_node, 322 CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL, 323 NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL); 324 if (rv != 0) 325 goto fail; 326 327 rv = sysctl_createv(&sc->sc_log, 0, &machdep_node, &vmt_node, 328 0, CTLTYPE_NODE, device_xname(self), NULL, 329 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 330 if (rv != 0) 331 goto fail; 332 333 rv = sysctl_createv(&sc->sc_log, 0, &vmt_node, NULL, 334 CTLFLAG_READONLY, CTLTYPE_STRING, "uuid", 335 SYSCTL_DESCR("UUID of virtual machine"), 336 NULL, 0, sc->sc_uuid, 0, 337 CTL_CREATE, CTL_EOL); 338 339 rv = vmt_sysctl_setup_clock_sync(self, vmt_node); 340 if (rv != 0) 341 goto fail; 342 343 return 0; 344 345fail: 346 sysctl_teardown(&sc->sc_log); 347 sc->sc_log = NULL; 348 349 return rv; 350} 351 352static int 353vmt_sysctl_setup_clock_sync(device_t self, const struct sysctlnode *root_node) 354{ 355 const struct sysctlnode *node, *period_node; 356 struct vmt_softc *sc = device_private(self); 357 int rv; 358 359 rv = sysctl_createv(&sc->sc_log, 0, &root_node, &node, 360 0, CTLTYPE_NODE, "clock_sync", NULL, 361 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 362 if (rv != 0) 363 return rv; 364 365 rv = sysctl_createv(&sc->sc_log, 0, &node, &period_node, 366 CTLFLAG_READWRITE, CTLTYPE_INT, "period", 367 SYSCTL_DESCR("Period, in seconds, at which to update the " 368 "guest's 
clock"), 369 vmt_sysctl_update_clock_sync_period, 0, (void *)sc, 0, 370 CTL_CREATE, CTL_EOL); 371 return rv; 372} 373 374static int 375vmt_sysctl_update_clock_sync_period(SYSCTLFN_ARGS) 376{ 377 int error, period; 378 struct sysctlnode node; 379 struct vmt_softc *sc; 380 381 node = *rnode; 382 sc = (struct vmt_softc *)node.sysctl_data; 383 384 period = sc->sc_clock_sync_period_seconds; 385 node.sysctl_data = . 386 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 387 if (error || newp == NULL) 388 return error; 389 390 if (sc->sc_clock_sync_period_seconds != period) { 391 callout_halt(&sc->sc_clock_sync_tick, NULL); 392 sc->sc_clock_sync_period_seconds = period; 393 if (sc->sc_clock_sync_period_seconds > 0) 394 callout_schedule(&sc->sc_clock_sync_tick, 395 mstohz(sc->sc_clock_sync_period_seconds * 1000)); 396 } 397 return 0; 398} 399 400static void 401vmt_clock_sync_tick(void *xarg) 402{ 403 struct vmt_softc *sc = xarg; 404 405 vmt_sync_guest_clock(sc); 406 407 callout_schedule(&sc->sc_clock_sync_tick, 408 mstohz(sc->sc_clock_sync_period_seconds * 1000)); 409} 410 411static void 412vmt_update_guest_uptime(struct vmt_softc *sc) 413{ 414 /* host wants uptime in hundredths of a second */ 415 if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %" PRId64 "00", 416 VM_GUEST_INFO_UPTIME, time_uptime) != 0) { 417 device_printf(sc->sc_dev, "unable to set guest uptime\n"); 418 sc->sc_rpc_error = 1; 419 } 420} 421 422static void 423vmt_update_guest_info(struct vmt_softc *sc) 424{ 425 if (strncmp(sc->sc_hostname, hostname, sizeof(sc->sc_hostname)) != 0) { 426 strlcpy(sc->sc_hostname, hostname, sizeof(sc->sc_hostname)); 427 if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s", 428 VM_GUEST_INFO_DNS_NAME, sc->sc_hostname) != 0) { 429 device_printf(sc->sc_dev, "unable to set hostname\n"); 430 sc->sc_rpc_error = 1; 431 } 432 } 433 434 /* 435 * we're supposed to pass the full network address information back 436 * here, but that involves xdr (sunrpc) data encoding, which seems 437 * a bit 
unreasonable. 438 */ 439 440 if (sc->sc_set_guest_os == 0) { 441 if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s %s %s", 442 VM_GUEST_INFO_OS_NAME_FULL, 443 ostype, osrelease, machine_arch) != 0) { 444 device_printf(sc->sc_dev, 445 "unable to set full guest OS\n"); 446 sc->sc_rpc_error = 1; 447 } 448 449 /* 450 * Host doesn't like it if we send an OS name it doesn't 451 * recognise, so use "other" for i386 and "other-64" for amd64. 452 */ 453 if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s", 454 VM_GUEST_INFO_OS_NAME, VM_OS_NAME) != 0) { 455 device_printf(sc->sc_dev, "unable to set guest OS\n"); 456 sc->sc_rpc_error = 1; 457 } 458 459 sc->sc_set_guest_os = 1; 460 } 461} 462 463static void 464vmt_sync_guest_clock(struct vmt_softc *sc) 465{ 466 struct vm_backdoor frame; 467 struct timespec ts; 468 469 memset(&frame, 0, sizeof(frame)); 470 frame.eax = VM_MAGIC; 471 frame.ecx = VM_CMD_GET_TIME_FULL; 472 frame.edx = VM_REG_CMD(0, VM_PORT_CMD); 473 vm_cmd(&frame); 474 475 if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) != 0xffffffff) { 476 ts.tv_sec = ((uint64_t)( 477 __SHIFTOUT(frame.esi, VM_REG_WORD_MASK) << 32)) | 478 __SHIFTOUT(frame.edx, VM_REG_WORD_MASK); 479 ts.tv_nsec = __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) * 1000; 480 tc_setclock(&ts); 481 } 482} 483 484static void 485vmt_tick(void *xarg) 486{ 487 struct vmt_softc *sc = xarg; 488 489 vmt_update_guest_info(sc); 490 vmt_update_guest_uptime(sc); 491 492 callout_schedule(&sc->sc_tick, hz * 15); 493} 494 495static void 496vmt_tclo_state_change_success(struct vmt_softc *sc, int success, char state) 497{ 498 if (vm_rpc_send_rpci_tx(sc, "tools.os.statechange.status %d %d", 499 success, state) != 0) { 500 device_printf(sc->sc_dev, 501 "unable to send state change result\n"); 502 sc->sc_rpc_error = 1; 503 } 504} 505 506static void 507vmt_do_shutdown(struct vmt_softc *sc) 508{ 509 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_HALT); 510 vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK); 511 512 
device_printf(sc->sc_dev, "host requested shutdown\n"); 513 sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_power); 514} 515 516static void 517vmt_do_reboot(struct vmt_softc *sc) 518{ 519 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_REBOOT); 520 vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK); 521 522 device_printf(sc->sc_dev, "host requested reboot\n"); 523 sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_reset); 524} 525 526static void 527vmt_do_resume(struct vmt_softc *sc) 528{ 529 device_printf(sc->sc_dev, "guest resuming from suspended state\n"); 530 531 vmt_sync_guest_clock(sc); 532 533 /* force guest info update */ 534 sc->sc_hostname[0] = '\0'; 535 sc->sc_set_guest_os = 0; 536 vmt_update_guest_info(sc); 537 538 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_RESUME); 539 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) { 540 device_printf(sc->sc_dev, "error sending resume response\n"); 541 sc->sc_rpc_error = 1; 542 } 543 544 sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_sleep); 545} 546 547static bool 548vmt_shutdown(device_t self, int flags) 549{ 550 struct vmt_softc *sc = device_private(self); 551 552 if (vm_rpc_send_rpci_tx(sc, 553 "tools.capability.hgfs_server toolbox 0") != 0) { 554 device_printf(sc->sc_dev, 555 "failed to disable hgfs server capability\n"); 556 } 557 558 if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) { 559 device_printf(sc->sc_dev, "failed to send shutdown ping\n"); 560 } 561 562 vm_rpc_close(&sc->sc_tclo_rpc); 563 564 return true; 565} 566 567static void 568vmt_pswitch_event(void *xarg) 569{ 570 struct vmt_event *ev = xarg; 571 572 sysmon_pswitch_event(&ev->ev_smpsw, ev->ev_code); 573} 574 575static void 576vmt_tclo_reset(struct vmt_softc *sc) 577{ 578 579 if (sc->sc_rpc_error != 0) { 580 device_printf(sc->sc_dev, "resetting rpc\n"); 581 vm_rpc_close(&sc->sc_tclo_rpc); 582 583 /* reopen and send the reset reply next time around */ 584 return; 585 } 586 587 if 
(vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) { 588 device_printf(sc->sc_dev, "failed to send reset reply\n"); 589 sc->sc_rpc_error = 1; 590 } 591 592} 593 594static void 595vmt_tclo_ping(struct vmt_softc *sc) 596{ 597 598 vmt_update_guest_info(sc); 599 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) { 600 device_printf(sc->sc_dev, "error sending ping response\n"); 601 sc->sc_rpc_error = 1; 602 } 603} 604 605static void 606vmt_tclo_halt(struct vmt_softc *sc) 607{ 608 609 vmt_do_shutdown(sc); 610} 611 612static void 613vmt_tclo_reboot(struct vmt_softc *sc) 614{ 615 616 vmt_do_reboot(sc); 617} 618 619static void 620vmt_tclo_poweron(struct vmt_softc *sc) 621{ 622 623 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_POWERON); 624 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) { 625 device_printf(sc->sc_dev, "error sending poweron response\n"); 626 sc->sc_rpc_error = 1; 627 } 628} 629 630static void 631vmt_tclo_suspend(struct vmt_softc *sc) 632{ 633 634 log(LOG_KERN | LOG_NOTICE, 635 "VMware guest entering suspended state\n"); 636 637 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_SUSPEND); 638 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) { 639 device_printf(sc->sc_dev, "error sending suspend response\n"); 640 sc->sc_rpc_error = 1; 641 } 642} 643 644static void 645vmt_tclo_resume(struct vmt_softc *sc) 646{ 647 648 vmt_do_resume(sc); /* XXX msaitoh extract */ 649} 650 651static void 652vmt_tclo_capreg(struct vmt_softc *sc) 653{ 654 655 /* don't know if this is important at all */ 656 if (vm_rpc_send_rpci_tx(sc, 657 "vmx.capability.unified_loop toolbox") != 0) { 658 device_printf(sc->sc_dev, "unable to set unified loop\n"); 659 sc->sc_rpc_error = 1; 660 } 661 if (vm_rpci_response_successful(sc) == 0) { 662 device_printf(sc->sc_dev, 663 "host rejected unified loop setting\n"); 664 } 665 666 /* the trailing space is apparently important here */ 667 if (vm_rpc_send_rpci_tx(sc, 668 
"tools.capability.statechange ") != 0) { 669 device_printf(sc->sc_dev, 670 "unable to send statechange capability\n"); 671 sc->sc_rpc_error = 1; 672 } 673 if (vm_rpci_response_successful(sc) == 0) { 674 device_printf(sc->sc_dev, 675 "host rejected statechange capability\n"); 676 } 677 678 if (vm_rpc_send_rpci_tx(sc, 679 "tools.set.version %u", VM_VERSION_UNMANAGED) != 0) { 680 device_printf(sc->sc_dev, "unable to set tools version\n"); 681 sc->sc_rpc_error = 1; 682 } 683 684 vmt_update_guest_uptime(sc); 685 686 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) { 687 device_printf(sc->sc_dev, 688 "error sending capabilities_register response\n"); 689 sc->sc_rpc_error = 1; 690 } 691} 692 693static void 694vmt_tclo_broadcastip(struct vmt_softc *sc) 695{ 696 struct ifaddr *iface_addr = NULL; 697 struct ifnet *iface; 698 struct sockaddr_in *guest_ip; 699 int s; 700 struct psref psref; 701 702 /* find first available ipv4 address */ 703 guest_ip = NULL; 704 s = pserialize_read_enter(); 705 IFNET_READER_FOREACH(iface) { 706 707 /* skip loopback */ 708 if (strncmp(iface->if_xname, "lo", 2) == 0 && 709 iface->if_xname[2] >= '0' && 710 iface->if_xname[2] <= '9') { 711 continue; 712 } 713 714 IFADDR_READER_FOREACH(iface_addr, iface) { 715 if (iface_addr->ifa_addr->sa_family != AF_INET) { 716 continue; 717 } 718 719 guest_ip = satosin(iface_addr->ifa_addr); 720 ifa_acquire(iface_addr, &psref); 721 goto got; 722 } 723 } 724got: 725 pserialize_read_exit(s); 726 727 if (guest_ip != NULL) { 728 if (vm_rpc_send_rpci_tx(sc, "info-set guestinfo.ip %s", 729 inet_ntoa(guest_ip->sin_addr)) != 0) { 730 device_printf(sc->sc_dev, 731 "unable to send guest IP address\n"); 732 sc->sc_rpc_error = 1; 733 } 734 ifa_release(iface_addr, &psref); 735 736 if (vm_rpc_send_str(&sc->sc_tclo_rpc, 737 VM_RPC_REPLY_OK) != 0) { 738 device_printf(sc->sc_dev, 739 "error sending broadcastIP response\n"); 740 sc->sc_rpc_error = 1; 741 } 742 } else { 743 if (vm_rpc_send_str(&sc->sc_tclo_rpc, 744 
VM_RPC_REPLY_ERROR_IP_ADDR) != 0) { 745 device_printf(sc->sc_dev, 746 "error sending broadcastIP" 747 " error response\n"); 748 sc->sc_rpc_error = 1; 749 } 750 } 751} 752 753int 754vmt_tclo_process(struct vmt_softc *sc, const char *name) 755{ 756 int i; 757 758 /* Search for rpc command and call handler */ 759 for (i = 0; vmt_tclo_rpc[i].name != NULL; i++) { 760 if (strcmp(vmt_tclo_rpc[i].name, sc->sc_rpc_buf) == 0) { 761 vmt_tclo_rpc[i].cb(sc); 762 return (0); 763 } 764 } 765 766 device_printf(sc->sc_dev, "unknown command: \"%s\"\n", name); 767 768 return (-1); 769} 770 771static void 772vmt_tclo_tick(void *xarg) 773{ 774 struct vmt_softc *sc = xarg; 775 u_int32_t rlen; 776 u_int16_t ack; 777 778 /* reopen tclo channel if it's currently closed */ 779 if (sc->sc_tclo_rpc.channel == 0 && 780 sc->sc_tclo_rpc.cookie1 == 0 && 781 sc->sc_tclo_rpc.cookie2 == 0) { 782 if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) { 783 device_printf(sc->sc_dev, 784 "unable to reopen TCLO channel\n"); 785 callout_schedule(&sc->sc_tclo_tick, hz * 15); 786 return; 787 } 788 789 if (vm_rpc_send_str(&sc->sc_tclo_rpc, 790 VM_RPC_RESET_REPLY) != 0) { 791 device_printf(sc->sc_dev, 792 "failed to send reset reply\n"); 793 sc->sc_rpc_error = 1; 794 goto out; 795 } else { 796 sc->sc_rpc_error = 0; 797 } 798 } 799 800 if (sc->sc_tclo_ping) { 801 if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) { 802 device_printf(sc->sc_dev, 803 "failed to send TCLO outgoing ping\n"); 804 sc->sc_rpc_error = 1; 805 goto out; 806 } 807 } 808 809 if (vm_rpc_get_length(&sc->sc_tclo_rpc, &rlen, &ack) != 0) { 810 device_printf(sc->sc_dev, 811 "failed to get length of incoming TCLO data\n"); 812 sc->sc_rpc_error = 1; 813 goto out; 814 } 815 816 if (rlen == 0) { 817 sc->sc_tclo_ping = 1; 818 goto out; 819 } 820 821 if (rlen >= VMT_RPC_BUFLEN) { 822 rlen = VMT_RPC_BUFLEN - 1; 823 } 824 if (vm_rpc_get_data(&sc->sc_tclo_rpc, sc->sc_rpc_buf, rlen, ack) != 0) { 825 device_printf(sc->sc_dev, 826 "failed to get incoming 
TCLO data\n"); 827 sc->sc_rpc_error = 1; 828 goto out; 829 } 830 sc->sc_tclo_ping = 0; 831 832#ifdef VMT_DEBUG 833 printf("vmware: received message '%s'\n", sc->sc_rpc_buf); 834#endif 835 836 if (vmt_tclo_process(sc, sc->sc_rpc_buf) != 0) { 837 if (vm_rpc_send_str(&sc->sc_tclo_rpc, 838 VM_RPC_REPLY_ERROR) != 0) { 839 device_printf(sc->sc_dev, 840 "error sending unknown command reply\n"); 841 sc->sc_rpc_error = 1; 842 } 843 } 844 845out: 846 /* On error, give time to recover and wait a second */ 847 callout_schedule(&sc->sc_tclo_tick, 848 (sc->sc_tclo_ping || sc->sc_rpc_error) ? hz : 1); 849} 850 851static void 852vm_cmd(struct vm_backdoor *frame) 853{ 854 BACKDOOR_OP(BACKDOOR_OP_CMD, frame); 855} 856 857static void 858vm_ins(struct vm_backdoor *frame) 859{ 860 BACKDOOR_OP(BACKDOOR_OP_IN, frame); 861} 862 863static void 864vm_outs(struct vm_backdoor *frame) 865{ 866 BACKDOOR_OP(BACKDOOR_OP_OUT, frame); 867} 868 869static int 870vm_rpc_open(struct vm_rpc *rpc, uint32_t proto) 871{ 872 struct vm_backdoor frame; 873 874 memset(&frame, 0, sizeof(frame)); 875 frame.eax = VM_MAGIC; 876 frame.ebx = proto | VM_RPC_FLAG_COOKIE; 877 frame.ecx = VM_REG_CMD_RPC(VM_RPC_OPEN); 878 frame.edx = VM_REG_PORT_CMD(0); 879 880 vm_cmd(&frame); 881 882 if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) != 1 || 883 __SHIFTOUT(frame.edx, VM_REG_LOW_MASK) != 0) { 884 /* open-vm-tools retries without VM_RPC_FLAG_COOKIE here.. 
*/ 885 printf("vmware: open failed, eax=%#"PRIxREGISTER 886 ", ecx=%#"PRIxREGISTER", edx=%#"PRIxREGISTER"\n", 887 frame.eax, frame.ecx, frame.edx); 888 return EIO; 889 } 890 891 rpc->channel = __SHIFTOUT(frame.edx, VM_REG_HIGH_MASK); 892 rpc->cookie1 = __SHIFTOUT(frame.esi, VM_REG_WORD_MASK); 893 rpc->cookie2 = __SHIFTOUT(frame.edi, VM_REG_WORD_MASK); 894 895 return 0; 896} 897 898static int 899vm_rpc_close(struct vm_rpc *rpc) 900{ 901 struct vm_backdoor frame; 902 903 memset(&frame, 0, sizeof(frame)); 904 frame.eax = VM_MAGIC; 905 frame.ebx = 0; 906 frame.ecx = VM_REG_CMD_RPC(VM_RPC_CLOSE); 907 frame.edx = VM_REG_PORT_CMD(rpc->channel); 908 frame.edi = rpc->cookie2; 909 frame.esi = rpc->cookie1; 910 911 vm_cmd(&frame); 912 913 if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) == 0 || 914 __SHIFTOUT(frame.ecx, VM_REG_LOW_MASK) != 0) { 915 printf("vmware: close failed, " 916 "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n", 917 frame.eax, frame.ecx); 918 return EIO; 919 } 920 921 rpc->channel = 0; 922 rpc->cookie1 = 0; 923 rpc->cookie2 = 0; 924 925 return 0; 926} 927 928static int 929vm_rpc_send(const struct vm_rpc *rpc, const uint8_t *buf, uint32_t length) 930{ 931 struct vm_backdoor frame; 932 933 /* Send the length of the command. */ 934 memset(&frame, 0, sizeof(frame)); 935 frame.eax = VM_MAGIC; 936 frame.ebx = length; 937 frame.ecx = VM_REG_CMD_RPC(VM_RPC_SET_LENGTH); 938 frame.edx = VM_REG_PORT_CMD(rpc->channel); 939 frame.esi = rpc->cookie1; 940 frame.edi = rpc->cookie2; 941 942 vm_cmd(&frame); 943 944 if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_SUCCESS) == 945 0) { 946 printf("vmware: sending length failed, " 947 "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n", 948 frame.eax, frame.ecx); 949 return EIO; 950 } 951 952 if (length == 0) 953 return 0; /* Only need to poke once if command is null. */ 954 955 /* Send the command using enhanced RPC. 
*/ 956 memset(&frame, 0, sizeof(frame)); 957 frame.eax = VM_MAGIC; 958 frame.ebx = VM_RPC_ENH_DATA; 959 frame.ecx = length; 960 frame.edx = VM_REG_PORT_RPC(rpc->channel); 961 frame.ebp = rpc->cookie1; 962 frame.edi = rpc->cookie2; 963 frame.esi = (register_t)buf; 964 965 vm_outs(&frame); 966 967 if (__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_RPC_ENH_DATA) { 968 /* open-vm-tools retries on VM_RPC_REPLY_CHECKPOINT */ 969 printf("vmware: send failed, ebx=%#"PRIxREGISTER"\n", 970 frame.ebx); 971 return EIO; 972 } 973 974 return 0; 975} 976 977static int 978vm_rpc_send_str(const struct vm_rpc *rpc, const uint8_t *str) 979{ 980 return vm_rpc_send(rpc, str, strlen(str)); 981} 982 983static int 984vm_rpc_get_data(const struct vm_rpc *rpc, char *data, uint32_t length, 985 uint16_t dataid) 986{ 987 struct vm_backdoor frame; 988 989 /* Get data using enhanced RPC. */ 990 memset(&frame, 0, sizeof(frame)); 991 frame.eax = VM_MAGIC; 992 frame.ebx = VM_RPC_ENH_DATA; 993 frame.ecx = length; 994 frame.edx = VM_REG_PORT_RPC(rpc->channel); 995 frame.esi = rpc->cookie1; 996 frame.edi = (register_t)data; 997 frame.ebp = rpc->cookie2; 998 999 vm_ins(&frame); 1000 1001 /* NUL-terminate the data */ 1002 data[length] = '\0'; 1003 1004 if (__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_RPC_ENH_DATA) { 1005 printf("vmware: get data failed, ebx=%#"PRIxREGISTER"\n", 1006 frame.ebx); 1007 return EIO; 1008 } 1009 1010 /* Acknowledge data received. 
*/ 1011 memset(&frame, 0, sizeof(frame)); 1012 frame.eax = VM_MAGIC; 1013 frame.ebx = dataid; 1014 frame.ecx = VM_REG_CMD_RPC(VM_RPC_GET_END); 1015 frame.edx = VM_REG_PORT_CMD(rpc->channel); 1016 frame.esi = rpc->cookie1; 1017 frame.edi = rpc->cookie2; 1018 1019 vm_cmd(&frame); 1020 1021 if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) == 0) { 1022 printf("vmware: ack data failed, " 1023 "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n", 1024 frame.eax, frame.ecx); 1025 return EIO; 1026 } 1027 1028 return 0; 1029} 1030 1031static int 1032vm_rpc_get_length(const struct vm_rpc *rpc, uint32_t *length, uint16_t *dataid) 1033{ 1034 struct vm_backdoor frame; 1035 1036 memset(&frame, 0, sizeof(frame)); 1037 frame.eax = VM_MAGIC; 1038 frame.ebx = 0; 1039 frame.ecx = VM_REG_CMD_RPC(VM_RPC_GET_LENGTH); 1040 frame.edx = VM_REG_PORT_CMD(rpc->channel); 1041 frame.esi = rpc->cookie1; 1042 frame.edi = rpc->cookie2; 1043 1044 vm_cmd(&frame); 1045 1046 if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_SUCCESS) == 1047 0) { 1048 printf("vmware: get length failed, " 1049 "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n", 1050 frame.eax, frame.ecx); 1051 return EIO; 1052 } 1053 if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_DORECV) == 1054 0) { 1055 *length = 0; 1056 *dataid = 0; 1057 } else { 1058 *length = __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK); 1059 *dataid = __SHIFTOUT(frame.edx, VM_REG_HIGH_MASK); 1060 } 1061 1062 return 0; 1063} 1064 1065static int 1066vm_rpci_response_successful(struct vmt_softc *sc) 1067{ 1068 return (sc->sc_rpc_buf[0] == '1' && sc->sc_rpc_buf[1] == ' '); 1069} 1070 1071static int 1072vm_rpc_send_rpci_tx_buf(struct vmt_softc *sc, const uint8_t *buf, 1073 uint32_t length) 1074{ 1075 struct vm_rpc rpci; 1076 u_int32_t rlen; 1077 u_int16_t ack; 1078 int result = 0; 1079 1080 if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) { 1081 device_printf(sc->sc_dev, "rpci channel open failed\n"); 1082 return EIO; 1083 } 1084 1085 if (vm_rpc_send(&rpci, 
sc->sc_rpc_buf, length) != 0) { 1086 device_printf(sc->sc_dev, "unable to send rpci command\n"); 1087 result = EIO; 1088 goto out; 1089 } 1090 1091 if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) { 1092 device_printf(sc->sc_dev, 1093 "failed to get length of rpci response data\n"); 1094 result = EIO; 1095 goto out; 1096 } 1097 1098 if (rlen > 0) { 1099 if (rlen >= VMT_RPC_BUFLEN) { 1100 rlen = VMT_RPC_BUFLEN - 1; 1101 } 1102 1103 if (vm_rpc_get_data(&rpci, sc->sc_rpc_buf, rlen, ack) != 0) { 1104 device_printf(sc->sc_dev, 1105 "failed to get rpci response data\n"); 1106 result = EIO; 1107 goto out; 1108 } 1109 } 1110 1111out: 1112 if (vm_rpc_close(&rpci) != 0) { 1113 device_printf(sc->sc_dev, "unable to close rpci channel\n"); 1114 } 1115 1116 return result; 1117} 1118 1119static int 1120vm_rpc_send_rpci_tx(struct vmt_softc *sc, const char *fmt, ...) 1121{ 1122 va_list args; 1123 int len; 1124 1125 va_start(args, fmt); 1126 len = vsnprintf(sc->sc_rpc_buf, VMT_RPC_BUFLEN, fmt, args); 1127 va_end(args); 1128 1129 if (len >= VMT_RPC_BUFLEN) { 1130 device_printf(sc->sc_dev, 1131 "rpci command didn't fit in buffer\n"); 1132 return EIO; 1133 } 1134 1135 return vm_rpc_send_rpci_tx_buf(sc, sc->sc_rpc_buf, len); 1136} 1137 1138#if 0 1139 struct vm_backdoor frame; 1140 1141 memset(&frame, 0, sizeof(frame)); 1142 1143 frame.eax = VM_MAGIC; 1144 frame.ecx = VM_CMD_GET_VERSION; 1145 frame.edx = VM_PORT_CMD; 1146 1147 printf("\n"); 1148 printf("eax %#"PRIxREGISTER"\n", frame.eax); 1149 printf("ebx %#"PRIxREGISTER"\n", frame.ebx); 1150 printf("ecx %#"PRIxREGISTER"\n", frame.ecx); 1151 printf("edx %#"PRIxREGISTER"\n", frame.edx) 1152 printf("ebp %#"PRIxREGISTER"\n", frame.ebp); 1153 printf("edi %#"PRIxREGISTER"\n", frame.edi); 1154 printf("esi %#"PRIxREGISTER"\n", frame.esi); 1155 1156 vm_cmd(&frame); 1157 1158 printf("-\n"); 1159 printf("eax %#"PRIxREGISTER"\n", frame.eax); 1160 printf("ebx %#"PRIxREGISTER"\n", frame.ebx); 1161 printf("ecx %#"PRIxREGISTER"\n", frame.ecx); 1162 
printf("edx %#"PRIxREGISTER"\n", frame.edx); 1163 printf("ebp %#"PRIxREGISTER"\n", frame.ebp); 1164 printf("edi %#"PRIxREGISTER"\n", frame.edi); 1165 printf("esi %#"PRIxREGISTER"\n", frame.esi); 1166#endif 1167 1168/* 1169 * Notes on tracing backdoor activity in vmware-guestd: 1170 * 1171 * - Find the addresses of the inl / rep insb / rep outsb 1172 * instructions used to perform backdoor operations. 1173 * One way to do this is to disassemble vmware-guestd: 1174 * 1175 * $ objdump -S /emul/freebsd/sbin/vmware-guestd > vmware-guestd.S 1176 * 1177 * and search for '<tab>in ' in the resulting file. The rep insb and 1178 * rep outsb code is directly below that. 1179 * 1180 * - Run vmware-guestd under gdb, setting up breakpoints as follows: 1181 * (the addresses shown here are the ones from VMware-server-1.0.10-203137, 1182 * the last version that actually works in FreeBSD emulation on OpenBSD) 1183 * 1184 * break *0x805497b (address of 'in' instruction) 1185 * commands 1 1186 * silent 1187 * echo INOUT\n 1188 * print/x $ecx 1189 * print/x $ebx 1190 * print/x $edx 1191 * continue 1192 * end 1193 * break *0x805497c (address of instruction after 'in') 1194 * commands 2 1195 * silent 1196 * echo ===\n 1197 * print/x $ecx 1198 * print/x $ebx 1199 * print/x $edx 1200 * echo \n 1201 * continue 1202 * end 1203 * break *0x80549b7 (address of instruction before 'rep insb') 1204 * commands 3 1205 * silent 1206 * set variable $inaddr = $edi 1207 * set variable $incount = $ecx 1208 * continue 1209 * end 1210 * break *0x80549ba (address of instruction after 'rep insb') 1211 * commands 4 1212 * silent 1213 * echo IN\n 1214 * print $incount 1215 * x/s $inaddr 1216 * echo \n 1217 * continue 1218 * end 1219 * break *0x80549fb (address of instruction before 'rep outsb') 1220 * commands 5 1221 * silent 1222 * echo OUT\n 1223 * print $ecx 1224 * x/s $esi 1225 * echo \n 1226 * continue 1227 * end 1228 * 1229 * This will produce a log of the backdoor operations, including the 1230 * data 
sent and received and the relevant register values. You can then 1231 * match the register values to the various constants in this file. 1232 */ 1233