/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/amd64/vmm/amd/vmcb.c 336190 2018-07-11 07:19:42Z araujo $");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "vmm_ktr.h"

#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"

/*
 * The VMCB (Virtual Machine Control Block) is a 4KB-aligned page in
 * memory that describes the virtual machine.
 *
 * The VMCB contains:
 * - instructions or events in the guest to intercept
 * - control bits that modify the execution environment of the guest
 * - guest processor state (e.g. general purpose registers)
 */
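
/*
 * In this file the VMCB is accessed through 'struct vmcb' (vmcb.h), which
 * mirrors the hardware layout: a control area ('struct vmcb_ctrl') followed
 * by the guest state save area ('struct vmcb_state').
 */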

/*
 * Return a pointer to the VMCB segment that corresponds to 'type'.
 */
static struct vmcb_segment *
vmcb_segptr(struct vmcb *vmcb, int type)
{
	struct vmcb_state *state;
	struct vmcb_segment *seg;

	state = &vmcb->state;

	switch (type) {
	case VM_REG_GUEST_CS:
		seg = &state->cs;
		break;

	case VM_REG_GUEST_DS:
		seg = &state->ds;
		break;

	case VM_REG_GUEST_ES:
		seg = &state->es;
		break;

	case VM_REG_GUEST_FS:
		seg = &state->fs;
		break;

	case VM_REG_GUEST_GS:
		seg = &state->gs;
		break;

	case VM_REG_GUEST_SS:
		seg = &state->ss;
		break;

	case VM_REG_GUEST_GDTR:
		seg = &state->gdt;
		break;

	case VM_REG_GUEST_IDTR:
		seg = &state->idt;
		break;

	case VM_REG_GUEST_LDTR:
		seg = &state->ldt;
		break;

	case VM_REG_GUEST_TR:
		seg = &state->tr;
		break;

	default:
		seg = NULL;
		break;
	}

	return (seg);
}

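/*
 * Read or write an arbitrary VMCB field. The 'ident' encodes a raw byte
 * offset and access width within the VMCB (see VMCB_ACCESS_OFFSET() and
 * VMCB_ACCESS_BYTES()). A write invalidates all VMCB state cached by the
 * hardware.
 */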
static int
vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
	uint64_t *val)
{
	struct vmcb *vmcb;
	int off, bytes;
	char *ptr;

	vmcb	= svm_get_vmcb(softc, vcpu);
	off	= VMCB_ACCESS_OFFSET(ident);
	bytes	= VMCB_ACCESS_BYTES(ident);

	if ((off + bytes) >= sizeof (struct vmcb))
		return (EINVAL);

	ptr = (char *)vmcb;

	if (!write)
		*val = 0;

	switch (bytes) {
	case 8:
	case 4:
	case 2:
		if (write)
			memcpy(ptr + off, val, bytes);
		else
			memcpy(val, ptr + off, bytes);
		break;
	default:
		VCPU_CTR1(softc->vm, vcpu,
		    "Invalid size %d for VMCB access", bytes);
		return (EINVAL);
	}

	/* Invalidate all VMCB state cached by h/w. */
	if (write)
		svm_set_dirty(softc, vcpu, 0xffffffff);

	return (0);
}

/*
 * Read a segment selector, control register or general purpose register
 * from the VMCB.
 */
int
vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err;

	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;
	err = 0;

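	/* Raw offset/width accesses bypass the VM_REG_* mapping below. */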
	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(sc, vcpu, 0, ident, retval));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		*retval = state->cr0;
		break;

	case VM_REG_GUEST_CR2:
		*retval = state->cr2;
		break;

	case VM_REG_GUEST_CR3:
		*retval = state->cr3;
		break;

	case VM_REG_GUEST_CR4:
		*retval = state->cr4;
		break;

	case VM_REG_GUEST_DR6:
		*retval = state->dr6;
		break;

	case VM_REG_GUEST_DR7:
		*retval = state->dr7;
		break;

	case VM_REG_GUEST_EFER:
		*retval = state->efer;
		break;

	case VM_REG_GUEST_RAX:
		*retval = state->rax;
		break;

	case VM_REG_GUEST_RFLAGS:
		*retval = state->rflags;
		break;

	case VM_REG_GUEST_RIP:
		*retval = state->rip;
		break;

	case VM_REG_GUEST_RSP:
		*retval = state->rsp;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		*retval = seg->selector;
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}

	return (err);
}

/*
 * Write a segment selector, control register or general purpose register
 * to the VMCB.
 */
int
vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err, dirtyseg;

	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;
	dirtyseg = 0;
	err = 0;

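	/* As in vmcb_read(), raw offset/width accesses are handled separately. */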
	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(sc, vcpu, 1, ident, &val));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		state->cr0 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR2:
		state->cr2 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
		break;

	case VM_REG_GUEST_CR3:
		state->cr3 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR4:
		state->cr4 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_DR6:
		state->dr6 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_DR7:
		state->dr7 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_EFER:
		/* EFER_SVM must always be set when the guest is executing */
		state->efer = val | EFER_SVM;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_RAX:
		state->rax = val;
		break;

	case VM_REG_GUEST_RFLAGS:
		state->rflags = val;
		break;

	case VM_REG_GUEST_RIP:
		state->rip = val;
		break;

	case VM_REG_GUEST_RSP:
		state->rsp = val;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		dirtyseg = 1;		/* FALLTHROUGH */
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		seg->selector = val;
		if (dirtyseg)
			svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}

	return (err);
}

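/*
 * Copy the VMCB segment for 'ident' into 'seg2'; returns EINVAL if 'ident'
 * does not correspond to a VMCB segment.
 */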
int
vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
{
	struct vmcb_segment *seg;

	seg = vmcb_segptr(vmcb, ident);
	if (seg != NULL) {
		bcopy(seg, seg2, sizeof(struct vmcb_segment));
		return (0);
	} else {
		return (EINVAL);
	}
}

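/*
 * Install the segment descriptor 'desc' into the VMCB segment for 'reg',
 * converting the seg_desc access bits into the VMCB attribute format.
 */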
int
vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;
	uint16_t attrib;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	seg->base = desc->base;
	seg->limit = desc->limit;
	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/*
		 * Map seg_desc access to VMCB attribute format.
		 *
		 * SVM uses the 'P' bit in the segment attributes to indicate a
		 * NULL segment so clear it if the segment is marked unusable.
		 */
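		/*
		 * Illustrative example: a flat 64-bit code segment with a
		 * seg_desc access value of 0xA09B (G=1, L=1, P=1, DPL=0,
		 * type 0xB) maps to a VMCB attrib of 0xA9B.
		 */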
		attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
		if (SEG_DESC_UNUSABLE(desc->access)) {
			attrib &= ~0x80;
		}
		seg->attrib = attrib;
	}

	VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
	    "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);

	switch (reg) {
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		break;
	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
		break;
	default:
		break;
	}

	return (0);
}

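/*
 * Fill in 'desc' from the VMCB segment for 'reg', converting the VMCB
 * attribute bits back into the seg_desc access format expected by the
 * processor-independent code.
 */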
int
vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);
	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	desc->base = seg->base;
	desc->limit = seg->limit;
	desc->access = 0;

	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/* Map the VMCB segment attributes to the seg_desc access format */
		desc->access = ((seg->attrib & 0xF00) << 4) |
		    (seg->attrib & 0xFF);

		/*
		 * VT-x uses bit 16 to indicate a segment that has been loaded
		 * with a NULL selector (aka unusable). The 'desc->access'
		 * field is interpreted in the VT-x format by the
		 * processor-independent code.
		 *
		 * SVM uses the 'P' bit to convey the same information so
		 * convert it into the VT-x format. For more details refer to
		 * section "Segment State in the VMCB" in APMv2.
		 */
		if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
			if ((desc->access & 0x80) == 0)
				desc->access |= 0x10000;  /* Unusable segment */
		}
	}

	return (0);
}