/*	$NetBSD: x86_xpmap.c,v 1.67 2016/11/15 17:01:12 maxv Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.67 2016/11/15 17:01:12 maxv Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK2(x) /* printk x */
static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK2(x)
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;
kmutex_t pte_lock;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);

vaddr_t xen_locore(void);

/*
 * kcpuset internally uses an array of uint32_t, while Xen uses an array
 * of u_long. As x86 is little-endian, we can safely cast one to the
 * other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;

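/*
 * Usage sketch (this is the pattern the multicast flush routines below
 * follow):
 *
 *	xcpumask_t xcpumask;
 *	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
 *	op.arg2.vcpumask = &xcpumask.xcpum_xm;
 *
 * The export fills in the uint32_t view; the hypercall then reads the
 * same bytes through the u_long view, which matches on little-endian
 * x86.
 */
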
void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!");
}

void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

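/*
 * Note on xen_set_ldt(): clearing PG_RW on the backing pages above is
 * required because Xen refuses to activate an LDT whose pages are
 * mapped writable anywhere; descriptor tables must be read-only from
 * the guest's point of view.
 */
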
#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

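/*
 * MMU update queue. A PV guest cannot write its page tables directly;
 * updates are accumulated per-CPU in these arrays and submitted in a
 * single HYPERVISOR_mmu_update hypercall, either when a queue fills up
 * or when a caller explicitly flushes it.
 */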
#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

#ifdef i386
extern union descriptor tmpgdt[];
#endif

void
xpq_flush_queue(void)
{
	int i, ok = 0, ret;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

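	/*
	 * On failure the hypervisor has still processed "ok" entries of
	 * the batch: skip those and retry the remainder. If no progress
	 * was made at all, dump the queues and panic below.
	 */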
retry:
	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		printf("xpq_flush_queue: %d entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, ok, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (ok != 0) {
			xpq_queue += ok;
			xpq_idx -= ok;
			ok = 0;
			goto retry;
		}

		for (CPU_INFO_FOREACH(cii, ci)) {
			xpq_queue = xpq_queue_array[ci->ci_cpuid];
			xpq_idx = xpq_idx_array[ci->ci_cpuid];
			printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
			for (i = 0; i < xpq_idx; i++) {
				printf("  0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
				   xpq_queue[i].ptr, xpq_queue[i].val);
			}
#ifdef __x86_64__
			for (i = 0; i < PDIR_SLOT_PTE; i++) {
				if (ci->ci_kpm_pdir[i] == 0)
					continue;
				printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
				    i, ci->ci_kpm_pdir[i]);
			}
#endif
		}
		panic("HYPERVISOR_mmu_update failed, ret: %d", ret);
	}
	xpq_idx_array[curcpu()->ci_cpuid] = 0;
}

static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

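/*
 * Usage sketch: callers pass the *machine* address of the PTE slot plus
 * the new value, typically derived from the slot's kernel VA in the
 * style seen later in this file:
 *
 *	xpq_queue_pte_update(xpmap_ptom(((vaddr_t)ptep) - KERNBASE), npte);
 *	xpq_flush_queue();
 *
 * ("ptep"/"npte" are placeholder names; this assumes ptep lies in the
 * VA == PA + KERNBASE region.)
 */
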
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

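/*
 * Note: "lvl" is passed straight through as the mmuext command. In the
 * Xen interface MMUEXT_PIN_L1_TABLE is 0 and the levels count up from
 * there, hence the "lvl + 1" in the debug printout. The
 * xpq_queue_pin_l{2,3,4}_table() calls used elsewhere in this file are
 * expected to be thin wrappers around this function.
 */
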
void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	int s = splvm();

	xpq_flush_queue();

	XENPRINTK2(("xpq_flush_cache\n"));
	asm("wbinvd" ::: "memory");
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, kcpuset_t *kc)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Truncate to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, kc);
	}
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Truncate to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}
}

/* Copy a page */
void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_COPY_PAGE;
	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic(__func__);
	}
}

/* Zero a physical page */
void
xen_pagezero(paddr_t pa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_CLEAR_PAGE;
	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic(__func__);
	}
}

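/*
 * Both operations above are carried out entirely inside the hypervisor,
 * so the guest does not need a writable (or indeed any) mapping of the
 * pages involved.
 */
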
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}

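/*
 * Unlike the queued updates above, this variant names an explicit
 * target domain and runs synchronously; it is the primitive used when
 * entering mappings of another domain's pages (e.g. via privcmd in
 * dom0).
 */
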
#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		/* Print the queue entries four per line. */
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		while (i + 1 < xpq_idx && (i + 1) % 4 != 0) {
			i++;
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    " %" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		}
		XENPRINTK2(("%d: %s\n", i, XBUF));
	}
}
#endif


#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages, all of
 * them mapped by the L3 page. We also need a shadow page for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

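/*
 * Concretely, l2_4_count covers: on amd64, the per-CPU L4, the
 * pmap_kernel() shadow L4, the L3 and the L2 (4 pages); with PAE, the
 * L3 plus the 4 L2 pages and the L2 shadow for L3[3] (6 pages); on
 * plain i386, just the single L2 that acts as PGD (1 page).
 */
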
/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page
 * tables.
 */
vaddr_t
xen_locore(void)
{
	size_t count, oldcount, mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xen_init_features();

	memset(xpq_idx_array, 0, sizeof(xpq_idx_array));

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Space after the Xen bootstrap tables should be free */
	init_tables = xen_start_info.pt_base;
	bootstrap_tables = init_tables +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;

	/*
	 * After the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
	mapsize += PAGE_SIZE;
#endif
	mapsize += PAGE_SIZE;
	mapsize += PAGE_SIZE;
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (count << L2_SHIFT)) {
		count++;
	}

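	/*
	 * Each increment of "count" adds one PTE page, which maps NBPD_L2
	 * more bytes but must itself be mapped too; the loop above simply
	 * iterates to the fixed point where "count" PTE pages cover the
	 * kernel, the extra mappings and the tables themselves.
	 */
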
#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages. We may need more L2 pages than the final
	 * table will have, since the bootstrap tables are installed above the
	 * final ones.
	 */
	oldcount = count;

bootstrap_again:

	/*
	 * The space reclaimed from Xen may not be enough for our new page
	 * tables; move the bootstrap tables up if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(init_tables, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, true);

	/* Zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new set of page tables and switch to them.
 * old_count is the # of old tables (including PGD, PDTPE and PDE).
 * new_count is the # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    size_t new_count, bool final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, map_end;
	int i;
	extern char __rodata_start;
	extern char __data_start;
	extern char __kernel_end;
	extern char *early_zerop; /* from pmap.c */
	pt_entry_t pg_nx;
	u_int descs[4];

	/*
	 * Set the NX/XD bit, if available. descs[3] = %edx.
	 */
	x86_cpuid(0x80000001, descs);
	pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;

	/*
	 * Layout of the RW area after the kernel image:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + l2_4_count entries)
	 * Extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only) / GDT page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     early_zerop
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * PAGE_SIZE);
	if (final) {
		map_end += (UPAGES + 1) * PAGE_SIZE;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += PAGE_SIZE;
		early_zerop = (char *)map_end;
		map_end += PAGE_SIZE;
	}

	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final) {
		atdevbase = map_end;
#ifdef DOM0OPS
		if (xendomain_is_dom0()) {
			/* ISA I/O mem */
			map_end += IOM_SIZE;
		}
#endif
	}

	__PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	bt_pgd = (pd_entry_t *)new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;

#if PTP_LEVELS > 3
	/* Per-cpu L4 */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 */
	bt_pgd = (pd_entry_t *)avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install L3 */
	pdtpe = (pd_entry_t *)avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
#else
	pdtpe = bt_pgd;
#endif

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
#elif defined(PAE)
	/* Our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long)pde) - KERNBASE;

	/*
	 * Enter L2 pages in L3. The real L2 kernel PD will be the last one
	 * (so that pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want RW mappings in L3 entries, it'll add them
		 * itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
#else
	pde = bt_pgd;
#endif

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
				pte[pl1_pi(page)] |= pg_nx;
			}
#endif

			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < (vaddr_t)&__rodata_start) {
				/* Map the kernel text RX. */
				pte[pl1_pi(page)] |= PG_RO;
			} else if (page >= (vaddr_t)&__rodata_start &&
			    page < (vaddr_t)&__data_start) {
				/* Map the kernel rodata R. */
				pte[pl1_pi(page)] |= PG_RO | pg_nx;
			} else if (page >= old_pgd &&
			    page < old_pgd + (old_count * PAGE_SIZE)) {
				/* Map the old page tables R. */
				pte[pl1_pi(page)] |= PG_RO | pg_nx;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* Map the new page tables R. */
				pte[pl1_pi(page)] |= PG_RO | pg_nx;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map the bootstrap GDT R/O. Later, we will
				 * re-add this page to uvm after making it
				 * writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif
			} else if (page >= (vaddr_t)&__data_start &&
			    page < (vaddr_t)&__kernel_end) {
				/* Map the kernel data+bss RW. */
				pte[pl1_pi(page)] |= PG_RW | pg_nx;
			} else {
				/* Map the page RW. */
				pte[pl1_pi(page)] |= PG_RW | pg_nx;
			}

			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * We need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead, we enter
	 * the first 4 L2 pages, which includes the kernel's L2 shadow. But we
	 * have to enter the shadow after switching %cr3, or Xen will refcount
	 * some PTEs with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V |
		    pg_nx;
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
#if 0
		if (i == 2 || i == 3)
			continue;
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */

	/* Recursive entry in pmap_kernel(). */
	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
	    | PG_k | PG_RO | PG_V | pg_nx;
#ifdef __x86_64__
	/* Recursive entry in the higher-level per-cpu PD. */
	bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
	    | PG_k | PG_RO | PG_V | pg_nx;
#endif

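	/*
	 * With the recursive PDIR_SLOT_PTE entry, the page tables map
	 * themselves: indexing that slot at each level yields the VA of
	 * any PTE, which is what helpers like kvtopte() rely on. The
	 * entry is read-only since Xen owns the tables and disallows
	 * writable mappings of them.
	 */
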
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif

	/* Pin the PGD */
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif defined(PAE)
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save the physical address of the PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

#ifdef PAE
	if (final) {
		/* Save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* Now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* Save the address of the real per-cpu L4 page. */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
	}
#endif
	__USE(pdtpe);

	/*
	 * Now we can safely reclaim the space taken by the old tables.
	 */

	/* Unpin the old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));

	/* Mark the old tables RW */
	page = old_pgd;
	addr = xpmap_mtop((paddr_t)pde[pl2_pi(page)] & PG_FRAME);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page read-only.
 * XXX: assuming vaddr = paddr + KERNBASE
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */