1/*
2 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
5 *
6 * Alternatively, this software may be distributed under the terms of the
7 * GNU General Public License ("GPL") version 2 as published by the Free
8 * Software Foundation.
9 */
10
11#include <common.h>
12#include <log.h>
13#include <physmem.h>
14#include <asm/cpu.h>
15#include <asm/global_data.h>
16#include <linux/compiler.h>
17#include <linux/sizes.h>
18
19DECLARE_GLOBAL_DATA_PTR;
20
21/* Large pages are 2MB. */
22#define LARGE_PAGE_SIZE ((1 << 20) * 2)
23
24/*
25 * Paging data structures.
26 */
27
/*
 * PAE page-directory-pointer-table entry. Each present entry points to one
 * page directory (a 4KB-aligned pdt_t below).
 */
struct pdpe {
	uint64_t p:1;      /* present */
	uint64_t mbz_0:2;  /* must be zero */
	uint64_t pwt:1;    /* page-level writethrough */
	uint64_t pcd:1;    /* page-level cache disable */
	uint64_t mbz_1:4;  /* must be zero */
	uint64_t avl:3;    /* available to software */
	uint64_t base:40;  /* page directory base address >> 12 */
	uint64_t mbz_2:12; /* must be zero */
};

/* 512 8-byte entries fill one 4KB page; PAE itself uses only the first 4. */
typedef struct pdpe pdpt_t[512];
40
/*
 * Page-directory entry; used here only with ps=1, so each entry maps one
 * 2MB large page.
 */
struct pde {
	uint64_t p:1;      /* present */
	uint64_t rw:1;     /* read/write */
	uint64_t us:1;     /* user/supervisor */
	uint64_t pwt:1;    /* page-level writethrough */
	uint64_t pcd:1;    /* page-level cache disable */
	uint64_t a:1;      /* accessed */
	uint64_t d:1;      /* dirty */
	uint64_t ps:1;     /* page size (1 = 2MB large page) */
	uint64_t g:1;      /* global page */
	uint64_t avl:3;    /* available to software */
	uint64_t pat:1;    /* page-attribute table */
	uint64_t mbz_0:8;  /* must be zero */
	uint64_t base:31;  /* 2MB page base address >> 21 */
};

/* One page directory: 512 entries covering 1GB in 2MB steps. */
typedef struct pde pdt_t[512];
58
/* The page tables themselves: one PDPT and its four page directories,
 * statically allocated and page-aligned. */
static pdpt_t pdpt __aligned(4096);
static pdt_t pdts[4] __aligned(4096);
61
62/*
63 * Map a virtual address to a physical address and optionally invalidate any
64 * old mapping.
65 *
66 * @param virt		The virtual address to use.
67 * @param phys		The physical address to use.
68 * @param invlpg	Whether to use invlpg to clear any old mappings.
69 */
70static void x86_phys_map_page(uintptr_t virt, phys_addr_t phys, int invlpg)
71{
72	/* Extract the two bit PDPT index and the 9 bit PDT index. */
73	uintptr_t pdpt_idx = (virt >> 30) & 0x3;
74	uintptr_t pdt_idx = (virt >> 21) & 0x1ff;
75
76	/* Set up a handy pointer to the appropriate PDE. */
77	struct pde *pde = &(pdts[pdpt_idx][pdt_idx]);
78
79	memset(pde, 0, sizeof(struct pde));
80	pde->p = 1;
81	pde->rw = 1;
82	pde->us = 1;
83	pde->ps = 1;
84	pde->base = phys >> 21;
85
86	if (invlpg) {
87		/* Flush any stale mapping out of the TLBs. */
88		__asm__ __volatile__(
89			"invlpg %0\n\t"
90			:
91			: "m" (*(uint8_t *)virt)
92		);
93	}
94}
95
96/* Identity map the lower 4GB and turn on paging with PAE. */
97static void x86_phys_enter_paging(void)
98{
99	phys_addr_t page_addr;
100	unsigned i;
101
102	/* Zero out the page tables. */
103	memset(pdpt, 0, sizeof(pdpt));
104	memset(pdts, 0, sizeof(pdts));
105
106	/* Set up the PDPT. */
107	for (i = 0; i < ARRAY_SIZE(pdts); i++) {
108		pdpt[i].p = 1;
109		pdpt[i].base = ((uintptr_t)&pdts[i]) >> 12;
110	}
111
112	/* Identity map everything up to 4GB. */
113	for (page_addr = 0; page_addr < (1ULL << 32);
114			page_addr += LARGE_PAGE_SIZE) {
115		/* There's no reason to invalidate the TLB with paging off. */
116		x86_phys_map_page(page_addr, page_addr, 0);
117	}
118
119	cpu_enable_paging_pae((ulong)pdpt);
120}
121
/* Disable paging and PAE mode, undoing x86_phys_enter_paging(). */
static void x86_phys_exit_paging(void)
{
	cpu_disable_paging_pae();
}
127
128/*
129 * Set physical memory to a particular value when the whole region fits on one
130 * page.
131 *
132 * @param map_addr	The address that starts the physical page.
133 * @param offset	How far into that page to start setting a value.
134 * @param c		The value to set memory to.
135 * @param size		The size in bytes of the area to set.
136 */
137static void x86_phys_memset_page(phys_addr_t map_addr, uintptr_t offset, int c,
138				 unsigned size)
139{
140	/*
141	 * U-Boot should be far away from the beginning of memory, so that's a
142	 * good place to map our window on top of.
143	 */
144	const uintptr_t window = LARGE_PAGE_SIZE;
145
146	/* Make sure the window is below U-Boot. */
147	assert(window + LARGE_PAGE_SIZE <
148	       gd->relocaddr - CONFIG_SYS_MALLOC_LEN - SZ_32K);
149	/* Map the page into the window and then memset the appropriate part. */
150	x86_phys_map_page(window, map_addr, 1);
151	memset((void *)(window + offset), c, size);
152}
153
154/*
155 * A physical memory anologue to memset with matching parameters and return
156 * value.
157 */
158phys_addr_t arch_phys_memset(phys_addr_t start, int c, phys_size_t size)
159{
160	const phys_addr_t max_addr = (phys_addr_t)~(uintptr_t)0;
161	const phys_addr_t orig_start = start;
162
163	if (!size)
164		return orig_start;
165
166	/* Handle memory below 4GB. */
167	if (start <= max_addr) {
168		phys_size_t low_size = min(max_addr + 1 - start, size);
169		void *start_ptr = (void *)(uintptr_t)start;
170
171		assert(((phys_addr_t)(uintptr_t)start) == start);
172		memset(start_ptr, c, low_size);
173		start += low_size;
174		size -= low_size;
175	}
176
177	/* Use paging and PAE to handle memory above 4GB up to 64GB. */
178	if (size) {
179		phys_addr_t map_addr = start & ~(LARGE_PAGE_SIZE - 1);
180		phys_addr_t offset = start - map_addr;
181
182		x86_phys_enter_paging();
183
184		/* Handle the first partial page. */
185		if (offset) {
186			phys_addr_t end =
187				min(map_addr + LARGE_PAGE_SIZE, start + size);
188			phys_size_t cur_size = end - start;
189			x86_phys_memset_page(map_addr, offset, c, cur_size);
190			size -= cur_size;
191			map_addr += LARGE_PAGE_SIZE;
192		}
193		/* Handle the complete pages. */
194		while (size > LARGE_PAGE_SIZE) {
195			x86_phys_memset_page(map_addr, 0, c, LARGE_PAGE_SIZE);
196			size -= LARGE_PAGE_SIZE;
197			map_addr += LARGE_PAGE_SIZE;
198		}
199		/* Handle the last partial page. */
200		if (size)
201			x86_phys_memset_page(map_addr, 0, c, size);
202
203		x86_phys_exit_paging();
204	}
205	return orig_start;
206}
207