1/*
2 * Copyright 2013, winocm. <winocm@icloud.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 *   Redistributions of source code must retain the above copyright notice, this
9 *   list of conditions and the following disclaimer.
10 *
11 *   Redistributions in binary form must reproduce the above copyright notice, this
12 *   list of conditions and the following disclaimer in the documentation and/or
13 *   other materials provided with the distribution.
14 *
15 *   If you are going to use this software in any form that does not involve
16 *   releasing the source to this project or improving it, let me know beforehand.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
25 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#ifndef _ARM_PMAP_H_
31#define _ARM_PMAP_H_
32
33#include <vm/pmap.h>
34#include <mach/kern_return.h>
35#include <mach/machine/vm_types.h>
36#include <mach/vm_prot.h>
37#include <mach/vm_statistics.h>
38#include <mach/machine/vm_param.h>
39#include <kern/kern_types.h>
40#include <kern/thread.h>
41#include <kern/lock.h>
42#include <kern/ledger.h>
43
44#ifdef MACH_KERNEL_PRIVATE
45#include <pexpert/arm/boot.h>
46
47#include <kern/queue.h>
48#include <vm/vm_page.h>
49
/*
 * MMU memory-attribute encodings (ARMv6/v7 TEX[2:0] + C + B bits).
 *
 * Each MMU_TEXCB_* value packs TEX in bits [4:2] and C/B in bits [1:0].
 * mmu_texcb_small() relocates that packed value into the field positions
 * of an L2 small-page descriptor: TEX -> bits [8:6], C -> bit 3, B -> bit 2.
 */
#define mmu_texcb_small(x) ((((x) & 0x1c) << 4) | (((x) & 3) << 2))

#define MMU_TEXCB_ORDERED 0     /* strongly-ordered */
#define MMU_TEXCB_SH_DEVICE 1   /* shareable device memory */
#define MMU_TEXCB_WT_NWA 2      /* write-through, no write-allocate */
#define MMU_TEXCB_WB_NWA 3      /* write-back, no write-allocate */
#define MMU_TEXCB_NCA 4         /* outer and inner non-cacheable */
#define MMU_TEXCB_WB_WA 7       /* write-back, write-allocate */
#define MMU_TEXCB_NSH_DEVICE 8  /* non-shareable device memory */
// cacheable memory -[outer]_[inner]
/* With TEX[2] set (MMU_TEXCB_CA), TEX[1:0] select the outer policy and
 * C/B select the inner policy, using the MMU_TEXCB_CA_* sub-encodings. */
#define MMU_TEXCB_CA 0x10
#define MMU_TEXCB_CA_NCA 0      /* non-cacheable */
#define MMU_TEXCB_CA_WB_WA 1    /* write-back, write-allocate */
#define MMU_TEXCB_CA_WT_NWA 2   /* write-through, no write-allocate */
#define MMU_TEXCB_CA_WB_NWA 3   /* write-back, no write-allocate */

/* Composite attributes: inner policy in C/B, outer policy shifted to TEX[1:0]. */
#define MMU_CODE (MMU_TEXCB_CA | MMU_TEXCB_CA_WT_NWA | (MMU_TEXCB_CA_WT_NWA<<2))
#define MMU_DATA (MMU_TEXCB_CA | MMU_TEXCB_CA_WB_WA | (MMU_TEXCB_CA_WB_WA<<2))
#define MMU_DMA (MMU_TEXCB_CA | MMU_TEXCB_CA_WT_NWA | (MMU_TEXCB_CA_WT_NWA<<2))
#define MMU_DEVICE_SHARED MMU_TEXCB_SH_DEVICE
#define MMU_DEVICE_NSHARED MMU_TEXCB_NSH_DEVICE
71
72/*
73 * pmap locking
74 */
75
76#if 0
77#define PMAP_LOCK(pmap) {		     \
78    simple_lock(&(pmap)->lock);	    \
79}
80
81#define PMAP_UNLOCK(pmap) {			      \
82    simple_unlock(&(pmap)->lock);		  \
83}
84#endif
85
86#define l2_size(size) ((uint32_t)((size >> 20) << 10))
87
/* Bootstrap the ARM VM layer using the boot loader's argument structure;
 * mem_limit optionally caps usable physical memory (0 = no cap — TODO confirm
 * against the implementation). */
void arm_vm_init(uint32_t mem_limit, boot_args * args);

typedef uint32_t pd_entry_t;    /* L1 table entry */
typedef uint32_t pt_entry_t;    /* L2 table entry */

#pragma pack(4)                 /* Make sure the structure stays as we defined it */
/* new pmap struct */
typedef uint32_t paddr_t;       /* Physical address */
typedef uint32_t vaddr_t;       /* Virtual address */
97
/*
 * Physical map: one per address space.  Holds the hardware L1
 * translation table (physical and virtual addresses), its backing
 * L2 tables, and VM-layer bookkeeping.  Layout is packed to 4 bytes
 * via the #pragma pack(4) above.
 */
struct pmap {
    paddr_t pm_l1_phys;         /* L1 table address */
    vaddr_t pm_l1_virt;         /* L1 virtual table address */
    vaddr_t pm_l2_cache;        /* L2 page tables */
     decl_simple_lock_data(, lock)  /* lock on map */
    int pm_refcnt;              /* pmap reference count */
    ledger_t ledger;            /* self ledger */
    boolean_t pm_shared;        /* nested pmap? */
    int pm_nx;                  /* protection for pmap */
    task_map_t pm_task_map;     /* process task map */
    struct pmap_statistics pm_stats;    /* resident/wired page statistics */
    uint32_t pm_l1_size;        /* size in bytes of the L1 table */
    uint32_t pm_asid;           /* Old... */
    vm_object_t pm_obj;         /* VM object backing the page tables */
#define PMAP_ASID_MAX_CPUS  (48)  /* Must be a multiple of 8 */
    asid_t pmap_asid_cpus[PMAP_ASID_MAX_CPUS];  /* per-CPU ASID assignments */
    volatile uint8_t pmap_asid_coherency_vector[PMAP_ASID_MAX_CPUS];    /* per-CPU TLB-coherency flags */
};
116
/*
 * Software view of an ARM L1 (first-level) descriptor.
 * NOTE(review): relies on the compiler allocating bitfields from the
 * least-significant bit upward (GCC/Clang little-endian ARM behavior);
 * field order mirrors the hardware coarse-table descriptor layout.
 */
typedef struct arm_l1_entry_t {
    uint32_t is_coarse:1;       /* Is it a coarse page/section descriptor? */
    uint32_t is_section:1;      /* Section (1MB) descriptor type bit. */
    uint32_t bufferable:1;      /* Zero on coarse. */
    uint32_t cacheable:1;       /* Zero on coarse. */
    uint32_t sbz:1;             /* Should be zero. */
    uint32_t domain:4;          /* Domain entry */
    uint32_t ecc:1;             /* P-bit */
    uint32_t pfn:22;            /* Coarse table base address, bits [31:10]. */
} arm_l1_entry_t;
127
/*
 * Software view of an ARM L2 (second-level) small-page descriptor.
 * Same bitfield-allocation caveat as arm_l1_entry_t: fields are laid
 * out LSB-first to mirror the hardware descriptor.
 */
typedef struct arm_l2_entry_t {
    uint32_t nx:1;              /* 1 on 64kB pages, not supported. */
    uint32_t valid:1;           /* 0 on 64kB pages, not supported. */
    uint32_t bufferable:1;      /* B bit. */
    uint32_t cacheable:1;       /* C bit. */
    uint32_t ap:2;              /* Access permissions AP[1:0]. */
    uint32_t tex:3;             /* TEX[2:0] memory-type bits. */
    uint32_t apx:1;             /* APX (read-only qualifier). */
    uint32_t shareable:1;       /* S bit. */
    uint32_t non_global:1;      /* nG bit (ASID-tagged when set). */
    uint32_t pfn:20;            /* Page frame base address, bits [31:12]. */
} arm_l2_entry_t;
140
/* L1 descriptor: view either as bitfields (.l1) or as the raw 32-bit word. */
typedef struct arm_l1_t {
    union {
        arm_l1_entry_t l1;
        uint32_t ulong;
    };
} arm_l1_t;

/* L2 descriptor: view either as bitfields (.l2) or as the raw 32-bit word. */
typedef struct arm_l2_t {
    union {
        arm_l2_entry_t l2;
        uint32_t ulong;
    };
} arm_l2_t;
#pragma pack()
155
/*
 * Switch the CPU onto a new user address space and record it as the
 * thread's current map.  Wrapped in do/while(0) so it expands as a
 * single statement (safe in an unbraced if/else); arguments are
 * parenthesized and my_cpu is intentionally unused on ARM.
 */
#define	PMAP_SWITCH_USER(th, map, my_cpu) do {	\
	pmap_switch((map)->pmap);		\
	(th)->map = (map);			\
} while (0)

/* True when VA falls within the kernel's virtual address range. */
#define pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= vm_last_addr))
160
#define SUPERPAGE_NBASEPAGES 0  /* superpages are not supported by this pmap */

/* Cache-attribute request flags passed to mapping routines. */
#define PMAP_DEFAULT_CACHE	0
#define PMAP_INHIBIT_CACHE	1
#define PMAP_GUARDED_CACHE	2
#define PMAP_ACTIVATE_CACHE	4
#define PMAP_NO_GUARD_CACHE	8

/* WIMG (write-through/inhibit/memory-coherent/guarded) combinations. */
/* corresponds to cached, coherent, not writethru, not guarded */
#define VM_WIMG_DEFAULT		(VM_MEM_COHERENT)
#define	VM_WIMG_COPYBACK	(VM_MEM_COHERENT)
#define VM_WIMG_IO		(VM_MEM_COHERENT | 	\
				VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
#define	VM_WIMG_INNERWBACK	VM_MEM_COHERENT

#define L1SHIFT       20        /* log2 of the 1MB L1 section size */
180
181/*
182 * prototypes.
183 */
184
185/*
186 * "these two macros are your friends"
187 */
188
189#define align_up(p, s) align_down((uintptr_t)(p)+s-1, s)
190#define align_down(p, s) ((uintptr_t)(p)&~(s-1))
191
192#define virt_to_phys(p) ((unsigned int)((((unsigned long)(p)) - gVirtBase) + gPhysBase))
193#define phys_to_virt(p) ((unsigned int)((((unsigned long)(p)) - gPhysBase) + gVirtBase))
194
195#define L1_SIZE 0x4000          /* 16kb: covers 2048*2 1MB sections */
196#define L2_SIZE 0x400           /* 1kb: covers 256 4kb sections */
197
198#define tte_offset(addr) (((addr >> 0x14) & 0xfff) << 2)
199#define pte_offset(addr) (((addr & ~(L1_SECT_ADDR_MASK)) >> PAGE_SHIFT) << 2)
200#define addr_to_tte(base, addr) (base + tte_offset(addr))
201
#define L1_PTE_ADDR_MASK 0xfffffc00 /* Bits [31:10] */
#define L1_SECT_ADDR_MASK 0xfff00000    /* Bits [31:20] */

/* Coarse-table base address held in an L1 entry (argument parenthesized
 * so compound expressions expand correctly). */
#define L1_PTE_ADDR(tte) ((tte) & L1_PTE_ADDR_MASK)

#define L1_TYPE_MASK 3          /* two least bits */

/* L1 descriptor type encodings (bits [1:0]). */
#define L1_TYPE_FAULT 0
#define L1_TYPE_PTE 1
#define L1_TYPE_SECT 2
#define L1_TYPE_RESERVED 3

/* Small-page frame address held in an L2 entry. */
#define L2_ADDR_MASK 0xfffff000
#define L2_ADDR(pte) ((pte) & L2_ADDR_MASK)

/* AP/APX access-permission values pre-shifted into descriptor position. */
#define L2_ACCESS_NONE 0x0
#define L2_ACCESS_PRW 0x10
#define L2_ACCESS_PRO 0x210

#define L2_ACCESS_USER      (1 << 5)
#define L2_ACCESS_APX       (1 << 9)

/* True when the L1 entry points to a coarse L2 page table. */
#define tte_is_page_table(tte) (((tte) & L1_TYPE_MASK) == L1_TYPE_PTE)

/* L2 small-page descriptor bits. */
#define L2_SMALL_PAGE 0x2
#define L2_NX_BIT 0x1           /* XN bit */
#define L2_C_BIT 0x8            /* C bit */
#define L2_B_BIT 0x4            /* B bit */
#define L2_S_BIT 0x400          /* S bit */
#define L2_NG_BIT 0x800         /* nG bit */
232
/* VA -> PA translation for kernel virtual addresses. */
extern addr64_t kvtophys(vm_offset_t va);   /* Get physical address from kernel virtual */
extern vm_map_offset_t kvtophys64(vm_map_offset_t va);  /* Get 64-bit physical address from kernel virtual */

/* Map [start, end) into kernel VA at 'virt' with the given protection/flags;
 * pmap_map_bd is the boot-device (early boot) variant. */
extern vm_offset_t pmap_map(vm_offset_t virt, vm_map_offset_t start,
                            vm_map_offset_t end, vm_prot_t prot,
                            unsigned int flags);

extern boolean_t pmap_map_bd(vm_offset_t virt, vm_map_offset_t start,
                             vm_map_offset_t end, vm_prot_t prot,
                             unsigned int flags);

/* Register additional physical memory [spa, epa) with the pmap layer. */
extern kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
                                              boolean_t available,
                                              unsigned int attr);
/* Bootstrap-time initialization; updates *first_avail past consumed memory. */
extern void pmap_bootstrap(uint64_t msize, vm_offset_t * first_avail,
                           unsigned int kmapsize);

extern vm_offset_t pmap_get_phys(pmap_t pmap, void *virt);

/* Reserve 'size' bytes of boot-time virtual address space. */
extern vm_offset_t pmap_boot_map(vm_size_t size);

/* Cache maintenance: sync/flush/invalidate by VA or PA ('phys' selects). */
extern void sync_cache64(addr64_t pa, unsigned length);
extern void sync_ppage(ppnum_t pa);
extern void sync_cache_virtual(vm_offset_t va, unsigned length);
extern void flush_dcache(vm_offset_t va, unsigned length, boolean_t phys);
extern void flush_dcache64(addr64_t va, unsigned length, boolean_t phys);
extern void invalidate_dcache(vm_offset_t va, unsigned length, boolean_t phys);
extern void invalidate_dcache64(addr64_t va, unsigned length, boolean_t phys);
extern void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys);
extern void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys);

/* Block mappings; the _rc variant returns a result code instead of panicking
 * on failure — TODO confirm against the implementation. */
extern void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size,
                           vm_prot_t prot, int attr, unsigned int flags);
extern int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa,
                             uint32_t size, vm_prot_t prot, int attr,
                             unsigned int flags);

/* Lookup the physical page number mapped at va (0 if none). */
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void MapUserMemoryWindowInit(void);
extern addr64_t MapUserMemoryWindow(vm_map_t map, addr64_t va);
extern boolean_t pmap_eligible_for_execute(ppnum_t pa);
extern int pmap_list_resident_pages(struct pmap *pmap, vm_offset_t * listp,
                                    int space);
extern void pmap_init_sharedpage(vm_offset_t cpg);
/* Clear no-execute enforcement for the given pmap. */
extern void pmap_disable_NX(pmap_t pmap);

extern boolean_t pmap_valid_page(ppnum_t pn);
/* Release the pmap's L1 translation table. */
extern void pmap_deallocate_l1(pmap_t pmap);
/* Return the PTE address for 'virt' in 'pmap'. */
extern vm_offset_t pmap_pte(pmap_t pmap, vm_offset_t virt);
/* Fake-zone accounting hooks for page-table memory. */
extern void pt_fake_zone_init(int);
extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *,
                              vm_size_t *, uint64_t *, int *, int *, int *);

extern void pmap_create_sharedpage(void);
285
/* Not required for arm: */
/* 4GB-pagezero management is an x86_64-ism; these stubs keep
 * machine-independent callers linking on ARM and intentionally do nothing. */
static inline void pmap_set_4GB_pagezero(__unused pmap_t pmap)
{
}

static inline void pmap_clear_4GB_pagezero(__unused pmap_t pmap)
{
}
294
/* A contiguous physical memory region tracked by the pmap layer. */
typedef struct mem_region {
    vm_offset_t start;          /* Address of base of region */
    struct phys_entry *phys_table;  /* base of region's table */
    unsigned int end;           /* End address+1 */
} mem_region_t;

typedef uint32_t pmap_paddr_t;  /* 32-bit physical address */

/* Table of known physical memory regions and its populated count. */
#define PMAP_MEM_REGION_MAX 26
extern mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX];
extern int pmap_mem_regions_count;

/* Shared pmap initialization helpers. */
void pmap_common_init(pmap_t pmap);
void pmap_static_init(void);

/* Early L2 mapping helpers: map [phys_start, phys_end) using coarse tables
 * allocated from pa_cache_start (cached and uncached variants). */
void l2_map_linear_range(uint32_t pa_cache_start, uint32_t phys_start,
                         uint32_t phys_end);
void l2_map_linear_range_no_cache(uint32_t pa_cache_start, uint32_t phys_start,
                                  uint32_t phys_end);
/* Wire an L2 table region at 'va' into the L1 table at 'tteb';
 * 'zero' requests zero-filling — TODO confirm against the implementation. */
void l2_cache_to_range(uint32_t pa_cache_start, uint32_t va, uint32_t tteb,
                       uint32_t size, int zero);
316#endif
317
318#endif
319