1/*
2 * Copyright 2013, winocm. <winocm@icloud.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 *   Redistributions of source code must retain the above copyright notice, this
9 *   list of conditions and the following disclaimer.
10 *
11 *   Redistributions in binary form must reproduce the above copyright notice, this
12 *   list of conditions and the following disclaimer in the documentation and/or
13 *   other materials provided with the distribution.
14 *
15 *   If you are going to use this software in any form that does not involve
16 *   releasing the source to this project or improving it, let me know beforehand.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
25 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29/*
30 * ARM physical memory map.
31 *
32 * This is painful.
33 */
34
35#include <mach_debug.h>
36#include <debug.h>
37#include <mach/vm_types.h>
38#include <mach/vm_param.h>
39#include <mach/thread_status.h>
40#include <kern/misc_protos.h>
41#include <kern/assert.h>
42#include <kern/cpu_number.h>
43#include <kern/thread.h>
44#include <arm/pmap.h>
45#include <arm/misc_protos.h>
46#include <kern/ledger.h>
47#include <kern/zalloc.h>
48#include <kern/lock.h>
49#include <kern/kalloc.h>
50#include <vm/vm_protos.h>
51#include <vm/vm_map.h>
52#include <vm/vm_kern.h>
53#include <mach/vm_param.h>
54#include <mach/vm_prot.h>
55#include <vm/vm_object.h>
56#include <vm/vm_page.h>
57#include <arm/cpu_capabilities.h>
58
//#ifndef DEBUG_PMAP
/*
 * NOTE(review): the DEBUG_PMAP guard above is commented out, so this
 * variadic macro unconditionally compiles every kprintf() call in this
 * file down to nothing.  Re-enable the #ifndef/#endif pair to restore
 * debug output.
 */
#define kprintf(args...)
//#endif

/*
 * Kernel's physical memory map.
 */

/* Pages still available for pmap_next_page() to hand out at boot. */
static pmap_paddr_t avail_remaining;
/* Bootstrap list of free L1 translation tables. */
vm_offset_t free_l1_tables;

/* Per-mapping attribute bits stored in pv_entry.attr. */
typedef enum {
    ATTR_NONE   = 0x0,
    ATTR_READ   = 0x1,
    ATTR_WRITE  = 0x2,
    ATTR_WIRED  = 0x4,
} attr_bits_t;

#define PV_ENTRY_NULL   ((pv_entry_t) 0)

/* Physical address -> index into pv_head_table (pages above gPhysBase). */
#define pa_index(pa)    (atop(pa - gPhysBase))
/* Index -> head of the pv_entry chain for that physical page. */
#define pai_to_pvh(pai)        (&pv_head_table[pai])
81
/* Counters of page-table pages currently in use / ever allocated. */
unsigned int	inuse_ptepages_count = 0;
unsigned int 	alloc_ptepages_count = 0;

/* The kernel's pmap lives in static storage so it exists before zalloc. */
struct pmap    kernel_pmap_store;
pmap_t kernel_pmap = &kernel_pmap_store;

struct zone    *pmap_zone;  /* zone of pmap structures */
int         free_pmap_count;    /* length of free_pmap_list */
pmap_t      free_pmap_list;     /* cache of retired pmaps */

/* Statically-allocated VM object backing pmap allocations. */
static struct vm_object pmap_object_store;
vm_object_t pmap_object;

/* Boot-time physical memory bounds provided by the platform layer. */
extern uint64_t first_avail, avail_end;
extern uint64_t ram_begin;

/* Kernel virtual range handed to the VM by pmap_virtual_space(). */
vm_offset_t virt_begin, virt_end;

/*
 * Physical-to-virtual mapping record: one chain of these per physical
 * page, describing every (pmap, va) pair that maps the page.
 */
typedef struct pv_entry {
    struct pv_entry *next;              /* next pv_entry */
    pmap_t          pmap;               /* pmap where mapping lies */
    vm_offset_t     va;                 /* virtual address for mapping */
    uint32_t        attr;               /* protection bits for a page */
} pv_entry, *pv_entry_t;

pv_entry_t    pv_head_table;        /* array of entries, one per page */

/* Set by pmap_init() once stage-2 initialization has completed. */
boolean_t pmap_initialized = FALSE;
110
111/*
112 * We raise the interrupt level to splvm, to block interprocessor
113 * interrupts during pmap operations.  We must take the CPU out of
114 * the cpus_active set while interrupts are blocked.
115 */
116#define SPLVM(spl)    { \
117    spl = splhigh(); \
118}
119
120#define SPLX(spl)    { \
121    splx(spl); \
122}
123
124/*
125 * Lock on pmap system
126 */
127lock_t pmap_system_lock;
128
129#define PMAP_READ_LOCK(pmap, spl) {    \
130    SPLVM(spl);            \
131    lock_read(&pmap_system_lock);    \
132    simple_lock(&(pmap)->lock);    \
133}
134
135#define PMAP_WRITE_LOCK(spl) {        \
136    SPLVM(spl);            \
137    lock_write(&pmap_system_lock);    \
138}
139
140#define PMAP_READ_UNLOCK(pmap, spl) {        \
141    simple_unlock(&(pmap)->lock);        \
142    lock_read_done(&pmap_system_lock);    \
143    SPLX(spl);                \
144}
145
146#define PMAP_WRITE_UNLOCK(spl) {        \
147    lock_write_done(&pmap_system_lock);    \
148    SPLX(spl);                \
149}
150
151#define PMAP_WRITE_TO_READ_LOCK(pmap) {        \
152    simple_lock(&(pmap)->lock);        \
153    lock_write_to_read(&pmap_system_lock);    \
154}
155
156
157/**
158 * pmap_common_init
159 *
160 * Initialize common elements of pmaps.
161 */
162void pmap_common_init(pmap_t pmap)
163{
164    usimple_lock_init(&pmap->lock, 0);
165    ledger_reference(pmap->ledger);
166    pmap->pm_refcnt = 1;
167}
168
169/**
170 * pmap_virtual_space
171 *
172 * Get virtual space parameters.
173 */
174void
175pmap_virtual_space(vm_offset_t *startp,
176                   vm_offset_t *endp)
177{
178    *startp = virt_begin;
179    *endp = virt_end;
180    kprintf("pmap_virtual_space: region 0x%08x-0x%08x\n", virt_begin, virt_end);
181}
182
183/**
184 * pmap_static_init
185 *
186 * Initialize the basic kernel pmap.
187 */
188void pmap_static_init(void)
189{
190    kdb_printf("pmap_static_init: Bootstrapping pmap\n");
191
192    return;
193}
194
195/**
196 * pmap_next_page_hi
197 *
198 * Allocate physical pages.
199 */
200boolean_t pmap_next_page_hi(ppnum_t * pnum)
201{
202    return pmap_next_page(pnum);
203}
204
205/**
206 * pmap_next_page
207 *
208 * Allocate physical pages.
209 */
210boolean_t pmap_next_page(ppnum_t *addrp)
211{
212    if(first_avail >= avail_end) {
213        kprintf("pmap_next_page: ran out of possible pages, last page was 0x%08x", first_avail);
214        return FALSE;
215    }
216
217    *addrp = first_avail;
218    first_avail += PAGE_SIZE;
219    avail_remaining--;
220
221    return TRUE;
222}
223
224/**
225 * pmap_reference
226 *
227 * Increment reference count of the specified pmap.
228 */
229void
230pmap_reference(pmap_t pmap)
231{
232    if (pmap != PMAP_NULL)
233        (void)hw_atomic_add(&pmap->pm_refcnt, 1); /* Bump the count */
234}
235
236/**
237 * pmap_verify_free
238 *
239 * Look at the page and verify that it is free.
240 */
241boolean_t pmap_verify_free(ppnum_t pa) {
242    pv_entry_t  pv_h;
243    int         pai;
244    boolean_t   result;
245
246    assert(pa != vm_page_fictitious_addr);
247
248    if(!pmap_initialized)
249        return TRUE;
250
251    if(!pmap_valid_page(pa))
252        return TRUE;
253
254    pai = pa_index(pa);
255    pv_h = pai_to_pvh(pai);
256
257    result = (pv_h->pmap == PMAP_NULL);
258
259    return TRUE;    /* result, since pmap_remove is not done yet */
260}
261
262/**
263 * pmap_free_pages
264 *
265 * Free pages. Is bad.
266 */
267unsigned int pmap_free_pages(void)
268{
269    return avail_remaining;
270}
271
272/**
273 * pmap_valid_page
274 */
275boolean_t
276pmap_valid_page(ppnum_t x)
277{
278    return ((ram_begin <= x) && (x < avail_end));
279}
280
281/**
282 * pmap_is_noencrypt/pmap_clear_noencrypt/whatever.
283 *
284 * Not my problem right now.
285 */
286boolean_t
287pmap_is_noencrypt(ppnum_t pn)
288{
289    return (FALSE);
290}
291
292void
293pmap_clear_noencrypt(ppnum_t pn)
294{
295    return;
296}
297
298void
299pmap_set_noencrypt(ppnum_t pn)
300{
301    return;
302}
303
304/**
305 * pmap_map
306 *
307 * Map specified virtual address range to a physical one.
308 */
309vm_offset_t
310pmap_map(vm_offset_t virt,
311         vm_map_offset_t start_addr,
312         vm_map_offset_t end_addr,
313         vm_prot_t prot,
314         unsigned int flags)
315{
316    int        ps;
317
318    ps = PAGE_SIZE;
319    while (start_addr < end_addr) {
320        pmap_enter(kernel_pmap, (vm_map_offset_t)virt, (start_addr), prot, flags, FALSE, TRUE);
321        virt += ps;
322        start_addr += ps;
323    }
324    return(virt);
325}
326
327
328/**
329 * io_map
330 *
331 * Maps an IO region and returns its virtual address.
332 */
333vm_offset_t
334io_map(vm_offset_t phys_addr, vm_size_t size, unsigned int flags)
335{
336	vm_offset_t	start;
337
338	if (kernel_map == VM_MAP_NULL) {
339	    /*
340	     * VM is not initialized.  Grab memory.
341	     */
342	    start = virt_begin;
343	    virt_begin += round_page(size);
344
345	    (void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size),
346			       VM_PROT_READ|VM_PROT_WRITE,
347			       flags);
348	}
349	else {
350	    (void) kmem_alloc_pageable(kernel_map, &start, round_page(size));
351	    (void) pmap_map(start, phys_addr, phys_addr + round_page(size),
352			    VM_PROT_READ|VM_PROT_WRITE,
353			    flags);
354	}
355
356	return (start);
357}
358
359vm_offset_t io_map_spec(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags)
360{
361  return (io_map(phys_addr, size, flags));
362}
363
364void
365compute_pmap_gc_throttle(void *arg __unused)
366{
367
368}
369
370/**
371 * pmap_init
372 *
373 * Stage 2 initialization of the pmap subsystem.
374 */
375
376void
377pmap_init(void)
378{
379    kprintf("pmap_init: I am hell. Love me.\n");
380
381    free_pmap_list = NULL;                    /* Set that there are no free pmaps */
382    free_pmap_count = 0;
383
384    pmap_zone = zinit((sizeof(struct pmap)), 400 * (sizeof(struct pmap)), 4096, "pmap");
385
386    pmap_object = &pmap_object_store;
387    _vm_object_allocate(mem_size, &pmap_object_store);
388
389    pmap_initialized = TRUE;
390
391    return;
392}
393
394unsigned int pmap_disconnect(ppnum_t pa)
395{
396    pmap_page_protect(pa, 0);
397    return 0;
398}
399
400void
401pmap_pageable(__unused pmap_t pmap,
402              __unused vm_map_offset_t start,
403              __unused vm_map_offset_t end,
404              __unused boolean_t pageable)
405{
406    return;
407}
408
/* Pre-allocate mapping structures at boot: nothing to prime here. */
void mapping_free_prime(void)
{
}
413
414/*
415 * wat.
416 */
417void mapping_adjust(void) {
418    return;
419}
420
421void
422pmap_remove(pmap_t map,
423            vm_map_offset_t s,
424            vm_map_offset_t e)
425{
426    return;
427}
428
429/**
430 * pmap_destroy
431 *
432 * Destroy the current pmap and all mappings inside of it.
433 */
434void
435pmap_destroy(pmap_t pmap)
436{
437    int refcount;
438
439    assert(pmap != NULL);
440
441    PMAP_LOCK(pmap);
442    refcount = --(pmap->pm_refcnt);
443    PMAP_UNLOCK(pmap);
444
445    if(refcount) {
446        /* Still in use! */
447        return;
448    }
449
450    ledger_dereference(pmap->ledger);
451    zfree(pmap_zone, pmap);
452    return;
453}
454
455/**
456 * pmap_resident_count
457 *
458 * Return the number of resident pages in a specified pmap.
459 */
460int pmap_resident_count(pmap_t pmap)
461{
462    assert(pmap);
463    return (pmap)->pm_stats.resident_count;
464}
465
466/**
467 * pmap_zero_page
468 *
469 * Zero a physical page.
470 */
471void pmap_zero_page(ppnum_t p)
472{
473    assert(p != vm_page_fictitious_addr);
474    assert(pmap_valid_page(p));
475    bzero(phys_to_virt(p), PAGE_SIZE);
476}
477
478/**
479 * ohai.
480 */
481void pmap_clear_refmod(ppnum_t pn, unsigned int mask)
482{
483    return;
484}
485
/* Minimum/maximum size of a nested (shared-region) pmap: 0x8000000 = 128 MB. */
uint64_t pmap_nesting_size_min = 0x8000000;
uint64_t pmap_nesting_size_max = 0x8000000;
488
489void
490pmap_protect(
491	pmap_t		map,
492	vm_map_offset_t	sva,
493	vm_map_offset_t	eva,
494	vm_prot_t	prot)
495{
496	return;
497}
498
499/**
500 * pmap_copy_page
501 *
502 * Copy a specified page to another page.
503 */
504void
505pmap_copy_page(
506	ppnum_t src,
507	ppnum_t dst)
508{
509	assert(src != vm_page_fictitious_addr);
510	assert(dst != vm_page_fictitious_addr);
511
512	memcpy((void *)phystokv(dst), (void *)phystokv(src), PAGE_SIZE);
513}
514
515int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space)
516{
517    return 0;
518}
519