/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * @OSF_COPYRIGHT@
 */

/*
 * Copyright 2013, winocm. <winocm@icloud.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 *   Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 *
 *   Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 *   If you are going to use this software in any form that does not involve
 *   releasing the source to this project or improving it, let me know beforehand.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine routines
 */

#include <arm/machine_routines.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_affinity.h>
#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>

#include <vm/pmap.h>

#include <arm/trap.h>
#include <mach/vm_param.h>
#include <arm/pmap.h>

#include <pexpert/arm/boot.h>

#include <arm/cpuid.h>

#define DBG(x...)	kprintf("DBG: " x)

uint32_t MutexSpin;
uint32_t LockTimeOut;

/**
 * ml_io_map
 *
 * Map a range of memory-mapped I/O space to a virtual address.
 */
vm_offset_t ml_io_map(vm_offset_t phys_addr, vm_size_t size)
{
    return (io_map(phys_addr, size, VM_WIMG_IO));
}
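
/*
 * Example (illustrative only; the physical base address below is a
 * hypothetical placeholder, not a real device): a driver would map its
 * register window once, then access registers through the returned VA.
 *
 *     vm_offset_t regs = ml_io_map(0x20001000, PAGE_SIZE);
 *     if (regs != 0)
 *         ... access device registers through 'regs' ...
 */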

/**
 * ml_static_malloc
 *
 * Unused.
 */
vm_offset_t ml_static_malloc(__unused vm_size_t size)
{
    return ((vm_offset_t) NULL);
}

/**
 * ml_vtophys
 *
 * Shim for kvtophys.
 */
vm_offset_t ml_vtophys(vm_offset_t vaddr)
{
    return (vm_offset_t) kvtophys(vaddr);
}

/**
 * ml_stack_remaining
 *
 * Return the number of bytes remaining on the current kernel stack.
 */
vm_offset_t ml_stack_remaining(void)
{
    uintptr_t local = (uintptr_t)&local;

    if (ml_at_interrupt_context() != 0) {
        return (local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE));
    } else {
        return (local - current_thread()->kernel_stack);
    }
}
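
/*
 * Example (illustrative; the threshold is an arbitrary placeholder):
 * code about to consume a large amount of stack can bail out early when
 * the current stack is nearly exhausted.
 *
 *     if (ml_stack_remaining() < 1024)
 *         panic("about to overflow the kernel stack");
 */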

/**
 * ml_init_interrupt
 *
 * Enable interrupts on the current CPU.
 */
void ml_init_interrupt(void)
{
    (void) ml_set_interrupts_enabled(TRUE);
}

/**
 * ml_cpu_up
 *
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void ml_cpu_up(void)
{
    (void) hw_atomic_add(&machine_info.physical_cpu, 1);
    (void) hw_atomic_add(&machine_info.logical_cpu, 1);
    return;
}

/**
 * ml_cpu_down
 *
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void ml_cpu_down(void)
{
    (void) hw_atomic_sub(&machine_info.physical_cpu, 1);
    (void) hw_atomic_sub(&machine_info.logical_cpu, 1);
    return;
}

/**
 * ml_cpu_cache_size
 *
 * Get the memory size for the level requested:
 * 0 == RAM, 1 == L1 cache, 2 == L2 cache, etc.
 */
uint64_t ml_cpu_cache_size(unsigned int level)
{
    if (level == 0) {
        return machine_info.max_mem;
    } else if (1 <= level && level <= MAX_CACHE_DEPTH) {
        return arm_processor_id.cache_levels[level - 1].size;
    } else {
        return 0;
    }
}
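
/*
 * Example (illustrative): report the L1 cache size during bring-up.
 *
 *     kprintf("L1 cache: %llu bytes\n", ml_cpu_cache_size(1));
 */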

/**
 * ovbcopy
 *
 * Overlapped bcopy.
 */
void ovbcopy(void *from, void *to, vm_size_t bytes)
{
    bcopy(from, to, bytes);
}

/**
 * bzero_phys
 *
 * Zero a range of physical memory.
 */
void bzero_phys(addr64_t src64, uint32_t bytes)
{
#ifndef __LP64__
    bzero(phys_to_virt((uint32_t) src64), bytes);
#else
    bzero(phys_to_virt((uint64_t) src64), bytes);
#endif
}

/**
 * bcopy_phys
 *
 * Copy a range of bytes from one physical address to another.
 */
void bcopy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes)
{
#ifndef __LP64__
    bcopy(phys_to_virt((uint32_t) src64), phys_to_virt((uint32_t) dst64), bytes);
#else
    bcopy(phys_to_virt((uint64_t) src64), phys_to_virt((uint64_t) dst64), bytes);
#endif
    return;
}

/**
 * ml_init_lock_timeout
 *
 * Initialize the spinlock timeout and mutex spin window, honoring the
 * 'slto_us' and 'mtxspin' boot arguments when present.
 */
void ml_init_lock_timeout(void)
{
    uint64_t abstime;
    uint32_t mtxspin;
    uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
    uint32_t slto;

    if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto)))
        default_timeout_ns = slto * NSEC_PER_USEC;

    /*
     * LockTimeOut is in absolute-time units.
     */
    nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
    LockTimeOut = (uint32_t) abstime;

    if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
        if (mtxspin > USEC_PER_SEC >> 4)
            mtxspin = USEC_PER_SEC >> 4;
        nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
    } else {
        nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
    }

    MutexSpin = (uint32_t) abstime;
}
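
/*
 * Both values can be tuned from the boot command line; for example
 *
 *     slto_us=500000 mtxspin=100
 *
 * sets a 500 ms spinlock timeout and a 100 us mutex spin window (the
 * spin window is clamped to USEC_PER_SEC >> 4 above).
 */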

/**
 * ml_get_max_cpus
 *
 * Return the maximum number of CPUs previously set by ml_init_max_cpus().
 */
int ml_get_max_cpus(void)
{
    return (machine_info.max_cpus);
}

void ml_init_max_cpus(unsigned long max_cpus)
{
    machine_info.max_cpus = max_cpus;
    machine_info.physical_cpu_max = max_cpus;
    machine_info.logical_cpu_max = max_cpus;
    return;
}

void ml_install_interrupt_handler(void *nub, int source, void *target,
                                  IOInterruptHandler handler, void *refCon)
{
    boolean_t current_state;
    cpu_data_t *datap;

    current_state = ml_get_interrupts_enabled();

    datap = current_cpu_datap();
    assert(datap);
    datap->nub = nub;
    datap->target = target;
    datap->handler = handler;
    datap->refCon = refCon;

    (void) ml_set_interrupts_enabled(current_state);

    initialize_screen(NULL, kPEAcquireScreen);
}

/**
 * ml_processor_register
 *
 * Register a processor with the system and add it to the master list,
 * then initialize and bring up the CPU. Currently a stub that reports
 * success without doing either.
 */
kern_return_t ml_processor_register(cpu_id_t cpu_id,
                                    processor_t * processor_out,
                                    ipi_handler_t * ipi_handler)
{
    return KERN_SUCCESS;
}

/*
 * Stubbed.
 */
void ml_thread_policy(__unused thread_t thread, __unused unsigned policy_id,
                      __unused unsigned policy_info)
{

}

int ml_get_max_affinity_sets(void)
{
    return 1;
}

processor_set_t ml_affinity_to_pset(uint32_t affinity_num)
{
    return processor_pset(master_processor);
}

vm_offset_t ml_static_ptovirt(vm_offset_t paddr)
{
    return phys_to_virt(paddr);
}

void ml_get_power_state(boolean_t * icp, boolean_t * pidlep)
{
    *icp = ml_at_interrupt_context();
    *pidlep = FALSE;
}

void machine_track_platform_idle(boolean_t entry)
{
    return;
}

void ml_static_mfree(vm_offset_t vaddr, vm_size_t size)
{
    addr64_t vaddr_cur;
    ppnum_t ppn;
    uint32_t freed_pages = 0;

    assert(vaddr >= VM_MIN_KERNEL_ADDRESS);
    assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */

    /* Disconnect all mappings for the pages. */
    for (vaddr_cur = vaddr;
         vaddr_cur < round_page_64(vaddr+size);
         vaddr_cur += PAGE_SIZE) {
        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (ppnum_t) 0) {
            /*
             * pmap_remove() below decrements resident_count, so bump it
             * first to keep the statistics balanced.
             */
            kernel_pmap->pm_stats.resident_count++;
            if (kernel_pmap->pm_stats.resident_count >
                kernel_pmap->pm_stats.resident_max) {
                kernel_pmap->pm_stats.resident_max =
                    kernel_pmap->pm_stats.resident_count;
            }
            pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
            freed_pages++;
        }
    }

#if DEBUG
    kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size: 0x%llx, last ppn: 0x%x\n",
            freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}

/*
 *	kvtophys(addr)
 *
 *	Convert a kernel virtual address to a physical address.
 */
addr64_t kvtophys(vm_offset_t addr)
{
    pmap_paddr_t pa;
    pa = ((pmap_paddr_t) pmap_extract(kernel_pmap, addr));
    return (addr64_t) pa;
}
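
/*
 * Example (illustrative; 'buf' is a placeholder for some kernel-mapped
 * buffer): translate a kernel VA so it can be handed to a routine that
 * operates on physical addresses.
 *
 *     addr64_t pa = kvtophys((vm_offset_t) buf);
 *     if (pa != 0)
 *         bzero_phys(pa, PAGE_SIZE);
 */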

/*
 *	Routine:        ml_nofault_copy
 *	Function:	Perform a physical mode copy if the source and
 *			destination have valid translations in the kernel pmap.
 *			If translations are present, they are assumed to
 *			be wired; i.e. no attempt is made to guarantee that the
 *			translations obtained remain valid for
 *			the duration of the copy process.
 */

vm_size_t ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
    addr64_t cur_phys_dst, cur_phys_src;
    uint32_t count, nbytes = 0;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc)))
            break;
        if (!(cur_phys_dst = kvtophys(virtdst)))
            break;
        if (!pmap_valid_page(atop(cur_phys_dst))
            || !pmap_valid_page(atop(cur_phys_src)))
            break;
        /* Copy at most up to the end of whichever page ends sooner. */
        count = (uint32_t) (PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
            count = (uint32_t) (PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
        if (count > size)
            count = (uint32_t) size;

        bcopy_phys(cur_phys_src, cur_phys_dst, count);

        nbytes += count;
        virtsrc += count;
        virtdst += count;
        size -= count;
    }

    return nbytes;
}
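
/*
 * Example (illustrative; src, dst and len are placeholders): since
 * ml_nofault_copy() returns the number of bytes actually copied, callers
 * must check for a short copy.
 *
 *     if (ml_nofault_copy(src, dst, len) != len)
 *         return KERN_FAILURE;
 */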

boolean_t ml_thread_is64bit(thread_t thread)
{
    return FALSE;
}

/*
 *  Routine:        ml_cpu_get_info
 *  Function:       Fill in an ml_cpu_info_t with the cache geometry
 *                  recorded in arm_processor_id.
 */
extern int arm_pdcache_line_size, arm_pdcache_size, arm_picache_size;

void ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
    if (ml_cpu_info == NULL)
        return;

    ml_cpu_info->vector_unit = 1;

    ml_cpu_info->cache_line_size = arm_processor_id.cache_levels[0].linesize;

    ml_cpu_info->l1_icache_size = arm_processor_id.cache_levels[0].size;
    ml_cpu_info->l1_dcache_size = arm_processor_id.cache_levels[0].size;

    ml_cpu_info->l2_settings = (arm_processor_id.cache_levels[1].size > 0) ? 1 : 0;
    ml_cpu_info->l2_cache_size = (arm_processor_id.cache_levels[1].size > 0) ? arm_processor_id.cache_levels[1].size : 0xFFFFFFFF;

    ml_cpu_info->l3_settings = 0;
    ml_cpu_info->l3_cache_size = 0xFFFFFFFF;

    return;
}
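
/*
 * Example (illustrative): querying the cache geometry from other kernel
 * code.
 *
 *     ml_cpu_info_t info;
 *     ml_cpu_get_info(&info);
 *     kprintf("cache line size: %lu\n", (unsigned long) info.cache_line_size);
 */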

boolean_t ml_delay_should_spin(uint64_t interval)
{
    return FALSE;
}

/*
 * ml_at_interrupt_context
 *
 * Return TRUE if the current CPU is servicing an interrupt.
 */
boolean_t ml_at_interrupt_context(void)
{
    boolean_t ret;
    cpu_data_t* datap = current_cpu_datap();
    assert(datap);

    ret = (datap->cpu_interrupt_level) ? TRUE : FALSE;

    return(ret);
}
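
/*
 * Example (illustrative): routines that may block should assert that
 * they are not running at interrupt context.
 *
 *     assert(!ml_at_interrupt_context());
 */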

/**
 * ml_cpu_cache_sharing
 *
 * Return the number of logical CPUs sharing the cache at the given
 * level; this is a uniprocessor configuration, so always 1.
 */
uint64_t ml_cpu_cache_sharing(unsigned int level)
{
    return 1;
}

/*
 * ml_phys_read_* / ml_phys_write_*.
 *
 * Each expansion of this macro emits a read/write accessor pair for one
 * access width. Both accessors go through a volatile pointer so the
 * compiler cannot cache or elide the physical access.
 */
#define ml_phys_read_write_comb_gen(ret, cast, type, suffix)        \
    ret ml_phys_read ##suffix (type paddr) {                        \
        return (ret)(*(volatile cast*)(phys_to_virt(paddr)));       \
    }                                                               \
    void ml_phys_write ##suffix (type paddr, ret data) {            \
        (*(volatile cast*)(phys_to_virt(paddr))) = (cast)data;      \
    }
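
/*
 * As a concrete illustration of the macro above, the '_byte'
 * instantiation below expands to the following pair of accessors:
 *
 *     unsigned int ml_phys_read_byte(vm_offset_t paddr) {
 *         return (unsigned int)(*(volatile unsigned char*)(phys_to_virt(paddr)));
 *     }
 *     void ml_phys_write_byte(vm_offset_t paddr, unsigned int data) {
 *         (*(volatile unsigned char*)(phys_to_virt(paddr))) = (unsigned char)data;
 *     }
 */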

ml_phys_read_write_comb_gen(unsigned int, unsigned char, vm_offset_t, _byte)
ml_phys_read_write_comb_gen(unsigned int, unsigned char, addr64_t, _byte_64)

ml_phys_read_write_comb_gen(unsigned int, unsigned short, vm_offset_t, _half)
ml_phys_read_write_comb_gen(unsigned int, unsigned short, addr64_t, _half_64)

ml_phys_read_write_comb_gen(unsigned int, unsigned int, vm_offset_t,)
ml_phys_read_write_comb_gen(unsigned int, unsigned int, addr64_t, _64)
ml_phys_read_write_comb_gen(unsigned int, unsigned int, vm_offset_t, _word)
ml_phys_read_write_comb_gen(unsigned int, unsigned int, addr64_t, _word_64)

ml_phys_read_write_comb_gen(unsigned long long, unsigned long long, vm_offset_t, _double)
ml_phys_read_write_comb_gen(unsigned long long, unsigned long long, addr64_t, _double_64)