1/*
2 * Copyright 2014, General Dynamics C4 Systems
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7#pragma once
8
9#include <config.h>
10#include <stdint.h>
11#include <arch/types.h>
12#include <arch/object/structures.h>
13#include <arch/machine/hardware.h>
14#include <plat/machine/hardware.h>
15#include <armv/machine.h>
16#include <arch/model/smp.h>
17
18#include <machine/io.h>
19#include <mode/machine_pl2.h>
20#include <mode/hardware.h>
21#include <kernel/stack.h>
22
/* The new spec requires the use of vmsr/vmrs to access floating point
 * registers (including control registers).
 *
 * GCC will still accept the old MRC/MCR instructions but Clang will not.
 * Both result in the same encoding and are here only to satisfy compilers. */
#define VMRS(vfp_reg, v) asm volatile(".fpu vfp\n" \
                                      "vmrs %0, " vfp_reg : "=r"(v))
/* The local _v copy ensures the 'v' argument expression is evaluated exactly
 * once, avoiding the classic multiple-evaluation macro hazard. */
#define VMSR(vfp_reg, v)                                 \
    do {                                                 \
        word_t _v = v;                                   \
        asm volatile(".fpu vfp\n"                        \
                     "vmsr " vfp_reg ", %0" :: "r"(_v)); \
    } while(0)

/* VFP register names, spliced into the VMRS/VMSR asm strings above. */
#define FPEXC      "fpexc" /* 32-bit Floating-Point Exception Control register */
#define FPSCR      "fpscr" /* 32-bit Floating-Point Status and Control register */
40
/** Generic timer CP15 registers **/
/* These operand strings are spliced into MRC/MCR (32-bit accesses) or
 * MRRC/MCRR (64-bit accesses) inline assembly elsewhere.  The 64-bit
 * encodings use GCC's %Q0/%R0 operand modifiers to name the low and high
 * words of a 64-bit operand. */
#define CNTFRQ     " p15, 0,  %0, c14,  c0, 0" /* 32-bit RW Counter Frequency register */
#define CNTPCT     " p15, 0, %Q0, %R0, c14   " /* 64-bit RO Physical Count register */
#define CNTKCTL    " p15, 0,  %0, c14,  c1, 0" /* 32-bit RW Timer PL1 Control register */
#define CNTP_TVAL  " p15, 0,  %0, c14,  c2, 0" /* 32-bit RW PL1 Physical TimerValue register */
#define CNTP_CTL   " p15, 0,  %0, c14,  c2, 1" /* 32-bit RW PL1 Physical Timer Control register */
#define CNTV_TVAL  " p15, 0,  %0, c14,  c3, 0" /* 32-bit RW Virtual TimerValue register */
#define CNTV_CTL   " p15, 0,  %0, c14,  c3, 1" /* 32-bit RW Virtual Timer Control register */
#define CNTVCT     " p15, 1, %Q0, %R0, c14   " /* 64-bit RO Virtual Count register */
#define CNTP_CVAL  " p15, 2, %Q0, %R0, c14   " /* 64-bit RW PL1 Physical Timer CompareValue register */
#define CNTV_CVAL  " p15, 3, %Q0, %R0, c14   " /* 64-bit RW Virtual Timer CompareValue register */
#define CNTVOFF    " p15, 4, %Q0, %R0, c14   " /* 64-bit RW Virtual Offset register */
#define CNTHCTL    " p15, 4,  %0, c14,  c1, 0" /* 32-bit RW Timer PL2 Control register */
#define CNTHP_TVAL " p15, 4,  %0, c14,  c2, 0" /* 32-bit RW PL2 Physical TimerValue register */
#define CNTHP_CTL  " p15, 4,  %0, c14,  c2, 1" /* 32-bit RW PL2 Physical Timer Control register */
#define CNTHP_CVAL " p15, 6, %Q0, %R0, c14   " /* 64-bit RW PL2 Physical Timer CompareValue register */
#define PMUSERENR  " p15, 0,  %0,  c9, c14, 0" /* 32-bit RW PMU PL0 enable */
#define ID_DFR0    " p15, 0,  %0,  c0,  c1, 2" /* 32-bit RO Debug feature register */
#define ID_PFR1    " p15, 0,  %0,  c0,  c1, 1" /* 32-bit RO CPU feature register */
#define CPACR      " p15, 0,  %0,  c1,  c0, 2" /* 32-bit Architectural Feature Access Control Register */
#define VMPIDR     " p15, 4,  %0,  c0,  c0, 5" /* 32-bit RW Virtualization Multiprocessor ID Register */
62
/* Generic CNT_* aliases select which timer the kernel uses, depending on
 * whether it runs with the virtualization extensions enabled. */
/* Use Hypervisor Physical timer */
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
#define CNT_TVAL CNTHP_TVAL
#define CNT_CT   CNTPCT
#define CNT_CTL  CNTHP_CTL
#define CNT_CVAL CNTHP_CVAL
#else
/* Use virtual timer */
#define CNT_TVAL CNTV_TVAL
#define CNT_CT   CNTVCT
#define CNT_CTL  CNTV_CTL
#define CNT_CVAL CNTV_CVAL
#endif

#ifdef ENABLE_SMP_SUPPORT
/* Use the first two SGI (Software Generated Interrupt) IDs
 * for seL4 IPI implementation. SGIs are per-core banked.
 */
#define irq_remote_call_ipi        0
#define irq_reschedule_ipi         1
#endif /* ENABLE_SMP_SUPPORT */
84
85word_t PURE getRestartPC(tcb_t *thread);
86void setNextPC(tcb_t *thread, word_t v);
87
88/* Architecture specific machine operations */
89
90static inline word_t getProcessorID(void)
91{
92    word_t processor_id;
93    MRC("p15, 0, %0, c0, c0, 0", processor_id);
94    return processor_id;
95}
96
97
98static inline word_t readSystemControlRegister(void)
99{
100    word_t scr;
101    MRC("p15, 0, %0, c1, c0, 0", scr);
102    return scr;
103}
104
105
/* Write the System Control Register (SCTLR, CP15 c1/c0/0). */
static inline void writeSystemControlRegister(word_t scr)
{
    MCR("p15, 0, %0, c1, c0, 0", scr);
}
110
111
112static inline word_t readAuxiliaryControlRegister(void)
113{
114    word_t acr;
115    MRC("p15, 0, %0, c1, c0, 1", acr);
116    return acr;
117}
118
119
/* Write the Auxiliary Control Register (ACTLR, CP15 c1/c0/1). */
static inline void writeAuxiliaryControlRegister(word_t acr)
{
    MCR("p15, 0, %0, c1, c0, 1", acr);
}
124
/* Clear the local exclusive monitor by performing a STREX to a stack
 * temporary.  The value stored (whatever happens to be in r1) and whether
 * the store succeeds are irrelevant; only the monitor side effect matters. */
/** MODIFIES: [*] */
/** DONT_TRANSLATE */
static inline void clearExMonitor(void)
{
    word_t tmp;
    asm volatile("strex r0, r1, [%0]" : : "r"(&tmp) : "r0");
}
132
/* Invalidate the entire branch predictor array (BPIALL, CP15 c7/c5/6). */
static inline void flushBTAC(void)
{
    asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r"(0));
}
137
/* Set the current address-space context ID.  Without hypervisor extensions
 * this writes CONTEXTIDR (CP15 c13/c0/1) followed by an ISB so that the new
 * ID is visible to subsequent instructions; with hypervisor support the PL2
 * variant is used instead. */
static inline void writeContextID(word_t id)
{
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        writeContextIDPL2(id);
    } else {
        asm volatile("mcr p15, 0, %0, c13, c0, 1" : : "r"(id));
        isb();
    }
}
147
148/* Address space control */
149
150static inline word_t readTTBR0(void)
151{
152    word_t val = 0;
153    asm volatile("mrc p15, 0, %0, c2, c0, 0":"=r"(val):);
154    return val;
155}
156
/* Write Translation Table Base Register 0 (TTBR0, CP15 c2/c0/0). */
static inline void writeTTBR0(word_t val)
{
    asm volatile("mcr p15, 0, %0, c2, c0, 0":: "r"(val));
}
161
/* Install a page-directory physical address into TTBR0, applying the
 * kernel's fixed walk-cacheability attributes. */
static inline void writeTTBR0Ptr(paddr_t addr)
{
    /* Mask supplied address (retain top 19 bits).  Set the lookup cache bits:
     * outer write-back cacheable, no allocate on write, inner non-cacheable.
     */
    writeTTBR0((addr & 0xffffe000) | 0x18);
}
169
170static inline word_t readTTBR1(void)
171{
172    word_t val = 0;
173    asm volatile("mrc p15, 0, %0, c2, c0, 1":"=r"(val):);
174    return val;
175}
176
/* Write Translation Table Base Register 1 (TTBR1, CP15 c2/c0/1). */
static inline void writeTTBR1(word_t val)
{
    asm volatile("mcr p15, 0, %0, c2, c0, 1":: "r"(val));
}
181
182
183static inline word_t readTTBCR(void)
184{
185    word_t val = 0;
186    asm volatile("mrc p15, 0, %0, c2, c0, 2":"=r"(val):);
187    return val;
188}
189
/* Write the Translation Table Base Control Register (TTBCR, CP15 c2/c0/2). */
static inline void writeTTBCR(word_t val)
{
    asm volatile("mcr p15, 0, %0, c2, c0, 2":: "r"(val));
}
194
/* Set the PL0 read/write thread ID register (TPIDRURW, CP15 c13/c0/2).
 * With CONFIG_KERNEL_GLOBALS_FRAME the value lives in the user-visible
 * globals frame instead of the hardware register. */
static inline void writeTPIDRURW(word_t reg)
{
#ifdef CONFIG_KERNEL_GLOBALS_FRAME
    armKSGlobalsFrame[GLOBALS_TPIDRURW] = reg;
#else
    asm volatile("mcr p15, 0, %0, c13, c0, 2" :: "r"(reg));
#endif
}
203
/* Read the PL0 read/write thread ID register (TPIDRURW, CP15 c13/c0/2),
 * or its globals-frame shadow when CONFIG_KERNEL_GLOBALS_FRAME is set. */
static inline word_t readTPIDRURW(void)
{
#ifdef CONFIG_KERNEL_GLOBALS_FRAME
    return armKSGlobalsFrame[GLOBALS_TPIDRURW];
#else
    word_t reg;
    asm volatile("mrc p15, 0, %0, c13, c0, 2" : "=r"(reg));
    return reg;
#endif
}
214
/* Set the PL0 read-only thread ID register (TPIDRURO, CP15 c13/c0/3). */
static inline void writeTPIDRURO(word_t reg)
{
    asm volatile("mcr p15, 0, %0, c13, c0, 3" :: "r"(reg));
}
219
220static inline word_t readTPIDRURO(void)
221{
222    word_t reg;
223    asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r"(reg));
224    return reg;
225}
226
227
/* Set the PL1-only thread ID register (TPIDRPRW, CP15 c13/c0/4). */
static inline void writeTPIDRPRW(word_t reg)
{
    asm volatile("mcr p15, 0, %0, c13, c0, 4" :: "r"(reg));
}
232
233static inline word_t readTPIDRPRW(void)
234{
235    word_t reg;
236    asm volatile("mrc p15, 0, %0, c13, c0, 4" :"=r"(reg));
237    return reg;
238}
239
240static void arm_save_thread_id(tcb_t *thread)
241{
242#ifndef CONFIG_KERNEL_GLOBALS_FRAME
243    /* TPIDRURW is writeable from EL0 but not with globals frame. */
244    setRegister(thread, TPIDRURW, readTPIDRURW());
245    /* This register is read only from userlevel, but could still be updated
246     * if the thread is running in a higher priveleged level with a VCPU attached.
247     */
248    setRegister(thread, TPIDRURO, readTPIDRURO());
249#endif /* CONFIG_KERNEL_GLOBALS_FRAME */
250}
251
252static void arm_load_thread_id(tcb_t *thread)
253{
254    writeTPIDRURW(getRegister(thread, TPIDRURW));
255    writeTPIDRURO(getRegister(thread, TPIDRURO));
256}
257
258static inline word_t readMPIDR(void)
259{
260    word_t reg;
261    asm volatile("mrc p15, 0, %0, c0, c0, 5" : "=r"(reg));
262    return reg;
263}
264
/* Write the Domain Access Control Register (DACR, CP15 c3/c0/0). */
static inline void writeDACR(word_t reg)
{
    asm volatile("mcr p15, 0, %0, c3, c0, 0" :: "r"(reg));
}
269
270static inline word_t readDACR(void)
271{
272    word_t reg;
273    asm volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(reg));
274    return reg;
275}
276
/* Switch to a new page directory, with the barriers required for a safe
 * address-space change. */
static inline void setCurrentPD(paddr_t addr)
{
    /* Before changing the PD ensure all memory stores have completed */
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        setCurrentPDPL2(addr);
    } else {
        dsb();
        writeTTBR0Ptr(addr);
        /* Ensure the PD switch completes before we do anything else */
        isb();
    }
}
289
/* Record the kernel stack pointer for this core.  Not available on ARMv6,
 * which lacks the thread ID registers used as storage (see getKernelStack). */
static inline void setKernelStack(word_t stack_address)
{
#ifndef CONFIG_ARCH_ARM_V6
    /* Setup kernel stack pointer.
     * Load the (per-core) kernel stack pointer to TPIDRPRW for faster reloads on traps.
     */
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        writeHTPIDR(stack_address);
    } else {
        writeTPIDRPRW(stack_address);
    }
#endif /* CONFIG_ARCH_ARM_V6 */
}
303
/* Retrieve the kernel stack pointer stashed by setKernelStack.  On ARMv6
 * there is no register stash, so the top of the statically allocated stack
 * is computed directly (kernel_stack_alloc is declared in kernel/stack.h). */
static inline word_t getKernelStack(void)
{
#ifndef CONFIG_ARCH_ARM_V6
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        return readHTPIDR();
    } else {
        return readTPIDRPRW();
    }
#else
    return ((word_t) kernel_stack_alloc[0]) + BIT(CONFIG_KERNEL_STACK_BITS);
#endif /* CONFIG_ARCH_ARM_V6 */
}
316
#ifdef ENABLE_SMP_SUPPORT
/* Return this core's hardware CPU ID: the Aff0 field (low byte) of MPIDR. */
static inline word_t getHWCPUID(void)
{
    /* See ARM Reference Manual (ARMv7-A and ARMv7-R edition), Section B4.1.106
     * for more details about MPIDR register.
     */
    return readMPIDR() & 0xff;
}
#endif /* ENABLE_SMP_SUPPORT */
326
327/* TLB control */
328
/* Invalidate this core's entire unified TLB (TLBIALL, CP15 c8/c7/0).
 * The leading DSB orders prior page-table updates before the invalidate;
 * the trailing DSB+ISB ensure it completes before execution continues. */
static inline void invalidateLocalTLB(void)
{
    dsb();
    asm volatile("mcr p15, 0, %0, c8, c7, 0" : : "r"(0));
    dsb();
    isb();
}
336
/* Invalidate this core's TLB entries for one ASID (TLBIASID, CP15 c8/c7/2).
 * Under the hypervisor configuration the by-ASID operation is not used and
 * the whole TLB is invalidated instead. */
static inline void invalidateLocalTLB_ASID(hw_asid_t hw_asid)
{
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        invalidateLocalTLB();
    } else {
        dsb();
        asm volatile("mcr p15, 0, %0, c8, c7, 2" : : "r"(hw_asid));
        dsb();
        isb();
    }
}
348
/* Invalidate this core's TLB entry for one MVA+ASID pair (TLBIMVA,
 * CP15 c8/c7/1).  Falls back to a full invalidate under the hypervisor
 * configuration, matching invalidateLocalTLB_ASID. */
static inline void invalidateLocalTLB_VAASID(word_t mva_plus_asid)
{
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        invalidateLocalTLB();
    } else {
        dsb();
        asm volatile("mcr p15, 0, %0, c8, c7, 1" : : "r"(mva_plus_asid));
        dsb();
        isb();
    }
}
360
361void lockTLBEntry(vptr_t vaddr);
362
/* D-Cache clean by MVA to PoC (DCCMVAC, CP15 c7/c10/1).
 * The paddr argument is unused here; it is part of the common cache-op
 * interface shared with the other by-VA operations below. */
static inline void cleanByVA(vptr_t vaddr, paddr_t paddr)
{
#ifdef CONFIG_ARM_CORTEX_A8
    /* Erratum 586324 -- perform a dummy cached load before flushing. */
    asm volatile("ldr r0, [sp]" : : : "r0");
    /* Erratum 586320 -- clean twice with interrupts disabled. */
    asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r"(vaddr));
    asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r"(vaddr));
#else
    asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r"(vaddr));
#endif
    /* Erratum 586323 - end with DMB to ensure the write goes out. */
    dmb();
}
377
378/* D-Cache clean to PoU (L2 cache) (v6/v7 common) */
379static inline void cleanByVA_PoU(vptr_t vaddr, paddr_t paddr)
380{
381#ifdef CONFIG_ARM_CORTEX_A8
382    /* Erratum 586324 -- perform a dummy cached load before flushing. */
383    asm volatile("ldr r0, [sp]" : : : "r0");
384    asm volatile("mcr p15, 0, %0, c7, c11, 1" : : "r"(vaddr));
385#elif defined(CONFIG_ARCH_ARM_V6)
386    /* V6 doesn't distinguish PoU and PoC, so use the basic flush. */
387    asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r"(vaddr));
388#elif defined(CONFIG_PLAT_EXYNOS5)
389    /* Flush to coherency for table walks... Why? */
390    asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r"(vaddr));
391#elif defined(CONFIG_PLAT_IMX7)
392    asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r"(vaddr));
393#elif defined(CONFIG_PLAT_TK1)
394    asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r"(vaddr));
395#elif defined(CONFIG_ARM_CORTEX_A53)
396    asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r"(vaddr));
397#else
398    asm volatile("mcr p15, 0, %0, c7, c11, 1" : : "r"(vaddr));
399#endif
400    /* Erratum 586323 - end with DMB to ensure the write goes out. */
401    dmb();
402}
403
404/* D-Cache invalidate to PoC (v6/v7 common) */
405static inline void invalidateByVA(vptr_t vaddr, paddr_t paddr)
406{
407#ifdef CONFIG_ARM_CORTEX_A8
408    /* Erratum 586324 -- perform a dummy cached load before flushing. */
409    asm volatile("ldr r0, [sp]" : : : "r0");
410    asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r"(vaddr));
411#else
412    asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r"(vaddr));
413#endif
414    dmb();
415}
416
/* I-Cache invalidate to PoU (L2 cache) (v6/v7 common).
 * ICIMVAU (c7/c5/1) invalidates one line by MVA; on Cortex-A8 the whole
 * I-cache is invalidated instead (ICIALLU, c7/c5/0). */
static inline void invalidateByVA_I(vptr_t vaddr, paddr_t paddr)
{
#ifdef CONFIG_ARM_CORTEX_A8
    /* On A8, we just invalidate the lot. */
    asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r"(0));
#else
    asm volatile("mcr p15, 0, %0, c7, c5, 1" : : "r"(vaddr));
#endif
    isb();
}
428
/* I-Cache invalidate all to PoU (L2 cache) (v6/v7 common).
 * ICIALLU (CP15 c7/c5/0), followed by an ISB so refetched instructions
 * see the invalidation. */
static inline void invalidate_I_PoU(void)
{
#ifdef CONFIG_ARM_CORTEX_A8
    /* Erratum 586324 -- perform a dummy cached load before flushing. */
    asm volatile("ldr r0, [sp]" : : : "r0");
#endif
    asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r"(0));
    isb();
}
439
/* D-Cache clean & invalidate to PoC (v6/v7 common).
 * DCCIMVAC (CP15 c7/c14/1): clean and invalidate line by MVA to PoC. */
static inline void cleanInvalByVA(vptr_t vaddr, paddr_t paddr)
{
#ifdef CONFIG_ARM_CORTEX_A8
    /* Erratum 586324 -- perform a dummy cached load before flushing. */
    asm volatile("ldr r0, [sp]" : : : "r0");
    /* Erratum 586320 -- clean twice with interrupts disabled. */
    asm volatile("mcr p15, 0, %0, c7, c14, 1" : : "r"(vaddr));
    asm volatile("mcr p15, 0, %0, c7, c14, 1" : : "r"(vaddr));
#else
    asm volatile("mcr p15, 0, %0, c7, c14, 1" : : "r"(vaddr));
#endif
    dsb();
}
454
/* Invalidate branch predictors by VA (v6/v7 common).
 * BPIMVA (CP15 c7/c5/7).  paddr is unused; kept for interface symmetry. */
static inline void branchFlush(vptr_t vaddr, paddr_t paddr)
{
    asm volatile("mcr p15, 0, %0, c7, c5, 7" : : "r"(vaddr));
}
460
461/* Fault status */
462
/* Read the Instruction Fault Status Register (IFSR, CP15 c5/c0/1). */
static inline word_t PURE getIFSR(void)
{
    word_t IFSR;
    asm volatile("mrc p15, 0, %0, c5, c0, 1" : "=r"(IFSR));
    return IFSR;
}
469
/* Write the Instruction Fault Status Register (IFSR, CP15 c5/c0/1). */
static inline void setIFSR(word_t ifsr)
{
    asm volatile("mcr p15, 0, %0, c5, c0, 1" : : "r"(ifsr));
}
474
/* Read the Data Fault Status Register (DFSR, CP15 c5/c0/0). */
static inline word_t PURE getDFSR(void)
{
    word_t DFSR;
    asm volatile("mrc p15, 0, %0, c5, c0, 0" : "=r"(DFSR));
    return DFSR;
}
481
/* Write the Data Fault Status Register (DFSR, CP15 c5/c0/0). */
static inline void setDFSR(word_t dfsr)
{
    asm volatile("mcr p15, 0, %0, c5, c0, 0" : : "r"(dfsr));
}
486
/* Read the Auxiliary Data Fault Status Register (ADFSR, CP15 c5/c1/0). */
static inline word_t PURE getADFSR(void)
{
    word_t ADFSR;
    asm volatile("mrc p15, 0, %0, c5, c1, 0" : "=r"(ADFSR));
    return ADFSR;
}
493
/* Write the Auxiliary Data Fault Status Register (ADFSR, CP15 c5/c1/0). */
static inline void setADFSR(word_t adfsr)
{
    asm volatile("mcr p15, 0, %0, c5, c1, 0" : : "r"(adfsr));
}
498
/* Read the Auxiliary Instruction Fault Status Register (AIFSR, CP15 c5/c1/1). */
static inline word_t PURE getAIFSR(void)
{
    word_t AIFSR;
    asm volatile("mrc p15, 0, %0, c5, c1, 1" : "=r"(AIFSR));
    return AIFSR;
}
505
/* Write the Auxiliary Instruction Fault Status Register (AIFSR, CP15 c5/c1/1). */
static inline void setAIFSR(word_t aifsr)
{
    asm volatile("mcr p15, 0, %0, c5, c1, 1" : : "r"(aifsr));
}
510
/* Read the Data Fault Address Register (DFAR, CP15 c6/c0/0). */
static inline word_t PURE getDFAR(void)
{
    word_t DFAR;
    asm volatile("mrc p15, 0, %0, c6, c0, 0" : "=r"(DFAR));
    return DFAR;
}
517
/* Write the Data Fault Address Register (DFAR, CP15 c6/c0/0). */
static inline void setDFAR(word_t dfar)
{
    asm volatile("mcr p15, 0, %0, c6, c0, 0" : : "r"(dfar));
}
522
/* Read the Instruction Fault Address Register (IFAR, CP15 c6/c0/2). */
static inline word_t PURE getIFAR(void)
{
    word_t IFAR;
    asm volatile("mrc p15, 0, %0, c6, c0, 2" : "=r"(IFAR));
    return IFAR;
}
529
/* Write the Instruction Fault Address Register (IFAR, CP15 c6/c0/2). */
static inline void setIFAR(word_t ifar)
{
    asm volatile("mcr p15, 0, %0, c6, c0, 2" : : "r"(ifar));
}
534
/* Read the Primary Region Remap Register (PRRR, CP15 c10/c2/0). */
static inline word_t getPRRR(void)
{
    word_t PRRR;
    asm volatile("mrc p15, 0, %0, c10, c2, 0" : "=r"(PRRR));
    return PRRR;
}
541
/* Write the Primary Region Remap Register (PRRR, CP15 c10/c2/0). */
static inline void setPRRR(word_t prrr)
{
    asm volatile("mcr p15, 0, %0, c10, c2, 0" : : "r"(prrr));
}
546
/* Read the Normal Memory Remap Register (NMRR, CP15 c10/c2/1). */
static inline word_t getNMRR(void)
{
    word_t NMRR;
    asm volatile("mrc p15, 0, %0, c10, c2, 1" : "=r"(NMRR));
    return NMRR;
}
553
/* Write the Normal Memory Remap Register (NMRR, CP15 c10/c2/1). */
static inline void setNMRR(word_t nmrr)
{
    asm volatile("mcr p15, 0, %0, c10, c2, 1" : : "r"(nmrr));
}
558
/* Read the fault address register.  Note this is the same CP15 encoding as
 * getDFAR (DFAR, c6/c0/0); this alias exists for callers that just want
 * "the fault address". */
static inline word_t PURE getFAR(void)
{
    word_t FAR;
    asm volatile("mrc p15, 0, %0, c6, c0, 0" : "=r"(FAR));
    return FAR;
}
565
/* Read the Context ID Register (CONTEXTIDR, CP15 c13/c0/1). */
static inline word_t getCIDR(void)
{
    word_t CIDR;
    asm volatile("mrc p15, 0, %0, c13, c0, 1" : "=r"(CIDR));
    return CIDR;
}
572
/* Write the Context ID Register (CONTEXTIDR, CP15 c13/c0/1).
 * Unlike writeContextID above, no ISB is issued here. */
static inline void setCIDR(word_t cidr)
{
    asm volatile("mcr p15, 0, %0, c13, c0, 1" : : "r"(cidr));
}
577
/* Read the Auxiliary Control Register (ACTLR, CP15 c1/c0/1).
 * NOTE(review): same encoding as readAuxiliaryControlRegister above —
 * apparent duplicate accessor, presumably kept for naming consistency. */
static inline word_t getACTLR(void)
{
    word_t ACTLR;
    asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r"(ACTLR));
    return ACTLR;
}
584
/* Write the Auxiliary Control Register (ACTLR, CP15 c1/c0/1).
 * NOTE(review): same encoding as writeAuxiliaryControlRegister above. */
static inline void setACTLR(word_t actlr)
{
    asm volatile("mcr p15, 0, %0, c1, c0, 1" :: "r"(actlr));
}
589
590void arch_clean_invalidate_caches(void);
591void arch_clean_invalidate_L1_caches(word_t type);
592