// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// All code doing MMIO access must go through this API rather than using direct
// pointer dereferences.
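// For example (illustrative only; uart_base stands for some already-mapped
// MMIO region and 0x18 for a hypothetical register offset, neither of which
// is defined here):
//     uint32_t status = readl((const volatile uint8_t*)uart_base + 0x18);
//     writel(status | 1u, (volatile uint8_t*)uart_base + 0x18);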

#pragma once

#include <stdint.h>

#ifdef __aarch64__
// The Linux/ARM64 KVM hypervisor does not support MMIO access via load/store
// instructions that use writeback, which the compiler might decide to generate.
// (The ARM64 virtualization hardware requires software assistance for the
// writeback forms but not for the non-writeback forms, and KVM just doesn't
// bother to implement that software assistance.)  To minimize the demands on a
// hypervisor we might run under, we use inline assembly definitions here to
// ensure that only the non-writeback load/store instructions are used.

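// As a concrete illustration (the writeback form does not appear in this
// file): the post-indexed form "str w1, [x0], #8" updates its base register
// x0 as a side effect, while the plain "str w1, [x0]" forms used below do
// not; only the non-writeback forms can be emulated from the trap syndrome
// alone, which is the software assistance KVM omits.
// In the asm templates below, the "w" operand modifier (as in "%w1") names
// the 32-bit W view of a register, and an unmodified operand ("%1") names
// the 64-bit X view, which only the 64-bit accessors use.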
static inline void writeb(uint8_t v, volatile void* a) {
    __asm__("strb %w1, %0" : "=m" (*(volatile uint8_t*)a) : "r" (v));
}
static inline void writew(uint16_t v, volatile void* a) {
    __asm__("strh %w1, %0" : "=m" (*(volatile uint16_t*)a) : "r" (v));
}
static inline void writel(uint32_t v, volatile void* a) {
    __asm__("str %w1, %0" : "=m" (*(volatile uint32_t*)a) : "r" (v));
}
static inline void writell(uint64_t v, volatile void* a) {
    __asm__("str %1, %0" : "=m" (*(volatile uint64_t*)a) : "r" (v));
}

static inline uint8_t readb(const volatile void* a) {
    uint8_t v;
    __asm__("ldrb %w0, %1" : "=r" (v) : "m" (*(volatile uint8_t*)a));
    return v;
}
static inline uint16_t readw(const volatile void* a) {
    uint16_t v;
    __asm__("ldrh %w0, %1" : "=r" (v) : "m" (*(volatile uint16_t*)a));
    return v;
}
static inline uint32_t readl(const volatile void* a) {
    uint32_t v;
    __asm__("ldr %w0, %1" : "=r" (v) : "m" (*(volatile uint32_t*)a));
    return v;
}
static inline uint64_t readll(const volatile void* a) {
    uint64_t v;
    __asm__("ldr %0, %1" : "=r" (v) : "m" (*(volatile uint64_t*)a));
    return v;
}

#else
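// On other architectures a plain volatile access of the correct width is
// enough: in practice, compilers emit a single appropriately sized load or
// store for a volatile access to an aligned scalar.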

static inline void writeb(uint8_t v, volatile void* a) {
    *(volatile uint8_t*)a = v;
}
static inline void writew(uint16_t v, volatile void* a) {
    *(volatile uint16_t*)a = v;
}
static inline void writel(uint32_t v, volatile void* a) {
    *(volatile uint32_t*)a = v;
}
static inline void writell(uint64_t v, volatile void* a) {
    *(volatile uint64_t*)a = v;
}

static inline uint8_t readb(const volatile void* a) {
    return *(const volatile uint8_t*)a;
}
static inline uint16_t readw(const volatile void* a) {
    return *(const volatile uint16_t*)a;
}
static inline uint32_t readl(const volatile void* a) {
    return *(const volatile uint32_t*)a;
}
static inline uint64_t readll(const volatile void* a) {
    return *(const volatile uint64_t*)a;
}

#endif

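// RMWREGn(addr, startbit, width, val) read-modify-writes the n-bit register
// at addr, replacing the width-bit field starting at bit startbit with val.
// val is not masked here, so it must already fit in width bits (pass it as
// an unsigned value to avoid signed-shift pitfalls near the top bit), and
// none of these macros are atomic with respect to other agents.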
#define RMWREG8(addr, startbit, width, val) \
    writeb((readb(addr) & ~(((1 << (width)) - 1) << (startbit))) | ((val) << (startbit)), (addr))
#define RMWREG16(addr, startbit, width, val) \
    writew((readw(addr) & ~(((1 << (width)) - 1) << (startbit))) | ((val) << (startbit)), (addr))
#define RMWREG32(addr, startbit, width, val) \
    writel((readl(addr) & ~(((1u << (width)) - 1) << (startbit))) | ((val) << (startbit)), (addr))
#define RMWREG64(addr, startbit, width, val) \
    writell((readll(addr) & ~(((1ull << (width)) - 1) << (startbit))) | ((val) << (startbit)), (addr))

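// Example: RMWREG32(addr, 4, 2, 3u) sets the two-bit field at bits [5:4] of
// the 32-bit register at addr to 0b11 and leaves all other bits unchanged.

// set_bits* and clr_bits* OR in or clear the bits of mask v at address a,
// again via a non-atomic read-modify-write of the matching width.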
#define set_bitsb(v, a) writeb(readb(a) | (v), (a))
#define clr_bitsb(v, a) writeb(readb(a) & ~(v), (a))

#define set_bitsw(v, a) writew(readw(a) | (v), (a))
#define clr_bitsw(v, a) writew(readw(a) & ~(v), (a))

#define set_bitsl(v, a) writel(readl(a) | (v), (a))
#define clr_bitsl(v, a) writel(readl(a) & ~(v), (a))

#define set_bitsll(v, a) writell(readll(a) | (v), (a))
#define clr_bitsll(v, a) writell(readll(a) & ~(v), (a))