/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#include <config.h>
#include <types.h>
#include <plat/machine/devices_gen.h>
#include <drivers/smmu/smmuv2.h>

/*supported stages of translation*/
#define STAGE1_TRANS           (1 << 0)
#define STAGE2_TRANS           (1 << 1)
#define NESTED_TRANS           (1 << 2)
/*supported translation table formats*/
#define AARCH32S_FMT           (1 << 0)
#define AARCH32L_FMT           (1 << 1)
#define NO_AARCH32_FMT         (1 << 2)
#define TRANS_PAGES_4KB        (1 << 3)
#define TRANS_PAGES_16KB       (1 << 4)
#define TRANS_PAGES_64KB       (1 << 5)

/*the default virtual address bits for partitioning TTBR0 and TTBR1*/
#define SMMU_VA_DEFAULT_BITS      48

struct smmu_feature {
    bool_t stream_match;              /*stream match register functionality included*/
    bool_t trans_op;                  /*address translation operations supported*/
    bool_t cotable_walk;              /*coherent translation table walk*/
    bool_t broadcast_tlb;             /*broadcast TLB maintenance*/
    bool_t vmid16;                    /*16-bit VMIDs are supported*/
    uint32_t supported_trans;         /*supported translation stages*/
    uint32_t supported_fmt;           /*supported translation formats*/
    uint32_t num_cfault_ints;         /*supported number of context fault interrupts*/
    uint32_t num_stream_ids;          /*number of stream IDs*/
    uint32_t num_stream_map_groups;   /*number of stream mapping register groups*/
    uint32_t smmu_page_size;          /*page size in the SMMU register address space*/
    uint32_t smmu_num_pages;          /*number of pages in the global or context bank address space*/
    uint32_t num_s2_cbanks;           /*context banks that support stage 2 only*/
    uint32_t num_cbanks;              /*total number of context banks*/
    uint32_t va_bits;                 /*upstream address size*/
    uint32_t pa_bits;                 /*PA address size*/
    uint32_t ipa_bits;                /*IPA address size*/
    pptr_t cb_base;                   /*base of the context bank address space*/
};

struct smmu_table_config {
    uint32_t tcr[2];                  /*SMMU_CBn_TCRm*/
    uint32_t mair[2];                 /*SMMU_CBn_MAIRm*/
    uint64_t ttbr[2];                 /*SMMU_CBn_TTBRm*/
};

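/* smmu_dev_knowledge caches the hardware features probed from the ID
 * registers at boot; smmu_stage_table_config is scratch space used to
 * stage one context bank's table configuration before it is written. */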
static struct smmu_feature smmu_dev_knowledge;
static struct smmu_table_config smmu_stage_table_config;

static inline uint32_t smmu_read_reg32(pptr_t base, uint32_t index)
{
    return *(volatile uint32_t *)(base + index);
}

static inline void smmu_write_reg32(pptr_t base, uint32_t index, uint32_t val)
{
    *(volatile uint32_t *)(base + index) = val;
}

static inline uint64_t smmu_read_reg64(pptr_t base, uint32_t index)
{
    return *(volatile uint64_t *)(base + index);
}

static inline void smmu_write_reg64(pptr_t base, uint32_t index, uint64_t val)
{
    *(volatile uint64_t *)(base + index) = val;
}

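/* Issue a TLB sync command and poll the corresponding status register until
 * the maintenance operation completes; note that this gives up silently
 * after TLBSYNC_LOOP polls rather than blocking the kernel indefinitely. */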
static void smmu_tlb_sync(pptr_t base, uint32_t sync, uint32_t status)
{
    int count = 0;
    smmu_write_reg32(base, sync, SMMU_TLB_SYNC_MASK);
    while (count < TLBSYNC_LOOP) {
        /*poll the active flag to read the TLB command state*/
        if (!(smmu_read_reg32(base, status) & TLBSTATUS_GSACTIVE)) {
            break;
        }
        count++;
    }
}

static inline uint32_t smmu_obs_size_to_bits(uint32_t size)
{
    /*convert the output bus address size field, as defined in the
    IDx registers, into address bits*/
    switch (size) {
    case 0:
        return 32;
    case 1:
        return 36;
    case 2:
        return 40;
    case 3:
        return 42;
    case 4:
        return 44;
    default:
        return 48;
    }
}

static inline uint32_t smmu_ubs_size_to_bits(uint32_t size)
{
    /*convert the upstream address size field, as defined in the
    IDx registers, into address bits*/
    switch (size) {
    case 0:
        return 32;
    case 1:
        return 36;
    case 2:
        return 40;
    case 3:
        return 42;
    case 4:
        return 44;
    case 5:
        return 49;
    default:
        return 64;
    }
}

BOOT_CODE static void smmu_mapping_init(void)
{
    /* Create mappings for the rest of the SMMU address space.
     * The code assumes the registers in each SMMU page fit within a 4K page,
     * even though the alignment of the (physical) pages can be 64K.
     * We make this assumption to compact the SMMU virtual address window. */

    /* This is a temporary solution. A correct solution would be to adjust
     * the virtual address space layout of the kernel, leaving enough virtual
     * address space for the SMMU windows. For example, the SMMU on TX2
     * requires an 8M space in total, including the empty areas resulting
     * from the 64K alignment. Also, the kernel requires the device space to
     * be configured statically. To support populating the device space from
     * the HW configuration, we would need to modify kernel_frame_t and
     * map_kernel_frame, allowing devices to be mapped in a separate page
     * table driven by the HW configuration. */

    /*the current implementation has only been tested on the TX2 platform*/

    /*init the GR1 region, start: smmu_pptr + 4K, size 4K*/
    map_kernel_frame(SMMU_GR1_PADDR(smmu_dev_knowledge.smmu_page_size),
                     SMMU_GR1_PPTR,
                     VMKernelOnly,
                     vm_attributes_new(true, false, false));
    /*GID registers*/
    map_kernel_frame(SMMU_GID_PADDR(smmu_dev_knowledge.smmu_page_size),
                     SMMU_GID_PPTR,
                     VMKernelOnly,
                     vm_attributes_new(true, false, false));
    /*PM registers*/
    map_kernel_frame(SMMU_PM_PADDR(smmu_dev_knowledge.smmu_page_size),
                     SMMU_PM_PPTR,
                     VMKernelOnly,
                     vm_attributes_new(true, false, false));
    /*SSD registers*/
    map_kernel_frame(SMMU_SSD_PADDR(smmu_dev_knowledge.smmu_page_size),
                     SMMU_SSD_PPTR,
                     VMKernelOnly,
                     vm_attributes_new(true, false, false));
    /*map the context banks, each bank maps to a 4K page*/
    for (int i = 0; i < smmu_dev_knowledge.num_cbanks; i++) {
        map_kernel_frame(SMMU_CBn_PADDR(smmu_dev_knowledge.cb_base, i, smmu_dev_knowledge.smmu_page_size),
                         SMMU_CBn_BASE_PPTR(i),
                         VMKernelOnly,
                         vm_attributes_new(true, false, false));
    }
}

BOOT_CODE static void smmu_config_prob(void)
{
    uint32_t reg, field;
    /*ID0*/
    reg = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_IDR0);
    /*stages supported*/
    if (reg & IDR0_S1TS) {
        smmu_dev_knowledge.supported_trans |= STAGE1_TRANS;
    }
    if (reg & IDR0_S2TS) {
        smmu_dev_knowledge.supported_trans |= STAGE2_TRANS;
    }
    if (reg & IDR0_NTS) {
        smmu_dev_knowledge.supported_trans |= NESTED_TRANS;
    }
    /*stream matching register*/
    if (reg & IDR0_SMS) {
        smmu_dev_knowledge.stream_match = true;
    }
    /*address translation operation*/
    if ((reg & IDR0_ATOSNS) == 0) {
        smmu_dev_knowledge.trans_op = true;
    }
    /*AARCH32 translation format support*/
    field = IDR0_PTFS_VAL(reg & IDR0_PTFS);
    if (field == PTFS_AARCH32S_AARCH32L) {
        smmu_dev_knowledge.supported_fmt |= AARCH32L_FMT;
        smmu_dev_knowledge.supported_fmt |= AARCH32S_FMT;
    } else if (field == PTFS_AARCH32L_ONLY) {
        smmu_dev_knowledge.supported_fmt |= AARCH32L_FMT;
    } else {
        smmu_dev_knowledge.supported_fmt |= NO_AARCH32_FMT;
    }
    /*number of context fault interrupts;
     * in SMMUv2, each context bank has a dedicated interrupt pin,
     * hence there is no requirement to specify implemented interrupts here.*/
    smmu_dev_knowledge.num_cfault_ints = IDR0_NUMIRPT_VAL(reg & IDR0_NUMIRPT);
    /*coherent translation table walk*/
    if (reg & IDR0_CTTW) {
        smmu_dev_knowledge.cotable_walk = true;
    }
    /*broadcast TLB maintenance*/
    if (reg & IDR0_BTM) {
        smmu_dev_knowledge.broadcast_tlb = true;
    }
    /*number of stream IDs*/
    smmu_dev_knowledge.num_stream_ids = (1 << IDR0_NUMSIDB_VAL(reg & IDR0_NUMSIDB)) - 1;
    /*number of stream mapping register groups*/
    smmu_dev_knowledge.num_stream_map_groups = reg & IDR0_NUMSMRG;

    /*ID1*/
    reg = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_IDR1);
    /*SMMU page size*/
    if (reg & IDR1_PAGESIZE) {
        smmu_dev_knowledge.smmu_page_size = SMMU_PAGE_64KB;
    } else {
        smmu_dev_knowledge.smmu_page_size = SMMU_PAGE_4KB;
    }
    /*number of SMMU pages, 2^(NUMPAGENDXB + 1)*/
    field = IDR1_NUMPAGENDXB_VAL(reg & IDR1_NUMPAGENDXB);
    smmu_dev_knowledge.smmu_num_pages = 1 << (field + 1);
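    /* e.g. a NUMPAGENDXB field of 3 gives 1 << 4 = 16 pages for the global
     * register space, and the same number again for the context banks */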
    /*number of stage 2 context banks*/
    smmu_dev_knowledge.num_s2_cbanks = IDR1_NUMS2CB_VAL(reg & IDR1_NUMS2CB);
    /*total number of context banks*/
    smmu_dev_knowledge.num_cbanks = reg & IDR1_NUMCB;
    /*calculate the context bank base*/
    smmu_dev_knowledge.cb_base = SMMU_CB_BASE_PADDR(
                                     SMMU_GLOBAL_SIZE(smmu_dev_knowledge.smmu_num_pages, smmu_dev_knowledge.smmu_page_size));
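    /* the context bank space is assumed to start immediately after the global
     * register space, i.e. at an offset of smmu_num_pages * smmu_page_size */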

    /*ID2*/
    reg = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_IDR2);
    /*VMID16S*/
    if (reg & IDR2_VMID16S) {
        smmu_dev_knowledge.vmid16 = true;
    }
    /*PTFSV8_64KB*/
    if (reg & IDR2_PTFSV8_64) {
        smmu_dev_knowledge.supported_fmt |= TRANS_PAGES_64KB;
    }
    /*PTFSV8_16KB*/
    if (reg & IDR2_PTFSV8_16) {
        smmu_dev_knowledge.supported_fmt |= TRANS_PAGES_16KB;
    }
    /*PTFSV8_4KB*/
    if (reg & IDR2_PTFSV8_4) {
        smmu_dev_knowledge.supported_fmt |= TRANS_PAGES_4KB;
    }
    /*UBS virtual address size*/
    smmu_dev_knowledge.va_bits = smmu_ubs_size_to_bits(IDR2_UBS_VAL(reg & IDR2_UBS));
    /*OAS*/
    smmu_dev_knowledge.pa_bits = smmu_obs_size_to_bits(IDR2_OAS_VAL(reg & IDR2_OAS));
    /*IAS*/
    smmu_dev_knowledge.ipa_bits = smmu_obs_size_to_bits(reg & IDR2_IAS);
}

BOOT_CODE static void smmu_dev_reset(void)
{
    uint32_t reg = 0;
    pptr_t cb_bank_ptr;
    uint32_t major;

    /*clear the fault syndrome registers*/
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_sGFSYNR0, reg);
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_sGFSYNR1, reg);
    /*clear the global FSR by writing back the read value*/
    reg = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_sGFSR);
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_sGFSR, reg);

    /*reset the stream-to-context configuration to use context banks*/
    reg = S2CR_PRIVCFG_SET(S2CR_PRIVCFG_DEFAULT);
    reg |= S2CR_TYPE_SET(S2CR_TYPE_CB);

    /*the number of stream-to-context mappings is related to the stream indexing method*/
    if (smmu_dev_knowledge.stream_match) {
        /*stream matching*/
        for (int i = 0; i < smmu_dev_knowledge.num_stream_map_groups; i++) {
            smmu_write_reg32(SMMU_GR0_PPTR, SMMU_S2CRn(i), reg);
        }
        /*mark the stream match registers as invalid*/
        reg = SMR_VALID_SET(SMR_VALID_DIS);
        for (int i = 0; i < smmu_dev_knowledge.num_stream_map_groups; i++) {
            smmu_write_reg32(SMMU_GR0_PPTR, SMMU_SMRn(i), reg);
        }
    } else {
        /*stream ID indexing*/
        for (int i = 0; i < smmu_dev_knowledge.num_stream_ids; i++) {
            smmu_write_reg32(SMMU_GR0_PPTR, SMMU_S2CRn(i), reg);
        }
    }

    /*special init required by the SMMU-500: start*/
    reg = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_IDR7);
    major = IDR7_MAJOR_VAL(reg & IDR7_MAJOR);
    /*init the auxiliary configuration register*/
    reg = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_sACR);
    /*unlock write access to SMMU_CBn_ACTLR,
    only provided in version 2 and above*/
    if (major >= 2) {
        reg &= ~ACR_CACHE_LOCK;
    }
    /*enable TLB caching of bypass entries*/
    reg |= ACR_S2CRB_TLBEN | ACR_SMTNMB_TLBEN;
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_sACR, reg);
    /*special init required by the SMMU-500: end*/

    for (int i = 0; i < smmu_dev_knowledge.num_cbanks; i++) {
        cb_bank_ptr = SMMU_CBn_BASE_PPTR(i);
        /*disable the context bank*/
        smmu_write_reg32(cb_bank_ptr, SMMU_CBn_SCTLR, 0);
        /*clear the context bank fault address and fault status registers*/
        smmu_write_reg64(cb_bank_ptr, SMMU_CBn_FAR, 0ULL);
        smmu_write_reg32(cb_bank_ptr, SMMU_CBn_FSR, CBn_FSR_CLEAR_ALL);
        /*special init required by the SMMU-500: start*/
        /*disable the MMU-500's next-page prefetch due to errata 841119 and 826419*/
        reg = smmu_read_reg32(cb_bank_ptr, SMMU_CBn_ACTLR);
        reg &= ~CBn_ACTLR_CPRE;
        smmu_write_reg32(cb_bank_ptr, SMMU_CBn_ACTLR, reg);
        /*special init required by the SMMU-500: end*/
    }

    /*invalidate TLB*/
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_TLBIALLH, SMMU_TLB_INVALL_MASK);
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_TLBIALLNSNH, SMMU_TLB_INVALL_MASK);

    reg = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_sCR0);
    /*enable global fault reporting*/
    reg |= CR0_GFRE | CR0_GFIE | CR0_GCFGFRE | CR0_GCFGFIE;
    /*raise a fault for any transaction that does not match any
    stream mapping table entry*/
    reg |= CR0_USFCFG;
    /*raise a fault on a stream match conflict*/
    reg |= CR0_SMCFCFG;
    /*enable the VMID private namespace*/
    reg |= CR0_VMIDPNE;
    /*the TLB is maintained together with the rest of the system*/
    reg &= ~CR0_PTM;
    /*force TLB broadcast on bypassing transactions*/
    reg |= CR0_FB;
    /*enable client access, i.e. transactions are enforced by the SMMU*/
    reg &= ~CR0_CLIENTPD;
    /*upgrade barrier to full system*/
    reg &= ~CR0_BSU(CR0_BSU_ALL);
    /*sync the TLB invalidations issued above*/
    smmu_tlb_sync(SMMU_GR0_PPTR, SMMU_sTLBGSYNC, SMMU_sTLBGSTATUS);
    /*enable the SMMU*/
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_sCR0, reg);
}

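/* Note: smmu_config_prob() only reads the GR0 page, which is expected to be
 * mapped before boot reaches this point (smmu_mapping_init() does not map
 * it); the probed page size and bank count then drive the other mappings. */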
BOOT_CODE void plat_smmu_init(void)
{
    smmu_config_prob();
    smmu_mapping_init();
    smmu_dev_reset();
}

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
static void smmu_config_stage2(struct smmu_table_config *cfg,
                               vspace_root_t *vspace)
{
    uint32_t reg = 0;
    /*SMMU_CBn_TCR*/
    reg |= CBn_TCR_SH0_SET(CBn_TCR_SH_INNER);
    reg |= CBn_TCR_ORGN0_SET(CBn_TCR_GN_WB_WA_CACHE);
    reg |= CBn_TCR_IRGN0_SET(CBn_TCR_GN_WB_WA_CACHE);
    reg |= CBn_TCR_TG0_SET(CBn_TCR_TG_4K);
    /*these settings follow vcpu_init_vtcr in vcpu.h*/
#ifdef CONFIG_ARM_PA_SIZE_BITS_40
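    /* 40-bit IPA: T0SZ = 64 - 40 = 24, and with a 4K granule the stage 2
     * walk starts at level 1 (as the SL0 macro name indicates) */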
    reg |= CBn_TCR_T0SZ_SET(24);
    reg |= CBn_TCR_PASize_SET(CBn_TCR2_PASize_40);
    reg |= CBn_TCR_SL0_SET(CBn_TCR_SL0_4KB_L1);
#else
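    /* 44-bit IPA: T0SZ = 64 - 44 = 20, and the stage 2 walk starts at
     * level 0 (again per the SL0 macro name) */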
    reg |= CBn_TCR_T0SZ_SET(20);
    reg |= CBn_TCR_PASize_SET(CBn_TCR2_PASize_44);
    reg |= CBn_TCR_SL0_SET(CBn_TCR_SL0_4KB_L0);
#endif
    /*bit 31 is reserved, write as 1*/
    reg |= BIT(31);
    cfg->tcr[0] = reg;
    /*VTTBR*/
    cfg->ttbr[0] = ttbr_new(0, pptr_to_paddr(vspace)).words[0];
}
#else
static void smmu_config_stage1(struct smmu_table_config *cfg,
                               bool_t coherence, uint32_t pa_bits,
                               vspace_root_t *vspace, asid_t asid)
{
    uint32_t reg = 0;
    /*SMMU_CBn_TCR*/
    if (coherence) {
        reg |= CBn_TCR_SH0_SET(CBn_TCR_SH_INNER);
        reg |= CBn_TCR_ORGN0_SET(CBn_TCR_GN_WB_WA_CACHE);
        reg |= CBn_TCR_IRGN0_SET(CBn_TCR_GN_WB_WA_CACHE);
    } else {
        reg |= CBn_TCR_SH0_SET(CBn_TCR_SH_OUTER);
        reg |= CBn_TCR_ORGN0_SET(CBn_TCR_GN_NCACHE);
        reg |= CBn_TCR_IRGN0_SET(CBn_TCR_GN_NCACHE);
    }
    /*the page size is configured as 4K*/
    reg |= CBn_TCR_TG0_SET(CBn_TCR_TG_4K);
    /*the TTBR0 region size, calculated with the AArch64 formula
    T0SZ = 64 - VA bits, i.e. 64 - 48 = 16 here*/
    reg |= CBn_TCR_T0SZ_SET(64 - SMMU_VA_DEFAULT_BITS);
    /*disable (speculative) page table walks through TTBR1*/
    reg |= CBn_TCR_EPD1_DIS;
    cfg->tcr[0] = reg;
    /*TCR2*/
    reg = 0;
    switch (pa_bits) {
    case 32:
        reg |= CBn_TCR2_PASize_SET(CBn_TCR2_PASize_32);
        break;
    case 36:
        reg |= CBn_TCR2_PASize_SET(CBn_TCR2_PASize_36);
        break;
    case 40:
        reg |= CBn_TCR2_PASize_SET(CBn_TCR2_PASize_40);
        break;
    case 42:
        reg |= CBn_TCR2_PASize_SET(CBn_TCR2_PASize_42);
        break;
    case 44:
        reg |= CBn_TCR2_PASize_SET(CBn_TCR2_PASize_44);
        break;
    default:
        reg |= CBn_TCR2_PASize_SET(CBn_TCR2_PASize_48);
        break;
    }
    /*we currently support AArch64 only*/
    reg |= CBn_TCR2_SEP_SET(CBn_TCR2_SEP_UPSTREAM_SIZE) | CBn_TCR2_AS_SET(CBn_TCR2_AS_16);
    cfg->tcr[1] = reg;
    /*MAIR0, configured according to the MAIR values used by the cores*/
    reg = CBn_MAIRm_ATTR_DEVICE_nGnRnE << CBn_MAIRm_ATTR_SHIFT(CBn_MAIRm_ATTR_ID_DEVICE_nGnRnE);
    reg |= CBn_MAIRm_ATTR_DEVICE_nGnRE << CBn_MAIRm_ATTR_SHIFT(CBn_MAIRm_ATTR_ID_DEVICE_nGnRE);
    reg |= CBn_MAIRm_ATTR_DEVICE_GRE << CBn_MAIRm_ATTR_SHIFT(CBn_MAIRm_ATTR_ID_DEVICE_GRE);
    reg |= CBn_MAIRm_ATTR_NC << CBn_MAIRm_ATTR_SHIFT(CBn_MAIRm_ATTR_ID_NC);
    cfg->mair[0] = reg;
    /*MAIR1*/
    reg = CBn_MAIRm_ATTR_CACHE << CBn_MAIRm_ATTR_SHIFT(CBn_MAIRm_ATTR_ID_CACHE);
    cfg->mair[1] = reg;
    /*TTBRs*/
    /*the SMMU only uses the user-level address space, i.e. TTBR0*/
    cfg->ttbr[0] = ttbr_new(asid, pptr_to_paddr(vspace)).words[0];
    cfg->ttbr[1] = 0;
}
#endif /*CONFIG_ARM_HYPERVISOR_SUPPORT*/

void smmu_cb_assign_vspace(word_t cb, vspace_root_t *vspace, asid_t asid)
{
    uint32_t reg = 0;
    uint32_t vmid = cb;
    /* For the stage 2 translation, the VMID space is designed as a private
     * space, with each value equal to its context bank index. Using a private
     * VMID space avoids synchronising with vspace management on VMID
     * reallocations. Also, a VMID used by the SMMU needs to remain valid at
     * all times once device transactions are enabled. To maintain TLB
     * coherency, we introduce a mechanism that links vspaces to context
     * banks via ASIDs. */
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    smmu_config_stage2(&smmu_stage_table_config,
                       vspace);
#else
    smmu_config_stage1(&smmu_stage_table_config,
                       smmu_dev_knowledge.cotable_walk,
                       smmu_dev_knowledge.ipa_bits,
                       vspace,
                       asid);
#endif /*CONFIG_ARM_HYPERVISOR_SUPPORT*/
    /*CBA2R*/
    /*we currently support AArch64 only*/
    reg = CBA2Rn_VA64_SET;
    if (smmu_dev_knowledge.vmid16) {
        reg |= CBA2Rn_VMID_SET(vmid);
    }
    smmu_write_reg32(SMMU_GR1_PPTR, SMMU_CBA2Rn(cb), reg);

    /*CBAR*/
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    /*stage 2 translation only, CBAR_TYPE_S2_TRANS*/
    reg = CBARn_TYPE_SET(CBARn_TYPE_STAGE2);
    /*8-bit VMID*/
    if (!smmu_dev_knowledge.vmid16) {
        reg |= CBARn_VMID_SET(vmid);
    }
#else
    /*stage 1 translation only, CBAR_TYPE_S1_TRANS_S2_BYPASS*/
    reg = CBARn_TYPE_SET(CBARn_TYPE_STAGE1);
    /*configured with the weakest shareability/memory types,
     * so that they can be overridden by the TTBCR or PTE attributes*/
    reg |= CBARn_BPSHCFG_SET(CBARn_BPSHCFG_NONE);
    reg |= CBARn_MemAttr_SET(MemAttr_OWB_IWB);
#endif  /*CONFIG_ARM_HYPERVISOR_SUPPORT*/
    smmu_write_reg32(SMMU_GR1_PPTR, SMMU_CBARn(cb), reg);
    /*TCR*/
    smmu_write_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TCR, smmu_stage_table_config.tcr[0]);
    /* stage 1 translation requires both TTBR1 and TTBR0;
     * stage 2 translation requires TTBR0 only*/
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    /*TCR2 is required by stage 1 only*/
    smmu_write_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TCR2, smmu_stage_table_config.tcr[1]);
    smmu_write_reg64(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TTBR1, smmu_stage_table_config.ttbr[1]);
#endif /*!CONFIG_ARM_HYPERVISOR_SUPPORT*/

    /*TTBR0 (user space), for both stage 1 and stage 2*/
    smmu_write_reg64(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TTBR0, smmu_stage_table_config.ttbr[0]);
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    /*the MAIRs are required by stage 1 only*/
    smmu_write_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_MAIR0, smmu_stage_table_config.mair[0]);
    smmu_write_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_MAIR1, smmu_stage_table_config.mair[1]);
#endif /*!CONFIG_ARM_HYPERVISOR_SUPPORT*/
    /*SCTLR: enable context fault interrupts and reporting, the access flag,
     * TEX remap, and the MMU*/
    reg = CBn_SCTLR_CFIE | CBn_SCTLR_CFRE | CBn_SCTLR_AFE | CBn_SCTLR_TRE | CBn_SCTLR_M;
    smmu_write_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_SCTLR, reg);
}

void smmu_cb_disable(word_t cb, asid_t asid)
{
    uint32_t reg = smmu_read_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_SCTLR);
    reg &= ~CBn_SCTLR_M;
    smmu_write_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_SCTLR, reg);
    smmu_tlb_invalidate_cb(cb, asid);
}

void smmu_sid_bind_cb(word_t sid, word_t cb)
{
    uint32_t reg = 0;
    reg = S2CR_PRIVCFG_SET(S2CR_PRIVCFG_DEFAULT);
    reg |= S2CR_TYPE_SET(S2CR_TYPE_CB);
    reg |= S2CR_CBNDX_SET(cb);
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_S2CRn(sid), reg);
    /* The number of stream-to-context mappings is related to the stream
     * indexing method. We currently support mapping one stream ID to one
     * context bank.*/
    if (smmu_dev_knowledge.stream_match) {
        reg = SMR_VALID_SET(SMR_VALID_EN) | SMR_ID_SET(sid);
        smmu_write_reg32(SMMU_GR0_PPTR, SMMU_SMRn(sid), reg);
    }
}

void smmu_sid_unbind(word_t sid)
{
    uint32_t reg = S2CR_TYPE_SET(S2CR_TYPE_FAULT);
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_S2CRn(sid), reg);
    if (smmu_dev_knowledge.stream_match) {
        reg = SMR_VALID_SET(SMR_VALID_DIS);
        smmu_write_reg32(SMMU_GR0_PPTR, SMMU_SMRn(sid), reg);
    }
}
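
/* Illustrative call sequence for a hypothetical caller (not part of this
 * driver): install the translation tables, then route the device's stream:
 *     smmu_cb_assign_vspace(cb, vspace, asid);
 *     smmu_sid_bind_cb(sid, cb);
 * and on teardown, reverse the order:
 *     smmu_sid_unbind(sid);
 *     smmu_cb_disable(cb, asid);
 */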

void smmu_tlb_invalidate_all(void)
{
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    /*invalidate hyp entries*/
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_TLBIALLH, SMMU_TLB_INVALL_MASK);
#else
    /*invalidate non-secure non-hyp entries*/
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_TLBIALLNSNH, SMMU_TLB_INVALL_MASK);
#endif
    /*sync the TLB operations issued above*/
    smmu_tlb_sync(SMMU_GR0_PPTR, SMMU_sTLBGSYNC, SMMU_sTLBGSTATUS);
}

void smmu_tlb_invalidate_cb(int cb, asid_t asid)
{
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    /*stage 2*/
    /* The SMMU uses a private VMID space; each context bank's VMID equals
     * its context bank number.*/
    uint32_t reg = TLBIVMID_SET(cb);
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_TLBIVMID, reg);
    smmu_tlb_sync(SMMU_GR0_PPTR, SMMU_sTLBGSYNC, SMMU_sTLBGSTATUS);
#else
    /*stage 1*/
    uint32_t reg = CBn_TLBIASID_SET(asid);
    smmu_write_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TLBIASID, reg);
    smmu_tlb_sync(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TLBSYNC, SMMU_CBn_TLBSTATUS);
#endif
}

void smmu_tlb_invalidate_cb_va(int cb, asid_t asid, vptr_t vaddr)
{
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    /*stage 2*/
    /* invalidate all unlocked TLB entries in the stage 2 translation
     * associated with the given IPA*/
    uint64_t reg = CBn_TLBIIPAS2_SET(vaddr);
    smmu_write_reg64(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TLBIIPAS2, reg);
    smmu_tlb_sync(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TLBSYNC, SMMU_CBn_TLBSTATUS);
#else
    /*stage 1*/
    uint64_t reg = CBn_TLBIVA_SET(asid, vaddr);
    smmu_write_reg64(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TLBIVA, reg);
    smmu_tlb_sync(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_TLBSYNC, SMMU_CBn_TLBSTATUS);
#endif
}

void smmu_read_fault_state(uint32_t *status, uint32_t *syndrome_0, uint32_t *syndrome_1)
{
    *status = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_sGFSR);
    *syndrome_0 = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_sGFSYNR0);
    *syndrome_1 = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_sGFSYNR1);
}

void smmu_clear_fault_state(void)
{
    uint32_t reg = smmu_read_reg32(SMMU_GR0_PPTR, SMMU_sGFSR);
    smmu_write_reg32(SMMU_GR0_PPTR, SMMU_sGFSR, reg);
}

void smmu_cb_read_fault_state(int cb, uint32_t *status, word_t *address)
{
    *status = smmu_read_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_FSR);
    *address = smmu_read_reg64(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_FAR);
}

void smmu_cb_clear_fault_state(int cb)
{
    smmu_write_reg32(SMMU_CBn_BASE_PPTR(cb), SMMU_CBn_FSR, CBn_FSR_CLEAR_ALL);
}