/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtsrmmu.h: SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H

#include <asm/page.h>

#ifdef __ASSEMBLY__
#include <asm/thread_info.h>	/* TI_UWINMASK for WINDOW_FLUSH */
#endif

/* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS	65536

#define SRMMU_PTE_TABLE_SIZE		(PTRS_PER_PTE*4)
#define SRMMU_PMD_TABLE_SIZE		(PTRS_PER_PMD*4)
#define SRMMU_PGD_TABLE_SIZE		(PTRS_PER_PGD*4)

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK         0x3
#define SRMMU_ET_INVALID      0x0
#define SRMMU_ET_PTD          0x1
#define SRMMU_ET_PTE          0x2
#define SRMMU_ET_REPTE        0x3	/* AIEEE, SuperSparc II reverse endian page! */

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK    0xfffffff0
#define SRMMU_PTD_PMASK    0xfffffff0
#define SRMMU_PTE_PMASK    0xffffff00

/* The pte non-page bits.  Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes
 */
#define SRMMU_CACHE        0x80
#define SRMMU_DIRTY        0x40
#define SRMMU_REF          0x20
#define SRMMU_NOREAD       0x10
#define SRMMU_EXEC         0x08
#define SRMMU_WRITE        0x04
#define SRMMU_VALID        0x02	/* SRMMU_ET_PTE */
#define SRMMU_PRIV         0x1c
#define SRMMU_PRIV_RDONLY  0x18

#define SRMMU_CHG_MASK    (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)

/* SRMMU swap entry encoding */
#define SRMMU_SWP_TYPE_MASK	0x1f
#define SRMMU_SWP_TYPE_SHIFT	7
#define SRMMU_SWP_OFF_MASK	0xfffff
#define SRMMU_SWP_OFF_SHIFT	(SRMMU_SWP_TYPE_SHIFT + 5)
/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
#define SRMMU_SWP_EXCLUSIVE	SRMMU_DIRTY

/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE    __pgprot(SRMMU_CACHE | \
				    SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
				    SRMMU_DIRTY | SRMMU_REF)

/* SRMMU Register addresses in ASI 0x4.  These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG           0x00000000
#define SRMMU_CTXTBL_PTR         0x00000100
#define SRMMU_CTX_REG            0x00000200
#define SRMMU_FAULT_STATUS       0x00000300
#define SRMMU_FAULT_ADDR         0x00000400

#define WINDOW_FLUSH(tmp1, tmp2)				\
	mov	0, tmp1;					\
98:	ld	[%g6 + TI_UWINMASK], tmp2;			\
	orcc	%g0, tmp2, %g0;					\
	add	tmp1, 1, tmp1;					\
	bne	98b;						\
	 save	%sp, -64, %sp;					\
99:	subcc	tmp1, 1, tmp1;					\
	bne	99b;						\
	 restore %g0, %g0, %g0;

#ifndef __ASSEMBLY__
extern unsigned long last_valid_pfn;

/* This makes sense. Honest it does - Anton */
/* XXX Yes but it's ugly as sin. FIXME. -KMW */
extern void *srmmu_nocache_pool;
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) ((__typeof__(VADDR))__va(__nocache_pa(VADDR)))

/* Accessing the MMU control register. */
unsigned int srmmu_get_mmureg(void);
void srmmu_set_mmureg(unsigned long regval);
void srmmu_set_ctable_ptr(unsigned long paddr);
void srmmu_set_context(int context);
int srmmu_get_context(void);
unsigned int srmmu_get_fstatus(void);
unsigned int srmmu_get_faddr(void);

/* This is guaranteed on all SRMMU's. */
static inline void srmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x400),	/* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static inline int
srmmu_get_pte (unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("\n\tlda [%1] %2,%0\n\t" :
			     "=r" (entry) :
			     "r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return entry;
}
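
/* Illustrative sketch, not part of the original header: how the
 * SRMMU_SWP_* fields above pack into a software swap PTE.  The type
 * occupies bits [11:7], the offset bits [31:12], and the ET field
 * (bits [1:0]) stays zero so the hardware sees SRMMU_ET_INVALID.
 * The real encoding is done by the __swp_entry()/__swp_type()/
 * __swp_offset() macros in <asm/pgtable_32.h>; this hypothetical
 * helper only makes the bit layout explicit.
 */
static inline unsigned long srmmu_swp_encode_sketch(unsigned long type,
						    unsigned long offset)
{
	return ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |
	       ((offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT);
}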
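
/* Illustrative sketch, not part of the original header: reading one of
 * the ASI 0x4 registers listed above.  The accessors declared earlier
 * (srmmu_get_fstatus() and friends) are defined out of line in the
 * kernel proper; a minimal version does an "lda" from the documented
 * register offset, assuming ASI_M_MMUREGS from <asm/asi.h> is visible
 * here (ASI_M_FLUSH_PROBE below comes from the same header).
 */
static inline unsigned int srmmu_read_fstatus_sketch(void)
{
	unsigned int fsr;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (fsr) :
			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
	return fsr;
}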
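
/* Illustrative sketch, not part of the original header: the
 * __nocache_pa()/__nocache_va() macros above translate between the
 * uncached virtual window starting at SRMMU_NOCACHE_VADDR (defined in
 * <asm/pgtable_32.h>) and the physical backing of srmmu_nocache_pool.
 * For any address inside the pool a round trip through both should be
 * the identity; this hypothetical helper merely spells that out.
 */
static inline int srmmu_nocache_roundtrip_ok_sketch(unsigned long vaddr)
{
	return (unsigned long)__nocache_va(__nocache_pa(vaddr)) == vaddr;
}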

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC_PGTSRMMU_H) */