1#ifndef _PPC64_PAGE_H 2#define _PPC64_PAGE_H 3 4/* 5 * Copyright (C) 2001 PPC64 Team, IBM Corp 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; either version 10 * 2 of the License, or (at your option) any later version. 11 */ 12 13#include <linux/config.h> 14 15/* PAGE_SHIFT determines the page size */ 16#define PAGE_SHIFT 12 17#define PAGE_SIZE (1UL << PAGE_SHIFT) 18#define PAGE_MASK (~(PAGE_SIZE-1)) 19#define PAGE_OFFSET_MASK (PAGE_SIZE-1) 20 21#define SID_SHIFT 28 22#define SID_MASK 0xfffffffff 23#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK) 24 25/* Define an illegal instr to trap on the bug. 26 * We don't use 0 because that marks the end of a function 27 * in the ELF ABI. That's "Boo Boo" in case you wonder... 28 */ 29#define BUG_OPCODE .long 0x00b00b00 /* For asm */ 30#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */ 31 32#ifdef __KERNEL__ 33#ifndef __ASSEMBLY__ 34#include <asm/naca.h> 35 36#define STRICT_MM_TYPECHECKS 37 38#define REGION_SIZE 4UL 39#define OFFSET_SIZE 60UL 40#define REGION_SHIFT 60UL 41#define OFFSET_SHIFT 0UL 42#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT) 43#define REGION_STRIDE (1UL << REGION_SHIFT) 44 45#ifdef ___powerpc64__ 46typedef union ppc64_va { 47 struct { 48 unsigned long off : OFFSET_SIZE; /* intra-region offset */ 49 unsigned long reg : REGION_SIZE; /* region number */ 50 } f; 51 unsigned long l; 52 void *p; 53} ppc64_va; 54#endif /* ___powerpc64__ */ 55 56static __inline__ void clear_page(void *addr) 57{ 58 unsigned long lines, line_size; 59 60 line_size = naca->dCacheL1LineSize; 61 lines = naca->dCacheL1LinesPerPage; 62 63 __asm__ __volatile__( 64" mtctr %1\n\ 651: dcbz 0,%0\n\ 66 add %0,%0,%3\n\ 67 bdnz+ 1b" 68 : "=r" (addr) 69 : "r" (lines), "0" (addr), "r" (line_size) 70 : "ctr", "memory"); 71} 72 73extern void copy_page(void *to, void *from); 74struct page; 
extern void clear_user_page(void *page, unsigned long vaddr);
extern void copy_user_page(void *to, void *from, unsigned long vaddr);

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking.
 * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
 * Wrapping each in a one-member struct makes pte/pmd/pgd/pgprot distinct
 * types, so mixing them up is a compile error instead of a silent bug.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned int pmd; } pmd_t;
typedef struct { unsigned int pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler: plain integer types,
 * and the val/constructor macros become identity operations.
 */
typedef unsigned long pte_t;
typedef unsigned int pmd_t;
typedef unsigned int pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif

/*
 * BUG() reports the location and then drops into whichever debugger is
 * configured (xmon or kdb); with neither configured it executes the
 * illegal instruction defined above so the trap handler catches it.
 */
#ifdef CONFIG_XMON
#include <asm/ptrace.h>
extern void xmon(struct pt_regs *excp);
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	xmon(0); \
} while (0)
#elif defined(CONFIG_KDB)
#include <asm/ptrace.h>
#include <linux/kdb.h>
/* extern void kdb(kdb_reason_t reason, int error, kdb_eframe_t ef); */
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	kdb(KDB_REASON_OOPS, 0, (kdb_eframe_t) 0); \
} while (0)
#else
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	__asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
} while (0)
#endif

#define PAGE_BUG(page) do { BUG(); } while (0)

/* Pure 2^n version
   of get_order: returns the allocation order (log2 of the page count)
   needed to hold 'size' bytes, i.e. the smallest n with 2^n pages >= size. */
static inline int get_order(unsigned long size)
{
	int order;

	/* Pre-shift by PAGE_SHIFT-1 so the do/while below (which always
	 * shifts at least once) ends up counting whole pages; (size-1)
	 * makes exact powers of two land on the lower order. */
	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

/* Kernel virtual -> physical: the linear mapping is offset by PAGE_OFFSET */
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)

#endif /* __ASSEMBLY__ */

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)

/* to align the pointer to the (next) double word boundary */
#define DOUBLEWORD_ALIGN(addr)	_ALIGN(addr,sizeof(unsigned long))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)

/* Page-align data; outside modules also place it in its own section */
#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif


/* This must match the -Ttext linker address */
/* Note: tophys & tovirt make assumptions about how */
/* KERNELBASE is defined for performance reasons. */
/* When KERNELBASE moves, those macros may have */
/* to change!
 */
/*
 * Region base addresses: the top nibble of the effective address
 * selects the region (0xC kernel linear map, 0xD vmalloc, 0xE I/O,
 * 0xB bolted, 0x0 user).
 */
#define PAGE_OFFSET	0xC000000000000000
#define KERNELBASE	PAGE_OFFSET
#define VMALLOCBASE	0xD000000000000000
#define IOREGIONBASE	0xE000000000000000
#define BOLTEDBASE	0xB000000000000000

#define IO_REGION_ID		(IOREGIONBASE>>REGION_SHIFT)
#define VMALLOC_REGION_ID	(VMALLOCBASE>>REGION_SHIFT)
#define KERNEL_REGION_ID	(KERNELBASE>>REGION_SHIFT)
#define BOLTED_REGION_ID	(BOLTEDBASE>>REGION_SHIFT)
#define USER_REGION_ID		(0UL)
#define REGION_ID(X)		(((unsigned long)(X))>>REGION_SHIFT)

/*
 * Define valid/invalid EA bits (for all ranges)
 * VALID_EA_BITS covers the low 41 bits; anything set between bit 41 and
 * the region nibble makes the EA invalid.
 */
#define VALID_EA_BITS	(0x000001ffffffffffUL)
#define INVALID_EA_BITS	(~(REGION_MASK|VALID_EA_BITS))

/* NOTE(review): ">= BOLTED_REGION_ID" accepts regions 0xB..0xF, which
 * includes 0xF even though no base is defined for it - presumably
 * intentional for the kernel region range; verify against users. */
#define IS_VALID_REGION_ID(x) \
	(((x) == USER_REGION_ID) || ((x) >= BOLTED_REGION_ID))
#define IS_VALID_EA(x) \
	((!((x) & INVALID_EA_BITS)) && IS_VALID_REGION_ID(REGION_ID(x)))

/* Bolted page number <-> bolted address (kernel-region virtual) */
#define __bpn_to_ba(x) ((((unsigned long)(x))<<PAGE_SHIFT) + KERNELBASE)
#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)

/* Physical -> kernel virtual (inverse of __pa) */
#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))

/* Given that physical addresses do not map 1-1 to absolute addresses, we
 * use these macros to better specify exactly what we want to do.
 * The only restriction on their use is that the absolute address
 * macros cannot be used until after the LMB structure has been
 * initialized in prom.c.
-Peter 222 */ 223#define __v2p(x) ((void *) __pa(x)) 224#define __v2a(x) ((void *) phys_to_absolute(__pa(x))) 225#define __p2a(x) ((void *) phys_to_absolute(x)) 226#define __p2v(x) ((void *) __va(x)) 227#define __a2p(x) ((void *) absolute_to_phys(x)) 228#define __a2v(x) ((void *) __va(absolute_to_phys(x))) 229 230#define virt_to_page(kaddr) (mem_map+(__pa((unsigned long)kaddr) >> PAGE_SHIFT)) 231 232#define VALID_PAGE(page) ((page - mem_map) < max_mapnr) 233 234#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT) 235 236#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 237 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 238 239#endif /* __KERNEL__ */ 240#endif /* _PPC64_PAGE_H */ 241