/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>

/*
 * pmap_zero_page zeros the specified (machine independent) page.
 *
 * pn is a physical page number; it must not name a fictitious or
 * guard page (asserted below), since those have no real frame.
 */
void
pmap_zero_page(
	ppnum_t pn)
{
	assert(pn != vm_page_fictitious_addr);
	assert(pn != vm_page_guard_addr);
	/* i386_ptob() converts the page number to a byte physical address */
	bzero_phys((addr64_t)i386_ptob(pn), PAGE_SIZE);
}

/*
 * pmap_zero_part_page
 *	zeros the specified (machine independent) part of a page.
 */
void
pmap_zero_part_page(
	ppnum_t		pn,		/* physical page number */
	vm_offset_t	offset,		/* byte offset within the page */
	vm_size_t	len)		/* number of bytes to zero */
{
	assert(pn != vm_page_fictitious_addr);
	assert(pn != vm_page_guard_addr);
	/* the zeroed range must not extend past the end of the page */
	assert(offset + len <= PAGE_SIZE);
	bzero_phys((addr64_t)(i386_ptob(pn) + offset), (uint32_t)len);
}

/*
 * pmap_copy_part_page copies the specified part of one (machine
 * independent) physical page to another.
 */
void
pmap_copy_part_page(
	ppnum_t		psrc,		/* source physical page number */
	vm_offset_t	src_offset,	/* byte offset within source page */
	ppnum_t		pdst,		/* destination physical page number */
	vm_offset_t	dst_offset,	/* byte offset within destination page */
	vm_size_t	len)		/* number of bytes to copy */
{
	pmap_paddr_t src, dst;

	/* neither endpoint may be a fictitious or guard page */
	assert(psrc != vm_page_fictitious_addr);
	assert(pdst != vm_page_fictitious_addr);
	assert(psrc != vm_page_guard_addr);
	assert(pdst != vm_page_guard_addr);

	src = i386_ptob(psrc);
	dst = i386_ptob(pdst);

	/* neither partial copy may run off the end of its page */
	assert((((uintptr_t)dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
	assert((((uintptr_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);

	/* offsets are masked to the in-page bits before being added */
	bcopy_phys((addr64_t)src + (src_offset & INTEL_OFFMASK),
		   (addr64_t)dst + (dst_offset & INTEL_OFFMASK),
		   len);
}

/*
 * pmap_copy_part_lpage copies part of a virtually addressed page
 * to a physically addressed page.
 */
void
pmap_copy_part_lpage(
	__unused vm_offset_t	src,		/* kernel virtual source address */
	__unused ppnum_t	pdst,		/* destination physical page number */
	__unused vm_offset_t	dst_offset,	/* byte offset within destination page */
	__unused vm_size_t	len)		/* number of bytes to copy */
{
#ifdef __i386__
	mapwindow_t *map;
#endif

	assert(pdst != vm_page_fictitious_addr);
	assert(pdst != vm_page_guard_addr);
	assert((dst_offset + len) <= PAGE_SIZE);

	/* On non-__i386__ builds this routine is a no-op beyond the asserts. */
#ifdef __i386__
	/* preemption stays disabled while the per-cpu map window is in use */
	mp_disable_preemption();

	/* map the destination frame writable; REF/MOD set up front */
	map = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW | (i386_ptob(pdst) & PG_FRAME) |
				 INTEL_PTE_REF | INTEL_PTE_MOD);

	memcpy((void *) (map->prv_CADDR + (dst_offset & INTEL_OFFMASK)), (void *) src, len);

	pmap_put_mapwindow(map);

	mp_enable_preemption();
#endif
}

/*
 * pmap_copy_part_rpage copies part of a physically addressed page
 * to a virtually addressed page.
 */
void
pmap_copy_part_rpage(
	__unused ppnum_t	psrc,		/* source physical page number */
	__unused vm_offset_t	src_offset,	/* byte offset within source page */
	__unused vm_offset_t	dst,		/* kernel virtual destination address */
	__unused vm_size_t	len)		/* number of bytes to copy */
{
#ifdef __i386__
	mapwindow_t *map;
#endif

	assert(psrc != vm_page_fictitious_addr);
	assert(psrc != vm_page_guard_addr);
	assert((src_offset + len) <= PAGE_SIZE);

	/* On non-__i386__ builds this routine is a no-op beyond the asserts. */
#ifdef __i386__
	mp_disable_preemption();

	/* source window: REF set but no MOD, since the frame is only read */
	map = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW | (i386_ptob(psrc) & PG_FRAME) |
				 INTEL_PTE_REF);

	memcpy((void *) dst, (void *) (map->prv_CADDR + (src_offset & INTEL_OFFMASK)), len);

	pmap_put_mapwindow(map);

	mp_enable_preemption();
#endif
}

/*
 * kvtophys(addr)
 *
 *	Convert a kernel virtual address to a physical address.
 *	Returns 0 if the address is not mapped.
 */
addr64_t
kvtophys(
	vm_offset_t addr)
{
	pmap_paddr_t pa;

	/* pmap_find_phys() yields the page frame number; shift it into a
	 * byte address, then restore the in-page offset bits */
	pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT;
	if (pa)
		pa |= (addr & INTEL_OFFMASK);

	return ((addr64_t)pa);
}

/* debugger physical-window state, defined elsewhere in the pmap layer */
extern pt_entry_t *debugger_ptep;
extern vm_map_offset_t debugger_window_kva;

__private_extern__ void
ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
	/*
	 * Copy `bytes` bytes of physical memory from src64 to dst64.
	 * Each range must lie within a single page (enforced by the
	 * panic below).  On x86_64, an address outside the physmap is
	 * reached through a temporary cache-inhibited window at
	 * debugger_window_kva (debugger-only path).
	 */
	void *src, *dst;

	mp_disable_preemption();
#if NCOPY_WINDOWS > 0
	mapwindow_t *src_map, *dst_map;
	/* We rely on MTRRs here */
	src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));
	/* combine the window's virtual base with the in-page offset bits */
	src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
	dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
#elif defined(__x86_64__)
	addr64_t debug_pa = 0;

	/* If either destination or source are outside the
	 * physical map, establish a physical window onto the target frame.
	 * At most one side may be outside the physmap.
	 */
	assert(physmap_enclosed(src64) || physmap_enclosed(dst64));

	if (physmap_enclosed(src64) == FALSE) {
		src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
		dst = PHYSMAP_PTOV(dst64);
		debug_pa = src64 & PG_FRAME;
	} else if (physmap_enclosed(dst64) == FALSE) {
		src = PHYSMAP_PTOV(src64);
		dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
		debug_pa = dst64 & PG_FRAME;
	} else {
		src = PHYSMAP_PTOV(src64);
		dst = PHYSMAP_PTOV(dst64);
	}
	/* DRK: debugger only routine, we don't bother checking for an
	 * identical mapping.
	 */
	if (debug_pa) {
		if (debugger_window_kva == 0)
			panic("%s: invoked in non-debug mode", __FUNCTION__);
		/* Establish a cache-inhibited physical window; some platforms
		 * may not cover arbitrary ranges with MTRRs
		 */
		pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF| INTEL_PTE_MOD | INTEL_PTE_VALID);
		/* make the new PTE visible before touching the window */
		flush_tlb_raw();
#if DEBUG
		kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
#endif
	}
#endif
	/* ensure we stay within a page (the uint32_t casts only keep the
	 * low in-page bits, so the truncation is harmless) */
	if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ) {
		panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
	}

	/* naturally-sized transfers are done as a single volatile load and
	 * store; larger or odd sizes fall back to bcopy().
	 * NOTE(review): presumably the single accesses matter for
	 * device/uncached memory -- confirm before restructuring. */
	switch (bytes) {
	case 1:
		*((uint8_t *) dst) = *((volatile uint8_t *) src);
		break;
	case 2:
		*((uint16_t *) dst) = *((volatile uint16_t *) src);
		break;
	case 4:
		*((uint32_t *) dst) = *((volatile uint32_t *) src);
		break;
		/* Should perform two 32-bit reads */
	case 8:
		*((uint64_t *) dst) = *((volatile uint64_t *) src);
		break;
	default:
		bcopy(src, dst, bytes);
		break;
	}
#if NCOPY_WINDOWS > 0
	pmap_put_mapwindow(src_map);
	pmap_put_mapwindow(dst_map);
#endif
	mp_enable_preemption();
}