/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <i386/vmx.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */

#if 0

#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#define KDEBUG 1

#endif

/* XXX - should be gone from here */
extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t phys_page_exists(ppnum_t);
extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void pmap_set_reference(ppnum_t pn);
extern void mapping_set_mod(ppnum_t pa);
extern void mapping_set_ref(ppnum_t pn);

extern void ovbcopy(const char	*from,
		    char	*to,
		    vm_size_t	nbytes);
void machine_callstack(uintptr_t *buf, vm_size_t callstack_max);


#define value_64bit(value)	((value) & 0xFFFFFFFF00000000ULL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFULL))

#define INT_SIZE	(BYTE_SIZE * sizeof (int))

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 * Returns zero/non-zero, not zero/one.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}

/*
 * Find first bit set in bit string.  The caller must guarantee that at
 * least one bit is set; the scan loops forever on an all-zero string.
 */
int
ffsbit(int *s)
{
	int	offset;

	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s)
		;
	return offset + __builtin_ctz(*s);
}

int
ffs(unsigned int mask)
{
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}

void
bzero_phys_nc(
	addr64_t src64,
	uint32_t bytes)
{
	bzero_phys(src64, bytes);
}

void
bzero_phys(
	addr64_t src64,
	uint32_t bytes)
{
	bzero(PHYSMAP_PTOV(src64), bytes);
}
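/*
 * Illustrative sketch (not compiled into this file): how the bit-string
 * helpers above compose.  The "map" array and bit index 70 are
 * hypothetical values chosen for the example.
 */
#if 0
static void
bitstring_example(void)
{
	int map[4] = { 0, 0, 0, 0 };	/* 4 * 32 = 128 bits, all clear */

	setbit(70, map);		/* sets bit 6 of map[2] */
	assert(testbit(70, map));	/* non-zero: the bit is set */
	assert(ffsbit(map) == 70);	/* first (and only) set bit */
	clrbit(70, map);
	assert(!testbit(70, map));
}
#endif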
/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
	addr64_t src64,
	addr64_t dst64,
	vm_size_t bytes)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
	    ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("bcopy_phys alignment");
	}
	bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
}

/*
 * allow a function to get a quick virtual mapping of a physical page
 */

int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG) {
		panic("apply_func_phys alignment");
	}

	return func(PHYSMAP_PTOV(dst64), bytes, arg);
}

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op */
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}


/*
 * Read data from a physical address. Memory should not be cache inhibited.
 */

static inline unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int result = 0;
	unsigned char s1;
	unsigned short s2;

	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}
	return result;
}

static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
	return *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
}

unsigned int ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}
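/*
 * Illustrative sketch (not compiled into this file): ovbcopy() on an
 * overlapping, upward shift within one buffer, the case that forces the
 * right-to-left copy path above.  "buf" and its contents are hypothetical.
 */
#if 0
static void
ovbcopy_example(void)
{
	char buf[8] = { 'a', 'b', 'c', 'd', 0, 0, 0, 0 };

	/*
	 * dst overlaps src from above (to > from), so bytes are copied
	 * right-to-left; a left-to-right bcopy would clobber the source.
	 */
	ovbcopy(buf, buf + 2, 4);
	/* buf is now { 'a', 'b', 'a', 'b', 'c', 'd', 0, 0 } */
}
#endif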
unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}



/*
 * Write data to a physical address. Memory should not be cache inhibited.
 */

static inline void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	switch (size) {
	case 1:
		*(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}
}

static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	*(volatile unsigned long long *)PHYSMAP_PTOV(paddr) = data;
}

void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}


/* PCI config cycle probing
 *
 * Read the memory location at physical address paddr.
 * *Does not* recover from machine checks, unlike the PowerPC implementation.
 * Should probably be deprecated.
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4)
		return FALSE;

	*val = ml_phys_read((pmap_paddr_t)paddr);

	return TRUE;
}
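/*
 * Illustrative sketch (not compiled into this file): a read-modify-write
 * of one 32-bit physical word using the accessors above.  "pa" is a
 * hypothetical physical address within the physmap.
 */
#if 0
static void
phys_rmw_example(vm_offset_t pa)
{
	unsigned int val;

	/* ml_probe_read() refuses words that straddle a page boundary */
	if (ml_probe_read(pa, &val))
		ml_phys_write(pa, val | 0x1);
}
#endif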
/*
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs.
 */
boolean_t
ml_probe_read_64(addr64_t paddr64, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4)
		return FALSE;

	*val = ml_phys_read_64((pmap_paddr_t)paddr64);
	return TRUE;
}


int bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return (int)len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return 0;
}

void *
memmove(void *dst, const void *src, size_t ulen)
{
	bcopy(src, dst, ulen);
	return dst;
}

/*
 * Abstract:
 *	strlen returns the number of characters in "string" preceding
 *	the terminating null character.
 */

size_t
strlen(
	register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}

uint32_t
hw_compare_and_store(uint32_t oldval, uint32_t newval, volatile uint32_t *dest)
{
	return OSCompareAndSwap((UInt32)oldval,
				(UInt32)newval,
				(volatile UInt32 *)dest);
}

#if MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
	__unused uintptr_t *buf,
	__unused vm_size_t callstack_max)
{
}

#endif /* MACH_ASSERT */

void fillPage(ppnum_t pa, unsigned int fill)
{
	pmap_paddr_t src;
	int i;
	int cnt = PAGE_SIZE / sizeof(unsigned int);
	unsigned int *addr;

	src = i386_ptob(pa);
	for (i = 0, addr = (unsigned int *)PHYSMAP_PTOV(src); i < cnt; i++)
		*addr++ = fill;
}

static inline void __clflush(void *ptr)
{
	__asm__ volatile("clflush (%0)" : : "r" (ptr));
}

void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	addr64_t linesize = cpuid_info()->cache_linesize;
	addr64_t bound = (pa + count + linesize - 1) & ~(linesize - 1);

	mfence();

	while (pa < bound) {
		__clflush(PHYSMAP_PTOV(pa));
		pa += linesize;
	}

	mfence();
}

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	dcache_incoherent_io_store64(pa, count);
}

void
flush_dcache64(addr64_t addr, unsigned count, int phys)
{
	if (phys) {
		dcache_incoherent_io_flush64(addr, count);
	} else {
		uint64_t linesize = cpuid_info()->cache_linesize;
		addr64_t bound = (addr + count + linesize - 1) & ~(linesize - 1);
		mfence();
		while (addr < bound) {
			__clflush((void *)(uintptr_t)addr);
			addr += linesize;
		}
		mfence();
	}
}

void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}


addr64_t vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
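/*
 * Illustrative sketch (not compiled into this file): a lock-free counter
 * increment built on hw_compare_and_store() above, which returns non-zero
 * only when the swap succeeds.  "counter" is a hypothetical shared variable.
 */
#if 0
static void
atomic_increment_example(volatile uint32_t *counter)
{
	uint32_t old;

	do {
		old = *counter;
		/* retry if another CPU changed *counter since the load */
	} while (!hw_compare_and_store(old, old + 1, counter));
}
#endif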
extern i386_cpu_info_t cpuid_cpu_info;
void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t istate;
	unsigned char *cacheline_addr;
	i386_cpu_info_t *cpuid_infop = cpuid_info();
	int cacheline_size;
	int cachelines_to_flush;

	cacheline_size = cpuid_infop->cache_linesize;
	if (cacheline_size == 0)
		panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop);
	cachelines_to_flush = PAGE_SIZE / cacheline_size;

	mfence();

	istate = ml_set_interrupts_enabled(FALSE);

	for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
	     cachelines_to_flush > 0;
	     cachelines_to_flush--, cacheline_addr += cacheline_size) {
		__clflush((void *) cacheline_addr);
	}

	(void) ml_set_interrupts_enabled(istate);

	mfence();
}


#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn, arg)
}
#endif /* !MACH_KDP */

#if !CONFIG_VMX
int host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}

void host_vmxoff(void)
{
	return;
}
#endif /* !CONFIG_VMX */
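/*
 * Illustrative sketch (not compiled into this file): pushing a freshly
 * written page out of the data caches with cache_flush_page_phys() before
 * handing it to a non-coherent agent.  "pn" is a hypothetical page number.
 */
#if 0
static void
publish_page_example(ppnum_t pn)
{
	/*
	 * clflush every line of the page, with interrupts disabled and
	 * mfence on either side, as implemented above.
	 */
	cache_flush_page_phys(pn);
}
#endif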