1/* 2 * Copyright (c) 1999-2008 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28/* 29 * File: ubc_subr.c 30 * Author: Umesh Vaishampayan [umeshv@apple.com] 31 * 05-Aug-1999 umeshv Created. 32 * 33 * Functions related to Unified Buffer cache. 34 * 35 * Caller of UBC functions MUST have a valid reference on the vnode. 
36 * 37 */ 38 39#include <sys/types.h> 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/lock.h> 43#include <sys/mman.h> 44#include <sys/mount_internal.h> 45#include <sys/vnode_internal.h> 46#include <sys/ubc_internal.h> 47#include <sys/ucred.h> 48#include <sys/proc_internal.h> 49#include <sys/kauth.h> 50#include <sys/buf.h> 51#include <sys/user.h> 52#include <sys/codesign.h> 53 54#include <mach/mach_types.h> 55#include <mach/memory_object_types.h> 56#include <mach/memory_object_control.h> 57#include <mach/vm_map.h> 58#include <mach/mach_vm.h> 59#include <mach/upl.h> 60 61#include <kern/kern_types.h> 62#include <kern/kalloc.h> 63#include <kern/zalloc.h> 64#include <kern/thread.h> 65#include <vm/vm_kern.h> 66#include <vm/vm_protos.h> /* last */ 67 68#include <libkern/crypto/sha1.h> 69#include <libkern/libkern.h> 70 71#include <sys/kasl.h> 72#include <sys/syslog.h> 73 74#include <security/mac_framework.h> 75 76/* XXX These should be in a BSD accessible Mach header, but aren't. */ 77extern kern_return_t memory_object_pages_resident(memory_object_control_t, 78 boolean_t *); 79extern kern_return_t memory_object_signed(memory_object_control_t control, 80 boolean_t is_signed); 81extern boolean_t memory_object_is_slid(memory_object_control_t control); 82extern boolean_t memory_object_is_signed(memory_object_control_t); 83 84extern void Debugger(const char *message); 85 86 87/* XXX no one uses this interface! */ 88kern_return_t ubc_page_op_with_control( 89 memory_object_control_t control, 90 off_t f_offset, 91 int ops, 92 ppnum_t *phys_entryp, 93 int *flagsp); 94 95 96#if DIAGNOSTIC 97#if defined(assert) 98#undef assert 99#endif 100#define assert(cond) \ 101 ((void) ((cond) ? 
	   0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

/* Internal helpers; definitions appear later in this file. */
static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

/* Zone backing all ubc_info allocations; created once in ubc_init(). */
struct zone	*ubc_info_zone;


/*
 * CODESIGNING
 * Routines to navigate code signing data structures in the kernel...
 */

extern int cs_debug;

/*
 * cs_valid_range
 *
 * Return TRUE iff [start, end) is a non-inverted range lying entirely
 * within [lower_bound, upper_bound).  Every pointer derived from an
 * untrusted offset inside a code-signing blob must pass through this
 * check before being dereferenced.
 */
static boolean_t
cs_valid_range(
	const void *start,
	const void *end,
	const void *lower_bound,
	const void *upper_bound)
{
	/* reject inverted bounds or an inverted range */
	if (upper_bound < lower_bound ||
	    end < start) {
		return FALSE;
	}

	/* range must be fully contained in the bounds */
	if (start < lower_bound ||
	    end > upper_bound) {
		return FALSE;
	}

	return TRUE;
}

/*
 * hex_str
 *
 * Render 'len' bytes of 'hash' as lowercase hex into 'buf'.
 * Caller must supply at least 2*len + 1 bytes (snprintf NUL-terminates
 * each 2-character chunk, so the final byte is the terminating NUL).
 */
static void
hex_str(
	const unsigned char *hash,
	size_t len,
	char *buf)
{
	unsigned int n;
	for (n = 0; n < len; n++)
		snprintf(buf + 2*n, 3, "%02.2x", hash[n]);
}


/*
 * Locate the CodeDirectory from an embedded signature blob
 *
 * 'embedded' points at untrusted data; every derived pointer is
 * bounds-checked against [lower_bound, upper_bound) before use.
 * Returns NULL if no structurally valid CodeDirectory is found.
 */
static const 
CS_CodeDirectory *findCodeDirectory(
	const CS_SuperBlob *embedded,
	char *lower_bound,
	char *upper_bound)
{
	const CS_CodeDirectory *cd = NULL;

	if (embedded &&
	    cs_valid_range(embedded, embedded + 1, lower_bound, upper_bound) &&
	    ntohl(embedded->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_BlobIndex *limit;
		const CS_BlobIndex *p;

		/* 'count' is untrusted; validate the whole index array first */
		limit = &embedded->index[ntohl(embedded->count)];
		if (!cs_valid_range(&embedded->index[0], limit,
				    lower_bound, upper_bound)) {
			return NULL;
		}
		for (p = embedded->index; p < limit; ++p) {
			if (ntohl(p->type) == CSSLOT_CODEDIRECTORY) {
				const unsigned char *base;

				base = (const unsigned char *)embedded;
				cd = (const CS_CodeDirectory *)(base + ntohl(p->offset));
				break;
			}
		}
	} else {
		/*
		 * Detached signatures come as a bare CS_CodeDirectory,
		 * without a blob.
		 */
		cd = (const CS_CodeDirectory *) embedded;
	}

	/*
	 * Validate the candidate: the struct itself, its declared length,
	 * the hash array start, and the full run of code-slot hashes must
	 * all lie inside the blob, and the magic must match.
	 */
	if (cd &&
	    cs_valid_range(cd, cd + 1, lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd + ntohl(cd->length),
			   lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd + ntohl(cd->hashOffset),
			   lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd +
			   ntohl(cd->hashOffset) +
			   (ntohl(cd->nCodeSlots) * SHA1_RESULTLEN),
			   lower_bound, upper_bound) &&

	    ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY) {
		return cd;
	}

	// not found or not a valid code directory
	return NULL;
}


/*
 * Locating a page hash
 *
 * Return a pointer to the SHA-1 hash slot for code page 'page' inside
 * code directory 'cd', honoring the optional scatter table (version >=
 * CS_SUPPORTSSCATTER).  Returns NULL when the page has no hash or any
 * derived pointer falls outside [lower_bound, upper_bound).
 */
static const unsigned char *
hashes(
	const CS_CodeDirectory *cd,
	unsigned page,
	char *lower_bound,
	char *upper_bound)
{
	const unsigned char *base, *top, *hash;
	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

	if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* Get first scatter struct */
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		uint32_t hashindex=0, scount, sbase=0;
		/* iterate all scatter structs */
		do {
			if((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
				if(cs_debug) {
					printf("CODE SIGNING: Scatter extends past Code Directory\n");
				}
				return NULL;
			}
			
			scount = ntohl(scatter->count);
			uint32_t new_base = ntohl(scatter->base);

			/* last scatter? (a zero count entry terminates the array) */
			if (scount == 0) {
				return NULL;
			}

			/* bases must be strictly increasing after the first entry */
			if((hashindex > 0) && (new_base <= sbase)) {
				if(cs_debug) {
					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
					       sbase, new_base);
				}
				return NULL;	/* unordered scatter array */
			}
			sbase = new_base;

			/* this scatter beyond page we're looking for? */
			if (sbase > page) {
				return NULL;
			}

			if (sbase+scount >= page) {
				/* Found the scatter struct that is 
				 * referencing our page */

				/* base = address of first hash covered by scatter */
				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
					hashindex * SHA1_RESULTLEN;
				/* top = address of first hash after this scatter */
				top = base + scount * SHA1_RESULTLEN;
				if (!cs_valid_range(base, top, lower_bound,
						    upper_bound) ||
				    hashindex > nCodeSlots) {
					return NULL;
				}

				break;
			}

			/* this scatter struct is before the page we're looking
			 * for. Iterate. */
			hashindex+=scount;
			scatter++;
		} while(1);

		hash = base + (page - sbase) * SHA1_RESULTLEN;
	} else {
		/* no scatter table: hashes are a flat array indexed by page */
		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
		top = base + nCodeSlots * SHA1_RESULTLEN;
		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
		    page > nCodeSlots) {
			return NULL;
		}
		assert(page < nCodeSlots);

		hash = base + page * SHA1_RESULTLEN;
	}

	/* final bounds check on the single selected hash slot */
	if (!cs_valid_range(hash, hash + SHA1_RESULTLEN,
			    lower_bound, upper_bound)) {
		hash = NULL;
	}

	return hash;
}

/*
 * cs_validate_codedirectory
 *
 * Validate that pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
 *
 * Parameters:	cd			Pointer to code directory buffer
 *		length			Length of buffer
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */

static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{

	if (length < sizeof(*cd))
		return EBADEXEC;
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
		return EBADEXEC;
	/* only SHA-1 page hashes at PAGE_SHIFT granularity are supported */
	if (cd->hashSize != SHA1_RESULTLEN)
		return EBADEXEC;
	if (cd->pageSize != PAGE_SHIFT)
		return EBADEXEC;
	if (cd->hashType != CS_HASHTYPE_SHA1)
		return EBADEXEC;

	if (length < ntohl(cd->hashOffset))
		return EBADEXEC;

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / SHA1_RESULTLEN < ntohl(cd->nSpecialSlots))
		return EBADEXEC;

	/* check that codeslots fits in the buffer */
	if ((length - ntohl(cd->hashOffset)) / SHA1_RESULTLEN <  ntohl(cd->nCodeSlots))
		return EBADEXEC;
	
	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {

		if (length < ntohl(cd->scatterOffset))
			return EBADEXEC;

		SC_Scatter *scatter = (SC_Scatter *)
			(((uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer, since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while(1) {
			/* check that the end of each scatter buffer in within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
				return EBADEXEC;
			uint32_t scount = ntohl(scatter->count);
			/* a zero count terminates the scatter array */
			if (scount == 0)
				break;
			/* guard against uint32 overflow of the running page total */
			if (nPages + scount < nPages)
				return EBADEXEC;
			nPages += scount;
			scatter++;

			/* XXX check that basees doesn't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots))
			return EBADEXEC;
#endif
	}

	if (length < ntohl(cd->identOffset))
		return EBADEXEC;

	/* identifier is NUL terminated string */
	if (cd->identOffset) {
		uint8_t *ptr = (uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
			return EBADEXEC;
	}

	return 0;
}

/*
 * cs_validate_blob
 *
 * Check that a generic blob header fits in 'length' bytes and that its
 * self-declared length does not exceed the buffer.
 */

static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
		return EBADEXEC;
	return 0;
}

/*
 * cs_validate_csblob
 *
 * Validate that superblob/embedded code directory to make sure that
 * all internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
414 * 415 * 416 * Parameters: buffer Pointer to code signature 417 * length Length of buffer 418 * rcd returns pointer to code directory 419 * 420 * Returns: 0 Success 421 * EBADEXEC Invalid code signature 422 */ 423 424static int 425cs_validate_csblob(const uint8_t *addr, size_t length, 426 const CS_CodeDirectory **rcd) 427{ 428 const CS_GenericBlob *blob = (const CS_GenericBlob *)(void *)addr; 429 int error; 430 431 *rcd = NULL; 432 433 error = cs_validate_blob(blob, length); 434 if (error) 435 return error; 436 437 length = ntohl(blob->length); 438 439 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) { 440 const CS_SuperBlob *sb = (const CS_SuperBlob *)blob; 441 uint32_t n, count = ntohl(sb->count); 442 443 if (length < sizeof(CS_SuperBlob)) 444 return EBADEXEC; 445 446 /* check that the array of BlobIndex fits in the rest of the data */ 447 if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) 448 return EBADEXEC; 449 450 /* now check each BlobIndex */ 451 for (n = 0; n < count; n++) { 452 const CS_BlobIndex *blobIndex = &sb->index[n]; 453 if (length < ntohl(blobIndex->offset)) 454 return EBADEXEC; 455 456 const CS_GenericBlob *subBlob = 457 (const CS_GenericBlob *)(void *)(addr + ntohl(blobIndex->offset)); 458 459 size_t subLength = length - ntohl(blobIndex->offset); 460 461 if ((error = cs_validate_blob(subBlob, subLength)) != 0) 462 return error; 463 subLength = ntohl(subBlob->length); 464 465 /* extra validation for CDs, that is also returned */ 466 if (ntohl(blobIndex->type) == CSSLOT_CODEDIRECTORY) { 467 const CS_CodeDirectory *cd = (const CS_CodeDirectory *)subBlob; 468 if ((error = cs_validate_codedirectory(cd, subLength)) != 0) 469 return error; 470 *rcd = cd; 471 } 472 } 473 474 } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) { 475 476 if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(void *)addr, length)) != 0) 477 return error; 478 *rcd = (const CS_CodeDirectory *)blob; 479 } else { 480 return EBADEXEC; 481 
} 482 483 if (*rcd == NULL) 484 return EBADEXEC; 485 486 return 0; 487} 488 489/* 490 * cs_find_blob_bytes 491 * 492 * Find an blob from the superblob/code directory. The blob must have 493 * been been validated by cs_validate_csblob() before calling 494 * this. Use cs_find_blob() instead. 495 * 496 * Will also find a "raw" code directory if its stored as well as 497 * searching the superblob. 498 * 499 * Parameters: buffer Pointer to code signature 500 * length Length of buffer 501 * type type of blob to find 502 * magic the magic number for that blob 503 * 504 * Returns: pointer Success 505 * NULL Buffer not found 506 */ 507 508static const CS_GenericBlob * 509cs_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic) 510{ 511 const CS_GenericBlob *blob = (const CS_GenericBlob *)(void *)addr; 512 513 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) { 514 const CS_SuperBlob *sb = (const CS_SuperBlob *)blob; 515 size_t n, count = ntohl(sb->count); 516 517 for (n = 0; n < count; n++) { 518 if (ntohl(sb->index[n].type) != type) 519 continue; 520 uint32_t offset = ntohl(sb->index[n].offset); 521 if (length - sizeof(const CS_GenericBlob) < offset) 522 return NULL; 523 blob = (const CS_GenericBlob *)(void *)(addr + offset); 524 if (ntohl(blob->magic) != magic) 525 continue; 526 return blob; 527 } 528 } else if (type == CSSLOT_CODEDIRECTORY 529 && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY 530 && magic == CSMAGIC_CODEDIRECTORY) 531 return blob; 532 return NULL; 533} 534 535 536static const CS_GenericBlob * 537cs_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic) 538{ 539 if ((csblob->csb_flags & CS_VALID) == 0) 540 return NULL; 541 return cs_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic); 542} 543 544static const uint8_t * 545cs_find_special_slot(const CS_CodeDirectory *cd, uint32_t slot) 546{ 547 /* there is no zero special slot since that is the first code slot */ 548 if 
(ntohl(cd->nSpecialSlots) < slot || slot == 0) 549 return NULL; 550 551 return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (SHA1_RESULTLEN * slot)); 552} 553 554/* 555 * CODESIGNING 556 * End of routines to navigate code signing data structures in the kernel. 557 */ 558 559/* 560 * ENTITLEMENTS 561 * Routines to navigate entitlements in the kernel. 562 */ 563 564/* Retrieve the entitlements blob for a process. 565 * Returns: 566 * EINVAL no text vnode associated with the process 567 * EBADEXEC invalid code signing data 568 * 0 no error occurred 569 * 570 * On success, out_start and out_length will point to the 571 * entitlements blob if found; or will be set to NULL/zero 572 * if there were no entitlements. 573 */ 574 575static uint8_t sha1_zero[SHA1_RESULTLEN] = { 0 }; 576 577int 578cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length) 579{ 580 uint8_t computed_hash[SHA1_RESULTLEN]; 581 const CS_GenericBlob *entitlements; 582 const CS_CodeDirectory *code_dir; 583 struct cs_blob *csblob; 584 const uint8_t *embedded_hash; 585 SHA1_CTX context; 586 587 *out_start = NULL; 588 *out_length = 0; 589 590 if (NULL == p->p_textvp) 591 return EINVAL; 592 593 if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) 594 return 0; 595 596 if ((code_dir = (const CS_CodeDirectory *)cs_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY)) == NULL) 597 return 0; 598 599 entitlements = cs_find_blob(csblob, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS); 600 embedded_hash = cs_find_special_slot(code_dir, CSSLOT_ENTITLEMENTS); 601 602 if (embedded_hash == NULL) { 603 if (entitlements) 604 return EBADEXEC; 605 return 0; 606 } else if (entitlements == NULL && memcmp(embedded_hash, sha1_zero, SHA1_RESULTLEN) != 0) { 607 return EBADEXEC; 608 } 609 610 SHA1Init(&context); 611 SHA1Update(&context, entitlements, ntohl(entitlements->length)); 612 SHA1Final(computed_hash, &context); 613 if (memcmp(computed_hash, embedded_hash, SHA1_RESULTLEN) 
!= 0) 614 return EBADEXEC; 615 616 *out_start = (void *)entitlements; 617 *out_length = ntohl(entitlements->length); 618 619 return 0; 620} 621 622/* Retrieve the codesign identity for a process. 623 * Returns: 624 * NULL an error occured 625 * string the cs_identity 626 */ 627 628const char * 629cs_identity_get(proc_t p) 630{ 631 const CS_CodeDirectory *code_dir; 632 struct cs_blob *csblob; 633 634 if (NULL == p->p_textvp) 635 return NULL; 636 637 if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) 638 return NULL; 639 640 if ((code_dir = (const CS_CodeDirectory *)cs_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY)) == NULL) 641 return NULL; 642 643 if (code_dir->identOffset == 0) 644 return NULL; 645 646 return ((const char *)code_dir) + ntohl(code_dir->identOffset); 647} 648 649 650 651/* Retrieve the codesign blob for a process. 652 * Returns: 653 * EINVAL no text vnode associated with the process 654 * 0 no error occurred 655 * 656 * On success, out_start and out_length will point to the 657 * cms blob if found; or will be set to NULL/zero 658 * if there were no blob. 659 */ 660 661int 662cs_blob_get(proc_t p, void **out_start, size_t *out_length) 663{ 664 struct cs_blob *csblob; 665 666 *out_start = NULL; 667 *out_length = 0; 668 669 if (NULL == p->p_textvp) 670 return EINVAL; 671 672 if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) 673 return 0; 674 675 *out_start = (void *)csblob->csb_mem_kaddr; 676 *out_length = csblob->csb_mem_size; 677 678 return 0; 679} 680 681uint8_t * 682cs_get_cdhash(struct proc *p) 683{ 684 struct cs_blob *csblob; 685 686 if (NULL == p->p_textvp) 687 return NULL; 688 689 if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) 690 return NULL; 691 692 return csblob->csb_sha1; 693} 694 695/* 696 * ENTITLEMENTS 697 * End of routines to navigate entitlements in the kernel. 
 */



/*
 * ubc_init
 *
 * Initialization of the zone for Unified Buffer Cache.
 *
 * Parameters:	(void)
 *
 * Returns:	(void)
 *
 * Implicit returns:
 *		ubc_info_zone(global)	initialized for subsequent allocations
 */
__private_extern__ void
ubc_init(void)
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);

	/* zone sized for up to 10000 ubc_info structures, 8KB allocation chunks */
	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");

	zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}


/*
 * ubc_info_init
 *
 * Allocate and attach an empty ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 *
 */
int
ubc_info_init(struct vnode *vp)
{
	/* withfsize == 0: size is obtained from the vnode itself */
	return(ubc_info_init_internal(vp, 0, 0));
}


/*
 * ubc_info_init_withsize
 *
 * Allocate and attach a sized ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		filesize		The size of the file
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	/* withfsize == 1: trust the caller-supplied size, skip vnode_size() */
	return(ubc_info_init_internal(vp, 1, filesize));
}


/*
 * ubc_info_init_internal
 *
 * Allocate and attach a ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		withfsize{0,1}		Zero if the size should be obtained
 *					from the vnode; otherwise, use filesize
 *		filesize		The size of the file, if withfsize == 1
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 *
 * Notes:	We call a blocking zalloc(), and the zone was created as an
 *		expandable and collectable zone, so if no memory is available,
 *		it is possible for zalloc() to block indefinitely.
 *		zalloc()
 *		may also panic if the zone of zones is exhausted, since it's
 *		NOT expandable.
 *
 *		We unconditionally call vnode_pager_setup(), even if this is
 *		a reuse of a ubc_info; in that case, we should probably assert
 *		that it does not already have a pager association, but do not.
 *
 *		Since memory_object_create_named() can only fail from receiving
 *		an invalid pager argument, the explicit check and panic is
 *		merely precautionary.
 */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void *  pager;
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 * So use bogus size.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager); 
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */

	if (withfsize == 0) {
		/* initialize the size */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;	/* vnode has a named ubc reference */

	return (error);
}


/*
 * ubc_info_free
 *
 * Free a ubc_info structure
 *
 * Parameters:	uip			A pointer to the ubc_info to free
 *
 * Returns:	(void)
 *
 * Notes:	If there is a credential that has subsequently been associated
 *		with the ubc_info via a call to ubc_setcred(), the reference
 *		to the credential is dropped.
 *
 *		It's actually impossible for a ubc_info.ui_control to take the
 *		value MEMORY_OBJECT_CONTROL_NULL.
 */
static void
ubc_info_free(struct ubc_info *uip)
{
	/* drop any credential attached via ubc_setcred()/ubc_setthreadcred() */
	if (IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_unref(&uip->ui_ucred);
	}

	/* release the named reference on the memory object control */
	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);
	
	cluster_release(uip);
	ubc_cs_free(uip);

	zfree(ubc_info_zone, uip);
	return;
}


/*
 * ubc_info_deallocate
 *
 * Public entry point; simply frees the ubc_info (see ubc_info_free()).
 */
void
ubc_info_deallocate(struct ubc_info *uip)
{
        ubc_info_free(uip);
}


/*
 * ubc_setsize
 *
 * Tell the  VM that the the size of the file represented by the vnode has
 * changed
 *
 * Parameters:	vp			The vp whose backing file size is
 *					being changed
 *		nsize			The new size of the backing file
 *
 * Returns:	1			Success
 *		0			Failure
 *
 * Notes:	This function will indicate failure if the new size that's
 *		being attempted to be set is negative.
 *
 *		This function will fail if there is no ubc_info currently
 *		associated with the vnode.
 *
 *		This function will indicate success it the new size is the
 *		same or larger than the old size (in this case, the remainder
 *		of the file will require modification or use of an existing upl
 *		to access successfully).
 *
 *		This function will fail if the new file size is smaller, and
 *		the memory region being invalidated was unable to actually be
 *		invalidated and/or the last page could not be flushed, if the
 *		new size is not aligned to a page boundary.  This is usually
 *		indicative of an I/O error.
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;
	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {	/* Nothing more to do */
		if (nsize > osize) {
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return (1);		/* return success */
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t		upl;
		upl_page_info_t	*pl;


		/*
		 * new EOF ends up in the middle of a page
		 * zero the tail of this page if its currently
		 * present in the cache
		 */
		kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE);
		
		if (kret != KERN_SUCCESS)
		        panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);

		if (upl_valid_page(pl, 0))
		        cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);

		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int	flags;

		if (lastpg == 0)
			/* whole object is going away: flush everything */
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		else
			flags = MEMORY_OBJECT_DATA_FLUSH;
		/*
		 * invalidate the pages beyond the new EOF page
		 *
		 */
		kret = memory_object_lock_request(control,
						  (memory_object_offset_t)lastpg,
						  (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
						  MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
		        printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	}
	return ((kret == KERN_SUCCESS) ? 1 : 0);
}


/*
 * ubc_getsize
 *
 * Get the size of the file assocated with the specified vnode
 *
 * Parameters:	vp			The vnode whose size is of interest
 *
 * Returns:	0			There is no ubc_info associated with
 *					this vnode, or the size is zero
 *		!0			The size of the file
 *
 * Notes:	Using this routine, it is not possible for a caller to
 *		successfully distinguish between a vnode associate with a zero
 *		length file, and a vnode with no associated ubc_info.  The
 *		caller therefore needs to not care, or needs to ensure that
 *		they have previously successfully called ubc_info_init() or
 *		ubc_info_init_withsize().
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way
	 * as they call this for directory 
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}


/*
 * ubc_umount
 *
 * Call ubc_sync_range(vp, 0, EOF, UBC_PUSHALL) on all the vnodes for this
 * mount point
 *
 * Parameters:	mp			The mount point
 *
 * Returns:	0			Success
 *
 * Notes:	There is no failure indication for this function.
 *
 *		This function is used in the unmount path; since it may block
 *		I/O indefinitely, it should not be used in the forced unmount
 *		path, since a device unavailability could also block that
 *		indefinitely.
 *
 *		Because there is no device ejection interlock on USB, FireWire,
 *		or similar devices, it's possible that an ejection that begins
 *		subsequent to the vnode_iterate() completing, either on one of
 *		those devices, or a network mount for which the server quits
 *		responding, etc., may cause the caller to block indefinitely.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}


/*
 * ubc_umcallback
 *
 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
 * and vnode_iterate() for details of implementation.
 */
static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		/* push all dirty pages for this vnode back to the backing store */
		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}


/*
 * ubc_getcred
 *
 * Get the credentials currently active for the ubc_info associated with the
 * vnode.
 *
 * Parameters:	vp			The vnode whose ubc_info credentials
 *					are to be retrieved
 *
 * Returns:	!NOCRED			The credentials
 *		NOCRED			If there is no ubc_info for the vnode,
 *					or if there is one, but it has not had
 *					any credentials associated with it via
 *					a call to ubc_setcred()
 */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
        if (UBCINFOEXISTS(vp))
	        return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}


/*
 * ubc_setthreadcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the supplied thread; otherwise leave
 * them alone.
1147 * 1148 * Parameters: vp The vnode whose ubc_info creds are to 1149 * be set 1150 * p The process whose credentials are to 1151 * be used, if not running on an assumed 1152 * credential 1153 * thread The thread whose credentials are to 1154 * be used 1155 * 1156 * Returns: 1 This vnode has no associated ubc_info 1157 * 0 Success 1158 * 1159 * Notes: This function takes a proc parameter to account for bootstrap 1160 * issues where a task or thread may call this routine, either 1161 * before credentials have been initialized by bsd_init(), or if 1162 * there is no BSD info asscoiate with a mach thread yet. This 1163 * is known to happen in both the initial swap and memory mapping 1164 * calls. 1165 * 1166 * This function is generally used only in the following cases: 1167 * 1168 * o a memory mapped file via the mmap() system call 1169 * o a memory mapped file via the deprecated map_fd() call 1170 * o a swap store backing file 1171 * o subsequent to a successful write via vn_write() 1172 * 1173 * The information is then used by the NFS client in order to 1174 * cons up a wire message in either the page-in or page-out path. 1175 * 1176 * There are two potential problems with the use of this API: 1177 * 1178 * o Because the write path only set it on a successful 1179 * write, there is a race window between setting the 1180 * credential and its use to evict the pages to the 1181 * remote file server 1182 * 1183 * o Because a page-in may occur prior to a write, the 1184 * credential may not be set at this time, if the page-in 1185 * is not the result of a mapping established via mmap() 1186 * or map_fd(). 1187 * 1188 * In both these cases, this will be triggered from the paging 1189 * path, which will instead use the credential of the current 1190 * process, which in this case is either the dynamic_pager or 1191 * the kernel task, both of which utilize "root" credentials. 
 *
 *		This may potentially permit operations to occur which should
 *		be denied, or it may cause to be denied operations which
 *		should be permitted, depending on the configuration of the NFS
 *		server.
 */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread *uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	/* only set a credential if one is not already held by the ubc_info */
	if (!IS_VALID_CRED(credp)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			/* an extra reference is taken so the ubc_info owns one */
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}


/*
 * ubc_setcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the process; otherwise leave them
 * alone.
 *
 * Parameters:	vp	The vnode whose ubc_info creds are to
 *			be set
 *		p	The process whose credentials are to
 *			be used
 *
 * Returns:	0	This vnode has no associated ubc_info
 *		1	Success
 *
 * Notes:	The return values for this function are inverted from nearly
 *		all other uses in the kernel.
 *
 *		See also ubc_setthreadcred(), above.
 *
 *		This function is considered deprecated, and generally should
 *		not be used, as it is incompatible with per-thread credentials;
 *		it exists for legacy KPI reasons.
 *
 * DEPRECATION:	ubc_setcred() is being deprecated. Please use
 *		ubc_setthreadcred() instead.
 */
int
ubc_setcred(struct vnode *vp, proc_t p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	/* If there is no ubc_info, deny the operation */
	if ( !UBCINFOEXISTS(vp))
		return (0);

	/*
	 * Check to see if there is already a credential reference in the
	 * ubc_info; if there is not, take one on the supplied credential.
	 */
	vnode_lock(vp);
	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;
	if (!IS_VALID_CRED(credp)) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}

/*
 * ubc_getpager
 *
 * Get the pager associated with the ubc_info associated with the vnode.
 *
 * Parameters:	vp	The vnode to obtain the pager from
 *
 * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
 *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
 *
 * Notes:	For each vnode that has a ubc_info associated with it, that
 *		ubc_info SHALL have a pager associated with it, so in the
 *		normal case, it's impossible to return VNODE_PAGER_NULL for
 *		a vnode with an associated ubc_info.
 */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}


/*
 * ubc_getobject
 *
 * Get the memory object control associated with the ubc_info associated with
 * the vnode
 *
 * Parameters:	vp	The vnode to obtain the memory object
 *			from
 *		flags	DEPRECATED
 *
 * Returns:	!MEMORY_OBJECT_CONTROL_NULL
 *		MEMORY_OBJECT_CONTROL_NULL
 *
 * Notes:	Historically, if the flags were not "do not reactivate", this
 *		function would look up the memory object using the pager if
 *		it did not exist (this could be the case if the vnode had
 *		been previously reactivated).
The flags would also permit a 1322 * hold to be requested, which would have created an object 1323 * reference, if one had not already existed. This usage is 1324 * deprecated, as it would permit a race between finding and 1325 * taking the reference vs. a single reference being dropped in 1326 * another thread. 1327 */ 1328memory_object_control_t 1329ubc_getobject(struct vnode *vp, __unused int flags) 1330{ 1331 if (UBCINFOEXISTS(vp)) 1332 return((vp->v_ubcinfo->ui_control)); 1333 1334 return (MEMORY_OBJECT_CONTROL_NULL); 1335} 1336 1337boolean_t 1338ubc_strict_uncached_IO(struct vnode *vp) 1339{ 1340 boolean_t result = FALSE; 1341 1342 if (UBCINFOEXISTS(vp)) { 1343 result = memory_object_is_slid(vp->v_ubcinfo->ui_control); 1344 } 1345 return result; 1346} 1347 1348/* 1349 * ubc_blktooff 1350 * 1351 * Convert a given block number to a memory backing object (file) offset for a 1352 * given vnode 1353 * 1354 * Parameters: vp The vnode in which the block is located 1355 * blkno The block number to convert 1356 * 1357 * Returns: !-1 The offset into the backing object 1358 * -1 There is no ubc_info associated with 1359 * the vnode 1360 * -1 An error occurred in the underlying VFS 1361 * while translating the block to an 1362 * offset; the most likely cause is that 1363 * the caller specified a block past the 1364 * end of the file, but this could also be 1365 * any other error from VNOP_BLKTOOFF(). 1366 * 1367 * Note: Representing the error in band loses some information, but does 1368 * not occlude a valid offset, since an off_t of -1 is normally 1369 * used to represent EOF. If we had a more reliable constant in 1370 * our header files for it (i.e. explicitly cast to an off_t), we 1371 * would use it here instead. 
1372 */ 1373off_t 1374ubc_blktooff(vnode_t vp, daddr64_t blkno) 1375{ 1376 off_t file_offset = -1; 1377 int error; 1378 1379 if (UBCINFOEXISTS(vp)) { 1380 error = VNOP_BLKTOOFF(vp, blkno, &file_offset); 1381 if (error) 1382 file_offset = -1; 1383 } 1384 1385 return (file_offset); 1386} 1387 1388 1389/* 1390 * ubc_offtoblk 1391 * 1392 * Convert a given offset in a memory backing object into a block number for a 1393 * given vnode 1394 * 1395 * Parameters: vp The vnode in which the offset is 1396 * located 1397 * offset The offset into the backing object 1398 * 1399 * Returns: !-1 The returned block number 1400 * -1 There is no ubc_info associated with 1401 * the vnode 1402 * -1 An error occurred in the underlying VFS 1403 * while translating the block to an 1404 * offset; the most likely cause is that 1405 * the caller specified a block past the 1406 * end of the file, but this could also be 1407 * any other error from VNOP_OFFTOBLK(). 1408 * 1409 * Note: Representing the error in band loses some information, but does 1410 * not occlude a valid block number, since block numbers exceed 1411 * the valid range for offsets, due to their relative sizes. If 1412 * we had a more reliable constant than -1 in our header files 1413 * for it (i.e. explicitly cast to an daddr64_t), we would use it 1414 * here instead. 
1415 */ 1416daddr64_t 1417ubc_offtoblk(vnode_t vp, off_t offset) 1418{ 1419 daddr64_t blkno = -1; 1420 int error = 0; 1421 1422 if (UBCINFOEXISTS(vp)) { 1423 error = VNOP_OFFTOBLK(vp, offset, &blkno); 1424 if (error) 1425 blkno = -1; 1426 } 1427 1428 return (blkno); 1429} 1430 1431 1432/* 1433 * ubc_pages_resident 1434 * 1435 * Determine whether or not a given vnode has pages resident via the memory 1436 * object control associated with the ubc_info associated with the vnode 1437 * 1438 * Parameters: vp The vnode we want to know about 1439 * 1440 * Returns: 1 Yes 1441 * 0 No 1442 */ 1443int 1444ubc_pages_resident(vnode_t vp) 1445{ 1446 kern_return_t kret; 1447 boolean_t has_pages_resident; 1448 1449 if (!UBCINFOEXISTS(vp)) 1450 return (0); 1451 1452 /* 1453 * The following call may fail if an invalid ui_control is specified, 1454 * or if there is no VM object associated with the control object. In 1455 * either case, reacting to it as if there were no pages resident will 1456 * result in correct behavior. 1457 */ 1458 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident); 1459 1460 if (kret != KERN_SUCCESS) 1461 return (0); 1462 1463 if (has_pages_resident == TRUE) 1464 return (1); 1465 1466 return (0); 1467} 1468 1469 1470/* 1471 * ubc_sync_range 1472 * 1473 * Clean and/or invalidate a range in the memory object that backs this vnode 1474 * 1475 * Parameters: vp The vnode whose associated ubc_info's 1476 * associated memory object is to have a 1477 * range invalidated within it 1478 * beg_off The start of the range, as an offset 1479 * end_off The end of the range, as an offset 1480 * flags See ubc_msync_internal() 1481 * 1482 * Returns: 1 Success 1483 * 0 Failure 1484 * 1485 * Notes: see ubc_msync_internal() for more detailed information. 1486 * 1487 * DEPRECATED: This interface is obsolete due to a failure to return error 1488 * information needed in order to correct failures. 
The currently 1489 * recommended interface is ubc_msync(). 1490 */ 1491int 1492ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags) 1493{ 1494 return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL)); 1495} 1496 1497 1498/* 1499 * ubc_msync 1500 * 1501 * Clean and/or invalidate a range in the memory object that backs this vnode 1502 * 1503 * Parameters: vp The vnode whose associated ubc_info's 1504 * associated memory object is to have a 1505 * range invalidated within it 1506 * beg_off The start of the range, as an offset 1507 * end_off The end of the range, as an offset 1508 * resid_off The address of an off_t supplied by the 1509 * caller; may be set to NULL to ignore 1510 * flags See ubc_msync_internal() 1511 * 1512 * Returns: 0 Success 1513 * !0 Failure; an errno is returned 1514 * 1515 * Implicit Returns: 1516 * *resid_off, modified If non-NULL, the contents are ALWAYS 1517 * modified; they are initialized to the 1518 * beg_off, and in case of an I/O error, 1519 * the difference between beg_off and the 1520 * current value will reflect what was 1521 * able to be written before the error 1522 * occurred. If no error is returned, the 1523 * value of the resid_off is undefined; do 1524 * NOT use it in place of end_off if you 1525 * intend to increment from the end of the 1526 * last call and call iteratively. 1527 * 1528 * Notes: see ubc_msync_internal() for more detailed information. 
 *
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	/*
	 * A zero return from the internal routine without an I/O errno means
	 * the request itself was rejected (bad flags or no ubc_info); map
	 * that to EINVAL.  Otherwise report the I/O errno (0 on success).
	 */
	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}


/*
 * ubc_msync_internal
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp	The vnode whose associated ubc_info's
 *			associated memory object is to have a
 *			range invalidated within it
 *		beg_off	The start of the range, as an offset
 *		end_off	The end of the range, as an offset
 *		resid_off	The address of an off_t supplied by the
 *			caller; may be set to NULL to ignore
 *		flags	MUST contain at least one of the flags
 *			UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *			UBC_PUSHALL; if UBC_PUSHDIRTY is used,
 *			UBC_SYNC may also be specified to cause
 *			this function to block until the
 *			operation is complete.  The behavior
 *			of UBC_SYNC is otherwise undefined.
 *		io_errno	The address of an int to contain the
 *			errno from a failed I/O operation, if
 *			one occurs; may be set to NULL to
 *			ignore
 *
 * Returns:	1	Success
 *		0	Failure
 *
 * Implicit Returns:
 *		*resid_off, modified	The contents of this offset MAY be
 *					modified; in case of an I/O error, the
 *					difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred.
 *		*io_errno, modified	The contents of this offset are set to
 *					an errno, if an error occurs; if the
 *					caller supplies an io_errno parameter,
 *					they should be careful to initialize it
 *					to 0 before calling this function to
 *					enable them to distinguish an error
 *					with a valid *resid_off from an invalid
 *					one, and to avoid potentially falsely
 *					reporting an error, depending on use.
 *
 * Notes:	If there is no ubc_info associated with the vnode supplied,
 *		or none of UBC_INVALIDATE, UBC_PUSHDIRTY, and UBC_PUSHALL is
 *		set in flags, this function returns 0 (which ubc_msync() maps
 *		to EINVAL).
 *
 *		If the value of end_off is less than or equal to beg_off, this
 *		function immediately returns success; that is, end_off is NOT
 *		inclusive.
 *
 *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
 *		attempt to block on in-progress I/O by calling this function
 *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
 *		in order to block pending on the I/O already in progress.
 *
 *		The start offset is truncated to the page boundary and the
 *		size is adjusted to include the last page in the range; that
 *		is, end_off on exactly a page boundary will not change if it
 *		is rounded, and the range of bytes written will be from the
 *		truncated beg_off to the rounded (end_off - 1).
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t tsize;
	kern_return_t kret;
	int request_flags = 0;
	int flush_flags = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);
	if (end_off <= beg_off)
		return (1);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both
		 * dirty and precious) to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	/* page-align the range; end_off already on a page boundary is kept */
	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
	    (memory_object_offset_t)beg_off, tsize,
	    (memory_object_offset_t *)resid_off,
	    io_errno, flush_flags, request_flags,
	    VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}


/*
 * ubc_map
 *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
 * to it for the ubc system, if there isn't one already, so it will not be
 * recycled while it's in use, and set flags on the ubc_info to indicate that
 * we have done this
 *
 * Parameters:	vp	The vnode to map
 *		flags	The mapping flags for the vnode; this
 *			will be a combination of one or more of
 *			PROT_READ, PROT_WRITE, and PROT_EXEC
 *
 * Returns:	0	Success
 *		EPERM	Permission was denied
 *
 * Notes:	An I/O reference on the vnode must already be held on entry
 *
 *		If there is no ubc_info associated with the vnode, this function
 *		will return success.
 *
 *		If a permission error occurs, this function will return
 *		failure; all other failures will cause this function to return
 *		success.
 *
 *		IMPORTANT: This is an internal use function, and its symbols
 *		are not exported, hence its error checking is not very robust.
 *		It is primarily used by:
 *
 *		o	mmap(), when mapping a file
 *		o	The deprecated map_fd() interface, when mapping a file
 *		o	When mapping a shared file (a shared library in the
 *			shared segment region)
 *		o	When loading a program image during the exec process
 *
 *		...all of these uses ignore the return code, and any fault that
 *		results later because of a failure is handled in the fix-up path
 *		of the fault handler.  The interface exists primarily as a
 *		performance hint.
 *
 *		Given that third party implementation of the type of interfaces
 *		that would use this function, such as alternative executable
 *		formats, etc., are unsupported, this function is not exported
 *		for general use.
 *		The extra reference is held until the VM system unmaps the
 *		vnode from its own context to maintain a vnode reference in
 *		cases like open()/mmap()/close(), which leave the backing
 *		object referenced by a mapped memory region in a process
 *		address space.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	int need_wakeup = 0;

	if (UBCINFOEXISTS(vp)) {

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		/*
		 * Serialize against any concurrent ubc_map()/ubc_unmap() of
		 * this vnode; UI_MAPBUSY marks the critical section and
		 * UI_MAPWAITING requests a wakeup when it is released.
		 */
		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
				      PRIBIO, "ubc_map", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);
		vnode_unlock(vp);

		error = VNOP_MMAP(vp, flags, vfs_context_current());

		/* only a permission failure is surfaced to the caller */
		if (error != EPERM)
			error = 0;

		vnode_lock_spin(vp);

		if (error == 0) {
			/* the first mapping takes an extra vnode reference */
			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
			if (flags & PROT_WRITE) {
				SET(uip->ui_flags, UI_MAPPEDWRITE);
			}
		}
		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup(&uip->ui_flags);

		if (need_ref)
			vnode_ref(vp);
	}
	return (error);
}


/*
 * ubc_destroy_named
 *
 * Destroy the named memory object associated with the ubc_info control object
 * associated with the designated vnode, if there is a ubc_info associated
 * with the vnode, and a control object is associated with it
 *
 * Parameters:	vp	The designated vnode
 *
 * Returns:	(void)
 *
 * Notes:	This function is called on vnode termination for all vnodes,
 *		and must therefore not assume that there is a ubc_info that is
 *
associated with the vnode, nor that there is a control object 1783 * associated with the ubc_info. 1784 * 1785 * If all the conditions necessary are present, this function 1786 * calls memory_object_destory(), which will in turn end up 1787 * calling ubc_unmap() to release any vnode references that were 1788 * established via ubc_map(). 1789 * 1790 * IMPORTANT: This is an internal use function that is used 1791 * exclusively by the internal use function vclean(). 1792 */ 1793__private_extern__ void 1794ubc_destroy_named(vnode_t vp) 1795{ 1796 memory_object_control_t control; 1797 struct ubc_info *uip; 1798 kern_return_t kret; 1799 1800 if (UBCINFOEXISTS(vp)) { 1801 uip = vp->v_ubcinfo; 1802 1803 /* Terminate the memory object */ 1804 control = ubc_getobject(vp, UBC_HOLDOBJECT); 1805 if (control != MEMORY_OBJECT_CONTROL_NULL) { 1806 kret = memory_object_destroy(control, 0); 1807 if (kret != KERN_SUCCESS) 1808 panic("ubc_destroy_named: memory_object_destroy failed"); 1809 } 1810 } 1811} 1812 1813 1814/* 1815 * ubc_isinuse 1816 * 1817 * Determine whether or not a vnode is currently in use by ubc at a level in 1818 * excess of the requested busycount 1819 * 1820 * Parameters: vp The vnode to check 1821 * busycount The threshold busy count, used to bias 1822 * the count usually already held by the 1823 * caller to avoid races 1824 * 1825 * Returns: 1 The vnode is in use over the threshold 1826 * 0 The vnode is not in use over the 1827 * threshold 1828 * 1829 * Notes: Because the vnode is only held locked while actually asking 1830 * the use count, this function only represents a snapshot of the 1831 * current state of the vnode. If more accurate information is 1832 * required, an additional busycount should be held by the caller 1833 * and a non-zero busycount used. 1834 * 1835 * If there is no ubc_info associated with the vnode, this 1836 * function will report that the vnode is not in use by ubc. 
1837 */ 1838int 1839ubc_isinuse(struct vnode *vp, int busycount) 1840{ 1841 if ( !UBCINFOEXISTS(vp)) 1842 return (0); 1843 return(ubc_isinuse_locked(vp, busycount, 0)); 1844} 1845 1846 1847/* 1848 * ubc_isinuse_locked 1849 * 1850 * Determine whether or not a vnode is currently in use by ubc at a level in 1851 * excess of the requested busycount 1852 * 1853 * Parameters: vp The vnode to check 1854 * busycount The threshold busy count, used to bias 1855 * the count usually already held by the 1856 * caller to avoid races 1857 * locked True if the vnode is already locked by 1858 * the caller 1859 * 1860 * Returns: 1 The vnode is in use over the threshold 1861 * 0 The vnode is not in use over the 1862 * threshold 1863 * 1864 * Notes: If the vnode is not locked on entry, it is locked while 1865 * actually asking the use count. If this is the case, this 1866 * function only represents a snapshot of the current state of 1867 * the vnode. If more accurate information is required, the 1868 * vnode lock should be held by the caller, otherwise an 1869 * additional busycount should be held by the caller and a 1870 * non-zero busycount used. 1871 * 1872 * If there is no ubc_info associated with the vnode, this 1873 * function will report that the vnode is not in use by ubc. 1874 */ 1875int 1876ubc_isinuse_locked(struct vnode *vp, int busycount, int locked) 1877{ 1878 int retval = 0; 1879 1880 1881 if (!locked) 1882 vnode_lock_spin(vp); 1883 1884 if ((vp->v_usecount - vp->v_kusecount) > busycount) 1885 retval = 1; 1886 1887 if (!locked) 1888 vnode_unlock(vp); 1889 return (retval); 1890} 1891 1892 1893/* 1894 * ubc_unmap 1895 * 1896 * Reverse the effects of a ubc_map() call for a given vnode 1897 * 1898 * Parameters: vp vnode to unmap from ubc 1899 * 1900 * Returns: (void) 1901 * 1902 * Notes: This is an internal use function used by vnode_pager_unmap(). 
 *		It will attempt to obtain a reference on the supplied vnode,
 *		and if it can do so, and there is an associated ubc_info, and
 *		the flags indicate that it was mapped via ubc_map(), then the
 *		flag is cleared, the mapping removed, and the reference taken
 *		by ubc_map() is released.
 *
 *		IMPORTANT: This MUST only be called by the VM
 *		to prevent race conditions.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	int need_rele = 0;
	int need_wakeup = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		/*
		 * Serialize against any concurrent ubc_map()/ubc_unmap() of
		 * this vnode (same UI_MAPBUSY handshake as ubc_map()).
		 */
		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
				      PRIBIO, "ubc_unmap", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		vnode_unlock(vp);

		if (need_rele) {
			/* drop the reference taken by ubc_map() */
			(void)VNOP_MNOMAP(vp, vfs_context_current());
			vnode_rele(vp);
		}

		vnode_lock_spin(vp);

		CLR(uip->ui_flags, UI_MAPBUSY);
		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup(&uip->ui_flags);

	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}


/*
 * ubc_page_op
 *
 * Manipulate individual page state for a vnode with an associated ubc_info
 * with an associated memory object control.
 *
 * Parameters:	vp	The vnode backing the page
 *		f_offset	A file offset interior to the page
 *		ops	The operations to perform, as a bitmap
 *			(see below for more information)
 *		phys_entryp	The address of a ppnum_t; may be NULL
 *			to ignore
 *		flagsp	A pointer to an int to contain flags;
 *			may be NULL to ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
 *					not physically contiguous
 *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
 *					physically contiguous
 *		KERN_FAILURE		If the page cannot be looked up
 *
 * Implicit Returns:
 *		*phys_entryp (modified)	If phys_entryp is non-NULL and
 *					UPL_POP_PHYSICAL
 *		*flagsp (modified)	If flagsp is non-NULL and there was
 *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
 *
 * Notes:	For object boundaries, it is considerably more efficient to
 *		ensure that f_offset is in fact on a page boundary, as this
 *		will avoid internal use of the hash table to identify the
 *		page, and would therefore skip a number of early optimizations.
 *		Since this is a page operation anyway, the caller should try
 *		to pass only a page aligned offset because of this.
 *
 *		*flagsp may be modified even if this function fails.  If it is
 *		modified, it will contain the condition of the page before the
 *		requested operation was attempted; these will only include the
 *		bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
 *		UPL_POP_SET, or UPL_POP_CLR bits.
 *
 *		The flags field may contain a specific operation, such as
 *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
 *
 *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
 *						*phys_entryp and successful, set
 *						*phys_entryp
 *		o	UPL_POP_DUMP		Dump the specified page
 *
 *		Otherwise, it is treated as a bitmap of one or more page
 *		operations to perform on the final memory object; allowable
 *		bit values are:
 *
 *		o	UPL_POP_DIRTY		The page is dirty
 *		o	UPL_POP_PAGEOUT		The page is paged out
 *		o	UPL_POP_PRECIOUS	The page is precious
 *		o	UPL_POP_ABSENT		The page is absent
 *		o	UPL_POP_BUSY		The page is busy
 *
 *		If the page status is only being queried and not modified, then
 *		no other bits should be specified.  However, if it is being
 *		modified, exactly ONE of the following bits should be set:
 *
 *		o	UPL_POP_SET		Set the current bitmap bits
 *		o	UPL_POP_CLR		Clear the current bitmap bits
 *
 *		Thus to effect a combination of setting and clearing, it may be
 *		necessary to call this function twice.  If this is done, the
 *		set should be used before the clear, since clearing may trigger
 *		a wakeup on the destination page, and if the page is backed by
 *		an encrypted swap file, setting will trigger the decryption
 *		needed before the wakeup occurs.
 */
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}


/*
 * ubc_range_op
 *
 * Manipulate page state for a range of memory for a vnode with an associated
 * ubc_info with an associated memory object control, when page level state is
 * not required to be returned from the call (i.e. there are no phys_entryp or
 * flagsp parameters to this call, and it takes a range which may contain
 * multiple pages, rather than an offset interior to a single page).
 *
 * Parameters:	vp	The vnode backing the page
 *		f_offset_beg	A file offset interior to the start page
 *		f_offset_end	A file offset interior to the end page
 *		ops	The operations to perform, as a bitmap
 *			(see below for more information)
 *		range	The address of an int; may be NULL to
 *			ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If the object is physically contiguous
 *
 * Implicit Returns:
 *		*range (modified)	If range is non-NULL, its contents will
 *					be modified to contain the number of
 *					bytes successfully operated upon.
 *
 * Notes:	IMPORTANT: This function cannot be used on a range that
 *		consists of physically contiguous pages.
 *
 *		For object boundaries, it is considerably more efficient to
 *		ensure that f_offset_beg and f_offset_end are in fact on page
 *		boundaries, as this will avoid internal use of the hash table
 *		to identify the page, and would therefore skip a number of
 *		early optimizations.  Since this is an operation on a set of
 *		pages anyway, the caller should try to pass only page aligned
 *		offsets because of this.
 *
 *		*range will be modified only if this function succeeds.
 *
 *		The flags field MUST contain a specific operation; allowable
 *		values are:
 *
 *		o	UPL_ROP_ABSENT	Returns the extent of the range
 *					presented which is absent, starting
 *					with the start address presented
 *
 *		o	UPL_ROP_PRESENT	Returns the extent of the range
 *					presented which is present (resident),
 *					starting with the start address
 *					presented
 *		o	UPL_ROP_DUMP	Dump the pages which are found in the
 *					target object for the target range.
 *
 *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
 *		multiple regions in the range, only the first matching region
 *		is returned.
 */
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}


/*
 * ubc_create_upl
 *
 * Given a vnode, cause the population of a portion of the vm_object; based on
 * the nature of the request, the pages returned may contain valid data, or
 * they may be uninitialized.
2147 * 2148 * Parameters: vp The vnode from which to create the upl 2149 * f_offset The start offset into the backing store 2150 * represented by the vnode 2151 * bufsize The size of the upl to create 2152 * uplp Pointer to the upl_t to receive the 2153 * created upl; MUST NOT be NULL 2154 * plp Pointer to receive the internal page 2155 * list for the created upl; MAY be NULL 2156 * to ignore 2157 * 2158 * Returns: KERN_SUCCESS The requested upl has been created 2159 * KERN_INVALID_ARGUMENT The bufsize argument is not an even 2160 * multiple of the page size 2161 * KERN_INVALID_ARGUMENT There is no ubc_info associated with 2162 * the vnode, or there is no memory object 2163 * control associated with the ubc_info 2164 * memory_object_upl_request:KERN_INVALID_VALUE 2165 * The supplied upl_flags argument is 2166 * invalid 2167 * Implicit Returns: 2168 * *uplp (modified) 2169 * *plp (modified) If non-NULL, the value of *plp will be 2170 * modified to point to the internal page 2171 * list; this modification may occur even 2172 * if this function is unsuccessful, in 2173 * which case the contents may be invalid 2174 * 2175 * Note: If successful, the returned *uplp MUST subsequently be freed 2176 * via a call to ubc_upl_commit(), ubc_upl_commit_range(), 2177 * ubc_upl_abort(), or ubc_upl_abort_range(). 
 */
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t		control;
	kern_return_t			kr;

	if (plp != NULL)
		*plp = NULL;
	*uplp = NULL;

	/* bufsize must be a multiple of the 4K page size (mask 0xfff) */
	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	/* reject requests larger than the maximum upl size */
	if (bufsize > MAX_UPL_SIZE * PAGE_SIZE)
		return KERN_INVALID_ARGUMENT;

	/*
	 * Translate the high-level UBC request flags (msync/pageout/pagein)
	 * into the detailed UPL flags expected by memory_object_upl_request().
	 * Note that each branch deliberately discards all caller flags other
	 * than UPL_RET_ONLY_DIRTY via "uplflags &= UPL_RET_ONLY_DIRTY".
	 */
	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {

		if (uplflags & UPL_UBC_MSYNC) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
			            UPL_SET_INTERNAL | UPL_SET_LITE;

		} else if (uplflags & UPL_UBC_PAGEOUT) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY)
				uplflags |= UPL_NOBLOCK;

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
                                    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			/* UPL_UBC_PAGEIN */
			uplflags |= UPL_RET_ONLY_ABSENT |
				    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
			            UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE)
				uplflags |= UPL_NOBLOCK;
		}
	} else {
		/* raw UPL flags supplied by the caller; scrub kernel-only bits */
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
		} else
			uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	}
	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags);
	/* hand back the internal page list only on success and if requested */
	if (kr == KERN_SUCCESS && plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}


/*
 * ubc_upl_maxbufsize
 *
 * Return the maximum bufsize ubc_create_upl( ) will take.
 *
 * Parameters:	none
 *
 * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
2261 */ 2262upl_size_t 2263ubc_upl_maxbufsize( 2264 void) 2265{ 2266 return(MAX_UPL_SIZE * PAGE_SIZE); 2267} 2268 2269/* 2270 * ubc_upl_map 2271 * 2272 * Map the page list assocated with the supplied upl into the kernel virtual 2273 * address space at the virtual address indicated by the dst_addr argument; 2274 * the entire upl is mapped 2275 * 2276 * Parameters: upl The upl to map 2277 * dst_addr The address at which to map the upl 2278 * 2279 * Returns: KERN_SUCCESS The upl has been mapped 2280 * KERN_INVALID_ARGUMENT The upl is UPL_NULL 2281 * KERN_FAILURE The upl is already mapped 2282 * vm_map_enter:KERN_INVALID_ARGUMENT 2283 * A failure code from vm_map_enter() due 2284 * to an invalid argument 2285 */ 2286kern_return_t 2287ubc_upl_map( 2288 upl_t upl, 2289 vm_offset_t *dst_addr) 2290{ 2291 return (vm_upl_map(kernel_map, upl, dst_addr)); 2292} 2293 2294 2295/* 2296 * ubc_upl_unmap 2297 * 2298 * Unmap the page list assocated with the supplied upl from the kernel virtual 2299 * address space; the entire upl is unmapped. 
2300 * 2301 * Parameters: upl The upl to unmap 2302 * 2303 * Returns: KERN_SUCCESS The upl has been unmapped 2304 * KERN_FAILURE The upl is not currently mapped 2305 * KERN_INVALID_ARGUMENT If the upl is UPL_NULL 2306 */ 2307kern_return_t 2308ubc_upl_unmap( 2309 upl_t upl) 2310{ 2311 return(vm_upl_unmap(kernel_map, upl)); 2312} 2313 2314 2315/* 2316 * ubc_upl_commit 2317 * 2318 * Commit the contents of the upl to the backing store 2319 * 2320 * Parameters: upl The upl to commit 2321 * 2322 * Returns: KERN_SUCCESS The upl has been committed 2323 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL 2324 * KERN_FAILURE The supplied upl does not represent 2325 * device memory, and the offset plus the 2326 * size would exceed the actual size of 2327 * the upl 2328 * 2329 * Notes: In practice, the only return value for this function should be 2330 * KERN_SUCCESS, unless there has been data structure corruption; 2331 * since the upl is deallocated regardless of success or failure, 2332 * there's really nothing to do about this other than panic. 2333 * 2334 * IMPORTANT: Use of this function should not be mixed with use of 2335 * ubc_upl_commit_range(), due to the unconditional deallocation 2336 * by this function. 
2337 */ 2338kern_return_t 2339ubc_upl_commit( 2340 upl_t upl) 2341{ 2342 upl_page_info_t *pl; 2343 kern_return_t kr; 2344 2345 pl = UPL_GET_INTERNAL_PAGE_LIST(upl); 2346 kr = upl_commit(upl, pl, MAX_UPL_SIZE); 2347 upl_deallocate(upl); 2348 return kr; 2349} 2350 2351 2352/* 2353 * ubc_upl_commit 2354 * 2355 * Commit the contents of the specified range of the upl to the backing store 2356 * 2357 * Parameters: upl The upl to commit 2358 * offset The offset into the upl 2359 * size The size of the region to be committed, 2360 * starting at the specified offset 2361 * flags commit type (see below) 2362 * 2363 * Returns: KERN_SUCCESS The range has been committed 2364 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL 2365 * KERN_FAILURE The supplied upl does not represent 2366 * device memory, and the offset plus the 2367 * size would exceed the actual size of 2368 * the upl 2369 * 2370 * Notes: IMPORTANT: If the commit is successful, and the object is now 2371 * empty, the upl will be deallocated. Since the caller cannot 2372 * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag 2373 * should generally only be used when the offset is 0 and the size 2374 * is equal to the upl size. 
 *
 *		The flags argument is a bitmap of flags on the range of pages in
 *		the upl to be committed; allowable flags are:
 *
 *		o UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
 *						both empty and has been
 *						successfully committed
 *		o UPL_COMMIT_CLEAR_DIRTY	Clear each page's dirty
 *						bit; will prevent a
 *						later pageout
 *		o UPL_COMMIT_SET_DIRTY		Set each page's dirty
 *						bit; will cause a later
 *						pageout
 *		o UPL_COMMIT_INACTIVATE		Clear each page's
 *						reference bit; the page
 *						will not be accessed
 *		o UPL_COMMIT_ALLOW_ACCESS	Unbusy each page; pages
 *						become busy when an
 *						IOMemoryDescriptor is
 *						mapped or redirected,
 *						and we have to wait for
 *						an IOKit driver
 *
 *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
 *		not be specified by the caller.
 *
 *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
 *		mutually exclusive, and should not be combined.
2403 */ 2404kern_return_t 2405ubc_upl_commit_range( 2406 upl_t upl, 2407 upl_offset_t offset, 2408 upl_size_t size, 2409 int flags) 2410{ 2411 upl_page_info_t *pl; 2412 boolean_t empty; 2413 kern_return_t kr; 2414 2415 if (flags & UPL_COMMIT_FREE_ON_EMPTY) 2416 flags |= UPL_COMMIT_NOTIFY_EMPTY; 2417 2418 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) { 2419 return KERN_INVALID_ARGUMENT; 2420 } 2421 2422 pl = UPL_GET_INTERNAL_PAGE_LIST(upl); 2423 2424 kr = upl_commit_range(upl, offset, size, flags, 2425 pl, MAX_UPL_SIZE, &empty); 2426 2427 if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) 2428 upl_deallocate(upl); 2429 2430 return kr; 2431} 2432 2433 2434/* 2435 * ubc_upl_abort_range 2436 * 2437 * Abort the contents of the specified range of the specified upl 2438 * 2439 * Parameters: upl The upl to abort 2440 * offset The offset into the upl 2441 * size The size of the region to be aborted, 2442 * starting at the specified offset 2443 * abort_flags abort type (see below) 2444 * 2445 * Returns: KERN_SUCCESS The range has been aborted 2446 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL 2447 * KERN_FAILURE The supplied upl does not represent 2448 * device memory, and the offset plus the 2449 * size would exceed the actual size of 2450 * the upl 2451 * 2452 * Notes: IMPORTANT: If the abort is successful, and the object is now 2453 * empty, the upl will be deallocated. Since the caller cannot 2454 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag 2455 * should generally only be used when the offset is 0 and the size 2456 * is equal to the upl size. 
2457 * 2458 * The abort_flags argument is a bitmap of flags on the range of 2459 * pages in the upl to be aborted; allowable flags are: 2460 * 2461 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both 2462 * empty and has been successfully 2463 * aborted 2464 * o UPL_ABORT_RESTART The operation must be restarted 2465 * o UPL_ABORT_UNAVAILABLE The pages are unavailable 2466 * o UPL_ABORT_ERROR An I/O error occurred 2467 * o UPL_ABORT_DUMP_PAGES Just free the pages 2468 * o UPL_ABORT_NOTIFY_EMPTY RESERVED 2469 * o UPL_ABORT_ALLOW_ACCESS RESERVED 2470 * 2471 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should 2472 * not be specified by the caller. It is intended to fulfill the 2473 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function 2474 * ubc_upl_commit_range(), but is never referenced internally. 2475 * 2476 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor 2477 * referenced; do not use it. 2478 */ 2479kern_return_t 2480ubc_upl_abort_range( 2481 upl_t upl, 2482 upl_offset_t offset, 2483 upl_size_t size, 2484 int abort_flags) 2485{ 2486 kern_return_t kr; 2487 boolean_t empty = FALSE; 2488 2489 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) 2490 abort_flags |= UPL_ABORT_NOTIFY_EMPTY; 2491 2492 kr = upl_abort_range(upl, offset, size, abort_flags, &empty); 2493 2494 if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) 2495 upl_deallocate(upl); 2496 2497 return kr; 2498} 2499 2500 2501/* 2502 * ubc_upl_abort 2503 * 2504 * Abort the contents of the specified upl 2505 * 2506 * Parameters: upl The upl to abort 2507 * abort_type abort type (see below) 2508 * 2509 * Returns: KERN_SUCCESS The range has been aborted 2510 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL 2511 * KERN_FAILURE The supplied upl does not represent 2512 * device memory, and the offset plus the 2513 * size would exceed the actual size of 2514 * the upl 2515 * 2516 * Notes: IMPORTANT: If the abort is successful, and the object is now 2517 * empty, the upl will be 
deallocated. Since the caller cannot 2518 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag 2519 * should generally only be used when the offset is 0 and the size 2520 * is equal to the upl size. 2521 * 2522 * The abort_type is a bitmap of flags on the range of 2523 * pages in the upl to be aborted; allowable flags are: 2524 * 2525 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both 2526 * empty and has been successfully 2527 * aborted 2528 * o UPL_ABORT_RESTART The operation must be restarted 2529 * o UPL_ABORT_UNAVAILABLE The pages are unavailable 2530 * o UPL_ABORT_ERROR An I/O error occurred 2531 * o UPL_ABORT_DUMP_PAGES Just free the pages 2532 * o UPL_ABORT_NOTIFY_EMPTY RESERVED 2533 * o UPL_ABORT_ALLOW_ACCESS RESERVED 2534 * 2535 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should 2536 * not be specified by the caller. It is intended to fulfill the 2537 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function 2538 * ubc_upl_commit_range(), but is never referenced internally. 2539 * 2540 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor 2541 * referenced; do not use it. 2542 */ 2543kern_return_t 2544ubc_upl_abort( 2545 upl_t upl, 2546 int abort_type) 2547{ 2548 kern_return_t kr; 2549 2550 kr = upl_abort(upl, abort_type); 2551 upl_deallocate(upl); 2552 return kr; 2553} 2554 2555 2556/* 2557 * ubc_upl_pageinfo 2558 * 2559 * Retrieve the internal page list for the specified upl 2560 * 2561 * Parameters: upl The upl to obtain the page list from 2562 * 2563 * Returns: !NULL The (upl_page_info_t *) for the page 2564 * list internal to the upl 2565 * NULL Error/no page list associated 2566 * 2567 * Notes: IMPORTANT: The function is only valid on internal objects 2568 * where the list request was made with the UPL_INTERNAL flag. 2569 * 2570 * This function is a utility helper function, since some callers 2571 * may not have direct access to the header defining the macro, 2572 * due to abstraction layering constraints. 
 */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t		upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}


/* True iff vp is a regular file with a ubc_info attached. */
int
UBCINFOEXISTS(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}


/* Thin wrapper around upl_range_needed() for [index, index+count) of the upl. */
void
ubc_upl_range_needed(
	upl_t		upl,
	int		index,
	int		count)
{
	upl_range_needed(upl, index, count);
}


/*
 * CODE SIGNING
 */
/* When non-zero, blobs live in pageable kmem instead of wired kalloc memory. */
#define CS_BLOB_PAGEABLE 0
/* Global accounting for loaded code-signature blobs (exported via sysctl). */
static volatile SInt32 cs_blob_size = 0;
static volatile SInt32 cs_blob_count = 0;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;

int cs_validation = 1;

#ifndef SECURE_KERNEL
SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_validation, 0, "Do validate code signatures");
#endif
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");


/*
 * Allocate backing memory for a code-signature blob.  On success the
 * address is returned in *blob_addr_p; *blob_size_p may be rounded up
 * when CS_BLOB_PAGEABLE.  Returns KERN_NO_SPACE on allocation failure.
 */
kern_return_t
ubc_cs_blob_allocate(
	vm_offset_t	*blob_addr_p,
	vm_size_t	*blob_size_p)
{
	kern_return_t	kr;

#if CS_BLOB_PAGEABLE
	*blob_size_p = round_page(*blob_size_p);
	kr = kmem_alloc(kernel_map, blob_addr_p, *blob_size_p);
#else	/* CS_BLOB_PAGEABLE */
	*blob_addr_p = (vm_offset_t) kalloc(*blob_size_p);
	if (*blob_addr_p == 0) {
		kr = KERN_NO_SPACE;
	} else {
		kr = KERN_SUCCESS;
	}
#endif	/* CS_BLOB_PAGEABLE */
	return kr;
}

/* Release memory obtained from ubc_cs_blob_allocate(). */
void
ubc_cs_blob_deallocate(
	vm_offset_t	blob_addr,
	vm_size_t	blob_size)
{
#if CS_BLOB_PAGEABLE
	kmem_free(kernel_map, blob_addr, blob_size);
#else	/* CS_BLOB_PAGEABLE */
	kfree((void *) blob_addr, blob_size);
#endif	/* CS_BLOB_PAGEABLE */
}

/*
 * Attach a pre-validated ("sigpup") code signature blob, already resident
 * at `address`, to the vnode.  The blob memory is NOT copied and is marked
 * csb_sigpup so ubc_cs_free() will not deallocate it.
 * Returns 0 on success (or if the object is already signed), or an errno.
 */
int
ubc_cs_sigpup_add(
	struct vnode	*vp,
	vm_address_t	address,
	vm_size_t	size)
{
	kern_return_t		kr;
	struct ubc_info		*uip;
	struct cs_blob		*blob;
	memory_object_control_t control;
	const CS_CodeDirectory *cd;
	int			error;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	/* nothing to do if the VM object is already marked signed */
	if (memory_object_is_signed(control))
		return 0;

	blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
	if (blob == NULL)
		return ENOMEM;

	/* fill in the new blob */
	blob->csb_cpu_type = CPU_TYPE_ANY;
	blob->csb_base_offset = 0;
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_handle = IPC_PORT_NULL;
	blob->csb_mem_kaddr = address;
	blob->csb_sigpup = 1;

	/*
	 * Validate the blob's contents
	 */
	cd = findCodeDirectory(
		(const CS_SuperBlob *) address,
		(char *) address,
		(char *) address + blob->csb_mem_size);
	if (cd == NULL) {
		/* no code directory => useless blob !
 */
		error = EINVAL;
		goto out;
	}

	blob->csb_flags = ntohl(cd->flags) | CS_VALID;
	blob->csb_end_offset = round_page(ntohl(cd->codeLimit));
	if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* scattered layout: the first scatter entry gives the base page */
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE;
	} else {
		blob->csb_start_offset = (blob->csb_end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE));
	}

	/*
	 * We don't need to check with the policy module, since the input data is supposed to be already checked
	 */

	/* take the vnode lock before touching the ubc_info blob list */
	vnode_lock(vp);
	if (! UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		if (cs_debug)
			printf("out ubc object\n");
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* someone raced us to adding the code directory */
	if (uip->cs_blobs != NULL) {
		if (cs_debug)
			printf("sigpup: vnode already have CD ?\n");
		vnode_unlock(vp);
		error = EEXIST;
		goto out;
	}

	/* link in at the head of the blob list */
	blob->csb_next = uip->cs_blobs;
	uip->cs_blobs = blob;

	OSAddAtomic(+1, &cs_blob_count);
	OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		if (cs_debug)
			printf("sigpup: not signable ?\n");
		error = ENOENT;
		goto out;
	}

	vnode_unlock(vp);

	error = 0;
out:
	if (error) {
		if (cs_debug)
			printf("sigpup: not signable ?\n");
		/* we failed; release what we allocated */
		if (blob) {
			kfree(blob, sizeof (*blob));
			blob = NULL;
		}
	}

	return error;
}

/*
 * Register a code-signature blob covering [base_offset, base_offset+size)
 * of the file backing vp.  Ownership of the memory at `addr` transfers to
 * the ubc layer on success (including the duplicate-blob EAGAIN path,
 * where the memory is deallocated and 0 is returned).
 */
int
ubc_cs_blob_add(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		base_offset,
	vm_address_t	addr,
	off_t		blob_offset,
	vm_size_t	size)
{
	kern_return_t		kr;
	struct ubc_info		*uip;
	struct cs_blob		*blob, *oblob;
	int			error;
	ipc_port_t		blob_handle;
	memory_object_size_t	blob_size;
	const CS_CodeDirectory *cd;
	off_t			blob_start_offset, blob_end_offset;
	SHA1_CTX		sha1ctxt;
	boolean_t		record_mtime;

	record_mtime = FALSE;

	blob_handle = IPC_PORT_NULL;

	blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
	if (blob == NULL) {
		return ENOMEM;
	}

#if CS_BLOB_PAGEABLE
	/* get a memory entry on the blob */
	blob_size = (memory_object_size_t) size;
	kr = mach_make_memory_entry_64(kernel_map,
				       &blob_size,
				       addr,
				       VM_PROT_READ,
				       &blob_handle,
				       IPC_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		error = ENOMEM;
		goto out;
	}
	if (memory_object_round_page(blob_size) !=
	    (memory_object_size_t) round_page(size)) {
		printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%lx !?\n",
		       blob_size, (size_t)size);
		panic("XXX FBDP size mismatch 0x%llx 0x%lx\n", blob_size, (size_t)size);
		error = EINVAL;
		goto out;
	}
#else
	blob_size = (memory_object_size_t) size;
	blob_handle = IPC_PORT_NULL;
#endif

	/* fill in the new blob */
	blob->csb_cpu_type = cputype;
	blob->csb_sigpup = 0;
	blob->csb_base_offset = base_offset;
	blob->csb_blob_offset = blob_offset;
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_handle = blob_handle;
	blob->csb_mem_kaddr = addr;
	blob->csb_flags = 0;

	/*
	 * Validate the blob's contents
	 */

	error = cs_validate_csblob((const uint8_t *)addr, size, &cd);
	if (error) {
		if (cs_debug)
			printf("CODESIGNING: csblob invalid: %d\n", error);
		/* invalid blob: record it with zeroed coverage and hash */
		blob->csb_flags = 0;
		blob->csb_start_offset = 0;
		blob->csb_end_offset = 0;
		memset(blob->csb_sha1, 0, SHA1_RESULTLEN);
		/* let the vnode checker determine if the signature is valid or not */
	} else {
		const unsigned char *sha1_base;
		int sha1_size;

		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		blob->csb_end_offset = round_page(ntohl(cd->codeLimit));
		if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			/* scattered layout: first scatter entry gives the base page */
			const SC_Scatter *scatter = (const SC_Scatter*)
				((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE;
		} else {
			blob->csb_start_offset = (blob->csb_end_offset -
						  (ntohl(cd->nCodeSlots) * PAGE_SIZE));
		}
		/* compute the blob's SHA1 hash */
		sha1_base = (const unsigned char *) cd;
		sha1_size = ntohl(cd->length);
		SHA1Init(&sha1ctxt);
		SHA1Update(&sha1ctxt, sha1_base, sha1_size);
		SHA1Final(blob->csb_sha1, &sha1ctxt);
	}

	/*
	 * Let policy module check whether the blob's signature is accepted.
	 */
#if CONFIG_MACF
	error = mac_vnode_check_signature(vp, base_offset, blob->csb_sha1, (void*)addr, size);
	if (error)
		goto out;
#endif

	/*
	 * Validate the blob's coverage
	 */
	blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
	blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;

	if (blob_start_offset >= blob_end_offset ||
	    blob_start_offset < 0 ||
	    blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	/* the blob list is protected by the vnode lock */
	vnode_lock(vp);
	if (! UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* check if this new blob overlaps with an existing blob */
	for (oblob = uip->cs_blobs;
	     oblob != NULL;
	     oblob = oblob->csb_next) {
		 off_t oblob_start_offset, oblob_end_offset;

		 oblob_start_offset = (oblob->csb_base_offset +
				       oblob->csb_start_offset);
		 oblob_end_offset = (oblob->csb_base_offset +
				     oblob->csb_end_offset);
		 if (blob_start_offset >= oblob_end_offset ||
		     blob_end_offset <= oblob_start_offset) {
			 /* no conflict with this existing blob */
		 } else {
			 /* conflict ! */
			 if (blob_start_offset == oblob_start_offset &&
			     blob_end_offset == oblob_end_offset &&
			     blob->csb_mem_size == oblob->csb_mem_size &&
			     blob->csb_flags == oblob->csb_flags &&
			     (blob->csb_cpu_type == CPU_TYPE_ANY ||
			      oblob->csb_cpu_type == CPU_TYPE_ANY ||
			      blob->csb_cpu_type == oblob->csb_cpu_type) &&
			     !bcmp(blob->csb_sha1,
				   oblob->csb_sha1,
				   SHA1_RESULTLEN)) {
			 	 /*
				  * We already have this blob:
				  * we'll return success but
				  * throw away the new blob.
				  */
				 if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
					 /*
					  * The old blob matches this one
					  * but doesn't have any CPU type.
					  * Update it with whatever the caller
					  * provided this time.
					  */
					 oblob->csb_cpu_type = cputype;
				 }
				 /*
				  * If the same blob moved around in the Mach-O, we
				  * want to remember the new blob offset to avoid
				  * coming back here again and again.
				  */
				 oblob->csb_blob_offset = blob_offset;

				 vnode_unlock(vp);
				 error = EAGAIN;
				 goto out;
			 } else {
				 /* different blob: reject the new one */
				 char pathbuf[MAXPATHLEN];
				 char new_sha1_str[2*SHA1_RESULTLEN+1];
				 char old_sha1_str[2*SHA1_RESULTLEN+1];
				 char arch_str[20];
				 const char *pathp = "?unknown";
				 int pblen = sizeof(pathbuf);
				 if (vn_getpath(vp, pathbuf, &pblen) == 0) {
					 /* pblen == strlen(pathbuf) + 1.  Assume strlen(pathbuf) > 0 */
					 for (pathp = pathbuf + pblen - 2; pathp > pathbuf && pathp[-1] != '/'; pathp--) ;
				 }
				 snprintf(arch_str, sizeof(arch_str), "%x", cputype);
				 hex_str(oblob->csb_sha1, SHA1_RESULTLEN, old_sha1_str);
				 hex_str(blob->csb_sha1, SHA1_RESULTLEN, new_sha1_str);
				 kern_asl_msg(LOG_NOTICE, "messagetracer",
						    6,
						    "com.apple.message.domain", "com.apple.kernel.cs.replace",
						    "com.apple.message.signature", pathp,
						    "com.apple.message.signature2", arch_str,
						    "com.apple.message.signature3", old_sha1_str,
						    "com.apple.message.result", new_sha1_str,
						    "com.apple.message.summarize", "YES",
						    NULL
						    );
				 printf("CODESIGNING: rejected new signature for architecture %d of file %s\n",
					cputype, pathbuf);
				 vnode_unlock(vp);
				 error = EALREADY;
				 goto out;
			 }
		 }

	}

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	if (uip->cs_blobs == NULL) {
		/* loading 1st blob: record the file's current "modify time" */
		record_mtime = TRUE;
	}

	/*
	 * Add this blob to the list of blobs for this vnode.
	 * We always add at the front of the list and we never remove a
	 * blob from the list, so ubc_cs_get_blobs() can return whatever
	 * the top of the list was and that list will remain valid
	 * while we validate a page, even after we release the vnode's lock.
	 */
	blob->csb_next = uip->cs_blobs;
	uip->cs_blobs = blob;

	/* update the global accounting counters and their peaks */
	OSAddAtomic(+1, &cs_blob_count);
	if (cs_blob_count > cs_blob_count_peak) {
		cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
	}
	OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
	if ((SInt32) cs_blob_size > cs_blob_size_peak) {
		cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
	}
	if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
		cs_blob_size_max = (UInt32) blob->csb_mem_size;
	}

	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		       "loaded %s signatures for file (%s) "
		       "range 0x%llx:0x%llx flags 0x%x\n",
		       p->p_pid, p->p_comm,
		       blob->csb_cpu_type == -1 ? "detached" : "embedded",
		       name,
		       blob->csb_base_offset + blob->csb_start_offset,
		       blob->csb_base_offset + blob->csb_end_offset,
		       blob->csb_flags);
		vnode_putname_printable(name);
	}

	vnode_unlock(vp);

	if (record_mtime) {
		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
	}

	error = 0;	/* success ! */

out:
	if (error) {
		/* we failed; release what we allocated */
		if (blob) {
			kfree(blob, sizeof (*blob));
			blob = NULL;
		}
		if (blob_handle != IPC_PORT_NULL) {
			mach_memory_entry_port_release(blob_handle);
			blob_handle = IPC_PORT_NULL;
		}
	}

	if (error == EAGAIN) {
		/*
		 * See above:  error is EAGAIN if we were asked
		 * to add an existing blob again.  We cleaned the new
		 * blob and we want to return success.
		 */
		error = 0;
		/*
		 * Since we're not failing, consume the data we received.
		 */
		ubc_cs_blob_deallocate(addr, size);
	}

	return error;
}


/*
 * Look up the blob attached to vp that either matches `cputype` exactly
 * (when cputype != -1) or whose coverage contains `offset` (when
 * offset != -1).  Returns NULL if the vnode has no ubc_info or no match.
 */
struct cs_blob *
ubc_cs_blob_get(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		offset)
{
	struct ubc_info	*uip;
	struct cs_blob	*blob;
	off_t offset_in_blob;

	vnode_lock_spin(vp);

	if (! UBCINFOEXISTS(vp)) {
		blob = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		if (cputype != -1 && blob->csb_cpu_type == cputype) {
			break;
		}
		if (offset != -1) {
			offset_in_blob = offset - blob->csb_base_offset;
			if (offset_in_blob >= blob->csb_start_offset &&
			    offset_in_blob < blob->csb_end_offset) {
				/* our offset is covered by this blob */
				break;
			}
		}
	}

	if (cs_debug && blob != NULL && blob->csb_sigpup)
		printf("found sig pup blob\n");
out:
	vnode_unlock(vp);

	return blob;
}

/*
 * Free every blob attached to the ubc_info and reset its list.
 * Memory of "sigpup" blobs is not owned by us and is left alone.
 */
static void
ubc_cs_free(
	struct ubc_info	*uip)
{
	struct cs_blob	*blob, *next_blob;

	for (blob = uip->cs_blobs;
	     blob != NULL;
	     blob = next_blob) {
		next_blob = blob->csb_next;
		if (blob->csb_mem_kaddr != 0 && !blob->csb_sigpup) {
			ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
					       blob->csb_mem_size);
			blob->csb_mem_kaddr = 0;
		}
		if (blob->csb_mem_handle != IPC_PORT_NULL) {
			mach_memory_entry_port_release(blob->csb_mem_handle);
		}
		blob->csb_mem_handle = IPC_PORT_NULL;
		/* keep the global blob accounting in sync */
		OSAddAtomic(-1, &cs_blob_count);
		OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
		kfree(blob, sizeof (*blob));
	}
#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
#endif
	uip->cs_blobs = NULL;
}

/*
 * Return the head of the vnode's blob list without taking the vnode lock;
 * see the comment in the body for why that is safe for the callers.
 */
struct cs_blob *
ubc_get_cs_blobs(
	struct vnode
	*vp)
{
	struct ubc_info	*uip;
	struct cs_blob	*blobs;

	/*
	 * No need to take the vnode lock here.  The caller must be holding
	 * a reference on the vnode (via a VM mapping or open file descriptor),
	 * so the vnode will not go away.  The ubc_info stays until the vnode
	 * goes away.  And we only modify "blobs" by adding to the head of the
	 * list.
	 * The ubc_info could go away entirely if the vnode gets reclaimed as
	 * part of a forced unmount.  In the case of a code-signature validation
	 * during a page fault, the "paging_in_progress" reference on the VM
	 * object guarantees that the vnode pager (and the ubc_info) won't go
	 * away during the fault.
	 * Other callers need to protect against vnode reclaim by holding the
	 * vnode lock, for example.
	 */

	if (! UBCINFOEXISTS(vp)) {
		blobs = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	blobs = uip->cs_blobs;

out:
	return blobs;
}

/*
 * Copy the "modify time" recorded when the vnode's first signature blob
 * was loaded into *cs_mtime; zeroed if the vnode has no ubc_info.
 */
void
ubc_get_cs_mtime(
	struct vnode	*vp,
	struct timespec	*cs_mtime)
{
	struct ubc_info	*uip;

	if (!
UBCINFOEXISTS(vp)) {
		/* no ubc_info: report a zero timestamp */
		cs_mtime->tv_sec = 0;
		cs_mtime->tv_nsec = 0;
		return;
	}

	uip = vp->v_ubcinfo;
	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
}

/* counters for pages validated without a hash / with a mismatching hash */
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;
/*
 * Validate one page of a signed object: find the blob covering
 * page_offset in the blob list `_blobs`, look up the expected SHA1 for
 * that page in its code directory, hash `data` and compare.
 *
 * Returns TRUE iff the page could be checked against a hash; *tainted is
 * set TRUE when the computed hash did not match the expected one.
 */
boolean_t
cs_validate_page(
	void			*_blobs,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	boolean_t		*tainted)
{
	SHA1_CTX		sha1ctxt;
	unsigned char		actual_hash[SHA1_RESULTLEN];
	unsigned char		expected_hash[SHA1_RESULTLEN];
	boolean_t		found_hash;
	struct cs_blob		*blobs, *blob;
	const CS_CodeDirectory	*cd;
	const CS_SuperBlob	*embedded;
	const unsigned char	*hash;
	boolean_t		validated;
	off_t			offset;	/* page offset in the file */
	size_t			size;
	off_t			codeLimit = 0;
	char			*lower_bound, *upper_bound;
	vm_offset_t		kaddr, blob_addr;
	vm_size_t		ksize;
	kern_return_t		kr;

	offset = page_offset;

	/* retrieve the expected hash */
	found_hash = FALSE;
	blobs = (struct cs_blob *) _blobs;

	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* map the blob in the kernel address space */
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			ksize = (vm_size_t) (blob->csb_mem_size +
					     blob->csb_mem_offset);
			kr = vm_map(kernel_map,
				    &kaddr,
				    ksize,
				    0,
				    VM_FLAGS_ANYWHERE,
				    blob->csb_mem_handle,
				    0,
				    TRUE,
				    VM_PROT_READ,
				    VM_PROT_READ,
				    VM_INHERIT_NONE);
			if (kr != KERN_SUCCESS) {
				/* XXX FBDP what to do !? */
				printf("cs_validate_page: failed to map blob, "
				       "size=0x%lx kr=0x%x\n",
				       (size_t)blob->csb_mem_size, kr);
				break;
			}
		}
		if (blob->csb_sigpup && cs_debug)
			printf("checking for a sigpup CD\n");

		blob_addr = kaddr + blob->csb_mem_offset;

		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		embedded = (const CS_SuperBlob *) blob_addr;
		cd = findCodeDirectory(embedded, lower_bound, upper_bound);
		if (cd != NULL) {
			/* only 4K pages / SHA1 code directories are supported */
			if (cd->pageSize != PAGE_SHIFT ||
			    cd->hashType != CS_HASHTYPE_SHA1 ||
			    cd->hashSize != SHA1_RESULTLEN) {
				/* bogus blob ? */
				if (blob->csb_sigpup && cs_debug)
					printf("page foo bogus sigpup CD\n");
				continue;
			}

			offset = page_offset - blob->csb_base_offset;
			if (offset < blob->csb_start_offset ||
			    offset >= blob->csb_end_offset) {
				/* our page is not covered by this blob */
				if (blob->csb_sigpup && cs_debug)
					printf("OOB sigpup CD\n");
				continue;
			}

			codeLimit = ntohl(cd->codeLimit);
			if (blob->csb_sigpup && cs_debug)
				printf("sigpup codesize %d\n", (int)codeLimit);

			/* fetch the expected per-page hash from the CD slot */
			hash = hashes(cd, (unsigned)atop(offset),
				      lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash,
				      sizeof (expected_hash));
				found_hash = TRUE;
				if (blob->csb_sigpup && cs_debug)
					printf("sigpup hash\n");
			}

			break;
		} else {
			if (blob->csb_sigpup && cs_debug)
				printf("sig pup had no valid CD\n");

		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			       "mobj %p off 0x%llx: no hash to validate !?\n",
			       pager, page_offset);
		}
		validated = FALSE;
		*tainted = FALSE;
	} else {

		size = PAGE_SIZE;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			size = (size_t) (codeLimit & PAGE_MASK);
		}
		/* compute the actual page's SHA1 hash */
		SHA1Init(&sha1ctxt);
		SHA1UpdateUsePhysicalAddress(&sha1ctxt, data, size);
		SHA1Final(actual_hash, &sha1ctxt);

		if (bcmp(expected_hash, actual_hash, SHA1_RESULTLEN) != 0) {
			/* mismatch: log it and mark the page tainted */
			char asha1_str[2*SHA1_RESULTLEN+1];
			char esha1_str[2*SHA1_RESULTLEN+1];
			hex_str(actual_hash, SHA1_RESULTLEN, asha1_str);
			hex_str(expected_hash, SHA1_RESULTLEN, esha1_str);
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: actual %s expected %s\n",
				       pager, page_offset, size, asha1_str, esha1_str);
			}
			cs_validate_page_bad_hash++;
			if (!*tainted) {
				char page_offset_str[20];
				snprintf(page_offset_str, sizeof(page_offset_str), "%llx", page_offset);
				kern_asl_msg(LOG_NOTICE, "messagetracer",
						5,
						"com.apple.message.domain", "com.apple.kernel.cs.mismatch",
						"com.apple.message.signature", page_offset_str,
						"com.apple.message.signature2", asha1_str,
						"com.apple.message.signature3", esha1_str,
						"com.apple.message.summarize", "YES",
						NULL
						);
			}
			*tainted = TRUE;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "SHA1 OK\n",
				       pager, page_offset, size);
			}
			*tainted = FALSE;
		}
		validated = TRUE;
	}

	return validated;
}

int
ubc_cs_getcdhash(
	vnode_t		vp,
	off_t		offset,
	unsigned char	*cdhash)
{
struct cs_blob *blobs, *blob; 3389 off_t rel_offset; 3390 int ret; 3391 3392 vnode_lock(vp); 3393 3394 blobs = ubc_get_cs_blobs(vp); 3395 for (blob = blobs; 3396 blob != NULL; 3397 blob = blob->csb_next) { 3398 /* compute offset relative to this blob */ 3399 rel_offset = offset - blob->csb_base_offset; 3400 if (rel_offset >= blob->csb_start_offset && 3401 rel_offset < blob->csb_end_offset) { 3402 /* this blob does cover our "offset" ! */ 3403 break; 3404 } 3405 } 3406 3407 if (blob == NULL) { 3408 /* we didn't find a blob covering "offset" */ 3409 ret = EBADEXEC; /* XXX any better error ? */ 3410 } else { 3411 /* get the SHA1 hash of that blob */ 3412 bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1)); 3413 ret = 0; 3414 } 3415 3416 vnode_unlock(vp); 3417 3418 return ret; 3419} 3420 3421#if CHECK_CS_VALIDATION_BITMAP 3422#define stob(s) ((atop_64((s)) + 07) >> 3) 3423extern boolean_t root_fs_upgrade_try; 3424 3425/* 3426 * Should we use the code-sign bitmap to avoid repeated code-sign validation? 3427 * Depends: 3428 * a) Is the target vnode on the root filesystem? 3429 * b) Has someone tried to mount the root filesystem read-write? 3430 * If answers are (a) yes AND (b) no, then we can use the bitmap. 3431 */ 3432#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try) 3433kern_return_t 3434ubc_cs_validation_bitmap_allocate( 3435 vnode_t vp) 3436{ 3437 kern_return_t kr = KERN_SUCCESS; 3438 struct ubc_info *uip; 3439 char *target_bitmap; 3440 vm_object_size_t bitmap_size; 3441 3442 if ( ! USE_CODE_SIGN_BITMAP(vp) || (! 
UBCINFOEXISTS(vp))) { 3443 kr = KERN_INVALID_ARGUMENT; 3444 } else { 3445 uip = vp->v_ubcinfo; 3446 3447 if ( uip->cs_valid_bitmap == NULL ) { 3448 bitmap_size = stob(uip->ui_size); 3449 target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size ); 3450 if (target_bitmap == 0) { 3451 kr = KERN_NO_SPACE; 3452 } else { 3453 kr = KERN_SUCCESS; 3454 } 3455 if( kr == KERN_SUCCESS ) { 3456 memset( target_bitmap, 0, (size_t)bitmap_size); 3457 uip->cs_valid_bitmap = (void*)target_bitmap; 3458 uip->cs_valid_bitmap_size = bitmap_size; 3459 } 3460 } 3461 } 3462 return kr; 3463} 3464 3465kern_return_t 3466ubc_cs_check_validation_bitmap ( 3467 vnode_t vp, 3468 memory_object_offset_t offset, 3469 int optype) 3470{ 3471 kern_return_t kr = KERN_SUCCESS; 3472 3473 if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) { 3474 kr = KERN_INVALID_ARGUMENT; 3475 } else { 3476 struct ubc_info *uip = vp->v_ubcinfo; 3477 char *target_bitmap = uip->cs_valid_bitmap; 3478 3479 if ( target_bitmap == NULL ) { 3480 kr = KERN_INVALID_ARGUMENT; 3481 } else { 3482 uint64_t bit, byte; 3483 bit = atop_64( offset ); 3484 byte = bit >> 3; 3485 3486 if ( byte > uip->cs_valid_bitmap_size ) { 3487 kr = KERN_INVALID_ARGUMENT; 3488 } else { 3489 3490 if (optype == CS_BITMAP_SET) { 3491 target_bitmap[byte] |= (1 << (bit & 07)); 3492 kr = KERN_SUCCESS; 3493 } else if (optype == CS_BITMAP_CLEAR) { 3494 target_bitmap[byte] &= ~(1 << (bit & 07)); 3495 kr = KERN_SUCCESS; 3496 } else if (optype == CS_BITMAP_CHECK) { 3497 if ( target_bitmap[byte] & (1 << (bit & 07))) { 3498 kr = KERN_SUCCESS; 3499 } else { 3500 kr = KERN_FAILURE; 3501 } 3502 } 3503 } 3504 } 3505 } 3506 return kr; 3507} 3508 3509void 3510ubc_cs_validation_bitmap_deallocate( 3511 vnode_t vp) 3512{ 3513 struct ubc_info *uip; 3514 void *target_bitmap; 3515 vm_object_size_t bitmap_size; 3516 3517 if ( UBCINFOEXISTS(vp)) { 3518 uip = vp->v_ubcinfo; 3519 3520 if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) { 3521 bitmap_size = 
uip->cs_valid_bitmap_size; 3522 kfree( target_bitmap, (vm_size_t) bitmap_size ); 3523 uip->cs_valid_bitmap = NULL; 3524 } 3525 } 3526} 3527#else 3528kern_return_t ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){ 3529 return KERN_INVALID_ARGUMENT; 3530} 3531 3532kern_return_t ubc_cs_check_validation_bitmap( 3533 __unused struct vnode *vp, 3534 __unused memory_object_offset_t offset, 3535 __unused int optype){ 3536 3537 return KERN_INVALID_ARGUMENT; 3538} 3539 3540void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){ 3541 return; 3542} 3543#endif /* CHECK_CS_VALIDATION_BITMAP */ 3544