Lines Matching refs:copy (only in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/)

15  * Please obtain a copy of the License at
36 * Permission to use, copy, modify and distribute this software and its
164 vm_map_copy_t copy,
170 vm_map_copy_t copy,
184 vm_map_copy_t copy,
235 vm_map_copy_t copy,
243 boolean_t copy,
272 * Macros to copy a vm_map_entry. We must be careful to correctly
348 * this module provides for an efficient virtual copy of
358 * maps. This requires a change in the copy on write strategy;
374 * The symmetric (shadow) copy strategy implements virtual copy
376 * another, and then marking both regions as copy-on-write.
383 * its copy objects. See vm_object_copy_quickly() in vm_object.c.
638 * given map (or map copy). No fields are filled.
643 #define vm_map_copy_entry_create(copy) \
644 _vm_map_entry_create(&(copy)->cpy_hdr)
684 _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
777 #define vm_map_copy_entry_link(copy, after_where, entry) \
778 _vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))
803 #define vm_map_copy_entry_unlink(copy, entry) \
804 _vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))
2175 boolean_t copy,
2206 copy = FALSE;
2247 copy,
2415 if (copy) {
2442 * Perform the copy if requested
2445 if (copy) {
2451 &copy);
2465 * memory managers to specify symmetric copy,
2473 &copy);
2496 copy,
2830 #define vm_map_copy_clip_start(copy, entry, startaddr) \
2833 _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
2922 #define vm_map_copy_clip_end(copy, entry, endaddr) \
2925 _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
3091 (object->copy == VM_OBJECT_NULL) &&
3254 /* caller is asking specifically to copy the */
3275 * it would cause copy-on-write to fail. We've already
3693 /* cause any needs copy to be */
3828 * copy-on-write region, or an object for a zero-fill
3833 * If wiring a copy-on-write page, we need to copy it now
3886 * temporary copy of this entry and use that for the
4717 * copy current entry. see comment in vm_map_wire()
4959 * Dispose of a map copy object (returned by
4964 vm_map_copy_t copy)
4968 /* tr3("enter: copy 0x%x type %d", copy, copy->type);*/
4970 if (copy == VM_MAP_COPY_NULL)
4973 switch (copy->type) {
4975 while (vm_map_copy_first_entry(copy) !=
4976 vm_map_copy_to_entry(copy)) {
4977 vm_map_entry_t entry = vm_map_copy_first_entry(copy);
4979 vm_map_copy_entry_unlink(copy, entry);
4981 vm_map_copy_entry_dispose(copy, entry);
4985 vm_object_deallocate(copy->cpy_object);
4994 kfree(copy, copy->cpy_kalloc_size);
4997 zfree(vm_map_copy_zone, copy);
5004 * Move the information in a map copy object to
5005 * a new map copy object, leaving the old one
5012 * copy object will be deallocated; therefore,
5013 * these routines must make a copy of the copy
5019 vm_map_copy_t copy)
5023 if (copy == VM_MAP_COPY_NULL)
5027 * Allocate a new copy object, and copy the information
5032 *new_copy = *copy;
5034 if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
5037 * changed to point to the new copy object.
5039 vm_map_copy_first_entry(copy)->vme_prev
5041 vm_map_copy_last_entry(copy)->vme_next
5046 * Change the old copy object into one that contains
5049 copy->type = VM_MAP_COPY_OBJECT;
5050 copy->cpy_object = VM_OBJECT_NULL;
5075 * address and round the copy size or we'll end up
5201 * Copy the memory described by the map copy
5202 * object (copy; returned by vm_map_copyin) onto
5214 * If successful, consumes the copy object.
5220 * the new copy. This replacement is done either on
5226 * to copy each page, as the external memory management
5230 * to use 'vm_trickery' to copy the aligned data. This is
5252 vm_map_copy_t copy,
5269 * Check for null copy object.
5272 if (copy == VM_MAP_COPY_NULL)
5280 if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
5283 copy, TRUE));
5291 assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
5293 if (copy->size == 0) {
5294 vm_map_copy_discard(copy);
5301 * address and round the copy size or we'll end up
5305 if (!page_aligned(copy->size) ||
5306 !page_aligned (copy->offset) ||
5310 dst_end = vm_map_round_page(dst_addr + copy->size);
5312 dst_end = dst_addr + copy->size;
5438 * the copy cannot be interrupted.
5456 total_size = copy->size;
5466 copy_size = copy->size;
5471 /* deconstruct the copy object and do in parts */
5575 /* adjust the copy object */
5581 new_offset = copy->offset;
5582 copy_entry = vm_map_copy_first_entry(copy);
5584 vm_map_copy_to_entry(copy)){
5590 vm_map_copy_clip_end(copy,
5602 vm_map_copy_to_entry(copy);
5604 copy->cpy_hdr.links.prev;
5605 copy->cpy_hdr.links.prev = copy_entry;
5606 copy->size = copy_size;
5608 copy->cpy_hdr.nentries;
5610 copy->cpy_hdr.nentries = nentries;
5625 copy,
5632 copy,
5638 copy,
5644 copy->cpy_hdr.nentries +=
5646 copy->cpy_hdr.links.prev->vme_next =
5648 copy->cpy_hdr.links.prev
5650 copy->size = total_size;
5657 /* otherwise copy no longer exists, it was */
5659 copy = (vm_map_copy_t)
5661 vm_map_copy_first_entry(copy) =
5662 vm_map_copy_last_entry(copy) =
5663 vm_map_copy_to_entry(copy);
5664 copy->type = VM_MAP_COPY_ENTRY_LIST;
5665 copy->offset = new_offset;
5669 /* put back remainder of copy in container */
5671 copy->cpy_hdr.nentries = remaining_entries;
5672 copy->cpy_hdr.links.next = next_copy;
5673 copy->cpy_hdr.links.prev = previous_prev;
5674 copy->size = total_size;
5676 vm_map_copy_to_entry(copy);
5706 /* adjust the copy object */
5711 new_offset = copy->offset;
5712 copy_entry = vm_map_copy_first_entry(copy);
5713 while(copy_entry != vm_map_copy_to_entry(copy)) {
5719 vm_map_copy_clip_end(copy, copy_entry,
5730 vm_map_copy_to_entry(copy);
5732 copy->cpy_hdr.links.prev;
5733 copy->cpy_hdr.links.prev = copy_entry;
5734 copy->size = copy_size;
5736 copy->cpy_hdr.nentries;
5738 copy->cpy_hdr.nentries = nentries;
5758 dst_map, tmp_entry, copy,
5761 copy->cpy_hdr.nentries +=
5763 copy->cpy_hdr.links.prev->vme_next =
5765 copy->cpy_hdr.links.prev =
5767 copy->size += copy_size;
5776 * if the copy and dst address are misaligned but the same
5778 * misaligned parts and copy aligned the rest. If they are
5779 * aligned but len is unaligned we simply need to copy
5785 tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
5787 copy->cpy_hdr.nentries +=
5789 copy->cpy_hdr.links.prev->vme_next =
5791 copy->cpy_hdr.links.prev =
5793 copy->size += copy_size;
5803 copy->offset = new_offset;
5805 copy->cpy_hdr.nentries = remaining_entries;
5806 copy->cpy_hdr.links.next = next_copy;
5807 copy->cpy_hdr.links.prev = previous_prev;
5808 next_copy->vme_prev = vm_map_copy_to_entry(copy);
5809 copy->size = total_size;
5833 vm_map_copy_discard(copy);
5842 vm_map_copy_t copy,
5846 dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
5854 * Physically copy unaligned data
5859 * page offsets and sizes) to do the copy. We attempt to copy as
5864 * (copy) object should be one map entry, the target range may be split
5877 vm_map_copy_t copy,
5880 vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
5895 src_offset = copy->offset - vm_object_trunc_page(copy->offset);
5896 amount_left = copy->size;
5920 * we can only copy dst_size bytes before
5926 * we can only copy src_size bytes before
5927 * we have to get the next source copy entry
5936 * Entry needs copy, create a shadow shadow object for
5955 * unlike with the virtual (aligned) copy we're going
6008 * all done with this copy entry, dispose.
6010 vm_map_copy_entry_unlink(copy, copy_entry);
6012 vm_map_copy_entry_dispose(copy, copy_entry);
6014 if ((copy_entry = vm_map_copy_first_entry(copy))
6015 == vm_map_copy_to_entry(copy) && amount_left) {
6075 * with those from the copy. The following code is the
6086 vm_map_copy_t copy,
6096 while ((copy_entry = vm_map_copy_first_entry(copy))
6097 != vm_map_copy_to_entry(copy))
6145 vm_map_copy_clip_end(copy, copy_entry,
6156 * we can perform the copy by throwing it away and
6175 vm_map_copy_entry_unlink(copy, copy_entry);
6176 vm_map_copy_entry_dispose(copy, copy_entry);
6239 vm_map_copy_entry_unlink(copy, copy_entry);
6240 vm_map_copy_entry_dispose(copy, copy_entry);
6313 vm_map_copy_clip_end(copy, copy_entry,
6315 vm_map_copy_entry_unlink(copy, copy_entry);
6317 vm_map_copy_entry_dispose(copy, copy_entry);
6357 * If successful, returns a new copy object.
6368 vm_map_copy_t copy;
6371 copy = (vm_map_copy_t) kalloc(kalloc_size);
6372 if (copy == VM_MAP_COPY_NULL) {
6375 copy->type = VM_MAP_COPY_KERNEL_BUFFER;
6376 copy->size = len;
6377 copy->offset = 0;
6378 copy->cpy_kdata = (void *) (copy + 1);
6379 copy->cpy_kalloc_size = kalloc_size;
6381 kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len);
6383 kfree(copy, kalloc_size);
6394 *copy_result = copy;
6406 * If successful, consumes the copy object.
6414 vm_map_copy_t copy,
6428 vm_map_round_page(copy->size),
6448 * the copy.
6450 if (copyout(copy->cpy_kdata, *addr, copy->size)) {
6460 * of the copy.
6465 if (copyout(copy->cpy_kdata, *addr, copy->size)) {
6475 /* the copy failed, clean up */
6483 vm_map_round_page(copy->size)),
6488 /* copy was successful, dicard the copy structure */
6489 kfree(copy, copy->cpy_kalloc_size);
6499 * Link a copy chain ("copy") into a map at the
6502 * The copy chain is destroyed.
6506 #define vm_map_copy_insert(map, where, copy) \
6513 VMCI_copy = (copy); \
6527 * Copy out a copy chain ("copy") into newly-allocated
6530 * If successful, consumes the copy object.
6537 vm_map_copy_t copy)
6548 * Check for null copy object.
6551 if (copy == VM_MAP_COPY_NULL) {
6557 * Check for special copy object, created
6561 if (copy->type == VM_MAP_COPY_OBJECT) {
6562 vm_object_t object = copy->cpy_object;
6566 offset = vm_object_trunc_page(copy->offset);
6567 size = vm_map_round_page(copy->size +
6568 (vm_map_size_t)(copy->offset - offset));
6577 /* Account for non-pagealigned copy object */
6578 *dst_addr += (vm_map_offset_t)(copy->offset - offset);
6579 zfree(vm_map_copy_zone, copy);
6588 if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
6590 copy, FALSE));
6597 vm_copy_start = vm_object_trunc_page(copy->offset);
6598 size = vm_map_round_page((vm_map_size_t)copy->offset + copy->size)
6636 * entries from the copy into the destination
6640 if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
6651 old_zone = (copy->cpy_hdr.entries_pageable)
6654 entry = vm_map_copy_first_entry(copy);
6657 * Reinitialize the copy so that vm_map_copy_entry_link
6660 copy->cpy_hdr.nentries = 0;
6661 copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
6662 vm_map_copy_first_entry(copy) =
6663 vm_map_copy_last_entry(copy) =
6664 vm_map_copy_to_entry(copy);
6669 while (entry != vm_map_copy_to_entry(copy)) {
6670 new = vm_map_copy_entry_create(copy);
6673 vm_map_copy_entry_link(copy,
6674 vm_map_copy_last_entry(copy),
6683 * Adjust the addresses in the copy chain, and
6688 for (entry = vm_map_copy_first_entry(copy);
6689 entry != vm_map_copy_to_entry(copy);
6779 *dst_addr = start + (copy->offset - vm_copy_start);
6785 SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy));
6790 * Link in the copy
6793 vm_map_copy_insert(dst_map, last, copy);
6838 * vm_map_copy_overwrite). If the copy is unused, it
6869 vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */
6872 * where copy is taking place now
6882 vm_map_copy_t copy; /* Resulting copy */
6902 * If the copy is sufficiently small, use a kernel buffer instead
6903 * of making a virtual copy. The theory being that the cost of
6904 * setting up VM (and taking C-O-W faults) dominates the copy costs
6926 copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
6927 vm_map_copy_first_entry(copy) =
6928 vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
6929 copy->type = VM_MAP_COPY_ENTRY_LIST;
6930 copy->cpy_hdr.nentries = 0;
6931 copy->cpy_hdr.entries_pageable = TRUE;
6933 copy->offset = src_addr;
6934 copy->size = len;
6936 new_entry = vm_map_copy_entry_create(copy);
6944 vm_map_copy_entry_dispose(copy,new_entry); \
6945 vm_map_copy_discard(copy); \
6986 vm_object_t src_object; /* Object to copy */
6991 * for copy-on-write?
6998 * dropped to make copy
7042 /* to do physical copy from the device mem */
7058 new_entry = vm_map_copy_entry_create(copy);
7095 * Attempt non-blocking copy-on-write optimizations.
7105 * from the source to the copy. The copy is
7106 * copy-on-write only if the source is.
7137 * Handle copy-on-write obligations
7187 * Perform the copy
7245 * changed while the copy was being made.
7312 * Link in the new copy entry.
7315 vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
7371 * copy was successful.
7390 /* Fix-up start and end points in copy. This is necessary */
7391 /* when the various entries in the copy object were picked */
7394 tmp_entry = vm_map_copy_first_entry(copy);
7395 while (tmp_entry != vm_map_copy_to_entry(copy)) {
7403 *copy_result = copy;
7412 * Create a copy object from an object.
7423 vm_map_copy_t copy; /* Resulting copy */
7426 * We drop the object into a special copy object
7430 copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
7431 copy->type = VM_MAP_COPY_OBJECT;
7432 copy->cpy_object = object;
7433 copy->offset = offset;
7434 copy->size = size;
7436 *copy_result = copy;
7452 * objects use asynchronous copy algorithm for
7486 * copy, and therefore we already have
7505 * copy that we participated in.
7532 * with a symmetric copy, but we point
7547 * must not perform an asymmetric copy
7548 * of this object, since such a copy
7558 * to use an asymmetric copy strategy);
7561 * therefore a copy of the object is
7566 * are made to a copy.)
7572 * be subject to a symmetrical copy.
7584 * copy on write reasons, then we have
7619 * If object was using a symmetric copy strategy,
7620 * change its copy strategy to the default
7621 * asymmetric copy strategy, which is copy_delay
7680 vm_map_copy_t copy;
7690 if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
7716 * Insert the copy into the new map
7719 vm_map_copy_insert(new_map, last, copy);
7833 * Handle copy-on-write obligations
8186 /* new copy object, */
8236 /* substitute copy object for */
8310 * If the entry was copy-on-write, we either ...
8345 * We're attempting to read a copy-on-write
8371 * was copy-on-write or empty, it has been fixed up. Also
9712 { "share", "copy", "none", "?"};
9836 * Pretty-print a copy object for ddb.
9843 vm_map_copy_t copy;
9846 copy = (vm_map_copy_t)(long)
9849 printf("copy object 0x%x\n", copy);
9853 iprintf("type=%d", copy->type);
9854 switch (copy->type) {
9871 printf(", offset=0x%llx", (unsigned long long)copy->offset);
9872 printf(", size=0x%x\n", copy->size);
9874 switch (copy->type) {
9876 vm_map_header_print(&copy->cpy_hdr);
9877 for (entry = vm_map_copy_first_entry(copy);
9878 entry && entry != vm_map_copy_to_entry(copy);
9885 iprintf("object=0x%x\n", copy->cpy_object);
9889 iprintf("kernel buffer=0x%x", copy->cpy_kdata);
9890 printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
10002 boolean_t copy,
10099 * copy, and therefore we already have
10167 if (!copy) {
10229 * Perform the copy.
10267 * changed while the copy was being made.
10341 boolean_t copy,
10369 size, copy, &map_header,
10954 /* count, it picks up the copy-on-write cases */
11621 * This is basically a copy of the MEMLOCK rlimit value maintained by the BSD side of
11622 * the kernel. The limits are checked in the mach VM side, so we keep a copy so we
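
Read together, the matches above trace the lifecycle of a vm_map_copy_t: vm_map_copyin (or its kernel-buffer variant) builds a copy object, the copy-out and overwrite paths consume it, and vm_map_copy_discard releases a copy that is never used. As an illustration only (this sketch is not part of the indexed sources, and the file name and fill pattern are invented), the user-space program below exercises those paths through the Mach API, on the assumption that mach_vm_copy is serviced by a copyin followed by an overwrite of the destination range, and mach_vm_read by a copyin whose result is copied out into the caller's map.

    /*
     * Illustrative user-space sketch (not part of the indexed sources).
     * Build on Mac OS X with:  cc -o vm_copy_demo vm_copy_demo.c
     */
    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <mach/mach_error.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
            mach_vm_size_t          size = (mach_vm_size_t)getpagesize();
            mach_vm_address_t       src = 0, dst = 0;
            vm_offset_t             read_data = 0;
            mach_msg_type_number_t  read_cnt = 0;
            kern_return_t           kr;

            /* Two anonymous, page-aligned regions in this task's map. */
            kr = mach_vm_allocate(mach_task_self(), &src, size, VM_FLAGS_ANYWHERE);
            if (kr != KERN_SUCCESS)
                    return 1;
            kr = mach_vm_allocate(mach_task_self(), &dst, size, VM_FLAGS_ANYWHERE);
            if (kr != KERN_SUCCESS)
                    return 1;
            memset((void *)(uintptr_t)src, 0xAB, (size_t)size);

            /*
             * Aligned in-map copy: the kernel builds a copy object from the
             * source range and overwrites the destination with it; for
             * page-aligned ranges this can be done copy-on-write rather than
             * by moving bytes.
             */
            kr = mach_vm_copy(mach_task_self(), src, size, dst);
            printf("mach_vm_copy: %s\n", mach_error_string(kr));

            /*
             * Out-of-line read: the returned buffer is fresh virtual memory
             * copied out of a copy object into this task; the caller owns it
             * and must deallocate it.
             */
            kr = mach_vm_read(mach_task_self(), src, size, &read_data, &read_cnt);
            printf("mach_vm_read:  %s (%u bytes)\n", mach_error_string(kr), read_cnt);
            if (kr == KERN_SUCCESS)
                    mach_vm_deallocate(mach_task_self(),
                        (mach_vm_address_t)read_data, (mach_vm_size_t)read_cnt);

            mach_vm_deallocate(mach_task_self(), src, size);
            mach_vm_deallocate(mach_task_self(), dst, size);
            return 0;
    }

Sufficiently small copies are staged through a kernel buffer instead of a virtual copy, which is the VM_MAP_COPY_KERNEL_BUFFER case that several of the matches above fall under; larger, page-aligned copies take the entry-list path and rely on the copy-on-write machinery described in the matched header comments.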