Lines Matching defs:vs in /macosx-10.5.8/xnu-1228.15.4/osfmk/default_pager/
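
Throughout the matches below, vs is a vstruct: the structure the default pager keeps per memory object to track where that object's pages live on backing store. As a reading aid, here is a minimal sketch of the fields the matched lines touch. It is inferred from the listing itself (chiefly source lines 1239-1279), uses hosted stand-in types, and is not the kernel's definition, which lives in the default pager's internal headers; the lock fields taken by VS_LOCK()/VS_MAP_LOCK() are omitted.

#include <stdbool.h>

struct vs_map;                          /* one entry per backing-store cluster   */

typedef struct vstruct {
	void           *vs_pager_ops;       /* pager ops vector (line 1239)          */
	void           *vs_control;         /* memory object control (line 1240)     */
	unsigned int    vs_references;      /* reference count (line 1241)           */
	unsigned int    vs_seqno;           /* lock sequence number (line 1242)      */
	bool            vs_waiting_seqno;   /* sleep/wakeup flags (lines 1245-1248)  */
	bool            vs_waiting_read;
	bool            vs_waiting_write;
	bool            vs_waiting_async;
	unsigned int    vs_readers;         /* active readers (line 1257)            */
	unsigned int    vs_writers;         /* active writers (line 1258)            */
	unsigned int    vs_errors;          /* accumulated I/O errors (line 1260)    */
	unsigned int    vs_clshift;         /* log2 of pages per cluster (line 1262) */
	unsigned int    vs_size;            /* object size in clusters (line 1263)   */
	int             vs_async_pending;   /* in-flight async writes (line 1264)    */
	bool            vs_indirect;        /* two-level cluster map? (1273/1277)    */
	bool            vs_xfer_pending;    /* object transfer underway (line 1279)  */
	struct vs_map  *vs_dmap;            /* direct cluster map (small objects)    */
	struct vs_map **vs_imap;            /* indirect cluster map (large objects)  */
} *vstruct_t;

Only one of vs_dmap/vs_imap is meaningful at a time, selected by vs_indirect; the kernel may overlay the two, but they are kept separate here for clarity.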

203 	vstruct_t	 vs,
209 vstruct_t vs,
214 vstruct_t vs,
425 port is currently backed with a vs structure in the alias field
572 alias_struct->vs = (struct vstruct *)bs;
668 vstruct_t vs;
672 VSL_LOCK(); /* get the lock on the list of vs's */
684 /* lock and the vs locks are not being held by bumping the */
694 vs = (vstruct_t) queue_first((queue_entry_t)&(vstruct_list.vsl_queue));
695 if(vs == (vstruct_t)&vstruct_list) {
699 VS_LOCK(vs);
700 vs_async_wait(vs); /* wait for any pending async writes */
701 if ((vs_count != 0) && (vs != NULL))
702 vs->vs_async_pending += 1; /* hold parties calling */
704 VS_UNLOCK(vs);
706 while((vs_count != 0) && (vs != NULL)) {
736 vs, ps, upl);
745 VS_LOCK(vs);
746 vs->vs_async_pending -= 1; /* release vs_async_wait */
747 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
748 vs->vs_waiting_async = FALSE;
749 VS_UNLOCK(vs);
750 thread_wakeup(&vs->vs_async_pending);
752 VS_UNLOCK(vs);
764 next_vs = (vstruct_t) queue_next(&(vs->vs_links));
766 (vs != next_vs) && (vs_count != 1)) {
775 VS_LOCK(vs);
776 vs->vs_async_pending -= 1;
777 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
778 vs->vs_waiting_async = FALSE;
779 VS_UNLOCK(vs);
780 thread_wakeup(&vs->vs_async_pending);
782 VS_UNLOCK(vs);
784 if((vs == next_vs) || (next_vs == (vstruct_t)&vstruct_list))
785 vs = NULL;
787 vs = next_vs;
1130 alias_struct->vs = (struct vstruct *)vsa;
1182 alias_struct->vs = reply_port;
1226 vstruct_t vs;
1229 vs = (vstruct_t) zalloc(vstruct_zone);
1230 if (vs == VSTRUCT_NULL) {
1234 VS_LOCK_INIT(vs);
1239 vs->vs_pager_ops = NULL;
1240 vs->vs_control = MEMORY_OBJECT_CONTROL_NULL;
1241 vs->vs_references = 1;
1242 vs->vs_seqno = 0;
1245 vs->vs_waiting_seqno = FALSE;
1246 vs->vs_waiting_read = FALSE;
1247 vs->vs_waiting_write = FALSE;
1248 vs->vs_waiting_async = FALSE;
1250 mutex_init(&vs->vs_waiting_seqno, 0);
1251 mutex_init(&vs->vs_waiting_read, 0);
1252 mutex_init(&vs->vs_waiting_write, 0);
1253 mutex_init(&vs->vs_waiting_refs, 0);
1254 mutex_init(&vs->vs_waiting_async, 0);
1257 vs->vs_readers = 0;
1258 vs->vs_writers = 0;
1260 vs->vs_errors = 0;
1262 vs->vs_clshift = local_log2(bs_get_global_clsize(0));
1263 vs->vs_size = ((atop_32(round_page_32(size)) - 1) >> vs->vs_clshift) + 1;
1264 vs->vs_async_pending = 0;
1270 if (INDIRECT_CLMAP(vs->vs_size)) {
1271 vs->vs_imap = (struct vs_map **)
1272 kalloc(INDIRECT_CLMAP_SIZE(vs->vs_size));
1273 vs->vs_indirect = TRUE;
1275 vs->vs_dmap = (struct vs_map *)
1276 kalloc(CLMAP_SIZE(vs->vs_size));
1277 vs->vs_indirect = FALSE;
1279 vs->vs_xfer_pending = FALSE;
1281 ("map=0x%x, indirect=%d\n", (int) vs->vs_dmap, vs->vs_indirect));
1286 if (!vs->vs_dmap) {
1287 kfree(vs, sizeof *vs);
1294 if (vs->vs_indirect)
1295 memset(vs->vs_imap, 0,
1296 INDIRECT_CLMAP_SIZE(vs->vs_size));
1298 for (i = 0; i < vs->vs_size; i++)
1299 VSM_CLR(vs->vs_dmap[i]);
1301 VS_MAP_LOCK_INIT(vs);
1303 bs_commit(vs->vs_size);
1305 return vs;
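
The block of matches from source lines 1226 through 1305 is the vstruct creation path: the structure is zalloc'd from vstruct_zone, its counters and wait flags are initialized, vs_clshift and vs_size are derived from the global cluster size, and then either a direct or an indirect (two-level) cluster map is allocated depending on INDIRECT_CLMAP(vs->vs_size). Below is a minimal hosted sketch of that allocation decision (compare lines 1270-1299); CLMAP_ENTRIES, the sizing macros, and the use of malloc/calloc in place of kalloc() are stand-in assumptions, not the kernel's values.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct vs_map { unsigned int vsm_entry; };      /* placeholder per-cluster entry */

/* Hypothetical stand-ins for the kernel's sizing macros and kalloc(). */
#define CLMAP_ENTRIES                 128
#define CLMAP_SIZE(clusters)          ((clusters) * sizeof(struct vs_map))
#define INDIRECT_CLMAP_SIZE(clusters) \
	((((clusters) + CLMAP_ENTRIES - 1) / CLMAP_ENTRIES) * sizeof(struct vs_map *))
#define INDIRECT_CLMAP(clusters)      ((clusters) > CLMAP_ENTRIES)

struct vstruct_sketch {
	bool            vs_indirect;
	unsigned int    vs_size;                    /* size in clusters */
	struct vs_map  *vs_dmap;
	struct vs_map **vs_imap;
};

/* Mirrors the shape of the decision at source lines 1270-1299. */
static bool vs_map_alloc(struct vstruct_sketch *vs, unsigned int clusters)
{
	vs->vs_size = clusters;
	if (INDIRECT_CLMAP(vs->vs_size)) {
		/* Large object: an array of pointers to map pages, zero-filled now
		 * and populated lazily as clusters are touched (lines 1271-1273,
		 * 1294-1296). */
		vs->vs_indirect = true;
		vs->vs_imap = calloc(1, INDIRECT_CLMAP_SIZE(vs->vs_size));
		return vs->vs_imap != NULL;
	}
	/* Small object: one flat array with an entry per cluster, each entry
	 * cleared up front as VSM_CLR does (lines 1275-1277, 1298-1299). */
	vs->vs_indirect = false;
	vs->vs_dmap = malloc(CLMAP_SIZE(vs->vs_size));
	if (vs->vs_dmap == NULL)
		return false;
	memset(vs->vs_dmap, 0, CLMAP_SIZE(vs->vs_size));
	return true;
}

On allocation failure the kernel path frees the whole vstruct instead (line 1287); the sketch just reports failure to its caller.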
1452 vstruct_t vs,
1473 * dp_memory_object calls which rely on the vs lock, or by
1481 * data, a vstruct, and backing store are protected by the vs lock.
1506 } else if ((ps = ps_select_segment(vs->vs_clshift, psindex)) ==
1627 vstruct_t vs)
1632 VS_MAP_LOCK(vs);
1643 if (vs->vs_indirect) {
1644 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
1645 if (vs->vs_imap[i] != NULL) {
1646 ps_dealloc_vsmap(vs->vs_imap[i], CLMAP_ENTRIES);
1647 kfree(vs->vs_imap[i], CLMAP_THRESHOLD);
1650 kfree(vs->vs_imap, INDIRECT_CLMAP_SIZE(vs->vs_size));
1655 ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size);
1656 kfree(vs->vs_dmap, CLMAP_SIZE(vs->vs_size));
1658 VS_MAP_UNLOCK(vs);
1660 bs_commit(- vs->vs_size);
1662 zfree(vstruct_zone, vs);
1668 vstruct_t vs,
1678 if (vs->vs_size >= new_size) {
1707 old_map_size = INDIRECT_CLMAP_SIZE(vs->vs_size);
1708 if (vs->vs_indirect &&
1710 bs_commit(new_size - vs->vs_size);
1711 vs->vs_size = new_size;
1721 if (vs->vs_indirect) {
1723 memcpy(new_imap, vs->vs_imap, old_map_size);
1725 old_map = (void *) vs->vs_imap;
1754 old_map = (void *) vs->vs_dmap;
1755 old_map_size = CLMAP_SIZE(vs->vs_size);
1758 memcpy(new_dmap, vs->vs_dmap, old_map_size);
1761 for (i = vs->vs_size; i < newdsize; i++)
1765 vs->vs_imap = new_imap;
1766 vs->vs_indirect = TRUE;
1768 vs->vs_dmap = new_dmap;
1769 bs_commit(new_size - vs->vs_size);
1770 vs->vs_size = new_size;
1778 vstruct_t vs,
1791 VS_MAP_LOCK(vs);
1793 ASSERT(vs->vs_dmap);
1794 cluster = atop_32(offset) >> vs->vs_clshift;
1804 if (cluster >= vs->vs_size) {
1807 VS_MAP_UNLOCK(vs);
1810 if (ps_map_extend(vs, cluster + 1)) {
1811 VS_MAP_UNLOCK(vs);
1826 if (vs->vs_indirect) {
1830 vsmap = vs->vs_imap[ind_block];
1833 VS_MAP_UNLOCK(vs);
1840 VS_MAP_UNLOCK(vs);
1846 vs->vs_imap[ind_block] = vsmap;
1849 vsmap = vs->vs_dmap;
1864 VS_MAP_UNLOCK(vs);
1878 VS_MAP_UNLOCK(vs);
1884 newcl = ps_allocate_cluster(vs, &psindex,
1887 VS_MAP_UNLOCK(vs);
1901 clmap->cl_numpages = VSCLSIZE(vs);
1910 newcl = ptoa_32(newcl) << vs->vs_clshift;
1911 newoff = offset & ((1<<(vm_page_shift + vs->vs_clshift)) - 1);
1918 while ((size > 0) && (i < VSCLSIZE(vs))) {
1931 CLMAP_SHIFT(clmap, vs);
1932 CLMAP_SHIFTALLOC(clmap, vs);
1950 for (i = VSCLSIZE(vs) - clmap->cl_numpages; size > 0;
1955 ASSERT(i <= VSCLSIZE(vs));
1969 VS_MAP_UNLOCK(vs);
1972 VS_MAP_UNLOCK(vs);
1975 ("returning 0x%X,vs=0x%X,vsmap=0x%X,flag=%d\n",
1976 newcl+newoff, (int) vs, (int) vsmap, flag));
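
The run of matches from source lines 1778 to 1976 covers the cluster-map lookup (the ps_clmap path that later matches call with CL_FIND): an object offset is reduced to a cluster index at line 1794, a fresh cluster is allocated via ps_allocate_cluster when none is mapped (line 1884), and the result is turned back into a byte offset at lines 1910-1911. A small sketch of just that index arithmetic, assuming 4 KB pages (the kernel uses vm_page_shift):

#include <stdint.h>

#define VM_PAGE_SHIFT 12u   /* assumption: 4 KB pages; XNU derives this from vm_page_shift */

/*
 * Offset -> (cluster index, byte offset within the cluster), mirroring the
 * arithmetic at source lines 1794 and 1910-1911.  vs_clshift is the log2 of
 * pages per cluster, set from the global cluster size at line 1262.
 */
static void cluster_decompose(uint64_t offset, unsigned int vs_clshift,
                              uint64_t *cluster, uint64_t *off_in_cluster)
{
	*cluster        = (offset >> VM_PAGE_SHIFT) >> vs_clshift;   /* atop_32(offset) >> vs_clshift */
	*off_in_cluster = offset & ((1ull << (VM_PAGE_SHIFT + vs_clshift)) - 1);
}

For example, with vs_clshift = 2 (four pages per cluster) an offset of 0x5000 decomposes to cluster 1 at offset 0x1000 within the cluster.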
1989 vstruct_t vs,
1996 VS_MAP_LOCK(vs);
2006 cluster = atop_32(offset) >> vs->vs_clshift;
2007 if (vs->vs_indirect) /* indirect map */
2008 vsmap = vs->vs_imap[cluster/CLMAP_ENTRIES];
2010 vsmap = vs->vs_dmap;
2012 VS_MAP_UNLOCK(vs);
2026 if ( (newoff = (offset&((1<<(vm_page_shift+vs->vs_clshift))-1))) ) {
2034 while ((i < VSCLSIZE(vs)) && (length > 0)) {
2052 VS_MAP_UNLOCK(vs);
2059 vstruct_t vs,
2076 (void) ps_clmap(vs, offset, &clmap, CL_FIND, size, error);
2083 vstruct_t vs,
2106 ps_vs_write_complete(vs, offset, size, error);
2109 VS_LOCK(vs);
2110 ASSERT(vs->vs_async_pending > 0);
2111 vs->vs_async_pending -= size;
2112 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
2113 vs->vs_waiting_async = FALSE;
2114 VS_UNLOCK(vs);
2115 /* mutex_unlock(&vs->vs_waiting_async); */
2116 thread_wakeup(&vs->vs_async_pending);
2118 VS_UNLOCK(vs);
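
The matches at source lines 2106-2118 (and the same pattern at 745-752 and 775-782) show how asynchronous write completions are accounted: the completion path decrements vs_async_pending under VS_LOCK and, once the count reaches zero with vs_waiting_async set, clears the flag and thread_wakeup()s whoever is parked in vs_async_wait (line 700). A minimal user-space sketch of this counted wait, with pthreads standing in for VS_LOCK/thread_wakeup; the names below are hypothetical.

#include <pthread.h>
#include <stdbool.h>

struct async_counter {
	pthread_mutex_t lock;       /* plays the role of VS_LOCK()/VS_UNLOCK() */
	pthread_cond_t  wakeup;     /* plays the role of thread_wakeup()       */
	int             pending;    /* plays the role of vs_async_pending      */
	bool            waiting;    /* plays the role of vs_waiting_async      */
};

/* Completion side: mirrors the shape of source lines 2109-2118. */
static void async_write_done(struct async_counter *c, int size)
{
	pthread_mutex_lock(&c->lock);
	c->pending -= size;
	if (c->pending == 0 && c->waiting) {
		c->waiting = false;
		pthread_mutex_unlock(&c->lock);
		pthread_cond_broadcast(&c->wakeup);   /* thread_wakeup(&vs->vs_async_pending) */
	} else {
		pthread_mutex_unlock(&c->lock);
	}
}

/* Waiting side: roughly what vs_async_wait() (line 700) does: sleep until
 * no async writes remain pending on the vstruct. */
static void async_wait(struct async_counter *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->pending != 0) {
		c->waiting = true;
		pthread_cond_wait(&c->wakeup, &c->lock);
	}
	pthread_mutex_unlock(&c->lock);
}

Dropping the lock before the wakeup matches the ordering at lines 2113-2116; it is safe here because the waiter always rechecks pending under the mutex before going back to sleep.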
2135 ((struct vstruct_alias *)(reply_port->alias))->vs;
2185 ((struct vstruct_alias *)(reply_port->alias))->vs;
2499 __unused vstruct_t vs,
2514 ps_clunmap(vs, offset, size);
2524 vstruct_t vs,
2547 pages_in_cl = 1 << vs->vs_clshift;
2558 if ((ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0) == (vm_offset_t)-1) ||
2577 memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
2619 if (memory_object_cluster_size(vs->vs_control, &start, &cnt, (memory_object_fault_info_t)fault_info) == KERN_SUCCESS) {
2660 ps_offset[seg_index] = ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
2751 ps_offset[seg_index] = ps_clmap(vs, cur_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
2798 memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
2828 pvs_object_data_provided(vs, upl, vs_offset, xfer_size);
2858 pvs_object_data_provided(vs, upl, vs_offset, lsize);
2893 vstruct_t vs,
2918 pages_in_cl = 1 << vs->vs_clshift;
2970 memory_object_super_upl_request(vs->vs_control,
2984 ps_clmap(vs,
3110 ps_vs_write_complete(vs,
3143 assert(cnt <= (vm_page_size << vs->vs_clshift));
3151 /* from the offset in the vs in question */
3156 actual_offset = ps_clmap(vs, mobj_target_addr,
3170 ps_vs_write_complete(vs, mobj_target_addr,
3197 vstruct_t vs)
3204 if (vs->vs_indirect) {
3206 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3207 vsmap = vs->vs_imap[i];
3216 for (k = 0; k < VSCLSIZE(vs); k++) {
3223 vsmap = vs->vs_dmap;
3232 for (k = 0; k < VSCLSIZE(vs); k++) {
3244 vstruct_t vs,
3255 if (vs->vs_indirect) {
3257 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3258 vsmap = vs->vs_imap[i];
3261 VSCLSIZE(vs));
3268 offset += vm_page_size * VSCLSIZE(vs);
3272 for (k = 0; k < VSCLSIZE(vs); k++) {
3284 vsmap = vs->vs_dmap;
3291 offset += vm_page_size * VSCLSIZE(vs);
3295 for (k = 0; k < VSCLSIZE(vs); k++) {
3312 vstruct_t vs,
3321 VS_LOCK(vs); /* block all work on this vstruct */
3324 vs->vs_xfer_pending = TRUE;
3325 vs_wait_for_sync_writers(vs);
3326 vs_start_write(vs);
3327 vs_wait_for_readers(vs);
3328 /* we will unlock the vs to allow other writes while transferring */
3329 /* and will be guaranteed of the persistance of the vs struct */
3333 /* vs. Now that we are also supporting simple lock versions of */
3339 VS_UNLOCK(vs);
3341 if (vs->vs_indirect) {
3345 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3346 vsmap = vs->vs_imap[i];
3351 VSCLSIZE(vs) * i);
3352 if(i+1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size))
3353 vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i);
3361 if(vs_cluster_transfer(vs,
3362 (vm_page_size * (j << vs->vs_clshift))
3364 vm_page_size << vs->vs_clshift,
3367 VS_LOCK(vs);
3368 vs->vs_xfer_pending = FALSE;
3369 VS_UNLOCK(vs);
3370 vs_finish_write(vs);
3374 VS_LOCK(vs);
3375 vs->vs_xfer_pending = FALSE;
3376 VS_UNLOCK(vs);
3377 vs_finish_write(vs);
3378 VS_LOCK(vs);
3379 vs->vs_xfer_pending = TRUE;
3380 vs_wait_for_sync_writers(vs);
3381 vs_start_write(vs);
3382 vs_wait_for_readers(vs);
3383 VS_UNLOCK(vs);
3384 if (!(vs->vs_indirect)) {
3390 vsmap = vs->vs_dmap;
3392 VS_LOCK(vs);
3393 vs->vs_xfer_pending = FALSE;
3394 VS_UNLOCK(vs);
3395 vs_finish_write(vs);
3399 for (j = 0; j < vs->vs_size; j++) {
3404 if(vs_cluster_transfer(vs,
3405 vm_page_size * (j << vs->vs_clshift),
3406 vm_page_size << vs->vs_clshift,
3408 VS_LOCK(vs);
3409 vs->vs_xfer_pending = FALSE;
3410 VS_UNLOCK(vs);
3411 vs_finish_write(vs);
3415 VS_LOCK(vs);
3416 vs->vs_xfer_pending = FALSE;
3417 VS_UNLOCK(vs);
3418 vs_finish_write(vs);
3419 VS_LOCK(vs);
3420 vs->vs_xfer_pending = TRUE;
3421 VS_UNLOCK(vs);
3422 vs_wait_for_sync_writers(vs);
3423 vs_start_write(vs);
3424 vs_wait_for_readers(vs);
3425 if (vs->vs_indirect) {
3431 VS_LOCK(vs);
3432 vs->vs_xfer_pending = FALSE;
3433 VS_UNLOCK(vs);
3434 vs_finish_write(vs);
3442 vstruct_t vs,
3448 cluster = atop_32(offset) >> vs->vs_clshift;
3449 if (vs->vs_indirect) {
3453 vsmap = vs->vs_imap[ind_block];
3457 vsmap = vs->vs_dmap;
3464 vstruct_t vs,
3512 vsmap_ptr = vs_get_map_entry(vs, offset);
3513 actual_offset = ps_clmap(vs, offset, &clmap, CL_FIND, 0, 0);
3524 clsize = vm_page_size << vs->vs_clshift;
3568 ps_clunmap(vs, offset, unavail_size);
3571 if((offset & ((vm_page_size << vs->vs_clshift) - 1))
3628 if(vs_cluster_write(vs, upl, offset,
3633 ps_clunmap(vs, offset, size);
3640 ((vm_page_size << vs->vs_clshift)
3647 ps_clunmap(vs, offset, size);
3652 ps_clunmap(vs, offset, size);
3674 ps_clunmap(vs, offset, size);
3689 ps_clunmap(vs, offset, size);
3935 * Monitor the amount of available backing store vs. the amount of