Lines Matching refs:vs in /macosx-10.10.1/xnu-2782.1.97/osfmk/default_pager/

226 	vstruct_t	 vs,
232 vstruct_t vs,
237 vstruct_t vs,
454 port is currently backed with a vs structure in the alias field
601 alias_struct->vs = (struct vstruct *)bs;
701 vstruct_t vs;
705 VSL_LOCK(); /* get the lock on the list of vs's */
717 /* lock and the vs locks are not being held by bumping the */
727 vs = (vstruct_t) queue_first((queue_entry_t)&(vstruct_list.vsl_queue));
728 if(vs == (vstruct_t)&vstruct_list) {
732 VS_LOCK(vs);
733 vs_async_wait(vs); /* wait for any pending async writes */
734 if ((vs_count != 0) && (vs != NULL))
735 vs->vs_async_pending += 1; /* hold parties calling */
741 VS_UNLOCK(vs);
743 while((vs_count != 0) && (vs != NULL)) {
779 vs, ps, upl);
788 VS_LOCK(vs);
789 vs->vs_async_pending -= 1; /* release vs_async_wait */
790 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
791 vs->vs_waiting_async = FALSE;
792 VS_UNLOCK(vs);
793 thread_wakeup(&vs->vs_async_pending);
795 VS_UNLOCK(vs);
807 next_vs = (vstruct_t) queue_next(&(vs->vs_links));
809 (vs != next_vs) && (vs_count != 1)) {
818 VS_LOCK(vs);
819 vs->vs_async_pending -= 1;
820 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
821 vs->vs_waiting_async = FALSE;
822 VS_UNLOCK(vs);
823 thread_wakeup(&vs->vs_async_pending);
825 VS_UNLOCK(vs);
827 if((vs == next_vs) || (next_vs == (vstruct_t)&vstruct_list))
828 vs = NULL;
830 vs = next_vs;
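
The fragments above (source lines 701-830) show a walker over vstruct_list: each vstruct is pinned by bumping vs_async_pending under VS_LOCK before every lock is dropped for the slow per-object work, and the count is dropped again afterwards, waking anyone sleeping on &vs->vs_async_pending. Below is a minimal user-space sketch of that pin/unpin idea, with pthreads standing in for the kernel's VS_LOCK and thread_wakeup; every name in it is hypothetical and it is not the default pager's code.

    /*
     * Illustrative model of the pattern above: pin an element with a
     * per-element "async_pending" count while all locks are dropped,
     * then unpin and wake any waiter (thread_wakeup is modeled with a
     * condition variable).
     */
    #include <pthread.h>
    #include <stdio.h>

    struct node {
        pthread_mutex_t lock;          /* stands in for VS_LOCK/VS_UNLOCK   */
        pthread_cond_t  pending_cv;    /* stands in for thread_wakeup()     */
        int             async_pending; /* pins the node while locks dropped */
        int             waiting_async; /* a waiter wants a wakeup           */
        struct node    *next;
    };

    static void walker_visit(struct node *n)
    {
        pthread_mutex_lock(&n->lock);
        n->async_pending++;                 /* pin: node must stay alive */
        pthread_mutex_unlock(&n->lock);

        /* ...slow work with no locks held (I/O, cluster transfer, ...) */

        pthread_mutex_lock(&n->lock);
        n->async_pending--;                 /* unpin                     */
        if (n->async_pending == 0 && n->waiting_async) {
            n->waiting_async = 0;
            pthread_cond_broadcast(&n->pending_cv);  /* "thread_wakeup" */
        }
        pthread_mutex_unlock(&n->lock);
    }

    int main(void)
    {
        static struct node a = { PTHREAD_MUTEX_INITIALIZER,
                                 PTHREAD_COND_INITIALIZER, 0, 0, NULL };
        walker_visit(&a);
        printf("pending after visit: %d\n", a.async_pending);
        return 0;
    }
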
1237 alias_struct->vs = (struct vstruct *)vsa;
1289 alias_struct->vs = reply_port;
1333 vstruct_t vs;
1336 vs = (vstruct_t) zalloc(vstruct_zone);
1337 if (vs == VSTRUCT_NULL) {
1341 VS_LOCK_INIT(vs);
1346 vs->vs_pager_ops = NULL;
1347 vs->vs_control = MEMORY_OBJECT_CONTROL_NULL;
1348 vs->vs_references = 1;
1349 vs->vs_seqno = 0;
1351 vs->vs_waiting_seqno = FALSE;
1352 vs->vs_waiting_read = FALSE;
1353 vs->vs_waiting_write = FALSE;
1354 vs->vs_waiting_async = FALSE;
1356 vs->vs_readers = 0;
1357 vs->vs_writers = 0;
1359 vs->vs_errors = 0;
1361 vs->vs_clshift = local_log2(bs_get_global_clsize(0));
1362 vs->vs_size = ((atop_32(round_page_32(size)) - 1) >> vs->vs_clshift) + 1;
1363 vs->vs_async_pending = 0;
1369 if (INDIRECT_CLMAP(vs->vs_size)) {
1370 vs->vs_imap = (struct vs_map **)
1371 kalloc(INDIRECT_CLMAP_SIZE(vs->vs_size));
1372 vs->vs_indirect = TRUE;
1374 vs->vs_dmap = (struct vs_map *)
1375 kalloc(CLMAP_SIZE(vs->vs_size));
1376 vs->vs_indirect = FALSE;
1378 vs->vs_xfer_pending = FALSE;
1380 ("map=0x%x, indirect=%d\n", (int) vs->vs_dmap, vs->vs_indirect));
1385 if (!vs->vs_dmap) {
1386 kfree(vs, sizeof *vs);
1393 if (vs->vs_indirect)
1394 memset(vs->vs_imap, 0,
1395 INDIRECT_CLMAP_SIZE(vs->vs_size));
1397 for (i = 0; i < vs->vs_size; i++)
1398 VSM_CLR(vs->vs_dmap[i]);
1400 VS_MAP_LOCK_INIT(vs);
1402 bs_commit(vs->vs_size);
1404 return vs;
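
Source lines 1333-1404 above look like vstruct creation: the object's byte size is turned into a cluster count via vs_clshift, and either a flat map (vs_dmap) or a two-level map (vs_imap) is allocated depending on INDIRECT_CLMAP(vs->vs_size). A standalone sketch of that sizing decision follows; the page shift, cluster shift, and one-level cutoff are assumptions for illustration, since the real macro definitions are not part of this listing.

    /* Sketch of the size -> clusters -> map-shape decision (not XNU code). */
    #include <stdio.h>

    #define PAGE_SHIFT_DEMO  12        /* 4 KiB pages (assumption)          */
    #define CLSHIFT_DEMO     2         /* 4 pages per cluster (assumption)  */
    #define DIRECT_MAP_MAX   128       /* hypothetical one-level map cutoff */

    int main(void)
    {
        unsigned long long size = 512ULL << 20;   /* 512 MiB object */

        /* pages = ceil(size / page_size); clusters mirrors
         * ((atop_32(round_page_32(size)) - 1) >> vs_clshift) + 1        */
        unsigned long long pages    = (size + (1ULL << PAGE_SHIFT_DEMO) - 1)
                                          >> PAGE_SHIFT_DEMO;
        unsigned long long clusters = ((pages - 1) >> CLSHIFT_DEMO) + 1;

        int indirect = clusters > DIRECT_MAP_MAX; /* like INDIRECT_CLMAP() */

        printf("%llu bytes -> %llu pages -> %llu clusters, %s map\n",
               size, pages, clusters,
               indirect ? "two-level (imap)" : "one-level (dmap)");
        return 0;
    }
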
1588 vstruct_t vs,
1609 * dp_memory_object calls which rely on the vs lock, or by
1617 * data, a vstruct, and backing store are protected by the vs lock.
1648 } else if ((ps = ps_select_segment(vs->vs_clshift, psindex)) ==
1852 vstruct_t vs)
1857 VS_MAP_LOCK(vs);
1868 if (vs->vs_indirect) {
1869 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
1870 if (vs->vs_imap[i] != NULL) {
1871 ps_dealloc_vsmap(vs->vs_imap[i], CLMAP_ENTRIES);
1872 kfree(vs->vs_imap[i], CLMAP_THRESHOLD);
1875 kfree(vs->vs_imap, INDIRECT_CLMAP_SIZE(vs->vs_size));
1880 ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size);
1881 kfree(vs->vs_dmap, CLMAP_SIZE(vs->vs_size));
1883 VS_MAP_UNLOCK(vs);
1885 bs_commit(- vs->vs_size);
1887 VS_MAP_LOCK_DESTROY(vs);
1889 zfree(vstruct_zone, vs);
1894 vstruct_t vs,
1906 VS_MAP_LOCK(vs);
1913 fault_info.hi_offset = ptoa_32(vs->vs_size << vs->vs_clshift);
1926 if (vs->vs_indirect) {
1928 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
1929 vsmap = vs->vs_imap[i];
1934 VSCLSIZE(vs) * i);
1935 if (i+1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size))
1936 vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i);
1945 clmap_off += vm_page_size * VSCLSIZE(vs);
1948 VS_MAP_UNLOCK(vs);
1950 vs,
1955 VS_MAP_LOCK(vs); /* XXX what if it changed ? */
1968 vs->vs_imap[i] = NULL;
1972 // kfree(vs->vs_imap, INDIRECT_CLMAP_SIZE(vs->vs_size));
1978 vsmap = vs->vs_dmap;
1985 for (j = 0; j < vs->vs_size;) {
1991 clmap_off = vm_page_size * (j << vs->vs_clshift);
1992 VS_MAP_UNLOCK(vs);
1994 vs,
1999 VS_MAP_LOCK(vs); /* XXX what if it changed ? */
2011 ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size);
2012 // kfree(vs->vs_dmap, CLMAP_SIZE(vs->vs_size));
2016 VS_MAP_UNLOCK(vs);
2024 vstruct_t vs,
2034 if (vs->vs_size >= new_size) {
2063 old_map_size = INDIRECT_CLMAP_SIZE(vs->vs_size);
2064 if (vs->vs_indirect &&
2066 bs_commit(new_size - vs->vs_size);
2067 vs->vs_size = new_size;
2077 if (vs->vs_indirect) {
2079 memcpy(new_imap, vs->vs_imap, old_map_size);
2081 old_map = (void *) vs->vs_imap;
2110 old_map = (void *) vs->vs_dmap;
2111 old_map_size = CLMAP_SIZE(vs->vs_size);
2114 memcpy(new_dmap, vs->vs_dmap, old_map_size);
2117 for (i = vs->vs_size; i < newdsize; i++)
2121 vs->vs_imap = new_imap;
2122 vs->vs_indirect = TRUE;
2124 vs->vs_dmap = new_dmap;
2125 bs_commit(new_size - vs->vs_size);
2126 vs->vs_size = new_size;
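
Lines 2024-2126 appear to extend an existing cluster map: if the object already covers new_size nothing happens; otherwise a larger map is allocated, the old entries are copied with memcpy, the added entries are cleared, and only then are the map pointer and vs_size switched (with bs_commit adjusting global accounting). A simplified sketch of that grow-copy-clear sequence follows; it models only the one-level case, and its entry type and empty marker are made up for the example.

    /* Grow-copy-clear sketch of the map extension above (not XNU code). */
    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    #define VSM_EMPTY 0u                   /* stands in for VSM_CLR()     */

    struct demo_vs {
        unsigned *dmap;                    /* one-level cluster map        */
        size_t    size;                    /* number of clusters mapped    */
    };

    static int demo_map_extend(struct demo_vs *vs, size_t new_size)
    {
        if (vs->size >= new_size)
            return 0;                      /* nothing to do (cf. line 2034) */

        unsigned *new_map = malloc(new_size * sizeof *new_map);
        if (new_map == NULL)
            return -1;

        memcpy(new_map, vs->dmap, vs->size * sizeof *new_map);
        for (size_t i = vs->size; i < new_size; i++)
            new_map[i] = VSM_EMPTY;        /* clear newly added entries    */

        free(vs->dmap);                    /* old map released after switch */
        vs->dmap = new_map;
        vs->size = new_size;
        return 0;
    }

    int main(void)
    {
        struct demo_vs vs = { calloc(4, sizeof(unsigned)), 4 };

        if (vs.dmap != NULL && demo_map_extend(&vs, 16) == 0)
            printf("grown to %zu clusters\n", vs.size);
        free(vs.dmap);
        return 0;
    }
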
2134 vstruct_t vs,
2147 VS_MAP_LOCK(vs);
2149 ASSERT(vs->vs_dmap);
2150 cluster = atop_32(offset) >> vs->vs_clshift;
2160 if (cluster >= vs->vs_size) {
2163 VS_MAP_UNLOCK(vs);
2166 if (ps_map_extend(vs, cluster + 1)) {
2167 VS_MAP_UNLOCK(vs);
2182 if (vs->vs_indirect) {
2186 vsmap = vs->vs_imap[ind_block];
2189 VS_MAP_UNLOCK(vs);
2196 VS_MAP_UNLOCK(vs);
2202 vs->vs_imap[ind_block] = vsmap;
2205 vsmap = vs->vs_dmap;
2220 VS_MAP_UNLOCK(vs);
2234 VS_MAP_UNLOCK(vs);
2240 newcl = ps_allocate_cluster(vs, &psindex,
2243 VS_MAP_UNLOCK(vs);
2257 clmap->cl_numpages = VSCLSIZE(vs);
2266 newcl = ptoa_32(newcl) << vs->vs_clshift;
2267 newoff = offset & ((1<<(vm_page_shift + vs->vs_clshift)) - 1);
2274 while ((size > 0) && (i < VSCLSIZE(vs))) {
2287 CLMAP_SHIFT(clmap, vs);
2288 CLMAP_SHIFTALLOC(clmap, vs);
2306 for (i = VSCLSIZE(vs) - clmap->cl_numpages; size > 0;
2311 ASSERT(i <= VSCLSIZE(vs));
2325 VS_MAP_UNLOCK(vs);
2328 VS_MAP_UNLOCK(vs);
2331 ("returning 0x%X,vs=0x%X,vsmap=0x%X,flag=%d\n",
2332 newcl+newoff, (int) vs, (int) vsmap, flag));
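
The fragments above (lines 2148-2332, apparently the body of ps_clmap) expose the offset arithmetic directly: the cluster index is atop_32(offset) >> vs->vs_clshift, an indirect block is selected by dividing the cluster index by CLMAP_ENTRIES, and the offset within the cluster is offset & ((1 << (vm_page_shift + vs->vs_clshift)) - 1). The small program below just evaluates that decomposition with assumed shift values, so the constants are illustrative rather than XNU's.

    /* Offset -> page -> cluster -> indirect-block decomposition sketch. */
    #include <stdio.h>

    #define PAGE_SHIFT_DEMO    12     /* 4 KiB pages (assumption)            */
    #define CLSHIFT_DEMO       2      /* 4 pages per cluster (assumption)    */
    #define CLMAP_ENTRIES_DEMO 128    /* entries per indirect block (assumed) */

    int main(void)
    {
        unsigned long long offset = 0x123456ULL;

        /* cluster = atop(offset) >> vs_clshift  (cf. line 2150) */
        unsigned long long page      = offset >> PAGE_SHIFT_DEMO;
        unsigned long long cluster   = page >> CLSHIFT_DEMO;
        unsigned long long ind_block = cluster / CLMAP_ENTRIES_DEMO;

        /* newoff = offset & ((1 << (page_shift + clshift)) - 1)
         * (cf. line 2267)                                          */
        unsigned long long in_cluster =
            offset & ((1ULL << (PAGE_SHIFT_DEMO + CLSHIFT_DEMO)) - 1);

        printf("offset 0x%llx -> page %llu, cluster %llu, "
               "indirect block %llu, offset-in-cluster 0x%llx\n",
               offset, page, cluster, ind_block, in_cluster);
        return 0;
    }
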
2345 vstruct_t vs,
2355 VS_MAP_LOCK(vs);
2365 cluster = atop_32(offset) >> vs->vs_clshift;
2366 if (vs->vs_indirect) /* indirect map */
2367 vsmap = vs->vs_imap[cluster/CLMAP_ENTRIES];
2369 vsmap = vs->vs_dmap;
2372 VS_MAP_UNLOCK(vs);
2387 if ( (newoff = (offset&((1<<(vm_page_shift+vs->vs_clshift))-1))) ) {
2395 while ((i < VSCLSIZE(vs)) && (length > 0)) {
2409 vs->vs_clshift,
2410 VSCLSIZE(vs) * vm_page_size);
2420 VS_MAP_UNLOCK(vs);
2427 vstruct_t vs,
2444 (void) ps_clmap(vs, offset, &clmap, CL_FIND, size, error);
2451 vstruct_t vs,
2474 ps_vs_write_complete(vs, offset, size, error);
2477 VS_LOCK(vs);
2478 ASSERT(vs->vs_async_pending > 0);
2479 vs->vs_async_pending -= size;
2480 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
2481 vs->vs_waiting_async = FALSE;
2482 VS_UNLOCK(vs);
2483 thread_wakeup(&vs->vs_async_pending);
2485 VS_UNLOCK(vs);
2502 ((struct vstruct_alias *)(reply_port->ip_alias))->vs;
2552 ((struct vstruct_alias *)(reply_port->defpager_importance.alias))->vs;
2864 __unused vstruct_t vs,
2884 ps_clunmap(vs, offset, size);
2890 ps_clunmap(vs, offset, size);
2911 vstruct_t vs,
2940 pages_in_cl = 1 << vs->vs_clshift;
2978 if ((ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0) == (dp_offset_t)-1) ||
3007 memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
3071 if (memory_object_cluster_size(vs->vs_control, &cluster_start, &cluster_length, &io_streaming, (memory_object_fault_info_t)fault_info) == KERN_SUCCESS) {
3119 ps_offset[seg_index] = ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
3210 ps_offset[seg_index] = ps_clmap(vs, cur_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
3257 memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
3325 pvs_object_data_provided(vs, upl, vs_offset, lsize);
3353 vstruct_t vs,
3400 pages_in_cl = 1 << vs->vs_clshift;
3444 memory_object_super_upl_request(vs->vs_control,
3492 ps_clmap(vs,
3633 vs,
3656 assert(cnt <= (unsigned) (vm_page_size << vs->vs_clshift));
3664 /* from the offset in the vs in question */
3669 actual_offset = ps_clmap(vs, mobj_target_addr,
3683 ps_vs_write_complete(vs, mobj_target_addr,
3710 vstruct_t vs)
3717 if (vs->vs_indirect) {
3719 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3720 vsmap = vs->vs_imap[i];
3729 for (k = 0; k < VSCLSIZE(vs); k++) {
3736 vsmap = vs->vs_dmap;
3745 for (k = 0; k < VSCLSIZE(vs); k++) {
3757 vstruct_t vs,
3768 if (vs->vs_indirect) {
3770 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3771 vsmap = vs->vs_imap[i];
3774 VSCLSIZE(vs));
3781 offset += vm_page_size * VSCLSIZE(vs);
3785 for (k = 0; k < VSCLSIZE(vs); k++) {
3797 vsmap = vs->vs_dmap;
3804 offset += vm_page_size * VSCLSIZE(vs);
3808 for (k = 0; k < VSCLSIZE(vs); k++) {
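
Lines 3710-3808 walk the map purely to count what is allocated: with an indirect map, level-one slots can be NULL and are skipped, and each present cluster is scanned VSCLSIZE(vs) pages at a time. The sketch below mirrors that two-level walk over a toy representation; the per-page bitmap is an assumption, since the real vs_map encoding is not visible in this listing.

    /* Two-level walk that counts allocated pages (not XNU code). */
    #include <stdio.h>
    #include <stddef.h>

    #define CLMAP_ENTRIES_DEMO 4      /* clusters per indirect block (assumed) */
    #define PAGES_PER_CLUSTER  4      /* VSCLSIZE(vs) stand-in (assumed)       */

    struct cluster_demo { unsigned char page_used[PAGES_PER_CLUSTER]; };

    static size_t count_pages(struct cluster_demo **imap, size_t blocks)
    {
        size_t pages = 0;
        for (size_t i = 0; i < blocks; i++) {
            if (imap[i] == NULL)       /* cf. "if (vs->vs_imap[i] != NULL)" */
                continue;
            for (size_t j = 0; j < CLMAP_ENTRIES_DEMO; j++)
                for (size_t k = 0; k < PAGES_PER_CLUSTER; k++)
                    if (imap[i][j].page_used[k])
                        pages++;
        }
        return pages;
    }

    int main(void)
    {
        struct cluster_demo block0[CLMAP_ENTRIES_DEMO] = {0};
        struct cluster_demo *imap[2] = { block0, NULL }; /* 2nd block unused */

        block0[1].page_used[0] = 1;
        block0[1].page_used[3] = 1;

        printf("allocated pages: %zu\n", count_pages(imap, 2));
        return 0;
    }
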
3825 vstruct_t vs,
3834 VS_LOCK(vs); /* block all work on this vstruct */
3837 vs->vs_xfer_pending = TRUE;
3838 vs_wait_for_sync_writers(vs);
3839 vs_start_write(vs);
3840 vs_wait_for_readers(vs);
3841 /* we will unlock the vs to allow other writes while transferring */
3842 /* and will be guaranteed of the persistance of the vs struct */
3846 /* vs. Now that we are also supporting simple lock versions of */
3852 VS_UNLOCK(vs);
3854 if (vs->vs_indirect) {
3858 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3859 vsmap = vs->vs_imap[i];
3864 VSCLSIZE(vs) * i);
3865 if(i+1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size))
3866 vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i);
3874 if(vs_cluster_transfer(vs,
3875 (vm_page_size * (j << vs->vs_clshift))
3877 vm_page_size << vs->vs_clshift,
3880 VS_LOCK(vs);
3881 vs->vs_xfer_pending = FALSE;
3882 VS_UNLOCK(vs);
3883 vs_finish_write(vs);
3887 VS_LOCK(vs);
3888 vs->vs_xfer_pending = FALSE;
3889 VS_UNLOCK(vs);
3890 vs_finish_write(vs);
3899 VS_LOCK(vs);
3900 vs->vs_xfer_pending = TRUE;
3901 vs_wait_for_sync_writers(vs);
3902 vs_start_write(vs);
3903 vs_wait_for_readers(vs);
3904 VS_UNLOCK(vs);
3905 if (!(vs->vs_indirect)) {
3911 vsmap = vs->vs_dmap;
3913 VS_LOCK(vs);
3914 vs->vs_xfer_pending = FALSE;
3915 VS_UNLOCK(vs);
3916 vs_finish_write(vs);
3920 for (j = 0; j < vs->vs_size; j++) {
3925 if(vs_cluster_transfer(vs,
3926 vm_page_size * (j << vs->vs_clshift),
3927 vm_page_size << vs->vs_clshift,
3929 VS_LOCK(vs);
3930 vs->vs_xfer_pending = FALSE;
3931 VS_UNLOCK(vs);
3932 vs_finish_write(vs);
3936 VS_LOCK(vs);
3937 vs->vs_xfer_pending = FALSE;
3938 VS_UNLOCK(vs);
3939 vs_finish_write(vs);
3940 VS_LOCK(vs);
3941 vs->vs_xfer_pending = TRUE;
3942 vs_wait_for_sync_writers(vs);
3943 vs_start_write(vs);
3944 vs_wait_for_readers(vs);
3945 VS_UNLOCK(vs);
3946 if (vs->vs_indirect) {
3952 VS_LOCK(vs);
3953 vs->vs_xfer_pending = FALSE;
3954 VS_UNLOCK(vs);
3955 vs_finish_write(vs);
3963 vstruct_t vs,
3969 cluster = atop_32(offset) >> vs->vs_clshift;
3970 if (vs->vs_indirect) {
3974 vsmap = vs->vs_imap[ind_block];
3978 vsmap = vs->vs_dmap;
3985 vstruct_t vs,
4033 vsmap_ptr = vs_get_map_entry(vs, offset);
4034 actual_offset = ps_clmap(vs, offset, &clmap, CL_FIND, 0, 0);
4045 clsize = vm_page_size << vs->vs_clshift;
4089 ps_clunmap(vs, offset, unavail_size);
4092 if((offset & ((vm_page_size << vs->vs_clshift) - 1))
4149 if(vs_cluster_write(vs, upl, offset,
4154 ps_clunmap(vs, offset, size);
4161 ((vm_page_size << vs->vs_clshift)
4168 ps_clunmap(vs, offset, size);
4173 ps_clunmap(vs, offset, size);
4195 ps_clunmap(vs, offset, size);
4210 ps_clunmap(vs, offset, size);
4557 * Monitor the amount of available backing store vs. the amount of