Lines Matching refs:vs

226 	vstruct_t	 vs,
232 vstruct_t vs,
237 vstruct_t vs,
454 port is currently backed with a vs structure in the alias field
601 alias_struct->vs = (struct vstruct *)bs;
701 vstruct_t vs;
705 VSL_LOCK(); /* get the lock on the list of vs's */
717 /* lock and the vs locks are not being held by bumping the */
727 vs = (vstruct_t) queue_first((queue_entry_t)&(vstruct_list.vsl_queue));
728 if(vs == (vstruct_t)&vstruct_list) {
732 VS_LOCK(vs);
733 vs_async_wait(vs); /* wait for any pending async writes */
734 if ((vs_count != 0) && (vs != NULL))
735 vs->vs_async_pending += 1; /* hold parties calling */
741 VS_UNLOCK(vs);
743 while((vs_count != 0) && (vs != NULL)) {
779 vs, ps, upl);
788 VS_LOCK(vs);
789 vs->vs_async_pending -= 1; /* release vs_async_wait */
790 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
791 vs->vs_waiting_async = FALSE;
792 VS_UNLOCK(vs);
793 thread_wakeup(&vs->vs_async_pending);
795 VS_UNLOCK(vs);
807 next_vs = (vstruct_t) queue_next(&(vs->vs_links));
809 (vs != next_vs) && (vs_count != 1)) {
818 VS_LOCK(vs);
819 vs->vs_async_pending -= 1;
820 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
821 vs->vs_waiting_async = FALSE;
822 VS_UNLOCK(vs);
823 thread_wakeup(&vs->vs_async_pending);
825 VS_UNLOCK(vs);
827 if((vs == next_vs) || (next_vs == (vstruct_t)&vstruct_list))
828 vs = NULL;
830 vs = next_vs;
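The fragments around source lines 701-830 above come from a walk over the global list of vstructs in which the list lock is dropped while each object is processed. A minimal standalone sketch of that hold-while-unlocked pattern, using pthreads and simplified stand-in types instead of the kernel's VSL_LOCK/VS_LOCK macros and vs_async_pending bookkeeping; every name below is hypothetical, not the kernel's:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct vs_node {
        struct vs_node *next;
        pthread_mutex_t lock;
        pthread_cond_t  async_done;
        int             async_pending;  /* holds keeping this node pinned   */
        bool            waiting_async;  /* a waiter is parked on async_done */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct vs_node *list_head;

    static void hold(struct vs_node *vs)
    {
        pthread_mutex_lock(&vs->lock);
        vs->async_pending++;            /* keep vs alive after list_lock drops */
        pthread_mutex_unlock(&vs->lock);
    }

    static void release(struct vs_node *vs)
    {
        pthread_mutex_lock(&vs->lock);
        vs->async_pending--;            /* analogue of vs_async_pending -= 1 */
        if (vs->async_pending == 0 && vs->waiting_async) {
            vs->waiting_async = false;
            pthread_cond_broadcast(&vs->async_done);
        }
        pthread_mutex_unlock(&vs->lock);
    }

    static void walk_list(void (*work)(struct vs_node *))
    {
        pthread_mutex_lock(&list_lock);
        struct vs_node *vs = list_head;
        if (vs != NULL)
            hold(vs);
        pthread_mutex_unlock(&list_lock);

        while (vs != NULL) {
            work(vs);                   /* e.g. push out pending async writes */

            pthread_mutex_lock(&list_lock);
            struct vs_node *next = vs->next;   /* choose the successor first ... */
            if (next != NULL)
                hold(next);
            pthread_mutex_unlock(&list_lock);

            release(vs);                /* ... then drop our hold on vs */
            vs = next;
        }
    }

The kernel's list is circular with a sentinel head, which is why the fragments compare next_vs against &vstruct_list to detect the end of the walk.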
1237 alias_struct->vs = (struct vstruct *)vsa;
1289 alias_struct->vs = reply_port;
1333 vstruct_t vs;
1336 vs = (vstruct_t) zalloc(vstruct_zone);
1337 if (vs == VSTRUCT_NULL) {
1341 VS_LOCK_INIT(vs);
1346 vs->vs_pager_ops = NULL;
1347 vs->vs_control = MEMORY_OBJECT_CONTROL_NULL;
1348 vs->vs_references = 1;
1349 vs->vs_seqno = 0;
1351 vs->vs_waiting_seqno = FALSE;
1352 vs->vs_waiting_read = FALSE;
1353 vs->vs_waiting_write = FALSE;
1354 vs->vs_waiting_async = FALSE;
1356 vs->vs_readers = 0;
1357 vs->vs_writers = 0;
1359 vs->vs_errors = 0;
1361 vs->vs_clshift = local_log2(bs_get_global_clsize(0));
1362 vs->vs_size = ((atop_32(round_page_32(size)) - 1) >> vs->vs_clshift) + 1;
1363 vs->vs_async_pending = 0;
1369 if (INDIRECT_CLMAP(vs->vs_size)) {
1370 vs->vs_imap = (struct vs_map **)
1371 kalloc(INDIRECT_CLMAP_SIZE(vs->vs_size));
1372 vs->vs_indirect = TRUE;
1374 vs->vs_dmap = (struct vs_map *)
1375 kalloc(CLMAP_SIZE(vs->vs_size));
1376 vs->vs_indirect = FALSE;
1378 vs->vs_xfer_pending = FALSE;
1380 ("map=0x%x, indirect=%d\n", (int) vs->vs_dmap, vs->vs_indirect));
1385 if (!vs->vs_dmap) {
1386 kfree(vs, sizeof *vs);
1393 if (vs->vs_indirect)
1394 memset(vs->vs_imap, 0,
1395 INDIRECT_CLMAP_SIZE(vs->vs_size));
1397 for (i = 0; i < vs->vs_size; i++)
1398 VSM_CLR(vs->vs_dmap[i]);
1400 VS_MAP_LOCK_INIT(vs);
1402 bs_commit(vs->vs_size);
1404 return vs;
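The fragments around source lines 1336-1404 above are the vstruct creation path: the object size is converted to a cluster count, and the cluster map is allocated either as one flat array or, past a size threshold, as a two-level (indirect) table whose second-level blocks are filled in on demand. A hedged standalone sketch of that sizing decision; the types, constants, and allocation calls below are simplified stand-ins, not the kernel's zalloc/kalloc interfaces:

    #include <stdlib.h>
    #include <stdbool.h>

    #define PAGE_SHIFT       12u     /* 4 KB pages: an example value          */
    #define CLMAP_THRESHOLD 512u     /* stand-in for the kernel's block size  */

    struct vs_map { unsigned ps_segment, ps_clnum; };

    struct vstruct_sketch {
        unsigned        clshift;     /* log2(pages per cluster)               */
        size_t          nclusters;   /* number of cluster-map entries         */
        bool            indirect;    /* two-level map?                        */
        struct vs_map  *dmap;        /* flat map, or ...                      */
        struct vs_map **imap;        /* ... first level of a two-level map    */
    };

    /* bytes is assumed to be non-zero */
    static struct vstruct_sketch *vs_create(size_t bytes, unsigned clshift)
    {
        struct vstruct_sketch *vs = calloc(1, sizeof *vs);
        if (vs == NULL)
            return NULL;

        size_t pages  = (bytes + (1u << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
        vs->clshift   = clshift;
        vs->nclusters = ((pages - 1) >> clshift) + 1;  /* round up to clusters */

        size_t direct_bytes = vs->nclusters * sizeof(struct vs_map);
        if (direct_bytes > CLMAP_THRESHOLD) {
            /* Too large for one flat array: allocate only the first level;
             * second-level blocks are filled in lazily as clusters are used. */
            size_t blocks = (direct_bytes + CLMAP_THRESHOLD - 1) / CLMAP_THRESHOLD;
            vs->imap = calloc(blocks, sizeof *vs->imap);
            vs->indirect = true;
            if (vs->imap == NULL) { free(vs); return NULL; }
        } else {
            vs->dmap = calloc(vs->nclusters, sizeof *vs->dmap);
            vs->indirect = false;
            if (vs->dmap == NULL) { free(vs); return NULL; }
        }
        return vs;
    }

The kernel's INDIRECT_CLMAP/CLMAP_SIZE macros make the same decision, and the dealloc fragments later in the listing (source lines 1852-1889) free the map in the mirror order.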
1588 vstruct_t vs,
1609 * dp_memory_object calls which rely on the vs lock, or by
1617 * data, a vstruct, and backing store are protected by the vs lock.
1648 } else if ((ps = ps_select_segment(vs->vs_clshift, psindex)) ==
1852 vstruct_t vs)
1857 VS_MAP_LOCK(vs);
1868 if (vs->vs_indirect) {
1869 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
1870 if (vs->vs_imap[i] != NULL) {
1871 ps_dealloc_vsmap(vs->vs_imap[i], CLMAP_ENTRIES);
1872 kfree(vs->vs_imap[i], CLMAP_THRESHOLD);
1875 kfree(vs->vs_imap, INDIRECT_CLMAP_SIZE(vs->vs_size));
1880 ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size);
1881 kfree(vs->vs_dmap, CLMAP_SIZE(vs->vs_size));
1883 VS_MAP_UNLOCK(vs);
1885 bs_commit(- vs->vs_size);
1887 VS_MAP_LOCK_DESTROY(vs);
1889 zfree(vstruct_zone, vs);
1894 vstruct_t vs,
1906 VS_MAP_LOCK(vs);
1912 fault_info.hi_offset = ptoa_32(vs->vs_size << vs->vs_clshift);
1925 if (vs->vs_indirect) {
1927 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
1928 vsmap = vs->vs_imap[i];
1933 VSCLSIZE(vs) * i);
1934 if (i+1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size))
1935 vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i);
1944 clmap_off += vm_page_size * VSCLSIZE(vs);
1947 VS_MAP_UNLOCK(vs);
1949 vs,
1953 VS_MAP_LOCK(vs); /* XXX what if it changed ? */
1963 vs->vs_imap[i] = NULL;
1967 // kfree(vs->vs_imap, INDIRECT_CLMAP_SIZE(vs->vs_size));
1973 vsmap = vs->vs_dmap;
1980 for (j = 0; j < vs->vs_size;) {
1986 clmap_off = vm_page_size * (j << vs->vs_clshift);
1987 VS_MAP_UNLOCK(vs);
1989 vs,
1993 VS_MAP_LOCK(vs); /* XXX what if it changed ? */
2002 ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size);
2003 // kfree(vs->vs_dmap, CLMAP_SIZE(vs->vs_size));
2007 VS_MAP_UNLOCK(vs);
2013 vstruct_t vs,
2023 if (vs->vs_size >= new_size) {
2052 old_map_size = INDIRECT_CLMAP_SIZE(vs->vs_size);
2053 if (vs->vs_indirect &&
2055 bs_commit(new_size - vs->vs_size);
2056 vs->vs_size = new_size;
2066 if (vs->vs_indirect) {
2068 memcpy(new_imap, vs->vs_imap, old_map_size);
2070 old_map = (void *) vs->vs_imap;
2099 old_map = (void *) vs->vs_dmap;
2100 old_map_size = CLMAP_SIZE(vs->vs_size);
2103 memcpy(new_dmap, vs->vs_dmap, old_map_size);
2106 for (i = vs->vs_size; i < newdsize; i++)
2110 vs->vs_imap = new_imap;
2111 vs->vs_indirect = TRUE;
2113 vs->vs_dmap = new_dmap;
2114 bs_commit(new_size - vs->vs_size);
2115 vs->vs_size = new_size;
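The ps_map_extend fragments above (source lines 2013-2115) grow the cluster map when an offset past the current size is touched: the old map is copied into a larger one and, once it crosses the threshold, the layout is switched from direct to indirect. A simplified sketch of that growth step, reusing the hypothetical vstruct_sketch type and CLMAP_THRESHOLD constant from the earlier sketch; the block-sizing details differ from the kernel's:

    #include <string.h>

    /* Grow vs to cover new_nclusters cluster-map entries. */
    static int vs_map_extend(struct vstruct_sketch *vs, size_t new_nclusters)
    {
        if (new_nclusters <= vs->nclusters)
            return 0;                                   /* already big enough */

        size_t new_bytes = new_nclusters * sizeof(struct vs_map);
        if (!vs->indirect && new_bytes <= CLMAP_THRESHOLD) {
            /* Still fits in one flat array: grow it and zero the new tail. */
            struct vs_map *d = realloc(vs->dmap, new_bytes);
            if (d == NULL)
                return -1;
            memset(d + vs->nclusters, 0,
                   (new_nclusters - vs->nclusters) * sizeof *d);
            vs->dmap = d;
        } else {
            /* Switch to (or grow) the two-level layout. */
            size_t blocks = (new_bytes + CLMAP_THRESHOLD - 1) / CLMAP_THRESHOLD;
            struct vs_map **im = calloc(blocks, sizeof *im);
            if (im == NULL)
                return -1;
            if (vs->indirect) {
                size_t old_bytes  = vs->nclusters * sizeof(struct vs_map);
                size_t old_blocks = (old_bytes + CLMAP_THRESHOLD - 1)
                                        / CLMAP_THRESHOLD;
                memcpy(im, vs->imap, old_blocks * sizeof *im);
                free(vs->imap);
            } else {
                /* Move the old flat entries into a full-sized block 0. */
                im[0] = calloc(CLMAP_THRESHOLD / sizeof(struct vs_map),
                               sizeof(struct vs_map));
                if (im[0] == NULL) { free(im); return -1; }
                memcpy(im[0], vs->dmap, vs->nclusters * sizeof(struct vs_map));
                free(vs->dmap);
                vs->dmap = NULL;
            }
            vs->imap = im;
            vs->indirect = true;
        }
        vs->nclusters = new_nclusters;
        return 0;
    }

The bs_commit calls in the fragments account for the additional clusters against the backing store; that bookkeeping is omitted here.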
2123 vstruct_t vs,
2136 VS_MAP_LOCK(vs);
2138 ASSERT(vs->vs_dmap);
2139 cluster = atop_32(offset) >> vs->vs_clshift;
2149 if (cluster >= vs->vs_size) {
2152 VS_MAP_UNLOCK(vs);
2155 if (ps_map_extend(vs, cluster + 1)) {
2156 VS_MAP_UNLOCK(vs);
2171 if (vs->vs_indirect) {
2175 vsmap = vs->vs_imap[ind_block];
2178 VS_MAP_UNLOCK(vs);
2185 VS_MAP_UNLOCK(vs);
2191 vs->vs_imap[ind_block] = vsmap;
2194 vsmap = vs->vs_dmap;
2209 VS_MAP_UNLOCK(vs);
2223 VS_MAP_UNLOCK(vs);
2229 newcl = ps_allocate_cluster(vs, &psindex,
2232 VS_MAP_UNLOCK(vs);
2246 clmap->cl_numpages = VSCLSIZE(vs);
2255 newcl = ptoa_32(newcl) << vs->vs_clshift;
2256 newoff = offset & ((1<<(vm_page_shift + vs->vs_clshift)) - 1);
2263 while ((size > 0) && (i < VSCLSIZE(vs))) {
2276 CLMAP_SHIFT(clmap, vs);
2277 CLMAP_SHIFTALLOC(clmap, vs);
2295 for (i = VSCLSIZE(vs) - clmap->cl_numpages; size > 0;
2300 ASSERT(i <= VSCLSIZE(vs));
2314 VS_MAP_UNLOCK(vs);
2317 VS_MAP_UNLOCK(vs);
2320 ("returning 0x%X,vs=0x%X,vsmap=0x%X,flag=%d\n",
2321 newcl+newoff, (int) vs, (int) vsmap, flag));
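ps_clmap (source lines 2123-2321 above) turns a byte offset into a cluster index plus an offset within that cluster, then picks the right second-level block when the map is indirect. The arithmetic is all shifts and masks; a small standalone check with example constants (the page shift, cluster shift, and CLMAP_ENTRIES values below are illustrative, not the kernel's):

    #include <assert.h>
    #include <stdint.h>

    #define VM_PAGE_SHIFT  12u      /* 4 KB pages (example value)            */
    #define CL_SHIFT        2u      /* 4 pages per cluster (example)         */
    #define CLMAP_ENTRIES 128u      /* entries per indirect block (example)  */

    int main(void)
    {
        uint64_t offset   = 0x2345678;                  /* byte offset        */
        uint64_t page     = offset >> VM_PAGE_SHIFT;    /* atop_32()          */
        uint64_t cluster  = page >> CL_SHIFT;           /* cluster index      */
        uint64_t in_clust = offset &
            ((1u << (VM_PAGE_SHIFT + CL_SHIFT)) - 1);   /* offset in cluster  */
        uint64_t ind_blk  = cluster / CLMAP_ENTRIES;    /* indirect block     */
        uint64_t ind_off  = cluster % CLMAP_ENTRIES;    /* entry in block     */

        assert(page     == 0x2345);
        assert(cluster  == 0x8D1);
        assert(in_clust == 0x1678);
        assert(ind_blk  == 0x11);
        assert(ind_off  == 0x51);
        return 0;
    }

Source lines 2139, 2255, and 2256 in the fragments show the same computations in kernel form (atop_32, the vs_clshift shifts, and the vm_page_shift + vs_clshift mask).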
2334 vstruct_t vs,
2344 VS_MAP_LOCK(vs);
2354 cluster = atop_32(offset) >> vs->vs_clshift;
2355 if (vs->vs_indirect) /* indirect map */
2356 vsmap = vs->vs_imap[cluster/CLMAP_ENTRIES];
2358 vsmap = vs->vs_dmap;
2361 VS_MAP_UNLOCK(vs);
2376 if ( (newoff = (offset&((1<<(vm_page_shift+vs->vs_clshift))-1))) ) {
2384 while ((i < VSCLSIZE(vs)) && (length > 0)) {
2398 vs->vs_clshift,
2399 VSCLSIZE(vs) * vm_page_size);
2409 VS_MAP_UNLOCK(vs);
2416 vstruct_t vs,
2433 (void) ps_clmap(vs, offset, &clmap, CL_FIND, size, error);
2440 vstruct_t vs,
2463 ps_vs_write_complete(vs, offset, size, error);
2466 VS_LOCK(vs);
2467 ASSERT(vs->vs_async_pending > 0);
2468 vs->vs_async_pending -= size;
2469 if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
2470 vs->vs_waiting_async = FALSE;
2471 VS_UNLOCK(vs);
2472 thread_wakeup(&vs->vs_async_pending);
2474 VS_UNLOCK(vs);
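The completion fragments above (source lines 2466-2474, and the matching decrement near 788-795) drop vs_async_pending and wake any thread blocked in vs_async_wait once the count reaches zero. A standalone sketch of that handshake using pthread primitives in place of the kernel's assert_wait/thread_wakeup; the names are hypothetical:

    #include <pthread.h>
    #include <stdbool.h>

    struct async_state {
        pthread_mutex_t lock;
        pthread_cond_t  cv;
        long            pending;        /* outstanding async writes  */
        bool            waiting;        /* someone is in async_wait? */
    };

    /* Completion side: drop one unit of pending work, wake waiters at zero. */
    static void async_complete(struct async_state *s, long units)
    {
        pthread_mutex_lock(&s->lock);
        s->pending -= units;
        if (s->pending == 0 && s->waiting) {
            s->waiting = false;
            pthread_cond_broadcast(&s->cv);
        }
        pthread_mutex_unlock(&s->lock);
    }

    /* Waiter side: the analogue of vs_async_wait(). */
    static void async_wait(struct async_state *s)
    {
        pthread_mutex_lock(&s->lock);
        while (s->pending != 0) {
            s->waiting = true;
            pthread_cond_wait(&s->cv, &s->lock);
        }
        pthread_mutex_unlock(&s->lock);
    }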
2491 ((struct vstruct_alias *)(reply_port->alias))->vs;
2541 ((struct vstruct_alias *)(reply_port->alias))->vs;
2853 __unused vstruct_t vs,
2872 ps_clunmap(vs, offset, size);
2893 vstruct_t vs,
2921 pages_in_cl = 1 << vs->vs_clshift;
2956 if ((ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0) == (dp_offset_t)-1) ||
2985 memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
3049 if (memory_object_cluster_size(vs->vs_control, &cluster_start, &cluster_length, &io_streaming, (memory_object_fault_info_t)fault_info) == KERN_SUCCESS) {
3096 ps_offset[seg_index] = ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
3187 ps_offset[seg_index] = ps_clmap(vs, cur_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
3234 memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
3264 pvs_object_data_provided(vs, upl, vs_offset, xfer_size);
3294 pvs_object_data_provided(vs, upl, vs_offset, lsize);
3329 vstruct_t vs,
3376 pages_in_cl = 1 << vs->vs_clshift;
3420 memory_object_super_upl_request(vs->vs_control,
3468 ps_clmap(vs,
3609 vs,
3632 assert(cnt <= (unsigned) (vm_page_size << vs->vs_clshift));
3640 /* from the offset in the vs in question */
3645 actual_offset = ps_clmap(vs, mobj_target_addr,
3659 ps_vs_write_complete(vs, mobj_target_addr,
3686 vstruct_t vs)
3693 if (vs->vs_indirect) {
3695 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3696 vsmap = vs->vs_imap[i];
3705 for (k = 0; k < VSCLSIZE(vs); k++) {
3712 vsmap = vs->vs_dmap;
3721 for (k = 0; k < VSCLSIZE(vs); k++) {
3733 vstruct_t vs,
3744 if (vs->vs_indirect) {
3746 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3747 vsmap = vs->vs_imap[i];
3750 VSCLSIZE(vs));
3757 offset += vm_page_size * VSCLSIZE(vs);
3761 for (k = 0; k < VSCLSIZE(vs); k++) {
3773 vsmap = vs->vs_dmap;
3780 offset += vm_page_size * VSCLSIZE(vs);
3784 for (k = 0; k < VSCLSIZE(vs); k++) {
3801 vstruct_t vs,
3810 VS_LOCK(vs); /* block all work on this vstruct */
3813 vs->vs_xfer_pending = TRUE;
3814 vs_wait_for_sync_writers(vs);
3815 vs_start_write(vs);
3816 vs_wait_for_readers(vs);
3817 /* we will unlock the vs to allow other writes while transferring */
3818 /* and will be guaranteed of the persistence of the vs struct */
3822 /* vs. Now that we are also supporting simple lock versions of */
3828 VS_UNLOCK(vs);
3830 if (vs->vs_indirect) {
3834 for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
3835 vsmap = vs->vs_imap[i];
3840 VSCLSIZE(vs) * i);
3841 if(i+1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size))
3842 vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i);
3850 if(vs_cluster_transfer(vs,
3851 (vm_page_size * (j << vs->vs_clshift))
3853 vm_page_size << vs->vs_clshift,
3856 VS_LOCK(vs);
3857 vs->vs_xfer_pending = FALSE;
3858 VS_UNLOCK(vs);
3859 vs_finish_write(vs);
3863 VS_LOCK(vs);
3864 vs->vs_xfer_pending = FALSE;
3865 VS_UNLOCK(vs);
3866 vs_finish_write(vs);
3875 VS_LOCK(vs);
3876 vs->vs_xfer_pending = TRUE;
3877 vs_wait_for_sync_writers(vs);
3878 vs_start_write(vs);
3879 vs_wait_for_readers(vs);
3880 VS_UNLOCK(vs);
3881 if (!(vs->vs_indirect)) {
3887 vsmap = vs->vs_dmap;
3889 VS_LOCK(vs);
3890 vs->vs_xfer_pending = FALSE;
3891 VS_UNLOCK(vs);
3892 vs_finish_write(vs);
3896 for (j = 0; j < vs->vs_size; j++) {
3901 if(vs_cluster_transfer(vs,
3902 vm_page_size * (j << vs->vs_clshift),
3903 vm_page_size << vs->vs_clshift,
3905 VS_LOCK(vs);
3906 vs->vs_xfer_pending = FALSE;
3907 VS_UNLOCK(vs);
3908 vs_finish_write(vs);
3912 VS_LOCK(vs);
3913 vs->vs_xfer_pending = FALSE;
3914 VS_UNLOCK(vs);
3915 vs_finish_write(vs);
3916 VS_LOCK(vs);
3917 vs->vs_xfer_pending = TRUE;
3918 vs_wait_for_sync_writers(vs);
3919 vs_start_write(vs);
3920 vs_wait_for_readers(vs);
3921 VS_UNLOCK(vs);
3922 if (vs->vs_indirect) {
3928 VS_LOCK(vs);
3929 vs->vs_xfer_pending = FALSE;
3930 VS_UNLOCK(vs);
3931 vs_finish_write(vs);
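The transfer path above (source lines 3810-3931) repeatedly quiesces the vstruct before touching its map: set vs_xfer_pending, wait out synchronous writers and readers, drop the vs lock while individual clusters are transferred, and clear the flag plus call vs_finish_write on every exit path. A condensed standalone model of that sequence with pthread primitives; the struct and helpers are hypothetical simplifications, not the kernel's vs_wait_for_sync_writers/vs_start_write/vs_wait_for_readers implementations:

    #include <pthread.h>
    #include <stdbool.h>

    struct vs_sync {
        pthread_mutex_t lock;
        pthread_cond_t  cv;
        int             readers;
        int             writers;        /* synchronous writers            */
        bool            xfer_pending;   /* a transfer owns the map        */
    };

    /* Quiesce the object before walking its cluster map, mirroring the
     * VS_LOCK / wait-for-writers / start-write / wait-for-readers
     * sequence in the fragments above. */
    static void xfer_begin(struct vs_sync *s)
    {
        pthread_mutex_lock(&s->lock);
        s->xfer_pending = true;
        while (s->writers != 0)         /* vs_wait_for_sync_writers() */
            pthread_cond_wait(&s->cv, &s->lock);
        s->writers++;                   /* vs_start_write(): hold a writer slot */
        while (s->readers != 0)         /* vs_wait_for_readers()      */
            pthread_cond_wait(&s->cv, &s->lock);
        pthread_mutex_unlock(&s->lock); /* clusters are transferred unlocked */
    }

    /* Every exit path clears the flag and finishes the write. */
    static void xfer_end(struct vs_sync *s)
    {
        pthread_mutex_lock(&s->lock);
        s->xfer_pending = false;
        s->writers--;                   /* vs_finish_write() */
        pthread_cond_broadcast(&s->cv);
        pthread_mutex_unlock(&s->lock);
    }

In the fragments, the flag is cleared and vs_finish_write called at every early return, which is why the same four lines repeat at each bail-out point in the listing.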
3939 vstruct_t vs,
3945 cluster = atop_32(offset) >> vs->vs_clshift;
3946 if (vs->vs_indirect) {
3950 vsmap = vs->vs_imap[ind_block];
3954 vsmap = vs->vs_dmap;
3961 vstruct_t vs,
4009 vsmap_ptr = vs_get_map_entry(vs, offset);
4010 actual_offset = ps_clmap(vs, offset, &clmap, CL_FIND, 0, 0);
4021 clsize = vm_page_size << vs->vs_clshift;
4065 ps_clunmap(vs, offset, unavail_size);
4068 if((offset & ((vm_page_size << vs->vs_clshift) - 1))
4125 if(vs_cluster_write(vs, upl, offset,
4130 ps_clunmap(vs, offset, size);
4137 ((vm_page_size << vs->vs_clshift)
4144 ps_clunmap(vs, offset, size);
4149 ps_clunmap(vs, offset, size);
4171 ps_clunmap(vs, offset, size);
4186 ps_clunmap(vs, offset, size);
4561 * Monitor the amount of available backing store vs. the amount of