Lines matching references to wbp, the per-file struct cl_writebehind * write-behind context (ubc->cl_wbehind)

430 struct cl_writebehind *wbp;
434 if ((wbp = ubc->cl_wbehind) == NULL) {
439 MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK);
441 bzero(wbp, sizeof *wbp);
442 lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr);
447 ubc->cl_wbehind = wbp;
449 lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
450 FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
451 wbp = ubc->cl_wbehind;
456 lck_mtx_lock(&wbp->cl_lockw);
458 return (wbp);
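
Lines 430-458 above are the lazy creation of the per-file write-behind context: if ubc->cl_wbehind is NULL, a cl_writebehind is zone-allocated, zeroed, and its lock initialized, then the pointer is re-checked under the vnode lock; if another thread installed one first, the fresh copy is torn down (449-450) and the winner's pointer is adopted (451), and a CLW_RETURNLOCKED caller gets the context back with cl_lockw already held (456). A minimal user-space sketch of the same check/allocate/recheck pattern, assuming pthreads in place of lck_mtx, malloc in place of MALLOC_ZONE, and heavily simplified structures (get_wbp and the struct layouts here are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    /* simplified stand-ins for the kernel structures in the listing above */
    struct cl_writebehind {
        pthread_mutex_t  cl_lockw;     /* protects the delayed-write state below      */
        void            *cl_scmap;     /* opaque sparse dirty-region map, when in use */
        int              cl_number;    /* count of clusters in the fixed array        */
    };

    struct ubc_info {
        pthread_mutex_t         v_lock;      /* plays the role of the vnode lock      */
        struct cl_writebehind  *cl_wbehind;  /* lazily allocated write-behind context */
    };

    #define CLW_ALLOCATE      0x01
    #define CLW_RETURNLOCKED  0x02

    /*
     * Lazily allocate the write-behind context, tolerating a race with another
     * thread doing the same thing for the same file (lines 434-451 above).
     */
    static struct cl_writebehind *
    get_wbp(struct ubc_info *ubc, int flags)
    {
        struct cl_writebehind *wbp;

        if ((wbp = ubc->cl_wbehind) == NULL) {
            if (!(flags & CLW_ALLOCATE))
                return NULL;               /* caller only wanted an existing context */

            /* allocate and initialize outside the vnode lock */
            if ((wbp = malloc(sizeof *wbp)) == NULL)
                return NULL;               /* the kernel's M_WAITOK cannot fail */
            memset(wbp, 0, sizeof *wbp);
            pthread_mutex_init(&wbp->cl_lockw, NULL);

            pthread_mutex_lock(&ubc->v_lock);
            if (ubc->cl_wbehind == NULL) {
                ubc->cl_wbehind = wbp;     /* we won the race: publish ours */
            } else {
                /* lost the race: tear ours down, adopt the winner's */
                pthread_mutex_destroy(&wbp->cl_lockw);
                free(wbp);
                wbp = ubc->cl_wbehind;
            }
            pthread_mutex_unlock(&ubc->v_lock);
        }
        if (flags & CLW_RETURNLOCKED)
            pthread_mutex_lock(&wbp->cl_lockw);

        return wbp;
    }

The re-check under the lock matters because two writers can both observe cl_wbehind == NULL; only one publication survives, and the loser destroys its freshly initialized lock before freeing the memory, exactly as lines 449-451 do.
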
465 struct cl_writebehind *wbp;
467 if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
469 if (wbp->cl_number) {
470 lck_mtx_lock(&wbp->cl_lockw);
472 cluster_try_push(wbp, vp, newEOF, PUSH_ALL | PUSH_SYNC, 0, callback, callback_arg);
474 lck_mtx_unlock(&wbp->cl_lockw);
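
Lines 465-474 are a synchronous flush helper: the context is looked up without CLW_ALLOCATE (a file that never accumulated delayed writes has nothing to push), cl_number is peeked before paying for the lock, and only then is cl_lockw taken and every cluster pushed with PUSH_ALL | PUSH_SYNC. A sketch of that shape, reusing the stand-in types from the previous sketch (push_all is a hypothetical placeholder for the cluster_try_push call at 472):

    /* hypothetical stand-in for cluster_try_push(wbp, vp, newEOF, PUSH_ALL | PUSH_SYNC, ...) */
    static void
    push_all(struct cl_writebehind *wbp)
    {
        wbp->cl_number = 0;     /* every delayed cluster issued and forgotten */
    }

    static void
    syncup(struct ubc_info *ubc)
    {
        struct cl_writebehind *wbp;

        /* flags == 0: do not allocate a context just to find it empty */
        if ((wbp = get_wbp(ubc, 0)) != NULL) {
            /*
             * cl_number is read unlocked as a cheap hint; the push itself
             * runs with cl_lockw held, as on lines 470-474.
             */
            if (wbp->cl_number) {
                pthread_mutex_lock(&wbp->cl_lockw);
                push_all(wbp);
                pthread_mutex_unlock(&wbp->cl_lockw);
            }
        }
    }
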
2768 struct cl_writebehind *wbp;
3130 wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
3132 if (wbp->cl_scmap) {
3139 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
3141 lck_mtx_unlock(&wbp->cl_lockw);
3152 wbp->cl_number = 0;
3154 sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg);
3165 if (write_off == wbp->cl_last_write)
3166 wbp->cl_seq_written += write_cnt;
3168 wbp->cl_seq_written = write_cnt;
3170 wbp->cl_last_write = write_off + write_cnt;
3174 if (wbp->cl_number == 0)
3180 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
3188 if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) {
3192 if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3197 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr)
3201 wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
3204 if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3215 wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
3217 cl.b_addr = wbp->cl_clusters[cl_index].e_addr;
3231 if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) {
3238 wbp->cl_clusters[cl_index].b_addr = cl.b_addr;
3240 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) {
3248 wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
3262 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
3268 wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;
3270 cl.e_addr = wbp->cl_clusters[cl_index].b_addr;
3281 if (cl_index < wbp->cl_number)
3289 wbp->cl_number == MAX_CLUSTERS &&
3290 wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) {
3299 cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg);
3301 if (wbp->cl_number < MAX_CLUSTERS) {
3326 ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg);
3338 sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg);
3339 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
3341 lck_mtx_unlock(&wbp->cl_lockw);
3346 wbp->cl_clusters[wbp->cl_number].b_addr = cl.b_addr;
3347 wbp->cl_clusters[wbp->cl_number].e_addr = cl.e_addr;
3349 wbp->cl_clusters[wbp->cl_number].io_flags = 0;
3352 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
3355 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
3357 wbp->cl_number++;
3359 lck_mtx_unlock(&wbp->cl_lockw);
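
The block above (2768, 3130-3359) is the delayed-write bookkeeping. A freshly dirtied page range cl = [b_addr, e_addr) is either added to the sparse dirty-region map when one already exists (3132-3141), or merged into the fixed array of at most MAX_CLUSTERS clusters, each capped at max_cluster_pgcount pages (3180-3281); only when no slot can absorb it does the code push clusters out or switch to the sparse representation (3289-3341), and otherwise a new slot is claimed and its io_flags recorded (3346-3357). The cl_last_write/cl_seq_written counters (3165-3170) track whether the file is being written sequentially, which steers that push-or-go-sparse decision. Below is a simplified sketch of just the merge step, roughly corresponding to the forward merge at 3188-3201, the backward merge at 3231-3248, and the new-slot fallback at 3301 and 3346-3357; the partial-absorption cases (3204-3217, 3262-3270), the sequential-write heuristic, and io_flags are left out, and all names and constants here are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_CLUSTERS          8     /* illustrative; the kernel constant differs by release */
    #define MAX_CLUSTER_PGCOUNT  32     /* per-cluster cap in pages (max_cluster_pgcount)       */

    typedef uint64_t page_addr_t;       /* page-granular address, like b_addr/e_addr above */

    struct cl_extent {                  /* a dirty range [b_addr, e_addr), in pages */
        page_addr_t b_addr;
        page_addr_t e_addr;
    };

    /*
     * Try to absorb a freshly dirtied range into the fixed cluster array.
     * Returns false when nothing fits and no slot is free, i.e. where the real
     * code would push clusters out or switch to the sparse map (3289-3341).
     */
    static bool
    try_record_range(struct cl_extent clusters[], int *cl_number, struct cl_extent cl)
    {
        for (int i = 0; i < *cl_number; i++) {
            /* forward merge (3188-3201): starts at or after the cluster start
             * and still ends under the per-cluster cap, so grow the tail */
            if (cl.b_addr >= clusters[i].b_addr &&
                cl.e_addr <= clusters[i].b_addr + MAX_CLUSTER_PGCOUNT) {
                if (cl.e_addr > clusters[i].e_addr)
                    clusters[i].e_addr = cl.e_addr;
                return true;
            }
            /* backward merge (3231-3248): ends inside the cluster and the
             * combined span still fits under the cap, so grow the head */
            if (cl.e_addr >= clusters[i].b_addr &&
                cl.e_addr <= clusters[i].e_addr &&
                clusters[i].e_addr - cl.b_addr <= MAX_CLUSTER_PGCOUNT) {
                if (cl.b_addr < clusters[i].b_addr)
                    clusters[i].b_addr = cl.b_addr;
                return true;
            }
        }
        /* no existing cluster could absorb it: start a new one if a slot is free */
        if (*cl_number < MAX_CLUSTERS) {
            clusters[(*cl_number)++] = cl;
            return true;
        }
        return false;
    }

A false return is the interesting case: in the real code that is where cluster_try_push or sparse_cluster_switch/sparse_cluster_add take over.
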
5017 struct cl_writebehind *wbp;
5027 if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
5031 if (wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
5032 lck_mtx_unlock(&wbp->cl_lockw);
5038 wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
5047 while (wbp->cl_sparse_wait) {
5050 msleep((caddr_t)&wbp->cl_sparse_wait, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
5056 wbp->cl_sparse_wait = 1;
5064 while (wbp->cl_sparse_pushes) {
5067 msleep((caddr_t)&wbp->cl_sparse_pushes, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
5072 if (wbp->cl_scmap) {
5075 if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
5077 scmap = wbp->cl_scmap;
5078 wbp->cl_scmap = NULL;
5080 wbp->cl_sparse_pushes++;
5082 lck_mtx_unlock(&wbp->cl_lockw);
5086 lck_mtx_lock(&wbp->cl_lockw);
5088 wbp->cl_sparse_pushes--;
5090 if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0)
5091 wakeup((caddr_t)&wbp->cl_sparse_pushes);
5093 sparse_cluster_push(&(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags | IO_PASSIVE, callback, callback_arg);
5097 retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags | IO_PASSIVE, callback, callback_arg);
5099 lck_mtx_unlock(&wbp->cl_lockw);
5110 lck_mtx_lock(&wbp->cl_lockw);
5112 wbp->cl_sparse_wait = 0;
5113 wakeup((caddr_t)&wbp->cl_sparse_wait);
5115 lck_mtx_unlock(&wbp->cl_lockw);
5118 wbp->cl_scmap, wbp->cl_number, retval, 0, 0);
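
Lines 5017-5118 are the flush entry point (the msleep strings name it cluster_push_ext): a synchronous caller sets cl_sparse_wait and msleeps until every in-flight sparse push has drained, while other callers detach cl_scmap, bump cl_sparse_pushes, and drop cl_lockw for the duration of the I/O (5075-5082), retaking it afterwards to decrement the counter and wake any waiting drainer (5086-5091); once SPARSE_PUSH_LIMIT detached pushes are in flight, the push happens with the lock held instead (5093). A condensed user-space sketch of that hand-off, assuming a pthread condition variable where the kernel uses msleep/wakeup and a fresh minimal context struct (wb_ctx, flush, and issue_sparse_io are illustrative names):

    #include <pthread.h>
    #include <stddef.h>

    #define SPARSE_PUSH_LIMIT 4             /* illustrative cap on concurrent detached pushes */

    struct wb_ctx {
        pthread_mutex_t lockw;          /* plays the role of cl_lockw                   */
        pthread_cond_t  cv;             /* stands in for the msleep/wakeup channels     */
        void           *scmap;          /* sparse dirty-region map, if any              */
        int             sparse_pushes;  /* pushes currently running with lockw dropped  */
        int             sparse_wait;    /* a synchronous flush wants everything drained */
    };

    static void issue_sparse_io(void *scmap) { (void)scmap; }   /* stand-in for the real push */

    static void
    flush(struct wb_ctx *wbp, int synchronous)
    {
        pthread_mutex_lock(&wbp->lockw);

        if (synchronous) {
            /* serialize with other synchronous flushes, then drain detached pushes */
            while (wbp->sparse_wait)
                pthread_cond_wait(&wbp->cv, &wbp->lockw);
            wbp->sparse_wait = 1;
            while (wbp->sparse_pushes)
                pthread_cond_wait(&wbp->cv, &wbp->lockw);
        }
        if (wbp->scmap != NULL && wbp->sparse_pushes < SPARSE_PUSH_LIMIT) {
            /* detach the map so the I/O can run without holding lockw */
            void *scmap = wbp->scmap;

            wbp->scmap = NULL;
            wbp->sparse_pushes++;
            pthread_mutex_unlock(&wbp->lockw);

            issue_sparse_io(scmap);

            pthread_mutex_lock(&wbp->lockw);
            wbp->sparse_pushes--;
            if (wbp->sparse_wait && wbp->sparse_pushes == 0)
                pthread_cond_broadcast(&wbp->cv);       /* wake a waiting drainer */
        } else if (wbp->scmap != NULL) {
            /* too many detached pushes in flight: push with the lock held */
            issue_sparse_io(wbp->scmap);
            wbp->scmap = NULL;
        }
        if (synchronous) {
            wbp->sparse_wait = 0;
            pthread_cond_broadcast(&wbp->cv);           /* admit the next synchronous flush */
        }
        pthread_mutex_unlock(&wbp->lockw);
    }
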
5127 struct cl_writebehind *wbp;
5130 if ((wbp = ubc->cl_wbehind)) {
5132 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);
5134 if (wbp->cl_scmap)
5135 vfs_drt_control(&(wbp->cl_scmap), 0);
5142 if (wbp != NULL) {
5143 lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
5144 FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
5153 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
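
Lines 5127-5153 are the teardown path run when the file's ubc_info goes away: any remaining sparse dirty-region map is handed back through vfs_drt_control (5134-5135), then the cluster lock is destroyed and the structure returned to its zone (5142-5144); the trailing debug line also mentions rap, the read-ahead context torn down by the same function, which this listing does not otherwise show. A minimal sketch of the write-behind half, reusing the stand-ins from the first sketch (release_scmap is a hypothetical placeholder for vfs_drt_control(&wbp->cl_scmap, 0)):

    /* hypothetical stand-in for vfs_drt_control(&wbp->cl_scmap, 0) */
    static void
    release_scmap(void **scmap)
    {
        *scmap = NULL;          /* the real call releases the dirty-region tracking state */
    }

    static void
    release_wbp(struct ubc_info *ubc)
    {
        struct cl_writebehind *wbp = ubc->cl_wbehind;

        if (wbp != NULL) {
            if (wbp->cl_scmap)
                release_scmap(&wbp->cl_scmap);      /* give back the sparse map first  */
            pthread_mutex_destroy(&wbp->cl_lockw);  /* nothing can still hold cl_lockw */
            free(wbp);                              /* FREE_ZONE in the kernel         */
            ubc->cl_wbehind = NULL;
        }
    }
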
5158 cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg)
5174 if (wbp->cl_number == 0)
5183 * and clear wbp->cl_number so that new clusters can
5186 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
5187 for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
5188 if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr)
5192 else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr)
5198 l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
5199 l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
5200 l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;
5202 wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
5204 wbp->cl_number = 0;
5274 if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
5284 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
5289 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
5290 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
5291 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
5298 wbp->cl_number = cl_index1;
5304 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
5310 * represented by wbp->cl_number
5312 for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
5316 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
5317 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
5318 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
5325 wbp->cl_number = cl_index1;
5328 return (MAX_CLUSTERS - wbp->cl_number);
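
cluster_try_push (5158-5328) starts by snapshotting the shared array into a local copy ordered by b_addr: a selection sort that repeatedly picks the remaining cluster with the smallest b_addr (5186-5200), marks the source slot consumed by setting b_addr equal to e_addr (5202), and then zeroes cl_number (5204) so new delayed writes can accumulate while the snapshot is pushed; clusters that could not be pushed are later re-inserted after any newcomers, or moved to the sparse map if they no longer fit (5274-5325), and the return value is the number of free slots (5328). A sketch of just the snapshot step, reusing the cl_extent type from the merge sketch (io_flags are omitted):

    /*
     * Copy the shared cluster array into a private, b_addr-ordered snapshot and
     * empty the shared array, as lines 5186-5204 do under cl_lockw.
     */
    static int
    snapshot_sorted(struct cl_extent clusters[], int *cl_number,
                    struct cl_extent l_clusters[])
    {
        int copied = 0;

        for (int pass = 0; pass < *cl_number; pass++) {
            int min_index = -1;

            for (int i = 0; i < *cl_number; i++) {
                if (clusters[i].b_addr == clusters[i].e_addr)
                    continue;               /* already consumed (or empty) */
                if (min_index == -1 ||
                    clusters[i].b_addr < clusters[min_index].b_addr)
                    min_index = i;
            }
            if (min_index == -1)
                break;

            l_clusters[copied++] = clusters[min_index];
            /* mark the source slot consumed without shifting the array */
            clusters[min_index].b_addr = clusters[min_index].e_addr;
        }
        *cl_number = 0;     /* new clusters can now accumulate during the push */
        return copied;      /* number of snapshot entries to push */
    }
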
5507 sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
5511 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, vp, wbp->cl_scmap, 0, 0, 0);
5513 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
5517 for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
5523 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg);
5528 wbp->cl_number = 0;
5530 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, vp, wbp->cl_scmap, 0, 0, 0);
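
sparse_cluster_switch (5507-5530) converts the fixed-array representation into the sparse dirty-region map: every page of every recorded cluster is fed to sparse_cluster_add one page at a time (5513-5523), after which cl_number is zeroed (5528) because the array no longer owns any state. A sketch of that conversion, reusing cl_extent and page_addr_t from the merge sketch; sparse_add_page is a hypothetical stand-in for the per-page sparse_cluster_add call, and the dirty-page check the real loop performs is omitted:

    /* hypothetical stand-in for sparse_cluster_add(&scmap, vp, &cl, ...) on a one-page extent */
    static void
    sparse_add_page(void **scmap, page_addr_t page)
    {
        (void)scmap;
        (void)page;             /* the real map is maintained by the vfs_drt_* machinery */
    }

    static void
    switch_to_sparse(void **scmap, struct cl_extent clusters[], int *cl_number)
    {
        for (int i = 0; i < *cl_number; i++) {
            /* replay each recorded cluster into the sparse map, page by page */
            for (page_addr_t page = clusters[i].b_addr;
                 page < clusters[i].e_addr; page++)
                sparse_add_page(scmap, page);
        }
        *cl_number = 0;         /* the array representation is now empty */
    }
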