Lines Matching refs:dirty

31 * Different methods for tracking dirty:
32 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
43 * similarly a certain percentage of dirty pages trigger a transition to
54 * @start: First currently dirty bit
55 * @end: Last currently dirty bit + 1
56 * @method: The currently used dirty method
62 * dirty page.
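
Pieced together from the field documentation above, the tracking structure looks roughly like this. A sketch only, reconstructed from the @-tags rather than copied from the file; in the driver the page offsets are pgoff_t, and members not matched by this listing are omitted:

    enum vmw_bo_dirty_method {
        VMW_BO_DIRTY_PAGETABLE,  /* scan page tables for hardware dirty bits */
        VMW_BO_DIRTY_MKWRITE,    /* write-protect PTEs; record writes in mkwrite() */
    };

    struct vmw_bo_dirty {
        unsigned long start;        /* first currently dirty bit */
        unsigned long end;          /* last currently dirty bit + 1 */
        enum vmw_bo_dirty_method method;
        unsigned int change_count;  /* consecutive method-change triggers */
        unsigned int ref_count;
        unsigned long bitmap_size;  /* bits in the bitmap; one bit per page */
        unsigned long bitmap[];     /* a set bit means a dirty page */
    };
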
75 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
78 * Scans the pagetable for dirty bits. Clears those bits and modifies the
79 * dirty structure with the results. This function may change the
80 * dirty-tracking method.
84 struct vmw_bo_dirty *dirty = vbo->dirty;
91 offset, dirty->bitmap_size,
92 offset, &dirty->bitmap[0],
93 &dirty->start, &dirty->end);
95 dirty->change_count++;
97 dirty->change_count = 0;
99 if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
100 dirty->change_count = 0;
101 dirty->method = VMW_BO_DIRTY_MKWRITE;
103 offset, dirty->bitmap_size);
105 offset, dirty->bitmap_size,
106 offset, &dirty->bitmap[0],
107 &dirty->start, &dirty->end);
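
Lines 95-107 implement a hysteresis around the method switch: a scan that records no newly dirtied pages bumps change_count, any activity resets it, and once the counter exceeds VMW_DIRTY_NUM_CHANGE_TRIGGERS the method flips to mkwrite, where trapping the few remaining writers individually beats repeated full scans. A minimal userspace sketch of that counter logic; the trigger value and all names here are stand-ins, not the driver's:

    #include <stdio.h>

    /* Stand-in for VMW_DIRTY_NUM_CHANGE_TRIGGERS; the driver's value may differ. */
    #define NUM_CHANGE_TRIGGERS 2

    enum method { PAGETABLE, MKWRITE };

    struct tracker {
        enum method method;
        unsigned int change_count;
    };

    /* Quiet scans accumulate; any dirty page found resets the counter. */
    static void scan_pagetable(struct tracker *t, unsigned long num_marked)
    {
        if (num_marked == 0)
            t->change_count++;
        else
            t->change_count = 0;

        if (t->change_count > NUM_CHANGE_TRIGGERS) {
            t->change_count = 0;
            t->method = MKWRITE;
        }
    }

    int main(void)
    {
        struct tracker t = { PAGETABLE, 0 };
        unsigned long found[] = { 5, 0, 0, 0 };  /* dirty pages found per scan */

        for (unsigned int i = 0; i < 4; i++) {
            scan_pagetable(&t, found[i]);
            printf("scan %u: change_count=%u method=%s\n", i,
                   t.change_count,
                   t.method == PAGETABLE ? "PAGETABLE" : "MKWRITE");
        }
        return 0;
    }
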
112 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
118 * This function may change the dirty-tracking method.
122 struct vmw_bo_dirty *dirty = vbo->dirty;
127 if (dirty->end <= dirty->start)
131 dirty->start + offset,
132 dirty->end - dirty->start);
134 if (100UL * num_marked / dirty->bitmap_size >
136 dirty->change_count++;
138 dirty->change_count = 0;
140 if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
142 pgoff_t end = dirty->bitmap_size;
144 dirty->method = VMW_BO_DIRTY_PAGETABLE;
146 &dirty->bitmap[0],
148 bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
149 if (dirty->start < dirty->end)
150 bitmap_set(&dirty->bitmap[0], dirty->start,
151 dirty->end - dirty->start);
152 dirty->change_count = 0;
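
Lines 134-152 are the reverse transition: when more than VMW_DIRTY_PERCENTAGE percent of the pages had to be re-write-protected for several consecutive scans, per-write faults cost more than a page-table scan, so the method flips back, and lines 148-151 re-mark the accumulated span in the bitmap so nothing is lost across the switch. The threshold test itself is pure integer math; a sketch with an assumed percentage:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for VMW_DIRTY_PERCENTAGE; assumed value, not the driver's. */
    #define DIRTY_PERCENTAGE 10

    /* "More than N percent dirty?" in integer math, as on line 134 above.
     * 100UL * num_marked is computed before the division, so small counts
     * don't truncate to zero percent. */
    static bool over_threshold(unsigned long num_marked, unsigned long bitmap_size)
    {
        return 100UL * num_marked / bitmap_size > DIRTY_PERCENTAGE;
    }

    int main(void)
    {
        printf("%d\n", over_threshold(5, 100));   /* 5%: prints 0 */
        printf("%d\n", over_threshold(25, 100));  /* 25%: prints 1 */
        return 0;
    }
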
157 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
161 * This function may change the dirty-tracking method.
165 struct vmw_bo_dirty *dirty = vbo->dirty;
167 if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
174 * vmw_bo_dirty_pre_unmap - Write-protect and pick up dirty pages before
180 * If we're using the _PAGETABLE scan method, we may leak dirty pages
182 * up all dirty pages.
187 struct vmw_bo_dirty *dirty = vbo->dirty;
191 if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
197 &dirty->bitmap[0], &dirty->start,
198 &dirty->end);
221 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
224 * This function registers a dirty-tracking user with a buffer object.
232 struct vmw_bo_dirty *dirty = vbo->dirty;
237 if (dirty) {
238 dirty->ref_count++;
242 size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
243 dirty = kvzalloc(size, GFP_KERNEL);
244 if (!dirty) {
249 dirty->bitmap_size = num_pages;
250 dirty->start = dirty->bitmap_size;
251 dirty->end = 0;
252 dirty->ref_count = 1;
254 dirty->method = VMW_BO_DIRTY_PAGETABLE;
259 dirty->method = VMW_BO_DIRTY_MKWRITE;
261 /* Write-protect and then pick up already dirty bits */
265 &dirty->bitmap[0],
266 &dirty->start, &dirty->end);
269 vbo->dirty = dirty;
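
Line 242 sizes the header and the bitmap as a single zeroed allocation: a flexible array member plus BITS_TO_LONGS() rounding. Lines 250-251 then encode the empty span as start == bitmap_size, end == 0, so the later min/max updates need no separate "empty" flag. A userspace equivalent, with calloc() standing in for kvzalloc():

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace stand-in for the kernel's BITS_TO_LONGS(): longs needed to
     * hold nr bits, rounded up. */
    #define BITS_TO_LONGS(nr) \
        (((nr) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

    struct dirty {
        unsigned long start, end;
        unsigned long bitmap_size;
        unsigned long bitmap[];     /* flexible array member */
    };

    static struct dirty *dirty_alloc(unsigned long num_pages)
    {
        /* One zeroed allocation covers header plus bitmap, like kvzalloc(). */
        struct dirty *d = calloc(1, sizeof(*d) +
                                    BITS_TO_LONGS(num_pages) * sizeof(long));

        if (!d)
            return NULL;
        d->bitmap_size = num_pages;
        d->start = num_pages;   /* empty span: start past end, as on line 250 */
        d->end = 0;
        return d;
    }

    int main(void)
    {
        struct dirty *d = dirty_alloc(1000);

        if (!d)
            return 1;
        printf("pages=%lu, bitmap longs=%zu\n", d->bitmap_size,
               BITS_TO_LONGS(d->bitmap_size));
        free(d);
        return 0;
    }
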
278 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
281 * This function releases a dirty-tracking user from a buffer object.
282 * If the reference count reaches zero, then the dirty-tracking object is
289 struct vmw_bo_dirty *dirty = vbo->dirty;
291 if (dirty && --dirty->ref_count == 0) {
292 kvfree(dirty);
293 vbo->dirty = NULL;
298 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
302 * This function will pick up all dirty ranges affecting the resource from
305 * dirty tracking.
310 struct vmw_bo_dirty *dirty = vbo->dirty;
319 if (res_start >= dirty->end || res_end <= dirty->start)
322 cur = max(res_start, dirty->start);
323 res_end = min(res_end, dirty->end);
327 start = find_next_bit(&dirty->bitmap[0], res_end, cur);
331 end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
334 bitmap_clear(&dirty->bitmap[0], start, num);
338 if (res_start <= dirty->start && res_end > dirty->start)
339 dirty->start = res_end;
340 if (res_start < dirty->end && res_end >= dirty->end)
341 dirty->end = res_start;
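
The loop at lines 327-334 walks the bitmap as alternating find_next_bit()/find_next_zero_bit() calls, handing each contiguous dirty run to vmw_resource_dirty_update() and clearing it. A self-contained sketch using naive single-word stand-ins for the kernel helpers (the real ones take arrays of longs):

    #include <stdio.h>

    #define NBITS 16

    /* Naive userspace versions of the kernel's find_next_bit() and
     * find_next_zero_bit(), limited to one word for this sketch. */
    static unsigned int find_next_bit(unsigned long map, unsigned int size,
                                      unsigned int off)
    {
        for (; off < size; off++)
            if (map & (1UL << off))
                return off;
        return size;
    }

    static unsigned int find_next_zero_bit(unsigned long map, unsigned int size,
                                           unsigned int off)
    {
        for (; off < size; off++)
            if (!(map & (1UL << off)))
                return off;
        return size;
    }

    int main(void)
    {
        unsigned long map = 0x670C;  /* pages 2,3 / 8,9,10 / 13,14 dirty */
        unsigned int cur = 0;

        while (cur < NBITS) {
            unsigned int start = find_next_bit(map, NBITS, cur);
            unsigned int end;

            if (start >= NBITS)
                break;
            end = find_next_zero_bit(map, NBITS, start + 1);
            printf("dirty run: [%u, %u)\n", start, end);  /* -> dirty_update */
            cur = end + 1;  /* bit 'end' is known clear, so skipping it is safe */
        }
        return 0;
    }
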
345 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
349 * This function will clear all dirty ranges affecting the resource from
350 * its backup mob's dirty tracking.
357 struct vmw_bo_dirty *dirty = vbo->dirty;
362 if (res_start >= dirty->end || res_end <= dirty->start)
365 res_start = max(res_start, dirty->start);
366 res_end = min(res_end, dirty->end);
367 bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);
369 if (res_start <= dirty->start && res_end > dirty->start)
370 dirty->start = res_end;
371 if (res_start < dirty->end && res_end >= dirty->end)
372 dirty->end = res_start;
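
Lines 362-372 show the span bookkeeping that both transfer and clear rely on: reject non-overlapping requests, clamp to the tracked range (max on the start and min on the end yields the intersection of the half-open intervals), then pull in whichever edge the request covers. A sketch:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    struct span { unsigned long start, end; };  /* half-open [start, end) */

    /* Clamp [rs, re) to the tracked span, then pull the span's edges inward
     * when the cleared range covers them, mirroring lines 362-372 above. */
    static void clear_range(struct span *s, unsigned long rs, unsigned long re)
    {
        if (rs >= s->end || re <= s->start)
            return;  /* no overlap with the tracked span */

        rs = MAX(rs, s->start);
        re = MIN(re, s->end);
        /* ... bitmap_clear(bitmap, rs, re - rs) would go here ... */

        if (rs <= s->start && re > s->start)
            s->start = re;  /* range covered the low edge */
        if (rs < s->end && re >= s->end)
            s->end = rs;    /* range covered the high edge */
    }

    int main(void)
    {
        struct span s = { 10, 50 };

        clear_range(&s, 0, 20);   /* clears [10,20): span becomes [20,50) */
        printf("[%lu, %lu)\n", s.start, s.end);
        clear_range(&s, 40, 60);  /* clears [40,50): span becomes [20,40) */
        printf("[%lu, %lu)\n", s.start, s.end);
        return 0;
    }

When the cleared range is strictly interior, neither edge moves and [start, end) over-approximates the dirty set; that is harmless because the bitmap, not the span, is authoritative.
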
402 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
403 !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
404 struct vmw_bo_dirty *dirty = vbo->dirty;
406 __set_bit(page_offset, &dirty->bitmap[0]);
407 dirty->start = min(dirty->start, page_offset);
408 dirty->end = max(dirty->end, page_offset + 1);
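
The mkwrite path at lines 402-408 dirties exactly one page: set its bit and widen the [start, end) hint with min/max. A sketch of that update; set_page_dirty_bit() is an invented name, and the bit operation mimics the kernel's non-atomic __set_bit():

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define BITS_PER_LONG (8 * sizeof(long))

    struct tracker {
        unsigned long start, end;  /* [start, end) hint over dirty bits */
        unsigned long bitmap[4];   /* 256 pages for this sketch */
    };

    /* Mimics __set_bit() plus the span update at lines 406-408. */
    static void set_page_dirty_bit(struct tracker *t, unsigned long page)
    {
        t->bitmap[page / BITS_PER_LONG] |= 1UL << (page % BITS_PER_LONG);
        t->start = MIN(t->start, page);
        t->end = MAX(t->end, page + 1);
    }

    int main(void)
    {
        struct tracker t = { 256, 0, { 0 } };  /* empty: start past end */

        set_page_dirty_bit(&t, 42);
        set_page_dirty_bit(&t, 7);
        printf("span: [%lu, %lu)\n", t.start, t.end);  /* [7, 43) */
        return 0;
    }
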
433 if (vbo->dirty) {
451 * If we don't track dirty using the MKWRITE method, make sure
455 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)