Lines matching defs:pages (the leading number on each match is its line number in the matched source file)

19 				 struct sg_table *pages)
31 /* Make the pages coherent with the GPU (flushing any swapin). */
36 drm_clflush_sg(pages);
40 obj->mm.get_page.sg_pos = pages->sgl;
42 obj->mm.get_dma_page.sg_pos = pages->sgl;
45 obj->mm.pages = pages;
47 obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
55 * 64K or 4K pages, although in practice this will depend on a number of
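The matches above (file lines 19-55) come from the step that installs a freshly gathered scatter-gather table on a GEM object: flush the CPU caches so the GPU sees the data, point the cached scatterlist iterators at the new table, publish it in obj->mm.pages, and record which physical page sizes back it. A minimal sketch of that sequence, assuming the i915 internal headers for struct drm_i915_gem_object, drm_clflush_sg() and i915_sg_dma_sizes(); the function name is illustrative, and locking, error handling and the huge-page bookkeeping are omitted:

/* Sketch only; follows the matched lines, not the full driver code. */
static void set_pages_sketch(struct drm_i915_gem_object *obj,
			     struct sg_table *pages)
{
	/* Make the pages coherent with the GPU (flushing any swapin);
	 * the real code only flushes when the CPU cache is dirty. */
	drm_clflush_sg(pages);

	/* Reset the cached scatterlist iterators to the new table. */
	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_pos = pages->sgl;

	obj->mm.pages = pages;

	/* Record the physical page sizes (e.g. 64K or 4K) backing the object. */
	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
}
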
116 /* Ensure that the associated pages are gathered from the backing storage
119 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
120 * either as a result of memory pressure (reaping pages under the shrinker)
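The comment matched at file lines 116-120 states the pinning contract: gathering the pages from the backing storage pins them, and every successful pin must eventually be balanced by i915_gem_object_unpin_pages() so the shrinker can reap the pages again under memory pressure. A hedged usage sketch of a caller honouring that contract (the caller name is illustrative; object locking and the actual access in the middle are elided):

static int use_pages_sketch(struct drm_i915_gem_object *obj)
{
	int err;

	/* Gather and pin obj->mm.pages from the backing storage. */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	/* ... access the backing pages here ... */

	/* Drop the pin so the shrinker may reap the pages again. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}
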
211 struct sg_table *pages;
215 pages = fetch_and_zero(&obj->mm.pages);
216 if (IS_ERR_OR_NULL(pages))
217 return pages;
235 return pages;
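fetch_and_zero() at file line 215 is an ownership-transfer idiom: it reads obj->mm.pages, stores NULL back, and returns the old value, so exactly one caller ends up holding the table (or the ERR_PTR placeholder). It is a plain read-then-store, not an atomic operation, and relies on the caller's locking. An open-coded sketch of what the matched lines do (the name is illustrative; iterator and page-size resets are omitted):

static struct sg_table *
unset_pages_sketch(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages = obj->mm.pages;	/* fetch ... */

	obj->mm.pages = NULL;			/* ... and zero */
	if (IS_ERR_OR_NULL(pages))
		return pages;

	/* The real code also resets the cached sg iterators and the
	 * recorded page sizes before handing the table back. */
	return pages;
}
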
240 struct sg_table *pages;
255 pages = __i915_gem_object_unset_pages(obj);
259 * NULL pages. In the future, when we have more asynchronous
263 if (!IS_ERR_OR_NULL(pages))
264 obj->ops->put_pages(obj, pages);
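File lines 240-264 are the release path built on top of that: the table is detached from the object first, and only a real table (neither NULL nor an ERR_PTR) is handed to the backend's put_pages() hook; the comment at 259 notes that NULL has to be tolerated until page release becomes more asynchronous. A sketch of that control flow, locking elided:

static void put_pages_sketch(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = __i915_gem_object_unset_pages(obj);

	/* The object may have no pages, or an error placeholder;
	 * only a real sg_table reaches the backend. */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);
}
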
274 struct page *stack[32], **pages = stack, *page;
286 * vmap) to provide virtual mappings of the high pages.
301 if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
302 return page_address(sg_page(obj->mm.pages->sgl));
312 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
313 if (!pages)
318 for_each_sgt_page(page, iter, obj->mm.pages)
319 pages[i++] = page;
320 vaddr = vmap(pages, n_pages, 0, pgprot);
321 if (pages != stack)
322 kvfree(pages);
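File lines 274-322 are the CPU-mapping core: a small on-stack array of page pointers covers the common case, a single low-memory page is returned directly via page_address(), and larger objects fall back to kvmalloc_array() plus vmap(). A condensed sketch of that pattern, assuming the i915 for_each_sgt_page() iterator and struct sgt_iter; the pgprot selection (writeback vs write-combined) and error unwinding are omitted:

static void *map_pages_sketch(struct drm_i915_gem_object *obj,
			      unsigned long n_pages, pgprot_t pgprot)
{
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	unsigned long i = 0;
	void *vaddr;

	/* A single page outside highmem already has a kernel address. */
	if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
		return page_address(sg_page(obj->mm.pages->sgl));

	/* Too many pages for the on-stack array: allocate a bigger one. */
	if (n_pages > ARRAY_SIZE(stack)) {
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	/* Collect every struct page from the sg_table, then vmap them. */
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;

	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr;
}
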
348 for_each_sgt_daddr(addr, iter, obj->mm.pages)
357 /* get, pin, and map the pages of the object into kernel space */
401 * pages should be allocated and mapped as write-combined only.
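The match at 348 is the I/O-memory variant of the same collection loop, iterating DMA addresses with for_each_sgt_daddr() instead of struct pages; lines 357 and 401 then give the caller-facing interface: i915_gem_object_pin_map() gets, pins and maps the pages in one call, with the comment at 401 covering the case where the pages should be mapped write-combined only. A hedged usage sketch (the function name and the memcpy payload are illustrative; object locking is elided):

static int fill_object_sketch(struct drm_i915_gem_object *obj,
			      const void *src, size_t len)
{
	void *vaddr;

	/* Get, pin and map the pages as write-combined in one step. */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);

	i915_gem_object_unpin_map(obj);	/* balances the pin taken by pin_map() */
	return 0;
}
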
504 * We allow removing the mapping from underneath pinned pages!
560 * individual pages from this range, cancel updating the