Lines Matching defs:order

56  * @vaddr: original vaddr returned for the mapping, with the allocation order stored in the lower bits
85 /* Allocate pages of size 1 << order with the given gfp_flags */
87 unsigned int order)
94 /* Don't set the __GFP_COMP flag for higher order allocations.
98 if (order)
103 p = alloc_pages_node(pool->nid, gfp_flags, order);
105 p->private = order;
114 if (order)
117 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
130 dma->vaddr = (unsigned long)vaddr | order;
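Line 117 sizes the coherent buffer as (1ULL << order) * PAGE_SIZE, and line 130 then ORs the order into the returned vaddr: because the buffer is page aligned, its low bits are guaranteed to be zero, so a small integer fits there. A minimal sketch of the pack/unpack trick (variable names are illustrative, not taken from the file):

	/* Pack: a page-aligned pointer has its PAGE_SHIFT low bits clear,
	 * so the allocation order can be stored in them. */
	unsigned long packed = (unsigned long)vaddr | order;

	/* Unpack: mask the two halves apart again. */
	unsigned int  ord  = packed & ~PAGE_MASK;          /* low bits: order  */
	void         *addr = (void *)(packed & PAGE_MASK); /* high bits: vaddr */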
139 /* Reset the caching and pages of size 1 << order */
141 unsigned int order, struct page *p)
152 set_pages_wb(p, 1 << order);
156 __free_pages(p, order);
160 if (order)
165 dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
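On the free side the same block size is expressed two ways: set_pages_wb() takes a page count, so it gets 1 << order (line 152), while __free_pages() takes the order itself (line 156) and dma_free_attrs() takes the byte size (line 165). A hedged sketch of the non-DMA branch, assuming the caching check around it:

	/* Sketch: restore write-back caching over the whole block, then free. */
	if (caching != ttm_cached)
		set_pages_wb(p, 1 << order);   /* count of pages in the block */
	__free_pages(p, order);                /* frees all 1 << order pages  */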
173 gfp_t gfp_flags, unsigned int order,
192 if (bus_dmamap_create(dmat, (1ULL << order) * PAGE_SIZE, 1,
193 (1ULL << order) * PAGE_SIZE, 0, flags | dmaflags, &dma->map))
196 if (bus_dmamem_alloc_range(dmat, (1ULL << order) * PAGE_SIZE,
203 if (bus_dmamem_alloc(dmat, (1ULL << order) * PAGE_SIZE,
210 (1ULL << order) * PAGE_SIZE, flags)) {
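In the bus_dma variant the whole 2^order block is described by a single DMA segment: both the map size and the maximum segment size passed to bus_dmamap_create() are (1ULL << order) * PAGE_SIZE, so the block must stay physically contiguous. A sketch of that sizing (argument order per bus_dmamap_create(9); error handling elided):

	bus_size_t blksz = (bus_size_t)(1ULL << order) * PAGE_SIZE;

	/* One segment, as large as the whole block, no boundary constraint. */
	bus_dmamap_create(dmat, blksz, 1, blksz, 0, flags | dmaflags, &dma->map);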
233 unsigned int order, struct vm_page *p)
242 set_pages_wb(p, 1 << order);
278 /* Map pages of 1 << order size and fill the DMA address array */
279 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
290 size_t size = (1ULL << order) * PAGE_SIZE;
297 for (i = 1 << order; i ; --i) {
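ttm_pool_map() covers the whole block with one mapping of (1ULL << order) * PAGE_SIZE bytes (line 290) and then writes one entry per page into the caller's DMA address array (line 297). A sketch of that fan-out, wrapped in a hypothetical helper for readability:

	/* Sketch (fill_dma_addrs is hypothetical): spread one block-sized
	 * mapping across the per-page address array, one PAGE_SIZE at a time. */
	static void fill_dma_addrs(dma_addr_t addr, unsigned int order,
				   dma_addr_t **dma_addr)
	{
		unsigned int i;

		for (i = 1 << order; i; --i) {
			*(*dma_addr)++ = addr;   /* one entry per page          */
			addr += PAGE_SIZE;       /* next page, same mapping     */
		}
	}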
305 /* Unmap pages of 1 << order size */
319 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
329 for (i = 1 << order; i ; --i) {
347 unsigned int i, num_pages = 1 << pt->order;
364 atomic_long_add(1 << pt->order, &allocated_pages);
377 atomic_long_sub(1 << pt->order, &allocated_pages);
388 enum ttm_caching caching, unsigned int order)
392 pt->order = order;
413 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
422 /* Return the pool_type to use for the given caching and order */
425 unsigned int order)
428 return &pool->caching[caching].orders[order];
434 return &pool->caching[caching].orders[order];
437 return &global_dma32_write_combined[order];
439 return &global_write_combined[order];
442 return &pool->caching[caching].orders[order];
445 return &global_dma32_uncached[order];
447 return &global_uncached[order];
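The returns at lines 428-447 choose between the pool's own per-caching, per-order arrays and the shared global pools (write-combined and uncached, each with a DMA32 variant). A simplified sketch of how those branches might fit together, assuming flags such as use_dma_alloc and use_dma32 and omitting the per-NUMA-node cases visible at lines 434 and 442; the helper name is hypothetical:

	/* Sketch only; not a verbatim copy of the selection function. */
	static struct ttm_pool_type *
	select_type_sketch(struct ttm_pool *pool, enum ttm_caching caching,
			   unsigned int order)
	{
		if (pool->use_dma_alloc)
			return &pool->caching[caching].orders[order];

		if (caching == ttm_write_combined)
			return pool->use_dma32 ? &global_dma32_write_combined[order]
					       : &global_write_combined[order];

		if (caching == ttm_uncached)
			return pool->use_dma32 ? &global_dma32_uncached[order]
					       : &global_uncached[order];

		return NULL;   /* no pooling for this combination */
	}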
470 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
471 num_pages = 1 << pt->order;
481 /* Return the allocation order for a page */
496 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
506 r = ttm_pool_map(pool, order, p, dma_addr);
511 *num_pages -= 1 << order;
512 for (i = 1 << order; i; --i, ++(*pages), ++p, ++(*orders)) {
514 **orders = order;
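Once a block has been mapped (line 506), lines 511-514 do the bookkeeping: the outstanding page count drops by one block and the page and order output arrays advance one slot per page, each slot remembering the order of the block it belongs to so the free path can later hand whole blocks back. A hedged sketch of that step (the per-page **pages store is inferred, not shown in the listing):

	/* Sketch: consume one 2^order block from the request. */
	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p, ++(*orders)) {
		**pages  = p;      /* record every page of the block       */
		**orders = order;  /* tag each page with the block's order */
	}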
538 unsigned int order;
544 order = tt->orders[i];
545 nr = (1UL << order);
549 pt = ttm_pool_select_type(pool, caching, order);
553 ttm_pool_free_page(pool, caching, order, *pages);
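The free path at lines 538-553 walks the pages in block-sized strides: it reads the order recorded for the first page of each block from tt->orders[], uses 1UL << order as the stride, and either gives the block back to the matching pool or frees it outright when no pool applies. A sketch of that walk (ttm_pool_type_give() is assumed from the pool bookkeeping elsewhere in the file):

	/* Sketch: one iteration per 2^order block, not per page. */
	struct ttm_pool_type *pt;
	struct page **pages = tt->pages;
	unsigned int order;
	pgoff_t i, nr;

	for (i = 0; i < tt->num_pages; i += nr, pages += nr) {
		order = tt->orders[i];
		nr = 1UL << order;

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);   /* back to the pool */
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}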
579 unsigned int order;
600 for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
602 order = min_t(unsigned int, order, __fls(num_pages))) {
606 pt = ttm_pool_select_type(pool, tt->caching, order);
616 r = ttm_pool_page_allocated(pool, order, p,
624 if (num_pages < (1 << order))
632 while (num_pages >= (1 << order) &&
633 (p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat))) {
642 r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
651 if (order) {
652 --order;
667 ttm_pool_free_page(pool, page_caching, order, p);
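Lines 600-652 are the core of the allocator: start at the largest order worth trying (capped by MAX_ORDER and by __fls(num_pages), the position of the highest set bit in the remaining page count), satisfy as much of the request as possible at that block size, and drop the order by one whenever a block of the current size cannot be obtained, falling back to single pages in the worst case. A much simplified sketch of that control flow (try_block() and account_block() are hypothetical stand-ins; the unwind path at line 667 is omitted):

	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		/* try_block() is hypothetical: pool lookup first, then a
		 * fresh allocation of 1 << order pages. */
		p = try_block(pool, gfp_flags, order);
		if (p) {
			/* account_block() is hypothetical: map the pages,
			 * advance the output arrays, subtract 1 << order. */
			account_block(p, order, &num_pages);
			continue;
		}
		if (!order)
			break;   /* even single pages fail; report the error */
		--order;         /* retry the remainder with smaller blocks  */
	}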
810 /* Print a nice header for the order */