Lines Matching refs:pool

27  * A simple DMA pool loosely based on dmapool.c. It has certain advantages
79 * The pool structure. There are usually six pools:
87 * @type: Type of the pool
89 * used with irqsave/irqrestore variants because pool allocator may be called
98 * @nfrees: Stats when pool is shrinking.
99 * @nrefills: Stats when the pool is grown.
101 * @name: Name of the pool.
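
Taken together, the kerneldoc fragments above and the field accesses matched later in this listing imply a pool object roughly like the sketch below. The field names come straight from the matched lines; the types, flag names and array sizes are assumptions:

    struct dma_pool {
            struct list_head pools;         /* link in the device's dma_pools list */
            struct list_head free_list;     /* dma_pages ready for reuse */
            struct list_head inuse_list;    /* dma_pages currently handed out */
            spinlock_t lock;                /* irqsave/irqrestore variants only */
            enum pool_type type;            /* IS_WC / IS_UC / IS_CACHED ... flags */
            struct device *dev;
            unsigned npages_free;
            unsigned npages_in_use;
            unsigned long nfrees;           /* stats: pages released while shrinking */
            unsigned long nrefills;         /* stats: refill operations */
            gfp_t gfp_flags;
            unsigned size;                  /* PAGE_SIZE per allocation */
            char name[13];                  /* e.g. "wc dma"; size assumed */
            char dev_name[64];              /* snapshot of dev_name(dev); size assumed */
    };
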
138 * Limits for the pool. They are handled without locks because the only place where
153 * @dev: The 'struct device' associated with the 'pool'
154 * @pool: The 'struct dma_pool' associated with the 'dev'
159 struct dma_pool *pool;
167 * @options: Limits for the pool.
308 static int ttm_set_pages_caching(struct dma_pool *pool,
313 if (pool->type & IS_UC) {
317 pool->dev_name, cpages);
319 if (pool->type & IS_WC) {
323 pool->dev_name, cpages);
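
Lines 308-323 suggest ttm_set_pages_caching() simply dispatches on the pool type; a minimal sketch, assuming the x86 set_pages_array_uc()/set_pages_array_wc() helpers and abbreviating nothing but the surrounding includes:

    static int ttm_set_pages_caching(struct dma_pool *pool,
                                     struct page **pages, unsigned cpages)
    {
            int r = 0;

            /* Pages default to cached (WB); only UC and WC pools need work. */
            if (pool->type & IS_UC) {
                    r = set_pages_array_uc(pages, cpages);
                    if (r)
                            pr_err("%s: Failed to set %d pages to uc!\n",
                                   pool->dev_name, cpages);
            }
            if (pool->type & IS_WC) {
                    r = set_pages_array_wc(pages, cpages);
                    if (r)
                            pr_err("%s: Failed to set %d pages to wc!\n",
                                   pool->dev_name, cpages);
            }
            return r;
    }
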
328 static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
331 dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
336 static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
344 d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
346 pool->gfp_flags);
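
Lines 328-346 show the per-page alloc/free helpers are thin wrappers around the coherent DMA API. A sketch of the allocation side, assuming struct dma_page carries the kernel virtual address, the bus address and the struct page pointer:

    static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
    {
            struct dma_page *d_page;

            d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
            if (!d_page)
                    return NULL;

            d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
                                               &d_page->dma,
                                               pool->gfp_flags);
            if (!d_page->vaddr) {
                    kfree(d_page);
                    return NULL;
            }
            d_page->p = virt_to_page(d_page->vaddr);
            return d_page;
    }
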
371 static void ttm_pool_update_free_locked(struct dma_pool *pool,
374 pool->npages_free -= freed_pages;
375 pool->nfrees += freed_pages;
380 static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
385 /* Don't set WB on WB page pool. */
386 if (npages && !(pool->type & IS_CACHED) &&
389 pool->dev_name, npages);
393 __ttm_dma_free_page(pool, d_page);
397 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
399 /* Don't set WB on WB page pool. */
400 if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
402 pool->dev_name, 1);
405 __ttm_dma_free_page(pool, d_page);
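
The single-page release at lines 397-405 mirrors the batched put above: restore write-back caching unless the pool is already cached, then return the page to the DMA API. A sketch; the list_del() of the page_list node is an assumption about how dma_pages are tracked:

    static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
    {
            /* Don't set WB on WB page pool. */
            if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
                    pr_err("%s: Failed to set %d pages to wb!\n",
                           pool->dev_name, 1);

            list_del(&d_page->page_list);
            __ttm_dma_free_page(pool, d_page);
    }
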
409 * Free pages from pool.
414 * @pool: the pool to free the pages from
415 * @nr_free: If set to true, will free all pages in the pool
417 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
431 pool->dev_name, pool->name, current->pid,
439 pr_err("%s: Failed to allocate memory for pool free operation\n",
440 pool->dev_name);
445 spin_lock_irqsave(&pool->lock, irq_flags);
448 list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
460 ttm_pool_update_free_locked(pool, freed_pages);
463 * we unlock the pool to prevent stalling.
465 spin_unlock_irqrestore(&pool->lock, irq_flags);
467 ttm_dma_pages_put(pool, &d_pages, pages_to_free,
495 /* remove range of pages from the pool */
497 ttm_pool_update_free_locked(pool, freed_pages);
501 spin_unlock_irqrestore(&pool->lock, irq_flags);
504 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
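
Pieced together, lines 417-504 give the shape of the shrink routine: collect struct page pointers under the lock, and every time a batch fills up (or the walk ends) drop the lock so the costly caching change and dma_free_coherent() calls do not stall other allocators. A condensed sketch of that pattern; the BATCH macro, the iterator revalidation and the return-value bookkeeping are simplified assumptions:

    #define BATCH (PAGE_SIZE / sizeof(struct page *))   /* batch size, assumed */

    static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
    {
            struct dma_page *dma_p, *tmp;
            struct page **pages_to_free;
            struct list_head d_pages;
            unsigned long irq_flags;
            unsigned freed = 0, total = 0;

            pages_to_free = kmalloc_array(BATCH, sizeof(struct page *), GFP_KERNEL);
            if (!pages_to_free) {
                    pr_err("%s: Failed to allocate memory for pool free operation\n",
                           pool->dev_name);
                    return 0;
            }
            INIT_LIST_HEAD(&d_pages);

            spin_lock_irqsave(&pool->lock, irq_flags);
            list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list, page_list) {
                    if (total + freed >= nr_free)
                            break;
                    /* Move the dma_page to a private list; remember its page. */
                    list_move(&dma_p->page_list, &d_pages);
                    pages_to_free[freed++] = dma_p->p;
                    if (freed == BATCH) {
                            ttm_pool_update_free_locked(pool, freed);
                            /* Changing caching is costly: release the lock first. */
                            spin_unlock_irqrestore(&pool->lock, irq_flags);
                            ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed);
                            INIT_LIST_HEAD(&d_pages);
                            total += freed;
                            freed = 0;
                            spin_lock_irqsave(&pool->lock, irq_flags);
                            /* A full implementation revalidates the list walk here. */
                    }
            }
            if (freed)
                    ttm_pool_update_free_locked(pool, freed);
            spin_unlock_irqrestore(&pool->lock, irq_flags);

            if (freed)
                    ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed);
            kfree(pages_to_free);
            return total + freed;
    }
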
513 struct dma_pool *pool;
522 pool = p->pool;
523 if (pool->type != type)
531 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
532 if (pool->type != type)
535 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
536 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
541 list_del(&pool->pools);
542 kfree(pool);
550 * Although the pool might have already been freed earlier.
554 struct dma_pool *pool = *(struct dma_pool **)res;
556 if (pool)
557 ttm_dma_free_pool(dev, pool->type);
571 struct dma_pool *pool = NULL, **ptr;
585 pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
587 if (!pool)
597 sec_pool->pool = pool;
599 INIT_LIST_HEAD(&pool->free_list);
600 INIT_LIST_HEAD(&pool->inuse_list);
601 INIT_LIST_HEAD(&pool->pools);
602 spin_lock_init(&pool->lock);
603 pool->dev = dev;
604 pool->npages_free = pool->npages_in_use = 0;
605 pool->nfrees = 0;
606 pool->gfp_flags = flags;
607 pool->size = PAGE_SIZE;
608 pool->type = type;
609 pool->nrefills = 0;
610 p = pool->name;
613 p += snprintf(p, sizeof(pool->name) - (p - pool->name),
620 snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
627 list_add(&pool->pools, &dev->dma_pools);
630 *ptr = pool;
633 return pool;
637 kfree(pool);
644 struct dma_pool *pool, *tmp, *found = NULL;
660 list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
661 if (pool->type != type)
663 found = pool;
672 * pool.
674 static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
692 __ttm_dma_free_page(pool, d_page);
707 static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
724 pool->dev_name);
730 pool->dev_name, pool->name, current->pid, count);
734 dma_p = __ttm_dma_alloc_page(pool);
737 pool->dev_name, i);
739 /* store already allocated pages in the pool after
742 r = ttm_set_pages_caching(pool, caching_array,
746 pool, d_pages, caching_array,
763 r = ttm_set_pages_caching(pool, caching_array,
767 pool, d_pages, caching_array,
778 r = ttm_set_pages_caching(pool, caching_array, cpages);
780 ttm_dma_handle_caching_state_failure(pool, d_pages,
791 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
795 int r = pool->npages_free;
797 if (count > pool->npages_free) {
802 spin_unlock_irqrestore(&pool->lock, *irq_flags);
806 r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
808 spin_lock_irqsave(&pool->lock, *irq_flags);
811 list_splice(&d_pages, &pool->free_list);
812 ++pool->nrefills;
813 pool->npages_free += count;
819 pr_err("%s: Failed to fill %s pool (r:%d)!\n",
820 pool->dev_name, pool->name, r);
825 list_splice_tail(&d_pages, &pool->free_list);
826 pool->npages_free += cpages;
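
Lines 791-826 show the refill path deliberately dropping the pool lock around the slow allocation and re-taking it before splicing the result onto the free list; on partial failure only the pages that actually arrived are accounted. A condensed sketch of that pattern; the NUM_PAGES_TO_ALLOC refill quantum and the exact return value are assumptions:

    #define NUM_PAGES_TO_ALLOC (PAGE_SIZE / sizeof(struct page *))  /* assumed */

    /* Called with pool->lock held; may drop and re-take it. */
    static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
                                             unsigned long *irq_flags)
    {
            unsigned count = NUM_PAGES_TO_ALLOC;
            int r = pool->npages_free;

            if (count > pool->npages_free) {
                    struct dma_page *d_page;
                    struct list_head d_pages;
                    unsigned cpages = 0;

                    INIT_LIST_HEAD(&d_pages);
                    /* Allocation is slow: do it with the pool lock dropped. */
                    spin_unlock_irqrestore(&pool->lock, *irq_flags);
                    r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
                    spin_lock_irqsave(&pool->lock, *irq_flags);

                    if (!r) {
                            /* All pages arrived: splice and account in one go. */
                            list_splice(&d_pages, &pool->free_list);
                            ++pool->nrefills;
                            pool->npages_free += count;
                            r = count;
                    } else {
                            pr_err("%s: Failed to fill %s pool (r:%d)!\n",
                                   pool->dev_name, pool->name, r);
                            /* Keep whatever was allocated before the failure. */
                            list_for_each_entry(d_page, &d_pages, page_list)
                                    cpages++;
                            list_splice_tail(&d_pages, &pool->free_list);
                            pool->npages_free += cpages;
                            r = cpages;
                    }
            }
            return r;
    }
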
838 static int ttm_dma_pool_get_pages(struct dma_pool *pool,
847 spin_lock_irqsave(&pool->lock, irq_flags);
848 count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
850 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
855 pool->npages_in_use += 1;
856 pool->npages_free -= 1;
858 spin_unlock_irqrestore(&pool->lock, irq_flags);
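
The allocation fast path at lines 838-858 just pops the first entry off free_list after making sure it is non-empty. A sketch of how the popped dma_page might be handed to the caller; the ttm_dma->pages_list, dma_address and ttm->pages fields are assumptions about struct ttm_dma_tt and struct ttm_tt:

    static int ttm_dma_pool_get_pages(struct dma_pool *pool,
                                      struct ttm_dma_tt *ttm_dma,
                                      unsigned index)
    {
            struct ttm_tt *ttm = &ttm_dma->ttm;
            struct dma_page *d_page;
            unsigned long irq_flags;
            int count, r = -ENOMEM;

            spin_lock_irqsave(&pool->lock, irq_flags);
            count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
            if (count) {
                    d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
                    ttm->pages[index] = d_page->p;               /* assumed field */
                    ttm_dma->dma_address[index] = d_page->dma;   /* assumed field */
                    list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
                    r = 0;
                    pool->npages_in_use += 1;
                    pool->npages_free -= 1;
            }
            spin_unlock_irqrestore(&pool->lock, irq_flags);
            return r;
    }
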
870 struct dma_pool *pool;
887 pool = ttm_dma_find_pool(dev, type);
888 if (!pool) {
889 pool = ttm_dma_pool_init(dev, gfp_flags, type);
890 if (IS_ERR_OR_NULL(pool)) {
897 ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
932 total += p->pool->npages_free;
937 /* Put all pages in the pages list back into the correct pool to wait for reuse */
941 struct dma_pool *pool;
949 pool = ttm_dma_find_pool(dev, type);
950 if (!pool)
953 is_cached = (ttm_dma_find_pool(pool->dev,
954 ttm_to_type(ttm->page_flags, tt_cached)) == pool);
962 spin_lock_irqsave(&pool->lock, irq_flags);
963 pool->npages_in_use -= count;
965 pool->nfrees += count;
967 pool->npages_free += count;
968 list_splice(&ttm_dma->pages_list, &pool->free_list);
970 if (pool->npages_free > _manager->options.max_size) {
971 npages = pool->npages_free - _manager->options.max_size;
978 spin_unlock_irqrestore(&pool->lock, irq_flags);
984 ttm_dma_page_put(pool, d_page);
999 /* shrink pool if necessary (only on !is_cached pools) */
1001 ttm_dma_page_pool_free(pool, npages);
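
Lines 941-1001 outline the return path: under the lock the pages move from in-use accounting back to the free list (or, for cached pools, are counted as freed and released immediately), and any excess above options.max_size is trimmed outside the lock. A condensed sketch of that flow inside ttm_dma_unpopulate(); the source of count and the local variable names are assumptions:

    /* Fragment from inside ttm_dma_unpopulate(); pool, ttm_dma and is_cached
     * are set up earlier in the function (see lines 941-954). */
    struct dma_page *d_page, *next;
    unsigned long irq_flags;
    unsigned count = ttm_dma->ttm.num_pages;    /* assumed source of the count */
    unsigned npages = 0;

    spin_lock_irqsave(&pool->lock, irq_flags);
    pool->npages_in_use -= count;
    if (is_cached) {
            pool->nfrees += count;              /* cached pages never re-enter the pool */
    } else {
            pool->npages_free += count;
            list_splice(&ttm_dma->pages_list, &pool->free_list);
            if (pool->npages_free > _manager->options.max_size)
                    npages = pool->npages_free - _manager->options.max_size;
    }
    spin_unlock_irqrestore(&pool->lock, irq_flags);

    if (is_cached) {
            /* Cached pages go straight back to the DMA API, one by one. */
            list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list)
                    ttm_dma_page_put(pool, d_page);
    }
    INIT_LIST_HEAD(&ttm_dma->pages_list);

    /* shrink pool if necessary (only on !is_cached pools) */
    if (npages)
            ttm_dma_page_pool_free(pool, npages);
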
1007 * Callback for mm to request pool to reduce number of pages held.
1034 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
1036 p->pool->dev_name, p->pool->name, current->pid,
1040 /* return estimated number of unused pages in pool */
1062 pr_info("Initializing DMA pool allocator\n");
1092 pr_info("Finalizing DMA pool allocator\n");
1096 dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
1099 ttm_dma_pool_match, p->pool));
1100 ttm_dma_free_pool(p->dev, p->pool->type);
1109 struct dma_pool *pool = NULL;
1110 char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
1114 seq_printf(m, "No pool allocator running.\n");
1124 pool = p->pool;
1126 pool->name, pool->nrefills,
1127 pool->nfrees, pool->npages_in_use,
1128 pool->npages_free,
1129 pool->dev_name);