/*
 * Copyright 2011 (c) Oracle Corp.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks pages that are currently in use.
 * - Tracks whether the page is WC, UC or cached (and reverts to WB
 *   when freed).
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define IS_UNDEFINED			(0)
#define IS_WC				(1<<1)
#define IS_UC				(1<<2)
#define IS_CACHED			(1<<3)
#define IS_DMA32			(1<<4)

enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' pool is for pages that are actively
 * used. The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
 * used with the irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is very important and
 *   it is in the order that the TTM pages that are put back are in.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting structure that keeps track of an allocated page along with
 * its DMA address.
 * @page_list: The link into the pool's free or in-use list.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'.
 * @dev: The 'struct device' associated with the 'pool'.
 * @pool: The 'struct dma_pool' associated with the 'dev'.
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @shrinker: The structure used by [un|]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

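/*
 * sysfs store handler for the pool limits. The value is given in KB and is
 * converted to a number of pages before being applied to the selected option.
 */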
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

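/*
 * sysfs show handler for the pool limits. The stored page counts are
 * converted back to KB for presentation.
 */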
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */

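/*
 * Apply the caching attribute required by the pool type (uncached or
 * write-combined) to a freshly allocated array of pages.
 */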
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

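/* Release a single dma_page back to the DMA API and free its bookkeeping. */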
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;
	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);

	kfree(d_page);
	d_page = NULL;
}
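
/*
 * Allocate a single page through dma_alloc_coherent and wrap it in a
 * dma_page bookkeeping structure.
 */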
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr)
		d_page->p = virt_to_page(d_page->vaddr);
	else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}
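
/* Map TTM page flags and caching state to the matching pool type. */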
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;

}

/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: The pool to free the pages from.
 * @nr_free: Number of pages to free; FREE_ALL_PAGES frees every page in
 * the pool.
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We pick the oldest ones off the list. */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

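/*
 * Remove the pool of the given type that belongs to 'dev' from both the
 * global manager list and the device's dma_pools list, freeing all of its
 * pages in the process.
 */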
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * This destructor is run when the 'struct device' is freed, although the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

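/* devres match callback: does this devres entry wrap the given pool? */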
static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

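/*
 * Create a new pool of the given type for 'dev', register it with both the
 * global manager and the device, and tie its lifetime to the device via
 * devres.
 */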
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_* calls because by the time
	 * dma_pool_destroy is called the kobj->name has already been
	 * deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphics driver loading - in drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver, both of which end up taking a
	 * kref on 'struct device'.
	 *
	 * On teardown, the graphics drivers end up quiescing the TTM
	 * (put_pages) and calling the devres destructors: ttm_dma_pool_release.
	 * The nice thing is that at that point there are no pages associated
	 * with the driver, so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}

/*
 * Free the pages that failed to change their caching state. If there
 * are pages that have already changed their caching state, put them back
 * into the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}

}

/*
 * Allocate 'count' pages, set their caching state according to the pool
 * type, and add them to the 'd_pages' list.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1) {
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of a highmem page should never be dma32 so we
		 * should be fine in such a case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
						 cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}

/*
 * On success the pages list will hold the required number of correctly
 * cached pages. On failure the negative error code is returned (-ENOMEM,
 * etc).
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool)) {
			return -ENOMEM;
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);

/* Get a good estimate of how many pages are free in the pools. */
static int ttm_dma_pool_get_num_unused_pages(void)
{
	struct device_pools *p;
	unsigned total = 0;

	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools)
		total += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return total;
}

/* Put all pages in the pages list into the correct pool to wait for reuse. */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure the pages array matches the list and count the pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		npages = count;
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC pages
			 * to reduce calls to set_memory_wb */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
		}
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink the pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for the mm layer to request that the pools reduce the number of
 * pages held.
 */
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned idx = 0;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;

	if (list_empty(&_manager->pools))
		return 0;

	mutex_lock(&_manager->lock);
	pool_offset = pool_offset % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
	mutex_unlock(&_manager->lock);
	/* return estimated number of unused pages in pool */
	return ttm_dma_pool_get_num_unused_pages();
}

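/* Hook the DMA pool manager up to the mm shrinker. */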
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

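/*
 * Set up the global DMA pool manager: allocate it, initialize its limits,
 * expose it through sysfs and register the shrinker.
 */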
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err:
	return ret;
}

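/*
 * Tear down the global DMA pool manager: unregister the shrinker and free
 * every remaining per-device pool.
 */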
void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
			ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

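/* Dump per-pool statistics for debugfs. */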
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);