/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/* Simple list-based uncached page pool.
 * - The pool collects recently freed pages for reuse.
 * - Uses the page plinks.q linkage to keep a free list.
 * - Does not track pages currently in use.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.c 273862 2014-10-30 14:26:36Z tijl $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000
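
/*
 * Illustrative arithmetic, assuming a common LP64 configuration
 * (PAGE_SIZE = 4096, sizeof(vm_page_t) = 8; assumptions, not guarantees):
 * NUM_PAGES_TO_ALLOC = 4096 / 8 = 512, so one batch of page pointers
 * fills exactly one page.
 */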

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access; the pool
 * allocator may also be called from the lowmem event handler.
 * @fill_lock: Prevent concurrent calls to fill.
 * @dma32: Pool is intended for 32-bit addressable memory.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @ttm_page_alloc_flags: TTM page flags (e.g. TTM_PAGE_FLAG_DMA32) used
 * when allocating new pages for the pool.
 * @npages: Number of pages in pool.
 * @name: Pool name, for diagnostics.
 * @nfrees: Statistics, number of pages freed from this pool.
 * @nrefills: Statistics, number of times this pool was refilled.
 */
struct ttm_page_pool {
	struct mtx		lock;
	bool			fill_lock;
	bool			dma32;
	struct pglist		list;
	int			ttm_page_alloc_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only
 * place they may change is the sysfs store (disabled below). They have no
 * immediate effect anyway, so forcing serialization to access them is
 * pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation.
 *
 * The manager is a read-only object for the pool code, so it does not
 * need locking.
 *
 * @kobj_ref: Reference count for allocator initialization/teardown.
 * @lowmem_handler: vm_lowmem event handler used to shrink the pools.
 * @options: Pool limits; see struct ttm_pool_opts above.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	unsigned int kobj_ref;
	eventhandler_tag lowmem_handler;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	u_pools[NUM_POOLS];
		struct _utag {
			struct ttm_page_pool	u_wc_pool;
			struct ttm_page_pool	u_uc_pool;
			struct ttm_page_pool	u_wc_pool_dma32;
			struct ttm_page_pool	u_uc_pool_dma32;
		} _ut;
	} _u;
};

#define	pools _u.u_pools
#define	wc_pool _u._ut.u_wc_pool
#define	uc_pool _u._ut.u_uc_pool
#define	wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define	uc_pool_dma32 _u._ut.u_uc_pool_dma32

MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");

/*
 * Undo the wiring and the fictitious/managed marking applied when the
 * page was allocated, then free it back to the system.
 */
static void
ttm_vm_page_free(vm_page_t m)
{

	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
	m->flags &= ~PG_FICTITIOUS;
	m->oflags |= VPO_UNMANAGED;
	vm_page_unwire(m, PQ_INACTIVE);
	vm_page_free(m);
}

static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

	switch (cstate) {
	case tt_uncached:
		return (VM_MEMATTR_UNCACHEABLE);
	case tt_wc:
		return (VM_MEMATTR_WRITE_COMBINING);
	case tt_cached:
		return (VM_MEMATTR_WRITE_BACK);
	}
	panic("caching state %d\n", cstate);
}

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

	free(m, M_TTM_POOLMGR);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
		struct attribute *attr, const char *buffer, size_t size)
{
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
		struct attribute *attr, char *buffer)
{
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_BACK);
#endif
	return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_COMBINING);
#endif
	return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_UNCACHEABLE);
#endif
	return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
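
/*
 * The index arithmetic above yields the following mapping, matching the
 * pool order in struct ttm_pool_manager:
 *	tt_wc			-> pools[0] (wc_pool)
 *	tt_uncached		-> pools[1] (uc_pool)
 *	tt_wc + DMA32		-> pools[2] (wc_pool_dma32)
 *	tt_uncached + DMA32	-> pools[3] (uc_pool_dma32)
 *	tt_cached		-> NULL (no pool; such pages are allocated
 *				   and freed directly)
 */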

/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
	unsigned i;

	/* Our VM handles vm memattr automatically on the page free. */
	if (set_pages_array_wb(pages, npages))
		printf("[TTM] Failed to set %u pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	vm_page_t p, p1;
	vm_page_t *pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
	    M_TEMP, M_WAITOK | M_ZERO);

restart:
	mtx_lock(&pool->lock);

	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			mtx_unlock(&pool->lock);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* more pages to free, so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break, because
			 * the code following the loop expects the pool
			 * lock to be held, while we dropped it here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	mtx_unlock(&pool->lock);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	free(pages_to_free, M_TEMP);
	return nr_free;
}
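
/*
 * Example use, as in ttm_page_alloc_fini() below: drain a pool completely,
 * NUM_PAGES_TO_ALLOC pages at a time:
 *
 *	ttm_page_pool_free(pool, FREE_ALL_PAGES);
 *
 * The return value is the part of the request that was not freed; callers
 * such as ttm_pool_mm_shrink() treat it as the remaining shrink budget.
 */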

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for the vm_lowmem event handler to request that the pools
 * reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
	static unsigned int start_pool = 0;
	unsigned i;
	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}
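
/*
 * Example: with NUM_POOLS = 4 and the fixed budget of 100 pages above, one
 * vm_lowmem event starts shrinking at pools[0], the next at pools[1], and
 * so on; the shared start_pool counter prevents the same pool from always
 * being drained first. Each pool consumes budget until ttm_page_pool_free()
 * reports none left.
 */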

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(vm_page_t *pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %u pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %u pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. Pages whose
 * caching state was already changed remain on the passed list for the
 * caller to reuse.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		vm_page_t *failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
		ttm_vm_page_free(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t *caching_array;
	vm_page_t p;
	int r = 0;
	unsigned i, cpages, aflags;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
	    VM_ALLOC_ZERO : 0);

	/* allocate array for page caching change */
	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
	    M_WAITOK | M_ZERO);

	for (i = 0, cpages = 0; i < count; ++i) {
		p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
		    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
		    VM_MAX_ADDRESS, PAGE_SIZE, 0,
		    ttm_caching_state_to_vm(cstate));
		if (!p) {
			printf("[TTM] Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p->oflags &= ~VPO_UNMANAGED;
		p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
		/* gfp flags of highmem page should never be dma32, so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, p, plinks.q);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	free(caching_array, M_TEMP);

	return r;
}
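
/*
 * Illustration of the batching above, assuming LP64 sizes so that
 * max_cpages = 512 (an assumption, not a guarantee): a request for 1000
 * pages performs one full caching change on a batch of 512 pages when
 * cpages reaches max_cpages, and the remaining 488 pages are handled by
 * the final if (cpages) block, so caching_array never exceeds one page of
 * pointers.
 */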

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new
	 * pages are allocated from outside the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool, fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		mtx_unlock(&pool->lock);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
		    ttm_flags, cstate, alloc_size);
		mtx_lock(&pool->lock);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printf("[TTM] Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			TAILQ_FOREACH(p, &new_pages, plinks.q) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}
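
/*
 * Example, with the defaults set in ttm_page_alloc_init() below (small = 16,
 * alloc_size = NUM_PAGES_TO_ALLOC): a request for 4 pages against a pool
 * holding 2 refills the pool by alloc_size pages; a request for 64 pages is
 * not "small", so no refill happens and the caller allocates the shortfall
 * directly.
 */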

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct pglist *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	vm_page_t p;
	unsigned i;

	mtx_lock(&pool->lock);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, plinks.q);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, plinks.q);
		TAILQ_INSERT_TAIL(pages, p, plinks.q);
	}
	pool->npages -= count;
	count = 0;
out:
	mtx_unlock(&pool->lock);
	return count;
}
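
/*
 * Example: requesting 100 pages from a pool holding 40 moves all 40 onto
 * the return list and returns 60 (pages still required); requesting 10
 * from the same pool moves 10 and returns 0.
 */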

/* Put all pages in the pages array into the correct pool to wait for reuse */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				ttm_vm_page_free(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	mtx_lock(&pool->lock);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce calls to set_pages_array_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	mtx_unlock(&pool->lock);
	if (npages)
		ttm_page_pool_free(pool, npages);
}
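
/*
 * Illustration of the trim above (numbers hypothetical): with
 * options.max_size = 1024 and a pool grown to 1030 pages, the nominal
 * excess is 6 pages, but at least NUM_PAGES_TO_ALLOC pages are freed so
 * the costly write-back transition in ttm_pages_put() is amortized over a
 * full batch.
 */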

/*
 * On success the pages array holds npages correctly cached pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	vm_page_t p = NULL;
	int gfp_flags, aflags;
	unsigned count;
	int r;

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

	/* No pool for cached pages */
	if (pool == NULL) {
		for (r = 0; r < npages; ++r) {
			p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
			    (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
			    VM_MAX_ADDRESS, PAGE_SIZE,
			    0, ttm_caching_state_to_vm(cstate));
			if (!p) {
				printf("[TTM] Unable to allocate page\n");
				return -ENOMEM;
			}
			p->oflags &= ~VPO_UNMANAGED;
			p->flags |= PG_FICTITIOUS;
			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags = flags | pool->ttm_page_alloc_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, plinks.q) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, plinks.q) {
			pmap_zero_page(p);
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
		    npages);
		TAILQ_FOREACH(p, &plist, plinks.q) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * in the pool. */
			printf("[TTM] Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}
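
#if 0
/*
 * Hedged usage sketch, not part of this file's API surface: allocate and
 * release four write-combined, DMA32-limited pages through the pool
 * interface. The function name is illustrative; the real callers are
 * ttm_pool_populate() and ttm_pool_unpopulate() below.
 */
static int
example_alloc_wc_dma32(void)
{
	vm_page_t pages[4] = { NULL, NULL, NULL, NULL };
	int error;

	error = ttm_get_pages(pages, 4, TTM_PAGE_FLAG_DMA32, tt_wc);
	if (error != 0)
		return (error);
	/* ... use the pages ... */
	ttm_put_pages(pages, 4, TTM_PAGE_FLAG_DMA32, tt_wc);
	return (0);
}
#endif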

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
				      char *name)
{
	mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->ttm_page_alloc_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

	if (_manager != NULL)
		printf("[TTM] manager != NULL\n");
	printf("[TTM] Initializing pool allocator\n");

	_manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "wc dma");
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	refcount_init(&_manager->kobj_ref, 1);
	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	printf("[TTM] Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	if (refcount_release(&_manager->kobj_ref))
		ttm_pool_kobj_release(_manager);
	_manager = NULL;
}
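
#if 0
/*
 * Hedged sketch of the expected init/fini pairing; glob and max_pages are
 * supplied by the TTM memory accounting glue, whose call sites live
 * outside this file (an assumption, shown for illustration only).
 */
ttm_page_alloc_init(glob, max_pages);
/* ... pools are usable via ttm_pool_populate()/ttm_pool_unpopulate() ... */
ttm_page_alloc_fini();
#endif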

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
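
#if 0
/*
 * Hedged sketch, names illustrative: a driver's ttm_tt backend would
 * typically forward its populate hook straight to the pool allocator; the
 * actual hook wiring lives in the driver, not in this file.
 */
static int
example_driver_tt_populate(struct ttm_tt *ttm)
{

	return (ttm_pool_populate(ttm));
}
#endif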

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}

#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
#endif
886