/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/* Simple list-based uncached page pool.
 * - The pool collects recently freed pages for reuse.
 * - The page plinks.q linkage is used to keep a free list.
 * - It does not track pages currently in use.
 */
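
/*
 * A rough sketch of the allocator lifecycle.  All functions named here
 * are defined in this file; the driver-side call order shown is an
 * assumption, not code taken from a real caller:
 *
 *	ttm_page_alloc_init(glob, max_pages);	// create the four pools
 *	...
 *	ttm_pool_populate(ttm);		// back a ttm_tt with pooled pages
 *	ttm_pool_unpopulate(ttm);	// return the pages to the pools
 *	...
 *	ttm_page_alloc_fini();		// drain the pools, free the manager
 */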

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#include <vm/vm_pageout.h>

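/*
 * NUM_PAGES_TO_ALLOC is sized so that the temporary vm_page_t arrays
 * used below (in ttm_page_pool_free() and ttm_alloc_new_pages()) fit
 * in exactly one page.
 */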
#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access.  The pool may
 * also be reached from the vm_lowmem event handler.
 * @fill_lock: Prevent concurrent calls to fill.
 * @dma32: Whether the pool holds pages restricted to 32-bit addresses.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @ttm_page_alloc_flags: TTM page flags (e.g. TTM_PAGE_FLAG_DMA32) used
 * when allocating pages for this pool.
 * @npages: Number of pages in pool.
 * @name: Pool name, for statistics.
 * @nfrees: Number of pages freed from the pool, for statistics.
 * @nrefills: Number of times the pool was refilled, for statistics.
 */
struct ttm_page_pool {
	struct mtx		lock;
	bool			fill_lock;
	bool			dma32;
	struct pglist		list;
	int			ttm_page_alloc_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool.  They are handled without locks because the only
 * place where they may change is the (currently unimplemented, see the
 * XXXKIB sysctl blocks below) tunable store.  They won't have an
 * immediate effect anyway, so forcing serialized access to them is
 * pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't
 * need locking.
 *
 * @kobj_ref: Reference count on the pool manager.
 * @lowmem_handler: vm_lowmem event handler tag, used to shrink the pools
 * when the system is short of memory.
 * @options: Limits for the pools; see struct ttm_pool_opts above.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	unsigned int kobj_ref;
	eventhandler_tag lowmem_handler;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	u_pools[NUM_POOLS];
		struct _utag {
			struct ttm_page_pool	u_wc_pool;
			struct ttm_page_pool	u_uc_pool;
			struct ttm_page_pool	u_wc_pool_dma32;
			struct ttm_page_pool	u_uc_pool_dma32;
		} _ut;
	} _u;
};

#define	pools _u.u_pools
#define	wc_pool _u._ut.u_wc_pool
#define	uc_pool _u._ut.u_uc_pool
#define	wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define	uc_pool_dma32 _u._ut.u_uc_pool_dma32

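/*
 * The union above lets the pools be reached either as an array, for code
 * that iterates over all pools, or by name.  For example (a sketch, not
 * code from this file):
 *
 *	ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
 *	mtx_lock(&_manager->wc_pool.lock);	// aliases _manager->pools[0]
 */
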
MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");

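/*
 * Release a page previously obtained from ttm_vm_page_alloc() (defined
 * below): undo the fictitious/managed fixups applied at allocation time,
 * then unwire and free the page.
 */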
static void
ttm_vm_page_free(vm_page_t m)
{

	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
	m->flags &= ~PG_FICTITIOUS;
	m->oflags |= VPO_UNMANAGED;
	vm_page_unwire(m, 0);
	vm_page_free(m);
}

static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

	switch (cstate) {
	case tt_uncached:
		return (VM_MEMATTR_UNCACHEABLE);
	case tt_wc:
		return (VM_MEMATTR_WRITE_COMBINING);
	case tt_cached:
		return (VM_MEMATTR_WRITE_BACK);
	}
	panic("caching state %d\n", cstate);
}
157247835Skib
158285002Savgstatic vm_page_t
159285002Savgttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr)
160285002Savg{
161285002Savg	vm_page_t p;
162285002Savg	int tries;
163285002Savg
164285002Savg	for (tries = 0; ; tries++) {
165285002Savg		p = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
166285002Savg		    PAGE_SIZE, 0, memattr);
167285002Savg		if (p != NULL || tries > 2)
168285002Savg			return (p);
169285002Savg
170285002Savg		/*
171285002Savg		 * Before growing the cache see if this is just a normal
172285002Savg		 * memory shortage.
173285002Savg		 */
174285002Savg		VM_WAIT;
175285002Savg		vm_pageout_grow_cache(tries, 0, 0xffffffff);
176285002Savg	}
177285002Savg}
178285002Savg
179285002Savgstatic vm_page_t
180285002Savgttm_vm_page_alloc_any(int req, vm_memattr_t memattr)
181285002Savg{
182285002Savg	vm_page_t p;
183285002Savg
184285002Savg	while (1) {
185285002Savg		p = vm_page_alloc(NULL, 0, req);
186285002Savg		if (p != NULL)
187285002Savg			break;
188285002Savg		VM_WAIT;
189285002Savg	}
190285002Savg	pmap_page_set_memattr(p, memattr);
191285002Savg	return (p);
192285002Savg}
193285002Savg
194285002Savgstatic vm_page_t
195285002Savgttm_vm_page_alloc(int flags, enum ttm_caching_state cstate)
196285002Savg{
197285002Savg	vm_page_t p;
198285002Savg	vm_memattr_t memattr;
199285002Savg	int req;
200285002Savg
201285002Savg	memattr = ttm_caching_state_to_vm(cstate);
202285002Savg	req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
203285002Savg	if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
204285002Savg		req |= VM_ALLOC_ZERO;
205285002Savg
206285002Savg	if ((flags & TTM_PAGE_FLAG_DMA32) != 0)
207285002Savg		p = ttm_vm_page_alloc_dma32(req, memattr);
208285002Savg	else
209285002Savg		p = ttm_vm_page_alloc_any(req, memattr);
210285002Savg
211285002Savg	if (p != NULL) {
212285002Savg		p->oflags &= ~VPO_UNMANAGED;
213285002Savg		p->flags |= PG_FICTITIOUS;
214285002Savg	}
215285002Savg	return (p);
216285002Savg}
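
/*
 * Example (hypothetical caller, not code from this file): allocate one
 * zeroed write-combining page below 4GB and free it again:
 *
 *	vm_page_t m;
 *
 *	m = ttm_vm_page_alloc(TTM_PAGE_FLAG_DMA32 | TTM_PAGE_FLAG_ZERO_ALLOC,
 *	    tt_wc);
 *	if (m != NULL)
 *		ttm_vm_page_free(m);
 */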

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

	free(m, M_TTM_POOLMGR);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
		struct attribute *attr, const char *buffer, size_t size)
{
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
		struct attribute *attr, char *buffer)
{
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_BACK);
#endif
	return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_COMBINING);
#endif
	return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_UNCACHEABLE);
#endif
	return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
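
/*
 * The resulting pool_index encodes the caching state in bit 0 and the
 * DMA32 restriction in bit 1, matching the member order of the manager
 * union above:
 *
 *	0: wc_pool		1: uc_pool
 *	2: wc_pool_dma32	3: uc_pool_dma32
 */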

/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
	unsigned i;

	/* Our VM handles vm memattr automatically on the page free. */
	if (set_pages_array_wb(pages, npages))
		printf("[TTM] Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from the pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: the pool to free pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES drains the whole pool
 * @return number of pages that still remain to be freed
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	vm_page_t p, p1;
	vm_page_t *pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
	    M_TEMP, M_WAITOK | M_ZERO);

restart:
	mtx_lock(&pool->lock);

	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			mtx_unlock(&pool->lock);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because the
			 * code following the loop runs with the pool lock
			 * held, while we have dropped it here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	mtx_unlock(&pool->lock);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	free(pages_to_free, M_TEMP);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools. */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for the vm_lowmem event handler, to request that the pools
 * reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
	static unsigned int start_pool = 0;
	unsigned i;
	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(vm_page_t *pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state.  If any pages
 * have already changed their caching state, put them back in the pool.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		vm_page_t *failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
		ttm_vm_page_free(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates the count depending on
 * the number of pages returned in the pages list.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t *caching_array;
	vm_page_t p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

	/* allocate array for page caching change */
	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
	    M_WAITOK | M_ZERO);

	for (i = 0, cpages = 0; i < count; ++i) {
		p = ttm_vm_page_alloc(ttm_alloc_flags, cstate);
		if (!p) {
			printf("[TTM] Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM /* KIB: nop */
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, p, plinks.q);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	free(caching_array, M_TEMP);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching while holding the pool lock,
		 * so drop it while the new pages are allocated.
		 */
		mtx_unlock(&pool->lock);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
		    ttm_flags, cstate, alloc_size);
		mtx_lock(&pool->lock);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printf("[TTM] Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			TAILQ_FOREACH(p, &new_pages, plinks.q) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct pglist *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	vm_page_t p;
	unsigned i;

	mtx_lock(&pool->lock);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, plinks.q);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, plinks.q);
		TAILQ_INSERT_TAIL(pages, p, plinks.q);
	}
	pool->npages -= count;
	count = 0;
out:
	mtx_unlock(&pool->lock);
	return count;
}

/* Put all pages on the pages list into the correct pool to wait for reuse. */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				ttm_vm_page_free(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	mtx_lock(&pool->lock);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce calls to set_pages_array_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	mtx_unlock(&pool->lock);
	if (npages)
		ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages array will hold npages correctly cached pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	vm_page_t p = NULL;
	int gfp_flags;
	unsigned count;
	int r;

	/* No pool for cached pages */
	if (pool == NULL) {
		for (r = 0; r < npages; ++r) {
			p = ttm_vm_page_alloc(flags, cstate);
			if (!p) {
				printf("[TTM] Unable to allocate page\n");
				return -ENOMEM;
			}
			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags = flags | pool->ttm_page_alloc_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, plinks.q) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, plinks.q) {
			pmap_zero_page(p);
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
		    npages);
		TAILQ_FOREACH(p, &plist, plinks.q) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list put them back to
			 * the pool. */
			printf("[TTM] Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}
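
/*
 * Sketch of the expected calling pattern for the two entry points above
 * (hypothetical driver code, assuming matching flags on get and put):
 *
 *	vm_page_t pages[4] = { NULL, NULL, NULL, NULL };
 *
 *	if (ttm_get_pages(pages, 4, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc) == 0)
 *		ttm_put_pages(pages, 4, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
 */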

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
				      char *name)
{
	mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->ttm_page_alloc_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

	if (_manager != NULL)
		printf("[TTM] manager != NULL\n");
	printf("[TTM] Initializing pool allocator\n");

	_manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "wc dma");
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	refcount_init(&_manager->kobj_ref, 1);
	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	printf("[TTM] Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	if (refcount_release(&_manager->kobj_ref))
		ttm_pool_kobj_release(_manager);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}

#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
#endif