1247835Skib/*
2247835Skib * Copyright (c) Red Hat Inc.
3247835Skib
4247835Skib * Permission is hereby granted, free of charge, to any person obtaining a
5247835Skib * copy of this software and associated documentation files (the "Software"),
6247835Skib * to deal in the Software without restriction, including without limitation
7247835Skib * the rights to use, copy, modify, merge, publish, distribute, sub license,
8247835Skib * and/or sell copies of the Software, and to permit persons to whom the
9247835Skib * Software is furnished to do so, subject to the following conditions:
10247835Skib *
11247835Skib * The above copyright notice and this permission notice (including the
12247835Skib * next paragraph) shall be included in all copies or substantial portions
13247835Skib * of the Software.
14247835Skib *
15247835Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16247835Skib * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17247835Skib * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18247835Skib * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19247835Skib * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20247835Skib * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21247835Skib * DEALINGS IN THE SOFTWARE.
22247835Skib *
23247835Skib * Authors: Dave Airlie <airlied@redhat.com>
24247835Skib *          Jerome Glisse <jglisse@redhat.com>
25247835Skib *          Pauli Nieminen <suokkos@gmail.com>
26247835Skib */
27247835Skib/*
28247835Skib * Copyright (c) 2013 The FreeBSD Foundation
29247835Skib * All rights reserved.
30247835Skib *
31247835Skib * Portions of this software were developed by Konstantin Belousov
32247835Skib * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
33247835Skib */
34247835Skib
35247835Skib/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
37247835Skib * - Use page->lru to keep a free list
38247835Skib * - doesn't track currently in use pages
39247835Skib */
40247835Skib
41247835Skib#include <sys/cdefs.h>
42247835Skib__FBSDID("$FreeBSD: stable/11/sys/dev/drm2/ttm/ttm_page_alloc.c 318848 2017-05-25 01:17:07Z markj $");
43247835Skib
44247835Skib#include <dev/drm2/drmP.h>
45247835Skib#include <dev/drm2/ttm/ttm_bo_driver.h>
46247835Skib#include <dev/drm2/ttm/ttm_page_alloc.h>
47278153Skib#include <vm/vm_pageout.h>
48247835Skib
49247835Skib#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(vm_page_t))
50247835Skib#define SMALL_ALLOCATION		16
51247835Skib#define FREE_ALL_PAGES			(~0U)
52247835Skib/* times are in msecs */
53247835Skib#define PAGE_FREE_INTERVAL		1000
54247835Skib
55247835Skib/**
56247835Skib * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
57247835Skib *
 * @lock: Protects the shared pool from concurrent access. Must be used with
59247835Skib * irqsave/irqrestore variants because pool allocator maybe called from
60247835Skib * delayed work.
61247835Skib * @fill_lock: Prevent concurrent calls to fill.
62247835Skib * @list: Pool of free uc/wc pages for fast reuse.
63247835Skib * @gfp_flags: Flags to pass for alloc_page.
64247835Skib * @npages: Number of pages in pool.
65247835Skib */
struct ttm_page_pool {
	struct mtx		lock;		/* protects list, npages and the stats */
	bool			fill_lock;	/* serializes pool refills; see ttm_page_pool_fill_locked() */
	bool			dma32;		/* NOTE(review): never written in this file -- confirm whether still used */
	struct pglist		list;		/* free pages, linked through plinks.q */
	int			ttm_page_alloc_flags; /* TTM_PAGE_FLAG_* applied to every page allocated for this pool */
	unsigned		npages;		/* number of pages currently on list */
	char			*name;		/* pool name, for diagnostics */
	unsigned long		nfrees;		/* lifetime count of pages freed from this pool */
	unsigned long		nrefills;	/* lifetime count of successful refills */
};
77247835Skib
78247835Skib/**
79247835Skib * Limits for the pool. They are handled without locks because only place where
80247835Skib * they may change is in sysfs store. They won't have immediate effect anyway
81247835Skib * so forcing serialization to access them is pointless.
82247835Skib */
83247835Skib
struct ttm_pool_opts {
	unsigned	alloc_size;	/* pages to allocate per pool refill */
	unsigned	max_size;	/* max pages a pool may cache before trimming */
	unsigned	small;		/* requests below this many pages count as "small" */
};
89247835Skib
90247835Skib#define NUM_POOLS 4
91247835Skib
92247835Skib/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
94247835Skib *
95247835Skib * Manager is read only object for pool code so it doesn't need locking.
96247835Skib *
97247835Skib * @free_interval: minimum number of jiffies between freeing pages from pool.
98247835Skib * @page_alloc_inited: reference counting for pool allocation.
99247835Skib * @work: Work that is used to shrink the pool. Work is only run when there is
100247835Skib * some pages to free.
101247835Skib * @small_allocation: Limit in number of pages what is small allocation.
102247835Skib *
103247835Skib * @pools: All pool objects in use.
104247835Skib **/
struct ttm_pool_manager {
	unsigned int kobj_ref;			/* refcount; freed via ttm_pool_kobj_release() */
	eventhandler_tag lowmem_handler;	/* vm_lowmem hook registered in ttm_pool_mm_shrink_init() */
	struct ttm_pool_opts	options;

	/*
	 * The four pools are addressable either as an array (for iteration)
	 * or by name (wc/uc x dma32/any); see the accessor #defines below.
	 */
	union {
		struct ttm_page_pool	u_pools[NUM_POOLS];
		struct _utag {
			struct ttm_page_pool	u_wc_pool;
			struct ttm_page_pool	u_uc_pool;
			struct ttm_page_pool	u_wc_pool_dma32;
			struct ttm_page_pool	u_uc_pool_dma32;
		} _ut;
	} _u;
};
120247835Skib
121247849Skib#define	pools _u.u_pools
122247849Skib#define	wc_pool _u._ut.u_wc_pool
123247849Skib#define	uc_pool _u._ut.u_uc_pool
124247849Skib#define	wc_pool_dma32 _u._ut.u_wc_pool_dma32
125247849Skib#define	uc_pool_dma32 _u._ut.u_uc_pool_dma32
126247849Skib
127247835SkibMALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");
128247835Skib
/*
 * Free a page previously produced by ttm_vm_page_alloc(): undo the
 * fictitious/managed state set at allocation time, drop the single
 * wiring, and return the page to the VM.
 */
static void
ttm_vm_page_free(vm_page_t m)
{

	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
	m->flags &= ~PG_FICTITIOUS;
	m->oflags |= VPO_UNMANAGED;
	vm_page_unwire(m, PQ_NONE);
	vm_page_free(m);
}
142247835Skib
143247835Skibstatic vm_memattr_t
144247835Skibttm_caching_state_to_vm(enum ttm_caching_state cstate)
145247835Skib{
146247835Skib
147247835Skib	switch (cstate) {
148247835Skib	case tt_uncached:
149247835Skib		return (VM_MEMATTR_UNCACHEABLE);
150247835Skib	case tt_wc:
151247835Skib		return (VM_MEMATTR_WRITE_COMBINING);
152247835Skib	case tt_cached:
153247835Skib		return (VM_MEMATTR_WRITE_BACK);
154247835Skib	}
155247835Skib	panic("caching state %d\n", cstate);
156247835Skib}
157247835Skib
/*
 * Allocate a single page below 4GB (for TTM_PAGE_FLAG_DMA32 pools).
 * Tries vm_page_alloc_contig() a few times, attempting physical-memory
 * reclamation between tries; sleeps in VM_WAIT only when reclamation
 * made no progress.  Returns NULL after the retries are exhausted.
 */
static vm_page_t
ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr)
{
	vm_page_t p;
	int tries;

	for (tries = 0; ; tries++) {
		p = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
		    PAGE_SIZE, 0, memattr);
		if (p != NULL || tries > 2)
			return (p);
		if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
		    PAGE_SIZE, 0))
			VM_WAIT;
	}
}
174284416Savg
175284416Savgstatic vm_page_t
176284416Savgttm_vm_page_alloc_any(int req, vm_memattr_t memattr)
177284416Savg{
178284416Savg	vm_page_t p;
179284416Savg
180284416Savg	while (1) {
181284416Savg		p = vm_page_alloc(NULL, 0, req);
182284416Savg		if (p != NULL)
183284416Savg			break;
184284416Savg		VM_WAIT;
185284416Savg	}
186284416Savg	pmap_page_set_memattr(p, memattr);
187284416Savg	return (p);
188284416Savg}
189284416Savg
190284416Savgstatic vm_page_t
191284416Savgttm_vm_page_alloc(int flags, enum ttm_caching_state cstate)
192284416Savg{
193284416Savg	vm_page_t p;
194284416Savg	vm_memattr_t memattr;
195284416Savg	int req;
196284416Savg
197284416Savg	memattr = ttm_caching_state_to_vm(cstate);
198284416Savg	req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
199284416Savg	if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
200284416Savg		req |= VM_ALLOC_ZERO;
201284416Savg
202284416Savg	if ((flags & TTM_PAGE_FLAG_DMA32) != 0)
203284416Savg		p = ttm_vm_page_alloc_dma32(req, memattr);
204284416Savg	else
205284416Savg		p = ttm_vm_page_alloc_any(req, memattr);
206284416Savg
207284416Savg	if (p != NULL) {
208284416Savg		p->oflags &= ~VPO_UNMANAGED;
209284416Savg		p->flags |= PG_FICTITIOUS;
210284416Savg	}
211284416Savg	return (p);
212284416Savg}
213284416Savg
/* Final release of the pool manager once its refcount drops to zero. */
static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

	free(m, M_TTM_POOLMGR);
}
219247835Skib
#if 0
/* XXXKIB sysfs tunables from Linux, kept for reference until replaced
 * by sysctl equivalents; not compiled. */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
		struct attribute *attr, const char *buffer, size_t size)
{
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
		struct attribute *attr, char *buffer)
{
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif
271247835Skib
/* Singleton pool manager, created by ttm_page_alloc_init(). */
static struct ttm_pool_manager *_manager;
273247835Skib
274247835Skibstatic int set_pages_array_wb(vm_page_t *pages, int addrinarray)
275247835Skib{
276273862Stijl#ifdef TTM_HAS_AGP
277247835Skib	int i;
278247835Skib
279273862Stijl	for (i = 0; i < addrinarray; i++)
280273862Stijl		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_BACK);
281247835Skib#endif
282247835Skib	return 0;
283247835Skib}
284247835Skib
285247835Skibstatic int set_pages_array_wc(vm_page_t *pages, int addrinarray)
286247835Skib{
287273862Stijl#ifdef TTM_HAS_AGP
288247835Skib	int i;
289247835Skib
290273862Stijl	for (i = 0; i < addrinarray; i++)
291273862Stijl		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_COMBINING);
292247835Skib#endif
293247835Skib	return 0;
294247835Skib}
295247835Skib
296247835Skibstatic int set_pages_array_uc(vm_page_t *pages, int addrinarray)
297247835Skib{
298273862Stijl#ifdef TTM_HAS_AGP
299247835Skib	int i;
300247835Skib
301273862Stijl	for (i = 0; i < addrinarray; i++)
302273862Stijl		pmap_page_set_memattr(pages[i], VM_MEMATTR_UNCACHEABLE);
303247835Skib#endif
304247835Skib	return 0;
305247835Skib}
306247835Skib
307247835Skib/**
308247835Skib * Select the right pool or requested caching state and ttm flags. */
309247835Skibstatic struct ttm_page_pool *ttm_get_pool(int flags,
310247835Skib		enum ttm_caching_state cstate)
311247835Skib{
312247835Skib	int pool_index;
313247835Skib
314247835Skib	if (cstate == tt_cached)
315247835Skib		return NULL;
316247835Skib
317247835Skib	if (cstate == tt_wc)
318247835Skib		pool_index = 0x0;
319247835Skib	else
320247835Skib		pool_index = 0x1;
321247835Skib
322247835Skib	if (flags & TTM_PAGE_FLAG_DMA32)
323247835Skib		pool_index |= 0x2;
324247835Skib
325247835Skib	return &_manager->pools[pool_index];
326247835Skib}
327247835Skib
328247835Skib/* set memory back to wb and free the pages. */
329247835Skibstatic void ttm_pages_put(vm_page_t *pages, unsigned npages)
330247835Skib{
331247835Skib	unsigned i;
332247835Skib
333247835Skib	/* Our VM handles vm memattr automatically on the page free. */
334247835Skib	if (set_pages_array_wb(pages, npages))
335247835Skib		printf("[TTM] Failed to set %d pages to wb!\n", npages);
336247835Skib	for (i = 0; i < npages; ++i)
337247835Skib		ttm_vm_page_free(pages[i]);
338247835Skib}
339247835Skib
/* Update pool accounting after pages were removed; pool lock must be held. */
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}
346247835Skib
/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free, or FREE_ALL_PAGES to drain the pool
 *
 * Returns the number of pages still requested but not freed.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	vm_page_t p, p1;
	vm_page_t *pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	/* Cap each batch so the scratch array stays one page-sized. */
	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
	    M_TEMP, M_WAITOK | M_ZERO);

restart:
	mtx_lock(&pool->lock);

	/* Walk from the tail: oldest pages are freed first. */
	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			mtx_unlock(&pool->lock);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	mtx_unlock(&pool->lock);

	/* Pages were unhooked under the lock; put them after dropping it. */
	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	free(pages_to_free, M_TEMP);
	return nr_free;
}
432247835Skib
433247835Skib/* Get good estimation how many pages are free in pools */
434247835Skibstatic int ttm_pool_get_num_unused_pages(void)
435247835Skib{
436247835Skib	unsigned i;
437247835Skib	int total = 0;
438247835Skib	for (i = 0; i < NUM_POOLS; ++i)
439247835Skib		total += _manager->pools[i].npages;
440247835Skib
441247835Skib	return total;
442247835Skib}
443247835Skib
/**
 * Callback for mm to request pool to reduce number of page held.
 * Invoked from the vm_lowmem event handler; returns an estimate of the
 * pages still cached so the caller can judge remaining pressure.
 */
static int ttm_pool_mm_shrink(void *arg)
{
	/* Rotates across invocations so all pools get trimmed over time. */
	static unsigned int start_pool = 0;
	unsigned i;
	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		/* ttm_page_pool_free() returns the count it could not free. */
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}
467247835Skib
/* Hook the shrinker into the VM low-memory event. */
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}
474247835Skib
/* Unhook the shrinker registered by ttm_pool_mm_shrink_init(). */
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}
480247835Skib
481247835Skibstatic int ttm_set_pages_caching(vm_page_t *pages,
482247835Skib		enum ttm_caching_state cstate, unsigned cpages)
483247835Skib{
484247835Skib	int r = 0;
485247835Skib	/* Set page caching */
486247835Skib	switch (cstate) {
487247835Skib	case tt_uncached:
488247835Skib		r = set_pages_array_uc(pages, cpages);
489247835Skib		if (r)
490247835Skib			printf("[TTM] Failed to set %d pages to uc!\n", cpages);
491247835Skib		break;
492247835Skib	case tt_wc:
493247835Skib		r = set_pages_array_wc(pages, cpages);
494247835Skib		if (r)
495247835Skib			printf("[TTM] Failed to set %d pages to wc!\n", cpages);
496247835Skib		break;
497247835Skib	default:
498247835Skib		break;
499247835Skib	}
500247835Skib	return r;
501247835Skib}
502247835Skib
503247835Skib/**
504247835Skib * Free pages the pages that failed to change the caching state. If there is
505247835Skib * any pages that have changed their caching state already put them to the
506247835Skib * pool.
507247835Skib */
508247835Skibstatic void ttm_handle_caching_state_failure(struct pglist *pages,
509247835Skib		int ttm_flags, enum ttm_caching_state cstate,
510247835Skib		vm_page_t *failed_pages, unsigned cpages)
511247835Skib{
512247835Skib	unsigned i;
513247835Skib	/* Failed pages have to be freed */
514247835Skib	for (i = 0; i < cpages; ++i) {
515254182Skib		TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
516247835Skib		ttm_vm_page_free(failed_pages[i]);
517247835Skib	}
518247835Skib}
519247835Skib
/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 *
 * On failure, pages whose caching was already changed are left on the
 * "pages" list; pages whose caching change failed are freed.  Returns
 * 0 or a negative errno.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t *caching_array;
	vm_page_t p;
	int r = 0;
	unsigned i, cpages;
	/* Batch caching changes so the scratch array fits in one page. */
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

	/* allocate array for page caching change */
	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
	    M_WAITOK | M_ZERO);

	for (i = 0, cpages = 0; i < count; ++i) {
		p = ttm_vm_page_alloc(ttm_alloc_flags, cstate);
		if (!p) {
			printf("[TTM] Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM /* KIB: nop */
		/* gfp flags of highmem page should never be dma32 so we
		 * we should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		/* Page joins the result list before its caching batch runs;
		 * a later batch failure removes it again via
		 * ttm_handle_caching_state_failure(). */
		TAILQ_INSERT_HEAD(pages, p, plinks.q);
	}

	/* Flush the final partial batch of caching changes. */
	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	free(caching_array, M_TEMP);

	return r;
}
596247835Skib
597247835Skib/**
598247835Skib * Fill the given pool if there aren't enough pages and the requested number of
599247835Skib * pages is small.
600247835Skib */
601247835Skibstatic void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
602247835Skib    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
603247835Skib{
604247835Skib	vm_page_t p;
605247835Skib	int r;
606247835Skib	unsigned cpages = 0;
607247835Skib	/**
608247835Skib	 * Only allow one pool fill operation at a time.
609247835Skib	 * If pool doesn't have enough pages for the allocation new pages are
610247835Skib	 * allocated from outside of pool.
611247835Skib	 */
612247835Skib	if (pool->fill_lock)
613247835Skib		return;
614247835Skib
615247835Skib	pool->fill_lock = true;
616247835Skib
617247835Skib	/* If allocation request is small and there are not enough
618247835Skib	 * pages in a pool we fill the pool up first. */
619247835Skib	if (count < _manager->options.small
620247835Skib		&& count > pool->npages) {
621247835Skib		struct pglist new_pages;
622247835Skib		unsigned alloc_size = _manager->options.alloc_size;
623247835Skib
624247835Skib		/**
625247835Skib		 * Can't change page caching if in irqsave context. We have to
626247835Skib		 * drop the pool->lock.
627247835Skib		 */
628247835Skib		mtx_unlock(&pool->lock);
629247835Skib
630247835Skib		TAILQ_INIT(&new_pages);
631247835Skib		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
632247835Skib		    ttm_flags, cstate, alloc_size);
633247835Skib		mtx_lock(&pool->lock);
634247835Skib
635247835Skib		if (!r) {
636254182Skib			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
637247835Skib			++pool->nrefills;
638247835Skib			pool->npages += alloc_size;
639247835Skib		} else {
640247835Skib			printf("[TTM] Failed to fill pool (%p)\n", pool);
641247835Skib			/* If we have any pages left put them to the pool. */
642254182Skib			TAILQ_FOREACH(p, &pool->list, plinks.q) {
643247835Skib				++cpages;
644247835Skib			}
645254182Skib			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
646247835Skib			pool->npages += cpages;
647247835Skib		}
648247835Skib
649247835Skib	}
650247835Skib	pool->fill_lock = false;
651247835Skib}
652247835Skib
/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct pglist *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	vm_page_t p;
	unsigned i;

	mtx_lock(&pool->lock);
	/* Top up a low pool first for small requests (may drop/retake lock). */
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, plinks.q);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Pool has more than enough: move exactly 'count' from the head. */
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, plinks.q);
		TAILQ_INSERT_TAIL(pages, p, plinks.q);
	}
	pool->npages -= count;
	count = 0;
out:
	mtx_unlock(&pool->lock);
	return count;
}
688247835Skib
/* Put all pages in pages list to correct pool to wait for reuse.
 * Entries consumed from the array are NULLed out; if the pool grows past
 * its limit the excess is trimmed after dropping the lock. */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				ttm_vm_page_free(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	mtx_lock(&pool->lock);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	mtx_unlock(&pool->lock);
	/* Trim outside the lock: freeing may change page caching. */
	if (npages)
		ttm_page_pool_free(pool, npages);
}
728247835Skib
/*
 * On success pages list will hold count number of correctly
 * cached pages.  Pages come from the matching pool first; any
 * remainder is freshly allocated.  Returns 0 or a negative errno.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	vm_page_t p = NULL;
	int gfp_flags;
	unsigned count;
	int r;

	/* No pool for cached pages */
	if (pool == NULL) {
		/* Allocate directly; on failure the caller is expected to
		 * release the pages already stored in pages[]. */
		for (r = 0; r < npages; ++r) {
			p = ttm_vm_page_alloc(flags, cstate);
			if (!p) {
				printf("[TTM] Unable to allocate page\n");
				return -ENOMEM;
			}
			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags = flags | pool->ttm_page_alloc_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	/* npages becomes the count still missing after the pool was drained */
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, plinks.q) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, plinks.q) {
			pmap_zero_page(p);
		}
	}

	/* If pool didn't have enough pages allocate new one. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
		    npages);
		TAILQ_FOREACH(p, &plist, plinks.q) {
			pages[count++] = p;
		}
		if (r) {
			/* If there is any pages in the list put them back to
			 * the pool. */
			printf("[TTM] Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}
796247835Skib
797247835Skibstatic void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
798247835Skib				      char *name)
799247835Skib{
800247835Skib	mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
801247835Skib	pool->fill_lock = false;
802247835Skib	TAILQ_INIT(&pool->list);
803247835Skib	pool->npages = pool->nfrees = 0;
804247835Skib	pool->ttm_page_alloc_flags = flags;
805247835Skib	pool->name = name;
806247835Skib}
807247835Skib
/* Create the global pool manager with its four pools and register the
 * low-memory shrinker.  max_pages caps each pool's cached page count. */
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

	if (_manager != NULL)
		printf("[TTM] manager != NULL\n");
	printf("[TTM] Initializing pool allocator\n");

	/* M_ZERO also zeroes the nrefills counters not set by init_locked. */
	_manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "wc dma");
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	refcount_init(&_manager->kobj_ref, 1);
	ttm_pool_mm_shrink_init(_manager);

	return 0;
}
833247835Skib
/* Tear down the pool allocator: unhook the shrinker, drain every pool
 * and drop the manager reference taken in ttm_page_alloc_init(). */
void ttm_page_alloc_fini(void)
{
	int i;

	printf("[TTM] Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	if (refcount_release(&_manager->kobj_ref))
		ttm_pool_kobj_release(_manager);
	_manager = NULL;
}
848247835Skib
/* Populate a ttm with pages, one at a time, charging each against the
 * global memory accounting.  On any failure everything allocated so far
 * is released via ttm_pool_unpopulate().  Returns 0 or -errno. */
int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	/* Swapped ttms need their previous contents read back in. */
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
886247835Skib
887247835Skibvoid ttm_pool_unpopulate(struct ttm_tt *ttm)
888247835Skib{
889247835Skib	unsigned i;
890247835Skib
891247835Skib	for (i = 0; i < ttm->num_pages; ++i) {
892247835Skib		if (ttm->pages[i]) {
893247835Skib			ttm_mem_global_free_page(ttm->glob->mem_glob,
894247835Skib						 ttm->pages[i]);
895247835Skib			ttm_put_pages(&ttm->pages[i], 1,
896247835Skib				      ttm->page_flags,
897247835Skib				      ttm->caching_state);
898247835Skib		}
899247835Skib	}
900247835Skib	ttm->state = tt_unpopulated;
901247835Skib}
902247835Skib
#if 0
/* XXXKIB Linux debugfs dump of per-pool statistics, kept for reference
 * until a sysctl equivalent exists; not compiled. */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
#endif
926