/*	$NetBSD: ttm_tt.c,v 1.19 2022/06/26 17:53:06 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_tt.c,v 1.19 2022/06/26 17:53:06 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/bus_dma_hacks.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *);

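/**
 * Allocates storage for both the page pointers and the DMA addresses
 * of a ttm_dma_tt: on NetBSD the DMA addresses take the form of a bus
 * DMA map; elsewhere both arrays share a single allocation.
 */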
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
#ifdef __NetBSD__
	int r;

	/* Create array of pages at ttm->ttm.pages.  */
	r = ttm_tt_alloc_page_directory(&ttm->ttm);
	if (r)
		return r;

	/* Create bus DMA map at ttm->dma_address.  */
	r = ttm_sg_tt_alloc_page_directory(ttm);
	if (r) {
		kvfree(ttm->ttm.pages);
		ttm->ttm.pages = NULL;
		return r;
	}

	/* Success!  */
	return 0;
#else
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
#endif
}

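/**
 * Allocates DMA address storage only, for ttms whose pages are
 * supplied externally (e.g. by an sg table): a bus DMA map on NetBSD,
 * an array of DMA addresses otherwise.
 */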
static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
#ifdef __NetBSD__
	ttm->dma_address = NULL;
	/* XXX errno NetBSD->Linux */
	return -bus_dmamap_create(ttm->ttm.bdev->dmat,
	    ttm->ttm.num_pages << PAGE_SHIFT, ttm->ttm.num_pages, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &ttm->dma_address);
#else
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
#endif
}

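/**
 * Moves a single page to the requested caching state, returning it to
 * writeback first if it is leaving the default cached state.  A no-op
 * on NetBSD.
 */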
static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
#ifdef __NetBSD__
	return 0;
#else
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
#endif
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

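/**
 * Translates TTM placement flags into a caching state and applies it
 * to every page of the ttm.
 */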
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

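/**
 * Unbinds and unpopulates the ttm, drops its swap storage (on Linux,
 * unless the storage is persistent), and hands the ttm to the driver's
 * destroy hook.
 */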
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

#ifndef __NetBSD__
	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
#endif
	ttm->func->destroy(ttm);
}

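/**
 * Fills in the fields common to all ttm flavours from the buffer
 * object.  On NetBSD this also creates the uvm aobj that serves as the
 * ttm's swap storage.
 */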
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
#ifdef __NetBSD__
	WARN(bo->num_pages == 0,
	    "zero-size allocation in %s, please file a NetBSD PR",
	    __func__);	/* paranoia -- can't prove in five minutes */
	ttm->swap_storage = uao_create(PAGE_SIZE * MAX(1, bo->num_pages), 0);
	uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(ttm->bdev->dmat));
#else
	ttm->swap_storage = NULL;
#endif
	ttm->sg = bo->sg;
}

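/**
 * Initializes a ttm and allocates its page directory.
 */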
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

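/**
 * Releases the storage allocated by ttm_tt_init, including the swap
 * uvm aobj on NetBSD.
 */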
void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
#ifdef __NetBSD__
	uao_detach(ttm->swap_storage);
	ttm->swap_storage = NULL;
#endif
}
EXPORT_SYMBOL(ttm_tt_fini);

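/**
 * Initializes a ttm_dma_tt, allocating both the page directory and the
 * DMA address storage.
 */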
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

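/**
 * Initializes a ttm_dma_tt; SG-backed objects get DMA address storage
 * only, everything else gets a page directory as well.
 */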
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

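/**
 * Releases the storage allocated by ttm_dma_tt_init or ttm_sg_tt_init.
 */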
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

#ifdef __NetBSD__
	if (ttm_dma->dma_address) {
		bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
		ttm_dma->dma_address = NULL;
	}
	ttm_tt_fini(ttm);
#else
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
#endif
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

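/**
 * Unbinds the ttm via the driver's unbind hook if it is currently
 * bound.
 */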
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret __diagused;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

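/**
 * Populates the ttm if necessary and binds it to the given memory
 * region via the driver's bind hook.
 */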
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

#ifdef __NetBSD__
/*
 * ttm_tt_wire(ttm)
 *
 *	Wire the uvm pages of ttm and fill the ttm page array.  ttm
 *	must be unpopulated, and must be marked swapped.  This does not
 *	change either state -- the caller is expected to include it
 *	among other operations for such a state transition.
 */
int
ttm_tt_wire(struct ttm_tt *ttm)
{
	struct uvm_object *uobj = ttm->swap_storage;
	struct vm_page *vm_page;
	unsigned i;
	int error;

	KASSERTMSG((ttm->state == tt_unpopulated),
	    "ttm_tt %p must be unpopulated for wiring, but state=%d",
	    ttm, (int)ttm->state);
	KASSERT(ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
	KASSERT(uobj != NULL);

	error = uvm_obj_wirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT),
	    NULL);
	if (error)
		/* XXX errno NetBSD->Linux */
		return -error;

	rw_enter(uobj->vmobjlock, RW_READER);
	for (i = 0; i < ttm->num_pages; i++) {
		vm_page = uvm_pagelookup(uobj, ptoa(i));
		ttm->pages[i] = container_of(vm_page, struct page, p_vmp);
	}
	rw_exit(uobj->vmobjlock);

	/* Success!  */
	return 0;
}

/*
 * ttm_tt_unwire(ttm)
 *
 *	Nullify the ttm page array and unwire the uvm pages of ttm.
 *	ttm must be unbound and must be marked swapped.  This does not
 *	change either state -- the caller is expected to include it
 *	among other operations for such a state transition.
 */
void
ttm_tt_unwire(struct ttm_tt *ttm)
{
	struct uvm_object *uobj = ttm->swap_storage;
	unsigned i;

	KASSERTMSG((ttm->state == tt_unbound),
	    "ttm_tt %p must be unbound for unwiring, but state=%d",
	    ttm, (int)ttm->state);
	KASSERT(!ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
	KASSERT(uobj != NULL);

	uvm_obj_unwirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT));
	for (i = 0; i < ttm->num_pages; i++)
		ttm->pages[i] = NULL;
}
#endif

#ifndef __NetBSD__
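/**
 * Copies the ttm's pages back in from shmem swap storage, then drops
 * the storage unless it is persistent.
 */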
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}
#endif

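/**
 * Copies the ttm's pages out to swap storage and unpopulates the ttm:
 * the driver's swapout hook does the work on NetBSD; elsewhere the
 * pages are copied into a shmem file.
 */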
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
#ifdef __NetBSD__

	KASSERTMSG((ttm->state == tt_unpopulated || ttm->state == tt_unbound),
	    "ttm_tt %p must be unpopulated or unbound for swapout,"
	    " but state=%d",
	    ttm, (int)ttm->state);
	KASSERTMSG((ttm->caching_state == tt_cached),
	    "ttm_tt %p must be cached for swapout, but caching_state=%d",
	    ttm, (int)ttm->caching_state);
	KASSERT(persistent_swap_storage == NULL);

	ttm->bdev->driver->ttm_tt_swapout(ttm);
	return 0;
#else
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
#endif
}

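/**
 * Points each page's mapping field at the device's address space.
 * Not needed on NetBSD.
 */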
static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
#ifndef __NetBSD__
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
#endif
}

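/**
 * Allocates backing pages for the ttm, via the driver's populate hook
 * if it has one, otherwise through the generic page pool (which the
 * NetBSD port does not provide).
 */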
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
#ifdef __NetBSD__
		panic("no ttm population");
#else
		ret = ttm_pool_populate(ttm, ctx);
#endif
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}

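/**
 * Resets each page's mapping and index fields before the pages are
 * released.  Not needed on NetBSD.
 */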
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
#ifndef __NetBSD__
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
#endif
}

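/**
 * Releases the ttm's backing pages via the driver's unpopulate hook or
 * the generic page pool.
 */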
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
#ifdef __NetBSD__
		panic("no ttm pool unpopulation");
#else
		ttm_pool_unpopulate(ttm);
#endif
}