/*	$NetBSD: radeon_ttm.c,v 1.26 2022/07/20 01:22:38 riastradh Exp $	*/

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_ttm.c,v 1.26 2022/07/20 01:22:38 riastradh Exp $");

#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>

#include "radeon_reg.h"
#include "radeon.h"

#ifdef __NetBSD__
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>
#include <uvm/uvm_param.h>
#include <drm/bus_dma_hacks.h>
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

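/* Map a TTM bo device back to the radeon device that embeds it. */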
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

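/*
 * Choose placements for evicting a buffer object: non-radeon BOs go to
 * system memory; VRAM BOs are pushed to CPU-invisible VRAM or GTT, and
 * everything else falls back to the CPU domain.
 */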
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

	if (radeon_ttm_tt_has_userptr(bo->ttm))
		return -EPERM;
#ifdef __NetBSD__
	struct drm_file *drm_file = filp->f_data;
	return drm_vma_node_verify_access(&rbo->tbo.base.vma_node, drm_file);
#else
	return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
					  filp->private_data);
#endif
}

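/*
 * Trivial move: no data copy is needed, just transfer the memory-region
 * bookkeeping from new_mem to the BO.
 */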
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

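/* Copy a BO between placements with the GPU copy ring and fence the move. */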
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

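/*
 * VRAM -> system: blit into a temporary GTT placement first, then let TTM
 * finish the move from GTT to system memory.
 */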
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

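/*
 * System -> VRAM: bind into a temporary GTT placement first, then blit
 * from GTT into VRAM.
 */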
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

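/*
 * Top-level TTM move hook: choose a null move, a GPU blit, or the memcpy
 * fallback depending on the placements and copy-ring state.
 */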
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
					ctx->no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
					    ctx->no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}

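/*
 * Fill in the bus address information for a memory region so that mappable
 * memory (AGP aperture, CPU-visible VRAM) can be mapped by TTM.
 */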
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
			KASSERTMSG((mem->bus.base & (PAGE_SIZE - 1)) == 0,
			    "agp aperture is not page-aligned: %" PRIx64 "",
			    (uint64_t)mem->bus.base);
			KASSERT((mem->bus.offset & (PAGE_SIZE - 1)) == 0);
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifndef __NetBSD__		/* alpha hose handled through bus_space(9) */
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap(mem->bus.base + mem->bus.offset,
						mem->bus.size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
#endif
		KASSERTMSG((mem->bus.base & (PAGE_SIZE - 1)) == 0,
		    "mc aperture is not page-aligned: %" PRIx64 "",
		    (uint64_t)mem->bus.base);
		KASSERT((mem->bus.offset & (PAGE_SIZE - 1)) == 0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;

	uint64_t			userptr;
#ifdef __NetBSD__
	struct vmspace			*usermm;
#else
	struct mm_struct		*usermm;
#endif
	uint32_t			userflags;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
#ifndef __NetBSD__
	unsigned pinned = 0, nents;
#endif
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
#ifndef __NetBSD__
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
#endif

#ifdef __NetBSD__
	if (curproc->p_vmspace != gtt->usermm)
		return -EPERM;
#else
	if (current->mm != gtt->usermm)
		return -EPERM;
#endif

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
#ifdef __NetBSD__
		/* XXX ???  TOCTOU, anyone?  */
		/* XXX should do range_test */
		struct vm_map_entry *entry;
		bool ok;
		vm_map_lock_read(&gtt->usermm->vm_map);
		ok = uvm_map_lookup_entry(&gtt->usermm->vm_map,
		    (vaddr_t)gtt->userptr, &entry);
		if (ok)
			ok = !UVM_ET_ISOBJ(entry) && end <= entry->end;
		vm_map_unlock_read(&gtt->usermm->vm_map);
		if (!ok)
			return -EPERM;
#else
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
#endif
	}

#ifdef __NetBSD__
	struct iovec iov = {
		.iov_base = (void *)(vaddr_t)gtt->userptr,
		.iov_len = ttm->num_pages << PAGE_SHIFT,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = 0,
		.uio_resid = ttm->num_pages << PAGE_SHIFT,
		.uio_rw = (write ? UIO_READ : UIO_WRITE), /* XXX ??? */
		.uio_vmspace = gtt->usermm,
	};
	unsigned long i;

	/* Wire the relevant part of the user's address space.  */
	/* XXX What happens if user does munmap?  */
	/* XXX errno NetBSD->Linux */
	r = -uvm_vslock(gtt->usermm, (void *)(vaddr_t)gtt->userptr,
	    ttm->num_pages << PAGE_SHIFT,
	    (write ? VM_PROT_WRITE : VM_PROT_READ)); /* XXX ??? */
	if (r)
		goto fail0;

	/* Load it up for DMA.  */
	/* XXX errno NetBSD->Linux */
	r = -bus_dmamap_load_uio(rdev->ddev->dmat, gtt->ttm.dma_address, &uio,
	    BUS_DMA_WAITOK);
	if (r)
		goto fail1;

	/* Get each of the pages as ttm requests.  */
	for (i = 0; i < ttm->num_pages; i++) {
		vaddr_t va = (vaddr_t)gtt->userptr + (i << PAGE_SHIFT);
		paddr_t pa;
		struct vm_page *vmp;

		if (!pmap_extract(gtt->usermm->vm_map.pmap, va, &pa)) {
			r = -EFAULT;
			goto fail2;
		}
		vmp = PHYS_TO_VM_PAGE(pa);
		ttm->pages[i] = container_of(vmp, struct page, p_vmp);
	}

	/* Success!  */
	return 0;

fail2:	while (i --> 0)
		ttm->pages[i] = NULL; /* paranoia */
	bus_dmamap_unload(rdev->ddev->dmat, gtt->ttm.dma_address);
fail1:	uvm_vsunlock(gtt->usermm, (void *)(vaddr_t)gtt->userptr,
	    ttm->num_pages << PAGE_SHIFT);
fail0:	return r;
#else
	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
#endif
}

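/*
 * Tear down a userptr binding: unload the DMA map and unwire the pages
 * (NetBSD), or unmap the sg table and dirty/release the pages (Linux).
 */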
static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
#ifdef __NetBSD__
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;

	bus_dmamap_unload(rdev->ddev->dmat, gtt->ttm.dma_address);
	uvm_vsunlock(gtt->usermm, (void *)(vaddr_t)gtt->userptr,
	    ttm->num_pages << PAGE_SHIFT);
#else
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
#endif
}

static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->userptr) {
		radeon_ttm_tt_pin_userptr(ttm);
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(ttm);

	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

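/* Allocate the per-BO TTM; AGP systems use the generic AGP TTM instead. */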
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
					 page_flags);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
	if (!ttm || ttm->func != &radeon_backend_func)
		return NULL;
	return (struct radeon_ttm_tt *)ttm;
}

static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
			struct ttm_operation_ctx *ctx)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
#if !defined(__NetBSD__) || IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev;
#endif
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
#ifdef __NetBSD__
		ttm->sg = NULL;
#else
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;
#endif

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
#ifdef __NetBSD__
		int r = drm_prime_bus_dmamap_load_sgt(ttm->bdev->dmat,
		    gtt->ttm.dma_address, ttm->sg);
		if (r)
			return r;
#else
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
#endif
		ttm->state = tt_unbound;
		return 0;
	}

#if !defined(__NetBSD__) || IS_ENABLED(CONFIG_AGP)
	rdev = radeon_get_rdev(ttm->bdev);
#endif
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm, ctx);
	}
#endif

#ifdef __NetBSD__
	/* XXX errno NetBSD->Linux */
	return ttm_bus_dma_populate(&gtt->ttm);
#else

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
	}
#endif

	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
#endif
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
#if !defined(__NetBSD__) || IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev;
#endif
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

#ifdef __NetBSD__
	if (slave && ttm->sg) {
		bus_dmamap_unload(ttm->bdev->dmat, gtt->ttm.dma_address);
	}
#endif
	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

#if !defined(__NetBSD__) || IS_ENABLED(CONFIG_AGP)
	rdev = radeon_get_rdev(ttm->bdev);
#endif
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef __NetBSD__
	ttm_bus_dma_unpopulate(&gtt->ttm);
	return;
#else

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
#endif
}

#ifdef __NetBSD__
static void radeon_ttm_tt_swapout(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = container_of(ttm, struct radeon_ttm_tt,
	    ttm.ttm);
	struct ttm_dma_tt *ttm_dma = &gtt->ttm;

	ttm_bus_dma_swapout(ttm_dma);
}

static int	radeon_ttm_fault(struct uvm_faultinfo *, vaddr_t,
		    struct vm_page **, int, int, vm_prot_t, int);

static const struct uvm_pagerops radeon_uvm_ops = {
	.pgo_reference = &ttm_bo_uvm_reference,
	.pgo_detach = &ttm_bo_uvm_detach,
	.pgo_fault = &radeon_ttm_fault,
};
#endif

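/* Record the userptr address, owning address space, and access flags. */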
int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
#ifdef __NetBSD__
	gtt->usermm = curproc->p_vmspace;
#else
	gtt->usermm = current->mm;
#endif
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
#ifdef __NetBSD__
	.ttm_tt_swapout = &radeon_ttm_tt_swapout,
	.ttm_uvm_ops = &radeon_uvm_ops,
#endif
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       &radeon_bo_driver,
#ifdef __NetBSD__
			       rdev->ddev->bst,
			       rdev->ddev->dmat,
#else
			       rdev->ddev->anon_inode->i_mapping,
#endif
			       rdev->ddev->vma_offset_manager,
			       dma_addressing_limited(pci_dev_dev(rdev->pdev)));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
				rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
				rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

#ifdef __NetBSD__

static int
radeon_ttm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct radeon_device *const rdev = radeon_get_rdev(bo->bdev);
	int error;

	KASSERT(rdev != NULL);
	down_read(&rdev->pm.mclk_lock);
	error = ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx,
	    access_type, flags);
	up_read(&rdev->pm.mclk_lock);

	return error;
}

int
radeon_mmap_object(struct drm_device *dev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	struct radeon_device *rdev = dev->dev_private;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));

	if (__predict_false(rdev == NULL))	/* XXX How?? */
		return -EINVAL;

	return ttm_bo_mmap_object(&rdev->mman.bdev, offset, size, prot,
	    uobjp, uoffsetp, file);
}

#else

static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	vm_fault_t ret;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;

	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	ret = ttm_bo_vm_fault(vmf);
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static struct vm_operations_struct radeon_ttm_vm_ops = {
	.fault = radeon_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r;
	struct drm_file *file_priv = filp->private_data;
	struct radeon_device *rdev = file_priv->minor->dev->dev_private;

	if (rdev == NULL)
		return -EINVAL;

	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0))
		return r;

	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#endif	/* __NetBSD__ */

#if defined(CONFIG_DEBUG_FS)

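/* Dump the state of a TTM memory manager (VRAM or GTT) into debugfs. */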
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int*)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}


static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

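/*
 * Read VRAM contents one dword at a time through the MM_INDEX/MM_DATA
 * register window.
 */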
static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

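/* Read back GTT contents through the CPU mappings of the GART pages. */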
static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *root = minor->debugfs_root;

	rdev->mman.vram = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO,
					      root, rdev,
					      &radeon_ttm_vram_fops);

	rdev->mman.gtt = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO,
					     root, rdev, &radeon_ttm_gtt_fops);

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}