/* $NetBSD: drm_memory.c,v 1.3 2021/12/18 23:44:57 riastradh Exp $ */

/*
 * \file drm_memory.c
 * Memory management wrappers for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_memory.c,v 1.3 2021/12/18 23:44:57 riastradh Exp $");

#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <xen/xen.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>

#include "drm_legacy.h"

#if IS_ENABLED(CONFIG_AGP)

#ifdef HAVE_PAGE_AGP
# include <asm/agp.h>
#else
# ifdef __powerpc__
#  define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
# else
#  define PAGE_AGP PAGE_KERNEL
# endif
#endif

static void *agp_remap(unsigned long offset, unsigned long size,
		       struct drm_device *dev)
{
	unsigned long i, num_pages =
	    PAGE_ALIGN(size) / PAGE_SIZE;
	struct drm_agp_mem *agpmem;
	struct page **page_map;
	struct page **phys_page_map;
	void *addr;

	size = PAGE_ALIGN(size);

#ifdef __alpha__
	offset -= dev->hose->mem_space->start;
#endif

	list_for_each_entry(agpmem, &dev->agp->memory, head)
		if (agpmem->bound <= offset
		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
		    (offset + size))
			break;
	if (&agpmem->head == &dev->agp->memory)
		return NULL;

	/*
	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
	 * the CPU do not get remapped by the GART. We fix this by using the kernel's
	 * page-table instead (that's probably faster anyhow...).
	 */
	/* note: use vmalloc() because num_pages could be large... */
	page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
	if (!page_map)
		return NULL;

	phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
	for (i = 0; i < num_pages; ++i)
		page_map[i] = phys_page_map[i];
	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
	vfree(page_map);

	return addr;
}

/** Wrapper around agp_free_memory() */
void drm_free_agp(struct agp_memory *handle, int pages)
{
	agp_free_memory(handle);
}

/** Wrapper around agp_bind_memory() */
int drm_bind_agp(struct agp_memory *handle, unsigned int start)
{
	return agp_bind_memory(handle, start);
}

/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(struct agp_memory *handle)
{
	return agp_unbind_memory(handle);
}

#else /* CONFIG_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
			      struct drm_device *dev)
{
	return NULL;
}

#endif /* CONFIG_AGP */

/** Map a legacy map's bus-address range into kernel virtual address space,
 *  falling back to agp_remap() when the CPU cannot access the AGP aperture
 *  directly. */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		map->handle = agp_remap(map->offset, map->size, dev);
	else
		map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL(drm_legacy_ioremap);

/** Same as drm_legacy_ioremap(), but requests a write-combining mapping in
 *  the non-AGP case. */
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		map->handle = agp_remap(map->offset, map->size, dev);
	else
		map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL(drm_legacy_ioremap_wc);

/** Undo drm_legacy_ioremap()/drm_legacy_ioremap_wc(), using vunmap() or
 *  iounmap() to match how the mapping was created. */
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
	if (!map->handle || !map->size)
		return;

	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		vunmap(map->handle);
	else
		iounmap(map->handle);
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);

/** Return true if a device limited to dma_bits bits of DMA address cannot
 *  reach all of I/O memory, or if bounce buffering is required for other
 *  reasons (Xen PV domains, active memory encryption). */
bool drm_need_swiotlb(int dma_bits)
{
	struct resource *tmp;
	resource_size_t max_iomem = 0;

	/*
	 * Xen paravirtual hosts require swiotlb regardless of requested dma
	 * transfer size.
	 *
	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
	 *       allocator used in ttm_dma_populate() instead of
	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
	 *       Xen it leads to swiotlb buffer exhaustion.
	 */
	if (xen_pv_domain())
		return true;

	/*
	 * Enforce dma_alloc_coherent when memory encryption is active as well
	 * for the same reasons as for Xen paravirtual hosts.
	 */
	if (mem_encrypt_active())
		return true;

	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
		max_iomem = max(max_iomem, tmp->end);
	}

	return max_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);
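
/*
 * Illustrative usage sketch (not compiled, guarded by "#if 0"): how a legacy
 * driver might pair drm_legacy_ioremap_wc() with drm_legacy_ioremapfree()
 * for a framebuffer map.  The example_* function names and the way the base
 * address and size are passed in are hypothetical; only the drm_local_map
 * fields and the drm_legacy_ioremap* helpers above are taken from this file.
 */
#if 0
static int example_map_fb(struct drm_device *dev, struct drm_local_map *map,
			  resource_size_t base, unsigned long size)
{
	map->offset = base;		/* bus address of the aperture */
	map->size = size;
	map->type = _DRM_FRAME_BUFFER;	/* non-AGP, so ioremap_wc() is used */

	drm_legacy_ioremap_wc(map, dev);
	if (map->handle == NULL)
		return -ENOMEM;
	return 0;
}

static void example_unmap_fb(struct drm_device *dev, struct drm_local_map *map)
{
	/* Picks vunmap() or iounmap() to match how the map was created. */
	drm_legacy_ioremapfree(map, dev);
	map->handle = NULL;
}
#endif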