1/* $NetBSD: qxl_kms.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $ */ 2 3/* 4 * Copyright 2013 Red Hat Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qxl_kms.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $");

#include <linux/io-mapping.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/* Driver-wide log verbosity knob (read by the qxl debug macros). */
int qxl_log_level;

/*
 * qxl_check_device: sanity-check the device ROM that was just ioremap'd.
 *
 * Verifies the ROM magic, logs the device/ROM parameters, and records the
 * size of surface 0 (the primary framebuffer area) as qdev->vram_size.
 *
 * Returns true if the ROM looks like a QXL ROM, false otherwise.
 */
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;

	/* 0x4f525851 is ASCII "QXRO" when read as little-endian bytes. */
	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
	return true;
}

/*
 * setup_hw_slot: program one memory slot into the device.
 *
 * Writes the slot's physical address range into the shared RAM header and
 * issues the memslot-add I/O command for the slot's device-side id
 * (rom->slots_start + slot->index).
 */
static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
}

/*
 * setup_slot: initialize a driver-side memslot record and register it with
 * the hardware.
 *
 * Fills in *slot from the arguments, programs it via setup_hw_slot(), then
 * precomputes slot->high_bits: the slot id and the ROM's current slot
 * generation packed into the topmost (slot_id_bits + slot_gen_bits) bits of
 * a 64-bit value.  NOTE(review): presumably these high bits are OR'd onto
 * guest physical addresses handed to the device so it can identify the
 * slot; confirm against the address-encoding helpers in qxl_drv.h.
 */
static void setup_slot(struct qxl_device *qdev,
		       struct qxl_memslot *slot,
		       unsigned int slot_index,
		       const char *slot_name,
		       unsigned long start_phys_addr,
		       unsigned long size)
{
	uint64_t high_bits;

	slot->index = slot_index;
	slot->name = slot_name;
	slot->start_phys_addr = start_phys_addr;
	slot->size = size;

	setup_hw_slot(qdev, slot);

	/* Snapshot the generation the slot was registered under. */
	slot->generation = qdev->rom->slot_generation;
	high_bits = (qdev->rom->slots_start + slot->index)
		<< qdev->rom->slot_gen_bits;
	high_bits |= slot->generation;
	/* Shift the packed (id, generation) pair up to the top of 64 bits. */
	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
	slot->high_bits = high_bits;

	/*
	 * NOTE(review): slot->gpu_offset is printed here but never assigned
	 * in this function; it must be set elsewhere before this log line is
	 * meaningful.
	 */
	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx, gpu_offset 0x%lx\n",
		 slot->index, slot->name,
		 (unsigned long)slot->start_phys_addr,
		 (unsigned long)slot->size,
		 (unsigned long)slot->gpu_offset);
}

/*
 * qxl_reinit_memslots: re-program both memslots into the hardware.
 *
 * Re-issues the memslot-add commands from the already-populated driver
 * records; presumably used after a device reset/resume invalidates the
 * device-side slot table — confirm against the callers.
 */
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, &qdev->main_slot);
	setup_hw_slot(qdev, &qdev->surfaces_slot);
}

/* Workqueue callback: run release-ring garbage collection. */
static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);

	qxl_garbage_collect(qdev);
}

/*
 * qxl_device_init: bring up a QXL device.
 *
 * Initializes the embedded drm_device, maps the PCI BARs as used by this
 * code (BAR 0 = VRAM, BAR 2 = ROM, BAR 3 = I/O ports, BAR 4 = optional
 * 64-bit surface memory with BAR 1 as the 32-bit fallback), validates the
 * ROM, creates the command/cursor/release rings, resets the device, sets up
 * the interrupt handler and the two memory slots, and prepares the GC work
 * item.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto-unwind chain at the bottom.
 *
 * NOTE(review): the surface_mapping fallback below reads
 * qdev->surface_mapping before this function ever assigns it when BAR 4 is
 * absent — this relies on the caller handing in a zero-initialized qdev;
 * confirm at the allocation site.
 */
int qxl_device_init(struct qxl_device *qdev,
		    struct drm_driver *drv,
		    struct pci_dev *pdev)
{
	int r, sb;

	r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
	if (r) {
		pr_err("Unable to init drm dev");
		goto error;
	}

	qdev->ddev.pdev = pdev;
	pci_set_drvdata(pdev, &qdev->ddev);
	qdev->ddev.dev_private = qdev;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	qxl_gem_init(qdev);

	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	/* Write-combining mapping of the whole VRAM BAR. */
	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	if (!qdev->vram_mapping) {
		pr_err("Unable to create vram_mapping");
		r = -ENOMEM;
		goto error;
	}

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
		if (!qdev->surface_mapping) {
			pr_err("Unable to create surface_mapping");
			r = -ENOMEM;
			goto vram_mapping_free;
		}
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		      (unsigned long long)qdev->vram_base,
		      (unsigned long long)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (unsigned long long)qdev->surfaceram_base,
		      (unsigned long long)pci_resource_end(pdev, sb),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024,
		      (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		r = -ENOMEM;
		goto surface_mapping_free;
	}

	if (!qxl_check_device(qdev)) {
		r = -ENODEV;
		goto rom_unmap;
	}

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		goto rom_unmap;
	}

	/* The shared RAM header lives inside VRAM at a ROM-given offset. */
	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));
	if (!qdev->ram_header) {
		DRM_ERROR("Unable to ioremap RAM header\n");
		r = -ENOMEM;
		goto bo_fini;
	}

	/* Guest->host command ring; kicked via the NOTIFY_CMD I/O port. */
	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);
	if (!qdev->command_ring) {
		DRM_ERROR("Unable to create command ring\n");
		r = -ENOMEM;
		goto ram_header_unmap;
	}

	qdev->cursor_ring = qxl_ring_create(
		&(qdev->ram_header->cursor_ring_hdr),
		sizeof(struct qxl_command),
		QXL_CURSOR_RING_SIZE,
		qdev->io_base + QXL_IO_NOTIFY_CMD,
		false,
		&qdev->cursor_event);

	if (!qdev->cursor_ring) {
		DRM_ERROR("Unable to create cursor ring\n");
		r = -ENOMEM;
		goto command_ring_free;
	}

	/* Host->guest release ring: 64-bit release ids, no notify port. */
	qdev->release_ring = qxl_ring_create(
		&(qdev->ram_header->release_ring_hdr),
		sizeof(uint64_t),
		QXL_RELEASE_RING_SIZE, 0, true,
		NULL);

	if (!qdev->release_ring) {
		DRM_ERROR("Unable to create release ring\n");
		r = -ENOMEM;
		goto cursor_ring_free;
	}

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r) {
		DRM_ERROR("Unable to init qxl irq\n");
		goto release_ring_free;
	}

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	setup_slot(qdev, &qdev->main_slot, 0, "main",
		   (unsigned long)qdev->vram_base,
		   (unsigned long)qdev->rom->ram_header_offset);
	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
		   (unsigned long)qdev->surfaceram_base,
		   (unsigned long)qdev->surfaceram_size);

	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	return 0;

	/* Unwind chain: each label undoes one acquisition, latest first. */
release_ring_free:
	qxl_ring_free(qdev->release_ring);
cursor_ring_free:
	qxl_ring_free(qdev->cursor_ring);
command_ring_free:
	qxl_ring_free(qdev->command_ring);
ram_header_unmap:
	iounmap(qdev->ram_header);
bo_fini:
	qxl_bo_fini(qdev);
rom_unmap:
	iounmap(qdev->rom);
surface_mapping_free:
	io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
	io_mapping_free(qdev->vram_mapping);
error:
	return r;
}

/*
 * qxl_device_fini: tear down a device initialized by qxl_device_init().
 *
 * Drops the cached release BOs, drains pending GC work, then frees rings,
 * GEM/BO state, and the BAR/ROM mappings in roughly the reverse order of
 * initialization.  qdev->rom is NULLed so stale ROM pointers are caught.
 */
void qxl_device_fini(struct qxl_device *qdev)
{
	qxl_bo_unref(&qdev->current_release_bo[0]);
	qxl_bo_unref(&qdev->current_release_bo[1]);
	/* Make sure qxl_gc_work is not running before freeing the rings. */
	flush_work(&qdev->gc_work);
	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_gem_fini(qdev);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
}