/* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
/**
 * \file mach64_dma.c
 * DMA support for mach64 (Rage Pro) driver
 *
 * \author Gareth Hughes <gareth@valinux.com>
 * \author Frank C. Earl <fearl@airmail.net>
 * \author Leif Delgass <ldelgass@retinalburn.net>
 * \author José Fonseca <j_r_fonseca@yahoo.co.uk>
 */

/*-
 * Copyright 2000 Gareth Hughes
 * Copyright 2002 Frank C. Earl
 * Copyright 2002-2003 Leif Delgass
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: releng/10.3/sys/dev/drm/mach64_dma.c 260100 2013-12-30 20:27:58Z dim $"); 39 40#include "dev/drm/drmP.h" 41#include "dev/drm/drm.h" 42#include "dev/drm/mach64_drm.h" 43#include "dev/drm/mach64_drv.h" 44 45/*******************************************************************/ 46/** \name Engine, FIFO control */ 47/*@{*/ 48 49/** 50 * Waits for free entries in the FIFO. 51 * 52 * \note Most writes to Mach64 registers are automatically routed through 53 * command FIFO which is 16 entry deep. Prior to writing to any draw engine 54 * register one has to ensure that enough FIFO entries are available by calling 55 * this function. Failure to do so may cause the engine to lock. 56 * 57 * \param dev_priv pointer to device private data structure. 58 * \param entries number of free entries in the FIFO to wait for. 59 * 60 * \returns zero on success, or -EBUSY if the timeout (specificed by 61 * drm_mach64_private::usec_timeout) occurs. 62 */ 63int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries) 64{ 65 int slots = 0, i; 66 67 for (i = 0; i < dev_priv->usec_timeout; i++) { 68 slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK); 69 if (slots <= (0x8000 >> entries)) 70 return 0; 71 DRM_UDELAY(1); 72 } 73 74 DRM_INFO("failed! slots=%d entries=%d\n", slots, entries); 75 return -EBUSY; 76} 77 78/** 79 * Wait for the draw engine to be idle. 80 */ 81int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv) 82{ 83 int i, ret; 84 85 ret = mach64_do_wait_for_fifo(dev_priv, 16); 86 if (ret < 0) 87 return ret; 88 89 for (i = 0; i < dev_priv->usec_timeout; i++) { 90 if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) 91 return 0; 92 DRM_UDELAY(1); 93 } 94 95 DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT)); 96 mach64_dump_ring_info(dev_priv); 97 return -EBUSY; 98} 99 100/** 101 * Wait for free entries in the ring buffer. 
 *
 * The Mach64 bus master can be configured to act as a virtual FIFO, using a
 * circular buffer (commonly referred as "ring buffer" in other drivers) with
 * pointers to engine commands. This allows the CPU to do other things while
 * the graphics engine is busy, i.e., DMA mode.
 *
 * This function should be called before writing new entries to the ring
 * buffer.
 *
 * \param dev_priv pointer to device private data structure.
 * \param n number of free entries in the ring buffer to wait for.
 *
 * \returns zero on success, or -EBUSY if the timeout (specified by
 * drm_mach64_private_t::usec_timeout) occurs.
 *
 * \sa mach64_dump_ring_info()
 */
int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	int i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		/* Refresh the software copy of head/space from the hardware. */
		mach64_update_ring_snapshot(dev_priv);
		if (ring->space >= n) {
			if (i > 0)
				DRM_DEBUG("%d usecs\n", i);
			return 0;
		}
		DRM_UDELAY(1);
	}

	/* FIXME: This is being ignored... */
	DRM_ERROR("failed!\n");
	mach64_dump_ring_info(dev_priv);
	return -EBUSY;
}

/**
 * Wait until all DMA requests have been processed...
 *
 * Unlike mach64_wait_ring(), the timeout counter is reset whenever the
 * ring head advances, so the timeout only triggers when the engine makes
 * no forward progress at all.
 *
 * \sa mach64_wait_ring()
 */
static int mach64_ring_idle(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	u32 head;
	int i;

	head = ring->head;
	i = 0;
	while (i < dev_priv->usec_timeout) {
		mach64_update_ring_snapshot(dev_priv);
		if (ring->head == ring->tail &&
		    !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
			if (i > 0)
				DRM_DEBUG("%d usecs\n", i);
			return 0;
		}
		/* Only count time while the head is stalled. */
		if (ring->head == head) {
			++i;
		} else {
			head = ring->head;
			i = 0;
		}
		DRM_UDELAY(1);
	}

	DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
	mach64_dump_ring_info(dev_priv);
	return -EBUSY;
}

/**
 * Reset the ring buffer descriptors.
 *
 * Releases all buffers still on the pending list, rewinds head/tail to the
 * start of the ring, and reprograms the hardware table pointer.
 *
 * \sa mach64_do_engine_reset()
 */
static void mach64_ring_reset(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;

	mach64_do_release_used_buffers(dev_priv);
	ring->head_addr = ring->start_addr;
	ring->head = ring->tail = 0;
	ring->space = ring->size;

	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);

	dev_priv->ring_running = 0;
}

/**
 * Ensure the all the queued commands will be processed.
 */
int mach64_do_dma_flush(drm_mach64_private_t *dev_priv)
{
	/* FIXME: It's not necessary to wait for idle when flushing
	 * we just need to ensure the ring will be completely processed
	 * in finite time without another ioctl
	 */
	return mach64_ring_idle(dev_priv);
}

/**
 * Stop all DMA activity.
 */
int mach64_do_dma_idle(drm_mach64_private_t *dev_priv)
{
	int ret;

	/* wait for completion */
	if ((ret = mach64_ring_idle(dev_priv)) < 0) {
		DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n",
			  MACH64_READ(MACH64_BM_GUI_TABLE),
			  dev_priv->ring.tail);
		return ret;
	}

	mach64_ring_stop(dev_priv);

	/* clean up after pass */
	mach64_do_release_used_buffers(dev_priv);
	return 0;
}

/**
 * Reset the engine.  This will stop the DMA if it is running.
 */
int mach64_do_engine_reset(drm_mach64_private_t *dev_priv)
{
	u32 tmp;

	DRM_DEBUG("\n");

	/* Kill off any outstanding DMA transfers.
	 */
	tmp = MACH64_READ(MACH64_BUS_CNTL);
	MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);

	/* Reset the GUI engine (high to low transition).
	 */
	tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
	MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
	/* Enable the GUI engine
	 */
	tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
	MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);

	/* ensure engine is not locked up by clearing any FIFO or HOST errors
	 * NOTE(review): 0x00a00000 sets bits 21 and 23 of BUS_CNTL; presumably
	 * the FIFO/HOST error-acknowledge bits -- confirm against the register
	 * reference before changing.
	 */
	tmp = MACH64_READ(MACH64_BUS_CNTL);
	MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);

	/* Once GUI engine is restored, disable bus mastering */
	MACH64_WRITE(MACH64_SRC_CNTL, 0);

	/* Reset descriptor ring */
	mach64_ring_reset(dev_priv);

	return 0;
}

/*@}*/


/*******************************************************************/
/** \name Debugging output */
/*@{*/

/**
 * Dump engine registers values.
 */
void mach64_dump_engine_info(drm_mach64_private_t *dev_priv)
{
	DRM_INFO("\n");
	if (!dev_priv->is_pci) {
		DRM_INFO("           AGP_BASE = 0x%08x\n",
			 MACH64_READ(MACH64_AGP_BASE));
		DRM_INFO("           AGP_CNTL = 0x%08x\n",
			 MACH64_READ(MACH64_AGP_CNTL));
	}
	DRM_INFO("     ALPHA_TST_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_ALPHA_TST_CNTL));
	DRM_INFO("\n");
	DRM_INFO("         BM_COMMAND = 0x%08x\n",
		 MACH64_READ(MACH64_BM_COMMAND));
	DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
		 MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
	DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
		 MACH64_READ(MACH64_BM_GUI_TABLE));
	DRM_INFO("          BM_STATUS = 0x%08x\n",
		 MACH64_READ(MACH64_BM_STATUS));
	DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
		 MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
	DRM_INFO("    BM_SYSTEM_TABLE = 0x%08x\n",
		 MACH64_READ(MACH64_BM_SYSTEM_TABLE));
	DRM_INFO("           BUS_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_BUS_CNTL));
	DRM_INFO("\n");
	/* DRM_INFO( "         CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
	DRM_INFO("        CLR_CMP_CLR = 0x%08x\n",
		 MACH64_READ(MACH64_CLR_CMP_CLR));
	DRM_INFO("       CLR_CMP_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_CLR_CMP_CNTL));
	/* DRM_INFO( "        CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
	DRM_INFO("     CONFIG_CHIP_ID = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_CHIP_ID));
	DRM_INFO("        CONFIG_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_CNTL));
	DRM_INFO("       CONFIG_STAT0 = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_STAT0));
	DRM_INFO("       CONFIG_STAT1 = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_STAT1));
	DRM_INFO("       CONFIG_STAT2 = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_STAT2));
	DRM_INFO("            CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
	DRM_INFO("  CUSTOM_MACRO_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
	DRM_INFO("\n");
	/* DRM_INFO( "           DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
	/* DRM_INFO( "           DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
	DRM_INFO("        DP_BKGD_CLR = 0x%08x\n",
		 MACH64_READ(MACH64_DP_BKGD_CLR));
	DRM_INFO("        DP_FRGD_CLR = 0x%08x\n",
		 MACH64_READ(MACH64_DP_FRGD_CLR));
	DRM_INFO("             DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
	DRM_INFO("       DP_PIX_WIDTH = 0x%08x\n",
		 MACH64_READ(MACH64_DP_PIX_WIDTH));
	DRM_INFO("             DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
	DRM_INFO("      DP_WRITE_MASK = 0x%08x\n",
		 MACH64_READ(MACH64_DP_WRITE_MASK));
	DRM_INFO("         DSP_CONFIG = 0x%08x\n",
		 MACH64_READ(MACH64_DSP_CONFIG));
	DRM_INFO("         DSP_ON_OFF = 0x%08x\n",
		 MACH64_READ(MACH64_DSP_ON_OFF));
	DRM_INFO("           DST_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_DST_CNTL));
	DRM_INFO("      DST_OFF_PITCH = 0x%08x\n",
		 MACH64_READ(MACH64_DST_OFF_PITCH));
	DRM_INFO("\n");
	/* DRM_INFO( "       EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
	DRM_INFO("       EXT_MEM_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_EXT_MEM_CNTL));
	DRM_INFO("\n");
	DRM_INFO("          FIFO_STAT = 0x%08x\n",
		 MACH64_READ(MACH64_FIFO_STAT));
	DRM_INFO("\n");
	DRM_INFO("      GEN_TEST_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_GEN_TEST_CNTL));
	/* DRM_INFO( "              GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
	DRM_INFO("   GUI_CMDFIFO_DATA = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
	DRM_INFO("  GUI_CMDFIFO_DEBUG = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
	DRM_INFO("           GUI_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_CNTL));
	DRM_INFO("           GUI_STAT = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_STAT));
	DRM_INFO("      GUI_TRAJ_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_TRAJ_CNTL));
	DRM_INFO("\n");
	DRM_INFO("          HOST_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_HOST_CNTL));
	DRM_INFO("           HW_DEBUG = 0x%08x\n",
		 MACH64_READ(MACH64_HW_DEBUG));
	DRM_INFO("\n");
	DRM_INFO("    MEM_ADDR_CONFIG = 0x%08x\n",
		 MACH64_READ(MACH64_MEM_ADDR_CONFIG));
	DRM_INFO("       MEM_BUF_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_MEM_BUF_CNTL));
	DRM_INFO("\n");
	DRM_INFO("           PAT_REG0 = 0x%08x\n",
		 MACH64_READ(MACH64_PAT_REG0));
	DRM_INFO("           PAT_REG1 = 0x%08x\n",
		 MACH64_READ(MACH64_PAT_REG1));
	DRM_INFO("\n");
	DRM_INFO("            SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
	DRM_INFO("           SC_RIGHT = 0x%08x\n",
		 MACH64_READ(MACH64_SC_RIGHT));
	DRM_INFO("             SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
	DRM_INFO("          SC_BOTTOM = 0x%08x\n",
		 MACH64_READ(MACH64_SC_BOTTOM));
	DRM_INFO("\n");
	DRM_INFO("      SCALE_3D_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_SCALE_3D_CNTL));
	DRM_INFO("       SCRATCH_REG0 = 0x%08x\n",
		 MACH64_READ(MACH64_SCRATCH_REG0));
	DRM_INFO("       SCRATCH_REG1 = 0x%08x\n",
		 MACH64_READ(MACH64_SCRATCH_REG1));
	DRM_INFO("         SETUP_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_SETUP_CNTL));
	DRM_INFO("           SRC_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_SRC_CNTL));
	DRM_INFO("\n");
	DRM_INFO("           TEX_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_TEX_CNTL));
	DRM_INFO("     TEX_SIZE_PITCH = 0x%08x\n",
		 MACH64_READ(MACH64_TEX_SIZE_PITCH));
	DRM_INFO("       TIMER_CONFIG = 0x%08x\n",
		 MACH64_READ(MACH64_TIMER_CONFIG));
	DRM_INFO("\n");
	DRM_INFO("             Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
	DRM_INFO("        Z_OFF_PITCH = 0x%08x\n",
		 MACH64_READ(MACH64_Z_OFF_PITCH));
	DRM_INFO("\n");
}

/* Number of dwords of context to print around interesting addresses. */
#define MACH64_DUMP_CONTEXT	3

/**
 * Used by mach64_dump_ring_info() to dump the contents of the current buffer
 * pointed by the ring head.
 *
 * Only dwords near the start/end of the buffer and near the engine's current
 * system-memory read address are printed; runs in between are elided.
 */
static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv,
				 struct drm_buf *buf)
{
	u32 addr = GETBUFADDR(buf);
	u32 used = buf->used >> 2;
	u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
	u32 *p = GETBUFPTR(buf);
	int skipped = 0;

	DRM_INFO("buffer contents:\n");

	while (used) {
		u32 reg, count;

		/* Each entry is a header dword: count in the high word,
		 * register offset in the low word. */
		reg = le32_to_cpu(*p++);
		if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
		    (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
		     addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
		    addr >=
		    GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
			DRM_INFO("%08x:  0x%08x\n", addr, reg);
		}
		addr += 4;
		used--;

		count = (reg >> 16) + 1;
		reg = reg & 0xffff;
		reg = MMSELECT(reg);
		while (count && used) {
			if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
			    (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
			     addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
			    addr >=
			    GETBUFADDR(buf) + buf->used -
			    MACH64_DUMP_CONTEXT * 4) {
				DRM_INFO("%08x:    0x%04x = 0x%08x\n", addr,
					 reg, le32_to_cpu(*p));
				skipped = 0;
			} else {
				if (!skipped) {
					DRM_INFO(" ...\n");
					skipped = 1;
				}
			}
			p++;
			addr += 4;
			used--;

			reg += 4;
			count--;
		}
	}

	DRM_INFO("\n");
}

/**
 * Dump the ring state and contents, including the contents of the buffer being
 * processed by the graphics engine.
 */
void mach64_dump_ring_info(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	int i, skipped;

	DRM_INFO("\n");

	DRM_INFO("ring contents:\n");
	DRM_INFO("  head_addr: 0x%08x head: %u tail: %u\n\n",
		 ring->head_addr, ring->head, ring->tail);

	/* Print only the descriptors near the ring start/end and near the
	 * current head and tail; elide the rest. */
	skipped = 0;
	for (i = 0; i < ring->size / sizeof(u32); i += 4) {
		if (i <= MACH64_DUMP_CONTEXT * 4 ||
		    i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
		    (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
		     i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
		    (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
		     i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
			DRM_INFO("  0x%08x:  0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
				 (u32)(ring->start_addr + i * sizeof(u32)),
				 le32_to_cpu(((u32 *) ring->start)[i + 0]),
				 le32_to_cpu(((u32 *) ring->start)[i + 1]),
				 le32_to_cpu(((u32 *) ring->start)[i + 2]),
				 le32_to_cpu(((u32 *) ring->start)[i + 3]),
				 i == ring->head ? " (head)" : "",
				 i == ring->tail ? " (tail)" : "");
			skipped = 0;
		} else {
			if (!skipped) {
				DRM_INFO(" ...\n");
				skipped = 1;
			}
		}
	}

	DRM_INFO("\n");

	/* If the head points at a valid descriptor, find the pending buffer
	 * it refers to (second dword is the buffer bus address) and dump it. */
	if (ring->head < ring->size / sizeof(u32)) {
		struct list_head *ptr;
		u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);

		list_for_each(ptr, &dev_priv->pending) {
			drm_mach64_freelist_t *entry =
			    list_entry(ptr, drm_mach64_freelist_t, list);
			struct drm_buf *buf = entry->buf;

			u32 buf_addr = GETBUFADDR(buf);

			if (buf_addr <= addr && addr < buf_addr + buf->used)
				mach64_dump_buf_info(dev_priv, buf);
		}
	}

	DRM_INFO("\n");
	DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
		 MACH64_READ(MACH64_BM_GUI_TABLE));
	DRM_INFO("\n");
	DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
		 MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
	DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
		 MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
	DRM_INFO("         BM_COMMAND = 0x%08x\n",
		 MACH64_READ(MACH64_BM_COMMAND));
	DRM_INFO("\n");
	DRM_INFO("          BM_STATUS = 0x%08x\n",
		 MACH64_READ(MACH64_BM_STATUS));
	DRM_INFO("           BUS_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_BUS_CNTL));
	DRM_INFO("          FIFO_STAT = 0x%08x\n",
		 MACH64_READ(MACH64_FIFO_STAT));
	DRM_INFO("           GUI_STAT = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_STAT));
	DRM_INFO("           SRC_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_SRC_CNTL));
}

/*@}*/


/*******************************************************************/
/** \name DMA descriptor ring macros */
/*@{*/

/**
 * Remove the end mark from the ring's old tail position.
 *
 * It should be called after calling mach64_set_dma_eol to mark the ring's new
 * tail position.
 *
 * We update the end marks while the bus master engine is in operation. Since
 * the bus master engine may potentially be reading from the same position
 * that we write, we must change atomically to avoid having intermediary bad
 * data.
 */
static __inline__ void mach64_clear_dma_eol(volatile u32 *addr)
{
#if defined(__i386__)
	int nr = 31;

	/* Taken from include/asm-i386/bitops.h linux header
	 * NOTE(review): clears bit 31; presumably MACH64_DMA_EOL is bit 31
	 * (already in little-endian layout on i386) -- confirm against
	 * mach64_drv.h before changing. */
	__asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
			     :"Ir"(nr));
#elif defined(__powerpc__)
	u32 old;
	u32 mask = cpu_to_le32(MACH64_DMA_EOL);

	/* Taken from the include/asm-ppc/bitops.h linux header */
	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%3 \n\
	andc	%0,%0,%2 \n\
	stwcx.	%0,0,%3 \n\
	bne-	1b":"=&r"(old), "=m"(*addr)
			     :"r"(mask), "r"(addr), "m"(*addr)
			     :"cc");
#elif defined(__alpha__)
	u32 temp;
	u32 mask = ~MACH64_DMA_EOL;

	/* Taken from the include/asm-alpha/bitops.h linux header */
	__asm__ __volatile__("1:	ldl_l %0,%3\n"
			     "	and %0,%2,%0\n"
			     "	stl_c %0,%1\n"
			     "	beq %0,2f\n"
			     ".subsection 2\n"
			     "2:	br 1b\n"
			     ".previous":"=&r"(temp), "=m"(*addr)
			     :"Ir"(mask), "m"(*addr));
#else
	/* Fallback: not atomic with respect to the bus master engine. */
	u32 mask = cpu_to_le32(~MACH64_DMA_EOL);

	*addr &= mask;
#endif
}

/* Local state shared by BEGIN_RING/OUT_RING/ADVANCE_RING within a function. */
#define RING_LOCALS \
	int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring

/* Current dword write offset into the ring (valid between BEGIN/ADVANCE). */
#define RING_WRITE_OFS	_ring_write

/* Reserve space for n ring dwords, resetting the engine if the wait fails.
 * NOTE: expands to a `return ret;` on failure, so it may only be used in
 * functions returning int. */
#define BEGIN_RING(n)						\
	do {							\
		if (MACH64_VERBOSE) {				\
			DRM_INFO( "BEGIN_RING( %d ) \n",	\
				  (n) );			\
		}						\
		if (dev_priv->ring.space <= (n) * sizeof(u32)) {	\
			int ret;					\
			if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
				DRM_ERROR( "wait_ring failed, resetting engine\n"); \
				mach64_dump_engine_info( dev_priv );	\
				mach64_do_engine_reset( dev_priv );	\
				return ret;				\
			}						\
		}							\
		dev_priv->ring.space -= (n) * sizeof(u32);		\
		_ring = (u32 *) dev_priv->ring.start;			\
		_ring_tail = _ring_write = dev_priv->ring.tail;		\
		_ring_mask = dev_priv->ring.tail_mask;			\
	} while (0)

/* Emit one little-endian dword at the current write offset, wrapping at the
 * ring mask. */
#define OUT_RING( x )						\
do {								\
	if (MACH64_VERBOSE) {					\
		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",	\
			  (unsigned int)(x), _ring_write );	\
	}							\
	_ring[_ring_write++] = cpu_to_le32( x );		\
	_ring_write &= _ring_mask;				\
} while (0)

/* Publish the new tail: clear the EOL mark on the previous last descriptor
 * (with memory barriers on both sides) and kick the ring. */
#define ADVANCE_RING()						\
do {								\
	if (MACH64_VERBOSE) {					\
		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
			  _ring_write, _ring_tail );		\
	}							\
	DRM_MEMORYBARRIER();					\
	mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] ); \
	DRM_MEMORYBARRIER();					\
	dev_priv->ring.tail = _ring_write;			\
	mach64_ring_tick( dev_priv, &(dev_priv)->ring );	\
} while (0)

/**
 * Queue a DMA buffer of registers writes into the ring buffer.
 *
 * Emits one 4-dword descriptor per MACH64_DMA_CHUNKSIZE chunk of the buffer;
 * the last descriptor carries the EOL mark.
 */
int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
			   drm_mach64_freelist_t *entry)
{
	int bytes, pages, remainder;
	u32 address, page;
	int i;
	struct drm_buf *buf = entry->buf;
	RING_LOCALS;

	bytes = buf->used;
	address = GETBUFADDR( buf );
	pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;

	BEGIN_RING( pages * 4 );

	for ( i = 0 ; i < pages-1 ; i++ ) {
		page = address + i * MACH64_DMA_CHUNKSIZE;
		OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
		OUT_RING( page );
		OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
		OUT_RING( 0 );
	}

	/* generate the final descriptor for any remaining commands in this buffer */
	page = address + i * MACH64_DMA_CHUNKSIZE;
	remainder = bytes - i * MACH64_DMA_CHUNKSIZE;

	/* Save dword offset of last descriptor for this buffer.
	 * This is needed to check for completion of the buffer in freelist_get
	 */
	entry->ring_ofs = RING_WRITE_OFS;

	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
	OUT_RING( page );
	OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
	OUT_RING( 0 );

	ADVANCE_RING();

	return 0;
}

/**
 * Queue DMA buffer controlling host data transfers (e.g., blit).
 *
 * Almost identical to mach64_add_buf_to_ring.
 */
int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
				    drm_mach64_freelist_t *entry)
{
	int bytes, pages, remainder;
	u32 address, page;
	int i;
	struct drm_buf *buf = entry->buf;
	RING_LOCALS;

	/* The first MACH64_HOSTDATA_BLIT_OFFSET bytes hold the blit setup
	 * registers; the rest of the buffer is raw host data. */
	bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET;
	pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
	address = GETBUFADDR( buf );

	BEGIN_RING( 4 + pages * 4 );

	/* First descriptor: the register-write prologue, sent via BM_ADDR. */
	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
	OUT_RING( address );
	OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );
	OUT_RING( 0 );
	address += MACH64_HOSTDATA_BLIT_OFFSET;

	/* Remaining descriptors stream the payload via BM_HOSTDATA. */
	for ( i = 0 ; i < pages-1 ; i++ ) {
		page = address + i * MACH64_DMA_CHUNKSIZE;
		OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
		OUT_RING( page );
		OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
		OUT_RING( 0 );
	}

	/* generate the final descriptor for any remaining commands in this buffer */
	page = address + i * MACH64_DMA_CHUNKSIZE;
	remainder = bytes - i * MACH64_DMA_CHUNKSIZE;

	/* Save dword offset of last descriptor for this buffer.
	 * This is needed to check for completion of the buffer in freelist_get
	 */
	entry->ring_ofs = RING_WRITE_OFS;

	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
	OUT_RING( page );
	OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
	OUT_RING( 0 );

	ADVANCE_RING();

	return 0;
}

/*@}*/


/*******************************************************************/
/** \name DMA test and initialization */
/*@{*/

/**
 * Perform a simple DMA operation using the pattern registers to test whether
 * DMA works.
 *
 * \return zero if successful.
 *
 * \note This function was the testbed for many experiments regarding Mach64
 * DMA operation. It is left here since it is so tricky to get DMA operating
 * properly in some architectures and hardware.
 */
static int mach64_bm_dma_test(struct drm_device * dev)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_dma_handle_t *cpu_addr_dmah;
	u32 data_addr;
	u32 *table, *data;
	u32 expected[2];
	u32 src_cntl, pat_reg0, pat_reg1;
	int i, count, failed;

	DRM_DEBUG("\n");

	table = (u32 *) dev_priv->ring.start;

	/* FIXME: get a dma buffer from the freelist here */
	DRM_DEBUG("Allocating data memory ...\n");
#ifdef __FreeBSD__
	DRM_UNLOCK();
#endif
	cpu_addr_dmah =
	    drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
#ifdef __FreeBSD__
	DRM_LOCK();
#endif
	if (!cpu_addr_dmah) {
		DRM_INFO("data-memory allocation failed!\n");
		return -ENOMEM;
	} else {
		data = (u32 *) cpu_addr_dmah->vaddr;
		data_addr = (u32) cpu_addr_dmah->busaddr;
	}

	/* Save the X server's value for SRC_CNTL and restore it
	 * in case our test fails.  This prevents the X server
	 * from disabling it's cache for this register
	 */
	src_cntl = MACH64_READ(MACH64_SRC_CNTL);
	pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
	pat_reg1 = MACH64_READ(MACH64_PAT_REG1);

	mach64_do_wait_for_fifo(dev_priv, 3);

	/* Seed the pattern registers with a known value via MMIO so we can
	 * tell whether the DMA transfer below actually overwrote them. */
	MACH64_WRITE(MACH64_SRC_CNTL, 0);
	MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
	MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);

	mach64_do_wait_for_idle(dev_priv);

	for (i = 0; i < 2; i++) {
		u32 reg;
		reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
		DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
		if (reg != 0x11111111) {
			DRM_INFO("Error initializing test registers\n");
			DRM_INFO("resetting engine ...\n");
			mach64_do_engine_reset(dev_priv);
			DRM_INFO("freeing data buffer memory.\n");
			drm_pci_free(dev, cpu_addr_dmah);
			return -EIO;
		}
	}

	/* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0
	 * NOTE(review): only the register-header dwords are byte-swapped with
	 * cpu_to_le32; the data values are written host-endian -- verify
	 * intended behavior on big-endian before changing. */
	count = 0;

	data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
	data[count++] = expected[0] = 0x22222222;
	data[count++] = expected[1] = 0xaaaaaaaa;

	while (count < 1020) {
		data[count++] =
		    cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
		data[count++] = 0x22222222;
		data[count++] = 0xaaaaaaaa;
	}
	data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
	data[count++] = 0;

	/* Build a single one-shot descriptor at the start of the ring. */
	DRM_DEBUG("Preparing table ...\n");
	table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
							 MACH64_APERTURE_OFFSET);
	table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
	table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
						| MACH64_DMA_HOLD_OFFSET
						| MACH64_DMA_EOL);
	table[MACH64_DMA_RESERVED] = 0;

	DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
	DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
	DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
	DRM_DEBUG("table[3] = 0x%08x\n", table[3]);

	for (i = 0; i < 6; i++) {
		DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
	}
	DRM_DEBUG(" ...\n");
	for (i = count - 5; i < count; i++) {
		DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
	}

	DRM_MEMORYBARRIER();

	DRM_DEBUG("waiting for idle...\n");
	if ((i = mach64_do_wait_for_idle(dev_priv))) {
		DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
		DRM_INFO("resetting engine ...\n");
		mach64_do_engine_reset(dev_priv);
		mach64_do_wait_for_fifo(dev_priv, 3);
		MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
		MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
		MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
		DRM_INFO("freeing data buffer memory.\n");
		drm_pci_free(dev, cpu_addr_dmah);
		return i;
	}
	DRM_DEBUG("waiting for idle...done\n");

	DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
	DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
	DRM_DEBUG("\n");
	DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
	DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);

	DRM_DEBUG("starting DMA transfer...\n");
	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
		     dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);

	MACH64_WRITE(MACH64_SRC_CNTL,
		     MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
		     MACH64_SRC_BM_OP_SYSTEM_TO_REG);

	/* Kick off the transfer */
	DRM_DEBUG("starting DMA transfer... done.\n");
	MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);

	DRM_DEBUG("waiting for idle...\n");

	if ((i = mach64_do_wait_for_idle(dev_priv))) {
		/* engine locked up, dump register state and reset */
		DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
		mach64_dump_engine_info(dev_priv);
		DRM_INFO("resetting engine ...\n");
		mach64_do_engine_reset(dev_priv);
		mach64_do_wait_for_fifo(dev_priv, 3);
		MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
		MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
		MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
		DRM_INFO("freeing data buffer memory.\n");
		drm_pci_free(dev, cpu_addr_dmah);
		return i;
	}

	DRM_DEBUG("waiting for idle...done\n");

	/* restore SRC_CNTL */
	mach64_do_wait_for_fifo(dev_priv, 1);
	MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);

	failed = 0;

	/* Check register values to see if the GUI master operation succeeded */
	for (i = 0; i < 2; i++) {
		u32 reg;
		reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
		DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
		if (reg != expected[i]) {
			failed = -1;
		}
	}

	/* restore pattern registers */
	mach64_do_wait_for_fifo(dev_priv, 2);
	MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
	MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);

	DRM_DEBUG("freeing data buffer memory.\n");
	drm_pci_free(dev, cpu_addr_dmah);
	DRM_DEBUG("returning ...\n");

	return failed;
}

/**
 * Called during the DMA initialization ioctl to initialize all the necessary
 * software and hardware state for DMA operation.
 */
static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
{
	drm_mach64_private_t *dev_priv;
	u32 tmp;
	int i, ret;

	DRM_DEBUG("\n");

	dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_mach64_private_t));

	dev_priv->is_pci = init->is_pci;

	/* Copy framebuffer layout from the userspace init request. */
	dev_priv->fb_bpp = init->fb_bpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	dev_priv->depth_bpp = init->depth_bpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* Pre-pack pitch/offset pairs into the hardware register format:
	 * pitch (in units of 8 pixels) in the high bits, offset (in units
	 * of 8 bytes) in the low bits.
	 */
	dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
					(dev_priv->front_offset >> 3));
	dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
				       (dev_priv->back_offset >> 3));
	dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
					(dev_priv->depth_offset >> 3));

	/* 1 second timeout (in microsecond polling steps) for engine waits. */
	dev_priv->usec_timeout = 1000000;

	/* Set up the freelist, placeholder list and pending list */
	INIT_LIST_HEAD(&dev_priv->free_list);
	INIT_LIST_HEAD(&dev_priv->placeholders);
	INIT_LIST_HEAD(&dev_priv->pending);

	/* Look up the maps userspace created for us.  On every error path
	 * below, dev->dev_private is set before calling
	 * mach64_do_cleanup_dma() so the cleanup code can see (and free)
	 * the partially initialized private state.
	 */
	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		mach64_do_cleanup_dma(dev);
		return -EINVAL;
	}
	dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
	if (!dev_priv->fb) {
		DRM_ERROR("can not find frame buffer map!\n");
		dev->dev_private = (void *)dev_priv;
		mach64_do_cleanup_dma(dev);
		return -EINVAL;
	}
	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio) {
		DRM_ERROR("can not find mmio map!\n");
		dev->dev_private = (void *)dev_priv;
		mach64_do_cleanup_dma(dev);
		return -EINVAL;
	}

	dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
	if (!dev_priv->ring_map) {
		DRM_ERROR("can not find ring map!\n");
		dev->dev_private = (void *)dev_priv;
		mach64_do_cleanup_dma(dev);
		return -EINVAL;
	}

	dev_priv->sarea_priv = (drm_mach64_sarea_t *)
	    ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);

	if (!dev_priv->is_pci) {
		/* AGP path: map the descriptor ring and DMA buffers through
		 * the aperture.
		 */
		drm_core_ioremap(dev_priv->ring_map, dev);
		if (!dev_priv->ring_map->virtual) {
			DRM_ERROR("can not ioremap virtual address for"
				  " descriptor ring\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return -ENOMEM;
		}
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map =
		    drm_core_findmap(dev, init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("can not find dma buffer map!\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return -EINVAL;
		}
		/* there might be a nicer way to do this -
		   dev isn't passed all the way though the mach64 - DA */
		dev_priv->dev_buffers = dev->agp_buffer_map;

		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev->agp_buffer_map->virtual) {
			DRM_ERROR("can not ioremap virtual address for"
				  " dma buffer\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return -ENOMEM;
		}
		dev_priv->agp_textures =
		    drm_core_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("can not find agp texture region!\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return -EINVAL;
		}
	}

	dev->dev_private = (void *)dev_priv;

	dev_priv->driver_mode = init->dma_mode;

	/* changing the FIFO size from the default causes problems with DMA */
	tmp = MACH64_READ(MACH64_GUI_CNTL);
	if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
		DRM_INFO("Setting FIFO size to 128 entries\n");
		/* FIFO must be empty to change the FIFO depth */
		if ((ret = mach64_do_wait_for_idle(dev_priv))) {
			DRM_ERROR
			    ("wait for idle failed before changing FIFO depth!\n");
			mach64_do_cleanup_dma(dev);
			return ret;
		}
		MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
					       | MACH64_CMDFIFO_SIZE_128));
		/* need to read GUI_STAT for proper sync according to docs */
		if ((ret = mach64_do_wait_for_idle(dev_priv))) {
			DRM_ERROR
			    ("wait for idle failed when changing FIFO depth!\n");
			mach64_do_cleanup_dma(dev);
			return ret;
		}
	}

	dev_priv->ring.size = 0x4000;	/* 16KB */
	dev_priv->ring.start = dev_priv->ring_map->virtual;
	dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;

	memset(dev_priv->ring.start, 0, dev_priv->ring.size);
	DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
		 dev_priv->ring.start, dev_priv->ring.start_addr);

	ret = 0;
	if (dev_priv->driver_mode != MACH64_MODE_MMIO) {

		/* enable block 1 registers and bus mastering */
		MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
						| MACH64_BUS_EXT_REG_EN)
					       & ~MACH64_BUS_MASTER_DIS));

		/* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
		DRM_DEBUG("Starting DMA test...\n");
		if ((ret = mach64_bm_dma_test(dev))) {
			dev_priv->driver_mode = MACH64_MODE_MMIO;
		}
	}

	switch (dev_priv->driver_mode) {
	case MACH64_MODE_MMIO:
		/* Pseudo-DMA: keep bus mastering disabled and drive the
		 * engine by MMIO writes instead.
		 */
		MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
					       | MACH64_BUS_EXT_REG_EN
					       | MACH64_BUS_MASTER_DIS));
		if (init->dma_mode == MACH64_MODE_MMIO)
			DRM_INFO("Forcing pseudo-DMA mode\n");
		else
			DRM_INFO
			    ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
			     ret);
		break;
	case MACH64_MODE_DMA_SYNC:
		DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
		break;
	case MACH64_MODE_DMA_ASYNC:
	default:
		DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
	}

	dev_priv->ring_running = 0;

	/* setup offsets for physical address of table start and end */
	dev_priv->ring.head_addr = dev_priv->ring.start_addr;
	dev_priv->ring.head = dev_priv->ring.tail = 0;
	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
	dev_priv->ring.space = dev_priv->ring.size;

	/* setup physical address and size of descriptor table */
	mach64_do_wait_for_fifo(dev_priv, 1);
	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
		     (dev_priv->ring.
		      head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));

	/* init frame counter */
	dev_priv->sarea_priv->frames_queued = 0;
	for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
		dev_priv->frame_ofs[i] = ~0;	/* All ones indicates placeholder */
	}

	/* Allocate the DMA buffer freelist */
	if ((ret = mach64_init_freelist(dev))) {
		DRM_ERROR("Freelist allocation failed\n");
		mach64_do_cleanup_dma(dev);
		return ret;
	}

	return 0;
}

/*******************************************************************/
/** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
 */

/**
 * Drain the descriptor ring by hand-feeding each queued buffer to the
 * engine through MMIO register writes instead of bus-master DMA.
 *
 * Walks the ring from head to tail; each descriptor (4 dwords: target
 * register, buffer bus address, byte count/flags, pad) is matched to a
 * buffer on the pending list, then its dwords are written to the engine,
 * pausing for FIFO space (or engine idle) every 16 writes.
 *
 * \param dev_priv pointer to device private data structure.
 *
 * \returns zero on success, negative errno on wait failure, or -EINVAL
 * if a descriptor references no pending buffer (engine is reset first).
 */
int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	volatile u32 *ring_read;
	struct list_head *ptr;
	drm_mach64_freelist_t *entry;
	struct drm_buf *buf = NULL;
	u32 *buf_ptr;
	u32 used, reg, target;
	int fifo, count, found, ret, no_idle_wait;

	fifo = count = reg = no_idle_wait = 0;
	target = MACH64_BM_ADDR;

	if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
		DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n");
		mach64_dump_engine_info(dev_priv);
		mach64_do_engine_reset(dev_priv);
		return ret;
	}

	ring_read = (u32 *) ring->start;

	while (ring->tail != ring->head) {
		u32 buf_addr, new_target, offset;
		u32 bytes, remaining, head, eol;

		head = ring->head;

		/* Decode one 4-dword ring descriptor. */
		new_target =
		    le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
		buf_addr = le32_to_cpu(ring_read[head++]);
		eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
		bytes = le32_to_cpu(ring_read[head++])
		    & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
		head++;
		head &= ring->tail_mask;

		/* can't wait for idle between a blit setup descriptor
		 * and a HOSTDATA descriptor or the engine will lock
		 */
		if (new_target == MACH64_BM_HOSTDATA
		    && target == MACH64_BM_ADDR)
			no_idle_wait = 1;

		target = new_target;

		/* Find the pending buffer containing buf_addr; offset is
		 * the descriptor's position within that buffer.
		 */
		found = 0;
		offset = 0;
		list_for_each(ptr, &dev_priv->pending) {
			entry = list_entry(ptr, drm_mach64_freelist_t, list);
			buf = entry->buf;
			offset = buf_addr - GETBUFADDR(buf);
			if (offset < MACH64_BUFFER_SIZE) {
				found = 1;
				break;
			}
		}

		if (!found || buf == NULL) {
			DRM_ERROR
			    ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
			     head, ring->tail, buf_addr, (eol ? "eol" : ""));
			mach64_dump_ring_info(dev_priv);
			mach64_do_engine_reset(dev_priv);
			return -EINVAL;
		}

		/* Hand feed the buffer to the card via MMIO, waiting for the fifo
		 * every 16 writes
		 */
		DRM_DEBUG("target: (0x%08x) %s\n", target,
			  (target ==
			   MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
		DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
			  buf->used);

		remaining = (buf->used - offset) >> 2;	/* dwords remaining in buffer */
		used = bytes >> 2;	/* dwords in buffer for this descriptor */
		buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);

		while (used) {

			if (count == 0) {
				if (target == MACH64_BM_HOSTDATA) {
					/* HOSTDATA blits stream straight into
					 * HOST_DATA0 in bursts of up to 16.
					 */
					reg = DMAREG(MACH64_HOST_DATA0);
					count =
					    (remaining > 16) ? 16 : remaining;
					fifo = 0;
				} else {
					/* Register writes: first dword is the
					 * DMA header, high half holds the
					 * write count minus one.
					 */
					reg = le32_to_cpu(*buf_ptr++);
					used--;
					count = (reg >> 16) + 1;
				}

				reg = reg & 0xffff;
				reg = MMSELECT(reg);
			}
			while (count && used) {
				if (!fifo) {
					if (no_idle_wait) {
						if ((ret =
						     mach64_do_wait_for_fifo
						     (dev_priv, 16)) < 0) {
							no_idle_wait = 0;
							return ret;
						}
					} else {
						if ((ret =
						     mach64_do_wait_for_idle
						     (dev_priv)) < 0) {
							return ret;
						}
					}
					fifo = 16;
				}
				--fifo;
				MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
				used--;
				remaining--;

				reg += 4;
				count--;
			}
		}
		/* Descriptor consumed: advance head and reclaim its 4 dwords. */
		ring->head = head;
		ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
		ring->space += (4 * sizeof(u32));
	}

	if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
		return ret;
	}
	/* Re-sync the hardware's notion of the table head with ours. */
	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);

	DRM_DEBUG("completed\n");
	return 0;
}

/*@}*/


/*******************************************************************/
/** \name DMA cleanup */
/*@{*/

/**
 * Tear down all DMA state: disable interrupts, unmap AGP mappings,
 * destroy the buffer freelist and free the private structure.
 * Safe to call on partially initialized state (see mach64_do_dma_init).
 */
int mach64_do_cleanup_dma(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_mach64_private_t *dev_priv = dev->dev_private;

		if (!dev_priv->is_pci) {
			if (dev_priv->ring_map)
				drm_core_ioremapfree(dev_priv->ring_map, dev);

			if (dev->agp_buffer_map) {
				drm_core_ioremapfree(dev->agp_buffer_map, dev);
				dev->agp_buffer_map = NULL;
			}
		}

		mach64_destroy_freelist(dev);

		drm_free(dev_priv, sizeof(drm_mach64_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;
	}

	return 0;
}

/*@}*/


/*******************************************************************/
/** \name IOCTL handlers */
/*@{*/

/** DMA init/cleanup ioctl: dispatches on init->func. */
int mach64_dma_init(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_mach64_init_t *init = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case DRM_MACH64_INIT_DMA:
		return mach64_do_dma_init(dev, init);
	case DRM_MACH64_CLEANUP_DMA:
		return mach64_do_cleanup_dma(dev);
	}

	return -EINVAL;
}

/** Ioctl wrapper: wait for the engine to go idle. */
int mach64_dma_idle(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return mach64_do_dma_idle(dev_priv);
}

/** Ioctl wrapper: flush any queued DMA work to the engine. */
int mach64_dma_flush(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return mach64_do_dma_flush(dev_priv);
}

/** Ioctl wrapper: reset the drawing engine. */
int mach64_engine_reset(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return mach64_do_engine_reset(dev_priv);
}

/*@}*/


/*******************************************************************/
/** \name Freelist management */
/*@{*/

/**
 * Populate the freelist with one entry per DMA buffer in dev->dma.
 *
 * \returns zero on success or -ENOMEM if an entry allocation fails
 * (entries already added remain on the freelist for later cleanup).
 */
int mach64_init_freelist(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_freelist_t *entry;
	struct list_head *ptr;
	int i;

	DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count);

	for (i = 0; i < dma->buf_count; i++) {
		if ((entry =
		     (drm_mach64_freelist_t *)
		     drm_alloc(sizeof(drm_mach64_freelist_t),
			       DRM_MEM_BUFLISTS)) == NULL)
			return -ENOMEM;
		memset(entry, 0, sizeof(drm_mach64_freelist_t));
		entry->buf = dma->buflist[i];
		ptr = &entry->list;
		list_add_tail(ptr, &dev_priv->free_list);
	}

	return 0;
}

/**
 * Free every freelist entry on the pending, placeholder and free lists.
 * The drm_buf structures themselves belong to the DRM core and are not
 * freed here.
 */
void mach64_destroy_freelist(struct drm_device * dev)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_freelist_t *entry;
	struct list_head *ptr;
	struct list_head *tmp;

	DRM_DEBUG("\n");

	list_for_each_safe(ptr, tmp, &dev_priv->pending) {
		list_del(ptr);
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
	}
	list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
		list_del(ptr);
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
	}

	list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
		list_del(ptr);
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
	}
}

/* IMPORTANT: This function should only be called when the engine is idle or locked up,
 * as it assumes all buffers in the pending list have been completed by the hardware.
 */
int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv)
{
	struct list_head *ptr;
	struct list_head *tmp;
	drm_mach64_freelist_t *entry;
	int i;

	if (list_empty(&dev_priv->pending))
		return 0;

	/* Iterate the pending list and move all buffers into the freelist... */
	i = 0;
	list_for_each_safe(ptr, tmp, &dev_priv->pending) {
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		/* Only discard-marked entries are reclaimed; others stay
		 * pending (their buffers may still be referenced).
		 */
		if (entry->discard) {
			entry->buf->pending = 0;
			list_del(ptr);
			list_add_tail(ptr, &dev_priv->free_list);
			i++;
		}
	}

	DRM_DEBUG("released %d buffers from pending list\n", i);

	return 0;
}

/**
 * Try to move one completed buffer from the pending list back to the
 * freelist.
 *
 * If the ring is empty (head == tail), everything pending has been
 * processed and is released wholesale.  Otherwise a pending entry is
 * considered complete when its recorded ring offset lies outside the
 * still-busy [tail, head) window of the circular ring.
 *
 * \returns 0 when a buffer was freed (or the ring drained), 1 when no
 * completed buffer was found yet, -1 on inconsistent state.
 */
static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	struct list_head *ptr;
	struct list_head *tmp;
	drm_mach64_freelist_t *entry;
	u32 head, tail, ofs;

	/* Refresh ring->head from the hardware before inspecting it. */
	mach64_ring_tick(dev_priv, ring);
	head = ring->head;
	tail = ring->tail;

	if (head == tail) {
#if MACH64_EXTRA_CHECKING
		if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) {
			DRM_ERROR("Empty ring with non-idle engine!\n");
			mach64_dump_ring_info(dev_priv);
			return -1;
		}
#endif
		/* last pass is complete, so release everything */
		mach64_do_release_used_buffers(dev_priv);
		DRM_DEBUG("idle engine, freed all buffers.\n");
		if (list_empty(&dev_priv->free_list)) {
			DRM_ERROR("Freelist empty with idle engine\n");
			return -1;
		}
		return 0;
	}
	/* Look for a completed buffer and bail out of the loop
	 * as soon as we find one -- don't waste time trying
	 * to free extra bufs here, leave that to do_release_used_buffers
	 */
	list_for_each_safe(ptr, tmp, &dev_priv->pending) {
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		ofs = entry->ring_ofs;
		/* Two cases depending on whether the busy region wraps
		 * around the end of the ring.
		 */
		if (entry->discard &&
		    ((head < tail && (ofs < head || ofs >= tail)) ||
		     (head > tail && (ofs < head && ofs >= tail)))) {
#if MACH64_EXTRA_CHECKING
			int i;

			/* Paranoia: verify no live descriptor between head
			 * and tail still points at this buffer.
			 */
			for (i = head; i != tail; i = (i + 4) & ring->tail_mask)
			{
				u32 o1 = le32_to_cpu(((u32 *) ring->
						      start)[i + 1]);
				u32 o2 = GETBUFADDR(entry->buf);

				if (o1 == o2) {
					DRM_ERROR
					    ("Attempting to free used buffer: "
					     "i=%d buf=0x%08x\n",
					     i, o1);
					mach64_dump_ring_info(dev_priv);
					return -1;
				}
			}
#endif
			/* found a processed buffer */
			entry->buf->pending = 0;
			list_del(ptr);
			list_add_tail(ptr, &dev_priv->free_list);
			DRM_DEBUG
			    ("freed processed buffer (head=%d tail=%d "
			     "buf ring ofs=%d).\n",
			     head, tail, ofs);
			return 0;
		}
	}

	return 1;
}

/**
 * Take a buffer from the freelist, moving its list node to the
 * placeholder list.  If the freelist is empty, poll
 * mach64_do_reclaim_completed() for up to usec_timeout microseconds.
 *
 * \returns the buffer (with buf->used reset to 0), or NULL on timeout
 * or when both the pending and free lists are empty.
 */
struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	drm_mach64_freelist_t *entry;
	struct list_head *ptr;
	int t;

	if (list_empty(&dev_priv->free_list)) {
		if (list_empty(&dev_priv->pending)) {
			/* Nothing to reclaim from: all entries must be
			 * sitting on the placeholder list.
			 */
			DRM_ERROR
			    ("Couldn't get buffer - pending and free lists empty\n");
			t = 0;
			list_for_each(ptr, &dev_priv->placeholders) {
				t++;
			}
			DRM_INFO("Placeholders: %d\n", t);
			return NULL;
		}

		for (t = 0; t < dev_priv->usec_timeout; t++) {
			int ret;

			ret = mach64_do_reclaim_completed(dev_priv);
			if (ret == 0)
				goto _freelist_entry_found;
			if (ret < 0)
				return NULL;

			DRM_UDELAY(1);
		}
		mach64_dump_ring_info(dev_priv);
		DRM_ERROR
		    ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
		     ring->head_addr, ring->head, ring->tail);
		return NULL;
	}

      _freelist_entry_found:
	ptr = dev_priv->free_list.next;
	list_del(ptr);
	entry = list_entry(ptr, drm_mach64_freelist_t, list);
	entry->buf->used = 0;
	list_add_tail(ptr, &dev_priv->placeholders);
	return entry->buf;
}

/**
 * Return a buffer to the freelist: reuse a placeholder node, point it
 * at copy_buf (marked discardable), and move it to the free list.
 *
 * \note Assumes the placeholder list is non-empty — i.e. that a prior
 * mach64_freelist_get() produced the node being recycled.
 */
int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf)
{
	struct list_head *ptr;
	drm_mach64_freelist_t *entry;

#if MACH64_EXTRA_CHECKING
	list_for_each(ptr, &dev_priv->pending) {
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		if (copy_buf == entry->buf) {
			DRM_ERROR("Trying to release a pending buf\n");
			return -EFAULT;
		}
	}
#endif
	ptr = dev_priv->placeholders.next;
	entry = list_entry(ptr, drm_mach64_freelist_t, list);
	copy_buf->pending = 0;
	copy_buf->used = 0;
	entry->buf = copy_buf;
	entry->discard = 1;
	list_del(ptr);
	list_add_tail(ptr, &dev_priv->free_list);

	return 0;
}

/*@}*/


/*******************************************************************/
/** \name DMA buffer request and submission IOCTL handler */
/*@{*/

/**
 * Grant up to d->request_count buffers to the caller, copying each
 * buffer's index and size out to the userspace arrays.
 *
 * \returns zero on success; -EFAULT on copyout failure (or, with
 * MACH64_EXTRA_CHECKING, when no buffer is available), -EAGAIN when the
 * freelist is exhausted.  d->granted_count reflects partial progress.
 */
static int mach64_dma_get_buffers(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_dma * d)
{
	int i;
	struct drm_buf *buf;
	drm_mach64_private_t *dev_priv = dev->dev_private;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = mach64_freelist_get(dev_priv);
#if MACH64_EXTRA_CHECKING
		if (!buf)
			return -EFAULT;
#else
		if (!buf)
			return -EAGAIN;
#endif

		buf->file_priv = file_priv;

		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
				     sizeof(buf->idx)))
			return -EFAULT;
		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
				     sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

/** drmDMA ioctl handler: hands DMA buffers out to userspace. */
int mach64_dma_buffers(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d->request_count, dma->buf_count);
		ret = -EINVAL;
	}

	d->granted_count = 0;

	/* NOTE(review): when request_count is out of range, ret is set to
	 * -EINVAL above but execution still falls through here, and a
	 * non-zero request_count lets mach64_dma_get_buffers() overwrite
	 * ret.  This matches the historical DRM code — confirm intent
	 * before changing.
	 */
	if (d->request_count) {
		ret = mach64_dma_get_buffers(dev, file_priv, d);
	}

	return ret;
}

/** lastclose callback: tear down all DMA state when the device closes. */
void mach64_driver_lastclose(struct drm_device * dev)
{
	mach64_do_cleanup_dma(dev);
}

/*@}*/