/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
| 1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3/* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30#include <sys/cdefs.h>
|
242 /** 243 * List of objects currently involved in rendering from the 244 * ringbuffer. 245 * 246 * A reference is held on the buffer while on this list. 247 */ 248 struct list_head active_list; 249 250 /** 251 * List of objects which are not in the ringbuffer but which 252 * still have a write_domain which needs to be flushed before 253 * unbinding. 254 * 255 * A reference is held on the buffer while on this list. 256 */ 257 struct list_head flushing_list; 258 259 /** 260 * LRU list of objects which are not in the ringbuffer and 261 * are ready to unbind, but are still in the GTT. 262 * 263 * A reference is not held on the buffer while on this list, 264 * as merely being GTT-bound shouldn't prevent its being 265 * freed, and we'll pull it off the list in the free path. 266 */ 267 struct list_head inactive_list; 268 269 /** 270 * List of breadcrumbs associated with GPU requests currently 271 * outstanding. 272 */ 273 struct list_head request_list; 274#ifdef __linux__ 275 /** 276 * We leave the user IRQ off as much as possible, 277 * but this means that requests will finish and never 278 * be retired once the system goes idle. Set a timer to 279 * fire periodically while the ring is running. When it 280 * fires, go retire requests. 281 */ 282 struct delayed_work retire_work; 283#endif 284 uint32_t next_gem_seqno; 285 286 /** 287 * Waiting sequence number, if any 288 */ 289 uint32_t waiting_gem_seqno; 290 291 /** 292 * Last seq seen at irq time 293 */ 294 uint32_t irq_gem_seqno; 295 296 /** 297 * Flag if the X Server, and thus DRM, is not currently in 298 * control of the device. 299 * 300 * This is set between LeaveVT and EnterVT. It needs to be 301 * replaced with a semaphore. It also needs to be 302 * transitioned away from for kernel modesetting. 303 */ 304 int suspended; 305 306 /** 307 * Flag if the hardware appears to be wedged. 308 * 309 * This is set when attempts to idle the device timeout. 
310 * It prevents command submission from occuring and makes 311 * every pending request fail 312 */ 313 int wedged; 314 315 /** Bit 6 swizzling required for X tiling */ 316 uint32_t bit_6_swizzle_x; 317 /** Bit 6 swizzling required for Y tiling */ 318 uint32_t bit_6_swizzle_y; 319 } mm; 320} drm_i915_private_t; 321 322enum intel_chip_family { 323 CHIP_I8XX = 0x01, 324 CHIP_I9XX = 0x02, 325 CHIP_I915 = 0x04, 326 CHIP_I965 = 0x08, 327}; 328 329/** driver private structure attached to each drm_gem_object */ 330struct drm_i915_gem_object { 331 struct drm_gem_object *obj; 332 333 /** Current space allocated to this object in the GTT, if any. */ 334 struct drm_mm_node *gtt_space; 335 336 /** This object's place on the active/flushing/inactive lists */ 337 struct list_head list; 338 339 /** 340 * This is set if the object is on the active or flushing lists 341 * (has pending rendering), and is not set if it's on inactive (ready 342 * to be unbound). 343 */ 344 int active; 345 346 /** 347 * This is set if the object has been written to since last bound 348 * to the GTT 349 */ 350 int dirty; 351 352 /** AGP memory structure for our GTT binding. */ 353 DRM_AGP_MEM *agp_mem; 354 355 struct page **page_list; 356 357 /** 358 * Current offset of the object in GTT space. 359 * 360 * This is the same as gtt_space->start 361 */ 362 uint32_t gtt_offset; 363 364 /** Boolean whether this object has a valid gtt offset. */ 365 int gtt_bound; 366 367 /** How many users have pinned this object in GTT space */ 368 int pin_count; 369 370 /** Breadcrumb of last rendering to the buffer. */ 371 uint32_t last_rendering_seqno; 372 373 /** Current tiling mode for the object. */ 374 uint32_t tiling_mode; 375 376 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ 377 uint32_t agp_type; 378 379 /** 380 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when 381 * GEM_DOMAIN_CPU is not in the object's read domain. 
382 */ 383 uint8_t *page_cpu_valid; 384}; 385 386/** 387 * Request queue structure. 388 * 389 * The request queue allows us to note sequence numbers that have been emitted 390 * and may be associated with active buffers to be retired. 391 * 392 * By keeping this list, we can avoid having to do questionable 393 * sequence-number comparisons on buffer last_rendering_seqnos, and associate 394 * an emission time with seqnos for tracking how far ahead of the GPU we are. 395 */ 396struct drm_i915_gem_request { 397 /** GEM sequence number associated with this request. */ 398 uint32_t seqno; 399 400 /** Time at which this request was emitted, in jiffies. */ 401 unsigned long emitted_jiffies; 402 403 /** Cache domains that were flushed at the start of the request. */ 404 uint32_t flush_domains; 405 406 struct list_head list; 407}; 408 409struct drm_i915_file_private { 410 struct { 411 uint32_t last_gem_seqno; 412 uint32_t last_gem_throttle_seqno; 413 } mm; 414}; 415 416extern struct drm_ioctl_desc i915_ioctls[]; 417extern int i915_max_ioctl; 418 419 /* i915_dma.c */ 420extern void i915_kernel_lost_context(struct drm_device * dev); 421extern int i915_driver_load(struct drm_device *, unsigned long flags); 422extern int i915_driver_unload(struct drm_device *); 423extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); 424extern void i915_driver_lastclose(struct drm_device * dev); 425extern void i915_driver_preclose(struct drm_device *dev, 426 struct drm_file *file_priv); 427extern void i915_driver_postclose(struct drm_device *dev, 428 struct drm_file *file_priv); 429extern int i915_driver_device_is_agp(struct drm_device * dev); 430extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 431 unsigned long arg); 432extern int i915_emit_box(struct drm_device *dev, 433 struct drm_clip_rect __user *boxes, 434 int i, int DR1, int DR4); 435 436/* i915_irq.c */ 437extern int i915_irq_emit(struct drm_device *dev, void *data, 438 struct drm_file 
*file_priv); 439extern int i915_irq_wait(struct drm_device *dev, void *data, 440 struct drm_file *file_priv); 441void i915_user_irq_get(struct drm_device *dev); 442void i915_user_irq_put(struct drm_device *dev); 443 444extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 445extern void i915_driver_irq_preinstall(struct drm_device * dev); 446extern int i915_driver_irq_postinstall(struct drm_device *dev); 447extern void i915_driver_irq_uninstall(struct drm_device * dev); 448extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 449 struct drm_file *file_priv); 450extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 451 struct drm_file *file_priv); 452extern int i915_enable_vblank(struct drm_device *dev, int crtc); 453extern void i915_disable_vblank(struct drm_device *dev, int crtc); 454extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); 455extern u32 g45_get_vblank_counter(struct drm_device *dev, int crtc); 456extern int i915_vblank_swap(struct drm_device *dev, void *data, 457 struct drm_file *file_priv); 458 459void 460i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 461 462void 463i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 464 465 466/* i915_mem.c */ 467extern int i915_mem_alloc(struct drm_device *dev, void *data, 468 struct drm_file *file_priv); 469extern int i915_mem_free(struct drm_device *dev, void *data, 470 struct drm_file *file_priv); 471extern int i915_mem_init_heap(struct drm_device *dev, void *data, 472 struct drm_file *file_priv); 473extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, 474 struct drm_file *file_priv); 475extern void i915_mem_takedown(struct mem_block **heap); 476extern void i915_mem_release(struct drm_device * dev, 477 struct drm_file *file_priv, struct mem_block *heap); 478#ifdef I915_HAVE_GEM 479/* i915_gem.c */ 480int i915_gem_init_ioctl(struct drm_device *dev, void *data, 481 struct drm_file *file_priv); 482int 
i915_gem_create_ioctl(struct drm_device *dev, void *data, 483 struct drm_file *file_priv); 484int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 485 struct drm_file *file_priv); 486int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 487 struct drm_file *file_priv); 488int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 489 struct drm_file *file_priv); 490int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 491 struct drm_file *file_priv); 492int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 493 struct drm_file *file_priv); 494int i915_gem_execbuffer(struct drm_device *dev, void *data, 495 struct drm_file *file_priv); 496int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 497 struct drm_file *file_priv); 498int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 499 struct drm_file *file_priv); 500int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 501 struct drm_file *file_priv); 502int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 503 struct drm_file *file_priv); 504int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 505 struct drm_file *file_priv); 506int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 507 struct drm_file *file_priv); 508int i915_gem_set_tiling(struct drm_device *dev, void *data, 509 struct drm_file *file_priv); 510int i915_gem_get_tiling(struct drm_device *dev, void *data, 511 struct drm_file *file_priv); 512void i915_gem_load(struct drm_device *dev); 513int i915_gem_proc_init(struct drm_minor *minor); 514void i915_gem_proc_cleanup(struct drm_minor *minor); 515int i915_gem_init_object(struct drm_gem_object *obj); 516void i915_gem_free_object(struct drm_gem_object *obj); 517int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 518void i915_gem_object_unpin(struct drm_gem_object *obj); 519void i915_gem_lastclose(struct drm_device *dev); 520uint32_t i915_get_gem_seqno(struct drm_device *dev); 521void 
i915_gem_retire_requests(struct drm_device *dev); 522void i915_gem_retire_work_handler(struct work_struct *work); 523void i915_gem_clflush_object(struct drm_gem_object *obj); 524 525/* i915_gem_tiling.c */ 526void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 527 528/* i915_gem_debug.c */ 529void i915_gem_dump_object(struct drm_gem_object *obj, int len, 530 const char *where, uint32_t mark); 531#if WATCH_INACTIVE 532void i915_verify_inactive(struct drm_device *dev, char *file, int line); 533#else 534#define i915_verify_inactive(dev, file, line) 535#endif 536void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); 537void i915_gem_dump_object(struct drm_gem_object *obj, int len, 538 const char *where, uint32_t mark); 539void i915_dump_lru(struct drm_device *dev, const char *where); 540#endif /* I915_HAVE_GEM */ 541 542/* i915_suspend.c */ 543extern int i915_save_state(struct drm_device *dev); 544extern int i915_restore_state(struct drm_device *dev); 545 546/* i915_opregion.c */ 547extern int intel_opregion_init(struct drm_device *dev); 548extern void intel_opregion_free(struct drm_device *dev); 549extern void opregion_asle_intr(struct drm_device *dev); 550extern void opregion_enable_asle(struct drm_device *dev); 551 552/** 553 * Lock test for when it's just for synchronization of ring access. 554 * 555 * In that case, we don't need to do it when GEM is initialized as nobody else 556 * has access to the ring. 
557 */ 558#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ 559 if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ 560 LOCK_TEST_WITH_RETURN(dev, file_priv); \ 561} while (0) 562 563#if defined(__FreeBSD__) 564typedef boolean_t bool; 565#endif 566 567#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) 568#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) 569#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) 570#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) 571#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg)) 572#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val)) 573 574#define I915_VERBOSE 0 575 576#define RING_LOCALS unsigned int outring, ringmask, outcount; \ 577 volatile char *virt; 578 579#define BEGIN_LP_RING(n) do { \ 580 if (I915_VERBOSE) \ 581 DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ 582 if (dev_priv->ring.space < (n)*4) \ 583 i915_wait_ring(dev, (n)*4, __func__); \ 584 outcount = 0; \ 585 outring = dev_priv->ring.tail; \ 586 ringmask = dev_priv->ring.tail_mask; \ 587 virt = dev_priv->ring.virtual_start; \ 588} while (0) 589 590#define OUT_RING(n) do { \ 591 if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ 592 *(volatile unsigned int *)(virt + outring) = (n); \ 593 outcount++; \ 594 outring += 4; \ 595 outring &= ringmask; \ 596} while (0) 597 598#define ADVANCE_LP_RING() do { \ 599 if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \ 600 dev_priv->ring.tail = outring; \ 601 dev_priv->ring.space -= outcount * 4; \ 602 I915_WRITE(PRB0_TAIL, outring); \ 603} while(0) 604 605/** 606 * Reads a dword out of the status page, which is written to from the command 607 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or 608 * MI_STORE_DATA_IMM. 609 * 610 * The following dwords have a reserved meaning: 611 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. 
612 * 0x04: ring 0 head pointer 613 * 0x05: ring 1 head pointer (915-class) 614 * 0x06: ring 2 head pointer (915-class) 615 * 0x10-0x1b: Context status DWords (GM45) 616 * 0x1f: Last written status offset. (GM45) 617 * 618 * The area from dword 0x20 to 0x3ff is available for driver usage. 619 */ 620#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 621#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 622#define I915_GEM_HWS_INDEX 0x20 623#define I915_BREADCRUMB_INDEX 0x21 624 625extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 626 627#define IS_I830(dev) ((dev)->pci_device == 0x3577) 628#define IS_845G(dev) ((dev)->pci_device == 0x2562) 629#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 630#define IS_I855(dev) ((dev)->pci_device == 0x3582) 631#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 632 633#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) 634#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 635#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 636#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ 637 (dev)->pci_device == 0x27AE) 638#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ 639 (dev)->pci_device == 0x2982 || \ 640 (dev)->pci_device == 0x2992 || \ 641 (dev)->pci_device == 0x29A2 || \ 642 (dev)->pci_device == 0x2A02 || \ 643 (dev)->pci_device == 0x2A12 || \ 644 (dev)->pci_device == 0x2A42 || \ 645 (dev)->pci_device == 0x2E02 || \ 646 (dev)->pci_device == 0x2E12 || \ 647 (dev)->pci_device == 0x2E22 || \ 648 (dev)->pci_device == 0x2E32) 649 650#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) 651 652#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 653 654#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ 655 (dev)->pci_device == 0x2E12 || \ 656 (dev)->pci_device == 0x2E22 || \ 657 (dev)->pci_device == 0x2E32 || \ 658 IS_GM45(dev)) 659 660#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ 661 
(dev)->pci_device == 0x29B2 || \ 662 (dev)->pci_device == 0x29D2) 663 664#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 665 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 666 667#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 668 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 669 670#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 671 672#define PRIMARY_RINGBUFFER_SIZE (128*1024) 673 674#endif
| 242 /** 243 * List of objects currently involved in rendering from the 244 * ringbuffer. 245 * 246 * A reference is held on the buffer while on this list. 247 */ 248 struct list_head active_list; 249 250 /** 251 * List of objects which are not in the ringbuffer but which 252 * still have a write_domain which needs to be flushed before 253 * unbinding. 254 * 255 * A reference is held on the buffer while on this list. 256 */ 257 struct list_head flushing_list; 258 259 /** 260 * LRU list of objects which are not in the ringbuffer and 261 * are ready to unbind, but are still in the GTT. 262 * 263 * A reference is not held on the buffer while on this list, 264 * as merely being GTT-bound shouldn't prevent its being 265 * freed, and we'll pull it off the list in the free path. 266 */ 267 struct list_head inactive_list; 268 269 /** 270 * List of breadcrumbs associated with GPU requests currently 271 * outstanding. 272 */ 273 struct list_head request_list; 274#ifdef __linux__ 275 /** 276 * We leave the user IRQ off as much as possible, 277 * but this means that requests will finish and never 278 * be retired once the system goes idle. Set a timer to 279 * fire periodically while the ring is running. When it 280 * fires, go retire requests. 281 */ 282 struct delayed_work retire_work; 283#endif 284 uint32_t next_gem_seqno; 285 286 /** 287 * Waiting sequence number, if any 288 */ 289 uint32_t waiting_gem_seqno; 290 291 /** 292 * Last seq seen at irq time 293 */ 294 uint32_t irq_gem_seqno; 295 296 /** 297 * Flag if the X Server, and thus DRM, is not currently in 298 * control of the device. 299 * 300 * This is set between LeaveVT and EnterVT. It needs to be 301 * replaced with a semaphore. It also needs to be 302 * transitioned away from for kernel modesetting. 303 */ 304 int suspended; 305 306 /** 307 * Flag if the hardware appears to be wedged. 308 * 309 * This is set when attempts to idle the device timeout. 
310 * It prevents command submission from occuring and makes 311 * every pending request fail 312 */ 313 int wedged; 314 315 /** Bit 6 swizzling required for X tiling */ 316 uint32_t bit_6_swizzle_x; 317 /** Bit 6 swizzling required for Y tiling */ 318 uint32_t bit_6_swizzle_y; 319 } mm; 320} drm_i915_private_t; 321 322enum intel_chip_family { 323 CHIP_I8XX = 0x01, 324 CHIP_I9XX = 0x02, 325 CHIP_I915 = 0x04, 326 CHIP_I965 = 0x08, 327}; 328 329/** driver private structure attached to each drm_gem_object */ 330struct drm_i915_gem_object { 331 struct drm_gem_object *obj; 332 333 /** Current space allocated to this object in the GTT, if any. */ 334 struct drm_mm_node *gtt_space; 335 336 /** This object's place on the active/flushing/inactive lists */ 337 struct list_head list; 338 339 /** 340 * This is set if the object is on the active or flushing lists 341 * (has pending rendering), and is not set if it's on inactive (ready 342 * to be unbound). 343 */ 344 int active; 345 346 /** 347 * This is set if the object has been written to since last bound 348 * to the GTT 349 */ 350 int dirty; 351 352 /** AGP memory structure for our GTT binding. */ 353 DRM_AGP_MEM *agp_mem; 354 355 struct page **page_list; 356 357 /** 358 * Current offset of the object in GTT space. 359 * 360 * This is the same as gtt_space->start 361 */ 362 uint32_t gtt_offset; 363 364 /** Boolean whether this object has a valid gtt offset. */ 365 int gtt_bound; 366 367 /** How many users have pinned this object in GTT space */ 368 int pin_count; 369 370 /** Breadcrumb of last rendering to the buffer. */ 371 uint32_t last_rendering_seqno; 372 373 /** Current tiling mode for the object. */ 374 uint32_t tiling_mode; 375 376 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ 377 uint32_t agp_type; 378 379 /** 380 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when 381 * GEM_DOMAIN_CPU is not in the object's read domain. 
382 */ 383 uint8_t *page_cpu_valid; 384}; 385 386/** 387 * Request queue structure. 388 * 389 * The request queue allows us to note sequence numbers that have been emitted 390 * and may be associated with active buffers to be retired. 391 * 392 * By keeping this list, we can avoid having to do questionable 393 * sequence-number comparisons on buffer last_rendering_seqnos, and associate 394 * an emission time with seqnos for tracking how far ahead of the GPU we are. 395 */ 396struct drm_i915_gem_request { 397 /** GEM sequence number associated with this request. */ 398 uint32_t seqno; 399 400 /** Time at which this request was emitted, in jiffies. */ 401 unsigned long emitted_jiffies; 402 403 /** Cache domains that were flushed at the start of the request. */ 404 uint32_t flush_domains; 405 406 struct list_head list; 407}; 408 409struct drm_i915_file_private { 410 struct { 411 uint32_t last_gem_seqno; 412 uint32_t last_gem_throttle_seqno; 413 } mm; 414}; 415 416extern struct drm_ioctl_desc i915_ioctls[]; 417extern int i915_max_ioctl; 418 419 /* i915_dma.c */ 420extern void i915_kernel_lost_context(struct drm_device * dev); 421extern int i915_driver_load(struct drm_device *, unsigned long flags); 422extern int i915_driver_unload(struct drm_device *); 423extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); 424extern void i915_driver_lastclose(struct drm_device * dev); 425extern void i915_driver_preclose(struct drm_device *dev, 426 struct drm_file *file_priv); 427extern void i915_driver_postclose(struct drm_device *dev, 428 struct drm_file *file_priv); 429extern int i915_driver_device_is_agp(struct drm_device * dev); 430extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 431 unsigned long arg); 432extern int i915_emit_box(struct drm_device *dev, 433 struct drm_clip_rect __user *boxes, 434 int i, int DR1, int DR4); 435 436/* i915_irq.c */ 437extern int i915_irq_emit(struct drm_device *dev, void *data, 438 struct drm_file 
*file_priv); 439extern int i915_irq_wait(struct drm_device *dev, void *data, 440 struct drm_file *file_priv); 441void i915_user_irq_get(struct drm_device *dev); 442void i915_user_irq_put(struct drm_device *dev); 443 444extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 445extern void i915_driver_irq_preinstall(struct drm_device * dev); 446extern int i915_driver_irq_postinstall(struct drm_device *dev); 447extern void i915_driver_irq_uninstall(struct drm_device * dev); 448extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 449 struct drm_file *file_priv); 450extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 451 struct drm_file *file_priv); 452extern int i915_enable_vblank(struct drm_device *dev, int crtc); 453extern void i915_disable_vblank(struct drm_device *dev, int crtc); 454extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); 455extern u32 g45_get_vblank_counter(struct drm_device *dev, int crtc); 456extern int i915_vblank_swap(struct drm_device *dev, void *data, 457 struct drm_file *file_priv); 458 459void 460i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 461 462void 463i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 464 465 466/* i915_mem.c */ 467extern int i915_mem_alloc(struct drm_device *dev, void *data, 468 struct drm_file *file_priv); 469extern int i915_mem_free(struct drm_device *dev, void *data, 470 struct drm_file *file_priv); 471extern int i915_mem_init_heap(struct drm_device *dev, void *data, 472 struct drm_file *file_priv); 473extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, 474 struct drm_file *file_priv); 475extern void i915_mem_takedown(struct mem_block **heap); 476extern void i915_mem_release(struct drm_device * dev, 477 struct drm_file *file_priv, struct mem_block *heap); 478#ifdef I915_HAVE_GEM 479/* i915_gem.c */ 480int i915_gem_init_ioctl(struct drm_device *dev, void *data, 481 struct drm_file *file_priv); 482int 
i915_gem_create_ioctl(struct drm_device *dev, void *data, 483 struct drm_file *file_priv); 484int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 485 struct drm_file *file_priv); 486int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 487 struct drm_file *file_priv); 488int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 489 struct drm_file *file_priv); 490int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 491 struct drm_file *file_priv); 492int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 493 struct drm_file *file_priv); 494int i915_gem_execbuffer(struct drm_device *dev, void *data, 495 struct drm_file *file_priv); 496int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 497 struct drm_file *file_priv); 498int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 499 struct drm_file *file_priv); 500int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 501 struct drm_file *file_priv); 502int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 503 struct drm_file *file_priv); 504int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 505 struct drm_file *file_priv); 506int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 507 struct drm_file *file_priv); 508int i915_gem_set_tiling(struct drm_device *dev, void *data, 509 struct drm_file *file_priv); 510int i915_gem_get_tiling(struct drm_device *dev, void *data, 511 struct drm_file *file_priv); 512void i915_gem_load(struct drm_device *dev); 513int i915_gem_proc_init(struct drm_minor *minor); 514void i915_gem_proc_cleanup(struct drm_minor *minor); 515int i915_gem_init_object(struct drm_gem_object *obj); 516void i915_gem_free_object(struct drm_gem_object *obj); 517int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 518void i915_gem_object_unpin(struct drm_gem_object *obj); 519void i915_gem_lastclose(struct drm_device *dev); 520uint32_t i915_get_gem_seqno(struct drm_device *dev); 521void 
i915_gem_retire_requests(struct drm_device *dev); 522void i915_gem_retire_work_handler(struct work_struct *work); 523void i915_gem_clflush_object(struct drm_gem_object *obj); 524 525/* i915_gem_tiling.c */ 526void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 527 528/* i915_gem_debug.c */ 529void i915_gem_dump_object(struct drm_gem_object *obj, int len, 530 const char *where, uint32_t mark); 531#if WATCH_INACTIVE 532void i915_verify_inactive(struct drm_device *dev, char *file, int line); 533#else 534#define i915_verify_inactive(dev, file, line) 535#endif 536void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); 537void i915_gem_dump_object(struct drm_gem_object *obj, int len, 538 const char *where, uint32_t mark); 539void i915_dump_lru(struct drm_device *dev, const char *where); 540#endif /* I915_HAVE_GEM */ 541 542/* i915_suspend.c */ 543extern int i915_save_state(struct drm_device *dev); 544extern int i915_restore_state(struct drm_device *dev); 545 546/* i915_opregion.c */ 547extern int intel_opregion_init(struct drm_device *dev); 548extern void intel_opregion_free(struct drm_device *dev); 549extern void opregion_asle_intr(struct drm_device *dev); 550extern void opregion_enable_asle(struct drm_device *dev); 551 552/** 553 * Lock test for when it's just for synchronization of ring access. 554 * 555 * In that case, we don't need to do it when GEM is initialized as nobody else 556 * has access to the ring. 
557 */ 558#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ 559 if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ 560 LOCK_TEST_WITH_RETURN(dev, file_priv); \ 561} while (0) 562 563#if defined(__FreeBSD__) 564typedef boolean_t bool; 565#endif 566 567#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) 568#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) 569#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) 570#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) 571#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg)) 572#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val)) 573 574#define I915_VERBOSE 0 575 576#define RING_LOCALS unsigned int outring, ringmask, outcount; \ 577 volatile char *virt; 578 579#define BEGIN_LP_RING(n) do { \ 580 if (I915_VERBOSE) \ 581 DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ 582 if (dev_priv->ring.space < (n)*4) \ 583 i915_wait_ring(dev, (n)*4, __func__); \ 584 outcount = 0; \ 585 outring = dev_priv->ring.tail; \ 586 ringmask = dev_priv->ring.tail_mask; \ 587 virt = dev_priv->ring.virtual_start; \ 588} while (0) 589 590#define OUT_RING(n) do { \ 591 if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ 592 *(volatile unsigned int *)(virt + outring) = (n); \ 593 outcount++; \ 594 outring += 4; \ 595 outring &= ringmask; \ 596} while (0) 597 598#define ADVANCE_LP_RING() do { \ 599 if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \ 600 dev_priv->ring.tail = outring; \ 601 dev_priv->ring.space -= outcount * 4; \ 602 I915_WRITE(PRB0_TAIL, outring); \ 603} while(0) 604 605/** 606 * Reads a dword out of the status page, which is written to from the command 607 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or 608 * MI_STORE_DATA_IMM. 609 * 610 * The following dwords have a reserved meaning: 611 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. 
612 * 0x04: ring 0 head pointer 613 * 0x05: ring 1 head pointer (915-class) 614 * 0x06: ring 2 head pointer (915-class) 615 * 0x10-0x1b: Context status DWords (GM45) 616 * 0x1f: Last written status offset. (GM45) 617 * 618 * The area from dword 0x20 to 0x3ff is available for driver usage. 619 */ 620#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 621#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 622#define I915_GEM_HWS_INDEX 0x20 623#define I915_BREADCRUMB_INDEX 0x21 624 625extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 626 627#define IS_I830(dev) ((dev)->pci_device == 0x3577) 628#define IS_845G(dev) ((dev)->pci_device == 0x2562) 629#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 630#define IS_I855(dev) ((dev)->pci_device == 0x3582) 631#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 632 633#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) 634#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 635#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 636#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ 637 (dev)->pci_device == 0x27AE) 638#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ 639 (dev)->pci_device == 0x2982 || \ 640 (dev)->pci_device == 0x2992 || \ 641 (dev)->pci_device == 0x29A2 || \ 642 (dev)->pci_device == 0x2A02 || \ 643 (dev)->pci_device == 0x2A12 || \ 644 (dev)->pci_device == 0x2A42 || \ 645 (dev)->pci_device == 0x2E02 || \ 646 (dev)->pci_device == 0x2E12 || \ 647 (dev)->pci_device == 0x2E22 || \ 648 (dev)->pci_device == 0x2E32) 649 650#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) 651 652#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 653 654#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ 655 (dev)->pci_device == 0x2E12 || \ 656 (dev)->pci_device == 0x2E22 || \ 657 (dev)->pci_device == 0x2E32 || \ 658 IS_GM45(dev)) 659 660#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ 661 
(dev)->pci_device == 0x29B2 || \ 662 (dev)->pci_device == 0x29D2) 663 664#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 665 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 666 667#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 668 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 669 670#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 671 672#define PRIMARY_RINGBUFFER_SIZE (128*1024) 673 674#endif
|