/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2774131Sjlemon * 2874131Sjlemon */ 2974131Sjlemon 3074131Sjlemon#include <sys/cdefs.h> 31119418Sobrien__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_drv.h 240539 2012-09-15 19:28:54Z ed $"); 32119418Sobrien 33119418Sobrien#ifndef _I915_DRV_H_ 3474131Sjlemon#define _I915_DRV_H_ 3574131Sjlemon 3674131Sjlemon#include <dev/agp/agp_i810.h> 3774131Sjlemon#include <dev/drm2/drm_mm.h> 3874131Sjlemon#include <dev/drm2/i915/i915_reg.h> 3974131Sjlemon#include <dev/drm2/i915/intel_ringbuffer.h> 4074131Sjlemon#include <dev/drm2/i915/intel_bios.h> 41129876Sphk 4274131Sjlemon/* General customization: 4374131Sjlemon */ 4474131Sjlemon 4574131Sjlemon#define DRIVER_AUTHOR "Tungsten Graphics, Inc." 4674131Sjlemon 4774131Sjlemon#define DRIVER_NAME "i915" 4874131Sjlemon#define DRIVER_DESC "Intel Graphics" 4974131Sjlemon#define DRIVER_DATE "20080730" 50109514Sobrien 5174131SjlemonMALLOC_DECLARE(DRM_I915_GEM); 52226154Smarius 5374131Sjlemonenum pipe { 5474131Sjlemon PIPE_A = 0, 5574131Sjlemon PIPE_B, 5674131Sjlemon PIPE_C, 5774131Sjlemon I915_MAX_PIPES 5874131Sjlemon}; 5974131Sjlemon#define pipe_name(p) ((p) + 'A') 6074131Sjlemon#define I915_NUM_PIPE 2 6174131Sjlemon 6274131Sjlemonenum plane { 6395722Sphk PLANE_A = 0, 6474131Sjlemon PLANE_B, 6574131Sjlemon PLANE_C, 6674131Sjlemon}; 6774131Sjlemon#define plane_name(p) ((p) + 'A') 6874131Sjlemon 6974131Sjlemon#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 7074131Sjlemon 7174131Sjlemon#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 7274131Sjlemon 7374131Sjlemon/* Interface history: 7474131Sjlemon * 7574131Sjlemon * 1.1: Original. 
7674131Sjlemon * 1.2: Add Power Management 7774131Sjlemon * 1.3: Add vblank support 7884145Sjlemon * 1.4: Fix cmdbuffer path, add heap destroy 7984145Sjlemon * 1.5: Add vblank pipe configuration 80215905Smarius * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 8174131Sjlemon * - Support vertical blank on secondary display pipe 82164827Smarius */ 83221407Smarius#define DRIVER_MAJOR 1 84221407Smarius#define DRIVER_MINOR 6 85221407Smarius#define DRIVER_PATCHLEVEL 0 86221407Smarius 87221407Smarius#define WATCH_COHERENCY 0 88164827Smarius#define WATCH_BUF 0 89164827Smarius#define WATCH_EXEC 0 90164827Smarius#define WATCH_LRU 0 91221407Smarius#define WATCH_RELOC 0 92221407Smarius#define WATCH_INACTIVE 0 93221407Smarius#define WATCH_PWRITE 0 94221407Smarius 95221407Smarius#define I915_GEM_PHYS_CURSOR_0 1 96221407Smarius#define I915_GEM_PHYS_CURSOR_1 2 9774131Sjlemon#define I915_GEM_PHYS_OVERLAY_REGS 3 9874131Sjlemon#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) 9974131Sjlemon 10074131Sjlemonstruct drm_i915_gem_phys_object { 101164827Smarius int id; 10274131Sjlemon drm_dma_handle_t *handle; 10374131Sjlemon struct drm_i915_gem_object *cur_obj; 10474131Sjlemon}; 10574131Sjlemon 10674131Sjlemonstruct drm_i915_private; 10774131Sjlemon 108221407Smariusstruct drm_i915_display_funcs { 10974131Sjlemon void (*dpms)(struct drm_crtc *crtc, int mode); 11074131Sjlemon bool (*fbc_enabled)(struct drm_device *dev); 11174131Sjlemon void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 11274131Sjlemon void (*disable_fbc)(struct drm_device *dev); 11374131Sjlemon int (*get_display_clock_speed)(struct drm_device *dev); 11474131Sjlemon int (*get_fifo_size)(struct drm_device *dev, int plane); 11574131Sjlemon void (*update_wm)(struct drm_device *dev); 11674131Sjlemon void (*update_sprite_wm)(struct drm_device *dev, int pipe, 11774131Sjlemon uint32_t sprite_width, int pixel_size); 11874131Sjlemon int (*crtc_mode_set)(struct drm_crtc *crtc, 11974131Sjlemon 
struct drm_display_mode *mode, 12074131Sjlemon struct drm_display_mode *adjusted_mode, 12174131Sjlemon int x, int y, 12274131Sjlemon struct drm_framebuffer *old_fb); 12374131Sjlemon void (*write_eld)(struct drm_connector *connector, 12474131Sjlemon struct drm_crtc *crtc); 12574131Sjlemon void (*fdi_link_train)(struct drm_crtc *crtc); 12674131Sjlemon void (*init_clock_gating)(struct drm_device *dev); 12795718Sphk void (*init_pch_clock_gating)(struct drm_device *dev); 12874131Sjlemon int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 12974131Sjlemon struct drm_framebuffer *fb, 13074131Sjlemon struct drm_i915_gem_object *obj); 13184145Sjlemon void (*force_wake_get)(struct drm_i915_private *dev_priv); 13274131Sjlemon void (*force_wake_put)(struct drm_i915_private *dev_priv); 13374131Sjlemon int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 13474131Sjlemon int x, int y); 13574131Sjlemon /* clock updates for mode set */ 13674131Sjlemon /* cursor updates */ 137221407Smarius /* render clock increase/decrease */ 13874131Sjlemon /* display clock increase/decrease */ 13974131Sjlemon /* pll clock increase/decrease */ 14084145Sjlemon}; 14174131Sjlemon 14274131Sjlemonstruct intel_device_info { 14374131Sjlemon u8 gen; 14484145Sjlemon u8 is_mobile:1; 14574131Sjlemon u8 is_i85x:1; 14674131Sjlemon u8 is_i915g:1; 14774131Sjlemon u8 is_i945gm:1; 14895718Sphk u8 is_g33:1; 14974131Sjlemon u8 need_gfx_hws:1; 15074131Sjlemon u8 is_g4x:1; 15174131Sjlemon u8 is_pineview:1; 15274131Sjlemon u8 is_broadwater:1; 15374131Sjlemon u8 is_crestline:1; 15474131Sjlemon u8 is_ivybridge:1; 15574131Sjlemon u8 has_fbc:1; 15674131Sjlemon u8 has_pipe_cxsr:1; 15774131Sjlemon u8 has_hotplug:1; 15874131Sjlemon u8 cursor_needs_physical:1; 15974131Sjlemon u8 has_overlay:1; 16074131Sjlemon u8 overlay_needs_physical:1; 16174131Sjlemon u8 supports_tv:1; 16274131Sjlemon u8 has_bsd_ring:1; 16374131Sjlemon u8 has_blt_ring:1; 16474131Sjlemon u8 has_llc:1; 16574131Sjlemon}; 
16674131Sjlemon 16774131Sjlemon#define I915_PPGTT_PD_ENTRIES 512 16874131Sjlemon#define I915_PPGTT_PT_ENTRIES 1024 16974131Sjlemonstruct i915_hw_ppgtt { 17074131Sjlemon unsigned num_pd_entries; 17174131Sjlemon vm_page_t *pt_pages; 17274131Sjlemon uint32_t pd_offset; 17374131Sjlemon vm_paddr_t *pt_dma_addr; 17474131Sjlemon vm_paddr_t scratch_page_dma_addr; 17574131Sjlemon}; 17674131Sjlemon 17774131Sjlemonenum no_fbc_reason { 17874131Sjlemon FBC_NO_OUTPUT, /* no outputs enabled to compress */ 17974131Sjlemon FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ 180215716Smarius FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ 181215716Smarius FBC_MODE_TOO_LARGE, /* mode too large for compression */ 182213384Smarius FBC_BAD_PLANE, /* fbc not supported on plane */ 183213384Smarius FBC_NOT_TILED, /* buffer not tiled */ 18474131Sjlemon FBC_MULTIPLE_PIPES, /* more than one pipe active */ 18595718Sphk FBC_MODULE_PARAM, 18674131Sjlemon}; 187215905Smarius 188215905Smariusstruct mem_block { 189215905Smarius struct mem_block *next; 190215905Smarius struct mem_block *prev; 191215905Smarius int start; 192215905Smarius int size; 193215905Smarius struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ 194215905Smarius}; 195215905Smarius 196215905Smariusstruct opregion_header; 197215905Smariusstruct opregion_acpi; 198struct opregion_swsci; 199struct opregion_asle; 200 201struct intel_opregion { 202 struct opregion_header *header; 203 struct opregion_acpi *acpi; 204 struct opregion_swsci *swsci; 205 struct opregion_asle *asle; 206 void *vbt; 207 u32 *lid_state; 208}; 209#define OPREGION_SIZE (8*1024) 210 211#define I915_FENCE_REG_NONE -1 212#define I915_MAX_NUM_FENCES 16 213/* 16 fences + sign bit for FENCE_REG_NONE */ 214#define I915_MAX_NUM_FENCE_BITS 5 215 216struct drm_i915_fence_reg { 217 struct list_head lru_list; 218 struct drm_i915_gem_object *obj; 219 uint32_t setup_seqno; 220 int pin_count; 221}; 222 223struct 
sdvo_device_mapping { 224 u8 initialized; 225 u8 dvo_port; 226 u8 slave_addr; 227 u8 dvo_wiring; 228 u8 i2c_pin; 229 u8 ddc_pin; 230}; 231 232enum intel_pch { 233 PCH_IBX, /* Ibexpeak PCH */ 234 PCH_CPT, /* Cougarpoint PCH */ 235}; 236 237#define QUIRK_PIPEA_FORCE (1<<0) 238#define QUIRK_LVDS_SSC_DISABLE (1<<1) 239 240struct intel_fbdev; 241struct intel_fbc_work; 242 243typedef struct drm_i915_private { 244 struct drm_device *dev; 245 246 device_t *gmbus_bridge; 247 device_t *bbbus_bridge; 248 device_t *gmbus; 249 device_t *bbbus; 250 /** gmbus_sx protects against concurrent usage of the single hw gmbus 251 * controller on different i2c buses. */ 252 struct sx gmbus_sx; 253 254 int has_gem; 255 int relative_constants_mode; 256 257 drm_local_map_t *sarea; 258 drm_local_map_t *mmio_map; 259 260 /** gt_fifo_count and the subsequent register write are synchronized 261 * with dev->struct_mutex. */ 262 unsigned gt_fifo_count; 263 /** forcewake_count is protected by gt_lock */ 264 unsigned forcewake_count; 265 /** gt_lock is also taken in irq contexts. 
*/ 266 struct mtx gt_lock; 267 268 drm_i915_sarea_t *sarea_priv; 269 /* drm_i915_ring_buffer_t ring; */ 270 struct intel_ring_buffer rings[I915_NUM_RINGS]; 271 uint32_t next_seqno; 272 273 drm_dma_handle_t *status_page_dmah; 274 void *hw_status_page; 275 dma_addr_t dma_status_page; 276 uint32_t counter; 277 unsigned int status_gfx_addr; 278 drm_local_map_t hws_map; 279 struct drm_gem_object *hws_obj; 280 281 struct drm_i915_gem_object *pwrctx; 282 struct drm_i915_gem_object *renderctx; 283 284 unsigned int cpp; 285 int back_offset; 286 int front_offset; 287 int current_page; 288 int page_flipping; 289 290 atomic_t irq_received; 291 u32 trace_irq_seqno; 292 293 /** Cached value of IER to avoid reads in updating the bitfield */ 294 u32 pipestat[2]; 295 u32 irq_mask; 296 u32 gt_irq_mask; 297 u32 pch_irq_mask; 298 struct mtx irq_lock; 299 300 u32 hotplug_supported_mask; 301 302 int tex_lru_log_granularity; 303 int allow_batchbuffer; 304 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 305 int vblank_pipe; 306 int num_pipe; 307 308 /* For hangcheck timer */ 309#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000) 310 int hangcheck_count; 311 uint32_t last_acthd; 312 uint32_t last_acthd_bsd; 313 uint32_t last_acthd_blt; 314 uint32_t last_instdone; 315 uint32_t last_instdone1; 316 317 struct intel_opregion opregion; 318 319 320 /* overlay */ 321 struct intel_overlay *overlay; 322 bool sprite_scaling_enabled; 323 324 /* LVDS info */ 325 int backlight_level; /* restore backlight to this value */ 326 bool backlight_enabled; 327 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 328 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 329 330 /* Feature bits from the VBIOS */ 331 unsigned int int_tv_support:1; 332 unsigned int lvds_dither:1; 333 unsigned int lvds_vbt:1; 334 unsigned int int_crt_support:1; 335 unsigned int lvds_use_ssc:1; 336 unsigned int display_clock_mode:1; 337 int lvds_ssc_freq; 338 struct { 339 int rate; 340 int lanes; 341 int 
preemphasis; 342 int vswing; 343 344 bool initialized; 345 bool support; 346 int bpp; 347 struct edp_power_seq pps; 348 } edp; 349 bool no_aux_handshake; 350 351 int crt_ddc_pin; 352 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 353 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 354 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 355 356 /* PCH chipset type */ 357 enum intel_pch pch_type; 358 359 /* Display functions */ 360 struct drm_i915_display_funcs display; 361 362 unsigned long quirks; 363 364 /* Register state */ 365 bool modeset_on_lid; 366 u8 saveLBB; 367 u32 saveDSPACNTR; 368 u32 saveDSPBCNTR; 369 u32 saveDSPARB; 370 u32 saveHWS; 371 u32 savePIPEACONF; 372 u32 savePIPEBCONF; 373 u32 savePIPEASRC; 374 u32 savePIPEBSRC; 375 u32 saveFPA0; 376 u32 saveFPA1; 377 u32 saveDPLL_A; 378 u32 saveDPLL_A_MD; 379 u32 saveHTOTAL_A; 380 u32 saveHBLANK_A; 381 u32 saveHSYNC_A; 382 u32 saveVTOTAL_A; 383 u32 saveVBLANK_A; 384 u32 saveVSYNC_A; 385 u32 saveBCLRPAT_A; 386 u32 saveTRANSACONF; 387 u32 saveTRANS_HTOTAL_A; 388 u32 saveTRANS_HBLANK_A; 389 u32 saveTRANS_HSYNC_A; 390 u32 saveTRANS_VTOTAL_A; 391 u32 saveTRANS_VBLANK_A; 392 u32 saveTRANS_VSYNC_A; 393 u32 savePIPEASTAT; 394 u32 saveDSPASTRIDE; 395 u32 saveDSPASIZE; 396 u32 saveDSPAPOS; 397 u32 saveDSPAADDR; 398 u32 saveDSPASURF; 399 u32 saveDSPATILEOFF; 400 u32 savePFIT_PGM_RATIOS; 401 u32 saveBLC_HIST_CTL; 402 u32 saveBLC_PWM_CTL; 403 u32 saveBLC_PWM_CTL2; 404 u32 saveBLC_CPU_PWM_CTL; 405 u32 saveBLC_CPU_PWM_CTL2; 406 u32 saveFPB0; 407 u32 saveFPB1; 408 u32 saveDPLL_B; 409 u32 saveDPLL_B_MD; 410 u32 saveHTOTAL_B; 411 u32 saveHBLANK_B; 412 u32 saveHSYNC_B; 413 u32 saveVTOTAL_B; 414 u32 saveVBLANK_B; 415 u32 saveVSYNC_B; 416 u32 saveBCLRPAT_B; 417 u32 saveTRANSBCONF; 418 u32 saveTRANS_HTOTAL_B; 419 u32 saveTRANS_HBLANK_B; 420 u32 saveTRANS_HSYNC_B; 421 u32 saveTRANS_VTOTAL_B; 422 u32 saveTRANS_VBLANK_B; 423 u32 saveTRANS_VSYNC_B; 424 u32 savePIPEBSTAT; 425 u32 
saveDSPBSTRIDE; 426 u32 saveDSPBSIZE; 427 u32 saveDSPBPOS; 428 u32 saveDSPBADDR; 429 u32 saveDSPBSURF; 430 u32 saveDSPBTILEOFF; 431 u32 saveVGA0; 432 u32 saveVGA1; 433 u32 saveVGA_PD; 434 u32 saveVGACNTRL; 435 u32 saveADPA; 436 u32 saveLVDS; 437 u32 savePP_ON_DELAYS; 438 u32 savePP_OFF_DELAYS; 439 u32 saveDVOA; 440 u32 saveDVOB; 441 u32 saveDVOC; 442 u32 savePP_ON; 443 u32 savePP_OFF; 444 u32 savePP_CONTROL; 445 u32 savePP_DIVISOR; 446 u32 savePFIT_CONTROL; 447 u32 save_palette_a[256]; 448 u32 save_palette_b[256]; 449 u32 saveDPFC_CB_BASE; 450 u32 saveFBC_CFB_BASE; 451 u32 saveFBC_LL_BASE; 452 u32 saveFBC_CONTROL; 453 u32 saveFBC_CONTROL2; 454 u32 saveIER; 455 u32 saveIIR; 456 u32 saveIMR; 457 u32 saveDEIER; 458 u32 saveDEIMR; 459 u32 saveGTIER; 460 u32 saveGTIMR; 461 u32 saveFDI_RXA_IMR; 462 u32 saveFDI_RXB_IMR; 463 u32 saveCACHE_MODE_0; 464 u32 saveMI_ARB_STATE; 465 u32 saveSWF0[16]; 466 u32 saveSWF1[16]; 467 u32 saveSWF2[3]; 468 u8 saveMSR; 469 u8 saveSR[8]; 470 u8 saveGR[25]; 471 u8 saveAR_INDEX; 472 u8 saveAR[21]; 473 u8 saveDACMASK; 474 u8 saveCR[37]; 475 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 476 u32 saveCURACNTR; 477 u32 saveCURAPOS; 478 u32 saveCURABASE; 479 u32 saveCURBCNTR; 480 u32 saveCURBPOS; 481 u32 saveCURBBASE; 482 u32 saveCURSIZE; 483 u32 saveDP_B; 484 u32 saveDP_C; 485 u32 saveDP_D; 486 u32 savePIPEA_GMCH_DATA_M; 487 u32 savePIPEB_GMCH_DATA_M; 488 u32 savePIPEA_GMCH_DATA_N; 489 u32 savePIPEB_GMCH_DATA_N; 490 u32 savePIPEA_DP_LINK_M; 491 u32 savePIPEB_DP_LINK_M; 492 u32 savePIPEA_DP_LINK_N; 493 u32 savePIPEB_DP_LINK_N; 494 u32 saveFDI_RXA_CTL; 495 u32 saveFDI_TXA_CTL; 496 u32 saveFDI_RXB_CTL; 497 u32 saveFDI_TXB_CTL; 498 u32 savePFA_CTL_1; 499 u32 savePFB_CTL_1; 500 u32 savePFA_WIN_SZ; 501 u32 savePFB_WIN_SZ; 502 u32 savePFA_WIN_POS; 503 u32 savePFB_WIN_POS; 504 u32 savePCH_DREF_CONTROL; 505 u32 saveDISP_ARB_CTL; 506 u32 savePIPEA_DATA_M1; 507 u32 savePIPEA_DATA_N1; 508 u32 savePIPEA_LINK_M1; 509 u32 savePIPEA_LINK_N1; 510 u32 savePIPEB_DATA_M1; 
511 u32 savePIPEB_DATA_N1; 512 u32 savePIPEB_LINK_M1; 513 u32 savePIPEB_LINK_N1; 514 u32 saveMCHBAR_RENDER_STANDBY; 515 u32 savePCH_PORT_HOTPLUG; 516 517 struct { 518 /** Memory allocator for GTT stolen memory */ 519 struct drm_mm stolen; 520 /** Memory allocator for GTT */ 521 struct drm_mm gtt_space; 522 /** List of all objects in gtt_space. Used to restore gtt 523 * mappings on resume */ 524 struct list_head gtt_list; 525 526 /** Usable portion of the GTT for GEM */ 527 unsigned long gtt_start; 528 unsigned long gtt_mappable_end; 529 unsigned long gtt_end; 530 531 /** PPGTT used for aliasing the PPGTT with the GTT */ 532 struct i915_hw_ppgtt *aliasing_ppgtt; 533 534 /** 535 * List of objects currently involved in rendering from the 536 * ringbuffer. 537 * 538 * Includes buffers having the contents of their GPU caches 539 * flushed, not necessarily primitives. last_rendering_seqno 540 * represents when the rendering involved will be completed. 541 * 542 * A reference is held on the buffer while on this list. 543 */ 544 struct list_head active_list; 545 546 /** 547 * List of objects which are not in the ringbuffer but which 548 * still have a write_domain which needs to be flushed before 549 * unbinding. 550 * 551 * A reference is held on the buffer while on this list. 552 */ 553 struct list_head flushing_list; 554 555 /** 556 * LRU list of objects which are not in the ringbuffer and 557 * are ready to unbind, but are still in the GTT. 558 * 559 * last_rendering_seqno is 0 while an object is in this list. 560 * 561 * A reference is not held on the buffer while on this list, 562 * as merely being GTT-bound shouldn't prevent its being 563 * freed, and we'll pull it off the list in the free path. 564 */ 565 struct list_head inactive_list; 566 567 /** 568 * LRU list of objects which are not in the ringbuffer but 569 * are still pinned in the GTT. 570 */ 571 struct list_head pinned_list; 572 573 /** LRU list of objects with fence regs on them. 
*/ 574 struct list_head fence_list; 575 576 /** 577 * List of objects currently pending being freed. 578 * 579 * These objects are no longer in use, but due to a signal 580 * we were prevented from freeing them at the appointed time. 581 */ 582 struct list_head deferred_free_list; 583 584 /** 585 * We leave the user IRQ off as much as possible, 586 * but this means that requests will finish and never 587 * be retired once the system goes idle. Set a timer to 588 * fire periodically while the ring is running. When it 589 * fires, go retire requests. 590 */ 591 struct timeout_task retire_task; 592 593 /** 594 * Are we in a non-interruptible section of code like 595 * modesetting? 596 */ 597 bool interruptible; 598 599 uint32_t next_gem_seqno; 600 601 /** 602 * Waiting sequence number, if any 603 */ 604 uint32_t waiting_gem_seqno; 605 606 /** 607 * Last seq seen at irq time 608 */ 609 uint32_t irq_gem_seqno; 610 611 /** 612 * Flag if the X Server, and thus DRM, is not currently in 613 * control of the device. 614 * 615 * This is set between LeaveVT and EnterVT. It needs to be 616 * replaced with a semaphore. It also needs to be 617 * transitioned away from for kernel modesetting. 618 */ 619 int suspended; 620 621 /** 622 * Flag if the hardware appears to be wedged. 623 * 624 * This is set when attempts to idle the device timeout. 
625 * It prevents command submission from occuring and makes 626 * every pending request fail 627 */ 628 int wedged; 629 630 /** Bit 6 swizzling required for X tiling */ 631 uint32_t bit_6_swizzle_x; 632 /** Bit 6 swizzling required for Y tiling */ 633 uint32_t bit_6_swizzle_y; 634 635 /* storage for physical objects */ 636 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 637 638 /* accounting, useful for userland debugging */ 639 size_t gtt_total; 640 size_t mappable_gtt_total; 641 size_t object_memory; 642 u32 object_count; 643 644 struct intel_gtt gtt; 645 eventhandler_tag i915_lowmem; 646 } mm; 647 648 const struct intel_device_info *info; 649 650 struct sdvo_device_mapping sdvo_mappings[2]; 651 /* indicate whether the LVDS_BORDER should be enabled or not */ 652 unsigned int lvds_border_bits; 653 /* Panel fitter placement and size for Ironlake+ */ 654 u32 pch_pf_pos, pch_pf_size; 655 656 struct drm_crtc *plane_to_crtc_mapping[3]; 657 struct drm_crtc *pipe_to_crtc_mapping[3]; 658 /* wait_queue_head_t pending_flip_queue; XXXKIB */ 659 bool flip_pending_is_done; 660 661 /* Reclocking support */ 662 bool render_reclock_avail; 663 bool lvds_downclock_avail; 664 /* indicates the reduced downclock for LVDS*/ 665 int lvds_downclock; 666 struct task idle_task; 667 struct callout idle_callout; 668 bool busy; 669 u16 orig_clock; 670 int child_dev_num; 671 struct child_device_config *child_dev; 672 struct drm_connector *int_lvds_connector; 673 struct drm_connector *int_edp_connector; 674 675 device_t bridge_dev; 676 bool mchbar_need_disable; 677 int mch_res_rid; 678 struct resource *mch_res; 679 680 struct mtx rps_lock; 681 u32 pm_iir; 682 struct task rps_task; 683 684 u8 cur_delay; 685 u8 min_delay; 686 u8 max_delay; 687 u8 fmax; 688 u8 fstart; 689 690 u64 last_count1; 691 unsigned long last_time1; 692 unsigned long chipset_power; 693 u64 last_count2; 694 struct timespec last_time2; 695 unsigned long gfx_power; 696 int c_m; 697 int r_t; 698 u8 corr; 699 
struct mtx *mchdev_lock; 700 701 enum no_fbc_reason no_fbc_reason; 702 703 unsigned long cfb_size; 704 unsigned int cfb_fb; 705 int cfb_plane; 706 int cfb_y; 707 struct intel_fbc_work *fbc_work; 708 709 unsigned int fsb_freq, mem_freq, is_ddr3; 710 711 struct taskqueue *tq; 712 struct task error_task; 713 struct task hotplug_task; 714 int error_completion; 715 struct mtx error_completion_lock; 716 struct drm_i915_error_state *first_error; 717 struct mtx error_lock; 718 struct callout hangcheck_timer; 719 720 unsigned long last_gpu_reset; 721 722 struct intel_fbdev *fbdev; 723 724 struct drm_property *broadcast_rgb_property; 725 struct drm_property *force_audio_property; 726} drm_i915_private_t; 727 728enum hdmi_force_audio { 729 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 730 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 731 HDMI_AUDIO_AUTO, /* trust EDID */ 732 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 733}; 734 735enum i915_cache_level { 736 I915_CACHE_NONE, 737 I915_CACHE_LLC, 738 I915_CACHE_LLC_MLC, /* gen6+ */ 739}; 740 741enum intel_chip_family { 742 CHIP_I8XX = 0x01, 743 CHIP_I9XX = 0x02, 744 CHIP_I915 = 0x04, 745 CHIP_I965 = 0x08, 746}; 747 748/** driver private structure attached to each drm_gem_object */ 749struct drm_i915_gem_object { 750 struct drm_gem_object base; 751 752 /** Current space allocated to this object in the GTT, if any. */ 753 struct drm_mm_node *gtt_space; 754 struct list_head gtt_list; 755 /** This object's place on the active/flushing/inactive lists */ 756 struct list_head ring_list; 757 struct list_head mm_list; 758 /** This object's place on GPU write list */ 759 struct list_head gpu_write_list; 760 /** This object's place in the batchbuffer or on the eviction list */ 761 struct list_head exec_list; 762 763 /** 764 * This is set if the object is on the active or flushing lists 765 * (has pending rendering), and is not set if it's on inactive (ready 766 * to be unbound). 
767 */ 768 unsigned int active:1; 769 770 /** 771 * This is set if the object has been written to since last bound 772 * to the GTT 773 */ 774 unsigned int dirty:1; 775 776 /** 777 * This is set if the object has been written to since the last 778 * GPU flush. 779 */ 780 unsigned int pending_gpu_write:1; 781 782 /** 783 * Fence register bits (if any) for this object. Will be set 784 * as needed when mapped into the GTT. 785 * Protected by dev->struct_mutex. 786 */ 787 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 788 789 /** 790 * Advice: are the backing pages purgeable? 791 */ 792 unsigned int madv:2; 793 794 /** 795 * Current tiling mode for the object. 796 */ 797 unsigned int tiling_mode:2; 798 unsigned int tiling_changed:1; 799 800 /** How many users have pinned this object in GTT space. The following 801 * users can each hold at most one reference: pwrite/pread, pin_ioctl 802 * (via user_pin_count), execbuffer (objects are not allowed multiple 803 * times for the same batchbuffer), and the framebuffer code. When 804 * switching/pageflipping, the framebuffer code has at most two buffers 805 * pinned per crtc. 806 * 807 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 808 * bits with absolutely no headroom. So use 4 bits. */ 809 unsigned int pin_count:4; 810#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 811 812 /** 813 * Is the object at the current location in the gtt mappable and 814 * fenceable? Used to avoid costly recalculations. 815 */ 816 unsigned int map_and_fenceable:1; 817 818 /** 819 * Whether the current gtt mapping needs to be mappable (and isn't just 820 * mappable by accident). Track pin and fault separate for a more 821 * accurate mappable working set. 
822 */ 823 unsigned int fault_mappable:1; 824 unsigned int pin_mappable:1; 825 826 /* 827 * Is the GPU currently using a fence to access this buffer, 828 */ 829 unsigned int pending_fenced_gpu_access:1; 830 unsigned int fenced_gpu_access:1; 831 832 unsigned int cache_level:2; 833 834 unsigned int has_aliasing_ppgtt_mapping:1; 835 836 vm_page_t *pages; 837 838 /** 839 * DMAR support 840 */ 841 struct sglist *sg_list; 842 843 /** 844 * Used for performing relocations during execbuffer insertion. 845 */ 846 LIST_ENTRY(drm_i915_gem_object) exec_node; 847 unsigned long exec_handle; 848 struct drm_i915_gem_exec_object2 *exec_entry; 849 850 /** 851 * Current offset of the object in GTT space. 852 * 853 * This is the same as gtt_space->start 854 */ 855 uint32_t gtt_offset; 856 857 /** Breadcrumb of last rendering to the buffer. */ 858 uint32_t last_rendering_seqno; 859 struct intel_ring_buffer *ring; 860 861 /** Breadcrumb of last fenced GPU access to the buffer. */ 862 uint32_t last_fenced_seqno; 863 struct intel_ring_buffer *last_fenced_ring; 864 865 /** Current tiling stride for the object, if it's tiled. */ 866 uint32_t stride; 867 868 /** Record of address bit 17 of each page at last unbind. */ 869 unsigned long *bit_17; 870 871 /** 872 * If present, while GEM_DOMAIN_CPU is in the read domain this array 873 * flags which individual pages are valid. 874 */ 875 uint8_t *page_cpu_valid; 876 877 /** User space pin count and filp owning the pin */ 878 uint32_t user_pin_count; 879 struct drm_file *pin_filp; 880 881 /** for phy allocated objects */ 882 struct drm_i915_gem_phys_object *phys_obj; 883 884 /** 885 * Number of crtcs where this object is currently the fb, but 886 * will be page flipped away on the next vblank. When it 887 * reaches 0, dev_priv->pending_flip_queue will be woken up. 888 */ 889 int pending_flip; 890}; 891 892#define to_intel_bo(x) __containerof(x, struct drm_i915_gem_object, base) 893 894/** 895 * Request queue structure. 
896 * 897 * The request queue allows us to note sequence numbers that have been emitted 898 * and may be associated with active buffers to be retired. 899 * 900 * By keeping this list, we can avoid having to do questionable 901 * sequence-number comparisons on buffer last_rendering_seqnos, and associate 902 * an emission time with seqnos for tracking how far ahead of the GPU we are. 903 */ 904struct drm_i915_gem_request { 905 /** On Which ring this request was generated */ 906 struct intel_ring_buffer *ring; 907 908 /** GEM sequence number associated with this request. */ 909 uint32_t seqno; 910 911 /** Postion in the ringbuffer of the end of the request */ 912 u32 tail; 913 914 /** Time at which this request was emitted, in jiffies. */ 915 unsigned long emitted_jiffies; 916 917 /** global list entry for this request */ 918 struct list_head list; 919 920 struct drm_i915_file_private *file_priv; 921 /** file_priv list entry for this request */ 922 struct list_head client_list; 923}; 924 925struct drm_i915_file_private { 926 struct { 927 struct list_head request_list; 928 struct mtx lck; 929 } mm; 930}; 931 932struct drm_i915_error_state { 933 u32 eir; 934 u32 pgtbl_er; 935 u32 pipestat[I915_MAX_PIPES]; 936 u32 tail[I915_NUM_RINGS]; 937 u32 head[I915_NUM_RINGS]; 938 u32 ipeir[I915_NUM_RINGS]; 939 u32 ipehr[I915_NUM_RINGS]; 940 u32 instdone[I915_NUM_RINGS]; 941 u32 acthd[I915_NUM_RINGS]; 942 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; 943 /* our own tracking of ring head and tail */ 944 u32 cpu_ring_head[I915_NUM_RINGS]; 945 u32 cpu_ring_tail[I915_NUM_RINGS]; 946 u32 error; /* gen6+ */ 947 u32 instpm[I915_NUM_RINGS]; 948 u32 instps[I915_NUM_RINGS]; 949 u32 instdone1; 950 u32 seqno[I915_NUM_RINGS]; 951 u64 bbaddr; 952 u32 fault_reg[I915_NUM_RINGS]; 953 u32 done_reg; 954 u32 faddr[I915_NUM_RINGS]; 955 u64 fence[I915_MAX_NUM_FENCES]; 956 struct timeval time; 957 struct drm_i915_error_ring { 958 struct drm_i915_error_object { 959 int page_count; 960 u32 
gtt_offset; 961 u32 *pages[0]; 962 } *ringbuffer, *batchbuffer; 963 struct drm_i915_error_request { 964 long jiffies; 965 u32 seqno; 966 u32 tail; 967 } *requests; 968 int num_requests; 969 } ring[I915_NUM_RINGS]; 970 struct drm_i915_error_buffer { 971 u32 size; 972 u32 name; 973 u32 seqno; 974 u32 gtt_offset; 975 u32 read_domains; 976 u32 write_domain; 977 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 978 s32 pinned:2; 979 u32 tiling:2; 980 u32 dirty:1; 981 u32 purgeable:1; 982 s32 ring:4; 983 u32 cache_level:2; 984 } *active_bo, *pinned_bo; 985 u32 active_bo_count, pinned_bo_count; 986 struct intel_overlay_error_state *overlay; 987 struct intel_display_error_state *display; 988}; 989 990/** 991 * RC6 is a special power stage which allows the GPU to enter an very 992 * low-voltage mode when idle, using down to 0V while at this stage. This 993 * stage is entered automatically when the GPU is idle when RC6 support is 994 * enabled, and as soon as new workload arises GPU wakes up automatically as well. 995 * 996 * There are different RC6 modes available in Intel GPU, which differentiate 997 * among each other with the latency required to enter and leave RC6 and 998 * voltage consumed by the GPU in different states. 999 * 1000 * The combination of the following flags define which states GPU is allowed 1001 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and 1002 * RC6pp is deepest RC6. Their support by hardware varies according to the 1003 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one 1004 * which brings the most power savings; deeper states save more power, but 1005 * require higher latency to switch to and wake up. 
 */
/* RC6 power-state enable bits, OR-ed into the i915_enable_rc6 tunable. */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

extern int intel_iommu_enabled;
extern struct drm_ioctl_desc i915_ioctls[];
extern struct drm_driver_info i915_driver_info;
extern struct cdev_pager_ops i915_gem_pager_ops;
/*
 * Driver tunables — presumably set via loader tunables/sysctl;
 * NOTE(review): confirm against the definitions in i915_drv.c.
 */
extern unsigned int i915_fbpercrtc;
extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
extern int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern int i915_panel_use_ssc;
extern int i915_vbt_sdvo_panel_type;
extern int i915_enable_rc6;
extern int i915_enable_fbc;
extern int i915_enable_ppgtt;
extern int i915_enable_hangcheck;

/* Look up the static device-info descriptor for a PCI device id. */
const struct intel_device_info *i915_get_device_id(int device);

int i915_reset(struct drm_device *dev, u8 flags);

/* i915_debug.c */
int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top);
void i915_sysctl_cleanup(struct drm_device *dev);

 /* i915_dma.c */
int i915_batchbuffer(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_cmdbuffer(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_getparam(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
    struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
    struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
    unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
    struct drm_clip_rect __user *boxes,
    int i, int DR1, int DR4);
int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
    int DR1, int DR4);

/* Ironlake GPU-turbo / IPS interfaces shared with the coretemp side. */
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
void i915_update_gfx_val(struct drm_i915_private *dev_priv);
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
unsigned long i915_read_mch_val(void);
bool i915_gpu_raise(void);
bool i915_gpu_lower(void);
bool i915_gpu_busy(void);
bool i915_gpu_turbo_disable(void);

/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
    struct drm_file *file_priv);

extern void intel_irq_init(struct drm_device *dev);

extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
void intel_enable_asle(struct drm_device *dev);
/* Periodic GPU-hang detector; context is presumably the drm_device —
 * NOTE(review): confirm against the callout setup in i915_irq.c. */
void i915_hangcheck_elapsed(void *context);
void i915_handle_error(struct drm_device *dev, bool wedged);

void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void i915_destroy_error_state(struct drm_device *dev);

/* i915_gem.c */
int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
    uint32_t *handle_p);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void i915_gem_unload(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
    bool map_and_fenceable);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);

/*
 * Take a reference on the fence register backing this object so it is
 * not stolen while in use; no-op when no fence register is assigned.
 */
static inline void
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
	}
}

/*
 * Drop the reference taken by i915_gem_object_pin_fence(); no-op when no
 * fence register is assigned.  No underflow check is performed here.
 */
static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}

void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
    size_t size);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
    unsigned long mappable_end, unsigned long end);
uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
    uint32_t size, int tiling_mode);
int i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
    bool write);
int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
    u32 alignment, struct intel_ring_buffer *pipelined);
int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int i915_gem_flush_ring(struct intel_ring_buffer *ring,
    uint32_t invalidate_domains, uint32_t flush_domains);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
int i915_gem_idle(struct drm_device *dev);
int i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gpu_idle(struct drm_device *dev, bool do_retire);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *ring, uint32_t seqno);
int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
    struct drm_i915_gem_request *request);
int i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *pipelined);
void i915_gem_reset(struct drm_device *dev);
int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno,
    bool do_retire);
int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
    uint64_t *phys);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
    enum i915_cache_level cache_level);

void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_detach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj, int id, int align);

int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
    struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
    uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
    uint32_t handle);

/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size,
    unsigned alignment, bool mappable);
int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* intel_iic.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern void intel_gmbus_set_speed(device_t idev, int speed);
extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
extern void intel_iic_reset(struct drm_device *dev);

/* intel_opregion.c */
int intel_opregion_setup(struct drm_device *dev);
extern int intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);

/* i915_gem_gtt.c */
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
1260void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); 1261void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, 1262 enum i915_cache_level cache_level); 1263 1264/* modesetting */ 1265extern void intel_modeset_init(struct drm_device *dev); 1266extern void intel_modeset_gem_init(struct drm_device *dev); 1267extern void intel_modeset_cleanup(struct drm_device *dev); 1268extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1269extern bool intel_fbc_enabled(struct drm_device *dev); 1270extern void intel_disable_fbc(struct drm_device *dev); 1271extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1272extern void ironlake_init_pch_refclk(struct drm_device *dev); 1273extern void ironlake_enable_rc6(struct drm_device *dev); 1274extern void gen6_set_rps(struct drm_device *dev, u8 val); 1275extern void intel_detect_pch(struct drm_device *dev); 1276extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1277 1278extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1279extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); 1280extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1281extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv); 1282 1283extern struct intel_overlay_error_state *intel_overlay_capture_error_state( 1284 struct drm_device *dev); 1285extern void intel_overlay_print_error_state(struct sbuf *m, 1286 struct intel_overlay_error_state *error); 1287extern struct intel_display_error_state *intel_display_capture_error_state( 1288 struct drm_device *dev); 1289extern void intel_display_print_error_state(struct sbuf *m, 1290 struct drm_device *dev, struct intel_display_error_state *error); 1291 1292static inline void 1293trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz) 1294{ 1295 1296 CTR4(KTR_DRM_REG, "[%x/%d] %c %x", reg, sz, rw ? 
"w" : "r", val); 1297} 1298 1299/* On SNB platform, before reading ring registers forcewake bit 1300 * must be set to prevent GT core from power down and stale values being 1301 * returned. 1302 */ 1303void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1304void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1305int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1306 1307/* We give fast paths for the really cool registers */ 1308#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1309 (((dev_priv)->info->gen >= 6) && \ 1310 ((reg) < 0x40000) && \ 1311 ((reg) != FORCEWAKE)) 1312 1313#define __i915_read(x, y) \ 1314 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1315 1316__i915_read(8, 8) 1317__i915_read(16, 16) 1318__i915_read(32, 32) 1319__i915_read(64, 64) 1320#undef __i915_read 1321 1322#define __i915_write(x, y) \ 1323 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); 1324 1325__i915_write(8, 8) 1326__i915_write(16, 16) 1327__i915_write(32, 32) 1328__i915_write(64, 64) 1329#undef __i915_write 1330 1331#define I915_READ8(reg) i915_read8(dev_priv, (reg)) 1332#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) 1333 1334#define I915_READ16(reg) i915_read16(dev_priv, (reg)) 1335#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) 1336#define I915_READ16_NOTRACE(reg) DRM_READ16(dev_priv->mmio_map, (reg)) 1337#define I915_WRITE16_NOTRACE(reg, val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) 1338 1339#define I915_READ(reg) i915_read32(dev_priv, (reg)) 1340#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) 1341#define I915_READ_NOTRACE(reg) DRM_READ32(dev_priv->mmio_map, (reg)) 1342#define I915_WRITE_NOTRACE(reg, val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) 1343 1344#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) 1345#define I915_READ64(reg) i915_read64(dev_priv, (reg)) 1346 1347#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 
1348#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 1349 1350#define I915_VERBOSE 0 1351 1352#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS]) 1353 1354#define BEGIN_LP_RING(n) \ 1355 intel_ring_begin(LP_RING(dev_priv), (n)) 1356 1357#define OUT_RING(x) \ 1358 intel_ring_emit(LP_RING(dev_priv), x) 1359 1360#define ADVANCE_LP_RING() \ 1361 intel_ring_advance(LP_RING(dev_priv)) 1362 1363#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ 1364 if (LP_RING(dev->dev_private)->obj == NULL) \ 1365 LOCK_TEST_WITH_RETURN(dev, file); \ 1366} while (0) 1367 1368/** 1369 * Reads a dword out of the status page, which is written to from the command 1370 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or 1371 * MI_STORE_DATA_IMM. 1372 * 1373 * The following dwords have a reserved meaning: 1374 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. 1375 * 0x04: ring 0 head pointer 1376 * 0x05: ring 1 head pointer (915-class) 1377 * 0x06: ring 2 head pointer (915-class) 1378 * 0x10-0x1b: Context status DWords (GM45) 1379 * 0x1f: Last written status offset. (GM45) 1380 * 1381 * The area from dword 0x20 to 0x3ff is available for driver usage. 
1382 */ 1383#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 1384#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 1385#define I915_GEM_HWS_INDEX 0x20 1386#define I915_BREADCRUMB_INDEX 0x21 1387 1388#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 1389 1390#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1391#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1392#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 1393#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1394#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1395#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1396#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1397#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 1398#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 1399#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 1400#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1401#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 1402#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) 1403#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) 1404#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 1405#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 1406#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1407#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1408#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 1409#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1410 1411/* XXXKIB LEGACY */ 1412#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ 1413 (dev)->pci_device == 0x2982 || \ 1414 (dev)->pci_device == 0x2992 || \ 1415 (dev)->pci_device == 0x29A2 || \ 1416 (dev)->pci_device == 0x2A02 || \ 1417 (dev)->pci_device == 0x2A12 || \ 1418 (dev)->pci_device == 0x2A42 || \ 1419 (dev)->pci_device == 0x2E02 || \ 1420 (dev)->pci_device == 0x2E12 || \ 1421 (dev)->pci_device == 0x2E22 || \ 1422 (dev)->pci_device == 0x2E32) 1423 1424#define IS_I965GM(dev) 
((dev)->pci_device == 0x2A02) 1425 1426#define IS_IGDG(dev) ((dev)->pci_device == 0xa001) 1427#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011) 1428#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev)) 1429 1430#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 1431 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 1432/* XXXKIB LEGACY END */ 1433 1434#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 1435#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 1436#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 1437#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 1438#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 1439#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 1440 1441#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 1442#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 1443#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 1444#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1445 1446#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6) 1447 1448#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 1449#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 1450 1451/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1452 * rows, which changed the alignment requirements and fence programming. 
1453 */ 1454#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 1455 IS_I915GM(dev))) 1456#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) 1457#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1458#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1459#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1460#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 1461#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1462/* dsparb controlled by hw only */ 1463#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1464 1465#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 1466#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1467#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1468 1469#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) 1470#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1471 1472#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1473#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1474#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1475 1476#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1477 1478static inline bool 1479i915_seqno_passed(uint32_t seq1, uint32_t seq2) 1480{ 1481 1482 return ((int32_t)(seq1 - seq2) >= 0); 1483} 1484 1485u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); 1486 1487#endif 1488