/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_debug.c 291428 2015-11-28 15:22:46Z dumbbell $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/intel_ringbuffer.h>

#include <sys/sysctl.h>

/*
 * Selectors passed (cast through void *) as the private data argument of
 * the per-list debug handlers below, choosing which GEM object list to
 * dump.
 */
enum {
	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

/* Render a boolean-ish int as "yes"/"no" for debug output. */
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

/*
 * Dump the static capability flags of this GPU (from intel_device_info)
 * into the sbuf: generation, PCH type (on PCH-split parts), and one
 * yes/no line per feature bit.
 */
static int i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
{
	const struct intel_device_info *info = INTEL_INFO(dev);

	sbuf_printf(m, "gen: %d\n", info->gen);
	if (HAS_PCH_SPLIT(dev))
		sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
	/* B(x) prints the field name itself via stringization. */
#define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);
	B(is_i85x);
	B(is_i915g);
	B(is_i945gm);
	B(is_g33);
	B(need_gfx_hws);
	B(is_g4x);
	B(is_pineview);
	B(has_fbc);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
	B(has_bsd_ring);
	B(has_blt_ring);
	B(has_llc);
#undef B

	return 0;
}

/* One-character pin marker: "P" user-pinned, "p" kernel-pinned, " " unpinned. */
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

/* One-character tiling marker for an object's tiling_mode. */
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

/* Human-readable suffix for a GEM object's cache level. */
static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}

/*
 * Print a one-line summary of a GEM object: address, pin/tiling flags,
 * size, read/write domains, last seqnos, cache level and state markers,
 * followed by optional name/fence/GTT/mappable/ring annotations.
 */
static void
describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)
{

	sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
	    &obj->base,
	    get_pin_flag(obj),
	    get_tiling_flag(obj),
	    obj->base.size / 1024,
	    obj->base.read_domains,
	    obj->base.write_domain,
	    obj->last_rendering_seqno,
	    obj->last_fenced_seqno,
	    cache_level_str(obj->cache_level),
	    obj->dirty ? " dirty" : "",
	    obj->madv == I915_MADV_DONTNEED ?
" purgeable" : ""); 130 if (obj->base.name) 131 sbuf_printf(m, " (name: %d)", obj->base.name); 132 if (obj->pin_display) 133 sbuf_printf(m, " (display)"); 134 if (obj->fence_reg != I915_FENCE_REG_NONE) 135 sbuf_printf(m, " (fence: %d)", obj->fence_reg); 136 if (obj->gtt_space != NULL) 137 sbuf_printf(m, " (gtt offset: %08x, size: %08x)", 138 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 139 if (obj->pin_mappable || obj->fault_mappable) { 140 char s[3], *t = s; 141 if (obj->pin_mappable) 142 *t++ = 'p'; 143 if (obj->fault_mappable) 144 *t++ = 'f'; 145 *t = '\0'; 146 sbuf_printf(m, " (%s mappable)", s); 147 } 148 if (obj->ring != NULL) 149 sbuf_printf(m, " (%s)", obj->ring->name); 150} 151 152static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data) 153{ 154 uintptr_t list = (uintptr_t)data; 155 struct list_head *head; 156 drm_i915_private_t *dev_priv = dev->dev_private; 157 struct drm_i915_gem_object *obj; 158 size_t total_obj_size, total_gtt_size; 159 int count; 160 161 if (sx_xlock_sig(&dev->dev_struct_lock)) 162 return -EINTR; 163 164 switch (list) { 165 case ACTIVE_LIST: 166 sbuf_printf(m, "Active:\n"); 167 head = &dev_priv->mm.active_list; 168 break; 169 case INACTIVE_LIST: 170 sbuf_printf(m, "Inactive:\n"); 171 head = &dev_priv->mm.inactive_list; 172 break; 173 case FLUSHING_LIST: 174 sbuf_printf(m, "Flushing:\n"); 175 head = &dev_priv->mm.flushing_list; 176 break; 177 default: 178 DRM_UNLOCK(dev); 179 return -EINVAL; 180 } 181 182 total_obj_size = total_gtt_size = count = 0; 183 list_for_each_entry(obj, head, mm_list) { 184 sbuf_printf(m, " "); 185 describe_obj(m, obj); 186 sbuf_printf(m, "\n"); 187 total_obj_size += obj->base.size; 188 total_gtt_size += obj->gtt_space->size; 189 count++; 190 } 191 DRM_UNLOCK(dev); 192 193 sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 194 count, total_obj_size, total_gtt_size); 195 return 0; 196} 197 198#define count_objects(list, member) do { \ 199 
list_for_each_entry(obj, list, member) { \ 200 size += obj->gtt_space->size; \ 201 ++count; \ 202 if (obj->map_and_fenceable) { \ 203 mappable_size += obj->gtt_space->size; \ 204 ++mappable_count; \ 205 } \ 206 } \ 207} while (0) 208 209static int i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data) 210{ 211 struct drm_i915_private *dev_priv = dev->dev_private; 212 u32 count, mappable_count; 213 size_t size, mappable_size; 214 struct drm_i915_gem_object *obj; 215 216 if (sx_xlock_sig(&dev->dev_struct_lock)) 217 return -EINTR; 218 sbuf_printf(m, "%u objects, %zu bytes\n", 219 dev_priv->mm.object_count, 220 dev_priv->mm.object_memory); 221 222 size = count = mappable_size = mappable_count = 0; 223 count_objects(&dev_priv->mm.gtt_list, gtt_list); 224 sbuf_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 225 count, mappable_count, size, mappable_size); 226 227 size = count = mappable_size = mappable_count = 0; 228 count_objects(&dev_priv->mm.active_list, mm_list); 229 count_objects(&dev_priv->mm.flushing_list, mm_list); 230 sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 231 count, mappable_count, size, mappable_size); 232 233 size = count = mappable_size = mappable_count = 0; 234 count_objects(&dev_priv->mm.inactive_list, mm_list); 235 sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 236 count, mappable_count, size, mappable_size); 237 238 size = count = mappable_size = mappable_count = 0; 239 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 240 if (obj->fault_mappable) { 241 size += obj->gtt_space->size; 242 ++count; 243 } 244 if (obj->pin_mappable) { 245 mappable_size += obj->gtt_space->size; 246 ++mappable_count; 247 } 248 } 249 sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n", 250 mappable_count, mappable_size); 251 sbuf_printf(m, "%u fault mappable objects, %zu bytes\n", 252 count, size); 253 254 sbuf_printf(m, "%zu [%zu] gtt total\n", 255 dev_priv->mm.gtt_total, 
dev_priv->mm.mappable_gtt_total); 256 257 DRM_UNLOCK(dev); 258 259 return 0; 260} 261 262static int i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data) 263{ 264 struct drm_i915_private *dev_priv = dev->dev_private; 265 uintptr_t list = (uintptr_t)data; 266 struct drm_i915_gem_object *obj; 267 size_t total_obj_size, total_gtt_size; 268 int count; 269 270 if (sx_xlock_sig(&dev->dev_struct_lock)) 271 return -EINTR; 272 273 total_obj_size = total_gtt_size = count = 0; 274 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 275 if (list == PINNED_LIST && obj->pin_count == 0) 276 continue; 277 278 sbuf_printf(m, " "); 279 describe_obj(m, obj); 280 sbuf_printf(m, "\n"); 281 total_obj_size += obj->base.size; 282 total_gtt_size += obj->gtt_space->size; 283 count++; 284 } 285 286 DRM_UNLOCK(dev); 287 288 sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 289 count, total_obj_size, total_gtt_size); 290 291 return 0; 292} 293 294static int i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data) 295{ 296 struct intel_crtc *crtc; 297 298 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 299 const char pipe = pipe_name(crtc->pipe); 300 const char plane = plane_name(crtc->plane); 301 struct intel_unpin_work *work; 302 303 mtx_lock(&dev->event_lock); 304 work = crtc->unpin_work; 305 if (work == NULL) { 306 sbuf_printf(m, "No flip due on pipe %c (plane %c)\n", 307 pipe, plane); 308 } else { 309 if (!work->pending) { 310 sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n", 311 pipe, plane); 312 } else { 313 sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 314 pipe, plane); 315 } 316 if (work->enable_stall_check) 317 sbuf_printf(m, "Stall check enabled, "); 318 else 319 sbuf_printf(m, "Stall check waiting for page flip ioctl, "); 320 sbuf_printf(m, "%d prepares\n", work->pending); 321 322 if (work->old_fb_obj) { 323 struct drm_i915_gem_object *obj = work->old_fb_obj; 324 if (obj) 325 
sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 326 } 327 if (work->pending_flip_obj) { 328 struct drm_i915_gem_object *obj = work->pending_flip_obj; 329 if (obj) 330 sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 331 } 332 } 333 mtx_unlock(&dev->event_lock); 334 } 335 336 return 0; 337} 338 339static int i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data) 340{ 341 drm_i915_private_t *dev_priv = dev->dev_private; 342 struct drm_i915_gem_request *gem_request; 343 int count; 344 345 if (sx_xlock_sig(&dev->dev_struct_lock)) 346 return -EINTR; 347 348 count = 0; 349 if (!list_empty(&dev_priv->rings[RCS].request_list)) { 350 sbuf_printf(m, "Render requests:\n"); 351 list_for_each_entry(gem_request, 352 &dev_priv->rings[RCS].request_list, 353 list) { 354 sbuf_printf(m, " %d @ %d\n", 355 gem_request->seqno, 356 (int) (jiffies - gem_request->emitted_jiffies)); 357 } 358 count++; 359 } 360 if (!list_empty(&dev_priv->rings[VCS].request_list)) { 361 sbuf_printf(m, "BSD requests:\n"); 362 list_for_each_entry(gem_request, 363 &dev_priv->rings[VCS].request_list, 364 list) { 365 sbuf_printf(m, " %d @ %d\n", 366 gem_request->seqno, 367 (int) (jiffies - gem_request->emitted_jiffies)); 368 } 369 count++; 370 } 371 if (!list_empty(&dev_priv->rings[BCS].request_list)) { 372 sbuf_printf(m, "BLT requests:\n"); 373 list_for_each_entry(gem_request, 374 &dev_priv->rings[BCS].request_list, 375 list) { 376 sbuf_printf(m, " %d @ %d\n", 377 gem_request->seqno, 378 (int) (jiffies - gem_request->emitted_jiffies)); 379 } 380 count++; 381 } 382 DRM_UNLOCK(dev); 383 384 if (count == 0) 385 sbuf_printf(m, "No requests\n"); 386 387 return 0; 388} 389 390static void i915_ring_seqno_info(struct sbuf *m, 391 struct intel_ring_buffer *ring) 392{ 393 if (ring->get_seqno) { 394 sbuf_printf(m, "Current sequence (%s): %d\n", 395 ring->name, ring->get_seqno(ring)); 396 } 397} 398 399static int i915_gem_seqno_info(struct drm_device *dev, 
struct sbuf *m, void *data) 400{ 401 drm_i915_private_t *dev_priv = dev->dev_private; 402 int i; 403 404 if (sx_xlock_sig(&dev->dev_struct_lock)) 405 return -EINTR; 406 407 for (i = 0; i < I915_NUM_RINGS; i++) 408 i915_ring_seqno_info(m, &dev_priv->rings[i]); 409 410 DRM_UNLOCK(dev); 411 412 return 0; 413} 414 415 416static int i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data) 417{ 418 drm_i915_private_t *dev_priv = dev->dev_private; 419 int i, pipe; 420 421 if (sx_xlock_sig(&dev->dev_struct_lock)) 422 return -EINTR; 423 424 if (IS_VALLEYVIEW(dev)) { 425 sbuf_printf(m, "Display IER:\t%08x\n", 426 I915_READ(VLV_IER)); 427 sbuf_printf(m, "Display IIR:\t%08x\n", 428 I915_READ(VLV_IIR)); 429 sbuf_printf(m, "Display IIR_RW:\t%08x\n", 430 I915_READ(VLV_IIR_RW)); 431 sbuf_printf(m, "Display IMR:\t%08x\n", 432 I915_READ(VLV_IMR)); 433 for_each_pipe(pipe) 434 sbuf_printf(m, "Pipe %c stat:\t%08x\n", 435 pipe_name(pipe), 436 I915_READ(PIPESTAT(pipe))); 437 438 sbuf_printf(m, "Master IER:\t%08x\n", 439 I915_READ(VLV_MASTER_IER)); 440 441 sbuf_printf(m, "Render IER:\t%08x\n", 442 I915_READ(GTIER)); 443 sbuf_printf(m, "Render IIR:\t%08x\n", 444 I915_READ(GTIIR)); 445 sbuf_printf(m, "Render IMR:\t%08x\n", 446 I915_READ(GTIMR)); 447 448 sbuf_printf(m, "PM IER:\t\t%08x\n", 449 I915_READ(GEN6_PMIER)); 450 sbuf_printf(m, "PM IIR:\t\t%08x\n", 451 I915_READ(GEN6_PMIIR)); 452 sbuf_printf(m, "PM IMR:\t\t%08x\n", 453 I915_READ(GEN6_PMIMR)); 454 455 sbuf_printf(m, "Port hotplug:\t%08x\n", 456 I915_READ(PORT_HOTPLUG_EN)); 457 sbuf_printf(m, "DPFLIPSTAT:\t%08x\n", 458 I915_READ(VLV_DPFLIPSTAT)); 459 sbuf_printf(m, "DPINVGTT:\t%08x\n", 460 I915_READ(DPINVGTT)); 461 462 } else if (!HAS_PCH_SPLIT(dev)) { 463 sbuf_printf(m, "Interrupt enable: %08x\n", 464 I915_READ(IER)); 465 sbuf_printf(m, "Interrupt identity: %08x\n", 466 I915_READ(IIR)); 467 sbuf_printf(m, "Interrupt mask: %08x\n", 468 I915_READ(IMR)); 469 for_each_pipe(pipe) 470 sbuf_printf(m, "Pipe %c stat: %08x\n", 
471 pipe_name(pipe), 472 I915_READ(PIPESTAT(pipe))); 473 } else { 474 sbuf_printf(m, "North Display Interrupt enable: %08x\n", 475 I915_READ(DEIER)); 476 sbuf_printf(m, "North Display Interrupt identity: %08x\n", 477 I915_READ(DEIIR)); 478 sbuf_printf(m, "North Display Interrupt mask: %08x\n", 479 I915_READ(DEIMR)); 480 sbuf_printf(m, "South Display Interrupt enable: %08x\n", 481 I915_READ(SDEIER)); 482 sbuf_printf(m, "South Display Interrupt identity: %08x\n", 483 I915_READ(SDEIIR)); 484 sbuf_printf(m, "South Display Interrupt mask: %08x\n", 485 I915_READ(SDEIMR)); 486 sbuf_printf(m, "Graphics Interrupt enable: %08x\n", 487 I915_READ(GTIER)); 488 sbuf_printf(m, "Graphics Interrupt identity: %08x\n", 489 I915_READ(GTIIR)); 490 sbuf_printf(m, "Graphics Interrupt mask: %08x\n", 491 I915_READ(GTIMR)); 492 } 493 sbuf_printf(m, "Interrupts received: %d\n", 494 atomic_read(&dev_priv->irq_received)); 495 for (i = 0; i < I915_NUM_RINGS; i++) { 496 if (IS_GEN6(dev) || IS_GEN7(dev)) { 497 sbuf_printf(m, 498 "Graphics Interrupt mask (%s): %08x\n", 499 dev_priv->rings[i].name, I915_READ_IMR(&dev_priv->rings[i])); 500 } 501 i915_ring_seqno_info(m, &dev_priv->rings[i]); 502 } 503 DRM_UNLOCK(dev); 504 505 return 0; 506} 507 508static int i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data) 509{ 510 drm_i915_private_t *dev_priv = dev->dev_private; 511 int i; 512 513 if (sx_xlock_sig(&dev->dev_struct_lock)) 514 return -EINTR; 515 516 sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 517 sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 518 for (i = 0; i < dev_priv->num_fence_regs; i++) { 519 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 520 521 sbuf_printf(m, "Fenced object[%2d] = ", i); 522 if (obj == NULL) 523 sbuf_printf(m, "unused"); 524 else 525 describe_obj(m, obj); 526 sbuf_printf(m, "\n"); 527 } 528 529 DRM_UNLOCK(dev); 530 return 0; 531} 532 533static int i915_hws_info(struct drm_device *dev, struct 
    sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 __iomem *hws;
	int i;

	ring = &dev_priv->rings[(uintptr_t)data];
	hws = (volatile u32 *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	/*
	 * NOTE(review): with the bound 4096/sizeof(u32)/4 (= 256) and the
	 * step i += 4 this walks hws[0..255] only, i.e. the first 1 KiB of
	 * the 4 KiB status page -- confirm whether a full-page dump was
	 * intended (byte offsets printed via i * 4 are consistent either
	 * way).
	 */
	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
		    i * 4,
		    hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

/* Name a ring for error-state output (leading space is the separator). */
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return " render";
	case VCS: return " bsd";
	case BCS: return " blt";
	default: return "";
	}
}

/* Pin marker for error buffers: " P" user-pinned, " p" kernel-pinned. */
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

/* Tiling marker for error buffers. */
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

/* " dirty" suffix when the error buffer was dirty. */
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

/* " purgeable" suffix when the error buffer was marked purgeable. */
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

/*
 * Dump an array of captured error buffers (count entries at err) under a
 * section heading: offset, size, domains, seqno and state flags per line.
 */
static void print_error_buffers(struct sbuf *m,
    const char *name,
    struct drm_i915_error_buffer *err,
    int count)
{

	sbuf_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		sbuf_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
		    err->gtt_offset,
		    err->size,
		    err->read_domains,
		    err->write_domain,
		    err->seqno,
		    pin_flag(err->pinned),
		    tiling_flag(err->tiling),
		    dirty_flag(err->dirty),
		    purgeable_flag(err->purgeable),
		    err->ring != -1 ? " " : "",
		    ring_str(err->ring),
		    cache_level_str(err->cache_level));

		if (err->name)
			sbuf_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			sbuf_printf(m, " (fence: %d)", err->fence_reg);

		sbuf_printf(m, "\n");
		err++;
	}
}

/*
 * Dump the captured per-ring registers from an error snapshot: command
 * stream pointers, instruction state, and (generation permitting)
 * fault/semaphore registers and seqno/waiter state.
 */
static void i915_ring_error_state(struct sbuf *m,
    struct drm_device *dev,
    struct drm_i915_error_state *error,
    unsigned ring)
{

	MPASS((ring < I915_NUM_RINGS)); /* shut up confused gcc */
	sbuf_printf(m, "%s command stream:\n", ring_str(ring));
	sbuf_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
	sbuf_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
	sbuf_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
	sbuf_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
	sbuf_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
	sbuf_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
		sbuf_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
		sbuf_printf(m, "  BBADDR: 0x%08jx\n", (uintmax_t)error->bbaddr);
	}
	if (INTEL_INFO(dev)->gen >= 4)
		sbuf_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
	sbuf_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
	sbuf_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		sbuf_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		sbuf_printf(m, "  SYNC_0: 0x%08x\n",
		    error->semaphore_mboxes[ring][0]);
		sbuf_printf(m, "  SYNC_1: 0x%08x\n",
		    error->semaphore_mboxes[ring][1]);
	}
	sbuf_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
	sbuf_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
	sbuf_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	sbuf_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}

/*
 * Dump the full saved GPU error state (hang capture): global registers,
 * fence registers, per-ring state, active/pinned buffer lists, and
 * batchbuffer/ringbuffer/request contents, then overlay and display
 * state.  A reference is taken on the snapshot under error_lock so the
 * dump works on a stable copy, and dropped (possibly freeing it) at the
 * end.
 */
static int i915_error_state(struct drm_device *dev, struct sbuf *m,
    void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	if (error != NULL)
		refcount_acquire(&error->ref);
	mtx_unlock(&dev_priv->error_lock);
	if (!error) {
		sbuf_printf(m, "no error state collected\n");
		return 0;
	}

	sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec,
	    (intmax_t)error->time.tv_usec);
	sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	sbuf_printf(m, "EIR: 0x%08x\n", error->eir);
	sbuf_printf(m, "IER: 0x%08x\n", error->ier);
	sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		sbuf_printf(m, "  fence[%d] = %08jx\n", i,
		    (uintmax_t)error->fence[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		sbuf_printf(m, "ERROR: 0x%08x\n", error->error);
		sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	for_each_ring(ring, dev_priv, i)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
		    error->active_bo,
		    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
		    error->pinned_bo,
		    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		/* Captured batchbuffer contents, one 32-bit word per line. */
		if ((obj = error->ring[i].batchbuffer)) {
			sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n",
			    dev_priv->rings[i].name,
			    obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					sbuf_printf(m, "%08x :  %08x\n",
					    offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			sbuf_printf(m, "%s --- %d requests\n",
			    dev_priv->rings[i].name,
			    error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				sbuf_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
				    error->ring[i].requests[j].seqno,
				    error->ring[i].requests[j].jiffies,
				    error->ring[i].requests[j].tail);
			}
		}

		/* Captured ringbuffer contents. */
		if ((obj = error->ring[i].ringbuffer)) {
			sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n",
			    dev_priv->rings[i].name,
			    obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					sbuf_printf(m, "%08x :  %08x\n",
					    offset,
					    obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

	if (refcount_release(&error->ref))
		i915_error_state_free(error);

	return 0;
}

/*
 * Write handler: detach the saved error state and drop the list's
 * reference, freeing it unless a concurrent reader still holds one.
 */
static int
i915_error_state_write(struct drm_device *dev, const char *str, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	mtx_unlock(&dev_priv->error_lock);
	if (error != NULL && refcount_release(&error->ref))
		i915_error_state_free(error);
	return (0);
}

/* Report the CRSTANDVID standby delays with and without context. */
static int i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return -EINTR;

	crstanddelay = I915_READ16(CRSTANDVID);

	DRM_UNLOCK(dev);

	sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

/*
 * Report the current render P-state/frequency information: MEMSWCTL and
 * MEMSTAT on gen5, the GEN6_RP* register set on gen6 (sampled with a
 * forcewake reference held), otherwise "no P-state info available".
 */
static int i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if
	    (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
		    MEMSTAT_VID_SHIFT);
		sbuf_printf(m, "Current P-state: %d\n",
		    (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		if (sx_xlock_sig(&dev->dev_struct_lock))
			return -EINTR;
		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		DRM_UNLOCK(dev);

		sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		sbuf_printf(m, "Render p-state ratio: %d\n",
		    (gt_perf_status & 0xff00) >> 8);
		sbuf_printf(m, "Render p-state VID: %d\n",
		    gt_perf_status & 0xff);
		sbuf_printf(m, "Render p-state limit: %d\n",
		    rp_state_limits & 0xff);
		/* CAGF and RP[N/1/0] frequencies are in 50 MHz units. */
		sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
		    GEN6_CAGF_SHIFT) * 50);
		sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei &
		    GEN6_CURICONT_MASK);
		sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
		    GEN6_CURIAVG_MASK);
		sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
		    GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n",
		    max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n",
		    max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
		    max_freq * 50);
	} else {
		sbuf_printf(m, "no P-state info available\n");
	}

	return 0;
}

/* Dump the 16-entry PXVFREQ delay/frequency table (gen5 hardware). */
static int i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int i;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return -EINTR;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
		    (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	DRM_UNLOCK(dev);

	return 0;
}

/* Convert a VID map value to millivolts (1250 mV base, 25 mV steps). */
static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

/* Dump the 32-entry INTTOEXT table (Ironlake). */
static int i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int i;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return -EINTR;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	DRM_UNLOCK(dev);

	return 0;
}

/*
 * Report Ironlake DRPC (render power-state) status: memory mode control
 * bits, standby VIDs and the current render-standby (RSx) state.
 * Registers are sampled under the struct lock, printed after unlock.
 */
static int ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return -EINTR;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	DRM_UNLOCK(dev);

	sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
	    "yes" : "no");
	sbuf_printf(m, "Boost freq: %d\n",
	    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
	    MEMMODE_BOOST_FREQ_SHIFT);
	sbuf_printf(m, "HW control enabled: %s\n",
	    rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	sbuf_printf(m, "SW control enabled: %s\n",
	    rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	sbuf_printf(m, "Gated voltage change: %s\n",
	    rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	sbuf_printf(m, "Starting frequency: P%d\n",
	    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	sbuf_printf(m, "Max P-state: P%d\n",
	    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	sbuf_printf(m, "Render standby enabled: %s\n",
	    (rstdbyctl & RCX_SW_EXIT) ?
"no" : "yes"); 957 sbuf_printf(m, "Current RS state: "); 958 switch (rstdbyctl & RSX_STATUS_MASK) { 959 case RSX_STATUS_ON: 960 sbuf_printf(m, "on\n"); 961 break; 962 case RSX_STATUS_RC1: 963 sbuf_printf(m, "RC1\n"); 964 break; 965 case RSX_STATUS_RC1E: 966 sbuf_printf(m, "RC1E\n"); 967 break; 968 case RSX_STATUS_RS1: 969 sbuf_printf(m, "RS1\n"); 970 break; 971 case RSX_STATUS_RS2: 972 sbuf_printf(m, "RS2 (RC6)\n"); 973 break; 974 case RSX_STATUS_RS3: 975 sbuf_printf(m, "RC3 (RC6+)\n"); 976 break; 977 default: 978 sbuf_printf(m, "unknown\n"); 979 break; 980 } 981 982 return 0; 983} 984 985static int gen6_drpc_info(struct drm_device *dev, struct sbuf *m) 986{ 987 drm_i915_private_t *dev_priv = dev->dev_private; 988 u32 rpmodectl1, gt_core_status, rcctl1; 989 unsigned forcewake_count; 990 int count=0; 991 992 993 if (sx_xlock_sig(&dev->dev_struct_lock)) 994 return -EINTR; 995 996 mtx_lock(&dev_priv->gt_lock); 997 forcewake_count = dev_priv->forcewake_count; 998 mtx_unlock(&dev_priv->gt_lock); 999 1000 if (forcewake_count) { 1001 sbuf_printf(m, "RC information inaccurate because userspace " 1002 "holds a reference \n"); 1003 } else { 1004 /* NB: we cannot use forcewake, else we read the wrong values */ 1005 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1006 udelay(10); 1007 sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1008 } 1009 1010 gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS); 1011 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1012 1013 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1014 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1015 DRM_UNLOCK(dev); 1016 1017 sbuf_printf(m, "Video Turbo Mode: %s\n", 1018 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1019 sbuf_printf(m, "HW control enabled: %s\n", 1020 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1021 sbuf_printf(m, "SW control enabled: %s\n", 1022 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1023 GEN6_RP_MEDIA_SW_MODE)); 1024 sbuf_printf(m, "RC1e 
Enabled: %s\n", 1025 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1026 sbuf_printf(m, "RC6 Enabled: %s\n", 1027 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1028 sbuf_printf(m, "Deep RC6 Enabled: %s\n", 1029 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1030 sbuf_printf(m, "Deepest RC6 Enabled: %s\n", 1031 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1032 sbuf_printf(m, "Current RC state: "); 1033 switch (gt_core_status & GEN6_RCn_MASK) { 1034 case GEN6_RC0: 1035 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1036 sbuf_printf(m, "Core Power Down\n"); 1037 else 1038 sbuf_printf(m, "on\n"); 1039 break; 1040 case GEN6_RC3: 1041 sbuf_printf(m, "RC3\n"); 1042 break; 1043 case GEN6_RC6: 1044 sbuf_printf(m, "RC6\n"); 1045 break; 1046 case GEN6_RC7: 1047 sbuf_printf(m, "RC7\n"); 1048 break; 1049 default: 1050 sbuf_printf(m, "Unknown\n"); 1051 break; 1052 } 1053 1054 sbuf_printf(m, "Core Power Down: %s\n", 1055 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1056 1057 /* Not exactly sure what this is */ 1058 sbuf_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1059 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1060 sbuf_printf(m, "RC6 residency since boot: %u\n", 1061 I915_READ(GEN6_GT_GFX_RC6)); 1062 sbuf_printf(m, "RC6+ residency since boot: %u\n", 1063 I915_READ(GEN6_GT_GFX_RC6p)); 1064 sbuf_printf(m, "RC6++ residency since boot: %u\n", 1065 I915_READ(GEN6_GT_GFX_RC6pp)); 1066 1067 return 0; 1068} 1069 1070static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused) 1071{ 1072 1073 if (IS_GEN6(dev) || IS_GEN7(dev)) 1074 return gen6_drpc_info(dev, m); 1075 else 1076 return ironlake_drpc_info(dev, m); 1077} 1078 1079static int i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused) 1080{ 1081 drm_i915_private_t *dev_priv = dev->dev_private; 1082 1083 if (!I915_HAS_FBC(dev)) { 1084 sbuf_printf(m, "FBC unsupported on this chipset"); 1085 return 0; 1086 } 1087 1088 if (intel_fbc_enabled(dev)) { 1089 sbuf_printf(m, "FBC enabled"); 1090 } else { 1091 
sbuf_printf(m, "FBC disabled: "); 1092 switch (dev_priv->no_fbc_reason) { 1093 case FBC_NO_OUTPUT: 1094 sbuf_printf(m, "no outputs"); 1095 break; 1096 case FBC_STOLEN_TOO_SMALL: 1097 sbuf_printf(m, "not enough stolen memory"); 1098 break; 1099 case FBC_UNSUPPORTED_MODE: 1100 sbuf_printf(m, "mode not supported"); 1101 break; 1102 case FBC_MODE_TOO_LARGE: 1103 sbuf_printf(m, "mode too large"); 1104 break; 1105 case FBC_BAD_PLANE: 1106 sbuf_printf(m, "FBC unsupported on plane"); 1107 break; 1108 case FBC_NOT_TILED: 1109 sbuf_printf(m, "scanout buffer not tiled"); 1110 break; 1111 case FBC_MULTIPLE_PIPES: 1112 sbuf_printf(m, "multiple pipes are enabled"); 1113 break; 1114 default: 1115 sbuf_printf(m, "unknown reason"); 1116 } 1117 } 1118 return 0; 1119} 1120 1121static int i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused) 1122{ 1123 drm_i915_private_t *dev_priv = dev->dev_private; 1124 bool sr_enabled = false; 1125 1126 if (HAS_PCH_SPLIT(dev)) 1127 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1128 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1129 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1130 else if (IS_I915GM(dev)) 1131 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1132 else if (IS_PINEVIEW(dev)) 1133 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1134 1135 sbuf_printf(m, "self-refresh: %s", 1136 sr_enabled ? 
"enabled" : "disabled"); 1137 1138 return 0; 1139} 1140 1141static int i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused) 1142{ 1143 drm_i915_private_t *dev_priv = dev->dev_private; 1144 unsigned long temp, chipset, gfx; 1145 1146 if (!IS_GEN5(dev)) 1147 return -ENODEV; 1148 1149 if (sx_xlock_sig(&dev->dev_struct_lock)) 1150 return -EINTR; 1151 1152 temp = i915_mch_val(dev_priv); 1153 chipset = i915_chipset_val(dev_priv); 1154 gfx = i915_gfx_val(dev_priv); 1155 DRM_UNLOCK(dev); 1156 1157 sbuf_printf(m, "GMCH temp: %ld\n", temp); 1158 sbuf_printf(m, "Chipset power: %ld\n", chipset); 1159 sbuf_printf(m, "GFX power: %ld\n", gfx); 1160 sbuf_printf(m, "Total power: %ld\n", chipset + gfx); 1161 1162 return 0; 1163} 1164 1165static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m, 1166 void *unused) 1167{ 1168 drm_i915_private_t *dev_priv = dev->dev_private; 1169 int gpu_freq, ia_freq; 1170 1171 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1172 sbuf_printf(m, "unsupported on this chipset"); 1173 return 0; 1174 } 1175 1176 if (sx_xlock_sig(&dev->dev_struct_lock)) 1177 return -EINTR; 1178 1179 sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1180 1181 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1182 gpu_freq++) { 1183 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1184 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1185 GEN6_PCODE_READ_MIN_FREQ_TABLE); 1186 if (_intel_wait_for(dev, 1187 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 1188 10, 1, "915frq")) { 1189 DRM_ERROR("pcode read of freq table timed out\n"); 1190 continue; 1191 } 1192 ia_freq = I915_READ(GEN6_PCODE_DATA); 1193 sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1194 } 1195 1196 DRM_UNLOCK(dev); 1197 1198 return 0; 1199} 1200 1201static int i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused) 1202{ 1203 drm_i915_private_t *dev_priv = dev->dev_private; 1204 1205 if (sx_xlock_sig(&dev->dev_struct_lock)) 1206 return 
-EINTR; 1207 1208 sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1209 1210 DRM_UNLOCK(dev); 1211 1212 return 0; 1213} 1214 1215#if 0 1216static int i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused) 1217{ 1218 drm_i915_private_t *dev_priv = dev->dev_private; 1219 struct intel_opregion *opregion = &dev_priv->opregion; 1220 1221 if (sx_xlock_sig(&dev->dev_struct_lock)) 1222 return -EINTR; 1223 1224 if (opregion->header) 1225 seq_write(m, opregion->header, OPREGION_SIZE); 1226 1227 DRM_UNLOCK(dev); 1228 1229 return 0; 1230} 1231#endif 1232 1233static int i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data) 1234{ 1235 drm_i915_private_t *dev_priv = dev->dev_private; 1236 struct intel_fbdev *ifbdev; 1237 struct intel_framebuffer *fb; 1238 1239 if (sx_xlock_sig(&dev->dev_struct_lock)) 1240 return -EINTR; 1241 1242 ifbdev = dev_priv->fbdev; 1243 if (ifbdev == NULL) { 1244 DRM_UNLOCK(dev); 1245 return 0; 1246 } 1247 fb = to_intel_framebuffer(ifbdev->helper.fb); 1248 1249 sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1250 fb->base.width, 1251 fb->base.height, 1252 fb->base.depth, 1253 fb->base.bits_per_pixel); 1254 describe_obj(m, fb->obj); 1255 sbuf_printf(m, "\n"); 1256 1257 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1258 if (&fb->base == ifbdev->helper.fb) 1259 continue; 1260 1261 sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1262 fb->base.width, 1263 fb->base.height, 1264 fb->base.depth, 1265 fb->base.bits_per_pixel); 1266 describe_obj(m, fb->obj); 1267 sbuf_printf(m, "\n"); 1268 } 1269 1270 DRM_UNLOCK(dev); 1271 1272 return 0; 1273} 1274 1275static int i915_context_status(struct drm_device *dev, struct sbuf *m, void *data) 1276{ 1277 drm_i915_private_t *dev_priv; 1278 int ret; 1279 1280 dev_priv = dev->dev_private; 1281 ret = sx_xlock_sig(&dev->mode_config.mutex); 1282 if (ret != 0) 1283 return -EINTR; 1284 1285 if (dev_priv->pwrctx != NULL) { 1286 
sbuf_printf(m, "power context "); 1287 describe_obj(m, dev_priv->pwrctx); 1288 sbuf_printf(m, "\n"); 1289 } 1290 1291 if (dev_priv->renderctx != NULL) { 1292 sbuf_printf(m, "render context "); 1293 describe_obj(m, dev_priv->renderctx); 1294 sbuf_printf(m, "\n"); 1295 } 1296 1297 sx_xunlock(&dev->mode_config.mutex); 1298 1299 return 0; 1300} 1301 1302static int i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m, 1303 void *data) 1304{ 1305 struct drm_i915_private *dev_priv = dev->dev_private; 1306 unsigned forcewake_count; 1307 1308 mtx_lock(&dev_priv->gt_lock); 1309 forcewake_count = dev_priv->forcewake_count; 1310 mtx_unlock(&dev_priv->gt_lock); 1311 1312 sbuf_printf(m, "forcewake count = %u\n", forcewake_count); 1313 1314 return 0; 1315} 1316 1317static const char *swizzle_string(unsigned swizzle) 1318{ 1319 1320 switch(swizzle) { 1321 case I915_BIT_6_SWIZZLE_NONE: 1322 return "none"; 1323 case I915_BIT_6_SWIZZLE_9: 1324 return "bit9"; 1325 case I915_BIT_6_SWIZZLE_9_10: 1326 return "bit9/bit10"; 1327 case I915_BIT_6_SWIZZLE_9_11: 1328 return "bit9/bit11"; 1329 case I915_BIT_6_SWIZZLE_9_10_11: 1330 return "bit9/bit10/bit11"; 1331 case I915_BIT_6_SWIZZLE_9_17: 1332 return "bit9/bit17"; 1333 case I915_BIT_6_SWIZZLE_9_10_17: 1334 return "bit9/bit10/bit17"; 1335 case I915_BIT_6_SWIZZLE_UNKNOWN: 1336 return "unknown"; 1337 } 1338 1339 return "bug"; 1340} 1341 1342static int i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data) 1343{ 1344 struct drm_i915_private *dev_priv = dev->dev_private; 1345 int ret; 1346 1347 ret = sx_xlock_sig(&dev->dev_struct_lock); 1348 if (ret) 1349 return -EINTR; 1350 1351 sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n", 1352 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1353 sbuf_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1354 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1355 1356 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1357 sbuf_printf(m, "DDC = 0x%08x\n", 1358 I915_READ(DCC)); 1359 sbuf_printf(m, 
"C0DRB3 = 0x%04x\n", 1360 I915_READ16(C0DRB3)); 1361 sbuf_printf(m, "C1DRB3 = 0x%04x\n", 1362 I915_READ16(C1DRB3)); 1363 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1364 sbuf_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1365 I915_READ(MAD_DIMM_C0)); 1366 sbuf_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1367 I915_READ(MAD_DIMM_C1)); 1368 sbuf_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1369 I915_READ(MAD_DIMM_C2)); 1370 sbuf_printf(m, "TILECTL = 0x%08x\n", 1371 I915_READ(TILECTL)); 1372 sbuf_printf(m, "ARB_MODE = 0x%08x\n", 1373 I915_READ(ARB_MODE)); 1374 sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1375 I915_READ(DISP_ARB_CTL)); 1376 } 1377 DRM_UNLOCK(dev); 1378 1379 return 0; 1380} 1381 1382static int i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data) 1383{ 1384 struct drm_i915_private *dev_priv = dev->dev_private; 1385 struct intel_ring_buffer *ring; 1386 int i, ret; 1387 1388 1389 ret = sx_xlock_sig(&dev->dev_struct_lock); 1390 if (ret) 1391 return -EINTR; 1392 if (INTEL_INFO(dev)->gen == 6) 1393 sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1394 1395 for (i = 0; i < I915_NUM_RINGS; i++) { 1396 ring = &dev_priv->rings[i]; 1397 1398 sbuf_printf(m, "%s\n", ring->name); 1399 if (INTEL_INFO(dev)->gen == 7) 1400 sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1401 sbuf_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1402 sbuf_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1403 sbuf_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1404 } 1405 if (dev_priv->mm.aliasing_ppgtt) { 1406 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1407 1408 sbuf_printf(m, "aliasing PPGTT:\n"); 1409 sbuf_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1410 } 1411 sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1412 DRM_UNLOCK(dev); 1413 1414 return 0; 1415} 1416 1417static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data) 1418{ 1419 
struct drm_i915_private *dev_priv = dev->dev_private; 1420 int ret; 1421 1422 1423 if (!IS_VALLEYVIEW(dev)) { 1424 sbuf_printf(m, "unsupported\n"); 1425 return 0; 1426 } 1427 1428 ret = sx_xlock_sig(&dev->mode_config.mutex); 1429 if (ret) 1430 return -EINTR; 1431 1432 sbuf_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1433 1434 sbuf_printf(m, "DPIO_DIV_A: 0x%08x\n", 1435 intel_dpio_read(dev_priv, _DPIO_DIV_A)); 1436 sbuf_printf(m, "DPIO_DIV_B: 0x%08x\n", 1437 intel_dpio_read(dev_priv, _DPIO_DIV_B)); 1438 1439 sbuf_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1440 intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1441 sbuf_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1442 intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1443 1444 sbuf_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1445 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1446 sbuf_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1447 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1448 1449 sbuf_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", 1450 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); 1451 sbuf_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", 1452 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); 1453 1454 sbuf_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1455 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1456 1457 sx_xunlock(&dev->mode_config.mutex); 1458 1459 return 0; 1460} 1461 1462static int 1463i915_debug_set_wedged(SYSCTL_HANDLER_ARGS) 1464{ 1465 struct drm_device *dev = arg1; 1466 drm_i915_private_t *dev_priv = dev->dev_private; 1467 int error, wedged; 1468 1469 if (dev_priv == NULL) 1470 return (EBUSY); 1471 wedged = dev_priv->mm.wedged; 1472 error = sysctl_handle_int(oidp, &wedged, 0, req); 1473 if (error || !req->newptr) 1474 return (error); 1475 DRM_INFO("Manually setting wedged to %d\n", wedged); 1476 i915_handle_error(dev, wedged); 1477 return (error); 1478} 1479 1480static int 1481i915_max_freq(SYSCTL_HANDLER_ARGS) 1482{ 1483 struct drm_device *dev = arg1; 1484 drm_i915_private_t *dev_priv = dev->dev_private; 1485 int error, max_freq; 
1486 1487 if (dev_priv == NULL) 1488 return (EBUSY); 1489 max_freq = dev_priv->max_delay * 50; 1490 error = sysctl_handle_int(oidp, &max_freq, 0, req); 1491 if (error || !req->newptr) 1492 return (error); 1493 DRM_DEBUG("Manually setting max freq to %d\n", max_freq); 1494 /* 1495 * Turbo will still be enabled, but won't go above the set value. 1496 */ 1497 dev_priv->max_delay = max_freq / 50; 1498 gen6_set_rps(dev, max_freq / 50); 1499 return (error); 1500} 1501 1502static int 1503i915_cache_sharing(SYSCTL_HANDLER_ARGS) 1504{ 1505 struct drm_device *dev = arg1; 1506 drm_i915_private_t *dev_priv = dev->dev_private; 1507 int error, snpcr, cache_sharing; 1508 1509 if (dev_priv == NULL) 1510 return (EBUSY); 1511 DRM_LOCK(dev); 1512 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1513 DRM_UNLOCK(dev); 1514 cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 1515 error = sysctl_handle_int(oidp, &cache_sharing, 0, req); 1516 if (error || !req->newptr) 1517 return (error); 1518 if (cache_sharing < 0 || cache_sharing > 3) 1519 return (EINVAL); 1520 DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing); 1521 1522 DRM_LOCK(dev); 1523 /* Update the cache sharing policy here as well */ 1524 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1525 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1526 snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT); 1527 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1528 DRM_UNLOCK(dev); 1529 return (0); 1530} 1531 1532static int 1533i915_stop_rings(SYSCTL_HANDLER_ARGS) 1534{ 1535 struct drm_device *dev = arg1; 1536 drm_i915_private_t *dev_priv = dev->dev_private; 1537 int error, val; 1538 1539 if (dev_priv == NULL) 1540 return (EBUSY); 1541 DRM_LOCK(dev); 1542 val = dev_priv->stop_rings; 1543 DRM_UNLOCK(dev); 1544 error = sysctl_handle_int(oidp, &val, 0, req); 1545 if (error || !req->newptr) 1546 return (error); 1547 DRM_DEBUG("Stopping rings 0x%08x\n", val); 1548 1549 DRM_LOCK(dev); 1550 dev_priv->stop_rings = val; 1551 DRM_UNLOCK(dev); 1552 return (0); 
1553} 1554 1555static struct i915_info_sysctl_list { 1556 const char *name; 1557 int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data); 1558 int (*ptr_w)(struct drm_device *dev, const char *str, void *data); 1559 int flags; 1560 void *data; 1561} i915_info_sysctl_list[] = { 1562 {"i915_capabilities", i915_capabilities, NULL, 0}, 1563 {"i915_gem_objects", i915_gem_object_info, NULL, 0}, 1564 {"i915_gem_gtt", i915_gem_gtt_info, NULL, 0}, 1565 {"i915_gem_pinned", i915_gem_gtt_info, NULL, 0, (void *)PINNED_LIST}, 1566 {"i915_gem_active", i915_gem_object_list_info, NULL, 0, (void *)ACTIVE_LIST}, 1567 {"i915_gem_flushing", i915_gem_object_list_info, NULL, 0, (void *)FLUSHING_LIST}, 1568 {"i915_gem_inactive", i915_gem_object_list_info, NULL, 0, (void *)INACTIVE_LIST}, 1569 {"i915_gem_pageflip", i915_gem_pageflip_info, NULL, 0}, 1570 {"i915_gem_request", i915_gem_request_info, NULL, 0}, 1571 {"i915_gem_seqno", i915_gem_seqno_info, NULL, 0}, 1572 {"i915_gem_fence_regs", i915_gem_fence_regs_info, NULL, 0}, 1573 {"i915_gem_interrupt", i915_interrupt_info, NULL, 0}, 1574 {"i915_gem_hws", i915_hws_info, NULL, 0, (void *)RCS}, 1575 {"i915_gem_hws_blt", i915_hws_info, NULL, 0, (void *)BCS}, 1576 {"i915_gem_hws_bsd", i915_hws_info, NULL, 0, (void *)VCS}, 1577 {"i915_error_state", i915_error_state, i915_error_state_write, 0}, 1578 {"i915_rstdby_delays", i915_rstdby_delays, NULL, 0}, 1579 {"i915_cur_delayinfo", i915_cur_delayinfo, NULL, 0}, 1580 {"i915_delayfreq_table", i915_delayfreq_table, NULL, 0}, 1581 {"i915_inttoext_table", i915_inttoext_table, NULL, 0}, 1582 {"i915_drpc_info", i915_drpc_info, NULL, 0}, 1583 {"i915_emon_status", i915_emon_status, NULL, 0}, 1584 {"i915_ring_freq_table", i915_ring_freq_table, NULL, 0}, 1585 {"i915_gfxec", i915_gfxec, NULL, 0}, 1586 {"i915_fbc_status", i915_fbc_status, NULL, 0}, 1587 {"i915_sr_status", i915_sr_status, NULL, 0}, 1588#if 0 1589 {"i915_opregion", i915_opregion, NULL, 0}, 1590#endif 1591 {"i915_gem_framebuffer", 
i915_gem_framebuffer_info, NULL, 0}, 1592 {"i915_context_status", i915_context_status, NULL, 0}, 1593 {"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 1594 NULL, 0}, 1595 {"i915_swizzle_info", i915_swizzle_info, NULL, 0}, 1596 {"i915_ppgtt_info", i915_ppgtt_info, NULL, 0}, 1597 {"i915_dpio", i915_dpio_info, NULL, 0}, 1598}; 1599 1600struct i915_info_sysctl_thunk { 1601 struct drm_device *dev; 1602 int idx; 1603 void *arg; 1604}; 1605 1606static int 1607i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS) 1608{ 1609 struct sbuf m; 1610 struct i915_info_sysctl_thunk *thunk; 1611 struct drm_device *dev; 1612 drm_i915_private_t *dev_priv; 1613 char *p; 1614 int error; 1615 1616 thunk = arg1; 1617 dev = thunk->dev; 1618 dev_priv = dev->dev_private; 1619 if (dev_priv == NULL) 1620 return (EBUSY); 1621 error = sysctl_wire_old_buffer(req, 0); 1622 if (error != 0) 1623 return (error); 1624 sbuf_new_for_sysctl(&m, NULL, 128, req); 1625 error = -i915_info_sysctl_list[thunk->idx].ptr(dev, &m, 1626 thunk->arg); 1627 if (error == 0) 1628 error = sbuf_finish(&m); 1629 sbuf_delete(&m); 1630 if (error != 0 || req->newptr == NULL) 1631 return (error); 1632 if (req->newlen > 2048) 1633 return (E2BIG); 1634 p = malloc(req->newlen + 1, M_TEMP, M_WAITOK); 1635 error = SYSCTL_IN(req, p, req->newlen); 1636 if (error != 0) 1637 goto out; 1638 p[req->newlen] = '\0'; 1639 error = i915_info_sysctl_list[thunk->idx].ptr_w(dev, p, 1640 thunk->arg); 1641out: 1642 free(p, M_TEMP); 1643 return (error); 1644} 1645 1646extern int i915_gem_sync_exec_requests; 1647extern int i915_fix_mi_batchbuffer_end; 1648extern int i915_intr_pf; 1649extern long i915_gem_wired_pages_cnt; 1650 1651int 1652i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, 1653 struct sysctl_oid *top) 1654{ 1655 struct sysctl_oid *oid, *info; 1656 struct i915_info_sysctl_thunk *thunks; 1657 int i, error; 1658 1659 thunks = malloc(sizeof(*thunks) * ARRAY_SIZE(i915_info_sysctl_list), 1660 DRM_MEM_DRIVER, 
M_WAITOK | M_ZERO); 1661 for (i = 0; i < ARRAY_SIZE(i915_info_sysctl_list); i++) { 1662 thunks[i].dev = dev; 1663 thunks[i].idx = i; 1664 thunks[i].arg = i915_info_sysctl_list[i].data; 1665 } 1666 dev->sysctl_private = thunks; 1667 info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info", 1668 CTLFLAG_RW, NULL, NULL); 1669 if (info == NULL) 1670 return (-ENOMEM); 1671 for (i = 0; i < ARRAY_SIZE(i915_info_sysctl_list); i++) { 1672 oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO, 1673 i915_info_sysctl_list[i].name, CTLTYPE_STRING | 1674 (i915_info_sysctl_list[i].ptr_w != NULL ? CTLFLAG_RW : 1675 CTLFLAG_RD), 1676 &thunks[i], 0, i915_info_sysctl_handler, "A", NULL); 1677 if (oid == NULL) 1678 return (-ENOMEM); 1679 } 1680 oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO, 1681 "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt, 1682 NULL); 1683 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged", 1684 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, 1685 i915_debug_set_wedged, "I", NULL); 1686 if (oid == NULL) 1687 return (-ENOMEM); 1688 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq", 1689 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq, 1690 "I", NULL); 1691 if (oid == NULL) 1692 return (-ENOMEM); 1693 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, 1694 "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 1695 0, i915_cache_sharing, "I", NULL); 1696 if (oid == NULL) 1697 return (-ENOMEM); 1698 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, 1699 "stop_rings", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 1700 0, i915_stop_rings, "I", NULL); 1701 if (oid == NULL) 1702 return (-ENOMEM); 1703 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec", 1704 CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL); 1705 if (oid == NULL) 1706 return (-ENOMEM); 1707 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi", 
1708 CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL); 1709 if (oid == NULL) 1710 return (-ENOMEM); 1711 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf", 1712 CTLFLAG_RW, &i915_intr_pf, 0, NULL); 1713 if (oid == NULL) 1714 return (-ENOMEM); 1715 1716 error = drm_add_busid_modesetting(dev, ctx, top); 1717 if (error != 0) 1718 return (error); 1719 1720 return (0); 1721} 1722 1723void 1724i915_sysctl_cleanup(struct drm_device *dev) 1725{ 1726 1727 free(dev->sysctl_private, DRM_MEM_DRIVER); 1728} 1729