i915_debug.c revision 277487
1/* 2 * Copyright �� 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_debug.c 277487 2015-01-21 16:10:37Z kib $"); 31 32#include <dev/drm2/drmP.h> 33#include <dev/drm2/drm.h> 34#include <dev/drm2/i915/i915_drm.h> 35#include <dev/drm2/i915/i915_drv.h> 36#include <dev/drm2/i915/intel_drv.h> 37#include <dev/drm2/i915/intel_ringbuffer.h> 38 39#include <sys/sysctl.h> 40 41enum { 42 ACTIVE_LIST, 43 FLUSHING_LIST, 44 INACTIVE_LIST, 45 PINNED_LIST, 46}; 47 48static const char * 49yesno(int v) 50{ 51 return (v ? "yes" : "no"); 52} 53 54static int 55i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data) 56{ 57 const struct intel_device_info *info = INTEL_INFO(dev); 58 59 sbuf_printf(m, "gen: %d\n", info->gen); 60 if (HAS_PCH_SPLIT(dev)) 61 sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 62#define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x)) 63 B(is_mobile); 64 B(is_i85x); 65 B(is_i915g); 66 B(is_i945gm); 67 B(is_g33); 68 B(need_gfx_hws); 69 B(is_g4x); 70 B(is_pineview); 71 B(has_fbc); 72 B(has_pipe_cxsr); 73 B(has_hotplug); 74 B(cursor_needs_physical); 75 B(has_overlay); 76 B(overlay_needs_physical); 77 B(supports_tv); 78 B(has_bsd_ring); 79 B(has_blt_ring); 80 B(has_llc); 81#undef B 82 83 return (0); 84} 85 86static const char * 87get_pin_flag(struct drm_i915_gem_object *obj) 88{ 89 if (obj->user_pin_count > 0) 90 return "P"; 91 else if (obj->pin_count > 0) 92 return "p"; 93 else 94 return " "; 95} 96 97static const char * 98get_tiling_flag(struct drm_i915_gem_object *obj) 99{ 100 switch (obj->tiling_mode) { 101 default: 102 case I915_TILING_NONE: return (" "); 103 case I915_TILING_X: return ("X"); 104 case I915_TILING_Y: return ("Y"); 105 } 106} 107 108static const char * 109cache_level_str(int type) 110{ 111 switch (type) { 112 case I915_CACHE_NONE: return " uncached"; 113 case I915_CACHE_LLC: return " snooped (LLC)"; 114 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 115 default: return (""); 116 } 117} 118 119static void 120describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj) 121{ 122 123 sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", 124 &obj->base, 125 get_pin_flag(obj), 126 get_tiling_flag(obj), 127 obj->base.size / 1024, 128 obj->base.read_domains, 129 obj->base.write_domain, 130 obj->last_rendering_seqno, 131 obj->last_fenced_seqno, 132 cache_level_str(obj->cache_level), 133 obj->dirty ? " dirty" : "", 134 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 135 if (obj->base.name) 136 sbuf_printf(m, " (name: %d)", obj->base.name); 137 if (obj->pin_display) 138 sbuf_printf(m, " (display)"); 139 if (obj->fence_reg != I915_FENCE_REG_NONE) 140 sbuf_printf(m, " (fence: %d)", obj->fence_reg); 141 if (obj->gtt_space != NULL) 142 sbuf_printf(m, " (gtt offset: %08x, size: %08x)", 143 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 144 if (obj->pin_mappable || obj->fault_mappable) { 145 char s[3], *t = s; 146 if (obj->pin_mappable) 147 *t++ = 'p'; 148 if (obj->fault_mappable) 149 *t++ = 'f'; 150 *t = '\0'; 151 sbuf_printf(m, " (%s mappable)", s); 152 } 153 if (obj->ring != NULL) 154 sbuf_printf(m, " (%s)", obj->ring->name); 155} 156 157static int 158i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data) 159{ 160 uintptr_t list = (uintptr_t)data; 161 struct list_head *head; 162 drm_i915_private_t *dev_priv = dev->dev_private; 163 struct drm_i915_gem_object *obj; 164 size_t total_obj_size, total_gtt_size; 165 int count; 166 167 if (sx_xlock_sig(&dev->dev_struct_lock)) 168 return (EINTR); 169 170 switch (list) { 171 case ACTIVE_LIST: 172 sbuf_printf(m, "Active:\n"); 173 head = &dev_priv->mm.active_list; 174 break; 175 case INACTIVE_LIST: 176 sbuf_printf(m, "Inactive:\n"); 177 head = &dev_priv->mm.inactive_list; 178 break; 179 case FLUSHING_LIST: 180 sbuf_printf(m, "Flushing:\n"); 181 head = &dev_priv->mm.flushing_list; 182 break; 183 default: 184 DRM_UNLOCK(dev); 185 return (EINVAL); 186 } 187 188 total_obj_size = total_gtt_size = count = 0; 189 list_for_each_entry(obj, head, mm_list) { 190 sbuf_printf(m, " "); 191 describe_obj(m, obj); 192 sbuf_printf(m, "\n"); 193 total_obj_size += obj->base.size; 194 total_gtt_size += obj->gtt_space->size; 195 count++; 196 } 197 DRM_UNLOCK(dev); 198 199 sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 200 count, total_obj_size, total_gtt_size); 201 return (0); 202} 203 204#define count_objects(list, member) do { \ 205 list_for_each_entry(obj, list, member) { \ 206 size += obj->gtt_space->size; \ 207 ++count; \ 208 if (obj->map_and_fenceable) { \ 209 mappable_size += obj->gtt_space->size; \ 210 ++mappable_count; \ 211 } \ 212 } \ 213} while (0) 214 215static int 216i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data) 217{ 218 struct drm_i915_private *dev_priv = dev->dev_private; 219 u32 count, mappable_count; 220 size_t size, mappable_size; 221 struct drm_i915_gem_object *obj; 222 223 if (sx_xlock_sig(&dev->dev_struct_lock)) 224 return (EINTR); 225 sbuf_printf(m, "%u objects, %zu bytes\n", 226 dev_priv->mm.object_count, 227 dev_priv->mm.object_memory); 228 229 size = count = mappable_size = mappable_count = 0; 230 count_objects(&dev_priv->mm.gtt_list, gtt_list); 231 sbuf_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 232 count, mappable_count, size, mappable_size); 233 234 size = count = mappable_size = mappable_count = 0; 235 count_objects(&dev_priv->mm.active_list, mm_list); 236 count_objects(&dev_priv->mm.flushing_list, mm_list); 237 sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 238 count, mappable_count, size, mappable_size); 239 240 size = count = mappable_size = mappable_count = 0; 241 count_objects(&dev_priv->mm.inactive_list, mm_list); 242 sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 243 count, mappable_count, size, mappable_size); 244 245 size = count = mappable_size = mappable_count = 0; 246 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 247 if (obj->fault_mappable) { 248 size += obj->gtt_space->size; 249 ++count; 250 } 251 if (obj->pin_mappable) { 252 mappable_size += obj->gtt_space->size; 253 ++mappable_count; 254 } 255 } 256 sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n", 257 mappable_count, mappable_size); 258 sbuf_printf(m, "%u fault mappable objects, %zu bytes\n", 259 count, size); 260 261 sbuf_printf(m, "%zu [%zu] gtt total\n", 262 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 263 DRM_UNLOCK(dev); 264 265 return (0); 266} 267 268static int 269i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data) 270{ 271 struct drm_i915_private *dev_priv = dev->dev_private; 272 uintptr_t list = (uintptr_t)data; 273 struct drm_i915_gem_object *obj; 274 size_t total_obj_size, total_gtt_size; 275 int count; 276 277 if (sx_xlock_sig(&dev->dev_struct_lock)) 278 return (EINTR); 279 280 total_obj_size = total_gtt_size = count = 0; 281 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 282 if (list == PINNED_LIST && obj->pin_count == 0) 283 continue; 284 285 sbuf_printf(m, " "); 286 describe_obj(m, obj); 287 sbuf_printf(m, "\n"); 288 total_obj_size += obj->base.size; 289 total_gtt_size += obj->gtt_space->size; 290 count++; 291 } 292 293 DRM_UNLOCK(dev); 294 295 sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 296 count, total_obj_size, total_gtt_size); 297 298 return (0); 299} 300 301static int 302i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data) 303{ 304 struct intel_crtc *crtc; 305 struct drm_i915_gem_object *obj; 306 struct intel_unpin_work *work; 307 char pipe; 308 char plane; 309 310 if ((dev->driver->driver_features & DRIVER_MODESET) == 0) 311 return (0); 312 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 313 pipe = pipe_name(crtc->pipe); 314 plane = plane_name(crtc->plane); 315 316 mtx_lock(&dev->event_lock); 317 work = crtc->unpin_work; 318 if (work == NULL) { 319 sbuf_printf(m, "No flip due on pipe %c (plane %c)\n", 320 pipe, plane); 321 } else { 322 if (!work->pending) { 323 sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n", 324 pipe, plane); 325 } else { 326 sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 327 pipe, plane); 328 } 329 if (work->enable_stall_check) 330 sbuf_printf(m, "Stall check enabled, "); 331 else 332 sbuf_printf(m, "Stall check waiting for page flip ioctl, "); 333 sbuf_printf(m, "%d prepares\n", work->pending); 334 335 if (work->old_fb_obj) { 336 obj = work->old_fb_obj; 337 if (obj) 338 sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 339 } 340 if (work->pending_flip_obj) { 341 obj = work->pending_flip_obj; 342 if (obj) 343 sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 344 } 345 } 346 mtx_unlock(&dev->event_lock); 347 } 348 349 return (0); 350} 351 352static int 353i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data) 354{ 355 drm_i915_private_t *dev_priv = dev->dev_private; 356 struct drm_i915_gem_request *gem_request; 357 int count; 358 359 if (sx_xlock_sig(&dev->dev_struct_lock)) 360 return (EINTR); 361 362 count = 0; 363 if (!list_empty(&dev_priv->rings[RCS].request_list)) { 364 sbuf_printf(m, "Render requests:\n"); 365 list_for_each_entry(gem_request, 366 &dev_priv->rings[RCS].request_list, 367 list) { 368 sbuf_printf(m, " %d @ %d\n", 369 gem_request->seqno, 370 (int) (jiffies - gem_request->emitted_jiffies)); 371 } 372 count++; 373 } 374 if (!list_empty(&dev_priv->rings[VCS].request_list)) { 375 sbuf_printf(m, "BSD requests:\n"); 376 list_for_each_entry(gem_request, 377 &dev_priv->rings[VCS].request_list, 378 list) { 379 sbuf_printf(m, " %d @ %d\n", 380 gem_request->seqno, 381 (int) (jiffies - gem_request->emitted_jiffies)); 382 } 383 count++; 384 } 385 if (!list_empty(&dev_priv->rings[BCS].request_list)) { 386 sbuf_printf(m, "BLT requests:\n"); 387 list_for_each_entry(gem_request, 388 &dev_priv->rings[BCS].request_list, 389 list) { 390 sbuf_printf(m, " %d @ %d\n", 391 gem_request->seqno, 392 (int) (jiffies - gem_request->emitted_jiffies)); 393 } 394 count++; 395 } 396 DRM_UNLOCK(dev); 397 398 if (count == 0) 399 sbuf_printf(m, "No requests\n"); 400 401 return 0; 402} 403 404static void 405i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring) 406{ 407 if (ring->get_seqno) { 408 sbuf_printf(m, "Current sequence (%s): %d\n", 409 ring->name, ring->get_seqno(ring)); 410 } 411} 412 413static int 414i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data) 415{ 416 drm_i915_private_t *dev_priv = dev->dev_private; 417 int i; 418 419 if (sx_xlock_sig(&dev->dev_struct_lock)) 420 return (EINTR); 421 for (i = 0; i < I915_NUM_RINGS; i++) 422 i915_ring_seqno_info(m, &dev_priv->rings[i]); 423 DRM_UNLOCK(dev); 424 return (0); 425} 426 427 428static int 429i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data) 430{ 431 drm_i915_private_t *dev_priv = dev->dev_private; 432 int i, pipe; 433 434 if (sx_xlock_sig(&dev->dev_struct_lock)) 435 return (EINTR); 436 437 if (IS_VALLEYVIEW(dev)) { 438 sbuf_printf(m, "Display IER:\t%08x\n", 439 I915_READ(VLV_IER)); 440 sbuf_printf(m, "Display IIR:\t%08x\n", 441 I915_READ(VLV_IIR)); 442 sbuf_printf(m, "Display IIR_RW:\t%08x\n", 443 I915_READ(VLV_IIR_RW)); 444 sbuf_printf(m, "Display IMR:\t%08x\n", 445 I915_READ(VLV_IMR)); 446 for_each_pipe(pipe) 447 sbuf_printf(m, "Pipe %c stat:\t%08x\n", 448 pipe_name(pipe), 449 I915_READ(PIPESTAT(pipe))); 450 451 sbuf_printf(m, "Master IER:\t%08x\n", 452 I915_READ(VLV_MASTER_IER)); 453 454 sbuf_printf(m, "Render IER:\t%08x\n", 455 I915_READ(GTIER)); 456 sbuf_printf(m, "Render IIR:\t%08x\n", 457 I915_READ(GTIIR)); 458 sbuf_printf(m, "Render IMR:\t%08x\n", 459 I915_READ(GTIMR)); 460 461 sbuf_printf(m, "PM IER:\t\t%08x\n", 462 I915_READ(GEN6_PMIER)); 463 sbuf_printf(m, "PM IIR:\t\t%08x\n", 464 I915_READ(GEN6_PMIIR)); 465 sbuf_printf(m, "PM IMR:\t\t%08x\n", 466 I915_READ(GEN6_PMIMR)); 467 468 sbuf_printf(m, "Port hotplug:\t%08x\n", 469 I915_READ(PORT_HOTPLUG_EN)); 470 sbuf_printf(m, "DPFLIPSTAT:\t%08x\n", 471 I915_READ(VLV_DPFLIPSTAT)); 472 sbuf_printf(m, "DPINVGTT:\t%08x\n", 473 I915_READ(DPINVGTT)); 474 475 } else if (!HAS_PCH_SPLIT(dev)) { 476 sbuf_printf(m, "Interrupt enable: %08x\n", 477 I915_READ(IER)); 478 sbuf_printf(m, "Interrupt identity: %08x\n", 479 I915_READ(IIR)); 480 sbuf_printf(m, "Interrupt mask: %08x\n", 481 I915_READ(IMR)); 482 for_each_pipe(pipe) 483 sbuf_printf(m, "Pipe %c stat: %08x\n", 484 pipe_name(pipe), 485 I915_READ(PIPESTAT(pipe))); 486 } else { 487 sbuf_printf(m, "North Display Interrupt enable: %08x\n", 488 I915_READ(DEIER)); 489 sbuf_printf(m, "North Display Interrupt identity: %08x\n", 490 I915_READ(DEIIR)); 491 sbuf_printf(m, "North Display Interrupt mask: %08x\n", 492 I915_READ(DEIMR)); 493 sbuf_printf(m, "South Display Interrupt enable: %08x\n", 494 I915_READ(SDEIER)); 495 sbuf_printf(m, "South Display Interrupt identity: %08x\n", 496 I915_READ(SDEIIR)); 497 sbuf_printf(m, "South Display Interrupt mask: %08x\n", 498 I915_READ(SDEIMR)); 499 sbuf_printf(m, "Graphics Interrupt enable: %08x\n", 500 I915_READ(GTIER)); 501 sbuf_printf(m, "Graphics Interrupt identity: %08x\n", 502 I915_READ(GTIIR)); 503 sbuf_printf(m, "Graphics Interrupt mask: %08x\n", 504 I915_READ(GTIMR)); 505 } 506 sbuf_printf(m, "Interrupts received: %d\n", 507 atomic_read(&dev_priv->irq_received)); 508 for (i = 0; i < I915_NUM_RINGS; i++) { 509 if (IS_GEN6(dev) || IS_GEN7(dev)) { 510 sbuf_printf(m, "Graphics Interrupt mask (%s): %08x\n", 511 dev_priv->rings[i].name, 512 I915_READ_IMR(&dev_priv->rings[i])); 513 } 514 i915_ring_seqno_info(m, &dev_priv->rings[i]); 515 } 516 DRM_UNLOCK(dev); 517 518 return (0); 519} 520 521static int 522i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data) 523{ 524 drm_i915_private_t *dev_priv = dev->dev_private; 525 int i; 526 527 if (sx_xlock_sig(&dev->dev_struct_lock)) 528 return (EINTR); 529 530 sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 531 sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 532 for (i = 0; i < dev_priv->num_fence_regs; i++) { 533 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 534 535 sbuf_printf(m, "Fenced object[%2d] = ", i); 536 if (obj == NULL) 537 sbuf_printf(m, "unused"); 538 else 539 describe_obj(m, obj); 540 sbuf_printf(m, "\n"); 541 } 542 543 DRM_UNLOCK(dev); 544 return (0); 545} 546 547static int 548i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data) 549{ 550 drm_i915_private_t *dev_priv = dev->dev_private; 551 struct intel_ring_buffer *ring; 552 const volatile u32 *hws; 553 int i; 554 555 ring = &dev_priv->rings[(uintptr_t)data]; 556 hws = (volatile u32 *)ring->status_page.page_addr; 557 if (hws == NULL) 558 return (0); 559 560 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 561 sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 562 i * 4, 563 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 564 } 565 return (0); 566} 567 568static const char * 569ring_str(int ring) 570{ 571 switch (ring) { 572 case RCS: return (" render"); 573 case VCS: return (" bsd"); 574 case BCS: return (" blt"); 575 default: return (""); 576 } 577} 578 579static const char * 580pin_flag(int pinned) 581{ 582 if (pinned > 0) 583 return (" P"); 584 else if (pinned < 0) 585 return (" p"); 586 else 587 return (""); 588} 589 590static const char *tiling_flag(int tiling) 591{ 592 switch (tiling) { 593 default: 594 case I915_TILING_NONE: return ""; 595 case I915_TILING_X: return " X"; 596 case I915_TILING_Y: return " Y"; 597 } 598} 599 600static const char *dirty_flag(int dirty) 601{ 602 return dirty ? " dirty" : ""; 603} 604 605static const char *purgeable_flag(int purgeable) 606{ 607 return purgeable ? " purgeable" : ""; 608} 609 610static void print_error_buffers(struct sbuf *m, const char *name, 611 struct drm_i915_error_buffer *err, int count) 612{ 613 614 sbuf_printf(m, "%s [%d]:\n", name, count); 615 616 while (count--) { 617 sbuf_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", 618 err->gtt_offset, 619 err->size, 620 err->read_domains, 621 err->write_domain, 622 err->seqno, 623 pin_flag(err->pinned), 624 tiling_flag(err->tiling), 625 dirty_flag(err->dirty), 626 purgeable_flag(err->purgeable), 627 err->ring != -1 ? " " : "", 628 ring_str(err->ring), 629 cache_level_str(err->cache_level)); 630 631 if (err->name) 632 sbuf_printf(m, " (name: %d)", err->name); 633 if (err->fence_reg != I915_FENCE_REG_NONE) 634 sbuf_printf(m, " (fence: %d)", err->fence_reg); 635 636 sbuf_printf(m, "\n"); 637 err++; 638 } 639} 640 641static void 642i915_ring_error_state(struct sbuf *m, struct drm_device *dev, 643 struct drm_i915_error_state *error, unsigned ring) 644{ 645 646 MPASS((ring < I915_NUM_RINGS)); /* shut up confused gcc */ 647 sbuf_printf(m, "%s command stream:\n", ring_str(ring)); 648 sbuf_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 649 sbuf_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 650 sbuf_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 651 sbuf_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 652 sbuf_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 653 sbuf_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 654 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { 655 sbuf_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 656 sbuf_printf(m, " BBADDR: 0x%08jx\n", (uintmax_t)error->bbaddr); 657 } 658 if (INTEL_INFO(dev)->gen >= 4) 659 sbuf_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 660 sbuf_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 661 sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 662 if (INTEL_INFO(dev)->gen >= 6) { 663 sbuf_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 664 sbuf_printf(m, " SYNC_0: 0x%08x\n", 665 error->semaphore_mboxes[ring][0]); 666 sbuf_printf(m, " SYNC_1: 0x%08x\n", 667 error->semaphore_mboxes[ring][1]); 668 } 669 sbuf_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 670 sbuf_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 671 sbuf_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 672 sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 673} 674 675static int 676i915_error_state(struct drm_device *dev, struct sbuf *m, 677 void *unused) 678{ 679 drm_i915_private_t *dev_priv = dev->dev_private; 680 struct drm_i915_error_state *error; 681 struct intel_ring_buffer *ring; 682 int i, j, page, offset, elt; 683 684 mtx_lock(&dev_priv->error_lock); 685 error = dev_priv->first_error; 686 if (error != NULL) 687 refcount_acquire(&error->ref); 688 mtx_unlock(&dev_priv->error_lock); 689 if (error == NULL) { 690 sbuf_printf(m, "no error state collected\n"); 691 return (0); 692 } 693 694 error = dev_priv->first_error; 695 696 sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec, 697 (intmax_t)error->time.tv_usec); 698 sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 699 sbuf_printf(m, "EIR: 0x%08x\n", error->eir); 700 sbuf_printf(m, "IER: 0x%08x\n", error->ier); 701 sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 702 703 for (i = 0; i < dev_priv->num_fence_regs; i++) 704 sbuf_printf(m, " fence[%d] = %08jx\n", i, 705 (uintmax_t)error->fence[i]); 706 707 if (INTEL_INFO(dev)->gen >= 6) { 708 sbuf_printf(m, "ERROR: 0x%08x\n", error->error); 709 sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 710 } 711 712 for_each_ring(ring, dev_priv, i) 713 i915_ring_error_state(m, dev, error, i); 714 715 if (error->active_bo) 716 print_error_buffers(m, "Active", 717 error->active_bo, 718 error->active_bo_count); 719 720 if (error->pinned_bo) 721 print_error_buffers(m, "Pinned", 722 error->pinned_bo, 723 error->pinned_bo_count); 724 725 for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) { 726 struct drm_i915_error_object *obj; 727 728 if ((obj = error->ring[i].batchbuffer)) { 729 sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n", 730 dev_priv->rings[i].name, 731 obj->gtt_offset); 732 offset = 0; 733 for (page = 0; page < obj->page_count; page++) { 734 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 735 sbuf_printf(m, "%08x : %08x\n", 736 offset, obj->pages[page][elt]); 737 offset += 4; 738 } 739 } 740 } 741 742 if (error->ring[i].num_requests) { 743 sbuf_printf(m, "%s --- %d requests\n", 744 dev_priv->rings[i].name, 745 error->ring[i].num_requests); 746 for (j = 0; j < error->ring[i].num_requests; j++) { 747 sbuf_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", 748 error->ring[i].requests[j].seqno, 749 error->ring[i].requests[j].jiffies, 750 error->ring[i].requests[j].tail); 751 } 752 } 753 754 if ((obj = error->ring[i].ringbuffer)) { 755 sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n", 756 dev_priv->rings[i].name, 757 obj->gtt_offset); 758 offset = 0; 759 for (page = 0; page < obj->page_count; page++) { 760 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 761 sbuf_printf(m, "%08x : %08x\n", 762 offset, 763 obj->pages[page][elt]); 764 offset += 4; 765 } 766 } 767 } 768 } 769 770 if (error->overlay) 771 intel_overlay_print_error_state(m, error->overlay); 772 773 if (error->display) 774 intel_display_print_error_state(m, dev, error->display); 775 776 if (refcount_release(&error->ref)) 777 i915_error_state_free(error); 778 779 return (0); 780} 781 782static int 783i915_error_state_w(struct drm_device *dev, const char *str, void *unused) 784{ 785 drm_i915_private_t *dev_priv = dev->dev_private; 786 struct drm_i915_error_state *error; 787 788 DRM_DEBUG_DRIVER("Resetting error state\n"); 789 mtx_lock(&dev_priv->error_lock); 790 error = dev_priv->first_error; 791 dev_priv->first_error = NULL; 792 mtx_unlock(&dev_priv->error_lock); 793 if (error != NULL && refcount_release(&error->ref)) 794 i915_error_state_free(error); 795 return (0); 796} 797 798static int 799i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused) 800{ 801 drm_i915_private_t *dev_priv = dev->dev_private; 802 u16 crstanddelay; 803 804 if (sx_xlock_sig(&dev->dev_struct_lock)) 805 return (EINTR); 806 crstanddelay = I915_READ16(CRSTANDVID); 807 DRM_UNLOCK(dev); 808 809 sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n", 810 (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 811 812 return 0; 813} 814 815static int 816i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused) 817{ 818 drm_i915_private_t *dev_priv = dev->dev_private; 819 820 if (IS_GEN5(dev)) { 821 u16 rgvswctl = I915_READ16(MEMSWCTL); 822 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 823 824 sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 825 sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 826 sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 827 MEMSTAT_VID_SHIFT); 828 sbuf_printf(m, "Current P-state: %d\n", 829 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 830 } else if (IS_GEN6(dev)) { 831 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 832 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 833 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 834 u32 rpstat; 835 u32 rpupei, rpcurup, rpprevup; 836 u32 rpdownei, rpcurdown, rpprevdown; 837 int max_freq; 838 839 /* RPSTAT1 is in the GT power well */ 840 if (sx_xlock_sig(&dev->dev_struct_lock)) 841 return (EINTR); 842 gen6_gt_force_wake_get(dev_priv); 843 844 rpstat = I915_READ(GEN6_RPSTAT1); 845 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 846 rpcurup = I915_READ(GEN6_RP_CUR_UP); 847 rpprevup = I915_READ(GEN6_RP_PREV_UP); 848 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 849 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 850 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 851 852 gen6_gt_force_wake_put(dev_priv); 853 DRM_UNLOCK(dev); 854 855 sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 856 sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 857 sbuf_printf(m, "Render p-state ratio: %d\n", 858 (gt_perf_status & 0xff00) >> 8); 859 sbuf_printf(m, "Render p-state VID: %d\n", 860 gt_perf_status & 0xff); 861 sbuf_printf(m, "Render p-state limit: %d\n", 862 rp_state_limits & 0xff); 863 sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 864 GEN6_CAGF_SHIFT) * 50); 865 sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei & 866 GEN6_CURICONT_MASK); 867 sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup & 868 GEN6_CURBSYTAVG_MASK); 869 sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup & 870 GEN6_CURBSYTAVG_MASK); 871 sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 872 GEN6_CURIAVG_MASK); 873 sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 874 GEN6_CURBSYTAVG_MASK); 875 sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 876 GEN6_CURBSYTAVG_MASK); 877 878 max_freq = (rp_state_cap & 0xff0000) >> 16; 879 sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n", 880 max_freq * 50); 881 882 max_freq = (rp_state_cap & 0xff00) >> 8; 883 sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n", 884 max_freq * 50); 885 886 max_freq = rp_state_cap & 0xff; 887 sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 888 max_freq * 50); 889 } else { 890 sbuf_printf(m, "no P-state info available\n"); 891 } 892 893 return 0; 894} 895 896static int 897i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused) 898{ 899 drm_i915_private_t *dev_priv = dev->dev_private; 900 u32 delayfreq; 901 int i; 902 903 if (sx_xlock_sig(&dev->dev_struct_lock)) 904 return (EINTR); 905 for (i = 0; i < 16; i++) { 906 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 907 sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 908 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 909 } 910 DRM_UNLOCK(dev); 911 return (0); 912} 913 914static inline int 915MAP_TO_MV(int map) 916{ 917 return 1250 - (map * 25); 918} 919 920static int 921i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused) 922{ 923 drm_i915_private_t *dev_priv = dev->dev_private; 924 u32 inttoext; 925 int i; 926 927 if (sx_xlock_sig(&dev->dev_struct_lock)) 928 return (EINTR); 929 for (i = 1; i <= 32; i++) { 930 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 931 sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 932 } 933 DRM_UNLOCK(dev); 934 935 return (0); 936} 937 938static int 939ironlake_drpc_info(struct drm_device *dev, struct sbuf *m) 940{ 941 drm_i915_private_t *dev_priv = dev->dev_private; 942 u32 rgvmodectl; 943 u32 rstdbyctl; 944 u16 crstandvid; 945 946 if (sx_xlock_sig(&dev->dev_struct_lock)) 947 return (EINTR); 948 rgvmodectl = I915_READ(MEMMODECTL); 949 rstdbyctl = I915_READ(RSTDBYCTL); 950 crstandvid = I915_READ16(CRSTANDVID); 951 DRM_UNLOCK(dev); 952 953 sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 954 "yes" : "no"); 955 sbuf_printf(m, "Boost freq: %d\n", 956 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 957 MEMMODE_BOOST_FREQ_SHIFT); 958 sbuf_printf(m, "HW control enabled: %s\n", 959 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 960 sbuf_printf(m, "SW control enabled: %s\n", 961 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 962 sbuf_printf(m, "Gated voltage change: %s\n", 963 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 964 sbuf_printf(m, "Starting frequency: P%d\n", 965 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 966 sbuf_printf(m, "Max P-state: P%d\n", 967 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 968 sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 969 sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 970 sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 971 sbuf_printf(m, "Render standby enabled: %s\n", 972 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 973 sbuf_printf(m, "Current RS state: "); 974 switch (rstdbyctl & RSX_STATUS_MASK) { 975 case RSX_STATUS_ON: 976 sbuf_printf(m, "on\n"); 977 break; 978 case RSX_STATUS_RC1: 979 sbuf_printf(m, "RC1\n"); 980 break; 981 case RSX_STATUS_RC1E: 982 sbuf_printf(m, "RC1E\n"); 983 break; 984 case RSX_STATUS_RS1: 985 sbuf_printf(m, "RS1\n"); 986 break; 987 case RSX_STATUS_RS2: 988 sbuf_printf(m, "RS2 (RC6)\n"); 989 break; 990 case RSX_STATUS_RS3: 991 sbuf_printf(m, "RC3 (RC6+)\n"); 992 break; 993 default: 994 sbuf_printf(m, "unknown\n"); 995 break; 996 } 997 998 return 0; 999} 1000 1001static int 1002gen6_drpc_info(struct drm_device *dev, struct sbuf *m) 1003{ 1004 drm_i915_private_t *dev_priv = dev->dev_private; 1005 u32 rpmodectl1, gt_core_status, rcctl1; 1006 unsigned forcewake_count; 1007 int count=0; 1008 1009 if (sx_xlock_sig(&dev->dev_struct_lock)) 1010 return (EINTR); 1011 1012 mtx_lock(&dev_priv->gt_lock); 1013 forcewake_count = dev_priv->forcewake_count; 1014 mtx_unlock(&dev_priv->gt_lock); 1015 1016 if (forcewake_count) { 1017 sbuf_printf(m, "RC information inaccurate because userspace " 1018 "holds a reference \n"); 1019 } else { 1020 /* NB: we cannot use forcewake, else we read the wrong values */ 1021 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1022 DRM_UDELAY(10); 1023 sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1024 } 1025 1026 gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS); 1027 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1028 1029 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1030 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1031 DRM_UNLOCK(dev); 1032 1033 sbuf_printf(m, "Video Turbo Mode: %s\n", 1034 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1035 sbuf_printf(m, "HW control enabled: %s\n", 1036 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1037 sbuf_printf(m, "SW control enabled: %s\n", 1038 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1039 GEN6_RP_MEDIA_SW_MODE)); 1040 sbuf_printf(m, "RC1e Enabled: %s\n", 1041 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1042 sbuf_printf(m, "RC6 Enabled: %s\n", 1043 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1044 sbuf_printf(m, "Deep RC6 Enabled: %s\n", 1045 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1046 sbuf_printf(m, "Deepest RC6 Enabled: %s\n", 1047 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1048 sbuf_printf(m, "Current RC state: "); 1049 switch (gt_core_status & GEN6_RCn_MASK) { 1050 case GEN6_RC0: 1051 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1052 sbuf_printf(m, "Core Power Down\n"); 1053 else 1054 sbuf_printf(m, "on\n"); 1055 break; 1056 case GEN6_RC3: 1057 sbuf_printf(m, "RC3\n"); 1058 break; 1059 case GEN6_RC6: 1060 sbuf_printf(m, "RC6\n"); 1061 break; 1062 case GEN6_RC7: 1063 sbuf_printf(m, "RC7\n"); 1064 break; 1065 default: 1066 sbuf_printf(m, "Unknown\n"); 1067 break; 1068 } 1069 1070 sbuf_printf(m, "Core Power Down: %s\n", 1071 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1072 1073 /* Not exactly sure what this is */ 1074 sbuf_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1075 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1076 sbuf_printf(m, "RC6 residency since boot: %u\n", 1077 I915_READ(GEN6_GT_GFX_RC6)); 1078 sbuf_printf(m, "RC6+ residency since boot: %u\n", 1079 I915_READ(GEN6_GT_GFX_RC6p)); 1080 sbuf_printf(m, "RC6++ residency since boot: %u\n", 1081 I915_READ(GEN6_GT_GFX_RC6pp)); 1082 1083 return 0; 1084} 1085 1086static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused) 1087{ 1088 1089 if (IS_GEN6(dev) || IS_GEN7(dev)) 1090 return (gen6_drpc_info(dev, m)); 1091 else 1092 return (ironlake_drpc_info(dev, m)); 1093} 1094static int 1095i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused) 1096{ 1097 drm_i915_private_t *dev_priv = dev->dev_private; 1098 1099 if (!I915_HAS_FBC(dev)) { 1100 sbuf_printf(m, "FBC unsupported on this chipset"); 1101 return 0; 1102 } 1103 1104 if (intel_fbc_enabled(dev)) { 1105 sbuf_printf(m, "FBC enabled"); 1106 } else { 1107 sbuf_printf(m, "FBC disabled: "); 1108 switch (dev_priv->no_fbc_reason) { 1109 case FBC_NO_OUTPUT: 1110 sbuf_printf(m, "no outputs"); 1111 break; 1112 case FBC_STOLEN_TOO_SMALL: 1113 sbuf_printf(m, "not enough stolen memory"); 1114 break; 1115 case FBC_UNSUPPORTED_MODE: 1116 sbuf_printf(m, "mode not supported"); 1117 break; 1118 case FBC_MODE_TOO_LARGE: 1119 sbuf_printf(m, "mode too large"); 1120 break; 1121 case FBC_BAD_PLANE: 1122 sbuf_printf(m, "FBC unsupported on plane"); 1123 break; 1124 case FBC_NOT_TILED: 1125 sbuf_printf(m, "scanout buffer not tiled"); 1126 break; 1127 case FBC_MULTIPLE_PIPES: 1128 sbuf_printf(m, "multiple pipes are enabled"); 1129 break; 1130 default: 1131 sbuf_printf(m, "unknown reason"); 1132 } 1133 } 1134 return 0; 1135} 1136 1137static int 1138i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused) 1139{ 1140 drm_i915_private_t *dev_priv = dev->dev_private; 1141 bool sr_enabled = false; 1142 1143 if (HAS_PCH_SPLIT(dev)) 1144 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1145 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1146 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1147 else if (IS_I915GM(dev)) 1148 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1149 else if (IS_PINEVIEW(dev)) 1150 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1151 1152 sbuf_printf(m, "self-refresh: %s", 1153 sr_enabled ? "enabled" : "disabled"); 1154 1155 return (0); 1156} 1157 1158static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m, 1159 void *unused) 1160{ 1161 drm_i915_private_t *dev_priv = dev->dev_private; 1162 int gpu_freq, ia_freq; 1163 1164 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1165 sbuf_printf(m, "unsupported on this chipset"); 1166 return (0); 1167 } 1168 1169 if (sx_xlock_sig(&dev->dev_struct_lock)) 1170 return (EINTR); 1171 1172 sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1173 1174 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1175 gpu_freq++) { 1176 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1177 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1178 GEN6_PCODE_READ_MIN_FREQ_TABLE); 1179 if (_intel_wait_for(dev, 1180 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 1181 10, 1, "915frq")) { 1182 DRM_ERROR("pcode read of freq table timed out\n"); 1183 continue; 1184 } 1185 ia_freq = I915_READ(GEN6_PCODE_DATA); 1186 sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1187 } 1188 1189 DRM_UNLOCK(dev); 1190 1191 return (0); 1192} 1193 1194static int 1195i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused) 1196{ 1197 drm_i915_private_t *dev_priv = dev->dev_private; 1198 unsigned long temp, chipset, gfx; 1199 1200 if (!IS_GEN5(dev)) { 1201 sbuf_printf(m, "Not supported\n"); 1202 return (0); 1203 } 1204 1205 if (sx_xlock_sig(&dev->dev_struct_lock)) 1206 return (EINTR); 1207 temp = i915_mch_val(dev_priv); 1208 chipset = i915_chipset_val(dev_priv); 1209 gfx = i915_gfx_val(dev_priv); 1210 DRM_UNLOCK(dev); 1211 1212 sbuf_printf(m, "GMCH temp: %ld\n", temp); 1213 sbuf_printf(m, "Chipset power: %ld\n", chipset); 1214 sbuf_printf(m, "GFX power: %ld\n", gfx); 1215 sbuf_printf(m, "Total power: %ld\n", chipset + gfx); 1216 1217 return (0); 1218} 1219 1220static int 1221i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused) 1222{ 1223 drm_i915_private_t *dev_priv = dev->dev_private; 1224 1225 if (sx_xlock_sig(&dev->dev_struct_lock)) 1226 return (EINTR); 1227 sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1228 DRM_UNLOCK(dev); 1229 1230 return (0); 1231} 1232 1233#if 0 1234static int 1235i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused) 1236{ 1237 drm_i915_private_t *dev_priv = dev->dev_private; 1238 struct intel_opregion *opregion = &dev_priv->opregion; 1239 1240 if (sx_xlock_sig(&dev->dev_struct_lock)) 1241 return (EINTR); 1242 if (opregion->header) 1243 seq_write(m, opregion->header, OPREGION_SIZE); 1244 DRM_UNLOCK(dev); 1245 1246 return 0; 1247} 1248#endif 1249 1250static int 1251i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data) 1252{ 1253 drm_i915_private_t *dev_priv = dev->dev_private; 1254 struct intel_fbdev *ifbdev; 1255 struct intel_framebuffer *fb; 1256 1257 if (sx_xlock_sig(&dev->dev_struct_lock)) 1258 return (EINTR); 1259 1260 ifbdev = dev_priv->fbdev; 1261 if (ifbdev == NULL) { 1262 DRM_UNLOCK(dev); 1263 return (0); 1264 } 1265 fb = to_intel_framebuffer(ifbdev->helper.fb); 1266 1267 sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1268 fb->base.width, 1269 fb->base.height, 1270 fb->base.depth, 1271 fb->base.bits_per_pixel); 1272 describe_obj(m, fb->obj); 1273 sbuf_printf(m, "\n"); 1274 1275 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1276 if (&fb->base == ifbdev->helper.fb) 1277 continue; 1278 1279 sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1280 fb->base.width, 1281 fb->base.height, 1282 fb->base.depth, 1283 fb->base.bits_per_pixel); 1284 describe_obj(m, fb->obj); 1285 sbuf_printf(m, "\n"); 1286 } 1287 1288 DRM_UNLOCK(dev); 1289 1290 return (0); 1291} 1292 1293static int 1294i915_context_status(struct drm_device *dev, struct sbuf *m, void *data) 1295{ 1296 drm_i915_private_t *dev_priv; 1297 int ret; 1298 1299 if ((dev->driver->driver_features & DRIVER_MODESET) == 0) 1300 return (0); 1301 1302 dev_priv = dev->dev_private; 1303 ret = sx_xlock_sig(&dev->mode_config.mutex); 1304 if (ret != 0) 1305 return (EINTR); 1306 1307 if (dev_priv->pwrctx != NULL) { 1308 sbuf_printf(m, "power context "); 1309 describe_obj(m, dev_priv->pwrctx); 1310 sbuf_printf(m, "\n"); 1311 } 1312 1313 if (dev_priv->renderctx != NULL) { 1314 sbuf_printf(m, "render context "); 1315 describe_obj(m, dev_priv->renderctx); 1316 sbuf_printf(m, "\n"); 1317 } 1318 1319 sx_xunlock(&dev->mode_config.mutex); 1320 1321 return (0); 1322} 1323 1324static int 1325i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m, 1326 void *data) 1327{ 1328 struct drm_i915_private *dev_priv; 1329 unsigned forcewake_count; 1330 1331 dev_priv = dev->dev_private; 1332 mtx_lock(&dev_priv->gt_lock); 1333 forcewake_count = dev_priv->forcewake_count; 1334 mtx_unlock(&dev_priv->gt_lock); 1335 1336 sbuf_printf(m, "forcewake count = %u\n", forcewake_count); 1337 1338 return (0); 1339} 1340 1341static const char * 1342swizzle_string(unsigned swizzle) 1343{ 1344 1345 switch(swizzle) { 1346 case I915_BIT_6_SWIZZLE_NONE: 1347 return "none"; 1348 case I915_BIT_6_SWIZZLE_9: 1349 return "bit9"; 1350 case I915_BIT_6_SWIZZLE_9_10: 1351 return "bit9/bit10"; 1352 case I915_BIT_6_SWIZZLE_9_11: 1353 return "bit9/bit11"; 1354 case I915_BIT_6_SWIZZLE_9_10_11: 1355 return "bit9/bit10/bit11"; 1356 case I915_BIT_6_SWIZZLE_9_17: 1357 return "bit9/bit17"; 1358 case I915_BIT_6_SWIZZLE_9_10_17: 1359 return "bit9/bit10/bit17"; 1360 case I915_BIT_6_SWIZZLE_UNKNOWN: 1361 return "unknown"; 1362 } 1363 1364 return "bug"; 1365} 1366 1367static int 1368i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data) 1369{ 1370 struct drm_i915_private *dev_priv; 1371 int ret; 1372 1373 dev_priv = dev->dev_private; 1374 ret = sx_xlock_sig(&dev->dev_struct_lock); 1375 if (ret != 0) 1376 return (EINTR); 1377 1378 sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n", 1379 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1380 sbuf_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1381 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1382 1383 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1384 sbuf_printf(m, "DDC = 0x%08x\n", 1385 I915_READ(DCC)); 1386 sbuf_printf(m, "C0DRB3 = 0x%04x\n", 1387 I915_READ16(C0DRB3)); 1388 sbuf_printf(m, "C1DRB3 = 0x%04x\n", 1389 I915_READ16(C1DRB3)); 1390 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1391 sbuf_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1392 I915_READ(MAD_DIMM_C0)); 1393 sbuf_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1394 I915_READ(MAD_DIMM_C1)); 1395 sbuf_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1396 I915_READ(MAD_DIMM_C2)); 1397 sbuf_printf(m, "TILECTL = 0x%08x\n", 1398 I915_READ(TILECTL)); 1399 sbuf_printf(m, "ARB_MODE = 0x%08x\n", 1400 I915_READ(ARB_MODE)); 1401 sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1402 I915_READ(DISP_ARB_CTL)); 1403 } 1404 DRM_UNLOCK(dev); 1405 1406 return (0); 1407} 1408 1409static int 1410i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data) 1411{ 1412 struct drm_i915_private *dev_priv; 1413 struct intel_ring_buffer *ring; 1414 int i, ret; 1415 1416 dev_priv = dev->dev_private; 1417 1418 ret = sx_xlock_sig(&dev->dev_struct_lock); 1419 if (ret != 0) 1420 return (EINTR); 1421 if (INTEL_INFO(dev)->gen == 6) 1422 sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1423 1424 for (i = 0; i < I915_NUM_RINGS; i++) { 1425 ring = &dev_priv->rings[i]; 1426 1427 sbuf_printf(m, "%s\n", ring->name); 1428 if (INTEL_INFO(dev)->gen == 7) 1429 sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1430 sbuf_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1431 sbuf_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1432 sbuf_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1433 } 1434 if (dev_priv->mm.aliasing_ppgtt) { 1435 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1436 1437 sbuf_printf(m, "aliasing PPGTT:\n"); 1438 sbuf_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1439 } 1440 sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1441 DRM_UNLOCK(dev); 1442 1443 return (0); 1444} 1445 1446static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data) 1447{ 1448 struct drm_i915_private *dev_priv; 1449 int ret; 1450 1451 if (!IS_VALLEYVIEW(dev)) { 1452 sbuf_printf(m, "unsupported\n"); 1453 return 0; 1454 } 1455 1456 dev_priv = dev->dev_private; 1457 1458 ret = sx_xlock_sig(&dev->mode_config.mutex); 1459 if (ret != 0) 1460 return (EINTR); 1461 1462 sbuf_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1463 1464 sbuf_printf(m, "DPIO_DIV_A: 0x%08x\n", 1465 intel_dpio_read(dev_priv, _DPIO_DIV_A)); 1466 sbuf_printf(m, "DPIO_DIV_B: 0x%08x\n", 1467 intel_dpio_read(dev_priv, _DPIO_DIV_B)); 1468 1469 sbuf_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1470 intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1471 sbuf_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1472 intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1473 1474 sbuf_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1475 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1476 sbuf_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1477 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1478 1479 sbuf_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", 1480 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); 1481 sbuf_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", 1482 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); 1483 1484 sbuf_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1485 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1486 1487 sx_xunlock(&dev->mode_config.mutex); 1488 1489 return 0; 1490} 1491 1492static int 1493i915_debug_set_wedged(SYSCTL_HANDLER_ARGS) 1494{ 1495 struct drm_device *dev; 1496 drm_i915_private_t *dev_priv; 1497 int error, wedged; 1498 1499 dev = arg1; 1500 dev_priv = dev->dev_private; 1501 if (dev_priv == NULL) 1502 return (EBUSY); 1503 wedged = dev_priv->mm.wedged; 1504 error = sysctl_handle_int(oidp, &wedged, 0, req); 1505 if (error || !req->newptr) 1506 return (error); 1507 DRM_INFO("Manually setting wedged to %d\n", wedged); 1508 i915_handle_error(dev, wedged); 1509 return (error); 1510} 1511 1512static int 1513i915_max_freq(SYSCTL_HANDLER_ARGS) 1514{ 1515 struct drm_device *dev; 1516 drm_i915_private_t *dev_priv; 1517 int error, max_freq; 1518 1519 dev = arg1; 1520 dev_priv = dev->dev_private; 1521 if (dev_priv == NULL) 1522 return (EBUSY); 1523 max_freq = dev_priv->max_delay * 50; 1524 error = sysctl_handle_int(oidp, &max_freq, 0, req); 1525 if (error || !req->newptr) 1526 return (error); 1527 DRM_DEBUG("Manually setting max freq to %d\n", max_freq); 1528 /* 1529 * Turbo will still be enabled, but won't go above the set value. 1530 */ 1531 dev_priv->max_delay = max_freq / 50; 1532 gen6_set_rps(dev, max_freq / 50); 1533 return (error); 1534} 1535 1536static int 1537i915_cache_sharing(SYSCTL_HANDLER_ARGS) 1538{ 1539 struct drm_device *dev; 1540 drm_i915_private_t *dev_priv; 1541 int error, snpcr, cache_sharing; 1542 1543 dev = arg1; 1544 dev_priv = dev->dev_private; 1545 if (dev_priv == NULL) 1546 return (EBUSY); 1547 DRM_LOCK(dev); 1548 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1549 DRM_UNLOCK(dev); 1550 cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 1551 error = sysctl_handle_int(oidp, &cache_sharing, 0, req); 1552 if (error || !req->newptr) 1553 return (error); 1554 if (cache_sharing < 0 || cache_sharing > 3) 1555 return (EINVAL); 1556 DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing); 1557 1558 DRM_LOCK(dev); 1559 /* Update the cache sharing policy here as well */ 1560 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1561 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1562 snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT); 1563 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1564 DRM_UNLOCK(dev); 1565 return (0); 1566} 1567 1568static int 1569i915_stop_rings(SYSCTL_HANDLER_ARGS) 1570{ 1571 struct drm_device *dev; 1572 drm_i915_private_t *dev_priv; 1573 int error, val; 1574 1575 dev = arg1; 1576 dev_priv = dev->dev_private; 1577 if (dev_priv == NULL) 1578 return (EBUSY); 1579 DRM_LOCK(dev); 1580 val = dev_priv->stop_rings; 1581 DRM_UNLOCK(dev); 1582 error = sysctl_handle_int(oidp, &val, 0, req); 1583 if (error || !req->newptr) 1584 return (error); 1585 DRM_DEBUG("Stopping rings 0x%08x\n", val); 1586 1587 DRM_LOCK(dev); 1588 dev_priv->stop_rings = val; 1589 DRM_UNLOCK(dev); 1590 return (0); 1591} 1592 1593static struct i915_info_sysctl_list { 1594 const char *name; 1595 int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data); 1596 int (*ptr_w)(struct drm_device *dev, const char *str, void *data); 1597 int flags; 1598 void *data; 1599} i915_info_sysctl_list[] = { 1600 {"i915_capabilities", i915_capabilities, NULL, 0}, 1601 {"i915_gem_objects", i915_gem_object_info, NULL, 0}, 1602 {"i915_gem_gtt", i915_gem_gtt_info, NULL, 0}, 1603 {"i915_gem_pinned", i915_gem_gtt_info, NULL, 0, (void *)PINNED_LIST}, 1604 {"i915_gem_active", i915_gem_object_list_info, NULL, 0, 1605 (void *)ACTIVE_LIST}, 1606 {"i915_gem_flushing", i915_gem_object_list_info, NULL, 0, 1607 (void *)FLUSHING_LIST}, 1608 {"i915_gem_inactive", i915_gem_object_list_info, NULL, 0, 1609 (void *)INACTIVE_LIST}, 1610 {"i915_gem_pageflip", i915_gem_pageflip_info, NULL, 0}, 1611 {"i915_gem_request", i915_gem_request_info, NULL, 0}, 1612 {"i915_gem_seqno", i915_gem_seqno_info, NULL, 0}, 1613 {"i915_gem_fence_regs", i915_gem_fence_regs_info, NULL, 0}, 1614 {"i915_gem_interrupt", i915_interrupt_info, NULL, 0}, 1615 {"i915_gem_hws", i915_hws_info, NULL, 0, (void *)RCS}, 1616 {"i915_gem_hws_blt", i915_hws_info, NULL, 0, (void *)BCS}, 1617 {"i915_gem_hws_bsd", i915_hws_info, NULL, 0, (void *)VCS}, 1618 {"i915_error_state", i915_error_state, i915_error_state_w, 0}, 1619 {"i915_rstdby_delays", i915_rstdby_delays, NULL, 0}, 1620 {"i915_cur_delayinfo", i915_cur_delayinfo, NULL, 0}, 1621 {"i915_delayfreq_table", i915_delayfreq_table, NULL, 0}, 1622 {"i915_inttoext_table", i915_inttoext_table, NULL, 0}, 1623 {"i915_drpc_info", i915_drpc_info, NULL, 0}, 1624 {"i915_emon_status", i915_emon_status, NULL, 0}, 1625 {"i915_ring_freq_table", i915_ring_freq_table, NULL, 0}, 1626 {"i915_gfxec", i915_gfxec, NULL, 0}, 1627 {"i915_fbc_status", i915_fbc_status, NULL, 0}, 1628 {"i915_sr_status", i915_sr_status, NULL, 0}, 1629#if 0 1630 {"i915_opregion", i915_opregion, NULL, 0}, 1631#endif 1632 {"i915_gem_framebuffer", i915_gem_framebuffer_info, NULL, 0}, 1633 {"i915_context_status", i915_context_status, NULL, 0}, 1634 {"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 1635 NULL, 0}, 1636 {"i915_swizzle_info", i915_swizzle_info, NULL, 0}, 1637 {"i915_ppgtt_info", i915_ppgtt_info, NULL, 0}, 1638 {"i915_dpio", i915_dpio_info, NULL, 0}, 1639}; 1640 1641struct i915_info_sysctl_thunk { 1642 struct drm_device *dev; 1643 int idx; 1644 void *arg; 1645}; 1646 1647static int 1648i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS) 1649{ 1650 struct sbuf m; 1651 struct i915_info_sysctl_thunk *thunk; 1652 struct drm_device *dev; 1653 drm_i915_private_t *dev_priv; 1654 char *p; 1655 int error; 1656 1657 thunk = arg1; 1658 dev = thunk->dev; 1659 dev_priv = dev->dev_private; 1660 if (dev_priv == NULL) 1661 return (EBUSY); 1662 error = sysctl_wire_old_buffer(req, 0); 1663 if (error != 0) 1664 return (error); 1665 sbuf_new_for_sysctl(&m, NULL, 128, req); 1666 error = i915_info_sysctl_list[thunk->idx].ptr(dev, &m, 1667 thunk->arg); 1668 if (error == 0) 1669 error = sbuf_finish(&m); 1670 sbuf_delete(&m); 1671 if (error != 0 || req->newptr == NULL) 1672 return (error); 1673 if (req->newlen > 2048) 1674 return (E2BIG); 1675 p = malloc(req->newlen + 1, M_TEMP, M_WAITOK); 1676 error = SYSCTL_IN(req, p, req->newlen); 1677 if (error != 0) 1678 goto out; 1679 p[req->newlen] = '\0'; 1680 error = i915_info_sysctl_list[thunk->idx].ptr_w(dev, p, 1681 thunk->arg); 1682out: 1683 free(p, M_TEMP); 1684 return (error); 1685} 1686 1687extern int i915_gem_sync_exec_requests; 1688extern int i915_fix_mi_batchbuffer_end; 1689extern int i915_intr_pf; 1690extern long i915_gem_wired_pages_cnt; 1691 1692int 1693i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, 1694 struct sysctl_oid *top) 1695{ 1696 struct sysctl_oid *oid, *info; 1697 struct i915_info_sysctl_thunk *thunks; 1698 int i, error; 1699 1700 thunks = malloc(sizeof(*thunks) * DRM_ARRAY_SIZE(i915_info_sysctl_list), 1701 DRM_MEM_DRIVER, M_WAITOK | M_ZERO); 1702 for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) { 1703 thunks[i].dev = dev; 1704 thunks[i].idx = i; 1705 thunks[i].arg = i915_info_sysctl_list[i].data; 1706 } 1707 dev->sysctl_private = thunks; 1708 info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info", 1709 CTLFLAG_RW, NULL, NULL); 1710 if (info == NULL) 1711 return (ENOMEM); 1712 for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) { 1713 oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO, 1714 i915_info_sysctl_list[i].name, CTLTYPE_STRING | 1715 (i915_info_sysctl_list[i].ptr_w != NULL ? CTLFLAG_RW : 1716 CTLFLAG_RD), 1717 &thunks[i], 0, i915_info_sysctl_handler, "A", NULL); 1718 if (oid == NULL) 1719 return (ENOMEM); 1720 } 1721 oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO, 1722 "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt, 1723 NULL); 1724 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged", 1725 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, 1726 i915_debug_set_wedged, "I", NULL); 1727 if (oid == NULL) 1728 return (ENOMEM); 1729 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq", 1730 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq, 1731 "I", NULL); 1732 if (oid == NULL) 1733 return (ENOMEM); 1734 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, 1735 "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 1736 0, i915_cache_sharing, "I", NULL); 1737 if (oid == NULL) 1738 return (ENOMEM); 1739 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, 1740 "stop_rings", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 1741 0, i915_stop_rings, "I", NULL); 1742 if (oid == NULL) 1743 return (ENOMEM); 1744 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec", 1745 CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL); 1746 if (oid == NULL) 1747 return (ENOMEM); 1748 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi", 1749 CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL); 1750 if (oid == NULL) 1751 return (ENOMEM); 1752 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf", 1753 CTLFLAG_RW, &i915_intr_pf, 0, NULL); 1754 if (oid == NULL) 1755 return (ENOMEM); 1756 1757 error = drm_add_busid_modesetting(dev, ctx, top); 1758 if (error != 0) 1759 return (error); 1760 1761 return (0); 1762} 1763 1764void 1765i915_sysctl_cleanup(struct drm_device *dev) 1766{ 1767 1768 free(dev->sysctl_private, DRM_MEM_DRIVER); 1769} 1770