i915_debug.c revision 302408
1/* 2 * Copyright �� 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: stable/11/sys/dev/drm2/i915/i915_debug.c 296548 2016-03-08 20:33:02Z dumbbell $"); 31 32#include <dev/drm2/drmP.h> 33#include <dev/drm2/drm.h> 34#include <dev/drm2/i915/i915_drm.h> 35#include <dev/drm2/i915/i915_drv.h> 36#include <dev/drm2/i915/intel_drv.h> 37#include <dev/drm2/i915/intel_ringbuffer.h> 38 39#include <sys/sysctl.h> 40 41#define seq_printf(m, fmt, ...) sbuf_printf((m), (fmt), ##__VA_ARGS__) 42 43//#if defined(CONFIG_DEBUG_FS) 44 45enum { 46 ACTIVE_LIST, 47 INACTIVE_LIST, 48 PINNED_LIST, 49}; 50 51static const char *yesno(int v) 52{ 53 return v ? 
"yes" : "no"; 54} 55 56static int i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data) 57{ 58 const struct intel_device_info *info = INTEL_INFO(dev); 59 60 seq_printf(m, "gen: %d\n", info->gen); 61 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 62#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 63#define DEV_INFO_SEP ; 64 DEV_INFO_FLAGS; 65#undef DEV_INFO_FLAG 66#undef DEV_INFO_SEP 67 68 return 0; 69} 70 71static const char *get_pin_flag(struct drm_i915_gem_object *obj) 72{ 73 if (obj->user_pin_count > 0) 74 return "P"; 75 else if (obj->pin_count > 0) 76 return "p"; 77 else 78 return " "; 79} 80 81static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 82{ 83 switch (obj->tiling_mode) { 84 default: 85 case I915_TILING_NONE: return " "; 86 case I915_TILING_X: return "X"; 87 case I915_TILING_Y: return "Y"; 88 } 89} 90 91static const char *cache_level_str(int type) 92{ 93 switch (type) { 94 case I915_CACHE_NONE: return " uncached"; 95 case I915_CACHE_LLC: return " snooped (LLC)"; 96 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 97 default: return ""; 98 } 99} 100 101static void 102describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj) 103{ 104 seq_printf(m, "%pK: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", 105 &obj->base, 106 get_pin_flag(obj), 107 get_tiling_flag(obj), 108 obj->base.size / 1024, 109 obj->base.read_domains, 110 obj->base.write_domain, 111 obj->last_read_seqno, 112 obj->last_write_seqno, 113 obj->last_fenced_seqno, 114 cache_level_str(obj->cache_level), 115 obj->dirty ? " dirty" : "", 116 obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); 117 if (obj->base.name) 118 seq_printf(m, " (name: %d)", obj->base.name); 119 if (obj->pin_count) 120 seq_printf(m, " (pinned x %d)", obj->pin_count); 121 if (obj->pin_display) 122 seq_printf(m, " (display)"); 123 if (obj->fence_reg != I915_FENCE_REG_NONE) 124 seq_printf(m, " (fence: %d)", obj->fence_reg); 125 if (obj->gtt_space != NULL) 126 seq_printf(m, " (gtt offset: %08x, size: %08x)", 127 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 128 if (obj->pin_mappable || obj->fault_mappable) { 129 char s[3], *t = s; 130 if (obj->pin_mappable) 131 *t++ = 'p'; 132 if (obj->fault_mappable) 133 *t++ = 'f'; 134 *t = '\0'; 135 seq_printf(m, " (%s mappable)", s); 136 } 137 if (obj->ring != NULL) 138 seq_printf(m, " (%s)", obj->ring->name); 139} 140 141static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data) 142{ 143 uintptr_t list = (uintptr_t)data; 144 struct list_head *head; 145 drm_i915_private_t *dev_priv = dev->dev_private; 146 struct drm_i915_gem_object *obj; 147 size_t total_obj_size, total_gtt_size; 148 int count; 149 150 if (sx_xlock_sig(&dev->dev_struct_lock)) 151 return -EINTR; 152 153 switch (list) { 154 case ACTIVE_LIST: 155 seq_printf(m, "Active:\n"); 156 head = &dev_priv->mm.active_list; 157 break; 158 case INACTIVE_LIST: 159 seq_printf(m, "Inactive:\n"); 160 head = &dev_priv->mm.inactive_list; 161 break; 162 default: 163 DRM_UNLOCK(dev); 164 return -EINVAL; 165 } 166 167 total_obj_size = total_gtt_size = count = 0; 168 list_for_each_entry(obj, head, mm_list) { 169 seq_printf(m, " "); 170 describe_obj(m, obj); 171 seq_printf(m, "\n"); 172 total_obj_size += obj->base.size; 173 total_gtt_size += obj->gtt_space->size; 174 count++; 175 } 176 DRM_UNLOCK(dev); 177 178 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 179 count, total_obj_size, total_gtt_size); 180 return 0; 181} 182 183#define count_objects(list, member) do { \ 184 list_for_each_entry(obj, list, member) { \ 185 size += 
obj->gtt_space->size; \ 186 ++count; \ 187 if (obj->map_and_fenceable) { \ 188 mappable_size += obj->gtt_space->size; \ 189 ++mappable_count; \ 190 } \ 191 } \ 192} while (0) 193 194static int i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data) 195{ 196 struct drm_i915_private *dev_priv = dev->dev_private; 197 u32 count, mappable_count, purgeable_count; 198 size_t size, mappable_size, purgeable_size; 199 struct drm_i915_gem_object *obj; 200 201 if (sx_xlock_sig(&dev->dev_struct_lock)) 202 return -EINTR; 203 204 seq_printf(m, "%u objects, %zu bytes\n", 205 dev_priv->mm.object_count, 206 dev_priv->mm.object_memory); 207 208 size = count = mappable_size = mappable_count = 0; 209 count_objects(&dev_priv->mm.bound_list, gtt_list); 210 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 211 count, mappable_count, size, mappable_size); 212 213 size = count = mappable_size = mappable_count = 0; 214 count_objects(&dev_priv->mm.active_list, mm_list); 215 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 216 count, mappable_count, size, mappable_size); 217 218 size = count = mappable_size = mappable_count = 0; 219 count_objects(&dev_priv->mm.inactive_list, mm_list); 220 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 221 count, mappable_count, size, mappable_size); 222 223 size = count = purgeable_size = purgeable_count = 0; 224 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { 225 size += obj->base.size, ++count; 226 if (obj->madv == I915_MADV_DONTNEED) 227 purgeable_size += obj->base.size, ++purgeable_count; 228 } 229 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 230 231 size = count = mappable_size = mappable_count = 0; 232 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 233 if (obj->fault_mappable) { 234 size += obj->gtt_space->size; 235 ++count; 236 } 237 if (obj->pin_mappable) { 238 mappable_size += obj->gtt_space->size; 239 ++mappable_count; 240 } 241 if (obj->madv == 
I915_MADV_DONTNEED) { 242 purgeable_size += obj->base.size; 243 ++purgeable_count; 244 } 245 } 246 seq_printf(m, "%u purgeable objects, %zu bytes\n", 247 purgeable_count, purgeable_size); 248 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 249 mappable_count, mappable_size); 250 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 251 count, size); 252 253 seq_printf(m, "%zu [%zu] gtt total\n", 254 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 255 256 DRM_UNLOCK(dev); 257 258 return 0; 259} 260 261static int i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data) 262{ 263 struct drm_i915_private *dev_priv = dev->dev_private; 264 uintptr_t list = (uintptr_t)data; 265 struct drm_i915_gem_object *obj; 266 size_t total_obj_size, total_gtt_size; 267 int count; 268 269 if (sx_xlock_sig(&dev->dev_struct_lock)) 270 return -EINTR; 271 272 total_obj_size = total_gtt_size = count = 0; 273 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 274 if (list == PINNED_LIST && obj->pin_count == 0) 275 continue; 276 277 seq_printf(m, " "); 278 describe_obj(m, obj); 279 seq_printf(m, "\n"); 280 total_obj_size += obj->base.size; 281 total_gtt_size += obj->gtt_space->size; 282 count++; 283 } 284 285 DRM_UNLOCK(dev); 286 287 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 288 count, total_obj_size, total_gtt_size); 289 290 return 0; 291} 292 293static int i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data) 294{ 295 struct intel_crtc *crtc; 296 297 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 298 const char pipe = pipe_name(crtc->pipe); 299 const char plane = plane_name(crtc->plane); 300 struct intel_unpin_work *work; 301 302 mtx_lock(&dev->event_lock); 303 work = crtc->unpin_work; 304 if (work == NULL) { 305 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 306 pipe, plane); 307 } else { 308 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 309 seq_printf(m, "Flip queued 
on pipe %c (plane %c)\n", 310 pipe, plane); 311 } else { 312 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 313 pipe, plane); 314 } 315 if (work->enable_stall_check) 316 seq_printf(m, "Stall check enabled, "); 317 else 318 seq_printf(m, "Stall check waiting for page flip ioctl, "); 319 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 320 321 if (work->old_fb_obj) { 322 struct drm_i915_gem_object *obj = work->old_fb_obj; 323 if (obj) 324 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 325 } 326 if (work->pending_flip_obj) { 327 struct drm_i915_gem_object *obj = work->pending_flip_obj; 328 if (obj) 329 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 330 } 331 } 332 mtx_unlock(&dev->event_lock); 333 } 334 335 return 0; 336} 337 338static int i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data) 339{ 340 drm_i915_private_t *dev_priv = dev->dev_private; 341 struct intel_ring_buffer *ring; 342 struct drm_i915_gem_request *gem_request; 343 int count, i; 344 345 if (sx_xlock_sig(&dev->dev_struct_lock)) 346 return -EINTR; 347 348 count = 0; 349 for_each_ring(ring, dev_priv, i) { 350 if (list_empty(&ring->request_list)) 351 continue; 352 353 seq_printf(m, "%s requests:\n", ring->name); 354 list_for_each_entry(gem_request, 355 &ring->request_list, 356 list) { 357 seq_printf(m, " %d @ %d\n", 358 gem_request->seqno, 359 (int) (jiffies - gem_request->emitted_jiffies)); 360 } 361 count++; 362 } 363 DRM_UNLOCK(dev); 364 365 if (count == 0) 366 seq_printf(m, "No requests\n"); 367 368 return 0; 369} 370 371static void i915_ring_seqno_info(struct sbuf *m, 372 struct intel_ring_buffer *ring) 373{ 374 if (ring->get_seqno) { 375 seq_printf(m, "Current sequence (%s): %d\n", 376 ring->name, ring->get_seqno(ring, false)); 377 } 378} 379 380static int i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data) 381{ 382 drm_i915_private_t *dev_priv = dev->dev_private; 383 
struct intel_ring_buffer *ring; 384 int i; 385 386 if (sx_xlock_sig(&dev->dev_struct_lock)) 387 return -EINTR; 388 389 for_each_ring(ring, dev_priv, i) 390 i915_ring_seqno_info(m, ring); 391 392 DRM_UNLOCK(dev); 393 394 return 0; 395} 396 397 398static int i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data) 399{ 400 drm_i915_private_t *dev_priv = dev->dev_private; 401 struct intel_ring_buffer *ring; 402 int i, pipe; 403 404 if (sx_xlock_sig(&dev->dev_struct_lock)) 405 return -EINTR; 406 407 if (IS_VALLEYVIEW(dev)) { 408 seq_printf(m, "Display IER:\t%08x\n", 409 I915_READ(VLV_IER)); 410 seq_printf(m, "Display IIR:\t%08x\n", 411 I915_READ(VLV_IIR)); 412 seq_printf(m, "Display IIR_RW:\t%08x\n", 413 I915_READ(VLV_IIR_RW)); 414 seq_printf(m, "Display IMR:\t%08x\n", 415 I915_READ(VLV_IMR)); 416 for_each_pipe(pipe) 417 seq_printf(m, "Pipe %c stat:\t%08x\n", 418 pipe_name(pipe), 419 I915_READ(PIPESTAT(pipe))); 420 421 seq_printf(m, "Master IER:\t%08x\n", 422 I915_READ(VLV_MASTER_IER)); 423 424 seq_printf(m, "Render IER:\t%08x\n", 425 I915_READ(GTIER)); 426 seq_printf(m, "Render IIR:\t%08x\n", 427 I915_READ(GTIIR)); 428 seq_printf(m, "Render IMR:\t%08x\n", 429 I915_READ(GTIMR)); 430 431 seq_printf(m, "PM IER:\t\t%08x\n", 432 I915_READ(GEN6_PMIER)); 433 seq_printf(m, "PM IIR:\t\t%08x\n", 434 I915_READ(GEN6_PMIIR)); 435 seq_printf(m, "PM IMR:\t\t%08x\n", 436 I915_READ(GEN6_PMIMR)); 437 438 seq_printf(m, "Port hotplug:\t%08x\n", 439 I915_READ(PORT_HOTPLUG_EN)); 440 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 441 I915_READ(VLV_DPFLIPSTAT)); 442 seq_printf(m, "DPINVGTT:\t%08x\n", 443 I915_READ(DPINVGTT)); 444 445 } else if (!HAS_PCH_SPLIT(dev)) { 446 seq_printf(m, "Interrupt enable: %08x\n", 447 I915_READ(IER)); 448 seq_printf(m, "Interrupt identity: %08x\n", 449 I915_READ(IIR)); 450 seq_printf(m, "Interrupt mask: %08x\n", 451 I915_READ(IMR)); 452 for_each_pipe(pipe) 453 seq_printf(m, "Pipe %c stat: %08x\n", 454 pipe_name(pipe), 455 I915_READ(PIPESTAT(pipe))); 456 } 
else { 457 seq_printf(m, "North Display Interrupt enable: %08x\n", 458 I915_READ(DEIER)); 459 seq_printf(m, "North Display Interrupt identity: %08x\n", 460 I915_READ(DEIIR)); 461 seq_printf(m, "North Display Interrupt mask: %08x\n", 462 I915_READ(DEIMR)); 463 seq_printf(m, "South Display Interrupt enable: %08x\n", 464 I915_READ(SDEIER)); 465 seq_printf(m, "South Display Interrupt identity: %08x\n", 466 I915_READ(SDEIIR)); 467 seq_printf(m, "South Display Interrupt mask: %08x\n", 468 I915_READ(SDEIMR)); 469 seq_printf(m, "Graphics Interrupt enable: %08x\n", 470 I915_READ(GTIER)); 471 seq_printf(m, "Graphics Interrupt identity: %08x\n", 472 I915_READ(GTIIR)); 473 seq_printf(m, "Graphics Interrupt mask: %08x\n", 474 I915_READ(GTIMR)); 475 } 476 seq_printf(m, "Interrupts received: %d\n", 477 atomic_read(&dev_priv->irq_received)); 478 for_each_ring(ring, dev_priv, i) { 479 if (IS_GEN6(dev) || IS_GEN7(dev)) { 480 seq_printf(m, 481 "Graphics Interrupt mask (%s): %08x\n", 482 ring->name, I915_READ_IMR(ring)); 483 } 484 i915_ring_seqno_info(m, ring); 485 } 486 DRM_UNLOCK(dev); 487 488 return 0; 489} 490 491static int i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data) 492{ 493 drm_i915_private_t *dev_priv = dev->dev_private; 494 int i; 495 496 if (sx_xlock_sig(&dev->dev_struct_lock)) 497 return -EINTR; 498 499 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 500 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 501 for (i = 0; i < dev_priv->num_fence_regs; i++) { 502 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 503 504 seq_printf(m, "Fence %d, pin count = %d, object = ", 505 i, dev_priv->fence_regs[i].pin_count); 506 if (obj == NULL) 507 seq_printf(m, "unused"); 508 else 509 describe_obj(m, obj); 510 seq_printf(m, "\n"); 511 } 512 513 DRM_UNLOCK(dev); 514 return 0; 515} 516 517static int i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data) 518{ 519 drm_i915_private_t *dev_priv = 
dev->dev_private; 520 struct intel_ring_buffer *ring; 521 const volatile u32 __iomem *hws; 522 int i; 523 524 ring = &dev_priv->ring[(uintptr_t)data]; 525 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 526 if (hws == NULL) 527 return 0; 528 529 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 530 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 531 i * 4, 532 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 533 } 534 return 0; 535} 536 537static const char *ring_str(int ring) 538{ 539 switch (ring) { 540 case RCS: return "render"; 541 case VCS: return "bsd"; 542 case BCS: return "blt"; 543 default: return ""; 544 } 545} 546 547static const char *pin_flag(int pinned) 548{ 549 if (pinned > 0) 550 return " P"; 551 else if (pinned < 0) 552 return " p"; 553 else 554 return ""; 555} 556 557static const char *tiling_flag(int tiling) 558{ 559 switch (tiling) { 560 default: 561 case I915_TILING_NONE: return ""; 562 case I915_TILING_X: return " X"; 563 case I915_TILING_Y: return " Y"; 564 } 565} 566 567static const char *dirty_flag(int dirty) 568{ 569 return dirty ? " dirty" : ""; 570} 571 572static const char *purgeable_flag(int purgeable) 573{ 574 return purgeable ? " purgeable" : ""; 575} 576 577static void print_error_buffers(struct sbuf *m, 578 const char *name, 579 struct drm_i915_error_buffer *err, 580 int count) 581{ 582 583 seq_printf(m, "%s [%d]:\n", name, count); 584 585 while (count--) { 586 seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", 587 err->gtt_offset, 588 err->size, 589 err->read_domains, 590 err->write_domain, 591 err->rseqno, err->wseqno, 592 pin_flag(err->pinned), 593 tiling_flag(err->tiling), 594 dirty_flag(err->dirty), 595 purgeable_flag(err->purgeable), 596 err->ring != -1 ? 
" " : "", 597 ring_str(err->ring), 598 cache_level_str(err->cache_level)); 599 600 if (err->name) 601 seq_printf(m, " (name: %d)", err->name); 602 if (err->fence_reg != I915_FENCE_REG_NONE) 603 seq_printf(m, " (fence: %d)", err->fence_reg); 604 605 seq_printf(m, "\n"); 606 err++; 607 } 608} 609 610static void i915_ring_error_state(struct sbuf *m, 611 struct drm_device *dev, 612 struct drm_i915_error_state *error, 613 unsigned ring) 614{ 615 MPASS((ring < I915_NUM_RINGS)); /* shut up confused gcc */ 616 seq_printf(m, "%s command stream:\n", ring_str(ring)); 617 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 618 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 619 seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 620 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 621 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 622 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 623 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 624 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 625 seq_printf(m, " BBADDR: 0x%08jx\n", error->bbaddr); 626 627 if (INTEL_INFO(dev)->gen >= 4) 628 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 629 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 630 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 631 if (INTEL_INFO(dev)->gen >= 6) { 632 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 633 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 634 seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 635 error->semaphore_mboxes[ring][0], 636 error->semaphore_seqno[ring][0]); 637 seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 638 error->semaphore_mboxes[ring][1], 639 error->semaphore_seqno[ring][1]); 640 } 641 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 642 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 643 seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 644 seq_printf(m, " ring->tail: 0x%08x\n", 
error->cpu_ring_tail[ring]); 645} 646 647static int i915_error_state(struct drm_device *dev, struct sbuf *m, 648 void *unused) 649{ 650 drm_i915_private_t *dev_priv = dev->dev_private; 651 struct drm_i915_error_state *error; 652 struct intel_ring_buffer *ring; 653 int i, j, page, offset, elt; 654 655 mtx_lock(&dev_priv->error_lock); 656 error = dev_priv->first_error; 657 if (error != NULL) 658 refcount_acquire(&error->ref); 659 mtx_unlock(&dev_priv->error_lock); 660 661 if (!error) { 662 seq_printf(m, "no error state collected\n"); 663 return 0; 664 } 665 666 seq_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec, 667 (intmax_t)error->time.tv_usec); 668 seq_printf(m, "Kernel: %s\n", version); 669 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 670 seq_printf(m, "EIR: 0x%08x\n", error->eir); 671 seq_printf(m, "IER: 0x%08x\n", error->ier); 672 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 673 seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 674 seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 675 seq_printf(m, "CCID: 0x%08x\n", error->ccid); 676 677 for (i = 0; i < dev_priv->num_fence_regs; i++) 678 seq_printf(m, " fence[%d] = %08jx\n", i, 679 (uintmax_t)error->fence[i]); 680 681 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) 682 seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); 683 684 if (INTEL_INFO(dev)->gen >= 6) { 685 seq_printf(m, "ERROR: 0x%08x\n", error->error); 686 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 687 } 688 689 if (INTEL_INFO(dev)->gen == 7) 690 seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 691 692 for_each_ring(ring, dev_priv, i) 693 i915_ring_error_state(m, dev, error, i); 694 695 if (error->active_bo) 696 print_error_buffers(m, "Active", 697 error->active_bo, 698 error->active_bo_count); 699 700 if (error->pinned_bo) 701 print_error_buffers(m, "Pinned", 702 error->pinned_bo, 703 error->pinned_bo_count); 704 705 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 706 struct 
drm_i915_error_object *obj; 707 708 if ((obj = error->ring[i].batchbuffer)) { 709 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 710 dev_priv->ring[i].name, 711 obj->gtt_offset); 712 offset = 0; 713 for (page = 0; page < obj->page_count; page++) { 714 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 715 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 716 offset += 4; 717 } 718 } 719 } 720 721 if (error->ring[i].num_requests) { 722 seq_printf(m, "%s --- %d requests\n", 723 dev_priv->ring[i].name, 724 error->ring[i].num_requests); 725 for (j = 0; j < error->ring[i].num_requests; j++) { 726 seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", 727 error->ring[i].requests[j].seqno, 728 error->ring[i].requests[j].jiffies, 729 error->ring[i].requests[j].tail); 730 } 731 } 732 733 if ((obj = error->ring[i].ringbuffer)) { 734 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 735 dev_priv->ring[i].name, 736 obj->gtt_offset); 737 offset = 0; 738 for (page = 0; page < obj->page_count; page++) { 739 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 740 seq_printf(m, "%08x : %08x\n", 741 offset, 742 obj->pages[page][elt]); 743 offset += 4; 744 } 745 } 746 } 747 } 748 749 if (error->overlay) 750 intel_overlay_print_error_state(m, error->overlay); 751 752 if (error->display) 753 intel_display_print_error_state(m, dev, error->display); 754 755 if (refcount_release(&error->ref)) 756 i915_error_state_free(error); 757 758 return 0; 759} 760 761static int 762i915_error_state_write(struct drm_device *dev, const char *str, void *unused) 763{ 764 765 DRM_DEBUG_DRIVER("Resetting error state\n"); 766 767 DRM_LOCK(dev); 768 i915_destroy_error_state(dev); 769 DRM_UNLOCK(dev); 770 771 return (0); 772} 773 774static int i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused) 775{ 776 drm_i915_private_t *dev_priv = dev->dev_private; 777 u16 crstanddelay; 778 779 if (sx_xlock_sig(&dev->dev_struct_lock)) 780 return -EINTR; 781 782 crstanddelay = I915_READ16(CRSTANDVID); 783 784 
DRM_UNLOCK(dev); 785 786 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 787 788 return 0; 789} 790 791static int i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused) 792{ 793 drm_i915_private_t *dev_priv = dev->dev_private; 794 795 if (IS_GEN5(dev)) { 796 u16 rgvswctl = I915_READ16(MEMSWCTL); 797 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 798 799 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 800 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 801 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 802 MEMSTAT_VID_SHIFT); 803 seq_printf(m, "Current P-state: %d\n", 804 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 805 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 806 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 807 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 808 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 809 u32 rpstat, cagf; 810 u32 rpupei, rpcurup, rpprevup; 811 u32 rpdownei, rpcurdown, rpprevdown; 812 int max_freq; 813 814 /* RPSTAT1 is in the GT power well */ 815 if (sx_xlock_sig(&dev->dev_struct_lock)) 816 return -EINTR; 817 gen6_gt_force_wake_get(dev_priv); 818 819 rpstat = I915_READ(GEN6_RPSTAT1); 820 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 821 rpcurup = I915_READ(GEN6_RP_CUR_UP); 822 rpprevup = I915_READ(GEN6_RP_PREV_UP); 823 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 824 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 825 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 826 if (IS_HASWELL(dev)) 827 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 828 else 829 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 830 cagf *= GT_FREQUENCY_MULTIPLIER; 831 832 gen6_gt_force_wake_put(dev_priv); 833 DRM_UNLOCK(dev); 834 835 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 836 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 837 seq_printf(m, "Render p-state ratio: %d\n", 838 (gt_perf_status & 0xff00) >> 8); 839 seq_printf(m, "Render 
p-state VID: %d\n", 840 gt_perf_status & 0xff); 841 seq_printf(m, "Render p-state limit: %d\n", 842 rp_state_limits & 0xff); 843 seq_printf(m, "CAGF: %dMHz\n", cagf); 844 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 845 GEN6_CURICONT_MASK); 846 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 847 GEN6_CURBSYTAVG_MASK); 848 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 849 GEN6_CURBSYTAVG_MASK); 850 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 851 GEN6_CURIAVG_MASK); 852 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 853 GEN6_CURBSYTAVG_MASK); 854 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 855 GEN6_CURBSYTAVG_MASK); 856 857 max_freq = (rp_state_cap & 0xff0000) >> 16; 858 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 859 max_freq * GT_FREQUENCY_MULTIPLIER); 860 861 max_freq = (rp_state_cap & 0xff00) >> 8; 862 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 863 max_freq * GT_FREQUENCY_MULTIPLIER); 864 865 max_freq = rp_state_cap & 0xff; 866 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 867 max_freq * GT_FREQUENCY_MULTIPLIER); 868 } else { 869 seq_printf(m, "no P-state info available\n"); 870 } 871 872 return 0; 873} 874 875static int i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused) 876{ 877 drm_i915_private_t *dev_priv = dev->dev_private; 878 u32 delayfreq; 879 int i; 880 881 if (sx_xlock_sig(&dev->dev_struct_lock)) 882 return -EINTR; 883 884 for (i = 0; i < 16; i++) { 885 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 886 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 887 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 888 } 889 890 DRM_UNLOCK(dev); 891 892 return 0; 893} 894 895static inline int MAP_TO_MV(int map) 896{ 897 return 1250 - (map * 25); 898} 899 900static int i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused) 901{ 902 drm_i915_private_t *dev_priv = dev->dev_private; 903 u32 inttoext; 904 int i; 905 906 if (sx_xlock_sig(&dev->dev_struct_lock)) 907 
return -EINTR; 908 909 for (i = 1; i <= 32; i++) { 910 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 911 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 912 } 913 914 DRM_UNLOCK(dev); 915 916 return 0; 917} 918 919static int ironlake_drpc_info(struct drm_device *dev, struct sbuf *m) 920{ 921 drm_i915_private_t *dev_priv = dev->dev_private; 922 u32 rgvmodectl, rstdbyctl; 923 u16 crstandvid; 924 925 if (sx_xlock_sig(&dev->dev_struct_lock)) 926 return -EINTR; 927 928 rgvmodectl = I915_READ(MEMMODECTL); 929 rstdbyctl = I915_READ(RSTDBYCTL); 930 crstandvid = I915_READ16(CRSTANDVID); 931 932 DRM_UNLOCK(dev); 933 934 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 935 "yes" : "no"); 936 seq_printf(m, "Boost freq: %d\n", 937 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 938 MEMMODE_BOOST_FREQ_SHIFT); 939 seq_printf(m, "HW control enabled: %s\n", 940 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 941 seq_printf(m, "SW control enabled: %s\n", 942 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 943 seq_printf(m, "Gated voltage change: %s\n", 944 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 945 seq_printf(m, "Starting frequency: P%d\n", 946 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 947 seq_printf(m, "Max P-state: P%d\n", 948 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 949 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 950 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 951 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 952 seq_printf(m, "Render standby enabled: %s\n", 953 (rstdbyctl & RCX_SW_EXIT) ? 
"no" : "yes"); 954 seq_printf(m, "Current RS state: "); 955 switch (rstdbyctl & RSX_STATUS_MASK) { 956 case RSX_STATUS_ON: 957 seq_printf(m, "on\n"); 958 break; 959 case RSX_STATUS_RC1: 960 seq_printf(m, "RC1\n"); 961 break; 962 case RSX_STATUS_RC1E: 963 seq_printf(m, "RC1E\n"); 964 break; 965 case RSX_STATUS_RS1: 966 seq_printf(m, "RS1\n"); 967 break; 968 case RSX_STATUS_RS2: 969 seq_printf(m, "RS2 (RC6)\n"); 970 break; 971 case RSX_STATUS_RS3: 972 seq_printf(m, "RC3 (RC6+)\n"); 973 break; 974 default: 975 seq_printf(m, "unknown\n"); 976 break; 977 } 978 979 return 0; 980} 981 982static int gen6_drpc_info(struct drm_device *dev, struct sbuf *m) 983{ 984 struct drm_i915_private *dev_priv = dev->dev_private; 985 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 986 unsigned forcewake_count; 987 int count=0; 988 989 990 if (sx_xlock_sig(&dev->dev_struct_lock)) 991 return -EINTR; 992 993 mtx_lock(&dev_priv->gt_lock); 994 forcewake_count = dev_priv->forcewake_count; 995 mtx_unlock(&dev_priv->gt_lock); 996 997 if (forcewake_count) { 998 seq_printf(m, "RC information inaccurate because somebody " 999 "holds a forcewake reference \n"); 1000 } else { 1001 /* NB: we cannot use forcewake, else we read the wrong values */ 1002 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1003 udelay(10); 1004 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1005 } 1006 1007 gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS); 1008 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1009 1010 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1011 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1012 DRM_UNLOCK(dev); 1013 sx_xlock(&dev_priv->rps.hw_lock); 1014 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1015 sx_xunlock(&dev_priv->rps.hw_lock); 1016 1017 seq_printf(m, "Video Turbo Mode: %s\n", 1018 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1019 seq_printf(m, "HW control enabled: %s\n", 1020 yesno(rpmodectl1 & 
GEN6_RP_ENABLE)); 1021 seq_printf(m, "SW control enabled: %s\n", 1022 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1023 GEN6_RP_MEDIA_SW_MODE)); 1024 seq_printf(m, "RC1e Enabled: %s\n", 1025 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1026 seq_printf(m, "RC6 Enabled: %s\n", 1027 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1028 seq_printf(m, "Deep RC6 Enabled: %s\n", 1029 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1030 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1031 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1032 seq_printf(m, "Current RC state: "); 1033 switch (gt_core_status & GEN6_RCn_MASK) { 1034 case GEN6_RC0: 1035 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1036 seq_printf(m, "Core Power Down\n"); 1037 else 1038 seq_printf(m, "on\n"); 1039 break; 1040 case GEN6_RC3: 1041 seq_printf(m, "RC3\n"); 1042 break; 1043 case GEN6_RC6: 1044 seq_printf(m, "RC6\n"); 1045 break; 1046 case GEN6_RC7: 1047 seq_printf(m, "RC7\n"); 1048 break; 1049 default: 1050 seq_printf(m, "Unknown\n"); 1051 break; 1052 } 1053 1054 seq_printf(m, "Core Power Down: %s\n", 1055 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1056 1057 /* Not exactly sure what this is */ 1058 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1059 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1060 seq_printf(m, "RC6 residency since boot: %u\n", 1061 I915_READ(GEN6_GT_GFX_RC6)); 1062 seq_printf(m, "RC6+ residency since boot: %u\n", 1063 I915_READ(GEN6_GT_GFX_RC6p)); 1064 seq_printf(m, "RC6++ residency since boot: %u\n", 1065 I915_READ(GEN6_GT_GFX_RC6pp)); 1066 1067 seq_printf(m, "RC6 voltage: %dmV\n", 1068 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1069 seq_printf(m, "RC6+ voltage: %dmV\n", 1070 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1071 seq_printf(m, "RC6++ voltage: %dmV\n", 1072 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1073 return 0; 1074} 1075 1076static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused) 1077{ 1078 1079 if (IS_GEN6(dev) || IS_GEN7(dev)) 1080 
return gen6_drpc_info(dev, m); 1081 else 1082 return ironlake_drpc_info(dev, m); 1083} 1084 1085static int i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused) 1086{ 1087 drm_i915_private_t *dev_priv = dev->dev_private; 1088 1089 if (!I915_HAS_FBC(dev)) { 1090 seq_printf(m, "FBC unsupported on this chipset\n"); 1091 return 0; 1092 } 1093 1094 if (intel_fbc_enabled(dev)) { 1095 seq_printf(m, "FBC enabled\n"); 1096 } else { 1097 seq_printf(m, "FBC disabled: "); 1098 switch (dev_priv->no_fbc_reason) { 1099 case FBC_NO_OUTPUT: 1100 seq_printf(m, "no outputs"); 1101 break; 1102 case FBC_STOLEN_TOO_SMALL: 1103 seq_printf(m, "not enough stolen memory"); 1104 break; 1105 case FBC_UNSUPPORTED_MODE: 1106 seq_printf(m, "mode not supported"); 1107 break; 1108 case FBC_MODE_TOO_LARGE: 1109 seq_printf(m, "mode too large"); 1110 break; 1111 case FBC_BAD_PLANE: 1112 seq_printf(m, "FBC unsupported on plane"); 1113 break; 1114 case FBC_NOT_TILED: 1115 seq_printf(m, "scanout buffer not tiled"); 1116 break; 1117 case FBC_MULTIPLE_PIPES: 1118 seq_printf(m, "multiple pipes are enabled"); 1119 break; 1120 case FBC_MODULE_PARAM: 1121 seq_printf(m, "disabled per module param (default off)"); 1122 break; 1123 default: 1124 seq_printf(m, "unknown reason"); 1125 } 1126 seq_printf(m, "\n"); 1127 } 1128 return 0; 1129} 1130 1131static int i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused) 1132{ 1133 drm_i915_private_t *dev_priv = dev->dev_private; 1134 bool sr_enabled = false; 1135 1136 if (HAS_PCH_SPLIT(dev)) 1137 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1138 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1139 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1140 else if (IS_I915GM(dev)) 1141 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1142 else if (IS_PINEVIEW(dev)) 1143 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1144 1145 seq_printf(m, "self-refresh: %s\n", 1146 sr_enabled ? 
/*
 * Report Ironlake (gen5) EMON power/thermal estimates: GMCH temperature
 * and the chipset/graphics power values maintained by the IPS code.
 * Returns -ENODEV on non-gen5 hardware, -EINTR if the struct lock wait
 * is signalled.
 */
static int i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;

	if (!IS_GEN5(dev))
		return -ENODEV;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return -EINTR;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	DRM_UNLOCK(dev);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

/*
 * Print the GPU-frequency to effective-CPU-frequency table reported by
 * the PCODE mailbox (gen6/gen7 only).  GPU steps are in units of
 * GT_FREQUENCY_MULTIPLIER MHz; the returned IA value is in 100 MHz units.
 */
static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
				void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	/* rps.hw_lock serializes PCODE mailbox access and the delay limits. */
	sx_xlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		/* ia_freq is in/out: PCODE overwrites it with the IA value. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
	}

	sx_xunlock(&dev_priv->rps.hw_lock);

	return 0;
}

/*
 * Dump the GFXEC register (undocumented offset 0x112f4).
 * NOTE(review): the value is cast to unsigned long but printed with the
 * signed %ld conversion; harmless for 32-bit register values but %lu
 * would be the matching specifier.
 */
static int i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return -EINTR;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	DRM_UNLOCK(dev);

	return 0;
}

#if 0
/*
 * Dump the raw ACPI OpRegion contents (disabled: seq_write has no sbuf
 * counterpart in this port).
 */
static int i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return -EINTR;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	DRM_UNLOCK(dev);

	return 0;
}
#endif
/*
 * Describe every framebuffer known to the device: first the fbdev
 * console framebuffer (if any), then each user framebuffer on the
 * mode_config list, skipping the console one to avoid printing it twice.
 * Returns -EINTR if the struct lock wait is signalled.
 */
static int i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return -EINTR;

	ifbdev = dev_priv->fbdev;
	if (ifbdev == NULL) {
		DRM_UNLOCK(dev);
		return 0;
	}
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		/* Already printed above as the fbcon framebuffer. */
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

	DRM_UNLOCK(dev);

	return 0;
}

/*
 * Describe the IPS power and render context objects, if allocated.
 * Runs under the mode_config mutex; returns -EINTR if the lock wait
 * is signalled.
 */
static int i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = sx_xlock_sig(&dev->mode_config.mutex);
	if (ret != 0)
		return -EINTR;

	if (dev_priv->ips.pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->ips.renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_printf(m, "\n");
	}

	sx_xunlock(&dev->mode_config.mutex);

	return 0;
}
/*
 * Report the current forcewake reference count (sampled under the GT
 * spin lock).  A non-zero count means some code path is holding the GT
 * power well awake.
 */
static int i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
					  void *data)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	mtx_lock(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	mtx_unlock(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

/*
 * Translate an I915_BIT_6_SWIZZLE_* constant to a human-readable
 * description of which address bits are XORed into bit 6.
 * "bug" indicates a value outside the known enumeration.
 */
static const char *swizzle_string(unsigned swizzle)
{

	switch(swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

/*
 * Dump the detected bit-6 swizzle modes and the memory-channel /
 * tiling-control registers they were derived from (DCC et al. on
 * gen3/4, MAD_DIMM/TILECTL/ARB_MODE on gen6/7).
 * Returns -EINTR if the struct lock wait is signalled.
 */
static int i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = sx_xlock_sig(&dev->dev_struct_lock);
	if (ret)
		return -EINTR;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		/*
		 * NOTE(review): label says "DDC" but the value read is the
		 * DCC register; same in upstream Linux -- confirm before
		 * changing the output string.
		 */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	DRM_UNLOCK(dev);

	return 0;
}
/*
 * Dump per-ring PPGTT (per-process GTT) state: the GFX_MODE register
 * (per-device on gen6, per-ring on gen7), each ring's page-directory
 * registers, and the aliasing PPGTT's PD offset if one is configured.
 * Returns -EINTR if the struct lock wait is signalled.
 */
static int i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;


	ret = sx_xlock_sig(&dev->dev_struct_lock);
	if (ret)
		return -EINTR;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		/* On gen7 GFX_MODE moved into per-ring register space. */
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_printf(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	DRM_UNLOCK(dev);

	return 0;
}

/*
 * Dump the ValleyView DPIO (display PHY) registers via the sideband
 * interface.  No-op with a message on non-VLV hardware; returns -EINTR
 * if the mode_config mutex wait is signalled.
 */
static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;


	if (!IS_VALLEYVIEW(dev)) {
		seq_printf(m, "unsupported\n");
		return 0;
	}

	ret = sx_xlock_sig(&dev->mode_config.mutex);
	if (ret)
		return -EINTR;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	sx_xunlock(&dev->mode_config.mutex);

	return 0;
}
/*
 * sysctl handler: read or force the GPU "wedged" state.  Reading
 * returns the current wedged counter; writing any value triggers a
 * manual GPU error / reset path via i915_handle_error().
 * Returns EBUSY if the driver private data is not yet attached.
 */
static int
i915_wedged(SYSCTL_HANDLER_ARGS)
{
	struct drm_device *dev = arg1;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int val = 1, ret;

	if (dev_priv == NULL)
		return (EBUSY);

	val = atomic_read(&dev_priv->mm.wedged);
	ret = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access (no new value supplied): done. */
	if (ret != 0 || !req->newptr)
		return (ret);

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);

	return (ret);
}

/*
 * sysctl handler: read or set the ring-stop debug mask.  Each bit
 * requests the corresponding ring be stopped (used to test hang
 * detection).  The mask is updated under the RPS hardware lock.
 */
static int
i915_ring_stop(SYSCTL_HANDLER_ARGS)
{
	struct drm_device *dev = arg1;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int val = 0, ret;

	if (dev_priv == NULL)
		return (EBUSY);

	val = dev_priv->stop_rings;
	ret = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access (no new value supplied): done. */
	if (ret != 0 || !req->newptr)
		return (ret);

	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);

	sx_xlock(&dev_priv->rps.hw_lock);
	dev_priv->stop_rings = val;
	sx_xunlock(&dev_priv->rps.hw_lock);

	return (0);
}
/*
 * sysctl handler: read or set the maximum GPU turbo frequency in MHz.
 * The value is stored internally in units of GT_FREQUENCY_MULTIPLIER
 * MHz (rps.max_delay).  Gen6/gen7 only; EBUSY before attach, ENODEV
 * on unsupported hardware.
 */
static int
i915_max_freq(SYSCTL_HANDLER_ARGS)
{
	struct drm_device *dev = arg1;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int val = 1, ret;

	if (dev_priv == NULL)
		return (EBUSY);
	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return (ENODEV);

	sx_xlock(&dev_priv->rps.hw_lock);

	val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	ret = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access (no new value supplied): done. */
	if (ret != 0 || !req->newptr) {
		sx_xunlock(&dev_priv->rps.hw_lock);
		return (ret);
	}

	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;

	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
	sx_xunlock(&dev_priv->rps.hw_lock);

	return (ret);
}

/*
 * sysctl handler: read or set the minimum GPU turbo frequency in MHz.
 * Mirror image of i915_max_freq(); same units and locking.
 */
static int
i915_min_freq(SYSCTL_HANDLER_ARGS)
{
	struct drm_device *dev = arg1;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int val = 1, ret;

	if (dev_priv == NULL)
		return (EBUSY);
	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return (ENODEV);

	sx_xlock(&dev_priv->rps.hw_lock);

	val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	ret = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access (no new value supplied): done. */
	if (ret != 0 || !req->newptr) {
		sx_xunlock(&dev_priv->rps.hw_lock);
		return (ret);
	}

	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;

	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
	sx_xunlock(&dev_priv->rps.hw_lock);

	return (ret);
}
/*
 * sysctl handler: read or set the uncore (MBC) snoop cache-sharing
 * policy field of GEN6_MBCUNIT_SNPCR.  Valid values are 0..3.
 * Gen6/gen7 only; EBUSY before attach, ENODEV on unsupported hardware.
 *
 * NOTE(review): the register read for the current value is done under
 * rps.hw_lock, but the read-modify-write on the new value is not --
 * presumably benign for a debug knob, but confirm.
 */
static int
i915_cache_sharing(SYSCTL_HANDLER_ARGS)
{
	struct drm_device *dev = arg1;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int val = 1, ret;

	if (dev_priv == NULL)
		return (EBUSY);
	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return (ENODEV);

	sx_xlock(&dev_priv->rps.hw_lock);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	sx_xunlock(&dev_priv->rps.hw_lock);

	val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
	ret = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access (no new value supplied): done. */
	if (ret != 0 || !req->newptr)
		return (ret);

	if (val < 0 || val > 3)
		return (EINVAL);

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return (0);
}

/*
 * Table of read-mostly debug sysctl nodes exposed under dev.drm.N.info.
 * ->ptr renders the node into an sbuf on read; ->ptr_w, when non-NULL,
 * accepts a written string (the node is then created read-write).
 * ->data is an optional constant passed through to the callback.
 */
static struct i915_info_sysctl_list {
	const char *name;
	int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data);
	int (*ptr_w)(struct drm_device *dev, const char *str, void *data);
	int flags;
	void *data;
} i915_info_sysctl_list[] = {
	{"i915_capabilities", i915_capabilities, NULL, 0},
	{"i915_gem_objects", i915_gem_object_info, NULL, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, NULL, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, NULL, 0, (void *)PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, NULL, 0, (void *)ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, NULL, 0, (void *)INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, NULL, 0},
	{"i915_gem_request", i915_gem_request_info, NULL, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, NULL, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, NULL, 0},
	{"i915_gem_interrupt", i915_interrupt_info, NULL, 0},
	{"i915_gem_hws", i915_hws_info, NULL, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, NULL, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, NULL, 0, (void *)VCS},
	{"i915_error_state", i915_error_state, i915_error_state_write, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, NULL, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, NULL, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, NULL, 0},
	{"i915_inttoext_table", i915_inttoext_table, NULL, 0},
	{"i915_drpc_info", i915_drpc_info, NULL, 0},
	{"i915_emon_status", i915_emon_status, NULL, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, NULL, 0},
	{"i915_gfxec", i915_gfxec, NULL, 0},
	{"i915_fbc_status", i915_fbc_status, NULL, 0},
	{"i915_sr_status", i915_sr_status, NULL, 0},
#if 0
	{"i915_opregion", i915_opregion, NULL, 0},
#endif
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, NULL, 0},
	{"i915_context_status", i915_context_status, NULL, 0},
	{"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info,
	    NULL, 0},
	{"i915_swizzle_info", i915_swizzle_info, NULL, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, NULL, 0},
	{"i915_dpio", i915_dpio_info, NULL, 0},
};
/*
 * Per-oid glue binding a drm device to one entry (by index) of
 * i915_info_sysctl_list, plus that entry's pass-through argument.
 */
struct i915_info_sysctl_thunk {
	struct drm_device *dev;
	int idx;
	void *arg;
};

/*
 * Generic sysctl handler for the "info" nodes.  Reads run the entry's
 * ->ptr callback into an sbuf that streams to userland; writes (only
 * reachable for entries registered with a non-NULL ->ptr_w) copy in at
 * most 2048 bytes, NUL-terminate them and hand the string to ->ptr_w.
 */
static int
i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct sbuf m;
	struct i915_info_sysctl_thunk *thunk;
	struct drm_device *dev;
	drm_i915_private_t *dev_priv;
	char *p;
	int error;

	thunk = arg1;
	dev = thunk->dev;
	dev_priv = dev->dev_private;
	if (dev_priv == NULL)
		return (EBUSY);
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&m, NULL, 128, req);
	/* Callbacks return negative errno; sysctl expects positive. */
	error = -i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
	    thunk->arg);
	if (error == 0)
		error = sbuf_finish(&m);
	sbuf_delete(&m);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Cap writes to a sane size for a debug knob. */
	if (req->newlen > 2048)
		return (E2BIG);
	p = malloc(req->newlen + 1, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, p, req->newlen);
	if (error != 0)
		goto out;
	p[req->newlen] = '\0';
	error = i915_info_sysctl_list[thunk->idx].ptr_w(dev, p,
	    thunk->arg);
out:
	free(p, M_TEMP);
	return (error);
}
extern int i915_intr_pf;
extern long i915_gem_wired_pages_cnt;

/*
 * Create the i915 debug sysctl tree under "top": an "info" node holding
 * one child per i915_info_sysctl_list entry (each backed by a thunk so
 * the shared handler can find the device and callback), plus the
 * wedged/max_freq/min_freq/cache_sharing/ring_stop/intr_pf knobs.
 *
 * The thunk array is stored in dev->sysctl_private and released by
 * i915_sysctl_cleanup().  Returns 0 or a negative errno.
 *
 * NOTE(review): this returns negative errno values (-ENOMEM) while the
 * sysctl handlers above return positive ones -- confirm callers expect
 * the negative convention here.
 */
int
i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid, *info;
	struct i915_info_sysctl_thunk *thunks;
	int i, error;

	thunks = malloc(sizeof(*thunks) * ARRAY_SIZE(i915_info_sysctl_list),
	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
	for (i = 0; i < ARRAY_SIZE(i915_info_sysctl_list); i++) {
		thunks[i].dev = dev;
		thunks[i].idx = i;
		thunks[i].arg = i915_info_sysctl_list[i].data;
	}
	/* Owned by the device from here on; freed in i915_sysctl_cleanup(). */
	dev->sysctl_private = thunks;
	info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info",
	    CTLFLAG_RW, NULL, NULL);
	if (info == NULL)
		return (-ENOMEM);
	for (i = 0; i < ARRAY_SIZE(i915_info_sysctl_list); i++) {
		/* Entries with a write callback are registered read-write. */
		oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
		    i915_info_sysctl_list[i].name, CTLTYPE_STRING |
		    (i915_info_sysctl_list[i].ptr_w != NULL ? CTLFLAG_RW :
		    CTLFLAG_RD),
		    &thunks[i], 0, i915_info_sysctl_handler, "A", NULL);
		if (oid == NULL)
			return (-ENOMEM);
	}
	/* NOTE(review): result of this SYSCTL_ADD_LONG is not checked,
	 * unlike every other node added here -- confirm intentional. */
	oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
	    "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt,
	    NULL);
	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0,
	    i915_wedged, "I", NULL);
	if (oid == NULL)
		return (-ENOMEM);
	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq,
	    "I", NULL);
	if (oid == NULL)
		return (-ENOMEM);
	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "min_freq",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_min_freq,
	    "I", NULL);
	if (oid == NULL)
		return (-ENOMEM);
	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
	    0, i915_cache_sharing, "I", NULL);
	if (oid == NULL)
		return (-ENOMEM);
	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "ring_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
	    0, i915_ring_stop, "I", NULL);
	if (oid == NULL)
		return (-ENOMEM);
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf",
	    CTLFLAG_RW, &i915_intr_pf, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);

	error = drm_add_busid_modesetting(dev, ctx, top);
	if (error != 0)
		return (error);

	return (0);
}
CTLFLAG_RW : 1729 CTLFLAG_RD), 1730 &thunks[i], 0, i915_info_sysctl_handler, "A", NULL); 1731 if (oid == NULL) 1732 return (-ENOMEM); 1733 } 1734 oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO, 1735 "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt, 1736 NULL); 1737 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged", 1738 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, 1739 i915_wedged, "I", NULL); 1740 if (oid == NULL) 1741 return (-ENOMEM); 1742 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq", 1743 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq, 1744 "I", NULL); 1745 if (oid == NULL) 1746 return (-ENOMEM); 1747 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "min_freq", 1748 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_min_freq, 1749 "I", NULL); 1750 if (oid == NULL) 1751 return (-ENOMEM); 1752 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, 1753 "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 1754 0, i915_cache_sharing, "I", NULL); 1755 if (oid == NULL) 1756 return (-ENOMEM); 1757 oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, 1758 "ring_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 1759 0, i915_ring_stop, "I", NULL); 1760 if (oid == NULL) 1761 return (-ENOMEM); 1762 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf", 1763 CTLFLAG_RW, &i915_intr_pf, 0, NULL); 1764 if (oid == NULL) 1765 return (-ENOMEM); 1766 1767 error = drm_add_busid_modesetting(dev, ctx, top); 1768 if (error != 0) 1769 return (error); 1770 1771 return (0); 1772} 1773 1774void 1775i915_sysctl_cleanup(struct drm_device *dev) 1776{ 1777 1778 free(dev->sysctl_private, DRM_MEM_DRIVER); 1779} 1780 1781//#endif /* CONFIG_DEBUG_FS */ 1782