// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/string_helpers.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "intel_guc_capture.h"
#include "intel_guc_log.h"
#include "intel_guc_print.h"

#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_2M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_16M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_1M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_2M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
#else
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_8K
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_64K
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
#endif

static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);

struct guc_log_section {
	u32 max;
	u32 flag;
	u32 default_val;
	const char *name;
};

static void _guc_log_init_sizes(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
		{
			GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
			GUC_LOG_LOG_ALLOC_UNITS,
			GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE,
			"crash dump"
		},
		{
			GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT,
			GUC_LOG_LOG_ALLOC_UNITS,
			GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE,
			"debug",
		},
		{
			GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT,
			GUC_LOG_CAPTURE_ALLOC_UNITS,
			GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE,
			"capture",
		}
	};
	int i;

	for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++)
		log->sizes[i].bytes = sections[i].default_val;

	/* If debug size > 1MB then bump default crash size to keep the same units */
	if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M &&
	    GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE < SZ_1M)
		log->sizes[GUC_LOG_SECTIONS_CRASH].bytes = SZ_1M;

	/* Prepare the GuC API structure fields: */
	for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) {
		/* Convert to correct units */
		if ((log->sizes[i].bytes % SZ_1M) == 0) {
			log->sizes[i].units = SZ_1M;
			log->sizes[i].flag = sections[i].flag;
		} else {
			log->sizes[i].units = SZ_4K;
			log->sizes[i].flag = 0;
		}

		if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
			guc_err(guc, "Mis-aligned log %s size: 0x%X vs 0x%X!\n",
				sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
		log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;

		if (!log->sizes[i].count) {
			guc_err(guc, "Zero log %s size!\n", sections[i].name);
		} else {
			/* Size is +1 unit */
			log->sizes[i].count--;
		}

		/* Clip to field size */
		if (log->sizes[i].count > sections[i].max) {
			guc_err(guc, "log %s size too large: %d vs %d!\n",
				sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
			log->sizes[i].count = sections[i].max;
		}
	}

	if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
		guc_err(guc, "Unit mismatch for crash and debug sections: %d vs %d!\n",
			log->sizes[GUC_LOG_SECTIONS_CRASH].units,
			log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
		log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
		log->sizes[GUC_LOG_SECTIONS_CRASH].count = 0;
	}

	log->sizes_initialised = true;
}

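/*
 * Initialise the section sizes once on first use; subsequent callers see
 * the values cached in log->sizes[].
 */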
static void guc_log_init_sizes(struct intel_guc_log *log)
{
	if (log->sizes_initialised)
		return;

	_guc_log_init_sizes(log);
}

static u32 intel_guc_log_section_size_crash(struct intel_guc_log *log)
{
	guc_log_init_sizes(log);

	return log->sizes[GUC_LOG_SECTIONS_CRASH].bytes;
}

static u32 intel_guc_log_section_size_debug(struct intel_guc_log *log)
{
	guc_log_init_sizes(log);

	return log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes;
}

u32 intel_guc_log_section_size_capture(struct intel_guc_log *log)
{
	guc_log_init_sizes(log);

	return log->sizes[GUC_LOG_SECTIONS_CAPTURE].bytes;
}

static u32 intel_guc_log_size(struct intel_guc_log *log)
{
	/*
	 * GuC Log buffer Layout:
	 *
	 * NB: Ordering must follow "enum guc_log_buffer_type".
	 *
	 *  +===============================+ 00B
	 *  |      Debug state header       |
	 *  +-------------------------------+ 32B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 64B
	 *  |     Capture state header      |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |          Debug logs           |
	 *  +===============================+ + DEBUG_SIZE
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |         Capture logs          |
	 *  +===============================+ + CAPTURE_SIZE
	 */
	return PAGE_SIZE +
	       intel_guc_log_section_size_crash(log) +
	       intel_guc_log_section_size_debug(log) +
	       intel_guc_log_section_size_capture(log);
}

/**
 * DOC: GuC firmware log
 *
 * The firmware log is enabled by setting i915.guc_log_level to a positive
 * level. Log data can be read via the debugfs file i915_guc_log_dump.
 * Reading from i915_guc_load_status will print out the firmware loading
 * status and scratch register values.
 */

static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
		GUC_DEBUG_LOG_BUFFER
	};

	return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}

static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

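/*
 * relayfs plumbing: the callbacks below back the debugfs "guc_log" relay
 * channel and are only built on Linux.
 */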
#ifdef __linux__

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer, relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between Consumer
	 * and Producer. In overwrite mode, there is a possibility of getting
	 * inconsistent/garbled data, as the producer could be writing to the
	 * same sub buffer from which the Consumer is reading. This can't be
	 * avoided unless the Consumer is fast enough and can always run in
	 * tandem with the Producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to User, through
	 * which it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static const struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

#endif /* __linux__ */

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	STUB();
#ifdef notyet
	/*
	 * Make sure the updates made in the sub buffer are visible when
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size -
		      intel_guc_log_section_size_capture(log));

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
#endif
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	STUB();
	return NULL;
#ifdef notyet
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used relay_write() to indirectly copy
	 * the data, but that would have been a bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer, which cannot be
	 * done without using relay_reserve() along with relay_write(). So it's
	 * better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
#endif
}

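/*
 * Track GuC-reported buffer-full events; buffer_full_cnt is only a 4-bit
 * counter, so accumulate it into a wider software counter and account for
 * wrap-around.
 */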
bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
				      enum guc_log_buffer_type type,
				      unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4 bit counter */
			log->stats[type].sampled_overflow += 16;
		}

		guc_notice_ratelimited(log_to_guc(log), "log buffer overflow\n");
	}

	return overflow;
}

unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
					   enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_DEBUG_LOG_BUFFER:
		return intel_guc_log_section_size_debug(log);
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return intel_guc_log_section_size_crash(log);
	case GUC_CAPTURE_LOG_BUFFER:
		return intel_guc_log_section_size_capture(log);
	default:
		MISSING_CASE(type);
	}

	return 0;
}

size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
				       enum guc_log_buffer_type type)
{
	enum guc_log_buffer_type i;
	size_t offset = PAGE_SIZE; /* for the log_buffer_states */

	for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
		if (i == type)
			break;
		offset += intel_guc_get_log_buffer_size(log, i);
	}

	return offset;
}

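/*
 * Copy the debug and crash-dump sections of the shared GuC log buffer into
 * the current relay sub-buffer and update the shared read pointers, so GuC
 * can keep writing while the snapshot is consumed from relay.
 */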
static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (guc_WARN_ON(guc, !intel_guc_log_relay_created(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	src_data = log->buf_addr;
	log_buf_state = src_data;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Use a rate-limited message to avoid a deluge; logs might be
		 * getting consumed by User at a slow rate.
		 */
		guc_err_ratelimited(guc, "no sub-buffer to copy general logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	/* For relay logging, we exclude error state capture */
	for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = intel_guc_get_log_buffer_size(log, type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware,
		 * after sending the flush interrupt to Host. For consistency,
		 * set the write pointer value to the same value as
		 * sampled_write_ptr in the snapshot buffer.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			guc_err(guc, "invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}

static void copy_debug_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_copy_debuglogs_for_relay(log);
}

static int guc_log_relay_map(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	if (!log->vma || !log->buf_addr)
		return -ENODEV;

	/*
	 * WC vmalloc mapping of log buffer pages was done at
	 * GuC Log Init time, but let's keep a ref for book-keeping
	 */
	i915_gem_object_get(log->vma->obj);
	log->relay.buf_in_use = true;

	return 0;
}

static void guc_log_relay_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_put(log->vma->obj);
	log->relay.buf_in_use = false;
}

void intel_guc_log_init_early(struct intel_guc_log *log)
{
	rw_init(&log->relay.lock, "rllk");
	INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
	log->relay.started = false;
}

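/*
 * Create the "guc_log" relay channel under the GuC debugfs node (currently
 * stubbed out here, see the "notyet" block below).
 */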
static int guc_log_relay_create(struct intel_guc_log *log)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);
	GEM_BUG_ON(!log->vma);

	/*
	 * Keep the size of sub buffers the same as the shared log buffer,
	 * minus the error-state-capture logs, which are not part of the
	 * GuC log events relayed here.
	 */
	subbuf_size = log->vma->size - intel_guc_log_section_size_capture(log);

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	if (!guc->dbgfs_node)
		return -ENOENT;

	guc_log_relay_chan = relay_open("guc_log",
					guc->dbgfs_node,
					subbuf_size, n_subbufs,
					&relay_callbacks, i915);
	if (!guc_log_relay_chan) {
		guc_err(guc, "Couldn't create relay channel for logging\n");

		ret = -ENOMEM;
		return ret;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
#endif
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	STUB();
#ifdef notyet
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
#endif
}

static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	_guc_log_copy_debuglogs_for_relay(log);

	/*
	 * Generally device is expected to be active only at this
	 * time, so get/put should be really quick.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}

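/*
 * Resolve the effective GuC log level from the i915.guc_log_level module
 * parameter, falling back to a build-config default when the value is
 * negative or out of range.
 */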
static u32 __get_default_log_level(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* A negative value means "use platform/config default" */
	if (i915->params.guc_log_level < 0) {
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
	}

	if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
		guc_warn(guc, "Log verbosity param out of range: %d > %d!\n",
			 i915->params.guc_log_level, GUC_LOG_LEVEL_MAX);
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
	}

	GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
	GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
	return i915->params.guc_log_level;
}

int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	void *vaddr;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	guc_log_size = intel_guc_log_size(log);

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;
	/*
	 * Create a WC (Uncached for read) vmalloc mapping up front for
	 * immediate access to data from memory during critical events
	 * such as error capture.
	 */
	vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		i915_vma_unpin_and_release(&log->vma, 0);
		goto err;
	}
	log->buf_addr = vaddr;

	log->level = __get_default_log_level(log);
	guc_dbg(guc, "guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
		log->level, str_enabled_disabled(log->level),
		str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
		GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

	return 0;

err:
	guc_err(guc, "Failed to allocate or map log buffer %pe\n", ERR_PTR(ret));
	return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
	log->buf_addr = NULL;
	i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP);
}

int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels from 0 to max; we use 0 as an
	 * indication that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&i915->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		guc_dbg(guc, "guc_log_control action failed %pe\n", ERR_PTR(ret));
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
	return log->buf_addr;
}

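/*
 * Open the relay channel and map the log buffer so userspace can start
 * streaming GuC debug logs from debugfs.
 */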
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_created(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_relay_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}

int intel_guc_log_relay_start(struct intel_guc_log *log)
{
	if (log->relay.started)
		return -EEXIST;

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(system_highpri_wq, &log->relay.flush_work);

	log->relay.started = true;

	return 0;
}

void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	intel_wakeref_t wakeref;

	if (!log->relay.started)
		return;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete otherwise forceful flush may not actually happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated log buffer by now, so copy it */
	guc_log_copy_debuglogs_for_relay(log);
}

/*
 * Stops the relay log. Called from intel_guc_log_relay_close(), so no
 * possibility of race with start/flush since relay_write cannot race
 * relay_close.
 */
static void guc_log_relay_stop(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!log->relay.started)
		return;

	intel_synchronize_irq(i915);

	flush_work(&log->relay.flush_work);

	log->relay.started = false;
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	guc_log_relay_stop(log);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_created(log));
	guc_log_relay_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

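/* Queue the relay copy worker in response to a GuC log-buffer-flush event. */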
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	if (log->relay.started)
		queue_work(system_highpri_wq, &log->relay.flush_work);
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_DEBUG_LOG_BUFFER:
		return "DEBUG";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	case GUC_CAPTURE_LOG_BUFFER:
		return "CAPTURE";
	default:
		MISSING_CASE(type);
	}

	return "";
}

/**
 * intel_guc_log_info - dump information about GuC log relay
 * @log: the GuC log
 * @p: the &drm_printer
 *
 * Pretty printer for GuC log info
 */
void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
{
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		drm_puts(p, "GuC log relay not created\n");
		return;
	}

	drm_puts(p, "GuC logging stats:\n");

	drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);

	for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}

/**
 * intel_guc_log_dump - dump the contents of the GuC log
 * @log: the GuC log
 * @p: the &drm_printer
 * @dump_load_err: dump the log saved on GuC load error
 *
 * Pretty printer for the GuC log
 */
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
		       bool dump_load_err)
{
	struct intel_guc *guc = log_to_guc(log);
	struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
	struct drm_i915_gem_object *obj = NULL;
	void *map;
	u32 *page;
	int i, j;

	if (!intel_guc_is_supported(guc))
		return -ENODEV;

	if (dump_load_err)
		obj = uc->load_err_log;
	else if (guc->log.vma)
		obj = guc->log.vma->obj;

	if (!obj)
		return 0;

	page = (u32 *)__get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	intel_guc_dump_time_info(guc, p);

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		guc_dbg(guc, "Failed to pin log object: %pe\n", map);
		drm_puts(p, "(log data inaccessible)\n");
		free_page((unsigned long)page);
		return PTR_ERR(map);
	}

	for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
		if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
			memcpy(page, map + i, PAGE_SIZE);

		for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
			drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(page + j + 0), *(page + j + 1),
				   *(page + j + 2), *(page + j + 3));
	}

	drm_puts(p, "\n");

	i915_gem_object_unpin_map(obj);
	free_page((unsigned long)page);

	return 0;
}